filename
stringlengths
3
9
code
stringlengths
4
1.87M
249913.c
/* Copyright (c) 2013, Linaro Limited
 * Copyright (c) 2013, Nokia Solutions and Networks
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* ODP "loop" pktio driver: a pure-software loopback interface.
 * Transmit enqueues buffer headers onto an internal ODP queue and
 * receive dequeues them again, so packets sent on the interface come
 * straight back in.  No hardware or OS networking is involved.
 */

#include "config.h"

#include <odp_api.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_internal.h>
#include <odp_debug_internal.h>
#include <odp/api/hints.h>
#include <odp_queue_if.h>

#include <protocols/eth.h>
#include <protocols/ip.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>

/* MAC address for the "loop" interface */
static const char pktio_loop_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x01};

/* Forward declaration: open() resets statistics via this helper. */
static int loopback_stats_reset(pktio_entry_t *pktio_entry);

/* Open the loop interface.
 *
 * Only the literal device name "loop" is accepted.  Creates the
 * internal queue that carries packets from TX back to RX and zeroes
 * the statistics.  The pool argument is unused: packets stay in
 * whatever pool they were allocated from.
 *
 * Returns 0 on success, -1 on bad name or queue creation failure.
 */
static int loopback_open(odp_pktio_t id, pktio_entry_t *pktio_entry,
			 const char *devname, odp_pool_t pool ODP_UNUSED)
{
	if (strcmp(devname, "loop"))
		return -1;

	char loopq_name[ODP_QUEUE_NAME_LEN];

	/* Queue name derived from the pktio handle, so each interface
	 * instance gets a unique internal queue. */
	snprintf(loopq_name, sizeof(loopq_name), "%" PRIu64 "-pktio_loopq",
		 odp_pktio_to_u64(id));
	pktio_entry->s.pkt_loop.loopq = odp_queue_create(loopq_name, NULL);

	if (pktio_entry->s.pkt_loop.loopq == ODP_QUEUE_INVALID)
		return -1;

	loopback_stats_reset(pktio_entry);

	return 0;
}

/* Close the loop interface: destroy the internal TX->RX queue. */
static int loopback_close(pktio_entry_t *pktio_entry)
{
	return odp_queue_destroy(pktio_entry->s.pkt_loop.loopq);
}

/* Receive up to 'len' packets by dequeuing them from the loop queue.
 *
 * Runs under the RX ticketlock so only one thread drains the queue at
 * a time.  Each dequeued packet is either classified (when
 * classification is enabled on the interface) or parsed to the
 * configured layer, timestamped if pktin timestamping is on, and
 * written into pkts[].  Packets the classifier rejects (or that fail
 * the pool-copy step) are freed and counted as errors.
 *
 * Returns the number of packets stored in pkts[].
 */
static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
			 odp_packet_t pkts[], int len)
{
	int nbr, i;
	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
	queue_t queue;
	odp_packet_hdr_t *pkt_hdr;
	odp_packet_t pkt;
	odp_time_t ts_val;
	odp_time_t *ts = NULL;
	int num_rx = 0;
	int failed = 0;

	/* Clamp to the size of the local header table. */
	if (odp_unlikely(len > QUEUE_MULTI_MAX))
		len = QUEUE_MULTI_MAX;

	odp_ticketlock_lock(&pktio_entry->s.rxl);

	queue = queue_fn->from_ext(pktio_entry->s.pkt_loop.loopq);
	nbr = queue_fn->deq_multi(queue, hdr_tbl, len);

	/* One timestamp is taken for the whole burst when any pktin
	 * timestamping mode is enabled. */
	if (pktio_entry->s.config.pktin.bit.ts_all ||
	    pktio_entry->s.config.pktin.bit.ts_ptp) {
		ts_val = odp_time_global();
		ts = &ts_val;
	}

	for (i = 0; i < nbr; i++) {
		uint32_t pkt_len;

		pkt = packet_from_buf_hdr(hdr_tbl[i]);
		pkt_len = odp_packet_len(pkt);
		pkt_hdr = odp_packet_hdr(pkt);

		if (pktio_cls_enabled(pktio_entry)) {
			odp_packet_t new_pkt;
			odp_pool_t new_pool;
			uint8_t *pkt_addr;
			uint8_t buf[PACKET_PARSE_SEG_LEN];
			int ret;
			uint32_t seg_len = odp_packet_seg_len(pkt);

			/* Make sure there is enough data for the packet
			 * parser in the case of a segmented packet. */
			if (odp_unlikely(seg_len < PACKET_PARSE_SEG_LEN &&
					 pkt_len > PACKET_PARSE_SEG_LEN)) {
				odp_packet_copy_to_mem(pkt, 0,
						       PACKET_PARSE_SEG_LEN,
						       buf);
				seg_len = PACKET_PARSE_SEG_LEN;
				pkt_addr = buf;
			} else {
				pkt_addr = odp_packet_data(pkt);
			}

			ret = cls_classify_packet(pktio_entry, pkt_addr,
						  pkt_len, seg_len,
						  &new_pool, pkt_hdr);
			if (ret) {
				/* Classifier dropped the packet. */
				failed++;
				odp_packet_free(pkt);
				continue;
			}

			/* Classifier may direct the packet to a different
			 * pool; copy it there and free the original. */
			if (new_pool != odp_packet_pool(pkt)) {
				new_pkt = odp_packet_copy(pkt, new_pool);

				odp_packet_free(pkt);

				if (new_pkt == ODP_PACKET_INVALID) {
					failed++;
					continue;
				}
				pkt = new_pkt;
			}
		} else {
			packet_parse_layer(pkt_hdr,
					   pktio_entry->s.config.parser.layer);
		}

		packet_set_ts(pkt_hdr, ts);
		pkt_hdr->input = pktio_entry->s.handle;
		pktio_entry->s.stats.in_octets += pkt_len;
		pkts[num_rx++] = pkt;
	}

	pktio_entry->s.stats.in_errors += failed;
	/* NOTE(review): num_rx only counts delivered packets (failed ones
	 * were skipped above), so subtracting 'failed' here appears to
	 * under-count unicast packets when drops occur — confirm against
	 * upstream ODP before relying on this counter. */
	pktio_entry->s.stats.in_ucast_pkts += num_rx - failed;

	odp_ticketlock_unlock(&pktio_entry->s.rxl);

	return num_rx;
}

/* Send up to 'len' packets by enqueuing their buffer headers onto the
 * loop queue.
 *
 * Runs under the TX ticketlock.  Byte counts are accumulated before
 * the enqueue; statistics are only updated when at least one packet
 * was accepted.  Note that 'bytes' counts the whole burst even when
 * the queue accepts only part of it.
 *
 * Returns the number of packets enqueued, or -1 on enqueue failure.
 */
static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
			 const odp_packet_t pkt_tbl[], int len)
{
	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
	queue_t queue;
	int i;
	int ret;
	uint32_t bytes = 0;

	if (odp_unlikely(len > QUEUE_MULTI_MAX))
		len = QUEUE_MULTI_MAX;

	for (i = 0; i < len; ++i) {
		hdr_tbl[i] = packet_to_buf_hdr(pkt_tbl[i]);
		bytes += odp_packet_len(pkt_tbl[i]);
	}

	odp_ticketlock_lock(&pktio_entry->s.txl);

	queue = queue_fn->from_ext(pktio_entry->s.pkt_loop.loopq);
	ret = queue_fn->enq_multi(queue, hdr_tbl, len);

	if (ret > 0) {
		pktio_entry->s.stats.out_ucast_pkts += ret;
		pktio_entry->s.stats.out_octets += bytes;
	} else {
		ODP_DBG("queue enqueue failed %i\n", ret);
		ret = -1;
	}

	odp_ticketlock_unlock(&pktio_entry->s.txl);

	return ret;
}

/* Report the interface MTU. */
static uint32_t loopback_mtu_get(pktio_entry_t *pktio_entry ODP_UNUSED)
{
	/* the loopback interface imposes no maximum transmit size limit */
	return INT_MAX;
}

/* Copy the fixed loopback MAC address into mac_addr; returns its length. */
static int loopback_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED,
				 void *mac_addr)
{
	memcpy(mac_addr, pktio_loop_mac, ETH_ALEN);
	return ETH_ALEN;
}

/* Report link status. */
static int loopback_link_status(pktio_entry_t *pktio_entry ODP_UNUSED)
{
	/* loopback interfaces are always up */
	return 1;
}

/* Fill in the capability structure: one input and one output queue,
 * settable promiscuous mode, and both pktin timestamping modes. */
static int loopback_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
			       odp_pktio_capability_t *capa)
{
	memset(capa, 0, sizeof(odp_pktio_capability_t));

	capa->max_input_queues = 1;
	capa->max_output_queues = 1;
	capa->set_op.op.promisc_mode = 1;

	odp_pktio_config_init(&capa->config);
	capa->config.pktin.bit.ts_all = 1;
	capa->config.pktin.bit.ts_ptp = 1;
	return 0;
}

/* Record the requested promiscuous mode (a no-op otherwise: the loop
 * interface receives everything it sent regardless). */
static int loopback_promisc_mode_set(pktio_entry_t *pktio_entry,
				     odp_bool_t enable)
{
	pktio_entry->s.pkt_loop.promisc = enable;
	return 0;
}

/* Return the stored promiscuous mode flag as 0 or 1. */
static int loopback_promisc_mode_get(pktio_entry_t *pktio_entry)
{
	return pktio_entry->s.pkt_loop.promisc ? 1 : 0;
}

/* Copy the per-interface statistics out to the caller. */
static int loopback_stats(pktio_entry_t *pktio_entry,
			  odp_pktio_stats_t *stats)
{
	memcpy(stats, &pktio_entry->s.stats, sizeof(odp_pktio_stats_t));
	return 0;
}

/* Zero the per-interface statistics. */
static int loopback_stats_reset(pktio_entry_t *pktio_entry ODP_UNUSED)
{
	memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t));
	return 0;
}

/* One-time global init: nothing to set up, just announce availability. */
static int loop_init_global(void)
{
	ODP_PRINT("PKTIO: initialized loop interface.\n");
	return 0;
}

/* Driver operations table registered with the pktio framework.
 * NULL entries mean the framework default / unsupported operation. */
const pktio_if_ops_t loopback_pktio_ops = {
	.name = "loop",
	.print = NULL,
	.init_global = loop_init_global,
	.init_local = NULL,
	.term = NULL,
	.open = loopback_open,
	.close = loopback_close,
	.start = NULL,
	.stop = NULL,
	.stats = loopback_stats,
	.stats_reset = loopback_stats_reset,
	.recv = loopback_recv,
	.send = loopback_send,
	.mtu_get = loopback_mtu_get,
	.promisc_mode_set = loopback_promisc_mode_set,
	.promisc_mode_get = loopback_promisc_mode_get,
	.mac_get = loopback_mac_addr_get,
	.link_status = loopback_link_status,
	.capability = loopback_capability,
	.pktin_ts_res = NULL,
	.pktin_ts_from_ns = NULL,
	.config = NULL,
	.input_queues_config = NULL,
	.output_queues_config = NULL,
};
508094.c
/* $NetBSD: dnssec.c,v 1.5 2009/03/12 10:57:26 tteras Exp $ */

/* $KAME: dnssec.c,v 1.2 2001/08/05 18:46:07 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <sys/types.h>
#include <sys/param.h>

#include <stdlib.h>
#include <string.h>

#include "var.h"
#include "vmbuf.h"
#include "misc.h"
#include "plog.h"
#include "debug.h"

#include "isakmp_var.h"
#include "isakmp.h"
#include "ipsec_doi.h"
#include "oakley.h"
#include "netdb_dnssec.h"
#include "strnames.h"
#include "dnssec.h"
#include "gcmalloc.h"

/* h_errno is set by the resolver library used by getcertsbyname(). */
extern int h_errno;

/*
 * Look up a certificate via DNS (CERT RR) for the peer identified by
 * an ISAKMP identification payload body.
 *
 * id: identification payload body (struct ipsecdoi_id_b header followed
 *     by the identifier data).  Only FQDN identifiers are supported;
 *     address-type identifiers are rejected.
 *
 * Returns a newly allocated vmbuf whose first byte is the ISAKMP
 * certificate-encoding type (ISAKMP_CERT_X509SIGN) followed by the raw
 * certificate data, or NULL on any failure.  Caller owns (and must
 * vfree) the returned buffer.
 *
 * Note: on success control also falls through the err: label, which
 * only releases the temporary name buffer and the DNS result list —
 * 'cert' itself is returned intact.
 */
vchar_t *
dnssec_getcert(id)
	vchar_t *id;
{
	vchar_t *cert = NULL;
	struct certinfo *res = NULL;
	struct ipsecdoi_id_b *id_b;
	int type;
	char *name = NULL;
	int namelen;
	int error;

	/* The identifier data follows the fixed id_b header. */
	id_b = (struct ipsecdoi_id_b *)id->v;
	namelen = id->l - sizeof(*id_b);

	/* NUL-terminated copy of the identifier for the resolver call. */
	name = racoon_malloc(namelen + 1);
	if (!name) {
		plog(LLV_ERROR, LOCATION, NULL,
			"failed to get buffer.\n");
		return NULL;
	}
	memcpy(name, id_b + 1, namelen);
	name[namelen] = '\0';

	switch (id_b->type) {
	case IPSECDOI_ID_FQDN:
		error = getcertsbyname(name, &res);
		if (error != 0) {
			plog(LLV_ERROR, LOCATION, NULL,
				"getcertsbyname(\"%s\") failed.\n", name);
			goto err;
		}
		break;
	case IPSECDOI_ID_IPV4_ADDR:
	case IPSECDOI_ID_IPV6_ADDR:
		/* XXX should be processed to query PTR ? */
	default:
		plog(LLV_ERROR, LOCATION, NULL,
			"inpropper ID type passed %s "
			"though getcert method is dnssec.\n",
			s_ipsecdoi_ident(id_b->type));
		goto err;
	}

	/* check response: only the first CERT RR is used. */
	if (res->ci_next != NULL) {
		plog(LLV_WARNING, LOCATION, NULL,
			"not supported multiple CERT RR.\n");
	}
	switch (res->ci_type) {
	case DNSSEC_TYPE_PKIX:
		/* XXX is it enough condition to set this type ? */
		type = ISAKMP_CERT_X509SIGN;
		break;
	default:
		plog(LLV_ERROR, LOCATION, NULL,
			"not supported CERT RR type %d.\n", res->ci_type);
		goto err;
	}

	/* create cert holder: one extra byte holds the encoding type
	 * in front of the raw certificate data. */
	cert = vmalloc(res->ci_certlen + 1);
	if (cert == NULL) {
		plog(LLV_ERROR, LOCATION, NULL,
			"failed to get cert buffer.\n");
		goto err;
	}
	cert->v[0] = type;
	memcpy(&cert->v[1], res->ci_cert, res->ci_certlen);

	plog(LLV_DEBUG, LOCATION, NULL, "created CERT payload:\n");
	plogdump(LLV_DEBUG, cert->v, cert->l);

err:
	/* Shared exit path for both success and failure: release the
	 * temporaries; 'cert' is NULL unless everything succeeded. */
	if (name)
		racoon_free(name);
	if (res)
		freecertinfo(res);

	return cert;
}
188430.c
/*
   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas at Austin nor the names
      of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* Front-end generators for the "3m2" induced-method variants of the
 * level-3 operations.  Each GENFRONT expansion emits a public wrapper
 * (e.g. bli_gemm3m2) that dispatches real-domain operands to the
 * standard implementation and complex-domain operands to the 3m2
 * front-end with the corresponding control tree.  Invocations that are
 * commented out below are generators for operations not (yet) enabled
 * for this method. */

#include "blis.h"

/* Control tree for the 3m2 gemm path, defined elsewhere. */
extern gemm_t* gemm3m2_cntl;

// -- gemm ---------------------------------------------------------------------

/* Generator for gemm-shaped operations: alpha, A, B, beta, C.
 * NOTE(review): 'cntx' is declared locally and passed uninitialized to
 * the _front routine — presumably the callee initializes it; confirm
 * against the _front implementations of this BLIS version. */
#undef  GENFRONT
#define GENFRONT( opname, cname, imeth ) \
\
void PASTEMAC(opname,imeth)( \
                             obj_t*  alpha, \
                             obj_t*  a, \
                             obj_t*  b, \
                             obj_t*  beta, \
                             obj_t*  c \
                           ) \
{ \
	cntx_t cntx; \
\
	/* Real-domain objects take the default path; complex-domain
	   objects go through the 3m2 front-end. */ \
	if ( bli_obj_is_real( *c ) ) \
	{ \
		PASTEMAC0(opname)( alpha, a, b, beta, c ); \
	} \
	else \
	{ \
		PASTEMAC(opname,_front)( alpha, a, b, beta, c, &cntx, \
		                         PASTECH2(cname,imeth,_cntl) ); \
	} \
}

GENFRONT( gemm, gemm, 3m2 )

// -- hemm/symm/trmm3 ----------------------------------------------------------

/* Generator variant for sided operations: adds the side_t parameter. */
#undef  GENFRONT
#define GENFRONT( opname, cname, imeth ) \
\
void PASTEMAC(opname,imeth)( \
                             side_t  side, \
                             obj_t*  alpha, \
                             obj_t*  a, \
                             obj_t*  b, \
                             obj_t*  beta, \
                             obj_t*  c \
                           ) \
{ \
	cntx_t cntx; \
\
	if ( bli_obj_is_real( *c ) ) \
	{ \
		PASTEMAC0(opname)( side, alpha, a, b, beta, c ); \
	} \
	else \
	{ \
		PASTEMAC(opname,_front)( side, alpha, a, b, beta, c, &cntx, \
		                         PASTECH2(cname,imeth,_cntl) ); \
	} \
}

//GENFRONT( hemm, gemm, 3m2 )
//GENFRONT( symm, gemm, 3m2 )
//GENFRONT( trmm3, gemm, 3m2 )

// -- herk/syrk ----------------------------------------------------------------

/* Generator variant for rank-k updates: no B operand. */
#undef  GENFRONT
#define GENFRONT( opname, cname, imeth ) \
\
void PASTEMAC(opname,imeth)( \
                             obj_t*  alpha, \
                             obj_t*  a, \
                             obj_t*  beta, \
                             obj_t*  c \
                           ) \
{ \
	cntx_t cntx; \
\
	if ( bli_obj_is_real( *c ) ) \
	{ \
		PASTEMAC0(opname)( alpha, a, beta, c ); \
	} \
	else \
	{ \
		PASTEMAC(opname,_front)( alpha, a, beta, c, &cntx, \
		                         PASTECH2(cname,imeth,_cntl) ); \
	} \
}

//GENFRONT( herk, gemm, 3m2 )
//GENFRONT( syrk, gemm, 3m2 )

// -- her2k/syr2k --------------------------------------------------------------

/* Generator variant for rank-2k updates: same shape as gemm. */
#undef  GENFRONT
#define GENFRONT( opname, cname, imeth ) \
\
void PASTEMAC(opname,imeth)( \
                             obj_t*  alpha, \
                             obj_t*  a, \
                             obj_t*  b, \
                             obj_t*  beta, \
                             obj_t*  c \
                           ) \
{ \
	cntx_t cntx; \
\
	if ( bli_obj_is_real( *c ) ) \
	{ \
		PASTEMAC0(opname)( alpha, a, b, beta, c ); \
	} \
	else \
	{ \
		PASTEMAC(opname,_front)( alpha, a, b, beta, c, &cntx, \
		                         PASTECH2(cname,imeth,_cntl) ); \
	} \
}

//GENFRONT( her2k, gemm, 3m2 )
//GENFRONT( syr2k, gemm, 3m2 )
994499.c
/*
 * The Clear BSD License
 * Copyright 2012-2017 NXP
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted (subject to the limitations in the
 * disclaimer below) provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of the copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*!
\file       GreenPowerProxyTableRequest.c
\brief      Proxy table request send and receive functions
*/

#ifdef  CLD_GREENPOWER
/****************************************************************************/
/***        Include files                                                 ***/
/****************************************************************************/

#include <jendefs.h>
#include "zcl.h"
#include "zcl_options.h"
#include "GreenPower.h"
#include "GreenPower_internal.h"

/****************************************************************************/
/***        Macro Definitions                                             ***/
/****************************************************************************/

/* Low three bits of the Options field carry the application ID. */
#define APP_ID_MASK_PROXY_TABLE_REQ                    (7)

/****************************************************************************/
/***        Public Functions                                              ***/
/****************************************************************************/

/****************************************************************************
 **
 ** NAME:       eGP_ProxyTableRequestSend
 **
 ** DESCRIPTION:
 ** Sends Proxy Table request command.
 **
 ** The payload layout depends on the Options field:
 **  - request by GPD source ID (default):   Options + SrcID         (2 items)
 **  - request by 8-byte application ID:     Options + IEEE + EP     (3 items)
 **  - request by index (BIT_MAP_REQUEST_TYPE set): Options + Index  (2 items)
 ** The 4-entry payload table is sized for the largest case; unused
 ** trailing entries are ignored because u8ItemsInPayload limits what
 ** eZCL_CustomCommandSend consumes.
 **
 ** PARAMETERS:                 Name                           Usage
 ** uint8                       u8SourceEndPointId             Source EP Id
 ** uint8                       u8DestEndPointId               Destination EP Id
 ** tsZCL_Address               sDestinationAddress            Destination Address
 ** tsGP_ZgpProxyTableRequestCmdPayload *psZgpProxyTableRequestCmdPayload  command payload
 **
 ** RETURN:
 ** teZCL_Status
 **
 ****************************************************************************/
PUBLIC teZCL_Status eGP_ProxyTableRequestSend(
                    uint8                               u8SourceEndPointId,
                    uint8                               u8DestEndPointId,
                    tsZCL_Address                       sDestinationAddress,
                    tsGP_ZgpProxyTableRequestCmdPayload *psZgpProxyTableRequestCmdPayload)
{
    zbmap8 b8Options;
    uint8 u8ItemsInPayload = 2;
    /* Filled in by eZCL_CustomCommandSend with the sequence number used. */
    uint8 u8TransactionSequenceNumber;
    tsZCL_TxPayloadItem asPayloadDefinition[4] = { //size included for app id 2
            {1, E_ZCL_BMAP8,  &psZgpProxyTableRequestCmdPayload->b8Options},
            {1, E_ZCL_UINT32, &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.u32ZgpdSrcId},
            {1, E_ZCL_UINT8,  &psZgpProxyTableRequestCmdPayload->u8Index},
            {1, E_ZCL_UINT8,  &psZgpProxyTableRequestCmdPayload->u8Index}
           };

    b8Options = psZgpProxyTableRequestCmdPayload->b8Options;

    /* 8-byte application ID: GPD is addressed by IEEE address + endpoint
     * instead of a 4-byte source ID. */
    if(( b8Options & APP_ID_MASK_PROXY_TABLE_REQ) == GP_APPL_ID_8_BYTE)
    {
        u8ItemsInPayload += 1;
        asPayloadDefinition[1].eType  = E_ZCL_UINT64;
        asPayloadDefinition[1].pvData = &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.sZgpdDeviceAddrAppId2.u64ZgpdIEEEAddr;
        asPayloadDefinition[2].eType  = E_ZCL_UINT8;
        asPayloadDefinition[2].pvData = &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.sZgpdDeviceAddrAppId2.u8EndPoint;
        asPayloadDefinition[3].eType  = E_ZCL_UINT8;
        asPayloadDefinition[3].pvData = &psZgpProxyTableRequestCmdPayload->u8Index;
    }

    /* Request-by-index overrides the GPD address fields entirely:
     * only Options + Index are transmitted. */
    if(b8Options & BIT_MAP_REQUEST_TYPE)
    {
        u8ItemsInPayload = 2;
        asPayloadDefinition[1].eType  = E_ZCL_UINT8;
        asPayloadDefinition[1].pvData = &psZgpProxyTableRequestCmdPayload->u8Index;
    }

    return eZCL_CustomCommandSend(u8SourceEndPointId,
                                  u8DestEndPointId,
                                  &sDestinationAddress,
                                  GREENPOWER_CLUSTER_ID,
                                  TRUE,
                                  E_GP_ZGP_PROXY_TABLE_REQUEST,
                                  &u8TransactionSequenceNumber,
                                  asPayloadDefinition,
                                  FALSE,
                                  0,
                                  u8ItemsInPayload);
}

/****************************************************************************
 **
 ** NAME:       eGP_ProxyTableRequestReceive
 **
 ** DESCRIPTION:
 ** Handles a received Proxy Table Request command.
 **
 ** Peeks at the Options field first (via u16ZCL_APduInstanceReadNBO) to
 ** decide the payload layout, then deserializes into the caller's
 ** payload structure using the same item-count logic as the send path.
 **
 ** PARAMETERS:                 Name                          Usage
 ** ZPS_tsAfEvent              *pZPSevent                     Zigbee stack event structure
 ** uint16                      u16Offset                     Offset of the Options field in the APDU
 ** tsGP_ZgpProxyTableRequestCmdPayload *psZgpProxyTableRequestCmdPayload  command payload
 **
 ** RETURN:
 ** teZCL_Status
 **
 ****************************************************************************/
PUBLIC teZCL_Status eGP_ProxyTableRequestReceive(
                    ZPS_tsAfEvent                       *pZPSevent,
                    uint16                              u16Offset,
                    tsGP_ZgpProxyTableRequestCmdPayload *psZgpProxyTableRequestCmdPayload)
{
    uint8 u8TransactionSequenceNumber;
    uint16 u16ActualQuantity;
    zbmap8 b8Options;
    uint8 u8ItemsInPayload = 2;

    tsZCL_RxPayloadItem asPayloadDefinition[4] = { //maximum size considered
            {1, &u16ActualQuantity, E_ZCL_BMAP8,  &psZgpProxyTableRequestCmdPayload->b8Options},
            {1, &u16ActualQuantity, E_ZCL_UINT32, &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.u32ZgpdSrcId},
            {1, &u16ActualQuantity, E_ZCL_UINT8,  &psZgpProxyTableRequestCmdPayload->u8Index},
            {1, &u16ActualQuantity, E_ZCL_UINT8,  &psZgpProxyTableRequestCmdPayload->u8Index}
           };

    /* Read Options field from received command */
    u16ZCL_APduInstanceReadNBO(pZPSevent->uEvent.sApsDataIndEvent.hAPduInst,
                               u16Offset,
                               E_ZCL_BMAP8,
                               &b8Options);

    /* 8-byte application ID: IEEE address + endpoint in place of SrcID. */
    if(( b8Options & APP_ID_MASK_PROXY_TABLE_REQ) == GP_APPL_ID_8_BYTE)
    {
        u8ItemsInPayload += 1;
        asPayloadDefinition[1].eType         = E_ZCL_UINT64;
        asPayloadDefinition[1].pvDestination = &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.sZgpdDeviceAddrAppId2.u64ZgpdIEEEAddr;
        asPayloadDefinition[2].eType         = E_ZCL_UINT8;
        asPayloadDefinition[2].pvDestination = &psZgpProxyTableRequestCmdPayload->uZgpdDeviceAddr.sZgpdDeviceAddrAppId2.u8EndPoint;
        asPayloadDefinition[3].eType         = E_ZCL_UINT8;
        asPayloadDefinition[3].pvDestination = &psZgpProxyTableRequestCmdPayload->u8Index;
    }

    /* Request-by-index: only Options + Index are present. */
    if(b8Options & BIT_MAP_REQUEST_TYPE)
    {
        u8ItemsInPayload = 2;
        asPayloadDefinition[1].eType         = E_ZCL_UINT8;
        asPayloadDefinition[1].pvDestination = &psZgpProxyTableRequestCmdPayload->u8Index;
    }

    return eZCL_CustomCommandReceive(pZPSevent,
                                     &u8TransactionSequenceNumber,
                                     asPayloadDefinition,
                                     u8ItemsInPayload,
                                     E_ZCL_ACCEPT_EXACT|E_ZCL_DISABLE_DEFAULT_RESPONSE);
}
#endif //#ifdef CLD_GREENPOWER
/****************************************************************************/
/***        END OF FILE                                                   ***/
/****************************************************************************/
340966.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "uv.h" #include "internal.h" #if TARGET_OS_IPHONE || MAC_OS_X_VERSION_MAX_ALLOWED < 1070 /* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */ /* macOS prior to 10.7 doesn't provide the full FSEvents API so use kqueue */ int uv__fsevents_init(uv_fs_event_t* handle) { return 0; } int uv__fsevents_close(uv_fs_event_t* handle) { return 0; } void uv__fsevents_loop_delete(uv_loop_t* loop) { } #else /* TARGET_OS_IPHONE */ #include <dlfcn.h> #include <assert.h> #include <stdlib.h> #include <pthread.h> #include <CoreFoundation/CFRunLoop.h> #include <CoreServices/CoreServices.h> /* These are macros to avoid "initializer element is not constant" errors * with old versions of gcc. 
*/ #define kFSEventsModified (kFSEventStreamEventFlagItemFinderInfoMod | \ kFSEventStreamEventFlagItemModified | \ kFSEventStreamEventFlagItemInodeMetaMod | \ kFSEventStreamEventFlagItemChangeOwner | \ kFSEventStreamEventFlagItemXattrMod) #define kFSEventsRenamed (kFSEventStreamEventFlagItemCreated | \ kFSEventStreamEventFlagItemRemoved | \ kFSEventStreamEventFlagItemRenamed) #define kFSEventsSystem (kFSEventStreamEventFlagUserDropped | \ kFSEventStreamEventFlagKernelDropped | \ kFSEventStreamEventFlagEventIdsWrapped | \ kFSEventStreamEventFlagHistoryDone | \ kFSEventStreamEventFlagMount | \ kFSEventStreamEventFlagUnmount | \ kFSEventStreamEventFlagRootChanged) typedef struct uv__fsevents_event_s uv__fsevents_event_t; typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t; typedef struct uv__cf_loop_state_s uv__cf_loop_state_t; enum uv__cf_loop_signal_type_e { kUVCFLoopSignalRegular, kUVCFLoopSignalClosing }; typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t; struct uv__cf_loop_signal_s { QUEUE member; uv_fs_event_t* handle; uv__cf_loop_signal_type_t type; }; struct uv__fsevents_event_s { QUEUE member; int events; char path[1]; }; struct uv__cf_loop_state_s { CFRunLoopRef loop; CFRunLoopSourceRef signal_source; int fsevent_need_reschedule; FSEventStreamRef fsevent_stream; uv_sem_t fsevent_sem; uv_mutex_t fsevent_mutex; void* fsevent_handles[2]; unsigned int fsevent_handle_count; }; /* Forward declarations */ static void uv__cf_loop_cb(void* arg); static void* uv__cf_loop_runner(void* arg); static int uv__cf_loop_signal(uv_loop_t* loop, uv_fs_event_t* handle, uv__cf_loop_signal_type_t type); /* Lazy-loaded by uv__fsevents_global_init(). 
*/ static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef, const void**, CFIndex, const CFArrayCallBacks*); static void (*pCFRelease)(CFTypeRef); static void (*pCFRunLoopAddSource)(CFRunLoopRef, CFRunLoopSourceRef, CFStringRef); static CFRunLoopRef (*pCFRunLoopGetCurrent)(void); static void (*pCFRunLoopRemoveSource)(CFRunLoopRef, CFRunLoopSourceRef, CFStringRef); static void (*pCFRunLoopRun)(void); static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef, CFIndex, CFRunLoopSourceContext*); static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef); static void (*pCFRunLoopStop)(CFRunLoopRef); static void (*pCFRunLoopWakeUp)(CFRunLoopRef); static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)( CFAllocatorRef, const char*); static CFStringEncoding (*pCFStringGetSystemEncoding)(void); static CFStringRef (*pkCFRunLoopDefaultMode); static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, FSEventStreamCallback, FSEventStreamContext*, CFArrayRef, FSEventStreamEventId, CFTimeInterval, FSEventStreamCreateFlags); static void (*pFSEventStreamFlushSync)(FSEventStreamRef); static void (*pFSEventStreamInvalidate)(FSEventStreamRef); static void (*pFSEventStreamRelease)(FSEventStreamRef); static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef, CFRunLoopRef, CFStringRef); static Boolean (*pFSEventStreamStart)(FSEventStreamRef); static void (*pFSEventStreamStop)(FSEventStreamRef); #define UV__FSEVENTS_PROCESS(handle, block) \ do { \ QUEUE events; \ QUEUE* q; \ uv__fsevents_event_t* event; \ int err; \ uv_mutex_lock(&(handle)->cf_mutex); \ /* Split-off all events and empty original queue */ \ QUEUE_MOVE(&(handle)->cf_events, &events); \ /* Get error (if any) and zero original one */ \ err = (handle)->cf_error; \ (handle)->cf_error = 0; \ uv_mutex_unlock(&(handle)->cf_mutex); \ /* Loop through events, deallocating each after processing */ \ while (!QUEUE_EMPTY(&events)) { \ q = QUEUE_HEAD(&events); \ event = QUEUE_DATA(q, uv__fsevents_event_t, 
member); \ QUEUE_REMOVE(q); \ /* NOTE: Checking uv__is_active() is required here, because handle \ * callback may close handle and invoking it after it will lead to \ * incorrect behaviour */ \ if (!uv__is_closing((handle)) && uv__is_active((handle))) \ block \ /* Free allocated data */ \ uv__free(event); \ } \ if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \ (handle)->cb((handle), NULL, 0, err); \ } while (0) /* Runs in UV loop's thread, when there're events to report to handle */ static void uv__fsevents_cb(uv_async_t* cb) { uv_fs_event_t* handle; handle = cb->data; UV__FSEVENTS_PROCESS(handle, { handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0); }); } /* Runs in CF thread, pushed event into handle's event list */ static void uv__fsevents_push_event(uv_fs_event_t* handle, QUEUE* events, int err) { assert(events != NULL || err != 0); uv_mutex_lock(&handle->cf_mutex); /* Concatenate two queues */ if (events != NULL) QUEUE_ADD(&handle->cf_events, events); /* Propagate error */ if (err != 0) handle->cf_error = err; uv_mutex_unlock(&handle->cf_mutex); uv_async_send(handle->cf_cb); } /* Runs in CF thread, when there're events in FSEventStream */ static void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef, void* info, size_t numEvents, void* eventPaths, const FSEventStreamEventFlags eventFlags[], const FSEventStreamEventId eventIds[]) { size_t i; int len; char** paths; char* path; char* pos; uv_fs_event_t* handle; QUEUE* q; uv_loop_t* loop; uv__cf_loop_state_t* state; uv__fsevents_event_t* event; FSEventStreamEventFlags flags; QUEUE head; loop = info; state = loop->cf_state; assert(state != NULL); paths = eventPaths; /* For each handle */ uv_mutex_lock(&state->fsevent_mutex); QUEUE_FOREACH(q, &state->fsevent_handles) { handle = QUEUE_DATA(q, uv_fs_event_t, cf_member); QUEUE_INIT(&head); /* Process and filter out events */ for (i = 0; i < numEvents; i++) { flags = eventFlags[i]; /* Ignore system events */ if (flags & 
kFSEventsSystem)
        continue;

      path = paths[i];
      len = strlen(path);

      if (handle->realpath_len == 0)
        continue; /* This should be unreachable */

      /* Filter out paths that are outside handle's request */
      if (len < handle->realpath_len)
        continue;
      if (handle->realpath_len != len &&
          path[handle->realpath_len] != '/')
        /* Make sure that realpath actually named a directory,
         * or that we matched the whole string */
        continue;

      if (memcmp(path, handle->realpath, handle->realpath_len) != 0)
        continue;

      if (!(handle->realpath_len == 1 && handle->realpath[0] == '/')) {
        /* Remove common prefix, unless the watched folder is "/" */
        path += handle->realpath_len;
        len -= handle->realpath_len;

        /* Ignore events with path equal to directory itself */
        if (len <= 1 && (flags & kFSEventStreamEventFlagItemIsDir))
          continue;

        if (len == 0) {
          /* Since we're using fsevents to watch the file itself,
           * realpath == path, and we now need to get the basename of the file
           * back (for commonality with other codepaths and platforms). */
          while (len < handle->realpath_len && path[-1] != '/') {
            path--;
            len++;
          }

          /* Created and Removed seem to be always set, but don't make sense */
          flags &= ~kFSEventsRenamed;
        } else {
          /* Skip forward slash */
          path++;
          len--;
        }
      }

      /* Do not emit events from subdirectories (without option set) */
      if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != '\0') {
        pos = strchr(path + 1, '/');
        if (pos != NULL)
          continue;
      }

      /* Allocate the event together with its trailing path storage.
       * On OOM we simply stop collecting events for this handle. */
      event = uv__malloc(sizeof(*event) + len);
      if (event == NULL)
        break;

      memset(event, 0, sizeof(*event));
      memcpy(event->path, path, len + 1);
      event->events = UV_RENAME;

      if (0 == (flags & kFSEventsRenamed)) {
        if (0 != (flags & kFSEventsModified) ||
            0 == (flags & kFSEventStreamEventFlagItemIsDir))
          event->events = UV_CHANGE;
      }

      QUEUE_INSERT_TAIL(&head, &event->member);
    }

    if (!QUEUE_EMPTY(&head))
      uv__fsevents_push_event(handle, &head, 0);
  }
  uv_mutex_unlock(&state->fsevent_mutex);
}


/* Runs in CF thread.
 * Creates and starts the global FSEventStream watching `paths`. */
static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
  uv__cf_loop_state_t* state;
  FSEventStreamContext ctx;
  FSEventStreamRef ref;
  CFAbsoluteTime latency;
  FSEventStreamCreateFlags flags;

  /* Initialize context */
  ctx.version = 0;
  ctx.info = loop;
  ctx.retain = NULL;
  ctx.release = NULL;
  ctx.copyDescription = NULL;

  latency = 0.05;

  /* Explanation of selected flags:
   * 1. NoDefer - without this flag, events that are happening continuously
   *    (i.e. each event is happening after time interval less than `latency`,
   *    counted from previous event), will be deferred and passed to callback
   *    once they'll either fill whole OS buffer, or when this continuous stream
   *    will stop (i.e. there'll be delay between events, bigger than
   *    `latency`).
   *    Specifying this flag will invoke callback after `latency` time passed
   *    since event.
   * 2. FileEvents - fire callback for file changes too (by default it is
   *    firing it only for directory changes).
   */
  flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;

  /*
   * NOTE: It might sound like a good idea to remember last seen StreamEventId,
   * but in reality one dir might have last StreamEventId less than, the other,
   * that is being watched now. Which will cause FSEventStream API to report
   * changes to files from the past.
   */
  ref = pFSEventStreamCreate(NULL,
                             &uv__fsevents_event_cb,
                             &ctx,
                             paths,
                             kFSEventStreamEventIdSinceNow,
                             latency,
                             flags);
  assert(ref != NULL);

  state = loop->cf_state;
  pFSEventStreamScheduleWithRunLoop(ref,
                                    state->loop,
                                    *pkCFRunLoopDefaultMode);
  if (!pFSEventStreamStart(ref)) {
    /* Start failed: tear the stream back down before reporting. */
    pFSEventStreamInvalidate(ref);
    pFSEventStreamRelease(ref);
    return UV_EMFILE;
  }

  state->fsevent_stream = ref;
  return 0;
}


/* Runs in CF thread.
 * Stops and releases the current FSEventStream, if any. */
static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
  uv__cf_loop_state_t* state;

  state = loop->cf_state;

  if (state->fsevent_stream == NULL)
    return;

  /* Stop emitting events */
  pFSEventStreamStop(state->fsevent_stream);

  /* Release stream */
  pFSEventStreamInvalidate(state->fsevent_stream);
  pFSEventStreamRelease(state->fsevent_stream);
  state->fsevent_stream = NULL;
}


/* Runs in CF thread, when there're new fsevent handles to add to stream.
 * Rebuilds the single shared FSEventStream from the current handle list. */
static void uv__fsevents_reschedule(uv_fs_event_t* handle,
                                    uv__cf_loop_signal_type_t type) {
  uv__cf_loop_state_t* state;
  QUEUE* q;
  uv_fs_event_t* curr;
  CFArrayRef cf_paths;
  CFStringRef* paths;
  unsigned int i;
  int err;
  unsigned int path_count;

  state = handle->loop->cf_state;
  paths = NULL;
  cf_paths = NULL;
  err = 0;
  /* NOTE: `i` is used in deallocation loop below */
  i = 0;

  /* Optimization to prevent O(n^2) time spent when starting to watch
   * many files simultaneously
   */
  uv_mutex_lock(&state->fsevent_mutex);
  if (state->fsevent_need_reschedule == 0) {
    uv_mutex_unlock(&state->fsevent_mutex);
    goto final;
  }
  state->fsevent_need_reschedule = 0;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Destroy previous FSEventStream */
  uv__fsevents_destroy_stream(handle->loop);

  /* Any failure below will be a memory failure */
  err = UV_ENOMEM;

  /* Create list of all watched paths */
  uv_mutex_lock(&state->fsevent_mutex);
  path_count = state->fsevent_handle_count;
  if (path_count != 0) {
    paths = uv__malloc(sizeof(*paths) * path_count);
    if (paths == NULL) {
      uv_mutex_unlock(&state->fsevent_mutex);
      goto final;
    }

    q = &state->fsevent_handles;
    for (; i < path_count; i++) {
      q = QUEUE_NEXT(q);
      assert(q != &state->fsevent_handles);
      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);

      assert(curr->realpath != NULL);
      paths[i] =
          pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
      if (paths[i] == NULL) {
        uv_mutex_unlock(&state->fsevent_mutex);
        goto final;
      }
    }
  }
  uv_mutex_unlock(&state->fsevent_mutex);
  err = 0;

  if (path_count != 0) {
    /* Create new FSEventStream */
    cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
    if (cf_paths == NULL) {
      err = UV_ENOMEM;
      goto final;
    }
    err = uv__fsevents_create_stream(handle->loop, cf_paths);
  }

final:
  /* Deallocate all paths in case of failure */
  if (err != 0) {
    if (cf_paths == NULL) {
      /* `i` counts how many CFStrings were successfully created above */
      while (i != 0)
        pCFRelease(paths[--i]);
      uv__free(paths);
    } else {
      /* CFArray takes ownership of both strings and original C-array */
      pCFRelease(cf_paths);
    }

    /* Broadcast error to all handles */
    uv_mutex_lock(&state->fsevent_mutex);
    QUEUE_FOREACH(q, &state->fsevent_handles) {
      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
      uv__fsevents_push_event(curr, NULL, err);
    }
    uv_mutex_unlock(&state->fsevent_mutex);
  }

  /*
   * Main thread will block until the removal of handle from the list,
   * we must tell it when we're ready.
   *
   * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close`
   */
  if (type == kUVCFLoopSignalClosing)
    uv_sem_post(&state->fsevent_sem);
}


/* Lazily dlopen()s CoreFoundation/CoreServices and resolves the symbols this
 * file uses through `p`-prefixed function pointers.  Idempotent and
 * thread-safe via `global_init_mutex`. */
static int uv__fsevents_global_init(void) {
  static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  static void* core_foundation_handle;
  static void* core_services_handle;
  int err;

  err = 0;
  pthread_mutex_lock(&global_init_mutex);
  if (core_foundation_handle != NULL)
    goto out;

  /* The libraries are never unloaded because we currently don't have a good
   * mechanism for keeping a reference count. It's unlikely to be an issue
   * but if it ever becomes one, we can turn the dynamic library handles into
   * per-event loop properties and have the dynamic linker keep track for us.
   */
  err = UV_ENOSYS;
  core_foundation_handle = dlopen("/System/Library/Frameworks/"
                                  "CoreFoundation.framework/"
                                  "Versions/A/CoreFoundation",
                                  RTLD_LAZY | RTLD_LOCAL);
  if (core_foundation_handle == NULL)
    goto out;

  core_services_handle = dlopen("/System/Library/Frameworks/"
                                "CoreServices.framework/"
                                "Versions/A/CoreServices",
                                RTLD_LAZY | RTLD_LOCAL);
  if (core_services_handle == NULL)
    goto out;

  err = UV_ENOENT;
#define V(handle, symbol)                                                     \
  do {                                                                        \
    *(void **)(&p ## symbol) = dlsym((handle), #symbol);                      \
    if (p ## symbol == NULL)                                                  \
      goto out;                                                               \
  }                                                                           \
  while (0)
  V(core_foundation_handle, CFArrayCreate);
  V(core_foundation_handle, CFRelease);
  V(core_foundation_handle, CFRunLoopAddSource);
  V(core_foundation_handle, CFRunLoopGetCurrent);
  V(core_foundation_handle, CFRunLoopRemoveSource);
  V(core_foundation_handle, CFRunLoopRun);
  V(core_foundation_handle, CFRunLoopSourceCreate);
  V(core_foundation_handle, CFRunLoopSourceSignal);
  V(core_foundation_handle, CFRunLoopStop);
  V(core_foundation_handle, CFRunLoopWakeUp);
  V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
  V(core_foundation_handle, CFStringGetSystemEncoding);
  V(core_foundation_handle, kCFRunLoopDefaultMode);
  V(core_services_handle, FSEventStreamCreate);
  V(core_services_handle, FSEventStreamFlushSync);
  V(core_services_handle, FSEventStreamInvalidate);
  V(core_services_handle, FSEventStreamRelease);
  V(core_services_handle, FSEventStreamScheduleWithRunLoop);
  V(core_services_handle, FSEventStreamStart);
  V(core_services_handle, FSEventStreamStop);
#undef V
  err = 0;

out:
  if (err && core_services_handle != NULL) {
    dlclose(core_services_handle);
    core_services_handle = NULL;
  }

  if (err && core_foundation_handle != NULL) {
    dlclose(core_foundation_handle);
    core_foundation_handle = NULL;
  }

  pthread_mutex_unlock(&global_init_mutex);
  return err;
}


/* Runs in UV loop.
 * Creates per-loop CF state and spawns the CF runner thread; waits on
 * `loop->cf_sem` until the thread has installed its run-loop source. */
static int uv__fsevents_loop_init(uv_loop_t* loop) {
  CFRunLoopSourceContext ctx;
  uv__cf_loop_state_t* state;
  pthread_attr_t attr_storage;
  pthread_attr_t* attr;
  int err;

  if (loop->cf_state != NULL)
    return 0;

  err = uv__fsevents_global_init();
  if (err)
    return err;

  state = uv__calloc(1, sizeof(*state));
  if (state == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&loop->cf_mutex);
  if (err)
    goto fail_mutex_init;

  err = uv_sem_init(&loop->cf_sem, 0);
  if (err)
    goto fail_sem_init;

  QUEUE_INIT(&loop->cf_signals);

  err = uv_sem_init(&state->fsevent_sem, 0);
  if (err)
    goto fail_fsevent_sem_init;

  err = uv_mutex_init(&state->fsevent_mutex);
  if (err)
    goto fail_fsevent_mutex_init;

  QUEUE_INIT(&state->fsevent_handles);
  state->fsevent_need_reschedule = 0;
  state->fsevent_handle_count = 0;

  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;
  ctx.perform = uv__cf_loop_cb;
  state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
  if (state->signal_source == NULL) {
    err = UV_ENOMEM;
    goto fail_signal_source_create;
  }

  /* In the unlikely event that pthread_attr_init() fails, create the thread
   * with the default stack size. We'll use a little more address space but
   * that in itself is not a fatal error. */
  attr = &attr_storage;
  if (pthread_attr_init(attr))
    attr = NULL;

  if (attr != NULL)
    if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
      abort();

  loop->cf_state = state;

  /* uv_thread_t is an alias for pthread_t. */
  err = UV__ERR(pthread_create(&loop->cf_thread,
                               attr,
                               uv__cf_loop_runner,
                               loop));

  if (attr != NULL)
    pthread_attr_destroy(attr);

  if (err)
    goto fail_thread_create;

  /* Synchronize threads */
  uv_sem_wait(&loop->cf_sem);
  return 0;

fail_thread_create:
  loop->cf_state = NULL;

fail_signal_source_create:
  uv_mutex_destroy(&state->fsevent_mutex);

fail_fsevent_mutex_init:
  uv_sem_destroy(&state->fsevent_sem);

fail_fsevent_sem_init:
  uv_sem_destroy(&loop->cf_sem);

fail_sem_init:
  uv_mutex_destroy(&loop->cf_mutex);

fail_mutex_init:
  uv__free(state);
  return err;
}


/* Runs in UV loop.
 * Signals the CF thread to stop, joins it, and frees all per-loop state. */
void uv__fsevents_loop_delete(uv_loop_t* loop) {
  uv__cf_loop_signal_t* s;
  uv__cf_loop_state_t* state;
  QUEUE* q;

  if (loop->cf_state == NULL)
    return;

  /* A NULL handle is the CF thread's termination signal */
  if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0)
    abort();

  uv_thread_join(&loop->cf_thread);
  uv_sem_destroy(&loop->cf_sem);
  uv_mutex_destroy(&loop->cf_mutex);

  /* Free any remaining data */
  while (!QUEUE_EMPTY(&loop->cf_signals)) {
    q = QUEUE_HEAD(&loop->cf_signals);
    s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
    QUEUE_REMOVE(q);
    uv__free(s);
  }

  /* Destroy state */
  state = loop->cf_state;
  uv_sem_destroy(&state->fsevent_sem);
  uv_mutex_destroy(&state->fsevent_mutex);
  pCFRelease(state->signal_source);
  uv__free(state);
  loop->cf_state = NULL;
}


/* Runs in CF thread. This is the CF loop's body */
static void* uv__cf_loop_runner(void* arg) {
  uv_loop_t* loop;
  uv__cf_loop_state_t* state;

  loop = arg;
  state = loop->cf_state;
  state->loop = pCFRunLoopGetCurrent();

  pCFRunLoopAddSource(state->loop,
                      state->signal_source,
                      *pkCFRunLoopDefaultMode);

  /* Unblock uv__fsevents_loop_init() now that the source is installed */
  uv_sem_post(&loop->cf_sem);

  pCFRunLoopRun();
  pCFRunLoopRemoveSource(state->loop,
                         state->signal_source,
                         *pkCFRunLoopDefaultMode);

  return NULL;
}


/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
static void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  uv__cf_loop_state_t* state;
  QUEUE* item;
  QUEUE split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;
  state = loop->cf_state;

  /* Detach the pending signal list under the lock, then process unlocked */
  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_MOVE(&loop->cf_signals, &split_head);
  uv_mutex_unlock(&loop->cf_mutex);

  while (!QUEUE_EMPTY(&split_head)) {
    item = QUEUE_HEAD(&split_head);
    QUEUE_REMOVE(item);

    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);

    /* This was a termination signal */
    if (s->handle == NULL)
      pCFRunLoopStop(state->loop);
    else
      uv__fsevents_reschedule(s->handle, s->type);

    uv__free(s);
  }
}


/* Runs in UV loop to notify CF thread */
int uv__cf_loop_signal(uv_loop_t* loop,
                       uv_fs_event_t* handle,
                       uv__cf_loop_signal_type_t type) {
  uv__cf_loop_signal_t* item;
  uv__cf_loop_state_t* state;

  item = uv__malloc(sizeof(*item));
  if (item == NULL)
    return UV_ENOMEM;

  item->handle = handle;
  item->type = type;

  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
  uv_mutex_unlock(&loop->cf_mutex);

  state = loop->cf_state;
  assert(state != NULL);
  pCFRunLoopSourceSignal(state->signal_source);
  pCFRunLoopWakeUp(state->loop);

  return 0;
}


/* Runs in UV loop to initialize handle */
int uv__fsevents_init(uv_fs_event_t* handle) {
  int err;
  uv__cf_loop_state_t* state;

  err = uv__fsevents_loop_init(handle->loop);
  if (err)
    return err;

  /* Get absolute path to file */
  handle->realpath = realpath(handle->path, NULL);
  if (handle->realpath == NULL)
    return UV__ERR(errno);
  handle->realpath_len = strlen(handle->realpath);

  /* Initialize event queue */
  QUEUE_INIT(&handle->cf_events);
  handle->cf_error = 0;

  /*
   * Events will occur in other thread.
   * Initialize callback for getting them back into event loop's thread
   */
  handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb));
  if (handle->cf_cb == NULL) {
    err = UV_ENOMEM;
    goto fail_cf_cb_malloc;
  }

  handle->cf_cb->data = handle;
  uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
  handle->cf_cb->flags |= UV_HANDLE_INTERNAL;
  uv_unref((uv_handle_t*) handle->cf_cb);

  err = uv_mutex_init(&handle->cf_mutex);
  if (err)
    goto fail_cf_mutex_init;

  /* Insert handle into the list */
  state = handle->loop->cf_state;
  uv_mutex_lock(&state->fsevent_mutex);
  QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
  state->fsevent_handle_count++;
  state->fsevent_need_reschedule = 1;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Reschedule FSEventStream */
  assert(handle != NULL);
  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular);
  if (err)
    goto fail_loop_signal;

  return 0;

fail_loop_signal:
  uv_mutex_destroy(&handle->cf_mutex);

fail_cf_mutex_init:
  uv__free(handle->cf_cb);
  handle->cf_cb = NULL;

fail_cf_cb_malloc:
  uv__free(handle->realpath);
  handle->realpath = NULL;
  handle->realpath_len = 0;

  return err;
}


/* Runs in UV loop to de-initialize handle */
int uv__fsevents_close(uv_fs_event_t* handle) {
  int err;
  uv__cf_loop_state_t* state;

  if (handle->cf_cb == NULL)
    return UV_EINVAL;

  /* Remove handle from the list */
  state = handle->loop->cf_state;
  uv_mutex_lock(&state->fsevent_mutex);
  QUEUE_REMOVE(&handle->cf_member);
  state->fsevent_handle_count--;
  state->fsevent_need_reschedule = 1;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Reschedule FSEventStream */
  assert(handle != NULL);
  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing);
  if (err)
    /* NOTE(review): uv__cf_loop_signal() already returns a (negative) libuv
     * error code; wrapping it in UV__ERR() negates it again, yielding a
     * positive value — confirm this is intended. */
    return UV__ERR(err);

  /* Wait for deinitialization */
  uv_sem_wait(&state->fsevent_sem);

  uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free);
  handle->cf_cb = NULL;

  /* Free data in queue */
  UV__FSEVENTS_PROCESS(handle, {
    /* NOP */
  });

  uv_mutex_destroy(&handle->cf_mutex);
  uv__free(handle->realpath);
  handle->realpath = NULL;
  handle->realpath_len = 0;

  return 0;
}

#endif /* TARGET_OS_IPHONE */
/* ==== concatenated source boundary: 975257.c ==== */
// This is an open source non-commercial project. Dear PVS-Studio, please check
// it. PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com

/*
 * Code for menus.  Used for the GUI and 'wildmenu'.
 * GUI/Motif support by Robert Webb
 */

#include <assert.h>
#include <inttypes.h>
#include <string.h>

#include "nvim/vim.h"
#include "nvim/ascii.h"
#include "nvim/menu.h"
#include "nvim/charset.h"
#include "nvim/cursor.h"
#include "nvim/eval.h"
#include "nvim/ex_docmd.h"
#include "nvim/getchar.h"
#include "nvim/memory.h"
#include "nvim/message.h"
#include "nvim/misc1.h"
#include "nvim/keymap.h"
#include "nvim/garray.h"
#include "nvim/state.h"
#include "nvim/strings.h"
#include "nvim/ui.h"
#include "nvim/eval/typval.h"

#define MENUDEPTH 10            /* maximum depth of menus */

#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "menu.c.generated.h"
#endif

/// The character for each menu mode
static char_u menu_mode_chars[] = { 'n', 'v', 's', 'o', 'i', 'c', 't' };

static char_u e_notsubmenu[] = N_(
    "E327: Part of menu-item path is not sub-menu");
static char_u e_othermode[] = N_("E328: Menu only exists in another mode");
static char_u e_nomenu[] = N_("E329: No menu \"%s\"");


/// Do the :menu command and relatives.
/// @param eap Ex command arguments
void ex_menu(exarg_T *eap)
{
  char_u *menu_path;
  int modes;
  char_u *map_to;             // command mapped to the menu entry
  int noremap;
  bool silent = false;
  int unmenu;
  char_u *map_buf;
  char_u *arg;
  char_u *p;
  int i;
  long pri_tab[MENUDEPTH + 1];
  TriState enable = kNone;    // kTrue for "menu enable",
                              // kFalse for "menu disable"
  vimmenu_T menuarg;

  modes = get_menu_cmd_modes(eap->cmd, eap->forceit, &noremap, &unmenu);
  arg = eap->arg;

  // Strip leading <script>/<silent>/<special> modifiers, in any order.
  for (;; ) {
    if (STRNCMP(arg, "<script>", 8) == 0) {
      noremap = REMAP_SCRIPT;
      arg = skipwhite(arg + 8);
      continue;
    }
    if (STRNCMP(arg, "<silent>", 8) == 0) {
      silent = true;
      arg = skipwhite(arg + 8);
      continue;
    }
    if (STRNCMP(arg, "<special>", 9) == 0) {
      // Ignore obsolete "<special>" modifier.
      arg = skipwhite(arg + 9);
      continue;
    }
    break;
  }

  // Locate an optional "icon=filename" argument
  // TODO(nvim): Currently this is only parsed. Should expose it to UIs.
  if (STRNCMP(arg, "icon=", 5) == 0) {
    arg += 5;
    while (*arg != NUL && *arg != ' ') {
      if (*arg == '\\')
        STRMOVE(arg, arg + 1);  // un-escape "\ " inside the filename
      MB_PTR_ADV(arg);
    }
    if (*arg != NUL) {
      *arg++ = NUL;
      arg = skipwhite(arg);
    }
  }

  // Fill in the priority table.
  for (p = arg; *p; p++) {
    if (!ascii_isdigit(*p) && *p != '.') {
      break;
    }
  }
  if (ascii_iswhite(*p)) {
    // A dotted list of numbers ("500.400") gives per-level priorities.
    for (i = 0; i < MENUDEPTH && !ascii_iswhite(*arg); i++) {
      pri_tab[i] = getdigits_long(&arg, false, 0);
      if (pri_tab[i] == 0) {
        pri_tab[i] = 500;
      }
      if (*arg == '.') {
        arg++;
      }
    }
    arg = skipwhite(arg);
  } else if (eap->addr_count && eap->line2 != 0) {
    // A range count (":500menu ...") sets the top-level priority.
    pri_tab[0] = eap->line2;
    i = 1;
  } else
    i = 0;
  while (i < MENUDEPTH)
    pri_tab[i++] = 500;                 // default priority
  pri_tab[MENUDEPTH] = -1;              /* mark end of the table */

  /*
   * Check for "disable" or "enable" argument.
   */
  if (STRNCMP(arg, "enable", 6) == 0 && ascii_iswhite(arg[6])) {
    enable = kTrue;
    arg = skipwhite(arg + 6);
  } else if (STRNCMP(arg, "disable", 7) == 0 && ascii_iswhite(arg[7])) {
    enable = kFalse;
    arg = skipwhite(arg + 7);
  }

  /*
   * If there is no argument, display all menus.
   */
  if (*arg == NUL) {
    show_menus(arg, modes);
    return;
  }

  menu_path = arg;
  if (*menu_path == '.') {
    EMSG2(_(e_invarg2), menu_path);
    goto theend;
  }

  map_to = menu_translate_tab_and_shift(arg);

  /*
   * If there is only a menu name, display menus with that name.
   */
  if (*map_to == NUL && !unmenu && enable == kNone) {
    show_menus(menu_path, modes);
    goto theend;
  } else if (*map_to != NUL && (unmenu || enable != kNone)) {
    EMSG(_(e_trailing));
    goto theend;
  }

  if (enable != kNone) {
    // Change sensitivity of the menu.
    // For the PopUp menu, remove a menu for each mode separately.
    // Careful: menu_enable_recurse() changes menu_path.
    if (STRCMP(menu_path, "*") == 0) {          // meaning: do all menus
      menu_path = (char_u *)"";
    }

    if (menu_is_popup(menu_path)) {
      for (i = 0; i < MENU_INDEX_TIP; ++i)
        if (modes & (1 << i)) {
          p = popup_mode_name(menu_path, i);
          menu_enable_recurse(root_menu, p, MENU_ALL_MODES, enable);
          xfree(p);
        }
    }
    menu_enable_recurse(root_menu, menu_path, modes, enable);
  } else if (unmenu) {
    /*
     * Delete menu(s).
     */
    if (STRCMP(menu_path, "*") == 0)          /* meaning: remove all menus */
      menu_path = (char_u *)"";

    /*
     * For the PopUp menu, remove a menu for each mode separately.
     */
    if (menu_is_popup(menu_path)) {
      for (i = 0; i < MENU_INDEX_TIP; ++i)
        if (modes & (1 << i)) {
          p = popup_mode_name(menu_path, i);
          remove_menu(&root_menu, p, MENU_ALL_MODES, TRUE);
          xfree(p);
        }
    }

    /* Careful: remove_menu() changes menu_path */
    remove_menu(&root_menu, menu_path, modes, FALSE);
  } else {
    /*
     * Add menu(s).
     * Replace special key codes.
     */
    if (STRICMP(map_to, "<nop>") == 0) {        /* "<Nop>" means nothing */
      map_to = (char_u *)"";
      map_buf = NULL;
    } else if (modes & MENU_TIP_MODE) {
      map_buf = NULL;  // Menu tips are plain text.
    } else {
      map_to = replace_termcodes(map_to, STRLEN(map_to), &map_buf, false,
                                 true, true, CPO_TO_CPO_FLAGS);
    }
    menuarg.modes = modes;
    menuarg.noremap[0] = noremap;
    menuarg.silent[0] = silent;
    add_menu_path(menu_path, &menuarg, pri_tab, map_to);

    /*
     * For the PopUp menu, add a menu for each mode separately.
     */
    if (menu_is_popup(menu_path)) {
      for (i = 0; i < MENU_INDEX_TIP; ++i)
        if (modes & (1 << i)) {
          p = popup_mode_name(menu_path, i);
          // Include all modes, to make ":amenu" work
          menuarg.modes = modes;
          add_menu_path(p, &menuarg, pri_tab, map_to);
          xfree(p);
        }
    }

    xfree(map_buf);
  }

  ui_call_update_menu();

theend:
  ;
}


/// Add the menu with the given name to the menu hierarchy
///
/// @param[in] menu_path dot-separated menu path (may contain translations)
/// @param[out] menuarg menu entry
/// @param[in] pri_tab priority table
/// @param[in] call_data Right hand side command
static int add_menu_path(
    const char_u *const menu_path,
    vimmenu_T *menuarg,
    const long *const pri_tab,
    const char_u *const call_data
)
{
  char_u *path_name;
  int modes = menuarg->modes;
  vimmenu_T **menup;
  vimmenu_T *menu = NULL;
  vimmenu_T *parent;
  vimmenu_T **lower_pri;
  char_u *p;
  char_u *name;
  char_u *dname;
  char_u *next_name;
  char_u c;
  char_u d;
  int i;
  int pri_idx = 0;
  int old_modes = 0;
  int amenu;
  char_u *en_name;
  char_u *map_to = NULL;

  /* Make a copy so we can stuff around with it, since it could be const */
  path_name = vim_strsave(menu_path);
  menup = &root_menu;
  parent = NULL;
  name = path_name;
  while (*name) {
    /* Get name of this element in the menu hierarchy, and the simplified
     * name (without mnemonic and accelerator text). */
    next_name = menu_name_skip(name);
    map_to = menutrans_lookup(name, (int)STRLEN(name));
    if (map_to != NULL) {
      en_name = name;
      name = map_to;
    } else {
      en_name = NULL;
    }
    dname = menu_text(name, NULL, NULL);
    if (*dname == NUL) {
      /* Only a mnemonic or accelerator is not valid. */
      EMSG(_("E792: Empty menu name"));
      goto erret;
    }

    /* See if it's already there */
    lower_pri = menup;
    menu = *menup;
    while (menu != NULL) {
      if (menu_name_equal(name, menu) || menu_name_equal(dname, menu)) {
        if (*next_name == NUL && menu->children != NULL) {
          if (!sys_menu) {
            EMSG(_("E330: Menu path must not lead to a sub-menu"));
          }
          goto erret;
        }
        if (*next_name != NUL && menu->children == NULL) {
          if (!sys_menu) {
            EMSG(_(e_notsubmenu));
          }
          goto erret;
        }
        break;
      }
      menup = &menu->next;

      /* Count menus, to find where this one needs to be inserted.
       * Ignore menus that are not in the menubar (PopUp and Toolbar) */
      if (parent != NULL || menu_is_menubar(menu->name)) {
        if (menu->priority <= pri_tab[pri_idx]) {
          lower_pri = menup;
        }
      }
      menu = menu->next;
    }

    if (menu == NULL) {
      if (*next_name == NUL && parent == NULL) {
        EMSG(_("E331: Must not add menu items directly to menu bar"));
        goto erret;
      }

      if (menu_is_separator(dname) && *next_name != NUL) {
        EMSG(_("E332: Separator cannot be part of a menu path"));
        goto erret;
      }

      /* Not already there, so lets add it */
      menu = xcalloc(1, sizeof(vimmenu_T));
      menu->modes = modes;
      menu->enabled = MENU_ALL_MODES;
      menu->name = vim_strsave(name);
      // separate mnemonic and accelerator text from actual menu name
      menu->dname = menu_text(name, &menu->mnemonic, &menu->actext);
      if (en_name != NULL) {
        menu->en_name = vim_strsave(en_name);
        menu->en_dname = menu_text(en_name, NULL, NULL);
      } else {
        menu->en_name = NULL;
        menu->en_dname = NULL;
      }
      menu->priority = pri_tab[pri_idx];
      menu->parent = parent;

      // Add after menu that has lower priority.
      menu->next = *lower_pri;
      *lower_pri = menu;

      old_modes = 0;
    } else {
      old_modes = menu->modes;

      /*
       * If this menu option was previously only available in other
       * modes, then make sure it's available for this one now
       * Also enable a menu when it's created or changed.
       */
      {
        menu->modes |= modes;
        menu->enabled |= modes;
      }
    }

    menup = &menu->children;
    parent = menu;
    name = next_name;
    XFREE_CLEAR(dname);
    if (pri_tab[pri_idx + 1] != -1) {
      pri_idx++;
    }
  }
  xfree(path_name);

  /*
   * Only add system menu items which have not been defined yet.
   * First check if this was an ":amenu".
   */
  amenu = ((modes & (MENU_NORMAL_MODE | MENU_INSERT_MODE)) ==
           (MENU_NORMAL_MODE | MENU_INSERT_MODE));
  if (sys_menu)
    modes &= ~old_modes;

  if (menu != NULL && modes) {
    p = (call_data == NULL) ? NULL : vim_strsave(call_data);

    /* loop over all modes, may add more than one */
    for (i = 0; i < MENU_MODES; ++i) {
      if (modes & (1 << i)) {
        /* free any old menu */
        free_menu_string(menu, i);

        // For "amenu", may insert an extra character.
        // Don't do this for "<Nop>".
        c = 0;
        d = 0;
        if (amenu && call_data != NULL && *call_data != NUL) {
          switch (1 << i) {
          case MENU_VISUAL_MODE:
          case MENU_SELECT_MODE:
          case MENU_OP_PENDING_MODE:
          case MENU_CMDLINE_MODE:
            c = Ctrl_C;
            break;
          case MENU_INSERT_MODE:
            c = Ctrl_BSL;
            d = Ctrl_O;
            break;
          }
        }

        if (c != 0) {
          /* +5 leaves room for the prefix char(s) plus a possible
           * trailing CTRL-\ CTRL-G and NUL */
          menu->strings[i] = xmalloc(STRLEN(call_data) + 5 );
          menu->strings[i][0] = c;
          if (d == 0) {
            STRCPY(menu->strings[i] + 1, call_data);
          } else {
            menu->strings[i][1] = d;
            STRCPY(menu->strings[i] + 2, call_data);
          }
          if (c == Ctrl_C) {
            int len = (int)STRLEN(menu->strings[i]);

            /* Append CTRL-\ CTRL-G to obey 'insertmode'. */
            menu->strings[i][len] = Ctrl_BSL;
            menu->strings[i][len + 1] = Ctrl_G;
            menu->strings[i][len + 2] = NUL;
          }
        } else {
          menu->strings[i] = p;
        }
        menu->noremap[i] = menuarg->noremap[0];
        menu->silent[i] = menuarg->silent[0];
      }
    }
  }
  return OK;

erret:
  xfree(path_name);
  xfree(dname);

  /* Delete any empty submenu we added before discovering the error.  Repeat
   * for higher levels. */
  while (parent != NULL && parent->children == NULL) {
    if (parent->parent == NULL)
      menup = &root_menu;
    else
      menup = &parent->parent->children;
    for (; *menup != NULL && *menup != parent; menup = &((*menup)->next))
      ;
    if (*menup == NULL)         /* safety check */
      break;
    parent = parent->parent;
    free_menu(menup);
  }
  return FAIL;
}


/*
 * Set the (sub)menu with the given name to enabled or disabled.
 * Called recursively.
 */
static int menu_enable_recurse(vimmenu_T *menu, char_u *name, int modes,
                               int enable)
{
  char_u *p;

  if (menu == NULL)
    return OK;                  /* Got to bottom of hierarchy */

  /* Get name of this element in the menu hierarchy */
  p = menu_name_skip(name);

  /* Find the menu */
  while (menu != NULL) {
    if (*name == NUL || *name == '*' || menu_name_equal(name, menu)) {
      if (*p != NUL) {
        if (menu->children == NULL) {
          EMSG(_(e_notsubmenu));
          return FAIL;
        }
        if (menu_enable_recurse(menu->children, p, modes, enable) == FAIL) {
          return FAIL;
        }
      } else if (enable) {
        menu->enabled |= modes;
      } else {
        menu->enabled &= ~modes;
      }

      /*
       * When name is empty, we are doing all menu items for the given
       * modes, so keep looping, otherwise we are just doing the named
       * menu item (which has been found) so break here.
       */
      if (*name != NUL && *name != '*')
        break;
    }
    menu = menu->next;
  }
  if (*name != NUL && *name != '*' && menu == NULL) {
    EMSG2(_(e_nomenu), name);
    return FAIL;
  }

  return OK;
}


/*
 * Remove the (sub)menu with the given name from the menu hierarchy
 * Called recursively.
 */
static int remove_menu (
    vimmenu_T **menup,
    char_u *name,
    int modes,
    bool silent                     /* don't give error messages */
)
{
  vimmenu_T *menu;
  vimmenu_T *child;
  char_u *p;

  if (*menup == NULL)
    return OK;                  /* Got to bottom of hierarchy */

  /* Get name of this element in the menu hierarchy */
  p = menu_name_skip(name);

  /* Find the menu */
  while ((menu = *menup) != NULL) {
    if (*name == NUL || menu_name_equal(name, menu)) {
      if (*p != NUL && menu->children == NULL) {
        if (!silent)
          EMSG(_(e_notsubmenu));
        return FAIL;
      }
      if ((menu->modes & modes) != 0x0) {
        if (remove_menu(&menu->children, p, modes, silent) == FAIL)
          return FAIL;
      } else if (*name != NUL) {
        if (!silent)
          EMSG(_(e_othermode));
        return FAIL;
      }

      /*
       * When name is empty, we are removing all menu items for the given
       * modes, so keep looping, otherwise we are just removing the named
       * menu item (which has been found) so break here.
       */
      if (*name != NUL)
        break;

      /* Remove the menu item for the given mode[s].  If the menu item
       * is no longer valid in ANY mode, delete it */
      menu->modes &= ~modes;
      if (modes & MENU_TIP_MODE)
        free_menu_string(menu, MENU_INDEX_TIP);
      if ((menu->modes & MENU_ALL_MODES) == 0)
        free_menu(menup);
      else
        menup = &menu->next;
    } else
      menup = &menu->next;
  }
  if (*name != NUL) {
    if (menu == NULL) {
      if (!silent)
        EMSG2(_(e_nomenu), name);
      return FAIL;
    }

    /* Recalculate modes for menu based on the new updated children */
    menu->modes &= ~modes;
    child = menu->children;
    for (; child != NULL; child = child->next)
      menu->modes |= child->modes;
    if (modes & MENU_TIP_MODE) {
      free_menu_string(menu, MENU_INDEX_TIP);
    }
    if ((menu->modes & MENU_ALL_MODES) == 0) {
      /* The menu item is no longer valid in ANY mode, so delete it */
      *menup = menu;
      free_menu(menup);
    }
  }

  return OK;
}


/*
 * Free the given menu structure and remove it from the linked list.
 */
static void free_menu(vimmenu_T **menup)
{
  int i;
  vimmenu_T *menu;

  menu = *menup;

  /* Don't change *menup until after calling gui_mch_destroy_menu(). The
   * MacOS code needs the original structure to properly delete the menu. */
  *menup = menu->next;
  xfree(menu->name);
  xfree(menu->dname);
  xfree(menu->en_name);
  xfree(menu->en_dname);
  xfree(menu->actext);
  for (i = 0; i < MENU_MODES; i++)
    free_menu_string(menu, i);
  xfree(menu);
}


/*
 * Free the menu->string with the given index.
 * Several mode slots may share one allocation; only free when this is the
 * last slot referencing it.
 */
static void free_menu_string(vimmenu_T *menu, int idx)
{
  int count = 0;
  int i;

  for (i = 0; i < MENU_MODES; i++)
    if (menu->strings[i] == menu->strings[idx])
      count++;
  if (count == 1)
    xfree(menu->strings[idx]);
  menu->strings[idx] = NULL;
}


/// Export menus
///
/// @param[in] menu if null, starts from root_menu
/// @param modes, a choice of \ref MENU_MODES
/// @return dict with name/commands
/// @see show_menus_recursive
/// @see menu_get
static dict_T *menu_get_recursive(const vimmenu_T *menu, int modes)
{
  dict_T *dict;

  if (!menu || (menu->modes & modes) == 0x0) {
    return NULL;
  }

  dict = tv_dict_alloc();
  tv_dict_add_str(dict, S_LEN("name"), (char *)menu->dname);
  tv_dict_add_nr(dict, S_LEN("priority"), (int)menu->priority);
  tv_dict_add_nr(dict, S_LEN("hidden"), menu_is_hidden(menu->dname));

  if (menu->mnemonic) {
    char buf[MB_MAXCHAR + 1] = { 0 };  // > max value of utf8_char2bytes
    utf_char2bytes(menu->mnemonic, (char_u *)buf);
    tv_dict_add_str(dict, S_LEN("shortcut"), buf);
  }

  if (menu->actext) {
    tv_dict_add_str(dict, S_LEN("actext"), (char *)menu->actext);
  }

  if (menu->modes & MENU_TIP_MODE && menu->strings[MENU_INDEX_TIP]) {
    tv_dict_add_str(dict, S_LEN("tooltip"),
                    (char *)menu->strings[MENU_INDEX_TIP]);
  }

  if (!menu->children) {
    // leaf menu
    dict_T *commands = tv_dict_alloc();
    tv_dict_add_dict(dict, S_LEN("mappings"), commands);

    for (int bit = 0; bit < MENU_MODES; bit++) {
      if ((menu->modes & modes & (1 << bit)) != 0) {
        dict_T *impl = tv_dict_alloc();
        tv_dict_add_allocated_str(impl, S_LEN("rhs"),
                                  str2special_save((char *)menu->strings[bit],
                                                   false, false));
        tv_dict_add_nr(impl, S_LEN("silent"), menu->silent[bit]);
        tv_dict_add_nr(impl, S_LEN("enabled"),
                       (menu->enabled & (1 << bit)) ? 1 : 0);
        tv_dict_add_nr(impl, S_LEN("noremap"),
                       (menu->noremap[bit] & REMAP_NONE) ? 1 : 0);
        tv_dict_add_nr(impl, S_LEN("sid"),
                       (menu->noremap[bit] & REMAP_SCRIPT) ? 1 : 0);
        tv_dict_add_dict(commands, (char *)&menu_mode_chars[bit], 1, impl);
      }
    }
  } else {
    // visit recursively all children
    list_T *const children_list = tv_list_alloc(kListLenMayKnow);
    for (menu = menu->children; menu != NULL; menu = menu->next) {
      dict_T *d = menu_get_recursive(menu, modes);
      if (tv_dict_len(d) > 0) {
        tv_list_append_dict(children_list, d);
      }
    }
    tv_dict_add_list(dict, S_LEN("submenus"), children_list);
  }
  return dict;
}


/// Export menus matching path \p path_name
///
/// @param path_name menu path to look up ("" exports everything)
/// @param modes supported modes, see \ref MENU_MODES
/// @param[in,out] list must be allocated
/// @return false if could not find path_name
bool menu_get(char_u *const path_name, int modes, list_T *list)
{
  vimmenu_T *menu = find_menu(root_menu, path_name, modes);
  if (!menu) {
    return false;
  }
  for (; menu != NULL; menu = menu->next) {
    dict_T *d = menu_get_recursive(menu, modes);
    if (d && tv_dict_len(d) > 0) {
      tv_list_append_dict(list, d);
    }
    if (*path_name != NUL) {
      // If a (non-empty) path query was given, only the first node in the
      // find_menu() result is relevant.  Else we want all nodes.
      break;
    }
  }
  return true;
}


/// Find menu matching `name` and `modes`.
///
/// @param menu top menu to start looking from
/// @param name path towards the menu
/// @return menu if \p name is null, found menu or NULL
static vimmenu_T *find_menu(vimmenu_T *menu, char_u *name, int modes)
{
  char_u *p;

  while (*name) {
    // find the end of one dot-separated name and put a NUL at the dot
    p = menu_name_skip(name);
    while (menu != NULL) {
      if (menu_name_equal(name, menu)) {
        // Found menu
        if (*p != NUL && menu->children == NULL) {
          EMSG(_(e_notsubmenu));
          return NULL;
        } else if ((menu->modes & modes) == 0x0) {
          EMSG(_(e_othermode));
          return NULL;
        } else if (*p == NUL) {
          // found a full match
          return menu;
        }
        break;
      }
      menu = menu->next;
    }

    if (menu == NULL) {
      EMSG2(_(e_nomenu), name);
      return NULL;
    }
    // Found a match, search the sub-menu.
    name = p;
    menu = menu->children;
  }
  return menu;
}


/// Show the mapping associated with a menu item or hierarchy in a sub-menu.
static int show_menus(char_u *const path_name, int modes)
{
  vimmenu_T *menu;

  // First, find the (sub)menu with the given name
  menu = find_menu(root_menu, path_name, modes);
  if (!menu) {
    return FAIL;
  }

  /* Now we have found the matching menu, and we list the mappings */
  /* Highlight title */
  MSG_PUTS_TITLE(_("\n--- Menus ---"));
  show_menus_recursive(menu->parent, modes, 0);
  return OK;
}


/// Recursively show the mappings associated with the menus under the given one
static void show_menus_recursive(vimmenu_T *menu, int modes, int depth)
{
  int i;
  int bit;

  if (menu != NULL && (menu->modes & modes) == 0x0)
    return;

  if (menu != NULL) {
    msg_putchar('\n');
    if (got_int)                /* "q" hit for "--more--" */
      return;
    for (i = 0; i < depth; i++)
      MSG_PUTS(" ");
    if (menu->priority) {
      msg_outnum((long)menu->priority);
      MSG_PUTS(" ");
    }
    // Same highlighting as for directories!?
    msg_outtrans_attr(menu->name, HL_ATTR(HLF_D));
  }

  if (menu != NULL && menu->children == NULL) {
    // Leaf: print one line per mode with noremap/silent/enabled flags.
    for (bit = 0; bit < MENU_MODES; bit++)
      if ((menu->modes & modes & (1 << bit)) != 0) {
        msg_putchar('\n');
        if (got_int)                    /* "q" hit for "--more--" */
          return;
        for (i = 0; i < depth + 2; i++)
          MSG_PUTS(" ");
        msg_putchar(menu_mode_chars[bit]);
        if (menu->noremap[bit] == REMAP_NONE)
          msg_putchar('*');
        else if (menu->noremap[bit] == REMAP_SCRIPT)
          msg_putchar('&');
        else
          msg_putchar(' ');
        if (menu->silent[bit])
          msg_putchar('s');
        else
          msg_putchar(' ');
        if ((menu->modes & menu->enabled & (1 << bit)) == 0)
          msg_putchar('-');
        else
          msg_putchar(' ');
        MSG_PUTS(" ");
        if (*menu->strings[bit] == NUL) {
          msg_puts_attr("<Nop>", HL_ATTR(HLF_8));
        } else {
          msg_outtrans_special(menu->strings[bit], false);
        }
      }
  } else {
    if (menu == NULL) {
      menu = root_menu;
      depth--;
    } else
      menu = menu->children;

    /* recursively show all children.  Skip PopUp[nvoci]. */
    for (; menu != NULL && !got_int; menu = menu->next)
      if (!menu_is_hidden(menu->dname))
        show_menus_recursive(menu, modes, depth + 1);
  }
}


/*
 * Used when expanding menu names.
 */
static vimmenu_T *expand_menu = NULL;
static int expand_modes = 0x0;
static int expand_emenu;                /* TRUE for ":emenu" command */

/*
 * Work out what to complete when doing command line completion of menu names.
*/ char_u *set_context_in_menu_cmd(expand_T *xp, char_u *cmd, char_u *arg, int forceit) { char_u *after_dot; char_u *p; char_u *path_name = NULL; char_u *name; int unmenu; vimmenu_T *menu; int expand_menus; xp->xp_context = EXPAND_UNSUCCESSFUL; /* Check for priority numbers, enable and disable */ for (p = arg; *p; ++p) if (!ascii_isdigit(*p) && *p != '.') break; if (!ascii_iswhite(*p)) { if (STRNCMP(arg, "enable", 6) == 0 && (arg[6] == NUL || ascii_iswhite(arg[6]))) p = arg + 6; else if (STRNCMP(arg, "disable", 7) == 0 && (arg[7] == NUL || ascii_iswhite(arg[7]))) p = arg + 7; else p = arg; } while (*p != NUL && ascii_iswhite(*p)) ++p; arg = after_dot = p; for (; *p && !ascii_iswhite(*p); ++p) { if ((*p == '\\' || *p == Ctrl_V) && p[1] != NUL) p++; else if (*p == '.') after_dot = p + 1; } // ":popup" only uses menues, not entries expand_menus = !((*cmd == 't' && cmd[1] == 'e') || *cmd == 'p'); expand_emenu = (*cmd == 'e'); if (expand_menus && ascii_iswhite(*p)) return NULL; /* TODO: check for next command? */ if (*p == NUL) { /* Complete the menu name */ /* * With :unmenu, you only want to match menus for the appropriate mode. * With :menu though you might want to add a menu with the same name as * one in another mode, so match menus from other modes too. */ expand_modes = get_menu_cmd_modes(cmd, forceit, NULL, &unmenu); if (!unmenu) expand_modes = MENU_ALL_MODES; menu = root_menu; if (after_dot > arg) { size_t path_len = (size_t) (after_dot - arg); path_name = xmalloc(path_len); STRLCPY(path_name, arg, path_len); } name = path_name; while (name != NULL && *name) { p = menu_name_skip(name); while (menu != NULL) { if (menu_name_equal(name, menu)) { /* Found menu */ if ((*p != NUL && menu->children == NULL) || ((menu->modes & expand_modes) == 0x0)) { /* * Menu path continues, but we have reached a leaf. * Or menu exists only in another mode. 
*/ xfree(path_name); return NULL; } break; } menu = menu->next; } if (menu == NULL) { /* No menu found with the name we were looking for */ xfree(path_name); return NULL; } name = p; menu = menu->children; } xfree(path_name); xp->xp_context = expand_menus ? EXPAND_MENUNAMES : EXPAND_MENUS; xp->xp_pattern = after_dot; expand_menu = menu; } else /* We're in the mapping part */ xp->xp_context = EXPAND_NOTHING; return NULL; } /* * Function given to ExpandGeneric() to obtain the list of (sub)menus (not * entries). */ char_u *get_menu_name(expand_T *xp, int idx) { static vimmenu_T *menu = NULL; char_u *str; static int should_advance = FALSE; if (idx == 0) { /* first call: start at first item */ menu = expand_menu; should_advance = FALSE; } /* Skip PopUp[nvoci]. */ while (menu != NULL && (menu_is_hidden(menu->dname) || menu_is_separator(menu->dname) || menu->children == NULL)) menu = menu->next; if (menu == NULL) /* at end of linked list */ return NULL; if (menu->modes & expand_modes) if (should_advance) str = menu->en_dname; else { str = menu->dname; if (menu->en_dname == NULL) should_advance = TRUE; } else str = (char_u *)""; if (should_advance) /* Advance to next menu entry. */ menu = menu->next; should_advance = !should_advance; return str; } /* * Function given to ExpandGeneric() to obtain the list of menus and menu * entries. */ char_u *get_menu_names(expand_T *xp, int idx) { static vimmenu_T *menu = NULL; #define TBUFFER_LEN 256 static char_u tbuffer[TBUFFER_LEN]; /*hack*/ char_u *str; static int should_advance = FALSE; if (idx == 0) { /* first call: start at first item */ menu = expand_menu; should_advance = FALSE; } /* Skip Browse-style entries, popup menus and separators. */ while (menu != NULL && ( menu_is_hidden(menu->dname) || (expand_emenu && menu_is_separator(menu->dname)) || menu->dname[STRLEN(menu->dname) - 1] == '.' 
)) menu = menu->next; if (menu == NULL) /* at end of linked list */ return NULL; if (menu->modes & expand_modes) { if (menu->children != NULL) { if (should_advance) STRLCPY(tbuffer, menu->en_dname, TBUFFER_LEN - 1); else { STRLCPY(tbuffer, menu->dname, TBUFFER_LEN - 1); if (menu->en_dname == NULL) should_advance = TRUE; } /* hack on menu separators: use a 'magic' char for the separator * so that '.' in names gets escaped properly */ STRCAT(tbuffer, "\001"); str = tbuffer; } else { if (should_advance) str = menu->en_dname; else { str = menu->dname; if (menu->en_dname == NULL) should_advance = TRUE; } } } else str = (char_u *)""; if (should_advance) /* Advance to next menu entry. */ menu = menu->next; should_advance = !should_advance; return str; } /// Skip over this element of the menu path and return the start of the next /// element. Any \ and ^Vs are removed from the current element. /// /// @param name may be modified. /// @return start of the next element char_u *menu_name_skip(char_u *const name) { char_u *p; for (p = name; *p && *p != '.'; MB_PTR_ADV(p)) { if (*p == '\\' || *p == Ctrl_V) { STRMOVE(p, p + 1); if (*p == NUL) break; } } if (*p) *p++ = NUL; return p; } /* * Return TRUE when "name" matches with menu "menu". The name is compared in * two ways: raw menu name and menu name without '&'. ignore part after a TAB. */ static bool menu_name_equal(const char_u *const name, vimmenu_T *const menu) { if (menu->en_name != NULL && (menu_namecmp(name, menu->en_name) || menu_namecmp(name, menu->en_dname))) return true; return menu_namecmp(name, menu->name) || menu_namecmp(name, menu->dname); } static bool menu_namecmp(const char_u *const name, const char_u *const mname) { int i; for (i = 0; name[i] != NUL && name[i] != TAB; ++i) if (name[i] != mname[i]) break; return (name[i] == NUL || name[i] == TAB) && (mname[i] == NUL || mname[i] == TAB); } /// Returns the \ref MENU_MODES specified by menu command `cmd`. /// (eg :menu! 
returns MENU_CMDLINE_MODE | MENU_INSERT_MODE) /// /// @param[in] cmd string like "nmenu", "vmenu", etc. /// @param[in] forceit bang (!) was given after the command /// @param[out] noremap If not NULL, the flag it points to is set according /// to whether the command is a "nore" command. /// @param[out] unmenu If not NULL, the flag it points to is set according /// to whether the command is an "unmenu" command. int get_menu_cmd_modes( const char_u * cmd, bool forceit, int *noremap, int *unmenu ) { int modes; switch (*cmd++) { case 'v': /* vmenu, vunmenu, vnoremenu */ modes = MENU_VISUAL_MODE | MENU_SELECT_MODE; break; case 'x': /* xmenu, xunmenu, xnoremenu */ modes = MENU_VISUAL_MODE; break; case 's': /* smenu, sunmenu, snoremenu */ modes = MENU_SELECT_MODE; break; case 'o': /* omenu */ modes = MENU_OP_PENDING_MODE; break; case 'i': /* imenu */ modes = MENU_INSERT_MODE; break; case 't': modes = MENU_TIP_MODE; /* tmenu */ break; case 'c': /* cmenu */ modes = MENU_CMDLINE_MODE; break; case 'a': /* amenu */ modes = MENU_INSERT_MODE | MENU_CMDLINE_MODE | MENU_NORMAL_MODE | MENU_VISUAL_MODE | MENU_SELECT_MODE | MENU_OP_PENDING_MODE; break; case 'n': if (*cmd != 'o') { /* nmenu, not noremenu */ modes = MENU_NORMAL_MODE; break; } FALLTHROUGH; default: cmd--; if (forceit) { // menu!! modes = MENU_INSERT_MODE | MENU_CMDLINE_MODE; } else { // menu modes = MENU_NORMAL_MODE | MENU_VISUAL_MODE | MENU_SELECT_MODE | MENU_OP_PENDING_MODE; } } if (noremap != NULL) *noremap = (*cmd == 'n' ? REMAP_NONE : REMAP_YES); if (unmenu != NULL) *unmenu = (*cmd == 'u'); return modes; } /* * Modify a menu name starting with "PopUp" to include the mode character. * Returns the name in allocated memory. 
*/ static char_u *popup_mode_name(char_u *name, int idx) { size_t len = STRLEN(name); assert(len >= 4); char_u *p = vim_strnsave(name, len + 1); memmove(p + 6, p + 5, len - 4); p[5] = menu_mode_chars[idx]; return p; } /// Duplicate the menu item text and then process to see if a mnemonic key /// and/or accelerator text has been identified. /// /// @param str The menu item text. /// @param[out] mnemonic If non-NULL, *mnemonic is set to the character after /// the first '&'. /// @param[out] actext If non-NULL, *actext is set to the text after the first /// TAB, but only if a TAB was found. Memory pointed to is newly /// allocated. /// /// @return a pointer to allocated memory. static char_u *menu_text(const char_u *str, int *mnemonic, char_u **actext) FUNC_ATTR_NONNULL_RET FUNC_ATTR_WARN_UNUSED_RESULT FUNC_ATTR_NONNULL_ARG(1) { char_u *p; char_u *text; /* Locate accelerator text, after the first TAB */ p = vim_strchr(str, TAB); if (p != NULL) { if (actext != NULL) *actext = vim_strsave(p + 1); assert(p >= str); text = vim_strnsave(str, (size_t)(p - str)); } else text = vim_strsave(str); /* Find mnemonic characters "&a" and reduce "&&" to "&". */ for (p = text; p != NULL; ) { p = vim_strchr(p, '&'); if (p != NULL) { if (p[1] == NUL) /* trailing "&" */ break; if (mnemonic != NULL && p[1] != '&') *mnemonic = p[1]; STRMOVE(p, p + 1); p = p + 1; } } return text; } /* * Return TRUE if "name" can be a menu in the MenuBar. */ int menu_is_menubar(char_u *name) { return !menu_is_popup(name) && !menu_is_toolbar(name) && *name != MNU_HIDDEN_CHAR; } /* * Return TRUE if "name" is a popup menu name. */ int menu_is_popup(char_u *name) { return STRNCMP(name, "PopUp", 5) == 0; } /* * Return TRUE if "name" is a toolbar menu name. 
*/ int menu_is_toolbar(char_u *name) { return STRNCMP(name, "ToolBar", 7) == 0; } /* * Return TRUE if the name is a menu separator identifier: Starts and ends * with '-' */ int menu_is_separator(char_u *name) { return name[0] == '-' && name[STRLEN(name) - 1] == '-'; } /// True if a popup menu or starts with \ref MNU_HIDDEN_CHAR /// /// @return true if the menu is hidden static int menu_is_hidden(char_u *name) { return (name[0] == MNU_HIDDEN_CHAR) || (menu_is_popup(name) && name[5] != NUL); } /* * Given a menu descriptor, e.g. "File.New", find it in the menu hierarchy and * execute it. */ void ex_emenu(exarg_T *eap) { vimmenu_T *menu; char_u *name; char_u *saved_name; char_u *p; int idx; char_u *mode; saved_name = vim_strsave(eap->arg); menu = root_menu; name = saved_name; while (*name) { /* Find in the menu hierarchy */ p = menu_name_skip(name); while (menu != NULL) { if (menu_name_equal(name, menu)) { if (*p == NUL && menu->children != NULL) { EMSG(_("E333: Menu path must lead to a menu item")); menu = NULL; } else if (*p != NUL && menu->children == NULL) { EMSG(_(e_notsubmenu)); menu = NULL; } break; } menu = menu->next; } if (menu == NULL || *p == NUL) break; menu = menu->children; name = p; } xfree(saved_name); if (menu == NULL) { EMSG2(_("E334: Menu not found: %s"), eap->arg); return; } /* Found the menu, so execute. * Use the Insert mode entry when returning to Insert mode. */ if (((State & INSERT) || restart_edit) && !current_sctx.sc_sid) { mode = (char_u *)"Insert"; idx = MENU_INDEX_INSERT; } else if (State & CMDLINE) { mode = (char_u *)"Command"; idx = MENU_INDEX_CMDLINE; } else if (get_real_state() & VISUAL) { /* Detect real visual mode -- if we are really in visual mode we * don't need to do any guesswork to figure out what the selection * is. Just execute the visual binding for the menu. 
*/ mode = (char_u *)"Visual"; idx = MENU_INDEX_VISUAL; } else if (eap->addr_count) { pos_T tpos; mode = (char_u *)"Visual"; idx = MENU_INDEX_VISUAL; /* GEDDES: This is not perfect - but it is a * quick way of detecting whether we are doing this from a * selection - see if the range matches up with the visual * select start and end. */ if ((curbuf->b_visual.vi_start.lnum == eap->line1) && (curbuf->b_visual.vi_end.lnum) == eap->line2) { /* Set it up for visual mode - equivalent to gv. */ VIsual_mode = curbuf->b_visual.vi_mode; tpos = curbuf->b_visual.vi_end; curwin->w_cursor = curbuf->b_visual.vi_start; curwin->w_curswant = curbuf->b_visual.vi_curswant; } else { /* Set it up for line-wise visual mode */ VIsual_mode = 'V'; curwin->w_cursor.lnum = eap->line1; curwin->w_cursor.col = 1; tpos.lnum = eap->line2; tpos.col = MAXCOL; tpos.coladd = 0; } /* Activate visual mode */ VIsual_active = TRUE; VIsual_reselect = TRUE; check_cursor(); VIsual = curwin->w_cursor; curwin->w_cursor = tpos; check_cursor(); /* Adjust the cursor to make sure it is in the correct pos * for exclusive mode */ if (*p_sel == 'e' && gchar_cursor() != NUL) ++curwin->w_cursor.col; } else { mode = (char_u *)"Normal"; idx = MENU_INDEX_NORMAL; } assert(idx != MENU_INDEX_INVALID); if (menu->strings[idx] != NULL) { // When executing a script or function execute the commands right now. // Otherwise put them in the typeahead buffer. if (current_sctx.sc_sid != 0) { exec_normal_cmd(menu->strings[idx], menu->noremap[idx], menu->silent[idx]); } else { ins_typebuf(menu->strings[idx], menu->noremap[idx], 0, true, menu->silent[idx]); } } else { EMSG2(_("E335: Menu not defined for %s mode"), mode); } } /* * Translation of menu names. Just a simple lookup table. 
*/ typedef struct { char_u *from; /* English name */ char_u *from_noamp; /* same, without '&' */ char_u *to; /* translated name */ } menutrans_T; static garray_T menutrans_ga = GA_EMPTY_INIT_VALUE; #define FREE_MENUTRANS(mt) \ menutrans_T* _mt = (mt); \ xfree(_mt->from); \ xfree(_mt->from_noamp); \ xfree(_mt->to) /* * ":menutrans". * This function is also defined without the +multi_lang feature, in which * case the commands are ignored. */ void ex_menutranslate(exarg_T *eap) { char_u *arg = eap->arg; char_u *from, *from_noamp, *to; if (menutrans_ga.ga_itemsize == 0) ga_init(&menutrans_ga, (int)sizeof(menutrans_T), 5); /* * ":menutrans clear": clear all translations. */ if (STRNCMP(arg, "clear", 5) == 0 && ends_excmd(*skipwhite(arg + 5))) { GA_DEEP_CLEAR(&menutrans_ga, menutrans_T, FREE_MENUTRANS); /* Delete all "menutrans_" global variables. */ del_menutrans_vars(); } else { /* ":menutrans from to": add translation */ from = arg; arg = menu_skip_part(arg); to = skipwhite(arg); *arg = NUL; arg = menu_skip_part(to); if (arg == to) EMSG(_(e_invarg)); else { from = vim_strsave(from); from_noamp = menu_text(from, NULL, NULL); assert(arg >= to); to = vim_strnsave(to, (size_t)(arg - to)); menu_translate_tab_and_shift(from); menu_translate_tab_and_shift(to); menu_unescape_name(from); menu_unescape_name(to); menutrans_T* tp = GA_APPEND_VIA_PTR(menutrans_T, &menutrans_ga); tp->from = from; tp->from_noamp = from_noamp; tp->to = to; } } } /* * Find the character just after one part of a menu name. */ static char_u *menu_skip_part(char_u *p) { while (*p != NUL && *p != '.' && !ascii_iswhite(*p)) { if ((*p == '\\' || *p == Ctrl_V) && p[1] != NUL) ++p; ++p; } return p; } /* * Lookup part of a menu name in the translations. * Return a pointer to the translation or NULL if not found. 
 */
static char_u *menutrans_lookup(char_u *name, int len)
{
  menutrans_T *tp = (menutrans_T *)menutrans_ga.ga_data;
  char_u *dname;

  /* First pass: case-insensitive match of the raw name (which may still
   * contain '&' mnemonic markers) against each stored "from" string.
   * Only a full-length match counts: the stored string must end exactly
   * where the "len"-byte prefix of "name" ends. */
  for (int i = 0; i < menutrans_ga.ga_len; i++) {
    if (STRNICMP(name, tp[i].from, len) == 0 && tp[i].from[len] == NUL) {
      return tp[i].to;
    }
  }

  /* Now try again while ignoring '&' characters. */
  /* Temporarily NUL-terminate "name" at index "len" so menu_text() can build
   * a mnemonic-free ('&'-stripped) copy of just this segment, then restore
   * the saved byte so the caller's string is unchanged. */
  char_u c = name[len];
  name[len] = NUL;
  dname = menu_text(name, NULL, NULL);
  name[len] = c;
  for (int i = 0; i < menutrans_ga.ga_len; i++) {
    if (STRICMP(dname, tp[i].from_noamp) == 0) {
      /* dname was allocated by menu_text(); free before returning */
      xfree(dname);
      return tp[i].to;
    }
  }
  xfree(dname);

  /* No translation found in either pass. */
  return NULL;
}

/*
 * Unescape the name in the translate dictionary table.
 * Removes one level of backslash escaping, in place, up to the first '.'
 * (menu path separator) or the end of the string.
 */
static void menu_unescape_name(char_u *name)
{
  char_u *p;

  for (p = name; *p && *p != '.'; MB_PTR_ADV(p)) {
    if (*p == '\\') {
      /* Shift the rest of the string left over the backslash. */
      STRMOVE(p, p + 1);
    }
  }
}

/*
 * Isolate the menu name.
 * Skip the menu name, and translate <Tab> into a real TAB.
 * Modifies "arg_start" in place: the name is NUL-terminated at the first
 * unescaped whitespace. Returns a pointer past that whitespace (the start
 * of the next argument).
 */
static char_u *menu_translate_tab_and_shift(char_u *arg_start)
{
  char_u *arg = arg_start;

  while (*arg && !ascii_iswhite(*arg)) {
    if ((*arg == '\\' || *arg == Ctrl_V) && arg[1] != NUL)
      /* Skip the escaped character so it is not treated specially. */
      arg++;
    else if (STRNICMP(arg, "<TAB>", 5) == 0) {
      /* Replace the 5-byte "<TAB>" sequence with a single TAB byte. */
      *arg = TAB;
      STRMOVE(arg + 1, arg + 5);
    }
    arg++;
  }
  if (*arg != NUL)
    *arg++ = NUL;
  arg = skipwhite(arg);

  return arg;
}
952511.c
/* ==================================================================== * Copyright (c) 2008 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== */ #include <openssl/aead.h> #include <assert.h> #include <openssl/cpu.h> #include <openssl/cipher.h> #include <openssl/err.h> #include <openssl/mem.h> #include "../fipsmodule/cipher/internal.h" struct ccm128_context { block128_f block; ctr128_f ctr; unsigned M, L; }; struct ccm128_state { union { uint64_t u[2]; uint8_t c[16]; } nonce, cmac; }; static int CRYPTO_ccm128_init(struct ccm128_context *ctx, const AES_KEY *key, block128_f block, ctr128_f ctr, unsigned M, unsigned L) { if (M < 4 || M > 16 || (M & 1) != 0 || L < 2 || L > 8) { return 0; } ctx->block = block; ctx->ctr = ctr; ctx->M = M; ctx->L = L; return 1; } static size_t CRYPTO_ccm128_max_input(const struct ccm128_context *ctx) { return ctx->L >= sizeof(size_t) ? (size_t)-1 : (((size_t)1) << (ctx->L * 8)) - 1; } static int ccm128_init_state(const struct ccm128_context *ctx, struct ccm128_state *state, const AES_KEY *key, const uint8_t *nonce, size_t nonce_len, const uint8_t *aad, size_t aad_len, size_t plaintext_len) { const block128_f block = ctx->block; const unsigned M = ctx->M; const unsigned L = ctx->L; // |L| determines the expected |nonce_len| and the limit for |plaintext_len|. if (plaintext_len > CRYPTO_ccm128_max_input(ctx) || nonce_len != 15 - L) { return 0; } // Assemble the first block for computing the MAC. 
OPENSSL_memset(state, 0, sizeof(*state)); state->nonce.c[0] = (uint8_t)((L - 1) | ((M - 2) / 2) << 3); if (aad_len != 0) { state->nonce.c[0] |= 0x40; // Set AAD Flag } OPENSSL_memcpy(&state->nonce.c[1], nonce, nonce_len); for (unsigned i = 0; i < L; i++) { state->nonce.c[15 - i] = (uint8_t)(plaintext_len >> (8 * i)); } (*block)(state->nonce.c, state->cmac.c, key); size_t blocks = 1; if (aad_len != 0) { unsigned i; // Cast to u64 to avoid the compiler complaining about invalid shifts. uint64_t aad_len_u64 = aad_len; if (aad_len_u64 < 0x10000 - 0x100) { state->cmac.c[0] ^= (uint8_t)(aad_len_u64 >> 8); state->cmac.c[1] ^= (uint8_t)aad_len_u64; i = 2; } else if (aad_len_u64 <= 0xffffffff) { state->cmac.c[0] ^= 0xff; state->cmac.c[1] ^= 0xfe; state->cmac.c[2] ^= (uint8_t)(aad_len_u64 >> 24); state->cmac.c[3] ^= (uint8_t)(aad_len_u64 >> 16); state->cmac.c[4] ^= (uint8_t)(aad_len_u64 >> 8); state->cmac.c[5] ^= (uint8_t)aad_len_u64; i = 6; } else { state->cmac.c[0] ^= 0xff; state->cmac.c[1] ^= 0xff; state->cmac.c[2] ^= (uint8_t)(aad_len_u64 >> 56); state->cmac.c[3] ^= (uint8_t)(aad_len_u64 >> 48); state->cmac.c[4] ^= (uint8_t)(aad_len_u64 >> 40); state->cmac.c[5] ^= (uint8_t)(aad_len_u64 >> 32); state->cmac.c[6] ^= (uint8_t)(aad_len_u64 >> 24); state->cmac.c[7] ^= (uint8_t)(aad_len_u64 >> 16); state->cmac.c[8] ^= (uint8_t)(aad_len_u64 >> 8); state->cmac.c[9] ^= (uint8_t)aad_len_u64; i = 10; } do { for (; i < 16 && aad_len != 0; i++) { state->cmac.c[i] ^= *aad; aad++; aad_len--; } (*block)(state->cmac.c, state->cmac.c, key); blocks++; i = 0; } while (aad_len != 0); } // Per RFC 3610, section 2.6, the total number of block cipher operations done // must not exceed 2^61. There are two block cipher operations remaining per // message block, plus one block at the end to encrypt the MAC. 
size_t remaining_blocks = 2 * ((plaintext_len + 15) / 16) + 1; if (plaintext_len + 15 < plaintext_len || remaining_blocks + blocks < blocks || (uint64_t) remaining_blocks + blocks > UINT64_C(1) << 61) { return 0; } // Assemble the first block for encrypting and decrypting. The bottom |L| // bytes are replaced with a counter and all bit the encoding of |L| is // cleared in the first byte. state->nonce.c[0] &= 7; return 1; } static int ccm128_encrypt(const struct ccm128_context *ctx, struct ccm128_state *state, const AES_KEY *key, uint8_t *out, const uint8_t *in, size_t len) { // The counter for encryption begins at one. for (unsigned i = 0; i < ctx->L; i++) { state->nonce.c[15 - i] = 0; } state->nonce.c[15] = 1; uint8_t partial_buf[16]; unsigned num = 0; if (ctx->ctr != NULL) { CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, state->nonce.c, partial_buf, &num, ctx->ctr); } else { CRYPTO_ctr128_encrypt(in, out, len, key, state->nonce.c, partial_buf, &num, ctx->block); } return 1; } static int ccm128_compute_mac(const struct ccm128_context *ctx, struct ccm128_state *state, const AES_KEY *key, uint8_t *out_tag, size_t tag_len, const uint8_t *in, size_t len) { block128_f block = ctx->block; if (tag_len != ctx->M) { return 0; } // Incorporate |in| into the MAC. union { uint64_t u[2]; uint8_t c[16]; } tmp; while (len >= 16) { OPENSSL_memcpy(tmp.c, in, 16); state->cmac.u[0] ^= tmp.u[0]; state->cmac.u[1] ^= tmp.u[1]; (*block)(state->cmac.c, state->cmac.c, key); in += 16; len -= 16; } if (len > 0) { for (size_t i = 0; i < len; i++) { state->cmac.c[i] ^= in[i]; } (*block)(state->cmac.c, state->cmac.c, key); } // Encrypt the MAC with counter zero. 
for (unsigned i = 0; i < ctx->L; i++) { state->nonce.c[15 - i] = 0; } (*block)(state->nonce.c, tmp.c, key); state->cmac.u[0] ^= tmp.u[0]; state->cmac.u[1] ^= tmp.u[1]; OPENSSL_memcpy(out_tag, state->cmac.c, tag_len); return 1; } static int CRYPTO_ccm128_encrypt(const struct ccm128_context *ctx, const AES_KEY *key, uint8_t *out, uint8_t *out_tag, size_t tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t len, const uint8_t *aad, size_t aad_len) { struct ccm128_state state; return ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len, len) && ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, in, len) && ccm128_encrypt(ctx, &state, key, out, in, len); } static int CRYPTO_ccm128_decrypt(const struct ccm128_context *ctx, const AES_KEY *key, uint8_t *out, uint8_t *out_tag, size_t tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t len, const uint8_t *aad, size_t aad_len) { struct ccm128_state state; return ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len, len) && ccm128_encrypt(ctx, &state, key, out, in, len) && ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, out, len); } #define EVP_AEAD_AES_CCM_MAX_TAG_LEN 16 struct aead_aes_ccm_ctx { union { double align; AES_KEY ks; } ks; struct ccm128_context ccm; }; OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >= sizeof(struct aead_aes_ccm_ctx), "AEAD state is too small"); #if defined(__GNUC__) || defined(__clang__) OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >= alignof(struct aead_aes_ccm_ctx), "AEAD state has insufficient alignment"); #endif static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len, unsigned M, unsigned L) { assert(M == EVP_AEAD_max_overhead(ctx->aead)); assert(M == EVP_AEAD_max_tag_len(ctx->aead)); assert(15 - L == EVP_AEAD_nonce_length(ctx->aead)); if (key_len != EVP_AEAD_key_length(ctx->aead)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 
0; // EVP_AEAD_CTX_init should catch this. } if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { tag_len = M; } if (tag_len != M) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); return 0; } struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state; block128_f block; ctr128_f ctr = aes_ctr_set_key(&ccm_ctx->ks.ks, NULL, &block, key, key_len); ctx->tag_len = tag_len; if (!CRYPTO_ccm128_init(&ccm_ctx->ccm, &ccm_ctx->ks.ks, block, ctr, M, L)) { OPENSSL_PUT_ERROR(CIPHER, ERR_R_INTERNAL_ERROR); return 0; } return 1; } static void aead_aes_ccm_cleanup(EVP_AEAD_CTX *ctx) {} static int aead_aes_ccm_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { const struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state; if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < ctx->tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); return 0; } if (!CRYPTO_ccm128_encrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, out_tag, ctx->tag_len, nonce, nonce_len, in, in_len, ad, ad_len)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } *out_tag_len = ctx->tag_len; return 1; } static int aead_aes_ccm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len) { const struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state; if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (nonce_len != 
EVP_AEAD_nonce_length(ctx->aead)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); return 0; } if (in_tag_len != ctx->tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } uint8_t tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN]; assert(ctx->tag_len <= EVP_AEAD_AES_CCM_MAX_TAG_LEN); if (!CRYPTO_ccm128_decrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, tag, ctx->tag_len, nonce, nonce_len, in, in_len, ad, ad_len)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } return 1; } static int aead_aes_ccm_bluetooth_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len) { return aead_aes_ccm_init(ctx, key, key_len, tag_len, 4, 2); } static const EVP_AEAD aead_aes_128_ccm_bluetooth = { 16, // key length (AES-128) 13, // nonce length 4, // overhead 4, // max tag length 0, // seal_scatter_supports_extra_in aead_aes_ccm_bluetooth_init, NULL /* init_with_direction */, aead_aes_ccm_cleanup, NULL /* open */, aead_aes_ccm_seal_scatter, aead_aes_ccm_open_gather, NULL /* get_iv */, NULL /* tag_len */, }; const EVP_AEAD *EVP_aead_aes_128_ccm_bluetooth(void) { return &aead_aes_128_ccm_bluetooth; } static int aead_aes_ccm_bluetooth_8_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len) { return aead_aes_ccm_init(ctx, key, key_len, tag_len, 8, 2); } static const EVP_AEAD aead_aes_128_ccm_bluetooth_8 = { 16, // key length (AES-128) 13, // nonce length 8, // overhead 8, // max tag length 0, // seal_scatter_supports_extra_in aead_aes_ccm_bluetooth_8_init, NULL /* init_with_direction */, aead_aes_ccm_cleanup, NULL /* open */, aead_aes_ccm_seal_scatter, aead_aes_ccm_open_gather, NULL /* get_iv */, NULL /* tag_len */, }; const EVP_AEAD *EVP_aead_aes_128_ccm_bluetooth_8(void) { return &aead_aes_128_ccm_bluetooth_8; }
883628.c
/*HEADER****************************************************************************************** BSD 3-Clause License Copyright (c) 2020, Carlos Neri All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**END********************************************************************************************/ ////////////////////////////////////////////////////////////////////////////////////////////////// // Includes Section /////////////////////////////////////////////////////////////////////////////////////////////////// #include <stdint.h> #include "RingBuffer.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Defines & Macros Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Typedef Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Function Prototypes Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Global Constants Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Static Constants Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Global Variables Section /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // Static Variables Section /////////////////////////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////////////////////////////////// // Functions Section /////////////////////////////////////////////////////////////////////////////////////////////////// /*FUNCTION********************************************************************** * * Function Name : RingBuffer_Init * Description : initialize the ring buffer. * *END**************************************************************************/ void RingBuffer_Init(RingBuffer_t * spRingBuffer, uint8_t * pStartAddress, uint32_t BufferSize) { spRingBuffer->StartAddress = (uint32_t)pStartAddress; spRingBuffer->EndAddress = ((uint32_t)(pStartAddress) + (uint32_t)(BufferSize) - (uint32_t)(1)); spRingBuffer->BufferSize = BufferSize; spRingBuffer->pReadPointer = pStartAddress; spRingBuffer->pWritePointer = pStartAddress; spRingBuffer->BufferStatus = 0; } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_Reset * Description : re-configure the read and write pointers * *END**************************************************************************/ void RingBuffer_Reset(RingBuffer_t * spRingBuffer) { spRingBuffer->pReadPointer = (uint8_t *)spRingBuffer->StartAddress; spRingBuffer->pWritePointer = (uint8_t *)spRingBuffer->StartAddress; } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_WriteBuffer * Description : Write several bytes into the ring buffer * *END**************************************************************************/ void RingBuffer_WriteBuffer(RingBuffer_t * psRingBuffer, uint8_t * pOutData, uint32_t SizeOfDataToWrite) { while(SizeOfDataToWrite) { *(psRingBuffer->pWritePointer) = *pOutData; psRingBuffer->pWritePointer++; pOutData++; SizeOfDataToWrite--; /* send to the beginning the pointer */ if(((uint32_t)psRingBuffer->pWritePointer) > psRingBuffer->EndAddress) { psRingBuffer->pWritePointer = 
((uint8_t*)psRingBuffer->StartAddress); } } /* check for errors */ if(((uint32_t)psRingBuffer->pWritePointer) == ((uint32_t)psRingBuffer->pReadPointer)) { //psRingBuffer->BufferStatus |= (1<<RING_BUFFER_OVERFLOW); } } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_WriteData * Description : Write single data into the ring buffer * *END**************************************************************************/ void RingBuffer_WriteData(RingBuffer_t * psRingBuffer, uint8_t * pOutData) { /* store the data */ *(psRingBuffer->pWritePointer) = (*pOutData); psRingBuffer->pWritePointer++; /* send to the beginning the pointer */ if(((uint32_t)psRingBuffer->pWritePointer) > psRingBuffer->EndAddress) { psRingBuffer->pWritePointer = ((uint8_t*)psRingBuffer->StartAddress); } /* check for errors */ if(((uint32_t)psRingBuffer->pWritePointer) == ((uint32_t)psRingBuffer->pReadPointer)) { //psRingBuffer->BufferStatus |= (1<<RING_BUFFER_OVERFLOW); } } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_ReadData * Description : Read single data from the ring buffer * *END**************************************************************************/ void RingBuffer_ReadData(RingBuffer_t * psRingBuffer, uint8_t * pData) { *pData = *(psRingBuffer->pReadPointer); psRingBuffer->pReadPointer++; /* send to the beginning the pointer */ if(((uint32_t)psRingBuffer->pReadPointer) > psRingBuffer->EndAddress) { psRingBuffer->pReadPointer = ((uint8_t*)psRingBuffer->StartAddress); } /* check for errors */ if(((uint32_t)psRingBuffer->pWritePointer) == ((uint32_t)psRingBuffer->pReadPointer)) { //psRingBuffer->BufferStatus |= (1<<RING_BUFFER_OVERFLOW); } } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_ReadBuffer * Description : Read data several bytes from the ring buffer * 
*END**************************************************************************/ void RingBuffer_ReadBuffer(RingBuffer_t * psRingBuffer, uint8_t* pDataIn, uint32_t DataToRead) { while(DataToRead) { *pDataIn = *(psRingBuffer->pReadPointer); psRingBuffer->pReadPointer++; pDataIn++; DataToRead--; /* send to the beginning the pointer */ if(((uint32_t)psRingBuffer->pReadPointer) > psRingBuffer->EndAddress) { psRingBuffer->pReadPointer = ((uint8_t*)psRingBuffer->StartAddress); } } /* check for errors */ if(((uint32_t)psRingBuffer->pWritePointer) == ((uint32_t)psRingBuffer->pReadPointer)) { //psRingBuffer->BufferStatus |= (1<<RING_BUFFER_OVERFLOW); } } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_SpaceAvailable * Description : Returns the current space available in the ring buffer * *END**************************************************************************/ uint32_t RingBuffer_SpaceAvailable(RingBuffer_t * psRingBuffer) { uint32_t SpaceAvailable; /* calcualted current available space */ if(((uint32_t)psRingBuffer->pWritePointer) > ((uint32_t)psRingBuffer->pReadPointer)) { SpaceAvailable = ((uint32_t)psRingBuffer->pWritePointer - (uint32_t)psRingBuffer->pReadPointer); } else { SpaceAvailable = (psRingBuffer->BufferSize - ((uint32_t)psRingBuffer->pReadPointer - (uint32_t)psRingBuffer->pWritePointer)); } return(SpaceAvailable); } /*FUNCTION********************************************************************** * * Function Name : RingBuffer_DataAvailable * Description : Returns the current space occupied in the ring buffer * *END**************************************************************************/ uint32_t RingBuffer_DataAvailable(RingBuffer_t * psRingBuffer) { uint32_t SpaceAvailable; /* calcualted current available space */ if(((uint32_t)psRingBuffer->pWritePointer) > ((uint32_t)psRingBuffer->pReadPointer)) { SpaceAvailable = ((uint32_t)psRingBuffer->pWritePointer - 
(uint32_t)psRingBuffer->pReadPointer); } else { SpaceAvailable = (psRingBuffer->BufferSize - ((uint32_t)psRingBuffer->pReadPointer - (uint32_t)psRingBuffer->pWritePointer)); } return(SpaceAvailable); } /////////////////////////////////////////////////////////////////////////////////////////////////// // EOF ///////////////////////////////////////////////////////////////////////////////////////////////////
61220.c
/* *------------------------------------------------------------------ * tapcli.c - dynamic tap interface hookup * * Copyright (c) 2009 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------ */ /** * @file * @brief dynamic tap interface hookup */ #include <fcntl.h> /* for open */ #include <sys/ioctl.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/uio.h> /* for iovec */ #include <netinet/in.h> #include <linux/if_arp.h> #include <linux/if_tun.h> #include <vlib/vlib.h> #include <vlib/unix/unix.h> #include <vnet/ip/ip.h> #include <vnet/ethernet/ethernet.h> #if DPDK == 1 #include <vnet/devices/dpdk/dpdk.h> #endif #include <vnet/unix/tapcli.h> static vnet_device_class_t tapcli_dev_class; static vnet_hw_interface_class_t tapcli_interface_class; static vlib_node_registration_t tapcli_rx_node; static void tapcli_nopunt_frame (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame); /** * @brief Struct for the tapcli interface */ typedef struct { u32 unix_fd; u32 unix_file_index; u32 provision_fd; /** For counters */ u32 sw_if_index; u32 hw_if_index; u32 is_promisc; struct ifreq ifr; u32 per_interface_next_index; /** for delete */ u8 active; } tapcli_interface_t; /** * @brief Struct for RX trace */ typedef struct { u16 sw_if_index; } tapcli_rx_trace_t; /** * @brief Function to format TAP CLI trace * * @param *s - u8 - formatting string * @param *va - 
va_list * * @return *s - u8 - formatted string * */ u8 * format_tapcli_rx_trace (u8 * s, va_list * va) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *); vnet_main_t * vnm = vnet_get_main(); tapcli_rx_trace_t * t = va_arg (*va, tapcli_rx_trace_t *); s = format (s, "%U", format_vnet_sw_if_index_name, vnm, t->sw_if_index); return s; } /** * @brief TAPCLI main state struct */ typedef struct { /** Vector of iovecs for readv/writev calls. */ struct iovec * iovecs; /** Vector of VLIB rx buffers to use. We allocate them in blocks of VLIB_FRAME_SIZE (256). */ u32 * rx_buffers; /** tap device destination MAC address. Required, or Linux drops pkts */ u8 ether_dst_mac[6]; /** Interface MTU in bytes and # of default sized buffers. */ u32 mtu_bytes, mtu_buffers; /** Vector of tap interfaces */ tapcli_interface_t * tapcli_interfaces; /** Vector of deleted tap interfaces */ u32 * tapcli_inactive_interfaces; /** Bitmap of tap interfaces with pending reads */ uword * pending_read_bitmap; /** Hash table to find tapcli interface given hw_if_index */ uword * tapcli_interface_index_by_sw_if_index; /** Hash table to find tapcli interface given unix fd */ uword * tapcli_interface_index_by_unix_fd; /** renumbering table */ u32 * show_dev_instance_by_real_dev_instance; /** 1 => disable CLI */ int is_disabled; /** convenience - vlib_main_t */ vlib_main_t * vlib_main; /** convenience - vnet_main_t */ vnet_main_t * vnet_main; /** convenience - unix_main_t */ unix_main_t * unix_main; } tapcli_main_t; static tapcli_main_t tapcli_main; /** * @brief tapcli TX node function * @node tap-cli-tx * * Output node, writes the buffers comprising the incoming frame * to the tun/tap device, aka hands them to the Linux kernel stack. 
* * @param *vm - vlib_main_t * @param *node - vlib_node_runtime_t * @param *frame - vlib_frame_t * * @return n_packets - uword * */ static uword tapcli_tx (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 * buffers = vlib_frame_args (frame); uword n_packets = frame->n_vectors; tapcli_main_t * tm = &tapcli_main; tapcli_interface_t * ti; int i; for (i = 0; i < n_packets; i++) { struct iovec * iov; vlib_buffer_t * b; uword l; vnet_hw_interface_t * hw; uword * p; u32 tx_sw_if_index; b = vlib_get_buffer (vm, buffers[i]); tx_sw_if_index = vnet_buffer(b)->sw_if_index[VLIB_TX]; if (tx_sw_if_index == (u32)~0) tx_sw_if_index = vnet_buffer(b)->sw_if_index[VLIB_RX]; ASSERT(tx_sw_if_index != (u32)~0); /* Use the sup intfc to finesse vlan subifs */ hw = vnet_get_sup_hw_interface (tm->vnet_main, tx_sw_if_index); tx_sw_if_index = hw->sw_if_index; p = hash_get (tm->tapcli_interface_index_by_sw_if_index, tx_sw_if_index); if (p == 0) { clib_warning ("sw_if_index %d unknown", tx_sw_if_index); /* $$$ leak, but this should never happen... */ continue; } else ti = vec_elt_at_index (tm->tapcli_interfaces, p[0]); /* Re-set iovecs if present. */ if (tm->iovecs) _vec_len (tm->iovecs) = 0; /* VLIB buffer chain -> Unix iovec(s). 
*/ vec_add2 (tm->iovecs, iov, 1); iov->iov_base = b->data + b->current_data; iov->iov_len = l = b->current_length; if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT)) { do { b = vlib_get_buffer (vm, b->next_buffer); vec_add2 (tm->iovecs, iov, 1); iov->iov_base = b->data + b->current_data; iov->iov_len = b->current_length; l += b->current_length; } while (b->flags & VLIB_BUFFER_NEXT_PRESENT); } if (writev (ti->unix_fd, tm->iovecs, vec_len (tm->iovecs)) < l) clib_unix_warning ("writev"); } vlib_buffer_free(vm, vlib_frame_vector_args(frame), frame->n_vectors); return n_packets; } VLIB_REGISTER_NODE (tapcli_tx_node,static) = { .function = tapcli_tx, .name = "tapcli-tx", .type = VLIB_NODE_TYPE_INTERNAL, .vector_size = 4, }; enum { TAPCLI_RX_NEXT_IP4_INPUT, TAPCLI_RX_NEXT_IP6_INPUT, TAPCLI_RX_NEXT_ETHERNET_INPUT, TAPCLI_RX_NEXT_DROP, TAPCLI_RX_N_NEXT, }; /** * @brief Dispatch tapcli RX node function for node tap_cli_rx * * * @param *vm - vlib_main_t * @param *node - vlib_node_runtime_t * @param *ti - tapcli_interface_t * * @return n_packets - uword * */ static uword tapcli_rx_iface(vlib_main_t * vm, vlib_node_runtime_t * node, tapcli_interface_t * ti) { tapcli_main_t * tm = &tapcli_main; const uword buffer_size = VLIB_BUFFER_DATA_SIZE; u32 n_trace = vlib_get_trace_count (vm, node); u8 set_trace = 0; vnet_main_t *vnm; vnet_sw_interface_t * si; u8 admin_down; u32 next = node->cached_next_index; u32 n_left_to_next, next_index; u32 *to_next; vnm = vnet_get_main(); si = vnet_get_sw_interface (vnm, ti->sw_if_index); admin_down = !(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP); vlib_get_next_frame(vm, node, next, to_next, n_left_to_next); while (n_left_to_next) { // Fill at most one vector vlib_buffer_t *b_first, *b, *prev; u32 bi_first, bi; word n_bytes_in_packet; int j, n_bytes_left; if (PREDICT_FALSE(vec_len(tm->rx_buffers) < tm->mtu_buffers)) { uword len = vec_len(tm->rx_buffers); _vec_len(tm->rx_buffers) += vlib_buffer_alloc_from_free_list(vm, &tm->rx_buffers[len], 
                                           VLIB_FRAME_SIZE - len,
                                           VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
      /* still short after the refill attempt: count the shortfall and bail */
      if (PREDICT_FALSE(vec_len(tm->rx_buffers) < tm->mtu_buffers)) {
        vlib_node_increment_counter(vm, tapcli_rx_node.index,
                                    TAPCLI_ERROR_BUFFER_ALLOC,
                                    tm->mtu_buffers - vec_len(tm->rx_buffers));
        break;
      }
    }

    uword i_rx = vec_len (tm->rx_buffers) - 1;

    /* Allocate RX buffers from end of rx_buffers.
           Turn them into iovecs to pass to readv. */
    vec_validate (tm->iovecs, tm->mtu_buffers - 1);
    for (j = 0; j < tm->mtu_buffers; j++) {
      b = vlib_get_buffer (vm, tm->rx_buffers[i_rx - j]);
      tm->iovecs[j].iov_base = b->data;
      tm->iovecs[j].iov_len = buffer_size;
    }

    n_bytes_left = readv (ti->unix_fd, tm->iovecs, tm->mtu_buffers);
    n_bytes_in_packet = n_bytes_left;
    if (n_bytes_left <= 0) {
      /* EAGAIN just means "no more packets" on the non-blocking fd */
      if (errno != EAGAIN) {
        vlib_node_increment_counter(vm, tapcli_rx_node.index,
                                    TAPCLI_ERROR_READ, 1);
      }
      break;
    }

    bi_first = tm->rx_buffers[i_rx];
    b = b_first = vlib_get_buffer (vm, tm->rx_buffers[i_rx]);
    prev = NULL;

    /* chain the filled buffers together, one buffer_size chunk each */
    while (1) {
      b->current_length = n_bytes_left < buffer_size ? n_bytes_left : buffer_size;
      n_bytes_left -= buffer_size;

      if (prev) {
        prev->next_buffer = bi;
        prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
      }
      prev = b;

      /* last segment */
      if (n_bytes_left <= 0)
        break;

      i_rx--;
      bi = tm->rx_buffers[i_rx];
      b = vlib_get_buffer (vm, bi);
    }

    /* consume the buffers used by this packet from the cache */
    _vec_len (tm->rx_buffers) = i_rx;

    b_first->total_length_not_including_first_buffer =
        (n_bytes_in_packet > buffer_size) ? n_bytes_in_packet - buffer_size : 0;
    b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

    /* Ensure mbufs are updated */
    vlib_buffer_chain_validate(vm, b_first);

    VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b_first);

    vnet_buffer (b_first)->sw_if_index[VLIB_RX] = ti->sw_if_index;
    vnet_buffer (b_first)->sw_if_index[VLIB_TX] = (u32)~0;

    b_first->error = node->errors[TAPCLI_ERROR_NONE];

    /* default next node, then apply per-interface redirect / admin-down */
    next_index = TAPCLI_RX_NEXT_ETHERNET_INPUT;
    next_index = (ti->per_interface_next_index != ~0) ?
        ti->per_interface_next_index : next_index;
    next_index = admin_down ?
  (flags & ETHERNET_INTERFACE_FLAG_MTU)
    {
      /* MTU change: recompute buffers-per-packet */
      const uword buffer_size = VLIB_BUFFER_DATA_SIZE;
      tm->mtu_bytes = hw->max_packet_bytes;
      tm->mtu_buffers = (tm->mtu_bytes + (buffer_size - 1)) / buffer_size;
    }
  else
    {
      struct ifreq ifr;
      u32 want_promisc;

      memcpy (&ifr, &ti->ifr, sizeof (ifr));

      /* get flags, modify to bring up interface... */
      if (ioctl (ti->provision_fd, SIOCGIFFLAGS, &ifr) < 0)
        {
          clib_unix_warning ("Couldn't get interface flags for %s", hw->name);
          return 0;
        }

      want_promisc = (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;

      /* no change requested: nothing to push to the kernel */
      if (want_promisc == ti->is_promisc)
        return 0;

      if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
        ifr.ifr_flags |= IFF_PROMISC;
      else
        ifr.ifr_flags &= ~(IFF_PROMISC);

      /* get flags, modify to bring up interface... */
      if (ioctl (ti->provision_fd, SIOCSIFFLAGS, &ifr) < 0)
        {
          clib_unix_warning ("Couldn't set interface flags for %s", hw->name);
          return 0;
        }

      ti->is_promisc = want_promisc;
    }

  return 0;
}

/**
 * @brief Setting the TAP interface's next processing node
 *
 * @param *vnm - vnet_main_t
 * @param hw_if_index - u32
 * @param node_index - u32
 */
static void tapcli_set_interface_next_node (vnet_main_t *vnm,
                                            u32 hw_if_index,
                                            u32 node_index)
{
  tapcli_main_t *tm = &tapcli_main;
  tapcli_interface_t *ti;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);

  ti = vec_elt_at_index (tm->tapcli_interfaces, hw->dev_instance);

  /** Shut off redirection */
  if (node_index == ~0)
    {
      ti->per_interface_next_index = node_index;
      return;
    }

  ti->per_interface_next_index =
    vlib_node_add_next (tm->vlib_main, tapcli_rx_node.index, node_index);
}

/**
 * @brief Set link_state == admin_state otherwise things like ip6 neighbor
 * discovery breaks
 *
 * @param *vnm - vnet_main_t
 * @param hw_if_index - u32
 * @param flags - u32
 *
 * @return error - clib_error_t
 */
static clib_error_t *
tapcli_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  uword is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  u32 hw_flags;
  /* advertised speed/duplex is fixed; only link state follows admin state */
  u32 speed_duplex = VNET_HW_INTERFACE_FLAG_FULL_DUPLEX
    | VNET_HW_INTERFACE_FLAG_SPEED_1G;

  if (is_admin_up)
    hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP | speed_duplex;
  else
    hw_flags = speed_duplex;

  vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
  return 0;
}

VNET_DEVICE_CLASS (tapcli_dev_class,static) = {
  .name = "tapcli",
  .tx_function = tapcli_tx,
  .format_device_name = format_tapcli_interface_name,
  .rx_redirect_to_node = tapcli_set_interface_next_node,
  .name_renumber = tap_name_renumber,
  .admin_up_down_function = tapcli_interface_admin_up_down,
  .no_flatten_output_chains = 1,
};

/**
 * @brief Dump TAP interfaces
 *
 * Caller owns the returned vector (free with vec_free).
 *
 * @param **out_tapids - tapcli_interface_details_t
 *
 * @return rc - int
 */
int vnet_tap_dump_ifs (tapcli_interface_details_t **out_tapids)
{
  tapcli_main_t * tm = &tapcli_main;
  tapcli_interface_t * ti;

  tapcli_interface_details_t * r_tapids = NULL;
  tapcli_interface_details_t * tapid = NULL;

  vec_foreach (ti, tm->tapcli_interfaces) {
    /* skip deleted (inactive) entries */
    if (!ti->active)
      continue;
    vec_add2(r_tapids, tapid, 1);
    tapid->sw_if_index = ti->sw_if_index;
    strncpy((char *)tapid->dev_name, ti->ifr.ifr_name,
            sizeof (ti->ifr.ifr_name)-1);
  }

  *out_tapids = r_tapids;

  return 0;
}

/**
 * @brief Get tap interface from inactive interfaces or create new
 *
 * @return interface - tapcli_interface_t
 */
static tapcli_interface_t *tapcli_get_new_tapif()
{
  tapcli_main_t * tm = &tapcli_main;
  tapcli_interface_t *ti = NULL;

  int inactive_cnt = vec_len(tm->tapcli_inactive_interfaces);
  // if there are any inactive ifaces
  if (inactive_cnt > 0) {
    // take last
    u32 ti_idx = tm->tapcli_inactive_interfaces[inactive_cnt - 1];
    if (vec_len(tm->tapcli_interfaces) > ti_idx) {
      ti = vec_elt_at_index (tm->tapcli_interfaces, ti_idx);
      clib_warning("reusing tap interface");
    }
    // "remove" from inactive list
    _vec_len(tm->tapcli_inactive_interfaces) -= 1;
  }

  // ti was not retrieved from inactive ifaces - create new
  if (!ti)
    vec_add2 (tm->tapcli_interfaces, ti, 1);

  return ti;
}

/**
 * @brief Connect a TAP interface
 *
 * Creates the kernel tun/tap device, brings it up, registers an
 * ethernet interface for it and wires the fd into the unix event loop.
 *
 * NOTE(review): on the error paths after tapcli_get_new_tapif() the
 * interface slot is not returned to the inactive list — confirm whether
 * that leak is acceptable here.
 *
 * @param vm - vlib_main_t
 * @param intfc_name - u8
 * @param hwaddr_arg - u8
 * @param sw_if_indexp - u32
 *
 * @return rc - int
 */
int vnet_tap_connect (vlib_main_t * vm, u8 * intfc_name, u8 *hwaddr_arg,
                      u32 * sw_if_indexp)
{
  tapcli_main_t * tm = &tapcli_main;
  tapcli_interface_t * ti = NULL;
  struct ifreq ifr;
  int flags;
  int dev_net_tun_fd;
  int dev_tap_fd = -1;
  clib_error_t * error;
  u8 hwaddr [6];
  int rv = 0;

  if (tm->is_disabled)
    {
      return VNET_API_ERROR_FEATURE_DISABLED;
    }

  flags = IFF_TAP | IFF_NO_PI;

  if ((dev_net_tun_fd = open ("/dev/net/tun", O_RDWR)) < 0)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  memset (&ifr, 0, sizeof (ifr));
  strncpy(ifr.ifr_name, (char *) intfc_name, sizeof (ifr.ifr_name)-1);
  ifr.ifr_flags = flags;
  if (ioctl (dev_net_tun_fd, TUNSETIFF, (void *)&ifr) < 0)
    {
      rv = VNET_API_ERROR_SYSCALL_ERROR_2;
      goto error;
    }

  /* Open a provisioning socket */
  if ((dev_tap_fd = socket(PF_PACKET, SOCK_RAW,
                           htons(ETH_P_ALL))) < 0 )
    {
      rv = VNET_API_ERROR_SYSCALL_ERROR_3;
      goto error;
    }

  /* Find the interface index. */
  {
    struct ifreq ifr;
    struct sockaddr_ll sll;

    memset (&ifr, 0, sizeof(ifr));
    strncpy (ifr.ifr_name, (char *) intfc_name, sizeof (ifr.ifr_name)-1);
    if (ioctl (dev_tap_fd, SIOCGIFINDEX, &ifr) < 0 )
      {
        rv = VNET_API_ERROR_SYSCALL_ERROR_4;
        goto error;
      }

    /* Bind the provisioning socket to the interface. */
    memset(&sll, 0, sizeof(sll));
    sll.sll_family   = AF_PACKET;
    sll.sll_ifindex  = ifr.ifr_ifindex;
    sll.sll_protocol = htons(ETH_P_ALL);
    if (bind(dev_tap_fd, (struct sockaddr*) &sll, sizeof(sll)) < 0)
      {
        rv = VNET_API_ERROR_SYSCALL_ERROR_5;
        goto error;
      }
  }

  /* non-blocking I/O on /dev/tapX */
  {
    int one = 1;
    if (ioctl (dev_net_tun_fd, FIONBIO, &one) < 0)
      {
        rv = VNET_API_ERROR_SYSCALL_ERROR_6;
        goto error;
      }
  }

  ifr.ifr_mtu = tm->mtu_bytes;
  if (ioctl (dev_tap_fd, SIOCSIFMTU, &ifr) < 0)
    {
      rv = VNET_API_ERROR_SYSCALL_ERROR_7;
      goto error;
    }

  /* get flags, modify to bring up interface... */
  if (ioctl (dev_tap_fd, SIOCGIFFLAGS, &ifr) < 0)
    {
      rv = VNET_API_ERROR_SYSCALL_ERROR_8;
      goto error;
    }

  ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
  if (ioctl (dev_tap_fd, SIOCSIFFLAGS, &ifr) < 0)
    {
      rv = VNET_API_ERROR_SYSCALL_ERROR_9;
      goto error;
    }

  ti = tapcli_get_new_tapif();
  ti->per_interface_next_index = ~0;

  if (hwaddr_arg != 0)
    clib_memcpy(hwaddr, hwaddr_arg, 6);
  else
    {
      /* no MAC supplied: synthesize a locally-administered one (02:fe:..)
         seeded from the current time */
      f64 now = vlib_time_now(vm);
      u32 rnd;
      rnd = (u32) (now * 1e6);
      rnd = random_u32 (&rnd);

      memcpy (hwaddr+2, &rnd, sizeof(rnd));
      hwaddr[0] = 2;
      hwaddr[1] = 0xfe;
    }

  error = ethernet_register_interface
    (tm->vnet_main,
     tapcli_dev_class.index,
     ti - tm->tapcli_interfaces /* device instance */,
     hwaddr /* ethernet address */,
     &ti->hw_if_index,
     tapcli_flag_change);

  if (error)
    {
      clib_error_report (error);
      rv = VNET_API_ERROR_INVALID_REGISTRATION;
      goto error;
    }

  {
    unix_file_t template = {0};
    template.read_function = tapcli_read_ready;
    template.file_descriptor = dev_net_tun_fd;
    ti->unix_file_index = unix_file_add (&unix_main, &template);
    ti->unix_fd = dev_net_tun_fd;
    ti->provision_fd = dev_tap_fd;
    clib_memcpy (&ti->ifr, &ifr, sizeof (ifr));
  }

  {
    vnet_hw_interface_t * hw;
    hw = vnet_get_hw_interface (tm->vnet_main, ti->hw_if_index);
    hw->min_supported_packet_bytes = TAP_MTU_MIN;
    hw->max_supported_packet_bytes = TAP_MTU_MAX;
    hw->max_l3_packet_bytes[VLIB_RX] = hw->max_l3_packet_bytes[VLIB_TX] =
      hw->max_supported_packet_bytes - sizeof(ethernet_header_t);
    ti->sw_if_index = hw->sw_if_index;
    if (sw_if_indexp)
      *sw_if_indexp = hw->sw_if_index;
  }

  ti->active = 1;

  hash_set (tm->tapcli_interface_index_by_sw_if_index, ti->sw_if_index,
            ti - tm->tapcli_interfaces);

  hash_set (tm->tapcli_interface_index_by_unix_fd, ti->unix_fd,
            ti - tm->tapcli_interfaces);

  return rv;

 error:
  close (dev_net_tun_fd);
  if (dev_tap_fd >= 0)
      close (dev_tap_fd);

  return rv;
}

/**
 * @brief Renumber a TAP interface
 *
 * @param *vm - vlib_main_t
 * @param *intfc_name - u8
 * @param *hwaddr_arg - u8
 * @param *sw_if_indexp - u32
 * @param renumber - u8
 * @param
custom_dev_instance - u32 * * @return rc - int * */ int vnet_tap_connect_renumber (vlib_main_t * vm, u8 * intfc_name, u8 *hwaddr_arg, u32 * sw_if_indexp, u8 renumber, u32 custom_dev_instance) { int rv = vnet_tap_connect(vm, intfc_name, hwaddr_arg, sw_if_indexp); if (!rv && renumber) vnet_interface_name_renumber (*sw_if_indexp, custom_dev_instance); return rv; } /** * @brief Disconnect TAP CLI interface * * @param *ti - tapcli_interface_t * * @return rc - int * */ static int tapcli_tap_disconnect (tapcli_interface_t *ti) { int rv = 0; vnet_main_t * vnm = vnet_get_main(); tapcli_main_t * tm = &tapcli_main; u32 sw_if_index = ti->sw_if_index; // bring interface down vnet_sw_interface_set_flags (vnm, sw_if_index, 0); if (ti->unix_file_index != ~0) { unix_file_del (&unix_main, unix_main.file_pool + ti->unix_file_index); ti->unix_file_index = ~0; } else close(ti->unix_fd); hash_unset (tm->tapcli_interface_index_by_unix_fd, ti->unix_fd); hash_unset (tm->tapcli_interface_index_by_sw_if_index, ti->sw_if_index); close(ti->provision_fd); ti->unix_fd = -1; ti->provision_fd = -1; return rv; } /** * @brief Delete TAP interface * * @param *vm - vlib_main_t * @param sw_if_index - u32 * * @return rc - int * */ int vnet_tap_delete(vlib_main_t *vm, u32 sw_if_index) { int rv = 0; tapcli_main_t * tm = &tapcli_main; tapcli_interface_t *ti; uword *p = NULL; p = hash_get (tm->tapcli_interface_index_by_sw_if_index, sw_if_index); if (p == 0) { clib_warning ("sw_if_index %d unknown", sw_if_index); return VNET_API_ERROR_INVALID_SW_IF_INDEX; } ti = vec_elt_at_index (tm->tapcli_interfaces, p[0]); // inactive ti->active = 0; tapcli_tap_disconnect(ti); // add to inactive list vec_add1(tm->tapcli_inactive_interfaces, ti - tm->tapcli_interfaces); // reset renumbered iface if (p[0] < vec_len (tm->show_dev_instance_by_real_dev_instance)) tm->show_dev_instance_by_real_dev_instance[p[0]] = ~0; ethernet_delete_interface (tm->vnet_main, ti->hw_if_index); return rv; } /** * @brief CLI function to delete 
TAP interface * * @param *vm - vlib_main_t * @param *input - unformat_input_t * @param *cmd - vlib_cli_command_t * * @return error - clib_error_t * */ static clib_error_t * tap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { tapcli_main_t * tm = &tapcli_main; u32 sw_if_index = ~0; if (tm->is_disabled) { return clib_error_return (0, "device disabled..."); } if (unformat (input, "%U", unformat_vnet_sw_interface, tm->vnet_main, &sw_if_index)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); int rc = vnet_tap_delete (vm, sw_if_index); if (!rc) { vlib_cli_output (vm, "Deleted."); } else { vlib_cli_output (vm, "Error during deletion of tap interface. (rc: %d)", rc); } return 0; } VLIB_CLI_COMMAND (tap_delete_command, static) = { .path = "tap delete", .short_help = "tap delete <vpp-tap-intfc-name>", .function = tap_delete_command_fn, }; /** * @brief Modifies tap interface - can result in new interface being created * * @param *vm - vlib_main_t * @param orig_sw_if_index - u32 * @param *intfc_name - u8 * @param *hwaddr_arg - u8 * @param *sw_if_indexp - u32 * @param renumber - u8 * @param custom_dev_instance - u32 * * @return rc - int * */ int vnet_tap_modify (vlib_main_t * vm, u32 orig_sw_if_index, u8 * intfc_name, u8 *hwaddr_arg, u32 * sw_if_indexp, u8 renumber, u32 custom_dev_instance) { int rv = vnet_tap_delete (vm, orig_sw_if_index); if (rv) return rv; rv = vnet_tap_connect_renumber(vm, intfc_name, hwaddr_arg, sw_if_indexp, renumber, custom_dev_instance); return rv; } /** * @brief CLI function to modify TAP interface * * @param *vm - vlib_main_t * @param *input - unformat_input_t * @param *cmd - vlib_cli_command_t * * @return error - clib_error_t * */ static clib_error_t * tap_modify_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { u8 * intfc_name; tapcli_main_t * tm = &tapcli_main; u32 sw_if_index = ~0; u32 new_sw_if_index = ~0; int user_hwaddr = 0; u8 
hwaddr[6]; if (tm->is_disabled) { return clib_error_return (0, "device disabled..."); } if (unformat (input, "%U", unformat_vnet_sw_interface, tm->vnet_main, &sw_if_index)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); if (unformat (input, "%s", &intfc_name)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); if (unformat(input, "hwaddr %U", unformat_ethernet_address, &hwaddr)) user_hwaddr = 1; int rc = vnet_tap_modify (vm, sw_if_index, intfc_name, (user_hwaddr == 1 ? hwaddr : 0), &new_sw_if_index, 0, 0); if (!rc) { vlib_cli_output (vm, "Modified %U for Linux tap '%s'", format_vnet_sw_if_index_name, tm->vnet_main, new_sw_if_index, intfc_name); } else { vlib_cli_output (vm, "Error during modification of tap interface. (rc: %d)", rc); } return 0; } VLIB_CLI_COMMAND (tap_modify_command, static) = { .path = "tap modify", .short_help = "tap modify <vpp-tap-intfc-name> <linux-intfc-name> [hwaddr <addr>]", .function = tap_modify_command_fn, }; /** * @brief CLI function to connect TAP interface * * @param *vm - vlib_main_t * @param *input - unformat_input_t * @param *cmd - vlib_cli_command_t * * @return error - clib_error_t * */ static clib_error_t * tap_connect_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { u8 * intfc_name; tapcli_main_t * tm = &tapcli_main; u8 hwaddr[6]; u8 *hwaddr_arg = 0; u32 sw_if_index; if (tm->is_disabled) { return clib_error_return (0, "device disabled..."); } if (unformat (input, "%s", &intfc_name)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); if (unformat(input, "hwaddr %U", unformat_ethernet_address, &hwaddr)) hwaddr_arg = hwaddr; /* It is here for backward compatibility */ if (unformat(input, "hwaddr random")) ; int rv = vnet_tap_connect(vm, intfc_name, hwaddr_arg, &sw_if_index); if (rv) { switch (rv) { case VNET_API_ERROR_SYSCALL_ERROR_1: vlib_cli_output (vm, "Couldn't open 
/dev/net/tun"); break; case VNET_API_ERROR_SYSCALL_ERROR_2: vlib_cli_output (vm, "Error setting flags on '%s'", intfc_name); break; case VNET_API_ERROR_SYSCALL_ERROR_3: vlib_cli_output (vm, "Couldn't open provisioning socket"); break; case VNET_API_ERROR_SYSCALL_ERROR_4: vlib_cli_output (vm, "Couldn't get if_index"); break; case VNET_API_ERROR_SYSCALL_ERROR_5: vlib_cli_output (vm, "Couldn't bind provisioning socket"); break; case VNET_API_ERROR_SYSCALL_ERROR_6: vlib_cli_output (0, "Couldn't set device non-blocking flag"); break; case VNET_API_ERROR_SYSCALL_ERROR_7: vlib_cli_output (0, "Couldn't set device MTU"); break; case VNET_API_ERROR_SYSCALL_ERROR_8: vlib_cli_output (0, "Couldn't get interface flags"); break; case VNET_API_ERROR_SYSCALL_ERROR_9: vlib_cli_output (0, "Couldn't set intfc admin state up"); break; case VNET_API_ERROR_INVALID_REGISTRATION: vlib_cli_output (0, "Invalid registration"); break; default: vlib_cli_output (0, "Unknown error: %d", rv); break; } return 0; } vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index); return 0; } VLIB_CLI_COMMAND (tap_connect_command, static) = { .path = "tap connect", .short_help = "tap connect <intfc-name> [hwaddr <addr>]", .function = tap_connect_command_fn, }; /** * @brief TAPCLI main init * * @param *vm - vlib_main_t * * @return error - clib_error_t * */ clib_error_t * tapcli_init (vlib_main_t * vm) { tapcli_main_t * tm = &tapcli_main; tm->vlib_main = vm; tm->vnet_main = vnet_get_main(); tm->unix_main = &unix_main; tm->mtu_bytes = TAP_MTU_DEFAULT; tm->tapcli_interface_index_by_sw_if_index = hash_create (0, sizeof(uword)); tm->tapcli_interface_index_by_unix_fd = hash_create (0, sizeof (uword)); tm->rx_buffers = 0; vec_alloc(tm->rx_buffers, VLIB_FRAME_SIZE); vec_reset_length(tm->rx_buffers); vm->os_punt_frame = tapcli_nopunt_frame; return 0; } VLIB_INIT_FUNCTION (tapcli_init);
516250.c
/* * Freescale STMP37XX/STMP378X Real Time Clock driver * * Copyright (c) 2007 Sigmatel, Inc. * Peter Hartley, <[email protected]> * * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. * Copyright 2011 Wolfram Sang, Pengutronix e.K. */ /* * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/stmp_device.h> #include <linux/stmp3xxx_rtc_wdt.h> #define STMP3XXX_RTC_CTRL 0x0 #define STMP3XXX_RTC_CTRL_ALARM_IRQ_EN 0x00000001 #define STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN 0x00000002 #define STMP3XXX_RTC_CTRL_ALARM_IRQ 0x00000004 #define STMP3XXX_RTC_CTRL_WATCHDOGEN 0x00000010 #define STMP3XXX_RTC_STAT 0x10 #define STMP3XXX_RTC_STAT_STALE_SHIFT 16 #define STMP3XXX_RTC_STAT_RTC_PRESENT 0x80000000 #define STMP3XXX_RTC_STAT_XTAL32000_PRESENT 0x10000000 #define STMP3XXX_RTC_STAT_XTAL32768_PRESENT 0x08000000 #define STMP3XXX_RTC_SECONDS 0x30 #define STMP3XXX_RTC_ALARM 0x40 #define STMP3XXX_RTC_WATCHDOG 0x50 #define STMP3XXX_RTC_PERSISTENT0 0x60 #define STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE (1 << 0) #define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN (1 << 1) #define STMP3XXX_RTC_PERSISTENT0_ALARM_EN (1 << 2) #define STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP (1 << 4) #define STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP (1 << 5) #define STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ (1 << 6) #define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE (1 << 7) #define STMP3XXX_RTC_PERSISTENT1 0x70 /* missing bitmask in 
headers */ #define STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER 0x80000000 struct stmp3xxx_rtc_data { struct rtc_device *rtc; void __iomem *io; int irq_alarm; }; #if IS_ENABLED(CONFIG_STMP3XXX_RTC_WATCHDOG) /** * stmp3xxx_wdt_set_timeout - configure the watchdog inside the STMP3xxx RTC * @dev: the parent device of the watchdog (= the RTC) * @timeout: the desired value for the timeout register of the watchdog. * 0 disables the watchdog * * The watchdog needs one register and two bits which are in the RTC domain. * To handle the resource conflict, the RTC driver will create another * platform_device for the watchdog driver as a child of the RTC device. * The watchdog driver is passed the below accessor function via platform_data * to configure the watchdog. Locking is not needed because accessing SET/CLR * registers is atomic. */ static void stmp3xxx_wdt_set_timeout(struct device *dev, u32 timeout) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); if (timeout) { writel(timeout, rtc_data->io + STMP3XXX_RTC_WATCHDOG); writel(STMP3XXX_RTC_CTRL_WATCHDOGEN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_SET); writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER, rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_SET); } else { writel(STMP3XXX_RTC_CTRL_WATCHDOGEN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER, rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_CLR); } } static struct stmp3xxx_wdt_pdata wdt_pdata = { .wdt_set_timeout = stmp3xxx_wdt_set_timeout, }; static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) { int rc = -1; struct platform_device *wdt_pdev = platform_device_alloc("stmp3xxx_rtc_wdt", rtc_pdev->id); if (wdt_pdev) { wdt_pdev->dev.parent = &rtc_pdev->dev; wdt_pdev->dev.platform_data = &wdt_pdata; rc = platform_device_add(wdt_pdev); } if (rc) dev_err(&rtc_pdev->dev, "failed to register stmp3xxx_rtc_wdt\n"); } #else static void stmp3xxx_wdt_register(struct 
platform_device *rtc_pdev) { } #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) { int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ /* * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 * states: * | The order in which registers are updated is * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. * | (This list is in bitfield order, from LSB to MSB, as they would * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT * | register. For example, the Seconds register corresponds to * | STALE_REGS or NEW_REGS containing 0x80.) */ do { if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) return 0; udelay(1); } while (--timeout > 0); return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0; } /* Time read/write */ static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { int ret; struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); ret = stmp3xxx_wait_time(rtc_data); if (ret) return ret; rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); return 0; } static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); return stmp3xxx_wait_time(rtc_data); } /* interrupt(s) handler */ static irqreturn_t stmp3xxx_rtc_interrupt(int irq, void *dev_id) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev_id); u32 status = readl(rtc_data->io + STMP3XXX_RTC_CTRL); if (status & STMP3XXX_RTC_CTRL_ALARM_IRQ) { writel(STMP3XXX_RTC_CTRL_ALARM_IRQ, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); rtc_update_irq(rtc_data->rtc, 1, RTC_AF | RTC_IRQF); return IRQ_HANDLED; } return IRQ_NONE; } static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); if (enabled) { 
writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN, rtc_data->io + STMP3XXX_RTC_PERSISTENT0 + STMP_OFFSET_REG_SET); writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_SET); } else { writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN, rtc_data->io + STMP3XXX_RTC_PERSISTENT0 + STMP_OFFSET_REG_CLR); writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); } return 0; } static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_ALARM), &alm->time); return 0; } static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { unsigned long t; struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); rtc_tm_to_time(&alm->time, &t); writel(t, rtc_data->io + STMP3XXX_RTC_ALARM); stmp3xxx_alarm_irq_enable(dev, alm->enabled); return 0; } static const struct rtc_class_ops stmp3xxx_rtc_ops = { .alarm_irq_enable = stmp3xxx_alarm_irq_enable, .read_time = stmp3xxx_rtc_gettime, .set_mmss = stmp3xxx_rtc_set_mmss, .read_alarm = stmp3xxx_rtc_read_alarm, .set_alarm = stmp3xxx_rtc_set_alarm, }; static int stmp3xxx_rtc_remove(struct platform_device *pdev) { struct stmp3xxx_rtc_data *rtc_data = platform_get_drvdata(pdev); if (!rtc_data) return 0; writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); return 0; } static int stmp3xxx_rtc_probe(struct platform_device *pdev) { struct stmp3xxx_rtc_data *rtc_data; struct resource *r; u32 rtc_stat; u32 pers0_set, pers0_clr; u32 crystalfreq = 0; int err; rtc_data = devm_kzalloc(&pdev->dev, sizeof(*rtc_data), GFP_KERNEL); if (!rtc_data) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "failed to get resource\n"); return -ENXIO; } rtc_data->io = devm_ioremap(&pdev->dev, r->start, 
resource_size(r)); if (!rtc_data->io) { dev_err(&pdev->dev, "ioremap failed\n"); return -EIO; } rtc_data->irq_alarm = platform_get_irq(pdev, 0); rtc_stat = readl(rtc_data->io + STMP3XXX_RTC_STAT); if (!(rtc_stat & STMP3XXX_RTC_STAT_RTC_PRESENT)) { dev_err(&pdev->dev, "no device onboard\n"); return -ENODEV; } platform_set_drvdata(pdev, rtc_data); err = stmp_reset_block(rtc_data->io); if (err) { dev_err(&pdev->dev, "stmp_reset_block failed: %d\n", err); return err; } /* * Obviously the rtc needs a clock input to be able to run. * This clock can be provided by an external 32k crystal. If that one is * missing XTAL must not be disabled in suspend which consumes a * lot of power. Normally the presence and exact frequency (supported * are 32000 Hz and 32768 Hz) is detectable from fuses, but as reality * proves these fuses are not blown correctly on all machines, so the * frequency can be overridden in the device tree. */ if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32000_PRESENT) crystalfreq = 32000; else if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32768_PRESENT) crystalfreq = 32768; of_property_read_u32(pdev->dev.of_node, "stmp,crystal-freq", &crystalfreq); switch (crystalfreq) { case 32000: /* keep 32kHz crystal running in low-power mode */ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ | STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP | STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE; pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP; break; case 32768: /* keep 32.768kHz crystal running in low-power mode */ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP | STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE; pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP | STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ; break; default: dev_warn(&pdev->dev, "invalid crystal-freq specified in device-tree. 
Assuming no crystal\n"); /* fall-through */ case 0: /* keep XTAL on in low-power mode */ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP; pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP | STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE; } writel(pers0_set, rtc_data->io + STMP3XXX_RTC_PERSISTENT0 + STMP_OFFSET_REG_SET); writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE | pers0_clr, rtc_data->io + STMP3XXX_RTC_PERSISTENT0 + STMP_OFFSET_REG_CLR); writel(STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN | STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); rtc_data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &stmp3xxx_rtc_ops, THIS_MODULE); if (IS_ERR(rtc_data->rtc)) return PTR_ERR(rtc_data->rtc); err = devm_request_irq(&pdev->dev, rtc_data->irq_alarm, stmp3xxx_rtc_interrupt, 0, "RTC alarm", &pdev->dev); if (err) { dev_err(&pdev->dev, "Cannot claim IRQ%d\n", rtc_data->irq_alarm); return err; } stmp3xxx_wdt_register(pdev); return 0; } #ifdef CONFIG_PM_SLEEP static int stmp3xxx_rtc_suspend(struct device *dev) { return 0; } static int stmp3xxx_rtc_resume(struct device *dev) { struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); stmp_reset_block(rtc_data->io); writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN | STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE, rtc_data->io + STMP3XXX_RTC_PERSISTENT0 + STMP_OFFSET_REG_CLR); return 0; } #endif static SIMPLE_DEV_PM_OPS(stmp3xxx_rtc_pm_ops, stmp3xxx_rtc_suspend, stmp3xxx_rtc_resume); static const struct of_device_id rtc_dt_ids[] = { { .compatible = "fsl,stmp3xxx-rtc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rtc_dt_ids); static struct platform_driver stmp3xxx_rtcdrv = { .probe = stmp3xxx_rtc_probe, .remove = stmp3xxx_rtc_remove, .driver = { .name = "stmp3xxx-rtc", .pm = &stmp3xxx_rtc_pm_ops, .of_match_table = rtc_dt_ids, }, }; module_platform_driver(stmp3xxx_rtcdrv); MODULE_DESCRIPTION("STMP3xxx 
RTC Driver"); MODULE_AUTHOR("dmitry pervushin <[email protected]> and " "Wolfram Sang <[email protected]>"); MODULE_LICENSE("GPL");
531516.c
/************************************************************************ * Copyright 1995 by Wietse Venema. All rights reserved. * * This material was originally written and compiled by Wietse Venema at * Eindhoven University of Technology, The Netherlands, in 1990, 1991, * 1992, 1993, 1994 and 1995. * * Redistribution and use in source and binary forms are permitted * provided that this entire copyright notice is duplicated in all such * copies. * * This software is provided "as is" and without any expressed or implied * warranties, including, without limitation, the implied warranties of * merchantibility and fitness for any particular purpose. ************************************************************************/ /* $FreeBSD: src/usr.bin/login/login_fbtab.c,v 1.18 2007/09/21 01:55:11 kevlo Exp $ */ /* $DragonFly: src/usr.bin/login/login_fbtab.c,v 1.3 2003/10/04 20:36:48 hmp Exp $ */ /* SYNOPSIS void login_fbtab(tty, uid, gid) char *tty; uid_t uid; gid_t gid; DESCRIPTION This module implements device security as described in the SunOS 4.1.x fbtab(5) and SunOS 5.x logindevperm(4) manual pages. The program first looks for /etc/fbtab. If that file cannot be opened it attempts to process /etc/logindevperm. We expect entries with the folowing format: Comments start with a # and extend to the end of the line. Blank lines or lines with only a comment are ignored. All other lines consist of three fields delimited by whitespace: a login device (/dev/console), an octal permission number (0600), and a ":"-delimited list of devices (/dev/kbd:/dev/mouse). All device names are absolute paths. A path that ends in "*" refers to all directory entries except "." and "..". If the tty argument (relative path) matches a login device name (absolute path), the permissions of the devices in the ":"-delimited list are set as specified in the second field, and their ownership is changed to that of the uid and gid arguments. 
DIAGNOSTICS Problems are reported via the syslog daemon with severity LOG_ERR. BUGS This module uses strtok(3), which may cause conflicts with other uses of that same routine. AUTHOR Wietse Venema ([email protected]) Eindhoven University of Technology The Netherlands */ #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <glob.h> #include <paths.h> #include <stdio.h> #include <string.h> #include <syslog.h> #include <unistd.h> #include "login.h" #include "pathnames.h" static void login_protect(const char *, char *, int, uid_t, gid_t); #define WSPACE " \t\n" /* login_fbtab - apply protections specified in /etc/fbtab or logindevperm */ void login_fbtab(char *tty, uid_t uid, gid_t gid) { FILE *fp; char buf[BUFSIZ]; char *devname; char *cp; int prot; const char *table; if ((fp = fopen(table = _PATH_FBTAB, "r")) == NULL && (fp = fopen(table = _PATH_LOGINDEVPERM, "r")) == NULL) return; while (fgets(buf, sizeof(buf), fp)) { if ((cp = strchr(buf, '#'))) *cp = 0; /* strip comment */ if ((cp = devname = strtok(buf, WSPACE)) == 0) continue; /* empty or comment */ if (strncmp(devname, _PATH_DEV, sizeof _PATH_DEV - 1) != 0 || (cp = strtok(NULL, WSPACE)) == 0 || *cp != '0' || sscanf(cp, "%o", &prot) == 0 || prot == 0 || (prot & 0777) != prot || (cp = strtok(NULL, WSPACE)) == 0) { syslog(LOG_ERR, "%s: bad entry: %s", table, cp ? 
cp : "(null)"); continue; } if (strcmp(devname + 5, tty) == 0) { for (cp = strtok(cp, ":"); cp; cp = strtok(NULL, ":")) { login_protect(table, cp, prot, uid, gid); } } } fclose(fp); } /* login_protect - protect one device entry */ void login_protect(const char *table, char *pattern, int mask, uid_t uid, gid_t gid) { glob_t gl; char *path; unsigned int i; if (glob(pattern, GLOB_NOSORT, NULL, &gl) != 0) return; for (i = 0; i < gl.gl_pathc; i++) { path = gl.gl_pathv[i]; /* clear flags of the device */ if (chflags(path, 0) && errno != ENOENT && errno != EOPNOTSUPP) syslog(LOG_ERR, "%s: chflags(%s): %m", table, path); if (chmod(path, mask) && errno != ENOENT) syslog(LOG_ERR, "%s: chmod(%s): %m", table, path); if (chown(path, uid, gid) && errno != ENOENT) syslog(LOG_ERR, "%s: chown(%s): %m", table, path); } globfree(&gl); }
46566.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE195_Signed_to_Unsigned_Conversion_Error__negative_strncpy_17.c
Label Definition File: CWE195_Signed_to_Unsigned_Conversion_Error.label.xml
Template File: sources-sink-17.tmpl.c
*/
/*
 * @description
 * CWE: 195 Signed to Unsigned Conversion Error
 * BadSource: negative Set data to a fixed negative number
 * GoodSource: Positive integer
 * Sink: strncpy
 *    BadSink : Copy strings using strncpy() with the length of data
 * Flow Variant: 17 Control flow: for loops
 *
 * NOTE: This is a Juliet benchmark testcase.  The "bad" flaw below is
 * intentional and must be preserved exactly as generated so that static
 * analysis tools can be evaluated against it.
 */

#include "std_testcase.h"

#ifndef OMITBAD

void CWE195_Signed_to_Unsigned_Conversion_Error__negative_strncpy_17_bad()
{
    int i;
    int data;
    /* Initialize data */
    data = -1;
    /* variant 17: the source assignment is wrapped in a single-iteration for loop */
    for(i = 0; i < 1; i++)
    {
        /* FLAW: Use a negative number */
        data = -1;
    }
    {
        char source[100];
        char dest[100] = "";
        memset(source, 'A', 100-1);
        source[100-1] = '\0';
        /* the signed comparison passes for negative data, so the flaw reaches the sink */
        if (data < 100)
        {
            /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative,
             * the sign conversion could result in a very large number */
            strncpy(dest, source, data);
            /* negative data also makes this subscript index before the buffer */
            dest[data] = '\0'; /* strncpy() does not always NULL terminate */
        }
        printLine(dest);
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B() - use goodsource and badsink by changing the conditions on the for statements */
static void goodG2B()
{
    int h;
    int data;
    /* Initialize data */
    data = -1;
    for(h = 0; h < 1; h++)
    {
        /* FIX: Use a positive integer less than &InitialDataSize&*/
        data = 100-1;
    }
    {
        char source[100];
        char dest[100] = "";
        memset(source, 'A', 100-1);
        source[100-1] = '\0';
        if (data < 100)
        {
            /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative,
             * the sign conversion could result in a very large number */
            strncpy(dest, source, data);
            dest[data] = '\0'; /* strncpy() does not always NULL terminate */
        }
        printLine(dest);
    }
}

void CWE195_Signed_to_Unsigned_Conversion_Error__negative_strncpy_17_good()
{
    goodG2B();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
 * its own for testing or for building a binary to use in testing binary
 * analysis tools. It is not used when compiling all the testcases as one
 * application, which is how source code analysis tools are tested. */

#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE195_Signed_to_Unsigned_Conversion_Error__negative_strncpy_17_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE195_Signed_to_Unsigned_Conversion_Error__negative_strncpy_17_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
771744.c
/*- * Copyright (c) 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Paul Borman at Krystal Technologies. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)frune.c 8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/locale/frune.c,v 1.3 2002/09/18 06:19:12 tjr Exp $");

#include "xlocale_private.h"

#include <limits.h>
#include <rune.h>
#include <stddef.h>
#include <stdio.h>
#include "runedepreciated.h"

/*
 * fgetrune -- read the next multibyte-encoded rune from stream `fp`.
 *
 * Bytes are accumulated one at a time (up to MB_LEN_MAX) and fed to
 * __sgetrune until a complete rune decodes.  Returns the decoded rune,
 * EOF at clean end of input, or the current locale's "invalid rune"
 * sentinel when the bytes cannot form a valid rune (all but the first
 * offending byte are pushed back onto the stream).
 *
 * Deprecated API: prints a one-time warning to stderr on first use.
 */
long
fgetrune(fp)
	FILE *fp;
{
	rune_t r;
	int c, len;
	char buf[MB_LEN_MAX];
	char const *result;
	/* Locale-specific sentinel returned for undecodable sequences. */
	__darwin_rune_t invalid_rune = __current_locale()->__lc_ctype->_CurrentRuneLocale.__invalid_rune;
	static int warn_depreciated = 1;

	if (warn_depreciated) {
		warn_depreciated = 0;
		fprintf(stderr, __rune_depreciated_msg, "fgetrune");
	}

	len = 0;
	do {
		if ((c = getc(fp)) == EOF) {
			if (len)
				break;		/* partial sequence at EOF -> invalid rune */
			return (EOF);		/* clean end of stream */
		}
		buf[len++] = c;

		if ((r = __sgetrune(buf, len, &result)) != invalid_rune)
			return (r);
	/* result == buf means "need more bytes"; anything else is failure */
	} while (result == buf && len < MB_LEN_MAX);

	/* Undecodable: push back all bytes but the first, report failure. */
	while (--len > 0)
		ungetc(buf[len], fp);
	return (invalid_rune);
}

/*
 * fungetrune -- push rune `r` back onto stream `fp`.
 *
 * Encodes the rune via __sputrune and ungetc()s the bytes in reverse
 * order so a subsequent read sees them in encoding order.
 * Returns 0 on success, EOF if any pushback fails.
 *
 * Deprecated API: prints a one-time warning to stderr on first use.
 */
int
fungetrune(r, fp)
	rune_t r;
	FILE* fp;
{
	int len;
	char buf[MB_LEN_MAX];
	static int warn_depreciated = 1;

	if (warn_depreciated) {
		warn_depreciated = 0;
		fprintf(stderr, __rune_depreciated_msg, "fungetrune");
	}

	len = __sputrune(r, buf, MB_LEN_MAX, 0);
	while (len-- > 0)
		if (ungetc(buf[len], fp) == EOF)
			return (EOF);
	return (0);
}

/*
 * fputrune -- write rune `r` to stream `fp`.
 *
 * Encodes the rune via __sputrune and writes the bytes in order.
 * Returns 0 on success, EOF on the first write error.
 *
 * Deprecated API: prints a one-time warning to stderr on first use.
 */
int
fputrune(r, fp)
	rune_t r;
	FILE *fp;
{
	int i, len;
	char buf[MB_LEN_MAX];
	static int warn_depreciated = 1;

	if (warn_depreciated) {
		warn_depreciated = 0;
		fprintf(stderr, __rune_depreciated_msg, "fputrune");
	}

	len = __sputrune(r, buf, MB_LEN_MAX, 0);
	for (i = 0; i < len; ++i)
		if (putc(buf[i], fp) == EOF)
			return (EOF);
	return (0);
}
756002.c
/**************************************************************************************/ /* Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 */ /* Washington University, Mallinckrodt Institute of Radiology. */ /* All Rights Reserved. */ /* This software may not be reproduced, copied, or distributed without written */ /* permission of Washington University. For further information contact A. Z. Snyder. */ /**************************************************************************************/ /*$Header: /home/usr/shimonyj/diff4dfp/RCS/get_dti_params.c,v 1.4 2012/06/15 01:30:37 avi Exp $*/ /*$Log: get_dti_params.c,v $ * Revision 1.4 2012/06/15 01:30:37 avi * tolerate (meaningless) vector components on b = 0 input lines * * Revision 1.3 2007/08/30 05:09:08 avi * JSSutil.h compliant * * Revision 1.2 2000/12/19 01:46:58 avi * copyright * * Revision 1.1 2000/10/05 05:46:35 avi * Initial revision **/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <math.h> #include <JSSutil.h> void get_dti_nrc_test () { extern int dti_dimen (char *file); extern int get_dti_params_nrc (char *file, int n, float *b_vals, float **q_vals); int i, j, k, n; float **q_vals, *b_vals; char file[] = "/home/usr/shimonyj/diff4dfp/tp7_params.dat"; if ((n = dti_dimen (file)) <= 0) exit (-1); b_vals = vector (1, n); q_vals = matrix (1, n, 1, 3); get_dti_params_nrc (file, n, b_vals, q_vals); free_matrix (q_vals, 1, n, 1, 3); free_vector (b_vals, 1, n); exit (0); } int dti_dimen (char *file) { FILE *fp; char *ptr, string[256]; int i, k, n; if (access (file, R_OK) || !(fp = fopen (file, "r"))) return -1; while (1) { if (!(fgets (string, 256, fp))) { fclose (fp); return -1; } if (ptr = strchr (string, '#')) *ptr = '\0'; if (k = sscanf (string, "%d", &n) != 1) continue; fclose (fp); printf ("dti_dimen=%d\n", n); return n; } } int get_dti_params_nrc (char *file, int n, float *b_vals, float **q_vals) { FILE *fp; char *ptr, string[256]; float v[4]; double q; int i, 
j, k, l; if (access (file, R_OK) || !(fp = fopen (file, "r"))) return -1; i = 0; while (i <= n) { if (!(fgets (string, 256, fp))) goto ERRF; if (ptr = strchr (string, '#')) *ptr = '\0'; if (!i) { if (k = sscanf (string, "%d", &j) != 1) continue; if (j != n) goto ERRF; } else { k = sscanf (string, "%f%f%f%f", v + 0, v + 1, v + 2, v + 3); if (!k) continue; b_vals[i] = v[0] / 1000.; if (k == 1 || v[0] == 0.) { for (j = 1; j <= 3; j++) q_vals[i][j] = 0.0; } else if (k == 4) { q = 0; for (j = 1; j <= 3; j++) q += v[j] * v[j]; for (j = 1; j <= 3; j++) q_vals[i][j] = v[j] / sqrt (q); } else { fprintf (stderr, "input line field count not 1 or 4\n"); return -1; } printf ("%10.4f%10.4f%10.4f%10.4f\n", b_vals[i], q_vals[i][1], q_vals[i][2], q_vals[i][3]); } i++; } fclose (fp); return 0; ERRF: fprintf (stderr, "%s parse error\n", file); fclose (fp); return -1; }
678731.c
/* USER CODE BEGIN Header */ /** ****************************************************************************** * @file stm32f0xx_hal_msp.c * @brief This file provides code for the MSP Initialization * and de-Initialization codes. ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "main.h" /* USER CODE BEGIN Includes */ /* USER CODE END Includes */ /* Private typedef -----------------------------------------------------------*/ /* USER CODE BEGIN TD */ /* USER CODE END TD */ /* Private define ------------------------------------------------------------*/ /* USER CODE BEGIN Define */ /* USER CODE END Define */ /* Private macro -------------------------------------------------------------*/ /* USER CODE BEGIN Macro */ /* USER CODE END Macro */ /* Private variables ---------------------------------------------------------*/ /* USER CODE BEGIN PV */ /* USER CODE END PV */ /* Private function prototypes -----------------------------------------------*/ /* USER CODE BEGIN PFP */ /* USER CODE END PFP */ /* External functions --------------------------------------------------------*/ /* USER CODE BEGIN ExternalFunctions */ /* USER CODE END ExternalFunctions */ /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /** * Initializes the Global MSP. 
*/
/*
 * Global MSP (MCU Support Package) initialization, invoked by HAL_Init().
 * Enables clocks needed system-wide; per-peripheral low-level setup lives
 * in the individual HAL_*_MspInit() hooks.  Generated by STM32CubeMX --
 * keep edits inside the USER CODE markers so regeneration preserves them.
 */
void HAL_MspInit(void)
{
  /* USER CODE BEGIN MspInit 0 */

  /* USER CODE END MspInit 0 */

  /* SYSCFG and PWR peripheral clocks enabled up front */
  __HAL_RCC_SYSCFG_CLK_ENABLE();
  __HAL_RCC_PWR_CLK_ENABLE();

  /* System interrupt init*/

  /* USER CODE BEGIN MspInit 1 */

  /* USER CODE END MspInit 1 */
}

/* USER CODE BEGIN 1 */

/* USER CODE END 1 */
438563.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 * bfa_fcs.c BFA FCS main
 */

#include "bfad_drv.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"

BFA_TRC_FILE(FCS, FCS);

/*
 * FCS sub-modules
 */
struct bfa_fcs_mod_s {
	void	(*attach) (struct bfa_fcs_s *fcs);	/* driver attach time */
	void	(*modinit) (struct bfa_fcs_s *fcs);	/* after bfa init completes */
	void	(*modexit) (struct bfa_fcs_s *fcs);	/* cleanup; may complete async */
};

#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }

/* Sub-module dispatch table; async modexit completions counted via fcs->wc. */
static struct bfa_fcs_mod_s fcs_modules[] = {
	{ bfa_fcs_port_attach, NULL, NULL },
	{ bfa_fcs_uf_attach, NULL, NULL },
	{ bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
	  bfa_fcs_fabric_modexit },
};

/*
 *  fcs_api BFA FCS API
 */

/* Completion callback: wake the driver thread blocked in bfa_fcs_exit(). */
static void
bfa_fcs_exit_comp(void *fcs_cbarg)
{
	struct bfa_fcs_s *fcs = fcs_cbarg;
	struct bfad_s *bfad = fcs->bfad;

	complete(&bfad->comp);
}

/*
 *  fcs_api BFA FCS API
 */

/*
 * fcs attach -- called once to initialize data structures at driver attach time
 */
void
bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
	       bfa_boolean_t min_cfg)
{
	int	i;
	struct bfa_fcs_mod_s  *mod;

	fcs->bfa = bfa;
	fcs->bfad = bfad;
	fcs->min_cfg = min_cfg;

	bfa->fcs = BFA_TRUE;
	fcbuild_init();

	/* give each sub-module a chance to set up its state */
	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
		mod = &fcs_modules[i];
		if (mod->attach)
			mod->attach(fcs);
	}
}

/*
 * fcs initialization, called once after bfa initialization is complete
 */
void
bfa_fcs_init(struct bfa_fcs_s *fcs)
{
	int	i, npbc_vports;
	struct bfa_fcs_mod_s  *mod;
	struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];

	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
		mod = &fcs_modules[i];
		if (mod->modinit)
			mod->modinit(fcs);
	}
	/* Initialize pbc vports */
	if (!fcs->min_cfg) {
		npbc_vports =
			bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
		for (i = 0; i < npbc_vports; i++)
			bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
	}
}

/*
 * brief
 *	FCS driver details initialization.
 *
 * param[in]	fcs		FCS instance
 * param[in]	driver_info	Driver Details
 *
 * return None
 */
void
bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
			 struct bfa_fcs_driver_info_s *driver_info)
{
	fcs->driver_info = *driver_info;

	/* driver details feed the base port's symbolic name */
	bfa_fcs_fabric_psymb_init(&fcs->fabric);
}

/*
 * brief
 *	FCS instance cleanup and exit.
 *
 * param[in]	fcs	FCS instance
 * return None
 */
void
bfa_fcs_exit(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_mod_s  *mod;
	int	nmods, i;

	/* block below until every sub-module's async exit completes */
	bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);

	nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);

	for (i = 0; i < nmods; i++) {

		mod = &fcs_modules[i];
		if (mod->modexit) {
			bfa_wc_up(&fcs->wc);
			mod->modexit(fcs);
		}
	}

	bfa_wc_wait(&fcs->wc);
}

/*
 * Fabric module implementation.
*/

#define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
#define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */

/* Derive operating port type (N_Port vs NL_Port) from physical topology. */
#define bfa_fcs_fabric_set_opertype(__fabric) do {			\
	if (bfa_fcport_get_topology((__fabric)->fcs->bfa)		\
				== BFA_PORT_TOPOLOGY_P2P)		\
		(__fabric)->oper_type = BFA_PORT_TYPE_NPORT;		\
	else								\
		(__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;		\
} while (0)

/*
 * forward declarations
 */
static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delay(void *cbarg);
static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delete_comp(void *cbarg);
static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
				      struct fchs_s *fchs, u16 len);
static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
					 struct fchs_s *fchs, u16 len);
static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
					 struct bfa_fcxp_s *fcxp,
					 void *cbarg,
					 bfa_status_t status,
					 u32 rsp_len,
					 u32 resid_len,
					 struct fchs_s *rspfchs);

/* Fabric state-machine handlers -- one function per state. */
static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
				     enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
				      enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
				       enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
				    enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
					  enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
				   enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
				       enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
				   enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
					enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
				       enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
				       enum bfa_fcs_fabric_event event);

/*
 * Beginning state before fabric creation.
 */
static void
bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
			 enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_CREATE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
		bfa_fcs_fabric_init(fabric);
		bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
		break;

	case BFA_FCS_FABRIC_SM_LINK_UP:
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		/* link events are ignored until the fabric exists */
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}

/*
 * Beginning state before fabric creation.
 */
static void
bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
			  enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_START:
		/* begin FLOGI immediately if the link is already up */
		if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
			bfa_fcs_fabric_login(fabric);
		} else
			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		break;

	case BFA_FCS_FABRIC_SM_LINK_UP:
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
		bfa_wc_down(&fabric->fcs->wc);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}

/*
 * Link is down, awaiting LINK UP event from port. This is also the
 * first state at fabric creation.
*/ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_RETRY_OP: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * FLOGI is in progress, awaiting FLOGI reply. */ static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); bfa_trc(fabric->fcs, event); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); } break; case BFA_FCS_FABRIC_SM_RETRY_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, bfa_fcs_fabric_delay, fabric, BFA_FCS_FABRIC_RETRY_DELAY); break; case BFA_FCS_FABRIC_SM_LOOPBACK: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_set_opertype(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, 
bfa_fcs_fabric_sm_deleting); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELAYED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_timer_stop(&fabric->delay_timer); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_timer_stop(&fabric->delay_timer); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication is in progress, awaiting authentication results. */ static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); break; case BFA_FCS_FABRIC_SM_PERF_EVFP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication failed */ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 
bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Port is in loopback mode. */ void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is online - normal operating state. 
*/ void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Exchanging virtual fabric parameters. */ static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); break; case BFA_FCS_FABRIC_SM_ISOLATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * EVFP exchange complete and VFT tagging is enabled. */ static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); } /* * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). */ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Port is isolated due to VF_ID mismatch. 
" "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", pwwn_ptr, fabric->fcs->port_vfid, fabric->event_arg.swp_vfid); } /* * Fabric is being deleted, awaiting vport delete completions. */ static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_fcs_fabric_notify_offline(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * fcs_fabric_private fabric private functions */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; } /* * Port Symbolic Name Creation for base port. 
*/ void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strncpy((char *)&port_cfg->sym_name, model, BFA_FCS_PORT_SYMBNAME_MODEL_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Driver Version */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, BFA_FCS_PORT_SYMBNAME_VERSION_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Host machine name */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_machine_name, BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* * Host OS Info : * If OS Patch Info is not there, do not truncate any bytes from the * OS name string and instead copy the entire OS info string (64 bytes). 
*/ if (driver_info->host_os_patch[0] == '\0') { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); } else { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Append host OS Patch Info */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_patch, BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); } /* null terminate */ port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * bfa lps login completion callback */ void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) { struct bfa_fcs_fabric_s *fabric = uarg; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, status); switch (status) { case BFA_STATUS_OK: fabric->stats.flogi_accepts++; break; case BFA_STATUS_INVALID_MAC: /* Only for CNA */ fabric->stats.flogi_acc_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_EPROTOCOL: switch (fabric->lps->ext_status) { case BFA_EPROTO_BAD_ACCEPT: fabric->stats.flogi_acc_err++; break; case BFA_EPROTO_UNKNOWN_RSP: fabric->stats.flogi_unknown_rsp++; break; default: break; } bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; default: fabric->stats.flogi_rsp_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; } fabric->bb_credit = fabric->lps->pr_bbcred; bfa_trc(fabric->fcs, fabric->bb_credit); if (!(fabric->lps->brcd_switch)) fabric->fabric_name = fabric->lps->pr_nwwn; /* * Check port type. It should be 1 = F-port. 
*/ if (fabric->lps->fport) { fabric->bport.pid = fabric->lps->lp_pid; fabric->is_npiv = fabric->lps->npiv_en; fabric->is_auth = fabric->lps->auth_req; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); } else { /* * Nport-2-Nport direct attached */ fabric->bport.port_topo.pn2n.rem_port_wwn = fabric->lps->pr_pwwn; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } bfa_trc(fabric->fcs, fabric->bport.pid); bfa_trc(fabric->fcs, fabric->is_npiv); bfa_trc(fabric->fcs, fabric->is_auth); } /* * Allocate and send FLOGI. */ static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0; if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) alpa = bfa_fcport_get_myalpa(bfa); bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); fabric->stats.flogi_sent++; } static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); bfa_fcs_fabric_set_opertype(fabric); fabric->stats.fabric_onlines++; /* * notify online event to base and then virtual ports */ bfa_fcs_lport_online(&fabric->bport); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_online(vport); } } static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); fabric->stats.fabric_offlines++; /* * notify offline event first to vports and then base port. 
*/ list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_offline(vport); } bfa_fcs_lport_offline(&fabric->bport); fabric->fabric_name = 0; fabric->fabric_ip_addr[0] = 0; } static void bfa_fcs_fabric_delay(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); } /* * Delete all vports and wait for vport delete completions. */ static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_fcs_delete(vport); } bfa_fcs_lport_delete(&fabric->bport); bfa_wc_wait(&fabric->wc); } static void bfa_fcs_fabric_delete_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); } /* * fcs_fabric_public fabric public functions */ /* * Attach time initialization. */ void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; fabric = &fcs->fabric; memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); /* * Initialize base fabric. */ fabric->fcs = fcs; INIT_LIST_HEAD(&fabric->vport_q); INIT_LIST_HEAD(&fabric->vf_q); fabric->lps = bfa_lps_alloc(fcs->bfa); WARN_ON(!fabric->lps); /* * Initialize fabric delete completion handler. Fabric deletion is * complete when the last vport delete is complete. */ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); } void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) { bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); bfa_trc(fcs, 0); } /* * Module cleanup */ void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); /* * Cleanup base fabric. 
*/ fabric = &fcs->fabric; bfa_lps_delete(fabric->lps); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); } /* * Fabric module start -- kick starts FCS actions */ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); } /* * Link up notification from BFA physical port module. */ void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); } /* * Link down notification from BFA physical port module. */ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } /* * A child vport is being created in the fabric. * * Call from vport module at vport creation. A list of base port and vports * belonging to a fabric is maintained to propagate link events. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. * param[in] vport - Vport being created. * * @return None (always succeeds) */ void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { /* * - add vport to fabric's vport_q */ bfa_trc(fabric->fcs, fabric->vf_id); list_add_tail(&vport->qe, &fabric->vport_q); fabric->num_vports++; bfa_wc_up(&fabric->wc); } /* * A child vport is being deleted from fabric. * * Vport is being deleted. 
*/ void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { list_del(&vport->qe); fabric->num_vports--; bfa_wc_down(&fabric->wc); } /* * Lookup for a vport within a fabric given its pwwn */ struct bfa_fcs_vport_s * bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) { struct bfa_fcs_vport_s *vport; struct list_head *qe; list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) return vport; } return NULL; } /* * Get OUI of the attached switch. * * Note : Use of this function should be avoided as much as possible. * This function should be used only if there is any requirement * to check for FOS version below 6.3. * To check if the attached fabric is a brocade fabric, use * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 * or above only. */ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) { wwn_t fab_nwwn; u8 *tmp; u16 oui; fab_nwwn = fabric->lps->pr_nwwn; tmp = (u8 *)&fab_nwwn; oui = (tmp[3] << 8) | tmp[4]; return oui; } /* * Unsolicited frame receive handling. */ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { u32 pid = fchs->d_id; struct bfa_fcs_vport_s *vport; struct list_head *qe; struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; bfa_trc(fabric->fcs, len); bfa_trc(fabric->fcs, pid); /* * Look for our own FLOGI frames being looped back. This means an * external loopback cable is in place. Our own FLOGI frames are * sometimes looped back when switch port gets temporarily bypassed. */ if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && (els_cmd->els_code == FC_ELS_FLOGI) && (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); return; } /* * FLOGI/EVFP exchanges should be consumed by base fabric. 
*/ if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { bfa_trc(fabric->fcs, pid); bfa_fcs_fabric_process_uf(fabric, fchs, len); return; } if (fabric->bport.pid == pid) { /* * All authentication frames should be routed to auth */ bfa_trc(fabric->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_AUTH) { bfa_trc(fabric->fcs, els_cmd->els_code); return; } bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); return; } /* * look for a matching local port ID */ list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == pid) { bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); return; } } bfa_trc(fabric->fcs, els_cmd->els_code); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); } /* * Unsolicited frames to be processed by fabric. */ static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(fabric->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_FLOGI: bfa_fcs_fabric_process_flogi(fabric, fchs, len); break; default: /* * need to generate a LS_RJT */ break; } } /* * Process incoming FLOGI */ static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); struct bfa_fcs_lport_s *bport = &fabric->bport; bfa_trc(fabric->fcs, fchs->s_id); fabric->stats.flogi_rcvd++; /* * Check port type. It should be 0 = n-port. 
*/ if (flogi->csp.port_type) { /* * @todo: may need to send a LS_RJT */ bfa_trc(fabric->fcs, flogi->port_name); fabric->stats.flogi_rejected++; return; } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; /* * Send a Flogi Acc */ bfa_fcs_fabric_send_flogi_acc(fabric); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_fcxp_s *fcxp; u16 reqlen; struct fchs_s fchs; fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); /* * Do not expect this failure -- expect remote node to retry */ if (!fcxp) return; reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_FABRIC_PORT), n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), bfa_fcport_get_rx_bbcredit(bfa)); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag, BFA_FALSE, FC_CLASS_3, reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, FC_MAX_PDUSZ, 0); } /* * Flogi Acc completion callback. */ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_trc(fabric->fcs, status); } /* * * @param[in] fabric - fabric * @param[in] wwn_t - new fabric name * * @return - none */ void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, wwn_t fabric_name) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; char fwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric_name); if (fabric->fabric_name == 0) { /* * With BRCD switches, we don't get Fabric Name in FLOGI. * Don't generate a fabric name change event in this case. 
*/ fabric->fabric_name = fabric_name; } else { fabric->fabric_name = fabric_name; wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); wwn2str(fwwn_ptr, bfa_fcs_lport_get_fabric_name(&fabric->bport)); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Base port WWN = %s Fabric WWN = %s\n", pwwn_ptr, fwwn_ptr); } } /* * Returns FCS vf structure for a given vf_id. * * param[in] vf_id - VF_ID * * return * If lookup succeeds, retuns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) { bfa_trc(fcs, vf_id); if (vf_id == FC_VF_ID_NULL) return &fcs->fabric; return NULL; } /* * BFA FCS PPORT ( physical port) */ static void bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) { struct bfa_fcs_s *fcs = cbarg; bfa_trc(fcs, event); switch (event) { case BFA_PORT_LINKUP: bfa_fcs_fabric_link_up(&fcs->fabric); break; case BFA_PORT_LINKDOWN: bfa_fcs_fabric_link_down(&fcs->fabric); break; default: WARN_ON(1); } } void bfa_fcs_port_attach(struct bfa_fcs_s *fcs) { bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); } /* * BFA FCS UF ( Unsolicited Frames) */ /* * BFA callback for unsolicited frame receive handler. 
* * @param[in] cbarg callback arg for receive handler * @param[in] uf unsolicited frame descriptor * * @return None */ static void bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) { struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); u16 len = bfa_uf_get_frmlen(uf); struct fc_vft_s *vft; struct bfa_fcs_fabric_s *fabric; /* * check for VFT header */ if (fchs->routing == FC_RTG_EXT_HDR && fchs->cat_info == FC_CAT_VFT_HDR) { bfa_stats(fcs, uf.tagged); vft = bfa_uf_get_frmbuf(uf); if (fcs->port_vfid == vft->vf_id) fabric = &fcs->fabric; else fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); /* * drop frame if vfid is unknown */ if (!fabric) { WARN_ON(1); bfa_stats(fcs, uf.vfid_unknown); bfa_uf_free(uf); return; } /* * skip vft header */ fchs = (struct fchs_s *) (vft + 1); len -= sizeof(struct fc_vft_s); bfa_trc(fcs, vft->vf_id); } else { bfa_stats(fcs, uf.untagged); fabric = &fcs->fabric; } bfa_trc(fcs, ((u32 *) fchs)[0]); bfa_trc(fcs, ((u32 *) fchs)[1]); bfa_trc(fcs, ((u32 *) fchs)[2]); bfa_trc(fcs, ((u32 *) fchs)[3]); bfa_trc(fcs, ((u32 *) fchs)[4]); bfa_trc(fcs, ((u32 *) fchs)[5]); bfa_trc(fcs, len); bfa_fcs_fabric_uf_recv(fabric, fchs, len); bfa_uf_free(uf); } void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) { bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); }
337268.c
#ifdef HAVE_CONFIG_H #include <config.h> #endif #include "../bitstream.h" #include <stdio.h> #include <inttypes.h> static int simple_test(void) { int ret = 0; for (unsigned int i = 1; i <= sizeof(uint64_t) * 8; ++i) { uint8_t buffer[sizeof(uint64_t)] = {0}; const uint64_t v = UINT64_C(1) << (i - 1); bitstream_t bsw; bsw.buffer = buffer; bsw.position = 0; oqs_sig_picnic_bitstream_put_bits(&bsw, v, i); bitstream_t bsr; bsr.buffer = buffer; bsr.position = 0; const uint64_t r = oqs_sig_picnic_bitstream_get_bits(&bsr, i); if (r != v) { printf("simple_test: expected %016" PRIx64 ", got %016" PRIx64 "\n", v, r); ret = -1; } if (buffer[0] != 0x80) { printf("simple_test: expected buffer 80000000000000000000, got " "%02x%02x%02x%02x%02x%02x%02x%02x\n", buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6], buffer[7]); ret = -1; } } return ret; } static const uint64_t v = UINT64_C(0b110110000110110010100101001010); static int test_30(void) { int ret = 0; uint8_t buffer[sizeof(uint64_t)] = {0}; uint8_t buffer2[sizeof(uint64_t)] = {0}; bitstream_t bsw; bsw.buffer = buffer; bsw.position = 0; oqs_sig_picnic_bitstream_put_bits(&bsw, v, 30); bitstream_t bsw2; bsw2.buffer = buffer2; bsw2.position = 0; for (unsigned int i = 0; i < 30; ++i) { oqs_sig_picnic_bitstream_put_bits(&bsw2, v >> (30 - i - 1), 1); } bitstream_t bsr; bsr.buffer = buffer; bsr.position = 0; uint64_t r = oqs_sig_picnic_bitstream_get_bits(&bsr, 30); if (r != v) { printf("test_30: expected %016" PRIx64 ", got %016" PRIx64 "\n", v, r); ret = -1; } bitstream_t bsr2; bsr2.buffer = buffer2; bsr2.position = 0; for (unsigned int i = 0; i < 30; ++i) { r = oqs_sig_picnic_bitstream_get_bits(&bsr2, 1); const uint64_t e = (v >> (30 - i - 1)) & 0x1; if (e != r) { printf("test_30: expected2 %016" PRIx64 ", got %016" PRIx64 "\n", e, r); ret = -1; } } if (buffer[0] != 0b11011000 || buffer[1] != 0b01101100 || buffer[2] != 0b10100101 || buffer[3] != 0b00101000) { printf("test_30: expected buffer %016" PRIx64 ", 
got %02x%02x%02x%02x%02x%02x%02x%02x\n", v << 34, buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6], buffer[7]); ret = -1; } if (buffer2[0] != 0b11011000 || buffer2[1] != 0b01101100 || buffer2[2] != 0b10100101 || buffer2[3] != 0b00101000) { printf("test_30: expected buffer2 %016" PRIx64 ", got %02x%02x%02x%02x%02x%02x%02x%02x\n", v << 34, buffer2[0], buffer2[1], buffer2[2], buffer2[3], buffer2[4], buffer2[5], buffer2[6], buffer2[7]); ret = -1; } return ret; } static int test_multiple_30(void) { int ret = 0; uint8_t buffer[sizeof(uint64_t)] = {0}; const uint64_t v2 = (~v) & ((1 << 30) - 1); bitstream_t bsw; bsw.buffer = buffer; bsw.position = 0; oqs_sig_picnic_bitstream_put_bits(&bsw, v, 30); oqs_sig_picnic_bitstream_put_bits(&bsw, v2, 30); bitstream_t bsr; bsr.buffer = buffer; bsr.position = 0; uint64_t r = oqs_sig_picnic_bitstream_get_bits(&bsr, 30); if (r != v) { printf("test_multiple_30: expected %016" PRIx64 ", got %016" PRIx64 "\n", v, r); ret = -1; } r = oqs_sig_picnic_bitstream_get_bits(&bsr, 30); if (r != v2) { printf("test_multiple_30: expected %016" PRIx64 ", got %016" PRIx64 "\n", v2, r); ret = -1; } if (buffer[0] != 0b11011000 || buffer[1] != 0b01101100 || buffer[2] != 0b10100101 || buffer[3] != 0b00101000) { printf("test_30: expected buffer %016" PRIx64 ", got %02x%02x%02x%02x%02x%02x%02x%02x\n", v << 34, buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6], buffer[7]); ret = -1; } return ret; } int main(void) { int ret = 0; int tmp = simple_test(); if (tmp) { printf("simple_test: failed!\n"); ret = tmp; } tmp = test_30(); if (tmp) { printf("test_30: failed!\n"); ret = tmp; } tmp = test_multiple_30(); if (tmp) { printf("test_multiple_30: failed!\n"); ret = tmp; } return ret; }
902959.c
/* * AD7780/AD7781 SPI ADC driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/gpio.h> #include "../iio.h" #include "../sysfs.h" #include "../ring_generic.h" #include "adc.h" #include "ad7780.h" #define AD7780_RDY (1 << 7) #define AD7780_FILTER (1 << 6) #define AD7780_ERR (1 << 5) #define AD7780_ID1 (1 << 4) #define AD7780_ID0 (1 << 3) #define AD7780_GAIN (1 << 2) #define AD7780_PAT1 (1 << 1) #define AD7780_PAT0 (1 << 0) struct ad7780_chip_info { struct iio_chan_spec channel; }; struct ad7780_state { struct spi_device *spi; const struct ad7780_chip_info *chip_info; struct regulator *reg; struct ad7780_platform_data *pdata; wait_queue_head_t wq_data_avail; bool done; u16 int_vref_mv; struct spi_transfer xfer; struct spi_message msg; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
*/ unsigned int data ____cacheline_aligned; }; enum ad7780_supported_device_ids { ID_AD7780, ID_AD7781, }; static int ad7780_read(struct ad7780_state *st, int *val) { int ret; spi_bus_lock(st->spi->master); enable_irq(st->spi->irq); st->done = false; gpio_set_value(st->pdata->gpio_pdrst, 1); ret = wait_event_interruptible(st->wq_data_avail, st->done); disable_irq_nosync(st->spi->irq); if (ret) goto out; ret = spi_sync_locked(st->spi, &st->msg); *val = be32_to_cpu(st->data); out: gpio_set_value(st->pdata->gpio_pdrst, 0); spi_bus_unlock(st->spi->master); return ret; } static int ad7780_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad7780_state *st = iio_priv(indio_dev); struct iio_chan_spec channel = st->chip_info->channel; int ret, smpl = 0; unsigned long scale_uv; switch (m) { case 0: mutex_lock(&indio_dev->mlock); ret = ad7780_read(st, &smpl); mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; if ((smpl & AD7780_ERR) || !((smpl & AD7780_PAT0) && !(smpl & AD7780_PAT1))) return -EIO; *val = (smpl >> channel.scan_type.shift) & ((1 << (channel.scan_type.realbits)) - 1); *val -= (1 << (channel.scan_type.realbits - 1)); if (!(smpl & AD7780_GAIN)) *val *= 128; return IIO_VAL_INT; case (1 << IIO_CHAN_INFO_SCALE_SHARED): scale_uv = (st->int_vref_mv * 100000) >> (channel.scan_type.realbits - 1); *val = scale_uv / 100000; *val2 = (scale_uv % 100000) * 10; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { [ID_AD7780] = { .channel = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0, (1 << IIO_CHAN_INFO_SCALE_SHARED), 0, 0, IIO_ST('s', 24, 32, 8), 0), }, [ID_AD7781] = { .channel = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0, (1 << IIO_CHAN_INFO_SCALE_SHARED), 0, 0, IIO_ST('s', 20, 32, 12), 0), }, }; /** * Interrupt handler */ static irqreturn_t ad7780_interrupt(int irq, void *dev_id) { struct ad7780_state *st = dev_id; st->done = true; 
wake_up_interruptible(&st->wq_data_avail); return IRQ_HANDLED; }; static const struct iio_info ad7780_info = { .read_raw = &ad7780_read_raw, .driver_module = THIS_MODULE, }; static int __devinit ad7780_probe(struct spi_device *spi) { struct ad7780_platform_data *pdata = spi->dev.platform_data; struct ad7780_state *st; struct iio_dev *indio_dev; int ret, voltage_uv = 0; if (!pdata) { dev_dbg(&spi->dev, "no platform data?\n"); return -ENODEV; } indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) return -ENOMEM; st = iio_priv(indio_dev); st->reg = regulator_get(&spi->dev, "vcc"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) goto error_put_reg; voltage_uv = regulator_get_voltage(st->reg); } st->chip_info = &ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data]; st->pdata = pdata; if (pdata && pdata->vref_mv) st->int_vref_mv = pdata->vref_mv; else if (voltage_uv) st->int_vref_mv = voltage_uv / 1000; else dev_warn(&spi->dev, "reference voltage unspecified\n"); spi_set_drvdata(spi, indio_dev); st->spi = spi; indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = &st->chip_info->channel; indio_dev->num_channels = 1; indio_dev->info = &ad7780_info; init_waitqueue_head(&st->wq_data_avail); /* Setup default message */ st->xfer.rx_buf = &st->data; st->xfer.len = st->chip_info->channel.scan_type.storagebits / 8; spi_message_init(&st->msg); spi_message_add_tail(&st->xfer, &st->msg); ret = gpio_request_one(st->pdata->gpio_pdrst, GPIOF_OUT_INIT_LOW, "AD7780 /PDRST"); if (ret) { dev_err(&spi->dev, "failed to request GPIO PDRST\n"); goto error_disable_reg; } ret = request_irq(spi->irq, ad7780_interrupt, IRQF_TRIGGER_FALLING, spi_get_device_id(spi)->name, st); if (ret) goto error_free_gpio; disable_irq(spi->irq); ret = iio_device_register(indio_dev); if (ret) goto error_free_irq; return 0; error_free_irq: free_irq(spi->irq, st); error_free_gpio: 
gpio_free(st->pdata->gpio_pdrst); error_disable_reg: if (!IS_ERR(st->reg)) regulator_disable(st->reg); error_put_reg: if (!IS_ERR(st->reg)) regulator_put(st->reg); iio_free_device(indio_dev); return ret; } static int ad7780_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7780_state *st = iio_priv(indio_dev); free_irq(spi->irq, st); gpio_free(st->pdata->gpio_pdrst); if (!IS_ERR(st->reg)) { regulator_disable(st->reg); regulator_put(st->reg); } iio_device_unregister(indio_dev); return 0; } static const struct spi_device_id ad7780_id[] = { {"ad7780", ID_AD7780}, {"ad7781", ID_AD7781}, {} }; static struct spi_driver ad7780_driver = { .driver = { .name = "ad7780", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ad7780_probe, .remove = __devexit_p(ad7780_remove), .id_table = ad7780_id, }; static int __init ad7780_init(void) { return spi_register_driver(&ad7780_driver); } module_init(ad7780_init); static void __exit ad7780_exit(void) { spi_unregister_driver(&ad7780_driver); } module_exit(ad7780_exit); MODULE_AUTHOR("Michael Hennerich <[email protected]>"); MODULE_DESCRIPTION("Analog Devices AD7780/1 ADC"); MODULE_LICENSE("GPL v2");
81766.c
--- gmem.c.orig 2017-07-13 06:43:21 UTC +++ gmem.c @@ -1,3 +1,8 @@ +#ifdef __sun__ +#define _LARGEFILE64_SOURCE +#define __EXTENSIONS__ +#include <setjmp.h> +#endif #include <stdio.h> #include <stdlib.h> #include "gmem.h"
844428.c
/***************************************************************************//** * * @file eta_csp_io.c * * @brief This file contains eta_csp_io module implementations. * * Copyright (C) 2020 Eta Compute, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This is part of revision 1.0.0b1 of the Tensai Chip Support Package. * ******************************************************************************/ /***************************************************************************//** * @addtogroup ecm3532io-dsp Input/Output (IO) * @ingroup ecm3532csp-dsp * @{ ******************************************************************************/ #include <stdarg.h> #include <stdlib.h> #include <stdbool.h> #include "reg.h" #include "eta_csp_io.h" #include "eta_csp_mailbox.h" #include "eta_chip.h" #include "eta_csp_mailbox_cmd.h" // #ifndef DSP_PRINT_BUFFER_SIZE // #define DSP_PRINT_BUFFER_SIZE (256) // #endif // #ifndef DSP_PRINT_BUFFER_USABLE // #define DSP_PRINT_BUFFER_USABLE (256) // #endif // #ifndef DSP_PRINT_BUFFER_IN_YMEM // #define DSP_PRINT_BUFFER_IN_YMEM (0) // #endif ////////////////////////////////////////////////////////////////////// // Run time control of printf targets #ifdef PRINT_VIA_M3_MBOX static bool bPrintViaM3Mbox = true; #else static bool bPrintViaM3Mbox = false; #endif #ifdef PRINT_VIA_DSP_UART static bool bPrintViaDspUart = true; #else static bool bPrintViaDspUart = false; #endif // These functions select printf, puts, putc targets. 
// More than one can be enabled for any application. /*************************************************************************//** * * EtaCspIoPrintfViaM3Mbox - Enable M3 Mailbox as a printf target. * * @param bEnable - set to true to print via the M3 Mailbox. * ****************************************************************************/ void EtaCspIoPrintfViaM3Mbox(bool bEnable) { bPrintViaM3Mbox = bEnable; } /***************************************************************************//** * * EtaCspIoPrintfViaM3MboxGet - return enable state * * @return bool * ******************************************************************************/ bool EtaCspIoPrintfViaM3MboxGet(void) { return(bPrintViaM3Mbox); } /*************************************************************************//** * * EtaCspIoPrintfViaDspUart - Enable DSP Uart as a printf target. * * @param bEnable - set to true to print via the DSP UART * * The DSP UART is also known as the back door UART. ****************************************************************************/ void EtaCspIoPrintfViaDspUart(bool bEnable) { bPrintViaDspUart = bEnable; } /***************************************************************************//** * * EtaCspIoPrintfViaDspUartGet - return enable state * * @return bool * ******************************************************************************/ bool EtaCspIoPrintfViaDspUartGet(void) { return(bPrintViaDspUart); } // // Run time control of printf targets ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// // Turn on / off all prints // If verbose is 1, prints are enabled. This is usually done inside // main, but can be done here. bool g_bEtaCspIoVerbose = true; // By default, print all debug. 
// This function may be called any time after init_tb(); /***************************************************************************//** * * EtaCspIoVerboseEnable - Enables global variable allowing prints * ******************************************************************************/ void EtaCspIoVerboseEnable(void) { g_bEtaCspIoVerbose = true; } /***************************************************************************//** * * EtaCspIoVerboseDisable - Disables global variable allowing silent dropping * of prints * ******************************************************************************/ void EtaCspIoVerboseDisable(void) { g_bEtaCspIoVerbose = true; } // Turn on / off all prints ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// // PRINT_VIA_M3_MBOX // These functions are needed only when the DSP is printing to a main // UART via the M3 / Mailbox commands. // // A single printf (or puts) from the DSP should not exceed // DSP_PRINT_BUFFER_SIZE // or there will be a total lockup. // // strings printed by the DSP. The M3 then sends it to the UART. 
#if DSP_PRINT_BUFFER_IN_YMEM uchar chess_storage(YMEM) glob_printbuff_data [DSP_PRINT_BUFFER_SIZE]; #else uchar chess_storage(XMEM) glob_printbuff_data [DSP_PRINT_BUFFER_SIZE]; #endif volatile int glob_printbuff_wrptr; // Print buffer write pointer volatile int glob_printbuff_rdptr; // Print buffer read pointer int glob_printbuff_msgstart; // Local variable to store off current // wrptr /***************************************************************************//** * * eta_csp_printviam3_inc_indx - Circular increment of write or read ptr by 1 * * @param curr_ptr - Current array pointer value * * @return pointer incremented by 1 * ******************************************************************************/ int eta_csp_printviam3_inc_indx(int curr_ptr) { if(curr_ptr == (DSP_PRINT_BUFFER_USABLE - 1)) { curr_ptr = 0; } else { curr_ptr++; } return(curr_ptr); } /***************************************************************************//** * * eta_csp_printviam3_start_message - Called at the beginning of print to * record current wrptr * ******************************************************************************/ void eta_csp_printviam3_start_message(void) { // Store the start of this print message glob_printbuff_msgstart = glob_printbuff_wrptr; return; } /***************************************************************************//** * * eta_csp_printviam3_end_message - Ends the process of sending print to M3 via * mailbox (i.e. cleanup and send mailbox) * ******************************************************************************/ void eta_csp_printviam3_end_message(void) { // We need to "print" end of string so we can deliminate commands etaPutc('\0'); uint16_t mailbox_cmd; uint32_t mailbox_message; int byteaddress; // DSP byte address is confusing. // First things first, we need to find address of glob_printbuff_data which // will be in bytes. // Then we take the index, which is also in bytes. 
// But, to make it easier for M3, we'll tell the the system address. The DSP // expresses a 16 bit halfword // in a 32 bit system address space. So this means that when we specify the // starting address, we'll always have // and extra 0 inseted at bit position[1]. // // byteaddress = globaddr + msgstart // systemaddr = byteaddress[14:1],1'b0,byteaddress[0]; byteaddress = ((int)&glob_printbuff_data[0]) + glob_printbuff_msgstart; // This is byte aligned, 32 bit AHB address mailbox_message = DSP_PRINT_BUFFER_IN_YMEM ? DSP_YMEM_START : DSP_XMEM_START; mailbox_message |= ((byteaddress & ~0x1) << 1); mailbox_message |= ((byteaddress & 0x1)); mailbox_cmd = MAILBOX_DSP2M3CMD_PRINTVIAM3; // Let the m3 know we want to print something. EtaCspMboxDsp2M3(mailbox_cmd, mailbox_message); return; } /***************************************************************************//** * * eta_csp_printviam3_putc - PUTC variant of printf specific for Print via M3 * * @param ui8Byte - character to print * * Note this funciton will stall until space is available in the buffer and * may stall forever if space is never available. * ******************************************************************************/ void eta_csp_printviam3_putc(uchar ui8Byte) { int new_wrptr; int byteaddress; int mailbox_16bit_message; uint32_t mbox_message; uchar tempchar; // If we get to end of DSP buffer, we jump back to the beginning. // We "hardcode" a constant at the end of the buffer that is the // "stop, jump back to address xyz" so from putc perspective, we // simply push onto circular buffer. // He we check to make sure we have space to print. If we don't, we stall. // OPTME, permenant silent stall... what can we do better here? // We could print to the DSP/DEBUG UART, but there is no reasonable // expected time that the M3 would print (M3 may be busy). 
Maybe we could // wait 1 sec, but we have no mechanism to determine what 1 sec is, so // we'd have to assume 1 sec at slowest and fastest DSP freq, which is a // timeout of 100s of millions of counts.... feels like a cluster to // do nothing but maybe print if we are enabled... leave as is. new_wrptr = eta_csp_printviam3_inc_indx(glob_printbuff_wrptr); while(new_wrptr == glob_printbuff_rdptr) // Stall forever (interrupt should // replenish credits) { } glob_printbuff_data [glob_printbuff_wrptr] = ui8Byte; glob_printbuff_wrptr = eta_csp_printviam3_inc_indx(glob_printbuff_wrptr); } /***************************************************************************//** * * EtaCspPrintViaM3ProcessPrintCredit - processes M3 print credits * * @param credit - indicates number of characters M3 prints * ******************************************************************************/ void EtaCspPrintViaM3ProcessPrintCredit(int credit) { int temp_rdptr; temp_rdptr = glob_printbuff_rdptr + credit; // Modulo - temp_rdptr = temp_rdptr % (DSP_PRINT_BUFFER_USABLE); if(temp_rdptr >= DSP_PRINT_BUFFER_USABLE) { temp_rdptr = temp_rdptr - DSP_PRINT_BUFFER_USABLE; } glob_printbuff_rdptr = temp_rdptr; // We could use this to increment, but we can do it in one add vs X inc by 1 // for (index0=0; index0 < credit; index0++) // { // glob_printbuff_rdptr = eta_csp_printviam3_inc_indx // (glob_printbuff_rdptr); // } } // PRINT_VIA_M3_MBOX ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// // Print functions /***************************************************************************//** * * etaBaseConvert - print function. converts int to string of char based off of * base. * * @param num - raw hexadecimal number (int) * @param base - base to print (usually 8,10,16) * @return pointer to string of characters ready for print. 
* ******************************************************************************/ static char * etaBaseConvert(unsigned int num, int base) { static char Representation[] = "0123456789ABCDEF\0"; static char buffer[33]; char *ptr; ptr = &buffer[32]; *ptr = '\0'; do { *--ptr = Representation[num % base]; num /= base; } while(num > 0); return(ptr); } /***************************************************************************//** * * etaBaseConvertLong - print function. converts long int to string of char * based off of base. * * @param num - raw hexadecimal number (long int) * @param base - base to print (usually 8,10,16) * @return pointer to string of characters ready for print. * ******************************************************************************/ static char * etaBaseConvertLong(unsigned long int num, int base) { static char Representation[] = "0123456789ABCDEF\0"; static char buffer[33]; char *ptr; ptr = &buffer[32]; *ptr = '\0'; do { *--ptr = Representation[num % base]; num /= base; } while(num > 0); return(ptr); } /***************************************************************************//** * * etaPrintf - main printf function. * * @param receives standard printf strings * * Note: Binary not supported (but it could be if we wanted) * * Note: %x and %d prints 16 bit integers. %lx and %ld prints 32 bit integers. * ******************************************************************************/ void etaPrintf(const char *format, ...) 
{ char *traverse; unsigned int i; long unsigned int long_i; char *s; va_list arg; va_start(arg, format); if(g_bEtaCspIoVerbose) { if(bPrintViaM3Mbox) { eta_csp_printviam3_start_message(); } traverse = (char *)format; while(*traverse != '\0') { while((*traverse != '%') && (*traverse != '\0')) { etaPutc(*traverse); if(*traverse == 0) { break; } traverse++; } if(*traverse == 0) { break; } traverse++; // skip over % character // Module 2: Fetching and executing arguments switch(*traverse) { case 'c': { i = va_arg(arg, int); // Fetch char argument etaPutc(i); break; } case 'd': { i = va_arg(arg, int); // Fetch Decimal/Integer // argument if(i < 0) { i = -i; etaPutc('-'); } _etaPuts(etaBaseConvert(i, 10)); break; } case 'o': { i = va_arg(arg, unsigned int); // Fetch Octal representation _etaPuts(etaBaseConvert(i, 8)); break; } case 's': { s = va_arg(arg, char *); // Fetch string _etaPuts(s); break; } case 'x': { i = va_arg(arg, unsigned int); // Fetch Hexadecimal // representation _etaPuts(etaBaseConvert(i, 16)); break; } case 'l': { traverse++; // skip over l character if(*traverse == 'x') { long_i = va_arg(arg, long unsigned int); // Fetch Long // Hexadecimal // representation _etaPuts(etaBaseConvertLong(long_i, 16)); } else if(*traverse == 'd') { long_i = va_arg(arg, long unsigned int); // Fetch Long // Hexadecimal // representation if(long_i < 0) { long_i = -long_i; etaPutc('-'); } _etaPuts(etaBaseConvertLong(long_i, 10)); } else { // Silently drop if not lx or ld. } break; } } traverse++; // skip over {c,d,o,s,x} character } if(bPrintViaM3Mbox) { eta_csp_printviam3_end_message(); } // Module 3: Closing argument list to necessary clean-up va_end(arg); } } /***************************************************************************//** * * etaPutc - main etaPutc function. 
 *
 * @param x - character to print.
 *
 * Routes the character to the M3 mailbox and/or the DSP debug UART,
 * depending on which of the bPrintViaM3Mbox / bPrintViaDspUart flags are set.
 *
 ******************************************************************************/
void
etaPutc(const char x)
{
    if(bPrintViaM3Mbox)
    {
        eta_csp_printviam3_putc(x);
    }
    if(bPrintViaDspUart)
    {
        EtaCspIoDebugUartPutc(x);
    }
}

/***************************************************************************//**
 *
 * _etaPuts - main _etaPuts function.
 *
 * @param str - NUL terminated string to print, one character at a time,
 *              via etaPutc.
 *
 * Note: _etaPuts is intended for "internal" prints not to be overriden by
 * g_bEtaCspIoVerbose.
 *
 ******************************************************************************/
void
_etaPuts(const char *str)
{
    char x;

    // Assignment inside the condition is intentional: copy each character
    // and stop at the terminating NUL.
    while(x = *str++)
    {
        etaPutc(x);
    }
}

/***************************************************************************//**
 *
 * etaPuts - user callable etaPuts function.
 *
 * @param str - NUL terminated string to print.
 *
 * Note: This version does not call putc so it can be redirected seperately
 * from printf.
 *
 * All output is suppressed when g_bEtaCspIoVerbose is false.
 *
 ******************************************************************************/
void
etaPuts(const char *str)
{
    char x;

    if(g_bEtaCspIoVerbose)
    {
#ifdef PUTS_VIA_M3_MBOX
        eta_csp_printviam3_start_message();
#endif
        while(x = *str++)
        {
            if(bPrintViaM3Mbox)
            {
                eta_csp_printviam3_putc(x);
            }

            // NOTE(review): this unconditional UART write, combined with the
            // conditional write just below, emits each character TWICE on the
            // debug UART whenever bPrintViaDspUart is set.  etaPutc() only
            // writes when the flag is set -- confirm whether the duplicate
            // output here is intentional.
            EtaCspIoDebugUartPutc(x);
            if(bPrintViaDspUart)
            {
                EtaCspIoDebugUartPutc(x);
            }
        }
#ifdef PUTS_VIA_M3_MBOX
        eta_csp_printviam3_end_message();
#endif
    }
}

/***************************************************************************//**
 *
 * EtaCspIoDebugUartPutc - explicit call of Putc for the DSP DEBUG UART.
 *
 * @param cChar - character to print
 *
 ******************************************************************************/
void
EtaCspIoDebugUartPutc(const char cChar)
{
    //
    // Wait for the transmitter to finish before sending in all cases.
    //
    while((REG_DSPCTRL_DSP_DSP_UART & BM_DSPCTRL_DSP_DSP_UART_BUSY) != 0)
    {
    }

    //
    // OK, send the character to the DSP UART. It is safe right now.
    //
    REG_DSPCTRL_DSP_DSP_UART = cChar;

    //
    // Wait for the transmitter to finish before exiting.
    // This is critical to being able to send characters on both the base
    // level and in an ISR context in the same program.
    //
    while((REG_DSPCTRL_DSP_DSP_UART & BM_DSPCTRL_DSP_DSP_UART_BUSY) != 0)
    {
    }
}

/***************************************************************************//**
 *
 * EtaCspIoDebugUartPuts - explicit call of Puts for the DSP DEBUG UART.
 *
 * @param pcString - NUL terminated string to print
 *
 ******************************************************************************/
void
EtaCspIoDebugUartPuts(const char *pcString)
{
    char cChar;

    // Assignment inside the condition is intentional; stop at the NUL.
    while(cChar = *pcString++)
    {
        EtaCspIoDebugUartPutc(cChar);
    }
}

/***************************************************************************//**
 *
 * EtaCspIoMboxPrintInit - If enabled, will initialize DSP Print
 * buffer used by PRINT_VIA_M3_MBOX
 *
 ******************************************************************************/
void
EtaCspIoMboxPrintInit(void)
{
    uint16_t byteaddress;
    uint32_t mailbox_message;

    // Reset the print buffer write/read pointers.
    glob_printbuff_wrptr = 0;
    glob_printbuff_rdptr = 0;

    // This is byte aligned, 32 bit AHB address
    // NOTE(review): the cast assumes the buffer address fits in 16 bits
    // (DSP data memory) -- confirm for this target.
    byteaddress = ((uint16_t)&glob_printbuff_data[0]);

    // Build the mailbox message: select the X or Y memory window, then pack
    // the even-aligned address shifted up one bit and the odd-byte flag in
    // bit 0.
    mailbox_message = DSP_PRINT_BUFFER_IN_YMEM ? DSP_YMEM_START : DSP_XMEM_START;
    mailbox_message |= ((byteaddress & ~0x1) << 1);
    mailbox_message |= ((byteaddress & 0x1));

    // Stash the message bytes (LSB first, working backwards from the end)
    // plus the PWRAP command code in the tail of the print buffer, where the
    // M3 side expects to find them.
    glob_printbuff_data [DSP_PRINT_BUFFER_SIZE - 1] =
        (mailbox_message >> 0) & 0xff;
    glob_printbuff_data [DSP_PRINT_BUFFER_SIZE - 2] =
        (mailbox_message >> 8) & 0xff;
    glob_printbuff_data [DSP_PRINT_BUFFER_SIZE - 3] =
        (mailbox_message >> 16) & 0xff;
    glob_printbuff_data [DSP_PRINT_BUFFER_SIZE - 4] =
        (mailbox_message >> 24) & 0xff;
    glob_printbuff_data [DSP_PRINT_BUFFER_SIZE - 5] =
        MAILBOX_DSP2M3CMD_PWRAP;
}

// Print functions //////////////////////////////////////////////////////////////////////
791202.c
/*
 * atsc3_mime_multipart_related.c
 *
 *  Created on: Mar 25, 2019
 *      Author: jjustman
 */

#include "atsc3_mime_multipart_related.h"

ATSC3_VECTOR_BUILDER_METHODS_IMPLEMENTATION(atsc3_mime_multipart_related_instance, atsc3_mime_multipart_related_payload)
//ATSC3_VECTOR_BUILDER_METHODS_ITEM_FREE(atsc3_mime_multipart_related_payload);

/*
 * Deep-free a single multipart-related payload and clear the caller's pointer.
 *
 * Releases the content_location, content_type and payload buffers via
 * freeclean() (presumably frees each buffer and NULLs the member -- confirm in
 * atsc3_utils), then frees the struct itself and sets
 * *atsc3_mime_multipart_related_payload_p to NULL so the caller is not left
 * holding a dangling pointer.  Safe to call with a NULL argument or with a
 * NULL *argument.
 *
 * Fix: removed the dead store "atsc3_mime_multipart_related_payload = NULL;"
 * that followed free() -- it only cleared a local copy of the pointer and had
 * no effect; the caller's pointer is cleared below.
 */
void atsc3_mime_multipart_related_payload_free(atsc3_mime_multipart_related_payload_t** atsc3_mime_multipart_related_payload_p) {
    if(atsc3_mime_multipart_related_payload_p) {
        atsc3_mime_multipart_related_payload_t* atsc3_mime_multipart_related_payload = *atsc3_mime_multipart_related_payload_p;
        if(atsc3_mime_multipart_related_payload) {
            freeclean((void**)&atsc3_mime_multipart_related_payload->content_location);
            freeclean((void**)&atsc3_mime_multipart_related_payload->content_type);
            freeclean((void**)&atsc3_mime_multipart_related_payload->payload);

            free(atsc3_mime_multipart_related_payload);
        }
        *atsc3_mime_multipart_related_payload_p = NULL;
    }
}
698277.c
/* "Bag-of-pages" zone garbage collector for the GNU compiler. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008 Free Software Foundation, Inc. Contributed by Richard Henderson ([email protected]) and Daniel Berlin ([email protected]). Rewritten by Daniel Jacobowitz <[email protected]>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "tm_p.h" #include "toplev.h" #include "varray.h" #include "flags.h" #include "ggc.h" #include "timevar.h" #include "params.h" #include "bitmap.h" /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a file open. Prefer either to valloc. 
*/ #ifdef HAVE_MMAP_ANON # undef HAVE_MMAP_DEV_ZERO # include <sys/mman.h> # ifndef MAP_FAILED # define MAP_FAILED -1 # endif # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) # define MAP_ANONYMOUS MAP_ANON # endif # define USING_MMAP #endif #ifdef HAVE_MMAP_DEV_ZERO # include <sys/mman.h> # ifndef MAP_FAILED # define MAP_FAILED -1 # endif # define USING_MMAP #endif #ifndef USING_MMAP #error Zone collector requires mmap #endif #if (GCC_VERSION < 3001) #define prefetch(X) ((void) X) #define prefetchw(X) ((void) X) #else #define prefetch(X) __builtin_prefetch (X) #define prefetchw(X) __builtin_prefetch (X, 1, 3) #endif /* FUTURE NOTES: If we track inter-zone pointers, we can mark single zones at a time. If we have a zone where we guarantee no inter-zone pointers, we could mark that zone separately. The garbage zone should not be marked, and we should return 1 in ggc_set_mark for any object in the garbage zone, which cuts off marking quickly. */ /* Strategy: This garbage-collecting allocator segregates objects into zones. It also segregates objects into "large" and "small" bins. Large objects are greater than page size. Pages for small objects are broken up into chunks. The page has a bitmap which marks the start position of each chunk (whether allocated or free). Free chunks are on one of the zone's free lists and contain a pointer to the next free chunk. Chunks in most of the free lists have a fixed size determined by the free list. Chunks in the "other" sized free list have their size stored right after their chain pointer. Empty pages (of all sizes) are kept on a single page cache list, and are considered first when new pages are required; they are deallocated at the start of the next collection if they haven't been recycled by then. The free page list is currently per-zone. */ /* Define GGC_DEBUG_LEVEL to print debugging information. 0: No debugging output. 1: GC statistics only. 2: Page-entry allocations/deallocations as well. 3: Object allocations as well. 
 4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;	/* Next chunk on its free list.  */
  unsigned int size;			/* Only valid for "other" sized
					   chunks (free bin 0).  */
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD	(offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could be
   any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate... */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */
#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)

/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  One allocation bit
   covers this many bytes.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.
If we require each allocated object to contain at least two allocatable units, we can use half as many bits for the mark bitmap. But this adds considerable complexity to sweeping. */ #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type)) /* We use this structure to determine the alignment required for allocations. There are several things wrong with this estimation of alignment. The maximum alignment for a structure is often less than the maximum alignment for a basic data type; for instance, on some targets long long must be aligned to sizeof (int) in a structure and sizeof (long long) in a variable. i386-linux is one example; Darwin is another (sometimes, depending on the compiler in use). Also, long double is not included. Nothing in GCC uses long double, so we assume that this is OK. On powerpc-darwin, adding long double would bring the maximum alignment up to 16 bytes, and until we need long double (or to vectorize compiler operations) that's painfully wasteful. This will need to change, some day. */ struct max_alignment { char c; union { HOST_WIDEST_INT i; double d; } u; }; /* The biggest alignment required. */ #define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) /* Compute the smallest multiple of F that is >= X. */ #define ROUND_UP(x, f) (CEIL (x, f) * (f)) /* Types to use for the allocation and mark bitmaps. It might be a good idea to add ffsl to libiberty and use unsigned long instead; that could speed us up where long is wider than int. */ typedef unsigned int alloc_type; typedef unsigned int mark_type; #define alloc_ffs(x) ffs(x) /* A page_entry records the status of an allocation page. This is the common data between all three kinds of pages - small, large, and PCH. */ typedef struct page_entry { /* The address at which the memory is allocated. */ char *page; /* The zone that this page entry belongs to. 
*/ struct alloc_zone *zone; #ifdef GATHER_STATISTICS /* How many collections we've survived. */ size_t survived; #endif /* Does this page contain small objects, or one large object? */ bool large_p; /* Is this page part of the loaded PCH? */ bool pch_p; } page_entry; /* Additional data needed for small pages. */ struct small_page_entry { struct page_entry common; /* The next small page entry, or NULL if this is the last. */ struct small_page_entry *next; /* If currently marking this zone, a pointer to the mark bits for this page. If we aren't currently marking this zone, this pointer may be stale (pointing to freed memory). */ mark_type *mark_bits; /* The allocation bitmap. This array extends far enough to have one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */ alloc_type alloc_bits[1]; }; /* Additional data needed for large pages. */ struct large_page_entry { struct page_entry common; /* The next large page entry, or NULL if this is the last. */ struct large_page_entry *next; /* The number of bytes allocated, not including the page entry. */ size_t bytes; /* The previous page in the list, so that we can unlink this one. */ struct large_page_entry *prev; /* During marking, is this object marked? */ bool mark_p; }; /* A two-level tree is used to look up the page-entry for a given pointer. Two chunks of the pointer's bits are extracted to index the first and second levels of the tree, as follows: HOST_PAGE_SIZE_BITS 32 | | msb +----------------+----+------+------+ lsb | | | PAGE_L1_BITS | | | PAGE_L2_BITS The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry pages are aligned on system page boundaries. The next most significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first index values in the lookup table, respectively. For 32-bit architectures and the settings below, there are no leftover bits. For architectures with wider pointers, the lookup tree points to a list of pages, which must be scanned to find the correct one. 
*/ #define PAGE_L1_BITS (8) #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT) #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS) #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS) #define LOOKUP_L1(p) \ (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) #define LOOKUP_L2(p) \ (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1)) #if HOST_BITS_PER_PTR <= 32 /* On 32-bit hosts, we use a two level page table, as pictured above. */ typedef page_entry **page_table[PAGE_L1_SIZE]; #else /* On 64-bit hosts, we use the same two level page tables plus a linked list that disambiguates the top 32-bits. There will almost always be exactly one entry in the list. */ typedef struct page_table_chain { struct page_table_chain *next; size_t high_bits; page_entry **table[PAGE_L1_SIZE]; } *page_table; #endif /* The global variables. */ static struct globals { /* The linked list of zones. */ struct alloc_zone *zones; /* Lookup table for associating allocation pages with object addresses. */ page_table lookup; /* The system's page size, and related constants. */ size_t pagesize; size_t lg_pagesize; size_t page_mask; /* The size to allocate for a small page entry. This includes the size of the structure and the size of the allocation bitmap. */ size_t small_page_overhead; #if defined (HAVE_MMAP_DEV_ZERO) /* A file descriptor open to /dev/zero for reading. */ int dev_zero_fd; #endif /* Allocate pages in chunks of this size, to throttle calls to memory allocation routines. The first page is used, the rest go onto the free list. */ size_t quire_size; /* The file descriptor for debugging output. */ FILE *debug_file; } G; /* A zone allocation structure. There is one of these for every distinct allocation zone. */ struct alloc_zone { /* The most recent free chunk is saved here, instead of in the linked free list, to decrease list manipulation. It is most likely that we will want this one. 
*/ char *cached_free; size_t cached_free_size; /* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size FREE_BIN_DELTA. All other chunks are in slot 0. */ struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1]; /* The highest bin index which might be non-empty. It may turn out to be empty, in which case we have to search downwards. */ size_t high_free_bin; /* Bytes currently allocated in this zone. */ size_t allocated; /* Linked list of the small pages in this zone. */ struct small_page_entry *pages; /* Doubly linked list of large pages in this zone. */ struct large_page_entry *large_pages; /* If we are currently marking this zone, a pointer to the mark bits. */ mark_type *mark_bits; /* Name of the zone. */ const char *name; /* The number of small pages currently allocated in this zone. */ size_t n_small_pages; /* Bytes allocated at the end of the last collection. */ size_t allocated_last_gc; /* Total amount of memory mapped. */ size_t bytes_mapped; /* A cache of free system pages. */ struct small_page_entry *free_pages; /* Next zone in the linked list of zones. */ struct alloc_zone *next_zone; /* True if this zone was collected during this collection. */ bool was_collected; /* True if this zone should be destroyed after the next collection. */ bool dead; #ifdef GATHER_STATISTICS struct { /* Total memory allocated with ggc_alloc. */ unsigned long long total_allocated; /* Total overhead for memory to be allocated with ggc_alloc. */ unsigned long long total_overhead; /* Total allocations and overhead for sizes less than 32, 64 and 128. These sizes are interesting because they are typical cache line sizes. */ unsigned long long total_allocated_under32; unsigned long long total_overhead_under32; unsigned long long total_allocated_under64; unsigned long long total_overhead_under64; unsigned long long total_allocated_under128; unsigned long long total_overhead_under128; } stats; #endif } main_zone; /* Some default zones. 
*/ struct alloc_zone rtl_zone; struct alloc_zone tree_zone; struct alloc_zone tree_id_zone; /* The PCH zone does not need a normal zone structure, and it does not live on the linked list of zones. */ struct pch_zone { /* The start of the PCH zone. NULL if there is none. */ char *page; /* The end of the PCH zone. NULL if there is none. */ char *end; /* The size of the PCH zone. 0 if there is none. */ size_t bytes; /* The allocation bitmap for the PCH zone. */ alloc_type *alloc_bits; /* If we are currently marking, the mark bitmap for the PCH zone. When it is first read in, we could avoid marking the PCH, because it will not contain any pointers to GC memory outside of the PCH; however, the PCH is currently mapped as writable, so we must mark it in case new pointers are added. */ mark_type *mark_bits; } pch_zone; #ifdef USING_MMAP static char *alloc_anon (char *, size_t, struct alloc_zone *); #endif static struct small_page_entry * alloc_small_page (struct alloc_zone *); static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *); static void free_chunk (char *, size_t, struct alloc_zone *); static void free_small_page (struct small_page_entry *); static void free_large_page (struct large_page_entry *); static void release_pages (struct alloc_zone *); static void sweep_pages (struct alloc_zone *); static bool ggc_collect_1 (struct alloc_zone *, bool); static void new_ggc_zone_1 (struct alloc_zone *, const char *); /* Traverse the page table and find the entry for a page. Die (probably) if the object wasn't allocated via GC. */ static inline page_entry * lookup_page_table_entry (const void *p) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table = G.lookup; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; while (table->high_bits != high_bits) table = table->next; base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. 
*/ L1 = LOOKUP_L1 (p); L2 = LOOKUP_L2 (p); return base[L1][L2]; } /* Traverse the page table and find the entry for a page. Return NULL if the object wasn't allocated via the GC. */ static inline page_entry * lookup_page_table_if_allocated (const void *p) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table = G.lookup; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; while (1) { if (table == NULL) return NULL; if (table->high_bits == high_bits) break; table = table->next; } base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. */ L1 = LOOKUP_L1 (p); if (! base[L1]) return NULL; L2 = LOOKUP_L2 (p); if (L2 >= PAGE_L2_SIZE) return NULL; /* We might have a page entry which does not correspond exactly to a system page. */ if (base[L1][L2] && (const char *) p < base[L1][L2]->page) return NULL; return base[L1][L2]; } /* Set the page table entry for the page that starts at P. If ENTRY is NULL, clear the entry. */ static void set_page_table_entry (void *p, page_entry *entry) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; for (table = G.lookup; table; table = table->next) if (table->high_bits == high_bits) goto found; /* Not found -- allocate a new table. */ table = XCNEW (struct page_table_chain); table->next = G.lookup; table->high_bits = high_bits; G.lookup = table; found: base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. */ L1 = LOOKUP_L1 (p); L2 = LOOKUP_L2 (p); if (base[L1] == NULL) base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE); base[L1][L2] = entry; } /* Find the page table entry associated with OBJECT. */ static inline struct page_entry * zone_get_object_page (const void *object) { return lookup_page_table_entry (object); } /* Find which element of the alloc_bits array OBJECT should be recorded in. 
 */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  /* Byte offset within the page, scaled down to one bit per
     BYTES_PER_ALLOC_BIT bytes, then one word per word's worth of bits.  */
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  /* Same computation as the alloc word, but using the mark-bit
     granularity and word type.  */
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  /* Look the page up through the page table; OBJECT must live on a
     small page for the alloc_bits access below to be valid.  */
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
Helper function for ggc_get_size and zone_find_object_size. */ static inline size_t zone_object_size_1 (alloc_type *alloc_bits, size_t start_word, size_t start_bit, size_t max_size) { size_t size; alloc_type alloc_word; int indx; /* Load the first word. */ alloc_word = alloc_bits[start_word++]; /* If that was the last bit in this word, we'll want to continue with the next word. Otherwise, handle the rest of this word. */ if (start_bit) { indx = alloc_ffs (alloc_word >> start_bit); if (indx) /* indx is 1-based. We started at the bit after the object's start, but we also ended at the bit after the object's end. It cancels out. */ return indx * BYTES_PER_ALLOC_BIT; /* The extra 1 accounts for the starting unit, before start_bit. */ size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT; if (size >= max_size) return max_size; alloc_word = alloc_bits[start_word++]; } else size = BYTES_PER_ALLOC_BIT; while (alloc_word == 0) { size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT; if (size >= max_size) return max_size; alloc_word = alloc_bits[start_word++]; } indx = alloc_ffs (alloc_word); return size + (indx - 1) * BYTES_PER_ALLOC_BIT; } /* Find the size of OBJECT on small page PAGE. */ static inline size_t zone_find_object_size (struct small_page_entry *page, const void *object) { const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT; unsigned int start_word = zone_get_object_alloc_word (object_midptr); unsigned int start_bit = zone_get_object_alloc_bit (object_midptr); size_t max_size = (page->common.page + SMALL_PAGE_SIZE - (const char *) object); return zone_object_size_1 (page->alloc_bits, start_word, start_bit, max_size); } /* highest_bit assumes that alloc_type is 32 bits. */ extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1]; /* Find the highest set bit in VALUE. Returns the bit number of that bit, using the same values as ffs. 
 */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  /* Smear the highest set bit into every position below it...  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  /* ...then isolate the highest bit: all lower bits cancel in the xor.  */
  value = value ^ (value >> 1);
  /* With a single bit set, ffs gives its 1-based position.  */
  return alloc_ffs (value);
}

/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
			 size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  Walk backwards one word at a
     time until we find a word with an allocation bit set.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.
 */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      /* One zeroed bitmap for the whole zone; each small page gets a
	 fixed-size slice of it.  */
      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      /* The page list and the page count must agree, or the bitmap
	 slices above are wrong.  */
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      /* Out of address space is fatal; there is no recovery path.  */
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.
 */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll
     enable access to smaller pieces of the area in ggc_alloc.
     Discard the handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */
static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      /* Page 0 of the quire becomes the entry we return.  */
      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.
*/ static struct large_page_entry * alloc_large_page (size_t size, struct alloc_zone *zone) { struct large_page_entry *entry; char *page; size_t needed_size; needed_size = size + sizeof (struct large_page_entry); page = XNEWVAR (char, needed_size); entry = (struct large_page_entry *) page; entry->next = NULL; entry->common.page = page + sizeof (struct large_page_entry); entry->common.large_p = true; entry->common.pch_p = false; entry->common.zone = zone; #ifdef GATHER_STATISTICS entry->common.survived = 0; #endif entry->mark_p = false; entry->bytes = size; entry->prev = NULL; set_page_table_entry (entry->common.page, &entry->common); if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "Allocating %s large page at %p, data %p-%p\n", entry->common.zone->name, (PTR) entry, entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1); return entry; } /* For a page that is no longer needed, put it on the free page list. */ static inline void free_small_page (struct small_page_entry *entry) { if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "Deallocating %s page at %p, data %p-%p\n", entry->common.zone->name, (PTR) entry, entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1); gcc_assert (!entry->common.large_p); /* Mark the page as inaccessible. Discard the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page, SMALL_PAGE_SIZE)); entry->next = entry->common.zone->free_pages; entry->common.zone->free_pages = entry; entry->common.zone->n_small_pages--; } /* Release a large page that is no longer needed. */ static inline void free_large_page (struct large_page_entry *entry) { if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "Deallocating %s page at %p, data %p-%p\n", entry->common.zone->name, (PTR) entry, entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1); gcc_assert (entry->common.large_p); set_page_table_entry (entry->common.page, NULL); free (entry); } /* Release the free page cache to the system. 
*/
static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      /* Extend the run while the next free page is physically
	 contiguous with the current one.  The free list is assumed to
	 be in address order for this coalescing to be effective.  */
      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.
   Chunks small enough for a sized bin go into free_chunks[bin]; chunks
   too big for any sized bin go into the overflow bin 0 and record
   their SIZE in the chunk header.  Valgrind annotations keep the
   chunk payload marked inaccessible while it sits on a free list.  */
static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      /* Oversized chunk: store it in bin 0 and remember its size in
	 the header, since bin 0 mixes many sizes.  */
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
						    + sizeof (struct alloc_chunk),
						    size
						    - sizeof (struct alloc_chunk)));
    }
  else
    {
      /* Sized bin: the bin index encodes the size, so only the
	 next_free link needs to be writable.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
						    + sizeof (struct alloc_chunk *),
						    size
						    - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.
   Allocation sources are tried in order of expected frequency:
   (1) split the cached free chunk, (2) an exact-size free bin,
   (3) split the highest non-empty bin, (4) the oversized bin 0,
   (5) a dedicated large page, (6) a brand new small page.  */
void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
		     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also
     be set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a free
     bin of the appropriate size for a small object.  However, we're
     unlikely (1% - 7%) to find one, and this gives better locality
     behavior anyway.  This case handles the lion's share of all
     calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;
      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can find
	     the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  Bin 0 is a singly linked list of chunks of
     assorted sizes, so this is a linear first-fit scan.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's
	 any left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      /* This ENTRY intentionally shadows the outer small-page ENTRY;
	 it refers to the freshly allocated large page.  */
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
						size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}

/* Allocate a SIZE of chunk memory of GTE type, into an appropriate
   zone for that type.  Tree nodes go to the tree zone, RTL to the RTL
   zone, everything else to the main zone.  */
void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  memset ((PTR), 0xa5, (SIZE))
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.
*/
void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      /* A large page holds exactly one object; freeing the object
	 frees the whole page.  */
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with
	 coalescing, since we are likely to want a chunk of this size
	 again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

/* Mark function for strings.  Handles interior pointers: a char* that
   points into the middle of an allocation is assumed to point at the
   str field of a STRING_CST, and the containing tree node is marked
   instead.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      /* PCH objects: compute the distance back to the start of the
	 allocation from the PCH allocation bitmap.  */
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
					alloc_bit);
    }
  else if (entry->large_p)
    {
      /* A large page contains a single object starting at the page
	 base, so the offset is simply the distance from the base.  */
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word,
					start_bit);

      /* On some platforms a char* will not necessarily line up on an
	 allocation boundary, so we have to update the offset to
	 account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return
   true.  P must have been allocated by the GC allocator; it mustn't
   point to static objects, stack variables, or memory allocated with
   malloc.  */
int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      /* PCH zone: one shared mark bitmap for the whole zone.  */
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      /* Large pages have a single per-page mark flag.  */
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      /* Small pages have a per-object mark bitmap.  */
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */
int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
		   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.
*/
size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      /* Walk the PCH allocation bitmap from the bit after P's start
	 bit to find the next object, bounded by the end of the PCH
	 data.  */
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word,
				 alloc_bit, max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  Sets up the built-in zones
   (main, RTL, tree, tree-id), queries the system page size, and works
   around a known StunOS mmap alignment bug.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */
  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap
     allocation after fiddling with RLIMIT_STACK.  The result, as hard
     as it is to believe, is an unaligned page allocation, which would
     cause us to hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
	/* We have a good page, might as well hold onto it...  */
	e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	e->common.page = p;
	e->common.zone = &main_zone;
	e->next = main_zone.free_pages;
	set_page_table_entry (e->common.page, &e->common);
	main_zone.free_pages = e;
      }
    else
      {
	munmap (p, G.pagesize);
      }
  }
#endif
}

/* Start a new GGC zone.  Internal helper: links NEW_ZONE into the
   global zone list just after the head (main zone).  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}

/* Public entry point: heap-allocate and register a new zone.  */
struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
  new_ggc_zone_1 (new_zone, name);
  return new_zone;
}

/* Destroy a GGC zone.  Only flags the zone as dead here; the actual
   unlinking and freeing happens in ggc_collect once the zone is
   empty.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */
    continue;

  /* We should have found the zone in the list.  Anything else is
     fatal.  */
  gcc_assert (z);

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}

/* Free all empty pages and objects within a page for a given zone.
   Recomputes zone->allocated as a side effect and rebuilds the free
   chunk lists from scratch (coalescing adjacent free chunks).  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large
     chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
	{
	  lp->mark_p = false;
	  allocated += lp->bytes;
	  lpp = &lp->next;
	}
      else
	{
	  *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.
	     NOTE(review): poisons only SMALL_PAGE_SIZE bytes of a
	     page that holds lp->bytes bytes — confirm whether this
	     should be lp->bytes.  */
	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  if (lp->prev)
	    lp->prev->next = lp->next;
	  if (lp->next)
	    lp->next->prev = lp->prev;
	  free_large_page (lp);
	}
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
	 insert them into the free lists.  Note that consolidation
	 slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
	{
	  unsigned int i, n;
	  alloc_type alloc_word;
	  mark_type mark_word;

	  alloc_word = *alloc_word_p++;
	  mark_word = *mark_word_p++;

	  if (mark_word)
	    nomarksinpage = false;

	  /* There ought to be some way to do this without looping...  */
	  i = 0;
	  while ((n = alloc_ffs (alloc_word)) != 0)
	    {
	      /* Extend the current state for n - 1 bits.  We can't
		 shift alloc_word by n, even though it isn't used in
		 the loop, in case only the highest bit was set.  */
	      alloc_word >>= n - 1;
	      mark_word >>= n - 1;
	      object += BYTES_PER_MARK_BIT * (n - 1);

	      if (mark_word & 1)
		{
		  /* Object at OBJECT is live.  Flush any pending free
		     run that ended just before it.  */
		  if (last_free)
		    {
		      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
					(last_free,
					 object - last_free));
		      poison_region (last_free, object - last_free);
		      free_chunk (last_free, object - last_free, zone);
		      last_free = NULL;
		    }
		  else
		    allocated += object - last_object;
		  last_object = object;
		}
	      else
		{
		  /* Object at OBJECT is dead.  Either start a new free
		     run, or merge into the current one by clearing its
		     alloc bit.  */
		  if (last_free == NULL)
		    {
		      last_free = object;
		      allocated += object - last_object;
		    }
		  else
		    zone_clear_object_alloc_bit (sp, object);
		}

	      /* Shift to just after the alloc bit we handled.  */
	      alloc_word >>= 1;
	      mark_word >>= 1;
	      object += BYTES_PER_MARK_BIT;

	      i += n;
	    }

	  /* Advance past the bits of this word that had no alloc bit
	     set.  */
	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
	}
      while (object < end);

      if (nomarksinpage)
	{
	  /* Nothing on this page survived; recycle the whole page.  */
	  *spp = snext;
#ifdef ENABLE_GC_CHECKING
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
							 SMALL_PAGE_SIZE));
	  /* Poison the page.  */
	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  free_small_page (sp);
	  continue;
	}
      else if (last_free)
	{
	  /* Flush a free run that extends to the end of the page.  */
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
			    (last_free, object - last_free));
	  poison_region (last_free, object - last_free);
	  free_chunk (last_free, object - last_free, zone);
	}
      else
	allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}

/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns
   true if we collected, false otherwise.  */
static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
	struct alloc_chunk *chunk;
	int n, tot;

	n = 0;
	tot = 0;
	chunk = zone->free_chunks[i];
	while (chunk)
	  {
	    n++;
	    tot += chunk->size;
	    chunk = chunk->next_free;
	  }
	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
		 i, n, tot);
      }
  }
  /* */
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);

  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}

#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.
*/
static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;

  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  /* NOTE(review): divides by zero (inf/NaN) when the zone has no
     pages; callers appear to only use the result for printing.  */
  return survival/count;
}
#endif

/* Top level collection routine.  Decides (via the GGC_MIN_HEAPSIZE /
   GGC_MIN_EXPAND heuristics) whether to collect at all, always
   collects the main zone first, then the remaining zones, and finally
   reaps zones flagged dead by destroy_ggc_zone.  */
void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  allocated_last_gc += zone->allocated_last_gc;
	  allocated += zone->allocated;
	}

      allocated_last_gc
	= MAX (allocated_last_gc,
	       (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      /* Not enough growth since the last collection to justify the
	 cost of another one.  */
      if (allocated < allocated_last_gc + min_expand)
	{
	  timevar_pop (TV_GC);
	  return;
	}
    }

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collection is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  zone->was_collected = false;
	  /* Marking was already done by the first ggc_collect_1 call;
	     !marked passes false so later zones only sweep.  */
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      float f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  A dead zone must be empty by now; unlink it,
     return its pages to the system, and free the zone struct.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n",
		  dead_zone->name);

	  /* The zone must be empty.  */
	  gcc_assert (!dead_zone->allocated);

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;

	  /* Release all the pages of the dead zone, as they are on
	     the free list.  */
	  release_pages (dead_zone);

	  /* Free the dead zone.  */
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  SCALE/LABEL pick a human-friendly
   unit (bytes, k, or M) for a byte count.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s  %10s  %10s\n",
	   "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
	continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
	   large_page = large_page->next)
	{
	  allocated += large_page->bytes;
	  in_use += large_page->bytes;
	  overhead += sizeof (struct large_page_entry);
	}

      /* There's no easy way to walk through the small pages finding
	 used and unused objects.  Instead, add all the pages, and
	 subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
	{
	  struct alloc_chunk *chunk = zone->free_chunks[i];
	  while (chunk)
	    {
	      in_use -= ggc_get_size (chunk);
	      chunk = chunk->next_free;
	    }
	}

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
	       zone->name,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
	pte_overhead += sizeof (*table);
	for (i = 0; i < PAGE_L1_SIZE; i++)
	  if (table->table[i])
	    pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
	table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL(total_allocated),
	   SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
	all_overhead += zone->stats.total_overhead;
	all_allocated += zone->stats.total_allocated;

	all_allocated_under32 += zone->stats.total_allocated_under32;
	all_overhead_under32 += zone->stats.total_overhead_under32;

	all_allocated_under64 += zone->stats.total_allocated_under64;
	all_overhead_under64 += zone->stats.total_overhead_under64;

	all_allocated_under128 += zone->stats.total_allocated_under128;
	all_overhead_under128 += zone->stats.total_overhead_under128;

	/* NOTE(review): %lld with a size_t argument is a format/type
	   mismatch on LP64 hosts — presumably harmless in practice
	   but should cast to (long long); confirm.  */
	fprintf (stderr, "%20s:                  %10lld\n",
		 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
	     all_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
	     all_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
	     all_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
	     all_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
	     all_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
	     all_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
	     all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
	     all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how to
   sort buckets have not yet been tuned.  */
#define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)

#define OTHER_BUCKET		(gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
#define STRING_BUCKET		(gt_types_enum_last + 2)

/* On-disk footer written at the end of the PCH: the total data size
   and the per-bucket sizes.  */
struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

/* In-memory state carried across the PCH writing passes.  */
struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have IS_STRING
   set and TYPE will be gt_types_enum_last.  Other objects of unknown
   type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
	    bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
	return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}

/* Add the size of object X to the size of the PCH data.
*/

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string,
		      enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  Rounds each bucket up to a
   page boundary and appends the size of the allocation bitmap.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  enum gt_types_enum i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}

/* Set the base address for the objects in the PCH file.  Lays the
   buckets out back-to-back starting at BASE_ and lazily allocates the
   allocation bitmap.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string,
		      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
		       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end of the
     padding in the PCH file is, so that we can locate each object's
     offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  Seeks to the object's
   assigned slot (NEWX relative to the PCH base) before writing.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}

/* Finish the PCH: write the allocation bitmap right after the object
   data, append the on-disk footer, and release the writer state.  */
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.
*/ void ggc_pch_read (FILE *f, void *addr) { struct ggc_pch_ondisk d; size_t alloc_size; struct alloc_zone *zone; struct page_entry *pch_page; char *p; if (fread (&d, sizeof (d), 1, f) != 1) fatal_error ("can't read PCH file: %m"); alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8); alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT); pch_zone.bytes = d.total; pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes); pch_zone.page = (char *) addr; pch_zone.end = (char *) pch_zone.alloc_bits; /* We've just read in a PCH file. So, every object that used to be allocated is now free. */ for (zone = G.zones; zone; zone = zone->next_zone) { struct small_page_entry *page, *next_page; struct large_page_entry *large_page, *next_large_page; zone->allocated = 0; /* Clear the zone's free chunk list. */ memset (zone->free_chunks, 0, sizeof (zone->free_chunks)); zone->high_free_bin = 0; zone->cached_free = NULL; zone->cached_free_size = 0; /* Move all the small pages onto the free list. */ for (page = zone->pages; page != NULL; page = next_page) { next_page = page->next; memset (page->alloc_bits, 0, G.small_page_overhead - PAGE_OVERHEAD); free_small_page (page); } /* Discard all the large pages. */ for (large_page = zone->large_pages; large_page != NULL; large_page = next_large_page) { next_large_page = large_page->next; free_large_page (large_page); } zone->pages = NULL; zone->large_pages = NULL; } /* Allocate the dummy page entry for the PCH, and set all pages mapped into the PCH to reference it. */ pch_page = XCNEW (struct page_entry); pch_page->page = pch_zone.page; pch_page->pch_p = true; for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE) set_page_table_entry (p, pch_page); }
477867.c
/* * WPA Supplicant / UNIX domain socket -based control interface * Copyright (c) 2004-2009, Jouni Malinen <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See README and COPYING for more details. */ #include "includes.h" #include <sys/un.h> #include <sys/stat.h> #include <grp.h> #include <stddef.h> #ifdef ANDROID #include <cutils/sockets.h> #endif /* ANDROID */ #include "utils/common.h" #include "utils/eloop.h" #include "utils/list.h" #include "eapol_supp/eapol_supp_sm.h" #include "config.h" #include "wpa_supplicant_i.h" #include "ctrl_iface.h" /* Per-interface ctrl_iface */ /** * struct wpa_ctrl_dst - Internal data structure of control interface monitors * * This structure is used to store information about registered control * interface monitors into struct wpa_supplicant. This data is private to * ctrl_iface_unix.c and should not be touched directly from other files. 
*/ struct wpa_ctrl_dst { struct dl_list list; struct sockaddr_un addr; socklen_t addrlen; int debug_level; int errors; }; struct ctrl_iface_priv { struct wpa_supplicant *wpa_s; int sock; struct dl_list ctrl_dst; }; static void wpa_supplicant_ctrl_iface_send(struct ctrl_iface_priv *priv, int level, const char *buf, size_t len); static int wpa_supplicant_ctrl_iface_attach(struct ctrl_iface_priv *priv, struct sockaddr_un *from, socklen_t fromlen) { struct wpa_ctrl_dst *dst; dst = os_zalloc(sizeof(*dst)); if (dst == NULL) return -1; os_memcpy(&dst->addr, from, sizeof(struct sockaddr_un)); dst->addrlen = fromlen; dst->debug_level = MSG_INFO; dl_list_add(&priv->ctrl_dst, &dst->list); wpa_hexdump(MSG_DEBUG, "CTRL_IFACE monitor attached", (u8 *) from->sun_path, fromlen - offsetof(struct sockaddr_un, sun_path)); return 0; } static int wpa_supplicant_ctrl_iface_detach(struct ctrl_iface_priv *priv, struct sockaddr_un *from, socklen_t fromlen) { struct wpa_ctrl_dst *dst; dl_list_for_each(dst, &priv->ctrl_dst, struct wpa_ctrl_dst, list) { if (fromlen == dst->addrlen && os_memcmp(from->sun_path, dst->addr.sun_path, fromlen - offsetof(struct sockaddr_un, sun_path)) == 0) { dl_list_del(&dst->list); os_free(dst); wpa_hexdump(MSG_DEBUG, "CTRL_IFACE monitor detached", (u8 *) from->sun_path, fromlen - offsetof(struct sockaddr_un, sun_path)); return 0; } } return -1; } static int wpa_supplicant_ctrl_iface_level(struct ctrl_iface_priv *priv, struct sockaddr_un *from, socklen_t fromlen, char *level) { struct wpa_ctrl_dst *dst; wpa_printf(MSG_DEBUG, "CTRL_IFACE LEVEL %s", level); dl_list_for_each(dst, &priv->ctrl_dst, struct wpa_ctrl_dst, list) { if (fromlen == dst->addrlen && os_memcmp(from->sun_path, dst->addr.sun_path, fromlen - offsetof(struct sockaddr_un, sun_path)) == 0) { wpa_hexdump(MSG_DEBUG, "CTRL_IFACE changed monitor " "level", (u8 *) from->sun_path, fromlen - offsetof(struct sockaddr_un, sun_path)); dst->debug_level = atoi(level); return 0; } } return -1; } static void 
wpa_supplicant_ctrl_iface_receive(int sock, void *eloop_ctx, void *sock_ctx) { struct wpa_supplicant *wpa_s = eloop_ctx; struct ctrl_iface_priv *priv = sock_ctx; char buf[4096]; int res; struct sockaddr_un from; socklen_t fromlen = sizeof(from); char *reply = NULL; size_t reply_len = 0; int new_attached = 0; res = recvfrom(sock, buf, sizeof(buf) - 1, 0, (struct sockaddr *) &from, &fromlen); if (res < 0) { perror("recvfrom(ctrl_iface)"); return; } buf[res] = '\0'; if (os_strcmp(buf, "ATTACH") == 0) { if (wpa_supplicant_ctrl_iface_attach(priv, &from, fromlen)) reply_len = 1; else { new_attached = 1; reply_len = 2; } } else if (os_strcmp(buf, "DETACH") == 0) { if (wpa_supplicant_ctrl_iface_detach(priv, &from, fromlen)) reply_len = 1; else reply_len = 2; } else if (os_strncmp(buf, "LEVEL ", 6) == 0) { if (wpa_supplicant_ctrl_iface_level(priv, &from, fromlen, buf + 6)) reply_len = 1; else reply_len = 2; } else { reply = wpa_supplicant_ctrl_iface_process(wpa_s, buf, &reply_len); } if (reply) { sendto(sock, reply, reply_len, 0, (struct sockaddr *) &from, fromlen); os_free(reply); } else if (reply_len == 1) { sendto(sock, "FAIL\n", 5, 0, (struct sockaddr *) &from, fromlen); } else if (reply_len == 2) { sendto(sock, "OK\n", 3, 0, (struct sockaddr *) &from, fromlen); } if (new_attached) eapol_sm_notify_ctrl_attached(wpa_s->eapol); } static char * wpa_supplicant_ctrl_iface_path(struct wpa_supplicant *wpa_s) { char *buf; size_t len; char *pbuf, *dir = NULL, *gid_str = NULL; int res; if (wpa_s->conf->ctrl_interface == NULL) return NULL; pbuf = os_strdup(wpa_s->conf->ctrl_interface); if (pbuf == NULL) return NULL; if (os_strncmp(pbuf, "DIR=", 4) == 0) { dir = pbuf + 4; gid_str = os_strstr(dir, " GROUP="); if (gid_str) { *gid_str = '\0'; gid_str += 7; } } else dir = pbuf; len = os_strlen(dir) + os_strlen(wpa_s->ifname) + 2; buf = os_malloc(len); if (buf == NULL) { os_free(pbuf); return NULL; } res = os_snprintf(buf, len, "%s/%s", dir, wpa_s->ifname); if (res < 0 || (size_t) res 
>= len) { os_free(pbuf); os_free(buf); return NULL; } #ifdef __CYGWIN__ { /* Windows/WinPcap uses interface names that are not suitable * as a file name - convert invalid chars to underscores */ char *pos = buf; while (*pos) { if (*pos == '\\') *pos = '_'; pos++; } } #endif /* __CYGWIN__ */ os_free(pbuf); return buf; } static void wpa_supplicant_ctrl_iface_msg_cb(void *ctx, int level, const char *txt, size_t len) { struct wpa_supplicant *wpa_s = ctx; if (wpa_s == NULL || wpa_s->ctrl_iface == NULL) return; wpa_supplicant_ctrl_iface_send(wpa_s->ctrl_iface, level, txt, len); } struct ctrl_iface_priv * wpa_supplicant_ctrl_iface_init(struct wpa_supplicant *wpa_s) { struct ctrl_iface_priv *priv; struct sockaddr_un addr; char *fname = NULL; gid_t gid = 0; int gid_set = 0; char *buf, *dir = NULL, *gid_str = NULL; struct group *grp; char *endp; priv = os_zalloc(sizeof(*priv)); if (priv == NULL) return NULL; dl_list_init(&priv->ctrl_dst); priv->wpa_s = wpa_s; priv->sock = -1; if (wpa_s->conf->ctrl_interface == NULL) return priv; buf = os_strdup(wpa_s->conf->ctrl_interface); if (buf == NULL) goto fail; #ifdef ANDROID os_snprintf(addr.sun_path, sizeof(addr.sun_path), "wpa_%s", wpa_s->conf->ctrl_interface); priv->sock = android_get_control_socket(addr.sun_path); if (priv->sock >= 0) goto havesock; #endif /* ANDROID */ if (os_strncmp(buf, "DIR=", 4) == 0) { dir = buf + 4; gid_str = os_strstr(dir, " GROUP="); if (gid_str) { *gid_str = '\0'; gid_str += 7; } } else { dir = buf; gid_str = wpa_s->conf->ctrl_interface_group; } if (mkdir(dir, S_IRWXU | S_IRWXG) < 0) { if (errno == EEXIST) { wpa_printf(MSG_DEBUG, "Using existing control " "interface directory."); } else { perror("mkdir[ctrl_interface]"); goto fail; } } if (gid_str) { grp = getgrnam(gid_str); if (grp) { gid = grp->gr_gid; gid_set = 1; wpa_printf(MSG_DEBUG, "ctrl_interface_group=%d" " (from group name '%s')", (int) gid, gid_str); } else { /* Group name not found - try to parse this as gid */ gid = strtol(gid_str, &endp, 
10); if (*gid_str == '\0' || *endp != '\0') { wpa_printf(MSG_ERROR, "CTRL: Invalid group " "'%s'", gid_str); goto fail; } gid_set = 1; wpa_printf(MSG_DEBUG, "ctrl_interface_group=%d", (int) gid); } } if (gid_set && chown(dir, -1, gid) < 0) { perror("chown[ctrl_interface]"); goto fail; } /* Make sure the group can enter and read the directory */ if (gid_set && chmod(dir, S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IXGRP) < 0) { wpa_printf(MSG_ERROR, "CTRL: chmod[ctrl_interface]: %s", strerror(errno)); goto fail; } if (os_strlen(dir) + 1 + os_strlen(wpa_s->ifname) >= sizeof(addr.sun_path)) { wpa_printf(MSG_ERROR, "ctrl_iface path limit exceeded"); goto fail; } priv->sock = socket(PF_UNIX, SOCK_DGRAM, 0); if (priv->sock < 0) { perror("socket(PF_UNIX)"); goto fail; } os_memset(&addr, 0, sizeof(addr)); #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) addr.sun_len = sizeof(addr); #endif /* __FreeBSD__ */ addr.sun_family = AF_UNIX; fname = wpa_supplicant_ctrl_iface_path(wpa_s); if (fname == NULL) goto fail; os_strlcpy(addr.sun_path, fname, sizeof(addr.sun_path)); if (bind(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { wpa_printf(MSG_DEBUG, "ctrl_iface bind(PF_UNIX) failed: %s", strerror(errno)); if (connect(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { wpa_printf(MSG_DEBUG, "ctrl_iface exists, but does not" " allow connections - assuming it was left" "over from forced program termination"); if (unlink(fname) < 0) { perror("unlink[ctrl_iface]"); wpa_printf(MSG_ERROR, "Could not unlink " "existing ctrl_iface socket '%s'", fname); goto fail; } if (bind(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { perror("bind(PF_UNIX)"); goto fail; } wpa_printf(MSG_DEBUG, "Successfully replaced leftover " "ctrl_iface socket '%s'", fname); } else { wpa_printf(MSG_INFO, "ctrl_iface exists and seems to " "be in use - cannot override it"); wpa_printf(MSG_INFO, "Delete '%s' manually if it is " "not used anymore", fname); os_free(fname); fname = NULL; 
goto fail; } } if (gid_set && chown(fname, -1, gid) < 0) { perror("chown[ctrl_interface/ifname]"); goto fail; } if (chmod(fname, S_IRWXU | S_IRWXG) < 0) { perror("chmod[ctrl_interface/ifname]"); goto fail; } os_free(fname); #ifdef ANDROID havesock: #endif /* ANDROID */ eloop_register_read_sock(priv->sock, wpa_supplicant_ctrl_iface_receive, wpa_s, priv); wpa_msg_register_cb(wpa_supplicant_ctrl_iface_msg_cb); os_free(buf); return priv; fail: if (priv->sock >= 0) close(priv->sock); os_free(priv); if (fname) { unlink(fname); os_free(fname); } os_free(buf); return NULL; } void wpa_supplicant_ctrl_iface_deinit(struct ctrl_iface_priv *priv) { struct wpa_ctrl_dst *dst, *prev; if (priv->sock > -1) { char *fname; char *buf, *dir = NULL, *gid_str = NULL; eloop_unregister_read_sock(priv->sock); if (!dl_list_empty(&priv->ctrl_dst)) { /* * Wait a second before closing the control socket if * there are any attached monitors in order to allow * them to receive any pending messages. */ wpa_printf(MSG_DEBUG, "CTRL_IFACE wait for attached " "monitors to receive messages"); os_sleep(1, 0); } close(priv->sock); priv->sock = -1; fname = wpa_supplicant_ctrl_iface_path(priv->wpa_s); if (fname) { unlink(fname); os_free(fname); } buf = os_strdup(priv->wpa_s->conf->ctrl_interface); if (buf == NULL) goto free_dst; if (os_strncmp(buf, "DIR=", 4) == 0) { dir = buf + 4; gid_str = os_strstr(dir, " GROUP="); if (gid_str) { *gid_str = '\0'; gid_str += 7; } } else dir = buf; if (rmdir(dir) < 0) { if (errno == ENOTEMPTY) { wpa_printf(MSG_DEBUG, "Control interface " "directory not empty - leaving it " "behind"); } else { perror("rmdir[ctrl_interface]"); } } os_free(buf); } free_dst: dl_list_for_each_safe(dst, prev, &priv->ctrl_dst, struct wpa_ctrl_dst, list) os_free(dst); os_free(priv); } /** * wpa_supplicant_ctrl_iface_send - Send a control interface packet to monitors * @priv: Pointer to private data from wpa_supplicant_ctrl_iface_init() * @level: Priority level of the message * @buf: Message data * 
@len: Message length * * Send a packet to all monitor programs attached to the control interface. */ static void wpa_supplicant_ctrl_iface_send(struct ctrl_iface_priv *priv, int level, const char *buf, size_t len) { struct wpa_ctrl_dst *dst, *next; char levelstr[10]; int idx, res; struct msghdr msg; struct iovec io[2]; if (priv->sock < 0 || dl_list_empty(&priv->ctrl_dst)) return; res = os_snprintf(levelstr, sizeof(levelstr), "<%d>", level); if (res < 0 || (size_t) res >= sizeof(levelstr)) return; io[0].iov_base = levelstr; io[0].iov_len = os_strlen(levelstr); io[1].iov_base = (char *) buf; io[1].iov_len = len; os_memset(&msg, 0, sizeof(msg)); msg.msg_iov = io; msg.msg_iovlen = 2; idx = 0; dl_list_for_each_safe(dst, next, &priv->ctrl_dst, struct wpa_ctrl_dst, list) { if (level >= dst->debug_level) { wpa_hexdump(MSG_DEBUG, "CTRL_IFACE monitor send", (u8 *) dst->addr.sun_path, dst->addrlen - offsetof(struct sockaddr_un, sun_path)); msg.msg_name = (void *) &dst->addr; msg.msg_namelen = dst->addrlen; if (sendmsg(priv->sock, &msg, 0) < 0) { int _errno = errno; wpa_printf(MSG_INFO, "CTRL_IFACE monitor[%d]: " "%d - %s", idx, errno, strerror(errno)); dst->errors++; if (dst->errors > 1000 || (_errno != ENOBUFS && dst->errors > 10) || _errno == ENOENT) { wpa_supplicant_ctrl_iface_detach( priv, &dst->addr, dst->addrlen); } } else dst->errors = 0; } idx++; } } void wpa_supplicant_ctrl_iface_wait(struct ctrl_iface_priv *priv) { char buf[256]; int res; struct sockaddr_un from; socklen_t fromlen = sizeof(from); for (;;) { wpa_printf(MSG_DEBUG, "CTRL_IFACE - %s - wait for monitor to " "attach", priv->wpa_s->ifname); eloop_wait_for_read_sock(priv->sock); res = recvfrom(priv->sock, buf, sizeof(buf) - 1, 0, (struct sockaddr *) &from, &fromlen); if (res < 0) { perror("recvfrom(ctrl_iface)"); continue; } buf[res] = '\0'; if (os_strcmp(buf, "ATTACH") == 0) { /* handle ATTACH signal of first monitor interface */ if (!wpa_supplicant_ctrl_iface_attach(priv, &from, fromlen)) { 
sendto(priv->sock, "OK\n", 3, 0, (struct sockaddr *) &from, fromlen); /* OK to continue */ return; } else { sendto(priv->sock, "FAIL\n", 5, 0, (struct sockaddr *) &from, fromlen); } } else { /* return FAIL for all other signals */ sendto(priv->sock, "FAIL\n", 5, 0, (struct sockaddr *) &from, fromlen); } } } /* Global ctrl_iface */ struct ctrl_iface_global_priv { struct wpa_global *global; int sock; }; static void wpa_supplicant_global_ctrl_iface_receive(int sock, void *eloop_ctx, void *sock_ctx) { struct wpa_global *global = eloop_ctx; char buf[256]; int res; struct sockaddr_un from; socklen_t fromlen = sizeof(from); char *reply; size_t reply_len; res = recvfrom(sock, buf, sizeof(buf) - 1, 0, (struct sockaddr *) &from, &fromlen); if (res < 0) { perror("recvfrom(ctrl_iface)"); return; } buf[res] = '\0'; reply = wpa_supplicant_global_ctrl_iface_process(global, buf, &reply_len); if (reply) { sendto(sock, reply, reply_len, 0, (struct sockaddr *) &from, fromlen); os_free(reply); } else if (reply_len) { sendto(sock, "FAIL\n", 5, 0, (struct sockaddr *) &from, fromlen); } } struct ctrl_iface_global_priv * wpa_supplicant_global_ctrl_iface_init(struct wpa_global *global) { struct ctrl_iface_global_priv *priv; struct sockaddr_un addr; priv = os_zalloc(sizeof(*priv)); if (priv == NULL) return NULL; priv->global = global; priv->sock = -1; if (global->params.ctrl_interface == NULL) return priv; #ifdef ANDROID priv->sock = android_get_control_socket(global->params.ctrl_interface); if (priv->sock >= 0) goto havesock; #endif /* ANDROID */ wpa_printf(MSG_DEBUG, "Global control interface '%s'", global->params.ctrl_interface); priv->sock = socket(PF_UNIX, SOCK_DGRAM, 0); if (priv->sock < 0) { perror("socket(PF_UNIX)"); goto fail; } os_memset(&addr, 0, sizeof(addr)); #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) addr.sun_len = sizeof(addr); #endif /* __FreeBSD__ */ addr.sun_family = AF_UNIX; os_strlcpy(addr.sun_path, global->params.ctrl_interface, sizeof(addr.sun_path)); if 
(bind(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { perror("bind(PF_UNIX)"); if (connect(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { wpa_printf(MSG_DEBUG, "ctrl_iface exists, but does not" " allow connections - assuming it was left" "over from forced program termination"); if (unlink(global->params.ctrl_interface) < 0) { perror("unlink[ctrl_iface]"); wpa_printf(MSG_ERROR, "Could not unlink " "existing ctrl_iface socket '%s'", global->params.ctrl_interface); goto fail; } if (bind(priv->sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { perror("bind(PF_UNIX)"); goto fail; } wpa_printf(MSG_DEBUG, "Successfully replaced leftover " "ctrl_iface socket '%s'", global->params.ctrl_interface); } else { wpa_printf(MSG_INFO, "ctrl_iface exists and seems to " "be in use - cannot override it"); wpa_printf(MSG_INFO, "Delete '%s' manually if it is " "not used anymore", global->params.ctrl_interface); goto fail; } } #ifdef ANDROID havesock: #endif /* ANDROID */ eloop_register_read_sock(priv->sock, wpa_supplicant_global_ctrl_iface_receive, global, NULL); return priv; fail: if (priv->sock >= 0) close(priv->sock); os_free(priv); return NULL; } void wpa_supplicant_global_ctrl_iface_deinit(struct ctrl_iface_global_priv *priv) { if (priv->sock >= 0) { eloop_unregister_read_sock(priv->sock); close(priv->sock); } if (priv->global->params.ctrl_interface) unlink(priv->global->params.ctrl_interface); os_free(priv); }
495072.c
/** /file utils_xml.c * /brief Parsing the keys for manufacturer, pgn ,spn and function * * This module takes the adress-claimed message (PGN 60928) and uses * the NAME from the CAN Data field to retrieve manufacturer, function * and class of the sender from the iso 11783 NAME list. * * /author Florian Eidner */ #include <stdio.h> #include <stdlib.h> #include "mxml-2.10/mxml.h" #include <unistd.h> #include "utils_general.h" #include "utils_parse.h" int xml_add_device(mxml_node_t* tree, int device_id, u_int64_t data,int sa) { fprintf(stdout,"Add device to xml\n"); char man_name[200]={0}; //Name max is 200 chars, Database max name length is. char func_name[200]={0}; char class_name[200]={0}; char industry_name[200]={0}; parse_get_function(data,func_name); parse_get_class_industry(data,class_name,industry_name); parse_get_manufacturer(data,man_name); char* uuid = int_to_string(device_id); char* device_sa = int_to_string(sa); char date_time[70]; get_time(date_time); mxml_node_t* device = mxmlNewElement(tree,"device"); mxmlElementSetAttr(device, "UUID", uuid); mxmlElementSetAttr(device, "manufacturer", man_name); mxmlElementSetAttr(device, "function", func_name); mxmlElementSetAttr(device, "class",class_name); mxmlElementSetAttr(device, "industry",industry_name); mxmlElementSetAttr(device, "lastClaim",date_time); mxmlElementSetAttr(device, "lastSA",device_sa); mxmlElementSetAttr(device, "status","online"); free(device_sa); free(uuid); return EXIT_SUCCESS; } int xml_update_device(mxml_node_t* tree, int device_sa, int device_uuid) { char date_time[70]; get_time(date_time); char* active_device_sa = int_to_string(device_sa); char* active_device_uuid = int_to_string(device_uuid); mxml_node_t* device = mxmlFindElement(tree,tree,"device","UUID",active_device_uuid,MXML_DESCEND); mxmlElementSetAttr(device,"lastClaim",date_time); mxmlElementSetAttr(device,"lastSA",active_device_sa); mxmlElementSetAttr(device,"status","online"); free(active_device_uuid); free(active_device_sa); 
return EXIT_SUCCESS; } /* * \brief Add message to xml-tree * Adds a detected message and its signals to the isoident.xml tree. */ int xml_add_message(mxml_node_t* device, int message_pgn) { fprintf(stdout,"Add message PGN: %d.\n",message_pgn); mxml_node_t* message = mxmlNewElement(device,"message"); char pgn_name[200]={0}; int pgn_type = parse_get_pgn_name(message_pgn, pgn_name); char* pgn = int_to_string(message_pgn); mxmlElementSetAttr(message, "pgn", pgn); mxmlElementSetAttr(message, "name", pgn_name); free(pgn); switch (pgn_type) { case 1: mxmlElementSetAttr(message, "type", "ISO11783"); break; case 2: mxmlElementSetAttr(message, "type", "J1939"); break; case 3: mxmlElementSetAttr(message, "type", "NMEA2000"); break; default: mxmlElementSetAttr(message, "type", "unknown"); break; } //Add signals to message. int i; mxml_node_t* sig; signal_t signal_spn; signal_spn.name = malloc(200); signal_spn.unit = malloc (20); for (i=1;(parse_get_signals(message_pgn,i,&signal_spn) != 0);i++) { sig = mxmlNewElement(message,"signal"); fprintf(stdout, "Here1\n"); char* spn_id = int_to_string(signal_spn.spn); char* spn_start_bit = int_to_string(signal_spn.start_bit); char* spn_len = int_to_string(signal_spn.len); char* spn_factor = int_to_string(signal_spn.factor); char* spn_offset = int_to_string(signal_spn.offset); char* spn_range_min = int_to_string(signal_spn.range_min); char* spn_range_max = int_to_string(signal_spn.range_max); fprintf(stdout,"Signal spn after:%s\n", spn_id); mxmlElementSetAttr(sig, "spn", spn_id); mxmlElementSetAttr(sig, "name", signal_spn.name); mxmlElementSetAttr(sig, "log", "0"); mxmlElementSetAttr(sig, "start", spn_start_bit); mxmlElementSetAttr(sig, "len", spn_len); mxmlElementSetAttr(sig, "end", "0"); mxmlElementSetAttr(sig, "fac", spn_factor); mxmlElementSetAttr(sig, "offs", spn_offset); mxmlElementSetAttr(sig, "min", spn_range_min); mxmlElementSetAttr(sig, "max", spn_range_max); mxmlElementSetAttr(sig, "type", ""); mxmlElementSetAttr(sig, "unit", 
signal_spn.unit); mxmlElementSetAttr(sig, "ddi", ""); fprintf(stdout,"Added Signal to message.\n"); free(spn_id); free(spn_len); free(spn_start_bit); free(spn_factor); free(spn_offset); free(spn_range_max); free(spn_range_min); } free (signal_spn.name); free (signal_spn.unit); return EXIT_SUCCESS; } // /* // * \brief Write isoident.xml // * Takes the current tree and save it in file. // */ int xml_write_file(const char* path, mxml_node_t* parentnode, mxml_node_t* node1, mxml_node_t* node2,mxml_node_t* node3,mxml_node_t* node4) { fprintf(stdout, "Writing isoident.xml..."); FILE *xmlFile; mxml_node_t *xml; mxml_node_t *parent; xml = mxmlNewXML("1.0"); parent = mxmlNewElement(xml,"isoident"); mxmlAdd(parent,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node1); mxmlAdd(parent,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node2); mxmlAdd(parent,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node3); mxmlAdd(parent,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node4); xmlFile = fopen(path,"w"); mxmlSaveFile(xml,xmlFile,MXML_NO_CALLBACK); mxmlAdd(parentnode,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node1); mxmlAdd(parentnode,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node2); mxmlAdd(parentnode,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node3); mxmlAdd(parentnode,MXML_ADD_AFTER,MXML_ADD_TO_PARENT,node4); fclose(xmlFile); mxmlDelete(xml); fprintf(stdout,"Successfully updated file.\n"); return EXIT_SUCCESS; }
255684.c
/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

/*
 * Per-target context.  Reads always go to dev_read; writes go to
 * dev_write when a separate write device was configured, otherwise
 * they also use dev_read with the read delay.
 */
struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;	/* serializes mod_timer vs. timer_pending check */
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;	/* bios waiting for their deadline */
	atomic_t may_delay;		/* cleared in presuspend: pass bios through */

	struct dm_dev *dev_read;
	sector_t start_read;
	unsigned read_delay;		/* milliseconds */
	unsigned reads;			/* currently delayed read bios */

	struct dm_dev *dev_write;
	sector_t start_write;
	unsigned write_delay;		/* milliseconds */
	unsigned writes;		/* currently delayed write bios */
};

/* Per-bio bookkeeping, stored in the target's per-bio data area. */
struct dm_delay_info {
	struct delay_c *context;
	struct list_head list;
	unsigned long expires;		/* jiffies deadline for this bio */
};

/* Protects every delay_c's delayed_bios list and reads/writes counters. */
static DEFINE_MUTEX(delayed_bios_lock);

/* Timer callback: defer the actual flushing to process context. */
static void handle_delayed_timer(unsigned long data)
{
	struct delay_c *dc = (struct delay_c *)data;

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

/* (Re)arm the timer so it fires no later than EXPIRES. */
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

/* Submit a NULL-terminated singly linked chain of bios. */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

/*
 * Collect all delayed bios that are due (or all of them when
 * FLUSH_ALL is set), re-arming the timer for the earliest remaining
 * deadline.  Returns the collected bios as a chain for flush_bios.
 */
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	int start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bios, bio);
			if ((bio_data_dir(bio) == WRITE))
				delayed->context->writes--;
			else
				delayed->context->reads--;
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}

	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

/* Workqueue handler: push every expired bio down the stack. */
static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
 *
 * With separate write parameters, the first set is only used for reads.
 * Delays are specified in milliseconds.
 */
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	unsigned long long tmpll;
	char dummy;

	if (argc != 3 && argc != 6) {
		ti->error = "requires exactly 3 or 6 arguments";
		return -EINVAL;
	}

	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	dc->reads = dc->writes = 0;

	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	dc->start_read = tmpll;

	if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		goto bad;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &dc->dev_read)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	dc->dev_write = NULL;
	if (argc == 3)
		goto out;

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid write device sector";
		goto bad_dev_read;
	}
	dc->start_write = tmpll;

	if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
		ti->error = "Invalid write delay";
		goto bad_dev_read;
	}

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
			  &dc->dev_write)) {
		ti->error = "Write device lookup failed";
		goto bad_dev_read;
	}

out:
	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);

	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_bio_data_size = sizeof(struct dm_delay_info);
	ti->private = dc;
	return 0;

/* Error unwinding: release devices in the reverse order of acquisition. */
bad_queue:
	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);
bad_dev_read:
	dm_put_device(ti, dc->dev_read);
bad:
	kfree(dc);
	return -EINVAL;
}

static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	destroy_workqueue(dc->kdelayd_wq);

	dm_put_device(ti, dc->dev_read);

	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);

	kfree(dc);
}

/*
 * Queue BIO for deferred submission after DELAY ms.  Returns 1
 * (DM_MAPIO_REMAPPED) when the bio should be dispatched immediately —
 * zero delay, or delaying disabled during suspend — and 0
 * (DM_MAPIO_SUBMITTED) when it was queued.
 */
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!delay || !atomic_read(&dc->may_delay))
		return 1;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + (delay * HZ / 1000);

	mutex_lock(&delayed_bios_lock);

	if (bio_data_dir(bio) == WRITE)
		dc->writes++;
	else
		dc->reads++;

	list_add_tail(&delayed->list, &dc->delayed_bios);

	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return 0;
}

/* Stop delaying and flush everything before the device is suspended. */
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

/* Remap the bio to the read or write device and delay it. */
static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;

	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
		bio->bi_bdev = dc->dev_write->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = dc->start_write +
						 dm_target_offset(ti, bio->bi_iter.bi_sector);

		return delay_bio(dc, dc->write_delay, bio);
	}

	bio->bi_bdev = dc->dev_read->bdev;
	bio->bi_iter.bi_sector = dc->start_read +
				 dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, dc->read_delay, bio);
}

static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Counts of currently delayed bios. */
		DMEMIT("%u %u", dc->reads, dc->writes);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dc->dev_read->name,
		       (unsigned long long) dc->start_read,
		       dc->read_delay);
		if (dc->dev_write)
			DMEMIT(" %s %llu %u", dc->dev_write->name,
			       (unsigned long long) dc->start_write,
			       dc->write_delay);
		break;
	}
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
	if (ret)
		goto out;

	if (dc->dev_write)
		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);

out:
	return ret;
}

static struct target_type delay_target = {
	.name	     = "delay",
	.version     = {1, 2, 1},
	.module      = THIS_MODULE,
	.ctr	     = delay_ctr,
	.dtr	     = delay_dtr,
	.map	     = delay_map,
	.presuspend  = delay_presuspend,
	.resume	     = delay_resume,
	.status	     = delay_status,
	.iterate_devices = delay_iterate_devices,
};

static int __init dm_delay_init(void)
{
	int r;

	r = dm_register_target(&delay_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad_register;
	}

	return 0;

bad_register:
	return r;
}

static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <[email protected]>");
MODULE_LICENSE("GPL");
533845.c
/* $OpenBSD: ec_ameth.c,v 1.25 2018/08/24 20:22:15 tb Exp $ */ /* Written by Dr Stephen N Henson ([email protected]) for the OpenSSL * project 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * ([email protected]). This product includes software written by Tim * Hudson ([email protected]). * */ #include <stdio.h> #include <openssl/opensslconf.h> #include <openssl/bn.h> #include <openssl/ec.h> #include <openssl/err.h> #include <openssl/x509.h> #include "asn1_locl.h" static int eckey_param2type(int *pptype, void **ppval, EC_KEY * ec_key) { const EC_GROUP *group; int nid; if (ec_key == NULL || (group = EC_KEY_get0_group(ec_key)) == NULL) { ECerror(EC_R_MISSING_PARAMETERS); return 0; } if (EC_GROUP_get_asn1_flag(group) && (nid = EC_GROUP_get_curve_name(group))) { /* we have a 'named curve' => just set the OID */ *ppval = OBJ_nid2obj(nid); *pptype = V_ASN1_OBJECT; } else { /* explicit parameters */ ASN1_STRING *pstr = NULL; pstr = ASN1_STRING_new(); if (!pstr) return 0; pstr->length = i2d_ECParameters(ec_key, &pstr->data); if (pstr->length <= 0) { ASN1_STRING_free(pstr); ECerror(ERR_R_EC_LIB); return 0; } *ppval = pstr; *pptype = V_ASN1_SEQUENCE; } return 1; } static int eckey_pub_encode(X509_PUBKEY * pk, const EVP_PKEY * pkey) { EC_KEY *ec_key = pkey->pkey.ec; void *pval = NULL; int ptype; unsigned char *penc = NULL, *p; int penclen; if (!eckey_param2type(&ptype, &pval, ec_key)) { ECerror(ERR_R_EC_LIB); return 0; } penclen = i2o_ECPublicKey(ec_key, NULL); if (penclen <= 0) goto err; penc = 
malloc(penclen); if (!penc) goto err; p = penc; penclen = i2o_ECPublicKey(ec_key, &p); if (penclen <= 0) goto err; if (X509_PUBKEY_set0_param(pk, OBJ_nid2obj(EVP_PKEY_EC), ptype, pval, penc, penclen)) return 1; err: if (ptype == V_ASN1_OBJECT) ASN1_OBJECT_free(pval); else ASN1_STRING_free(pval); free(penc); return 0; } static EC_KEY * eckey_type2param(int ptype, const void *pval) { EC_KEY *eckey = NULL; if (ptype == V_ASN1_SEQUENCE) { const ASN1_STRING *pstr = pval; const unsigned char *pm = NULL; int pmlen; pm = pstr->data; pmlen = pstr->length; if (!(eckey = d2i_ECParameters(NULL, &pm, pmlen))) { ECerror(EC_R_DECODE_ERROR); goto ecerr; } } else if (ptype == V_ASN1_OBJECT) { const ASN1_OBJECT *poid = pval; EC_GROUP *group; /* * type == V_ASN1_OBJECT => the parameters are given by an * asn1 OID */ if ((eckey = EC_KEY_new()) == NULL) { ECerror(ERR_R_MALLOC_FAILURE); goto ecerr; } group = EC_GROUP_new_by_curve_name(OBJ_obj2nid(poid)); if (group == NULL) goto ecerr; EC_GROUP_set_asn1_flag(group, OPENSSL_EC_NAMED_CURVE); if (EC_KEY_set_group(eckey, group) == 0) goto ecerr; EC_GROUP_free(group); } else { ECerror(EC_R_DECODE_ERROR); goto ecerr; } return eckey; ecerr: if (eckey) EC_KEY_free(eckey); return NULL; } static int eckey_pub_decode(EVP_PKEY * pkey, X509_PUBKEY * pubkey) { const unsigned char *p = NULL; const void *pval; int ptype, pklen; EC_KEY *eckey = NULL; X509_ALGOR *palg; if (!X509_PUBKEY_get0_param(NULL, &p, &pklen, &palg, pubkey)) return 0; X509_ALGOR_get0(NULL, &ptype, &pval, palg); eckey = eckey_type2param(ptype, pval); if (!eckey) { ECerror(ERR_R_EC_LIB); return 0; } /* We have parameters now set public key */ if (!o2i_ECPublicKey(&eckey, &p, pklen)) { ECerror(EC_R_DECODE_ERROR); goto ecerr; } EVP_PKEY_assign_EC_KEY(pkey, eckey); return 1; ecerr: if (eckey) EC_KEY_free(eckey); return 0; } static int eckey_pub_cmp(const EVP_PKEY * a, const EVP_PKEY * b) { int r; const EC_GROUP *group = EC_KEY_get0_group(b->pkey.ec); const EC_POINT *pa = 
EC_KEY_get0_public_key(a->pkey.ec), *pb = EC_KEY_get0_public_key(b->pkey.ec); r = EC_POINT_cmp(group, pa, pb, NULL); if (r == 0) return 1; if (r == 1) return 0; return -2; } static int eckey_priv_decode(EVP_PKEY * pkey, const PKCS8_PRIV_KEY_INFO * p8) { const unsigned char *p = NULL; const void *pval; int ptype, pklen; EC_KEY *eckey = NULL; const X509_ALGOR *palg; if (!PKCS8_pkey_get0(NULL, &p, &pklen, &palg, p8)) return 0; X509_ALGOR_get0(NULL, &ptype, &pval, palg); eckey = eckey_type2param(ptype, pval); if (!eckey) goto ecliberr; /* We have parameters now set private key */ if (!d2i_ECPrivateKey(&eckey, &p, pklen)) { ECerror(EC_R_DECODE_ERROR); goto ecerr; } /* calculate public key (if necessary) */ if (EC_KEY_get0_public_key(eckey) == NULL) { const BIGNUM *priv_key; const EC_GROUP *group; EC_POINT *pub_key; /* * the public key was not included in the SEC1 private key => * calculate the public key */ group = EC_KEY_get0_group(eckey); pub_key = EC_POINT_new(group); if (pub_key == NULL) { ECerror(ERR_R_EC_LIB); goto ecliberr; } if (!EC_POINT_copy(pub_key, EC_GROUP_get0_generator(group))) { EC_POINT_free(pub_key); ECerror(ERR_R_EC_LIB); goto ecliberr; } priv_key = EC_KEY_get0_private_key(eckey); if (!EC_POINT_mul(group, pub_key, priv_key, NULL, NULL, NULL)) { EC_POINT_free(pub_key); ECerror(ERR_R_EC_LIB); goto ecliberr; } if (EC_KEY_set_public_key(eckey, pub_key) == 0) { EC_POINT_free(pub_key); ECerror(ERR_R_EC_LIB); goto ecliberr; } EC_POINT_free(pub_key); } EVP_PKEY_assign_EC_KEY(pkey, eckey); return 1; ecliberr: ECerror(ERR_R_EC_LIB); ecerr: if (eckey) EC_KEY_free(eckey); return 0; } static int eckey_priv_encode(PKCS8_PRIV_KEY_INFO * p8, const EVP_PKEY * pkey) { EC_KEY *ec_key; unsigned char *ep, *p; int eplen, ptype; void *pval; unsigned int tmp_flags, old_flags; ec_key = pkey->pkey.ec; if (!eckey_param2type(&ptype, &pval, ec_key)) { ECerror(EC_R_DECODE_ERROR); return 0; } /* set the private key */ /* * do not include the parameters in the SEC1 private key see 
PKCS#11 * 12.11 */ old_flags = EC_KEY_get_enc_flags(ec_key); tmp_flags = old_flags | EC_PKEY_NO_PARAMETERS; EC_KEY_set_enc_flags(ec_key, tmp_flags); eplen = i2d_ECPrivateKey(ec_key, NULL); if (!eplen) { EC_KEY_set_enc_flags(ec_key, old_flags); ECerror(ERR_R_EC_LIB); return 0; } ep = malloc(eplen); if (!ep) { EC_KEY_set_enc_flags(ec_key, old_flags); ECerror(ERR_R_MALLOC_FAILURE); return 0; } p = ep; if (!i2d_ECPrivateKey(ec_key, &p)) { EC_KEY_set_enc_flags(ec_key, old_flags); free(ep); ECerror(ERR_R_EC_LIB); return 0; } /* restore old encoding flags */ EC_KEY_set_enc_flags(ec_key, old_flags); if (!PKCS8_pkey_set0(p8, OBJ_nid2obj(NID_X9_62_id_ecPublicKey), 0, ptype, pval, ep, eplen)) return 0; return 1; } static int int_ec_size(const EVP_PKEY * pkey) { return ECDSA_size(pkey->pkey.ec); } static int ec_bits(const EVP_PKEY * pkey) { BIGNUM *order = BN_new(); const EC_GROUP *group; int ret; if (!order) { ERR_clear_error(); return 0; } group = EC_KEY_get0_group(pkey->pkey.ec); if (!EC_GROUP_get_order(group, order, NULL)) { BN_free(order); ERR_clear_error(); return 0; } ret = BN_num_bits(order); BN_free(order); return ret; } static int ec_missing_parameters(const EVP_PKEY * pkey) { if (EC_KEY_get0_group(pkey->pkey.ec) == NULL) return 1; return 0; } static int ec_copy_parameters(EVP_PKEY * to, const EVP_PKEY * from) { return EC_KEY_set_group(to->pkey.ec, EC_KEY_get0_group(from->pkey.ec)); } static int ec_cmp_parameters(const EVP_PKEY * a, const EVP_PKEY * b) { const EC_GROUP *group_a = EC_KEY_get0_group(a->pkey.ec), *group_b = EC_KEY_get0_group(b->pkey.ec); if (EC_GROUP_cmp(group_a, group_b, NULL)) return 0; else return 1; } static void int_ec_free(EVP_PKEY * pkey) { EC_KEY_free(pkey->pkey.ec); } static int do_EC_KEY_print(BIO * bp, const EC_KEY * x, int off, int ktype) { unsigned char *buffer = NULL; const char *ecstr; size_t buf_len = 0, i; int ret = 0, reason = ERR_R_BIO_LIB; BIGNUM *pub_key = NULL, *order = NULL; BN_CTX *ctx = NULL; const EC_GROUP *group; const 
EC_POINT *public_key; const BIGNUM *priv_key; if (x == NULL || (group = EC_KEY_get0_group(x)) == NULL) { reason = ERR_R_PASSED_NULL_PARAMETER; goto err; } ctx = BN_CTX_new(); if (ctx == NULL) { reason = ERR_R_MALLOC_FAILURE; goto err; } if (ktype > 0) { public_key = EC_KEY_get0_public_key(x); if (public_key != NULL) { if ((pub_key = EC_POINT_point2bn(group, public_key, EC_KEY_get_conv_form(x), NULL, ctx)) == NULL) { reason = ERR_R_EC_LIB; goto err; } if (pub_key) buf_len = (size_t) BN_num_bytes(pub_key); } } if (ktype == 2) { priv_key = EC_KEY_get0_private_key(x); if (priv_key && (i = (size_t) BN_num_bytes(priv_key)) > buf_len) buf_len = i; } else priv_key = NULL; if (ktype > 0) { buf_len += 10; if ((buffer = malloc(buf_len)) == NULL) { reason = ERR_R_MALLOC_FAILURE; goto err; } } if (ktype == 2) ecstr = "Private-Key"; else if (ktype == 1) ecstr = "Public-Key"; else ecstr = "ECDSA-Parameters"; if (!BIO_indent(bp, off, 128)) goto err; if ((order = BN_new()) == NULL) goto err; if (!EC_GROUP_get_order(group, order, NULL)) goto err; if (BIO_printf(bp, "%s: (%d bit)\n", ecstr, BN_num_bits(order)) <= 0) goto err; if ((priv_key != NULL) && !ASN1_bn_print(bp, "priv:", priv_key, buffer, off)) goto err; if ((pub_key != NULL) && !ASN1_bn_print(bp, "pub: ", pub_key, buffer, off)) goto err; if (!ECPKParameters_print(bp, group, off)) goto err; ret = 1; err: if (!ret) ECerror(reason); BN_free(pub_key); BN_free(order); BN_CTX_free(ctx); free(buffer); return (ret); } static int eckey_param_decode(EVP_PKEY * pkey, const unsigned char **pder, int derlen) { EC_KEY *eckey; if (!(eckey = d2i_ECParameters(NULL, pder, derlen))) { ECerror(ERR_R_EC_LIB); return 0; } EVP_PKEY_assign_EC_KEY(pkey, eckey); return 1; } static int eckey_param_encode(const EVP_PKEY * pkey, unsigned char **pder) { return i2d_ECParameters(pkey->pkey.ec, pder); } static int eckey_param_print(BIO * bp, const EVP_PKEY * pkey, int indent, ASN1_PCTX * ctx) { return do_EC_KEY_print(bp, pkey->pkey.ec, indent, 0); } static 
int eckey_pub_print(BIO * bp, const EVP_PKEY * pkey, int indent, ASN1_PCTX * ctx)
{
	/* ktype 1 = print the public key only */
	return do_EC_KEY_print(bp, pkey->pkey.ec, indent, 1);
}

static int
eckey_priv_print(BIO * bp, const EVP_PKEY * pkey, int indent, ASN1_PCTX * ctx)
{
	/* ktype 2 = print private and public components */
	return do_EC_KEY_print(bp, pkey->pkey.ec, indent, 2);
}

/* Legacy decoder: raw SEC1 ECPrivateKey DER -> EVP_PKEY (pre-PKCS#8 format). */
static int
old_ec_priv_decode(EVP_PKEY * pkey, const unsigned char **pder, int derlen)
{
	EC_KEY *ec;

	if (!(ec = d2i_ECPrivateKey(NULL, pder, derlen))) {
		ECerror(EC_R_DECODE_ERROR);
		return 0;
	}
	/* ownership of ec transfers to pkey */
	EVP_PKEY_assign_EC_KEY(pkey, ec);
	return 1;
}

/* Legacy encoder: EVP_PKEY -> raw SEC1 ECPrivateKey DER. */
static int
old_ec_priv_encode(const EVP_PKEY * pkey, unsigned char **pder)
{
	return i2d_ECPrivateKey(pkey->pkey.ec, pder);
}

/*
 * ASN.1-method control hook.
 * PKCS7_SIGN: derive the signature algorithm OID from the digest in use.
 * DEFAULT_MD_NID: report SHA-1 as this method's default digest.
 */
static int
ec_pkey_ctrl(EVP_PKEY * pkey, int op, long arg1, void *arg2)
{
	switch (op) {
	case ASN1_PKEY_CTRL_PKCS7_SIGN:
		if (arg1 == 0) {
			int snid, hnid;
			X509_ALGOR *alg1, *alg2;

			PKCS7_SIGNER_INFO_get0_algs(arg2, NULL, &alg1, &alg2);
			if (alg1 == NULL || alg1->algorithm == NULL)
				return -1;
			hnid = OBJ_obj2nid(alg1->algorithm);
			if (hnid == NID_undef)
				return -1;
			/* look up the combined sig alg for (digest, EC) */
			if (!OBJ_find_sigid_by_algs(&snid, hnid, EVP_PKEY_id(pkey)))
				return -1;
			X509_ALGOR_set0(alg2, OBJ_nid2obj(snid), V_ASN1_UNDEF, 0);
		}
		return 1;

	case ASN1_PKEY_CTRL_DEFAULT_MD_NID:
		*(int *) arg2 = NID_sha1;
		return 2;

	default:
		return -2;
	}
}

/* EVP_PKEY ASN.1 method table wiring all EC key codecs together. */
const EVP_PKEY_ASN1_METHOD eckey_asn1_meth = {
	.pkey_id = EVP_PKEY_EC,
	.pkey_base_id = EVP_PKEY_EC,

	.pem_str = "EC",
	.info = "OpenSSL EC algorithm",

	.pub_decode = eckey_pub_decode,
	.pub_encode = eckey_pub_encode,
	.pub_cmp = eckey_pub_cmp,
	.pub_print = eckey_pub_print,

	.priv_decode = eckey_priv_decode,
	.priv_encode = eckey_priv_encode,
	.priv_print = eckey_priv_print,

	.pkey_size = int_ec_size,
	.pkey_bits = ec_bits,

	.param_decode = eckey_param_decode,
	.param_encode = eckey_param_encode,
	.param_missing = ec_missing_parameters,
	.param_copy = ec_copy_parameters,
	.param_cmp = ec_cmp_parameters,
	.param_print = eckey_param_print,

	.pkey_free = int_ec_free,
	.pkey_ctrl = ec_pkey_ctrl,
	.old_priv_decode = old_ec_priv_decode,
	.old_priv_encode = old_ec_priv_encode
};
44835.c
/*! \file gd32f4xx_exti.c \brief EXTI driver \version 2016-08-15, V1.0.0, firmware for GD32F4xx \version 2018-12-12, V2.0.0, firmware for GD32F4xx \version 2018-12-25, V2.1.0, firmware for GD32F4xx (The version is for mbed) */ /* Copyright (c) 2018, GigaDevice Semiconductor Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "gd32f4xx_exti.h" /*! 
\brief deinitialize the EXTI \param[in] none \param[out] none \retval none */ void exti_deinit(void) { /* reset the value of all the EXTI registers */ EXTI_INTEN = (uint32_t)0x00000000U; EXTI_EVEN = (uint32_t)0x00000000U; EXTI_RTEN = (uint32_t)0x00000000U; EXTI_FTEN = (uint32_t)0x00000000U; EXTI_SWIEV = (uint32_t)0x00000000U; } /*! \brief initialize the EXTI \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[in] mode: interrupt or event mode, refer to exti_mode_enum only one parameter can be selected which is shown as below: \arg EXTI_INTERRUPT: interrupt mode \arg EXTI_EVENT: event mode \param[in] trig_type: interrupt trigger type, refer to exti_trig_type_enum only one parameter can be selected which is shown as below: \arg EXTI_TRIG_RISING: rising edge trigger \arg EXTI_TRIG_FALLING: falling trigger \arg EXTI_TRIG_BOTH: rising and falling trigger \param[out] none \retval none */ void exti_init(exti_line_enum linex, \ exti_mode_enum mode, \ exti_trig_type_enum trig_type) { /* reset the EXTI line x */ EXTI_INTEN &= ~(uint32_t)linex; EXTI_EVEN &= ~(uint32_t)linex; EXTI_RTEN &= ~(uint32_t)linex; EXTI_FTEN &= ~(uint32_t)linex; /* set the EXTI mode and enable the interrupts or events from EXTI line x */ switch (mode) { case EXTI_INTERRUPT: EXTI_INTEN |= (uint32_t)linex; break; case EXTI_EVENT: EXTI_EVEN |= (uint32_t)linex; break; default: break; } /* set the EXTI trigger type */ switch (trig_type) { case EXTI_TRIG_RISING: EXTI_RTEN |= (uint32_t)linex; EXTI_FTEN &= ~(uint32_t)linex; break; case EXTI_TRIG_FALLING: EXTI_RTEN &= ~(uint32_t)linex; EXTI_FTEN |= (uint32_t)linex; break; case EXTI_TRIG_BOTH: EXTI_RTEN |= (uint32_t)linex; EXTI_FTEN |= (uint32_t)linex; break; default: break; } } /*! 
\brief enable the interrupts from EXTI line x \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_interrupt_enable(exti_line_enum linex) { EXTI_INTEN |= (uint32_t)linex; } /*! \brief disable the interrupt from EXTI line x \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_interrupt_disable(exti_line_enum linex) { EXTI_INTEN &= ~(uint32_t)linex; } /*! \brief enable the events from EXTI line x \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_event_enable(exti_line_enum linex) { EXTI_EVEN |= (uint32_t)linex; } /*! \brief disable the events from EXTI line x \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_event_disable(exti_line_enum linex) { EXTI_EVEN &= ~(uint32_t)linex; } /*! \brief enable EXTI software interrupt event \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_software_interrupt_enable(exti_line_enum linex) { EXTI_SWIEV |= (uint32_t)linex; } /*! \brief disable EXTI software interrupt event \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_software_interrupt_disable(exti_line_enum linex) { EXTI_SWIEV &= ~(uint32_t)linex; } /*! 
\brief get EXTI lines flag \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval FlagStatus: status of flag (RESET or SET) */ FlagStatus exti_flag_get(exti_line_enum linex) { if (RESET != (EXTI_PD & (uint32_t)linex)) { return SET; } else { return RESET; } } /*! \brief clear EXTI lines pending flag \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_flag_clear(exti_line_enum linex) { EXTI_PD = (uint32_t)linex; } /*! \brief get EXTI lines flag when the interrupt flag is set \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval FlagStatus: status of flag (RESET or SET) */ FlagStatus exti_interrupt_flag_get(exti_line_enum linex) { uint32_t flag_left, flag_right; flag_left = EXTI_PD & (uint32_t)linex; flag_right = EXTI_INTEN & (uint32_t)linex; if ((RESET != flag_left) && (RESET != flag_right)) { return SET; } else { return RESET; } } /*! \brief clear EXTI lines pending flag \param[in] linex: EXTI line number, refer to exti_line_enum only one parameter can be selected which is shown as below: \arg EXTI_x (x=0..22): EXTI line x \param[out] none \retval none */ void exti_interrupt_flag_clear(exti_line_enum linex) { EXTI_PD = (uint32_t)linex; }
452272.c
/*- * Copyright (c) 2008 David Schultz <[email protected]> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Tests for corner cases in trigonometric functions. Some accuracy tests * are included as well, but these are very basic sanity checks, not * intended to be comprehensive. * * The program for generating representable numbers near multiples of pi is * available at http://www.cs.berkeley.edu/~wkahan/testpi/ . 
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD: soc2013/dpl/head/tools/regression/lib/msun/test-trig.c 252447 2013-06-02 04:30:03Z das $"); #include <assert.h> #include <fenv.h> #include <float.h> #include <math.h> #include <stdio.h> #include "test-utils.h" #define LEN(a) (sizeof(a) / sizeof((a)[0])) #pragma STDC FENV_ACCESS ON /* * Test that a function returns the correct value and sets the * exception flags correctly. The exceptmask specifies which * exceptions we should check. We need to be lenient for several * reasons, but mainly because on some architectures it's impossible * to raise FE_OVERFLOW without raising FE_INEXACT. * * These are macros instead of functions so that assert provides more * meaningful error messages. * * XXX The volatile here is to avoid gcc's bogus constant folding and work * around the lack of support for the FENV_ACCESS pragma. */ #define test(func, x, result, exceptmask, excepts) do { \ volatile long double _d = x; \ assert(feclearexcept(FE_ALL_EXCEPT) == 0); \ assert(fpequal((func)(_d), (result))); \ assert(((void)(func), fetestexcept(exceptmask) == (excepts))); \ } while (0) #define testall(prefix, x, result, exceptmask, excepts) do { \ test(prefix, x, (double)result, exceptmask, excepts); \ test(prefix##f, x, (float)result, exceptmask, excepts); \ test(prefix##l, x, result, exceptmask, excepts); \ } while (0) #define testdf(prefix, x, result, exceptmask, excepts) do { \ test(prefix, x, (double)result, exceptmask, excepts); \ test(prefix##f, x, (float)result, exceptmask, excepts); \ } while (0) /* * Test special cases in sin(), cos(), and tan(). */ static void run_special_tests(void) { /* Values at 0 should be exact. 
*/ testall(tan, 0.0, 0.0, ALL_STD_EXCEPT, 0); testall(tan, -0.0, -0.0, ALL_STD_EXCEPT, 0); testall(cos, 0.0, 1.0, ALL_STD_EXCEPT, 0); testall(cos, -0.0, 1.0, ALL_STD_EXCEPT, 0); testall(sin, 0.0, 0.0, ALL_STD_EXCEPT, 0); testall(sin, -0.0, -0.0, ALL_STD_EXCEPT, 0); /* func(+-Inf) == NaN */ testall(tan, INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); testall(sin, INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); testall(cos, INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); testall(tan, -INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); testall(sin, -INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); testall(cos, -INFINITY, NAN, ALL_STD_EXCEPT, FE_INVALID); /* func(NaN) == NaN */ testall(tan, NAN, NAN, ALL_STD_EXCEPT, 0); testall(sin, NAN, NAN, ALL_STD_EXCEPT, 0); testall(cos, NAN, NAN, ALL_STD_EXCEPT, 0); } /* * Tests to ensure argument reduction for large arguments is accurate. */ static void run_reduction_tests(void) { /* floats very close to odd multiples of pi */ static const float f_pi_odd[] = { 85563208.0f, 43998769152.0f, 9.2763667655669323e+25f, 1.5458357838905804e+29f, }; /* doubles very close to odd multiples of pi */ static const double d_pi_odd[] = { 3.1415926535897931, 91.106186954104004, 642615.9188844458, 3397346.5699258847, 6134899525417045.0, 3.0213551960457761e+43, 1.2646209897993783e+295, 6.2083625380677099e+307, }; /* long doubles very close to odd multiples of pi */ #if LDBL_MANT_DIG == 64 static const long double ld_pi_odd[] = { 1.1891886960373841596e+101L, 1.07999475322710967206e+2087L, 6.522151627890431836e+2147L, 8.9368974898260328229e+2484L, 9.2961044110572205863e+2555L, 4.90208421886578286e+3189L, 1.5275546401232615884e+3317L, 1.7227465626338900093e+3565L, 2.4160090594000745334e+3808L, 9.8477555741888350649e+4314L, 1.6061597222105160737e+4326L, }; #elif LDBL_MANT_DIG == 113 static const long double ld_pi_odd[] = { /* XXX */ }; #endif int i; for (i = 0; i < LEN(f_pi_odd); i++) { assert(fabs(sinf(f_pi_odd[i])) < FLT_EPSILON); assert(cosf(f_pi_odd[i]) == -1.0); 
assert(fabs(tan(f_pi_odd[i])) < FLT_EPSILON); assert(fabs(sinf(-f_pi_odd[i])) < FLT_EPSILON); assert(cosf(-f_pi_odd[i]) == -1.0); assert(fabs(tanf(-f_pi_odd[i])) < FLT_EPSILON); assert(fabs(sinf(f_pi_odd[i] * 2)) < FLT_EPSILON); assert(cosf(f_pi_odd[i] * 2) == 1.0); assert(fabs(tanf(f_pi_odd[i] * 2)) < FLT_EPSILON); assert(fabs(sinf(-f_pi_odd[i] * 2)) < FLT_EPSILON); assert(cosf(-f_pi_odd[i] * 2) == 1.0); assert(fabs(tanf(-f_pi_odd[i] * 2)) < FLT_EPSILON); } for (i = 0; i < LEN(d_pi_odd); i++) { assert(fabs(sin(d_pi_odd[i])) < 2 * DBL_EPSILON); assert(cos(d_pi_odd[i]) == -1.0); assert(fabs(tan(d_pi_odd[i])) < 2 * DBL_EPSILON); assert(fabs(sin(-d_pi_odd[i])) < 2 * DBL_EPSILON); assert(cos(-d_pi_odd[i]) == -1.0); assert(fabs(tan(-d_pi_odd[i])) < 2 * DBL_EPSILON); assert(fabs(sin(d_pi_odd[i] * 2)) < 2 * DBL_EPSILON); assert(cos(d_pi_odd[i] * 2) == 1.0); assert(fabs(tan(d_pi_odd[i] * 2)) < 2 * DBL_EPSILON); assert(fabs(sin(-d_pi_odd[i] * 2)) < 2 * DBL_EPSILON); assert(cos(-d_pi_odd[i] * 2) == 1.0); assert(fabs(tan(-d_pi_odd[i] * 2)) < 2 * DBL_EPSILON); } #if LDBL_MANT_DIG > 53 for (i = 0; i < LEN(ld_pi_odd); i++) { assert(fabsl(sinl(ld_pi_odd[i])) < LDBL_EPSILON); assert(cosl(ld_pi_odd[i]) == -1.0); assert(fabsl(tanl(ld_pi_odd[i])) < LDBL_EPSILON); assert(fabsl(sinl(-ld_pi_odd[i])) < LDBL_EPSILON); assert(cosl(-ld_pi_odd[i]) == -1.0); assert(fabsl(tanl(-ld_pi_odd[i])) < LDBL_EPSILON); assert(fabsl(sinl(ld_pi_odd[i] * 2)) < LDBL_EPSILON); assert(cosl(ld_pi_odd[i] * 2) == 1.0); assert(fabsl(tanl(ld_pi_odd[i] * 2)) < LDBL_EPSILON); assert(fabsl(sinl(-ld_pi_odd[i] * 2)) < LDBL_EPSILON); assert(cosl(-ld_pi_odd[i] * 2) == 1.0); assert(fabsl(tanl(-ld_pi_odd[i] * 2)) < LDBL_EPSILON); } #endif } /* * Tests the accuracy of these functions over the primary range. */ static void run_accuracy_tests(void) { /* For small args, sin(x) = tan(x) = x, and cos(x) = 1. 
*/ testall(sin, 0xd.50ee515fe4aea16p-114L, 0xd.50ee515fe4aea16p-114L, ALL_STD_EXCEPT, FE_INEXACT); testall(tan, 0xd.50ee515fe4aea16p-114L, 0xd.50ee515fe4aea16p-114L, ALL_STD_EXCEPT, FE_INEXACT); testall(cos, 0xd.50ee515fe4aea16p-114L, 1.0, ALL_STD_EXCEPT, FE_INEXACT); /* * These tests should pass for f32, d64, and ld80 as long as * the error is <= 0.75 ulp (round to nearest) */ #if LDBL_MANT_DIG <= 64 #define testacc testall #else #define testacc testdf #endif testacc(sin, 0.17255452780841205174L, 0.17169949801444412683L, ALL_STD_EXCEPT, FE_INEXACT); testacc(sin, -0.75431944555904520893L, -0.68479288156557286353L, ALL_STD_EXCEPT, FE_INEXACT); testacc(cos, 0.70556358769838947292L, 0.76124620693117771850L, ALL_STD_EXCEPT, FE_INEXACT); testacc(cos, -0.34061437849088045332L, 0.94254960031831729956L, ALL_STD_EXCEPT, FE_INEXACT); testacc(tan, -0.15862817413325692897L, -0.15997221861309522115L, ALL_STD_EXCEPT, FE_INEXACT); testacc(tan, 0.38374784931303813530L, 0.40376500259976759951L, ALL_STD_EXCEPT, FE_INEXACT); /* * XXX missing: * - tests for ld128 * - tests for other rounding modes (probably won't pass for now) * - tests for large numbers that get reduced to hi+lo with lo!=0 */ } int main(int argc, char *argv[]) { printf("1..3\n"); run_special_tests(); printf("ok 1 - trig\n"); #ifndef __i386__ run_reduction_tests(); #endif printf("ok 2 - trig\n"); #ifndef __i386__ run_accuracy_tests(); #endif printf("ok 3 - trig\n"); return (0); }
475534.c
/* * Simple Graphics - Screen Rotation * * Copyright (c) 2014-2015 Charles McManis, all rights reserved. * * This source code is licensed under a Creative Commons 4.0 * International Public license. * * See: http://creativecommons.org/licenses/by/4.0/legalcode for * details. * * The library tests both rotation of the screen and * rotation of the text. */ #include <stdint.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include "test.h" int main(int argc, char *argv[]) { int a, b; enum gfx_rotate r = GFX_ROT_0; printf("Testing the graphics code\n"); if (argc == 3) { int angle; if (strncmp(argv[1], "-r", 2)) { fprintf(stderr, "usage: rotation [-s 0|90|180|270]\n"); exit(1); } angle = atoi(argv[2]); switch (angle) { case 90: r = GFX_ROT_90; break; case 180: r = GFX_ROT_180; break; case 270: r = GFX_ROT_270; break; default: break; } } gfx_init(NULL, draw_pixel, SWIDTH, SHEIGHT, GFX_FONT_LARGE); gfx_setRotation(r); printf("New screen height, width is %d, %d\n", gfx_height(), gfx_width()); gfx_fillScreen(C_SPACE); gfx_setTextColor(C_AT, C_SPACE); gfx_setCursor(5, 15); gfx_setTextRotation(GFX_ROT_0); gfx_puts("Rotation "); gfx_setTextRotation(GFX_ROT_90); gfx_puts("is "); gfx_setTextRotation(GFX_ROT_180); gfx_puts("useful "); gfx_setTextRotation(GFX_ROT_270); gfx_puts("eh?"); gfx_setTextRotation(GFX_ROT_0); gfx_drawRoundRect(28,20, 34, 11, 3, C_DASH); gfx_setTextColor(C_STAR, C_STAR); gfx_setCursor(34, 30); gfx_puts("Up\034"); print_screen(); }
635766.c
/* $Source: bitbucket.org:berkeleylab/gasnet.git/other/amxtests/testam.c $
 * Description: AMX test
 * Copyright 2004, Dan Bonachea <[email protected]>
 * Terms of use are as specified in license.txt
 */
#include "testam.h"

/* Report (but do not abort on) a failed runtime check, tagged with the
 * calling rank, the failed expression text and the source location. */
#define verify(cond) do { \
  if (!(cond)) { fprintf(stderr,"%i: ERROR: verify(%s) failed at %s:%i\n", \
    MYPROC, #cond, __FILE__,__LINE__); fflush(stderr); \
  } } while(0)

/* Always-zero flag used to reference the SPMD utility functions below
 * without actually calling them (volatile, so the compiler cannot prove
 * the guarded branch dead and discard the references). */
volatile int false = 0;
/* ------------------------------------------------------------------------------------ */
/* Expected source endpoint / name / tag for checkAMshort, recorded by main()
 * before issuing the request.  check_tag doubles as the completion flag:
 * the handler clears it after all checks have run. */
ep_t check_ep;
en_t check_en;
tag_t check_tag;

/* AM request handler: verifies the token-query functions against the values
 * recorded in the globals above, then clears check_tag. */
void checkAMshort(void *token) {
  ep_t ep;
  en_t en;
  tag_t tag;
  int id = -1;
  AM_Safe(AMX_GetSourceId(token,&id));
  verify(id == 0);
  AM_Safe(AM_GetSourceEndpoint(token,&en));
  verify(AMX_enEqual(en,check_en));
  AM_Safe(AM_GetDestEndpoint(token,&ep));
  verify(ep == check_ep);
  AM_Safe(AM_GetMsgTag(token,&tag));
  verify(tag == check_tag);
  check_tag = 0; /* signal completion to the polling loop in main() */
}
/* ------------------------------------------------------------------------------------ */
int main(int argc, char **argv) {
  eb_t eb;
  ep_t ep;
  uint64_t networkpid;
  int partner;
  int iters=0, polling = 1, i;

#if defined(AMUDP)
  /* Environment variables set locally; checked again after the compute loop
   * via AMX_SPMDgetenvMaster() to verify master-side propagation. */
  const char *envvars[][3] = {
    #define DEFVAR(key,val) { key, val, key "=" val },
    DEFVAR("A","A")
    DEFVAR("B","B")
    DEFVAR("C","C")
    DEFVAR("ABC","ABC")
    DEFVAR("AReallyLongEnvironmentName","A Really Long Environment Value")
  };
  for (i=0; i < sizeof(envvars)/sizeof(envvars[0]); i++) {
    putenv((char*)(envvars[i][2]));
  }
#endif

  TEST_STARTUP(argc, argv, networkpid, eb, ep, 1, 2, "iters (Poll/Block)");

  /* argv[1]: iteration count (defaults to 1); argv[2]: 'P' = poll, 'B' = block */
  if (argc > 1) iters = atoi(argv[1]);
  if (!iters) iters = 1;
  if (argc > 2) {
    switch(argv[2][0]) {
      case 'p': case 'P': polling = 1; break;
      case 'b': case 'B': polling = 0; break;
      default: printf("polling must be 'P' or 'B'..\n");
        AMX_SPMDExit(1);
    }
  }

  { /* misc tests for functions possibly not covered elsewhere */
    int numhand = AM_MaxNumHandlers();
    verify(numhand >= 256);
    int n = 0;
    tag_t t,mytag;
    AM_Safe(AM_SetNumHandlers(ep, numhand));
    AM_Safe(AM_GetNumHandlers(ep, &n));
    verify(n >= numhand);
    AM_Safe(AM_GetTag(ep, &mytag));
    for (n=0; n < NUMPROCS; n++) {
      verify(AM_GetTranslationInuse(ep,n) == AM_OK);
      AM_Safe(AM_GetTranslationTag(ep,n,&t));
      if (n == MYPROC) verify(t == mytag);
    }
    /* one past the last mapped translation must report not-in-use */
    verify(AM_GetTranslationInuse(ep,NUMPROCS) != AM_OK);
  }

  { /* test direct endpoint/bundle ops */
    eb_t eb1, eb2;
    ep_t ep1;
    en_t en1, en2;
    int i,n,m;
    handler_t h;
    size_t sz;
    void *vp;
    tag_t t1 = (tag_t)0x1234ABCDU;
    AM_Safe(AM_AllocateBundle(AM_SEQ,&eb1));
    AM_Safe(AM_AllocateBundle(AM_SEQ,&eb2));
    AM_Safe(AM_AllocateEndpoint(eb1,&ep1,&en1));
    check_ep = ep1;
    check_en = en1;
    AM_Safe(AM_GetNumTranslations(ep1, &n));
    verify(n >= 256);
    AM_Safe(AM_MaxNumTranslations(&m));
    verify(m >= 256);
    if (m > 1024) m = 1024;
    AM_Safe(AM_SetNumTranslations(ep1, m));
    AM_Safe(AM_GetNumTranslations(ep1, &n));
    verify(n == m);
    /* map, unmap and re-map a translation entry, checking its tag each time */
    AM_Safe(AM_MapAny(ep1,&i,en1,t1));
    verify(i >= 0 && i < m);
    verify(AM_GetTranslationInuse(ep1,i) == AM_OK);
    check_tag = 0;
    AM_Safe(AM_GetTranslationTag(ep1,i,&check_tag));
    verify(t1 == check_tag);
    AM_Safe(AM_UnMap(ep1,i));
    verify(AM_GetTranslationInuse(ep1,i) != AM_OK);
    AM_Safe(AM_Map(ep1,i,en1,t1));
    AM_Safe(AM_MaxSegLength(&sz));
    verify(sz >= sizeof(int));
    AM_Safe(AM_SetSeg(ep1,&n,sizeof(int)));
    AM_Safe(AM_SetTag(ep1,t1));
    /* endpoint state must survive moving it to another bundle and the
     * destruction of its original bundle */
    AM_Safe(AM_MoveEndpoint(ep1,eb1,eb2));
    AM_Safe(AM_FreeBundle(eb1));
    verify(AM_GetTranslationInuse(ep1,i) == AM_OK);
    check_tag = 0;
    AM_Safe(AM_GetTranslationTag(ep1,i,&check_tag));
    verify(t1 == check_tag);
    AM_Safe(AM_GetTranslationName(ep1,i,&en2));
    verify(AMX_enEqual(en1, en2));
    AM_Safe(AM_GetSeg(ep1,&vp,&sz));
    verify(sz == sizeof(int));
    verify(vp == &n);
    AM_Safe(AM_GetNumTranslations(ep1, &n));
    verify(n == m);
    check_tag = 0;
    AM_Safe(AM_GetTag(ep1,&check_tag));
    verify(t1 == check_tag);
    AM_Safe(AM_SetExpectedResources(ep1,1,1));
    h = (handler_t)-1;
    AM_Safe(AM_SetHandlerAny(ep1,&h,checkAMshort));
    i = (int)h; // bug3459: avoid a warning
    verify(i >= 0 && i < AM_MaxNumHandlers());
    /* issue the request; checkAMshort clears check_tag once it has run */
    AM_Safe(AM_Request0(ep1,0,h));
    do {
      AM_Safe(AM_Poll(eb2));
    } while (check_tag);
    AM_Safe(AM_FreeEndpoint(ep1));
    AM_Safe(AM_FreeBundle(eb2));
  }

  /* setup handlers */
  SETUP_ALLAM();
  setupUtilHandlers(ep, eb);

  VMsegsz = 2*sizeof(testam_payload_t)*NUMHANDLERS_PER_TYPE;
  VMseg = malloc(VMsegsz);
  memset(VMseg, 0, VMsegsz);
  AM_Safe(AM_SetSeg(ep, VMseg, VMsegsz));

  if (false) { /* don't actually call these, just ensure they link properly */
    static char *ignore;
    AMX_SPMDSetExitCallback(NULL);
    ignore += ignore - AMX_SPMDgetenvMaster("PATH");
    (void) AMX_SPMDIsWorker(argv);
    AMX_SPMDAllGather(NULL, NULL, 0);
    AMX_SPMDkillmyprocess(0);
  }

  /* barrier */
  AM_Safe(AMX_SPMDBarrier());

  partner = (MYPROC + 1)%NUMPROCS;

  /* compute */
  for (i=0; i < iters; i++) {
    ALLAM_REQ(partner);
    while (!ALLAM_DONE(i+1)) {
      if (polling) {
        AM_Safe(AM_Poll(eb));
      } else {
        AM_Safe(AM_SetEventMask(eb, AM_NOTEMPTY));
        AM_Safe(AM_WaitSema(eb));
        AM_Safe(AM_Poll(eb));
      }
    }
  }

#if defined(AMUDP)
  /* confirm the master propagated the environment settings made above */
  for (i=0; i < sizeof(envvars)/sizeof(envvars[0]); i++) {
    const char *key = envvars[i][0];
    const char *val = envvars[i][1];
    const char *actual = AMX_SPMDgetenvMaster(key);
    if (!actual) actual = "<undef>";
    if (strcmp(val, actual))
      printf("P%i: ERROR Environment value mismatch: %s='%s'\n",
             MYPROC, key, actual);
  }
#endif

  printf("Slave %i done.\n", MYPROC);
  fflush(stdout);

  /* barrier */
  AM_Safe(AMX_SPMDBarrier());

  printGlobalStats();

  AM_Safe(AMX_SPMDBarrier());

  /* exit */
  AM_Safe(AMX_SPMDExit(0));

  return 0;
}
/* ------------------------------------------------------------------------------------ */
901782.c
/* * Copyright (c) 2014-2015 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/vmalloc.h> #include "hns_dsaf_mac.h" #include "hns_dsaf_main.h" #include "hns_dsaf_ppe.h" #include "hns_dsaf_rcb.h" #include "hns_dsaf_misc.h" const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", [DSAF_MODE_DISABLE_SP] = "single-port", }; static const struct acpi_device_id hns_dsaf_acpi_match[] = { { "HISI00B1", 0 }, { "HISI00B2", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match); int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) { int ret, i; u32 desc_num; u32 buf_size; u32 reset_offset = 0; u32 res_idx = 0; const char *mode_str; struct regmap *syscon; struct resource *res; struct device_node *np = dsaf_dev->dev->of_node, *np_temp; struct platform_device *pdev = to_platform_device(dsaf_dev->dev); if (dev_of_node(dsaf_dev->dev)) { if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; else dsaf_dev->dsaf_ver = AE_VERSION_2; } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { if (acpi_dev_found(hns_dsaf_acpi_match[0].id)) dsaf_dev->dsaf_ver = AE_VERSION_1; else if (acpi_dev_found(hns_dsaf_acpi_match[1].id)) dsaf_dev->dsaf_ver = AE_VERSION_2; else return -ENXIO; } else { dev_err(dsaf_dev->dev, "cannot get cfg data from of or 
acpi\n"); return -ENXIO; } ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); return ret; } for (i = 0; i < DSAF_MODE_MAX; i++) { if (g_dsaf_mode_match[i] && !strcmp(mode_str, g_dsaf_mode_match[i])) break; } if (i >= DSAF_MODE_MAX || i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) { dev_err(dsaf_dev->dev, "%s prs mode str fail!\n", dsaf_dev->ae_dev.name); return -EINVAL; } dsaf_dev->dsaf_mode = (enum dsaf_mode)i; if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE) dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE; else dsaf_dev->dsaf_en = HRD_DSAF_MODE; if ((i == DSAF_MODE_ENABLE_16VM) || (i == DSAF_MODE_DISABLE_2PORT_8VM) || (i == DSAF_MODE_DISABLE_6PORT_2VM)) dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE; else dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; if (dev_of_node(dsaf_dev->dev)) { np_temp = of_parse_phandle(np, "subctrl-syscon", 0); syscon = syscon_node_to_regmap(np_temp); of_node_put(np_temp); if (IS_ERR_OR_NULL(syscon)) { res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); if (!res) { dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); return -ENOMEM; } dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dsaf_dev->sc_base)) return PTR_ERR(dsaf_dev->sc_base); res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); if (!res) { dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); return -ENOMEM; } dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dsaf_dev->sds_base)) return PTR_ERR(dsaf_dev->sds_base); } else { dsaf_dev->sub_ctrl = syscon; } } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); if (!res) { res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); if (!res) { dev_err(dsaf_dev->dev, "ppe-base info is needed!\n"); return -ENOMEM; } } dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dsaf_dev->ppe_base)) return PTR_ERR(dsaf_dev->ppe_base); dsaf_dev->ppe_paddr = 
res->start; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsaf-base"); if (!res) { res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx); if (!res) { dev_err(dsaf_dev->dev, "dsaf-base info is needed!\n"); return -ENOMEM; } } dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dsaf_dev->io_base)) return PTR_ERR(dsaf_dev->io_base); } ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num); if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || desc_num > HNS_DSAF_MAX_DESC_CNT) { dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n", desc_num, ret); return -EINVAL; } dsaf_dev->desc_num = desc_num; ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset", &reset_offset); if (ret < 0) { dev_dbg(dsaf_dev->dev, "get reset-field-offset fail, ret=%d!\r\n", ret); } dsaf_dev->reset_offset = reset_offset; ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size); if (ret < 0) { dev_err(dsaf_dev->dev, "get buf-size fail, ret=%d!\r\n", ret); return ret; } dsaf_dev->buf_size = buf_size; dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size); if (dsaf_dev->buf_size_type < 0) { dev_err(dsaf_dev->dev, "buf_size(%d) is wrong!\n", buf_size); return -EINVAL; } dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev); if (!dsaf_dev->misc_op) return -ENOMEM; if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL))) dev_dbg(dsaf_dev->dev, "set mask to 64bit\n"); else dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n"); return 0; } /** * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en * @dsaf_id: dsa fabric id */ static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev) { dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1); } /** * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce * @dsaf_id: dsa fabric id * @hns_dsaf_reg_cnt_clr_ce: config value */ static void hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce) { 
dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG, DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce); } /** * hns_ppe_qid_cfg - config ppe qid * @dsaf_id: dsa fabric id * @pppe_qid_cfg: value array */ static void hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg) { u32 i; for (i = 0; i < DSAF_COMM_CHN; i++) { dsaf_set_dev_field(dsaf_dev, DSAF_PPE_QID_CFG_0_REG + 0x0004 * i, DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S, qid_cfg); } } static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) { u16 max_q_per_vf, max_vfn; u32 q_id, q_num_per_port; u32 i; hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) { dsaf_set_dev_field(dsaf_dev, DSAF_MIX_DEF_QID_0_REG + 0x0004 * i, 0xff, 0, q_id); q_id += q_num_per_port; } } static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) { u16 max_q_per_vf, max_vfn; u32 q_id, q_num_per_port; u32 mac_id; if (AE_IS_VER1(dsaf_dev->dsaf_ver)) return; hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { dsaf_set_dev_field(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, DSAFV2_SERDES_LBK_QID_M, DSAFV2_SERDES_LBK_QID_S, q_id); q_id += q_num_per_port; } } /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id * @psw_port_type: array */ static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev, enum dsaf_sw_port_type port_type) { u32 i; for (i = 0; i < DSAF_SW_PORT_NUM; i++) { dsaf_set_dev_field(dsaf_dev, DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i, DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S, port_type); } } /** * hns_dsaf_stp_port_type_cfg - cfg stp type * @dsaf_id: dsa fabric id * @pstp_port_type: array */ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, enum dsaf_stp_port_type port_type) { u32 i; for (i = 0; i < DSAF_COMM_CHN; i++) { 
dsaf_set_dev_field(dsaf_dev, DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i, DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S, port_type); } } #define HNS_DSAF_SBM_NUM(dev) \ (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM) /** * hns_dsaf_sbm_cfg - config sbm * @dsaf_id: dsa fabric id */ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) { u32 o_sbm_cfg; u32 i; for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { o_sbm_cfg = dsaf_read_dev(dsaf_dev, DSAF_SBM_CFG_REG_0_REG + 0x80 * i); dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1); dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0); dsaf_write_dev(dsaf_dev, DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg); } } /** * hns_dsaf_sbm_cfg_mib_en - config sbm * @dsaf_id: dsa fabric id */ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) { u32 sbm_cfg_mib_en; u32 i; u32 reg; u32 read_cnt; /* validate configure by setting SBM_CFG_MIB_EN bit from 0 to 1. */ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0); } for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1); } /* waitint for all sbm enable finished */ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { read_cnt = 0; reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; do { udelay(1); sbm_cfg_mib_en = dsaf_get_dev_bit( dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S); read_cnt++; } while (sbm_cfg_mib_en == 0 && read_cnt < DSAF_CFG_READ_CNT); if (sbm_cfg_mib_en == 0) { dev_err(dsaf_dev->dev, "sbm_cfg_mib_en fail,%s,sbm_num=%d\n", dsaf_dev->ae_dev.name, i); return -ENODEV; } } return 0; } /** * hns_dsaf_sbm_bp_wl_cfg - config sbm * @dsaf_id: dsa fabric id */ static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { u32 o_sbm_bp_cfg; u32 reg; u32 i; /* XGE */ for (i = 0; i < DSAF_XGE_NUM; i++) { reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = 
dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 104); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, 
DSAF_SBM_CFG2_SET_BUF_NUM_S, 10); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* RoCEE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 2); dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { u32 o_sbm_bp_cfg; u32 reg; u32 i; /* XGE */ for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) { reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, 
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M, DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M, DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M, DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* RoCEE */ for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M, DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M, DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } /** * hns_dsaf_voq_bp_all_thrd_cfg - voq * @dsaf_id: dsa fabric id */ static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev) { u32 voq_bp_all_thrd; u32 i; for (i = 0; i < DSAF_VOQ_NUM; i++) { voq_bp_all_thrd = dsaf_read_dev( dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i); if (i < DSAF_XGE_NUM) { dsaf_set_field(voq_bp_all_thrd, DSAF_VOQ_BP_ALL_DOWNTHRD_M, DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930); 
dsaf_set_field(voq_bp_all_thrd, DSAF_VOQ_BP_ALL_UPTHRD_M, DSAF_VOQ_BP_ALL_UPTHRD_S, 950); } else { dsaf_set_field(voq_bp_all_thrd, DSAF_VOQ_BP_ALL_DOWNTHRD_M, DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220); dsaf_set_field(voq_bp_all_thrd, DSAF_VOQ_BP_ALL_UPTHRD_M, DSAF_VOQ_BP_ALL_UPTHRD_S, 230); } dsaf_write_dev( dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i, voq_bp_all_thrd); } } static void hns_dsaf_tbl_tcam_match_cfg( struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_data *ptbl_tcam_data) { dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_L_REG, ptbl_tcam_data->tbl_tcam_data_low); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_H_REG, ptbl_tcam_data->tbl_tcam_data_high); } /** * hns_dsaf_tbl_tcam_data_cfg - tbl * @dsaf_id: dsa fabric id * @ptbl_tcam_data: addr */ static void hns_dsaf_tbl_tcam_data_cfg( struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_data *ptbl_tcam_data) { dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG, ptbl_tcam_data->tbl_tcam_data_low); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG, ptbl_tcam_data->tbl_tcam_data_high); } /** * dsaf_tbl_tcam_mcast_cfg - tbl * @dsaf_id: dsa fabric id * @ptbl_tcam_mcast: addr */ static void hns_dsaf_tbl_tcam_mcast_cfg( struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_mcast_cfg *mcast) { u32 mcast_cfg4; mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S, mcast->tbl_mcast_item_vld); dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S, mcast->tbl_mcast_old_en); dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M, DSAF_TBL_MCAST_CFG4_VM128_112_S, mcast->tbl_mcast_port_msk[4]); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, mcast->tbl_mcast_port_msk[3]); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, mcast->tbl_mcast_port_msk[2]); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, mcast->tbl_mcast_port_msk[1]); dsaf_write_dev(dsaf_dev, 
DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, mcast->tbl_mcast_port_msk[0]); } /** * hns_dsaf_tbl_tcam_ucast_cfg - tbl * @dsaf_id: dsa fabric id * @ptbl_tcam_ucast: addr */ static void hns_dsaf_tbl_tcam_ucast_cfg( struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast) { u32 ucast_cfg1; ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S, tbl_tcam_ucast->tbl_ucast_mac_discard); dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S, tbl_tcam_ucast->tbl_ucast_item_vld); dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S, tbl_tcam_ucast->tbl_ucast_old_en); dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S, tbl_tcam_ucast->tbl_ucast_dvc); dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M, DSAF_TBL_UCAST_CFG1_OUT_PORT_S, tbl_tcam_ucast->tbl_ucast_out_port); dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1); } /** * hns_dsaf_tbl_line_cfg - tbl * @dsaf_id: dsa fabric id * @ptbl_lin: addr */ static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev, struct dsaf_tbl_line_cfg *tbl_lin) { u32 tbl_line; tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG); dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S, tbl_lin->tbl_line_mac_discard); dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S, tbl_lin->tbl_line_dvc); dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M, DSAF_TBL_LINE_CFG_OUT_PORT_S, tbl_lin->tbl_line_out_port); dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line); } /** * hns_dsaf_tbl_tcam_mcast_pul - tbl * @dsaf_id: dsa fabric id */ static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev) { u32 o_tbl_pul; o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); } /** * hns_dsaf_tbl_line_pul - tbl * 
@dsaf_id: dsa fabric id */ static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev) { u32 tbl_pul; tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG); dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul); dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul); } /** * hns_dsaf_tbl_tcam_data_mcast_pul - tbl * @dsaf_id: dsa fabric id */ static void hns_dsaf_tbl_tcam_data_mcast_pul( struct dsaf_device *dsaf_dev) { u32 o_tbl_pul; o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); } /** * hns_dsaf_tbl_tcam_data_ucast_pul - tbl * @dsaf_id: dsa fabric id */ static void hns_dsaf_tbl_tcam_data_ucast_pul( struct dsaf_device *dsaf_dev) { u32 o_tbl_pul; o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0); dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0); dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); } void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) { if (AE_IS_VER1(dsaf_dev->dsaf_ver) && !HNS_DSAF_IS_DEBUG(dsaf_dev)) dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); } /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id * @ptbl_stat_en: addr */ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev) { u32 o_tbl_ctrl; o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG); dsaf_set_bit(o_tbl_ctrl, 
DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
	dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
}

/**
 * hns_dsaf_rocee_bp_en - enable RoCEE back-pressure
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Only v1 hardware sets the XGE TX pause bit here; later versions need
 * no action in this path.
 */
static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
{
	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
		dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
				 DSAF_FC_XGE_TX_PAUSE_S, 1);
}

/* set mask for dsaf exception irq; one 32-bit mask register per channel */
static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
				     u32 chnn_num, u32 mask_set)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
}

static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
				     u32 chnn_num, u32 msk_set)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
}

static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
				       u32 chnn, u32 msk_set)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
}

static void
hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
{
	dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
}

/* clear dsaf exception irq source; write-1-to-clear per channel */
static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
				     u32 chnn_num, u32 int_src)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
}

static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
				     u32 chnn, u32 int_src)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
}

static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
				       u32 chnn, u32 int_src)
{
	dsaf_write_dev(dsaf_dev,
		       DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
}

static void
hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev, u32 int_src)
{
	dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
}

/**
 * hns_dsaf_single_line_tbl_cfg - write one direct (line) table entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: line table index
 * @ptbl_line: entry content to program
 *
 * Hardware requires the addr -> data -> pulse sequence under tcam_lock.
 */
static void hns_dsaf_single_line_tbl_cfg(
	struct dsaf_device *dsaf_dev,
	u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);

	/*Write Line*/
	hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);

	/*Write Plus (pulse commits the staged entry)*/
	hns_dsaf_tbl_line_pul(dsaf_dev);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_uc_cfg - program a unicast TCAM entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: tcam index
 * @ptbl_tcam_data: tcam key data
 * @ptbl_tcam_ucast: unicast result config
 */
static void hns_dsaf_tcam_uc_cfg(
	struct dsaf_device *dsaf_dev, u32 address,
	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
	struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
	/*Write Tcam Data*/
	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
	/*Write Tcam Ucast*/
	hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
	/*Write Plus (pulse commits the staged entry)*/
	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_mc_cfg - cfg the tcam for mc
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: tcam index
 * @ptbl_tcam_data: tcam data struct pointer
 * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1
 * @ptbl_tcam_mcast: multicast result config
 */
static void hns_dsaf_tcam_mc_cfg(
	struct dsaf_device *dsaf_dev, u32 address,
	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
	struct dsaf_tbl_tcam_data *ptbl_tcam_mask,
	struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
	/*Write Tcam Data*/
	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
	/*Write Tcam Mcast*/
	hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
	/* Write Match Data (v2 fuzzy-match mask; skipped on HNSv1) */
	if (ptbl_tcam_mask)
		hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, ptbl_tcam_mask);

	/* Write Puls (pulse commits the staged entry) */
	hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_mc_invld - invalidate a multicast TCAM entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: tcam index to clear
 *
 * Zeroes all five mcast config words (clears item-valid), then pulses.
 */
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);

	/*write tcam mcast*/
	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);

	/*Write Plus (pulse commits the cleared entry)*/
	hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/* extract the 6-byte MAC address stored in a tcam key */
void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
{
	addr[0] = mac_key->high.bits.mac_0;
	addr[1] = mac_key->high.bits.mac_1;
	addr[2] = mac_key->high.bits.mac_2;
	addr[3] = mac_key->high.bits.mac_3;
	addr[4] = mac_key->low.bits.mac_4;
	addr[5] = mac_key->low.bits.mac_5;
}

/**
 * hns_dsaf_tcam_uc_get - read back a unicast TCAM entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: tcam index
 * @ptbl_tcam_data: filled with the tcam key data read back
 * @ptbl_tcam_ucast: filled with the decoded unicast result config
 */
static void hns_dsaf_tcam_uc_get(
	struct dsaf_device *dsaf_dev, u32 address,
	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
	struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
{
	u32 tcam_read_data0;
	u32 tcam_read_data4;

	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);

	/*read tcam item puls (latch entry into the read-data registers)*/
	hns_dsaf_tbl_tcam_load_pul(dsaf_dev);

	/*read tcam data*/
	ptbl_tcam_data->tbl_tcam_data_high
		= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
	ptbl_tcam_data->tbl_tcam_data_low
		= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);

	/*read tcam mcast*/
	tcam_read_data0 = dsaf_read_dev(dsaf_dev,
					DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
	tcam_read_data4 = dsaf_read_dev(dsaf_dev,
					DSAF_TBL_TCAM_RAM_RDATA4_0_REG);

	/* valid/aging flags live in word 4; forwarding config in word 0 */
	ptbl_tcam_ucast->tbl_ucast_item_vld
		= dsaf_get_bit(tcam_read_data4,
			       DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
	ptbl_tcam_ucast->tbl_ucast_old_en
		= dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
	ptbl_tcam_ucast->tbl_ucast_mac_discard
		= dsaf_get_bit(tcam_read_data0,
			       DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
	ptbl_tcam_ucast->tbl_ucast_out_port
		= dsaf_get_field(tcam_read_data0,
				 DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
				 DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
	ptbl_tcam_ucast->tbl_ucast_dvc
		= dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tcam_mc_get - read back a multicast TCAM entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @address: tcam index
 * @ptbl_tcam_data: filled with the tcam key data read back
 * @ptbl_tcam_mcast: filled with the decoded multicast port mask/flags
 */
static void hns_dsaf_tcam_mc_get(
	struct dsaf_device *dsaf_dev, u32 address,
	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
	struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
	u32 data_tmp;

	spin_lock_bh(&dsaf_dev->tcam_lock);

	/*Write Addr*/
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);

	/*read tcam item puls (latch entry into the read-data registers)*/
	hns_dsaf_tbl_tcam_load_pul(dsaf_dev);

	/*read tcam data*/
	ptbl_tcam_data->tbl_tcam_data_high =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
	ptbl_tcam_data->tbl_tcam_data_low =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);

	/*read tcam mcast: port mask words 0-3 are full registers */
	ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
	ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
	ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
	ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);

	/* word 4 packs the valid/aging bits plus port mask bits 112..128 */
	data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
	ptbl_tcam_mcast->tbl_mcast_item_vld =
		dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
	ptbl_tcam_mcast->tbl_mcast_old_en =
		dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
	ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
		dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
			       DSAF_TBL_MCAST_CFG4_VM128_112_S);

	spin_unlock_bh(&dsaf_dev->tcam_lock);
}

/**
 * hns_dsaf_tbl_line_init - initialize the direct (line) table
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
{
	u32 i;
	/* by default set every line table entry to "discard" */
	struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };

	for (i = 0; i < DSAF_LINE_SUM; i++)
		hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
}

/**
 * hns_dsaf_tbl_tcam_init - initialize (zero) every TCAM entry
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
{
	u32 i;
	struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
	struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };

	/*tcam tbl: write all-zero key and result to each slot */
	for (i = 0; i < DSAF_TCAM_SUM; i++)
		hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
}

/**
 * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_id: mac (port) index
 * @tc_en: per-TC enable bitmap written to the PFC enable register
 */
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
				int mac_id, int tc_en)
{
	dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
}

/* configure PFC pause tx/rx per port; v1 hardware cannot disable it */
static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
				   int mac_id, int tx_en, int rx_en)
{
	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
		if (!tx_en || !rx_en)
			dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");

		return;
	}

	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
			 DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
			 DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
}

/* enable/disable rx MAC pause; v1 hardware cannot disable it (-EINVAL) */
int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
				 u32 en)
{
	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
		if (!en) {
			dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
			return -EINVAL;
		}
	}

	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
			 DSAF_MAC_PAUSE_RX_EN_B, !!en);

	return 0;
}

/* report rx MAC pause state; v1 hardware always has it enabled */
void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
				  u32 *en)
{
	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
		*en = 1;
	else
		*en = dsaf_get_dev_bit(dsaf_dev,
				       DSAF_PAUSE_CFG_REG + mac_id * 4,
				       DSAF_MAC_PAUSE_RX_EN_B);
}

/**
 * hns_dsaf_comm_init - common dsaf register initialization
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Programs the global DSAF_CFG register, queue-id mappings, port types,
 * PFC defaults, and masks/clears all exception interrupts.
 */
static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
	u32 i;
	u32 o_dsaf_cfg;
	bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);

	/* read-modify-write the global config register */
	o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
	dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);

	hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
	hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);

	/* set 22 queue per tx ppe engine, only used in switch mode */
	hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);

	/* set promisc def queue id */
	hns_dsaf_mix_def_qid_cfg(dsaf_dev);

	/* set inner loopback queue id */
	hns_dsaf_inner_qid_cfg(dsaf_dev);

	/* in non switch mode, set all port to access mode */
	hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);

	/*set dsaf pfc to 0 for parsing rx pause*/
	for (i = 0; i < DSAF_COMM_CHN; i++) {
		hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
		hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
	}

	/*mask and clear exception irqs */
	for (i = 0; i < DSAF_COMM_CHN; i++) {
		hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
		hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
		hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);

		hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
		hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
		hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
	}
	hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
	hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
}

/**
 * hns_dsaf_inode_init - initialize the XBAR inodes
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Maps each inode to its input port(s) (layout differs between v1 and
 * v2 hardware) and programs the per-inode priority/TC config.
 */
static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
{
	u32 reg;
	u32 tc_cfg;
	u32 i;

	if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
		tc_cfg = HNS_DSAF_I4TC_CFG;
	else
		tc_cfg = HNS_DSAF_I8TC_CFG;

	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
		/* v1: one input port field per inode register */
		for (i = 0; i < DSAF_INODE_NUM; i++) {
			reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAF_INODE_IN_PORT_NUM_M,
					   DSAF_INODE_IN_PORT_NUM_S,
					   i % DSAF_XGE_NUM);
		}
	} else {
		/* v2: six input port fields packed into each register */
		for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) {
			reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAF_INODE_IN_PORT_NUM_M,
					   DSAF_INODE_IN_PORT_NUM_S, 0);
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAFV2_INODE_IN_PORT1_NUM_M,
					   DSAFV2_INODE_IN_PORT1_NUM_S, 1);
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAFV2_INODE_IN_PORT2_NUM_M,
					   DSAFV2_INODE_IN_PORT2_NUM_S, 2);
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAFV2_INODE_IN_PORT3_NUM_M,
					   DSAFV2_INODE_IN_PORT3_NUM_S, 3);
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAFV2_INODE_IN_PORT4_NUM_M,
					   DSAFV2_INODE_IN_PORT4_NUM_S, 4);
			dsaf_set_dev_field(dsaf_dev, reg,
					   DSAFV2_INODE_IN_PORT5_NUM_M,
					   DSAFV2_INODE_IN_PORT5_NUM_S, 5);
		}
	}
	for (i = 0; i < DSAF_INODE_NUM; i++) {
		reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
		dsaf_write_dev(dsaf_dev, reg, tc_cfg);
	}
}

/**
 * hns_dsaf_sbm_init - initialize the SBM (shared buffer manager)
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Configures buffer watermarks, enables the MIB, then triggers the link
 * SRAM init and polls for completion (up to DSAF_CFG_READ_CNT tries).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
{
	u32 flag;
	u32 finish_msk;
	u32 cnt = 0;
	int ret;

	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
		hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
		finish_msk = DSAF_SRAM_INIT_OVER_M;
	} else {
		hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev);
		finish_msk = DSAFV2_SRAM_INIT_OVER_M;
	}

	/* enable sbm channel, disable sbm channel shortcut function*/
	hns_dsaf_sbm_cfg(dsaf_dev);

	/* enable sbm mib */
	ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
	if (ret) {
		dev_err(dsaf_dev->dev,
			"hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
			dsaf_dev->ae_dev.name, ret);
		return ret;
	}

	/* enable sbm initial link sram */
	hns_dsaf_sbm_link_sram_init_en(dsaf_dev);

	do {
		usleep_range(200, 210);/*udelay(200);*/
		flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG,
					  finish_msk, DSAF_SRAM_INIT_OVER_S);
		cnt++;
	} while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) &&
		 cnt < DSAF_CFG_READ_CNT);

	if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) {
		dev_err(dsaf_dev->dev,
			"hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
			dsaf_dev->ae_dev.name, flag, cnt);
		return -ENODEV;
	}

	hns_dsaf_rocee_bp_en(dsaf_dev);

	return 0;
}

/**
 * hns_dsaf_tbl_init - initialize the lookup tables (stats, TCAM, line)
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
{
	hns_dsaf_tbl_stat_en(dsaf_dev);

	hns_dsaf_tbl_tcam_init(dsaf_dev);
	hns_dsaf_tbl_line_init(dsaf_dev);
}

/**
 * hns_dsaf_voq_init - initialize VOQ back-pressure thresholds
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
{
	hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
}

/**
 * hns_dsaf_init_hw - init dsa fabric hardware
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Resets the fabric, then initializes common registers, inodes, SBM,
 * tables and VOQ in order.
 *
 * Return: 0 on success, negative errno if SBM init fails.
 */
static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
{
	int ret;

	dev_dbg(dsaf_dev->dev,
		"hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);

	/* assert reset, settle, then de-assert */
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
	mdelay(10);
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);

	hns_dsaf_comm_init(dsaf_dev);

	/*init XBAR_INODE*/
	hns_dsaf_inode_init(dsaf_dev);

	/*init SBM*/
	ret = hns_dsaf_sbm_init(dsaf_dev);
	if (ret)
		return ret;

	/*init TBL*/
	hns_dsaf_tbl_init(dsaf_dev);

	/*init VOQ*/
	hns_dsaf_voq_init(dsaf_dev);

	return 0;
}

/**
 * hns_dsaf_remove_hw - uninit dsa fabric hardware
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
{
	/*reset (hold the fabric in reset)*/
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
}

/**
 * hns_dsaf_init - init dsa fabric
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Initializes the hardware and allocates the software shadow of the
 * TCAM mac table; every shadow slot starts out invalid.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
{
	struct dsaf_drv_priv *priv =
	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
	u32 i;
	int ret;

	if (HNS_DSAF_IS_DEBUG(dsaf_dev))
		return 0;

	/* v2 reserves the last entries for fuzzy-match use */
	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
		dsaf_dev->tcam_max_num = DSAF_TCAM_SUM;
	else
		dsaf_dev->tcam_max_num =
			DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM;

	spin_lock_init(&dsaf_dev->tcam_lock);
	ret = hns_dsaf_init_hw(dsaf_dev);
	if (ret)
		return ret;

	/* malloc mem for tcam mac key(vlan+mac) */
	priv->soft_mac_tbl = vzalloc(sizeof(*priv->soft_mac_tbl)
		  * DSAF_TCAM_SUM);
	if (!priv->soft_mac_tbl) {
		ret = -ENOMEM;
		goto remove_hw;
	}

	/*mark all entries invalid */
	for (i = 0; i < DSAF_TCAM_SUM; i++)
		(priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;

	return 0;

remove_hw:
	hns_dsaf_remove_hw(dsaf_dev);
	return ret;
}

/**
 * hns_dsaf_free - free dsa fabric
 * @dsaf_dev: dsa fabric device struct pointer
 */
static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
{
	struct dsaf_drv_priv *priv =
	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);

	hns_dsaf_remove_hw(dsaf_dev);

	/* free all mac mem */
	vfree(priv->soft_mac_tbl);
	priv->soft_mac_tbl = NULL;
}

/**
 * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_key: mac entry struct pointer
 *
 * Linear search of the software shadow for a valid entry whose key
 * matches exactly.
 *
 * Return: the entry index, or DSAF_INVALID_ENTRY_IDX if not found.
 */
static u16 hns_dsaf_find_soft_mac_entry(
	struct dsaf_device *dsaf_dev,
	struct dsaf_drv_tbl_tcam_key *mac_key)
{
	struct dsaf_drv_priv *priv =
	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
	u32 i;

	soft_mac_entry = priv->soft_mac_tbl;
	for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
		/* valid entry with matching key */
		if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
		    (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
		    (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
			/* return find result --soft index */
			return soft_mac_entry->index;

		soft_mac_entry++;
	}
	return DSAF_INVALID_ENTRY_IDX;
}

/**
 * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
 * @dsaf_dev: dsa fabric device struct pointer
 *
 * Return: index of the first free shadow slot, or DSAF_INVALID_ENTRY_IDX.
 */
static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
{
	struct dsaf_drv_priv *priv =
	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
	u32 i;

	soft_mac_entry = priv->soft_mac_tbl;
	for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
		/* first invalid (free) slot */
		if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
			/* return find result --soft index */
			return i;

		soft_mac_entry++;
	}
	return DSAF_INVALID_ENTRY_IDX;
}

/**
 * hns_dsaf_set_mac_key - set mac key
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_key: tcam key pointer
 * @vlan_id: vlan id
 * @in_port_num: input port num
 * @addr: mac addr
 */
static void hns_dsaf_set_mac_key( struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num, u8 *addr) { u8 port; if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) /*DSAF mode : in port id fixed 0*/ port = 0; else /*non-dsaf mode*/ port = in_port_num; mac_key->high.bits.mac_0 = addr[0]; mac_key->high.bits.mac_1 = addr[1]; mac_key->high.bits.mac_2 = addr[2]; mac_key->high.bits.mac_3 = addr[3]; mac_key->low.bits.mac_4 = addr[4]; mac_key->low.bits.mac_5 = addr[5]; mac_key->low.bits.port_vlan = 0; dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); } /** * hns_dsaf_set_mac_uc_entry - set mac uc-entry * @dsaf_dev: dsa fabric device struct pointer * @mac_entry: uc-mac entry */ int hns_dsaf_set_mac_uc_entry( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { u16 entry_index = DSAF_INVALID_ENTRY_IDX; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_priv *priv = (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; struct dsaf_tbl_tcam_data tcam_data; /* mac addr check */ if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr) || MAC_IS_MULTICAST(mac_entry->addr)) { dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } /* config key */ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id, mac_entry->in_port_num, mac_entry->addr); /* entry ie exist? 
*/ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /*if has not inv entry,find a empty entry */ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /* has not empty,return error */ dev_err(dsaf_dev->dev, "set_uc_entry failed, %s Mac key(%#x:%#x)\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val); return -EINVAL; } } dev_dbg(dsaf_dev->dev, "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); /* config hardware entry */ mac_data.tbl_ucast_item_vld = 1; mac_data.tbl_ucast_mac_discard = 0; mac_data.tbl_ucast_old_en = 0; /* default config dvc to 0 */ mac_data.tbl_ucast_dvc = 0; mac_data.tbl_ucast_out_port = mac_entry->port_num; tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); /* config software entry */ soft_mac_entry += entry_index; soft_mac_entry->index = entry_index; soft_mac_entry->tcam_key.high.val = mac_key.high.val; soft_mac_entry->tcam_key.low.val = mac_key.low.val; return 0; } int hns_dsaf_rm_mac_addr( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { u16 entry_index = DSAF_INVALID_ENTRY_IDX; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_tbl_tcam_key mac_key; /* mac addr check */ if (!is_valid_ether_addr(mac_entry->addr)) { dev_err(dsaf_dev->dev, "rm_uc_addr %s Mac %pM err!\n", dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } /* config key */ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id, mac_entry->in_port_num, mac_entry->addr); entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /* can not find the tcam entry, return 0 */ dev_info(dsaf_dev->dev, "rm_uc_addr no tcam, %s Mac key(%#x:%#x)\n", 
dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val); return 0; } dev_dbg(dsaf_dev->dev, "rm_uc_addr, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); hns_dsaf_tcam_uc_get( dsaf_dev, entry_index, (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data); /* unicast entry not used locally should not clear */ if (mac_entry->port_num != mac_data.tbl_ucast_out_port) return -EFAULT; return hns_dsaf_del_mac_entry(dsaf_dev, mac_entry->in_vlan_id, mac_entry->in_port_num, mac_entry->addr); } static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) { u16 *a = (u16 *)dst; const u16 *b = (const u16 *)src; a[0] &= b[0]; a[1] &= b[1]; a[2] &= b[2]; } /** * hns_dsaf_add_mac_mc_port - add mac mc-port * @dsaf_dev: dsa fabric device struct pointer * @mac_entry: mc-mac entry */ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { u16 entry_index = DSAF_INVALID_ENTRY_IDX; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_tbl_tcam_key mask_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; struct dsaf_tbl_tcam_mcast_cfg mac_data; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; struct dsaf_drv_tbl_tcam_key tmp_mac_key; struct dsaf_tbl_tcam_data tcam_data; u8 mc_addr[ETH_ALEN]; u8 *mc_mask; int mskid; /*chechk mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", mac_entry->addr); return -EINVAL; } ether_addr_copy(mc_addr, mac_entry->addr); mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { /* prepare for key data setting */ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x0, 0xff, mc_mask); mask_key.high.val = le32_to_cpu(mask_key.high.val); mask_key.low.val = le32_to_cpu(mask_key.low.val); pmask_key = (struct 
dsaf_tbl_tcam_data *)(&mask_key); } /*config key */ hns_dsaf_set_mac_key( dsaf_dev, &mac_key, mac_entry->in_vlan_id, mac_entry->in_port_num, mc_addr); memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg)); /* check if the tcam is exist */ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /*if hasnot , find a empty*/ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /*if hasnot empty, error*/ dev_err(dsaf_dev->dev, "set_uc_entry failed, %s Mac key(%#x:%#x)\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val); return -EINVAL; } } else { /* if exist, add in */ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); } /* config hardware entry */ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) { mskid = mac_entry->port_num; } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) { mskid = mac_entry->port_num - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; } else { dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", dsaf_dev->ae_dev.name, mac_entry->port_num, mac_key.high.val, mac_key.low.val); return -EINVAL; } dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1); mac_data.tbl_mcast_old_en = 0; mac_data.tbl_mcast_item_vld = 1; dev_dbg(dsaf_dev->dev, "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); /* config mc entry with mask */ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, pmask_key, &mac_data); /*config software entry */ soft_mac_entry += entry_index; soft_mac_entry->index = entry_index; soft_mac_entry->tcam_key.high.val = mac_key.high.val; soft_mac_entry->tcam_key.low.val = 
mac_key.low.val; return 0; } /** * hns_dsaf_del_mac_entry - del mac mc-port * @dsaf_dev: dsa fabric device struct pointer * @vlan_id: vlian id * @in_port_num: input port num * @addr : mac addr */ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, u8 in_port_num, u8 *addr) { u16 entry_index = DSAF_INVALID_ENTRY_IDX; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; /*check mac addr */ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", addr); return -EINVAL; } /*config key */ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr); /*exist ?*/ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); if (entry_index == DSAF_INVALID_ENTRY_IDX) { /*not exist, error */ dev_err(dsaf_dev->dev, "del_mac_entry failed, %s Mac key(%#x:%#x)\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val); return -EINVAL; } dev_dbg(dsaf_dev->dev, "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); /*do del opt*/ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index); /*del soft emtry */ soft_mac_entry += entry_index; soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; return 0; } /** * hns_dsaf_del_mac_mc_port - del mac mc- port * @dsaf_dev: dsa fabric device struct pointer * @mac_entry: mac entry */ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { u16 entry_index = DSAF_INVALID_ENTRY_IDX; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; u16 vlan_id; u8 in_port_num; struct dsaf_tbl_tcam_mcast_cfg mac_data; struct dsaf_tbl_tcam_data tcam_data; int mskid; const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0}; 
struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
	struct dsaf_tbl_tcam_data *pmask_key = NULL;
	u8 mc_addr[ETH_ALEN];
	u8 *mc_mask;

	if (!(void *)mac_entry) {
		dev_err(dsaf_dev->dev,
			"hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
		return -EINVAL;
	}

	/* check mac addr: an all-zero address can never be a valid entry */
	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
		dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
			mac_entry->addr);
		return -EINVAL;
	}

	/* always mask vlan_id field */
	ether_addr_copy(mc_addr, mac_entry->addr);
	mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;

	/* V2+ hardware supports a TCAM mask; V1 leaves pmask_key NULL */
	if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
		/* prepare for key data setting */
		hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);

		/* config key mask */
		hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr);

		mask_key.high.val = le32_to_cpu(mask_key.high.val);
		mask_key.low.val = le32_to_cpu(mask_key.low.val);

		pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
	}

	/* get key info */
	vlan_id = mac_entry->in_vlan_id;
	in_port_num = mac_entry->in_port_num;

	/* config key */
	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
			     mc_addr);

	/* check if the tcam entry is exist */
	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
		/* find none */
		dev_err(dsaf_dev->dev,
			"find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
			dsaf_dev->ae_dev.name,
			mac_key.high.val, mac_key.low.val);
		return -EINVAL;
	}

	dev_dbg(dsaf_dev->dev,
		"del_mac_mc_port, %s key(%#x:%#x) index%d\n",
		dsaf_dev->ae_dev.name, mac_key.high.val,
		mac_key.low.val, entry_index);

	/* read the current multicast destination mask from hardware */
	hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
			     &tcam_data, &mac_data);

	/* NOTE(review): tmp_mac_key receives the CPU-order key read back from
	 * hardware but is not referenced again within this span.
	 */
	tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
	tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);

	/* del the port: map port_num onto its bit index in the mask */
	if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
		mskid = mac_entry->port_num;
	} else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
		mskid = mac_entry->port_num -
			DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
	} else {
dev_err(dsaf_dev->dev,
			"%s,pnum(%d)error,key(%#x:%#x)\n",
			dsaf_dev->ae_dev.name, mac_entry->port_num,
			mac_key.high.val, mac_key.low.val);
		return -EINVAL;
	}

	/* clear this port's bit in the multicast destination mask */
	dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);

	/* if no destination ports remain, delete the whole entry */
	if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
		    sizeof(mac_data.tbl_mcast_port_msk))) {
		/* invalidate the hardware TCAM entry */
		hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);

		/* del soft entry */
		soft_mac_entry += entry_index;
		soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
	} else { /* not zero, just del port, update */
		tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
		tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);

		hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
				     &tcam_data,
				     pmask_key, &mac_data);
	}

	return 0;
}

/**
 * hns_dsaf_clr_mac_mc_port - drop @port_num from every multicast entry
 *	learned on MAC @mac_id
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_id: MAC (input port) whose entries are scanned
 * @port_num: destination port to remove
 *
 * Returns 0 on success, -EINVAL if any per-entry delete failed; the scan
 * continues over the remaining entries either way.
 */
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
			     u8 port_num)
{
	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
	struct dsaf_tbl_tcam_mcast_cfg mac_data;
	int ret = 0, i;

	if (HNS_DSAF_IS_DEBUG(dsaf_dev))
		return 0;

	/* walk all non-fuzzy TCAM slots via the software shadow table */
	for (i = 0; i < DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM; i++) {
		u8 addr[ETH_ALEN];
		u8 port;

		soft_mac_entry = priv->soft_mac_tbl + i;
		hns_dsaf_tcam_addr_get(&soft_mac_entry->tcam_key, addr);
		port = dsaf_get_field(
			soft_mac_entry->tcam_key.low.bits.port_vlan,
			DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S);
		/* check valid tcam mc entry */
		if (soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX &&
		    port == mac_id &&
		    is_multicast_ether_addr(addr) &&
		    !is_broadcast_ether_addr(addr)) {
			const u32 empty_msk[DSAF_PORT_MSK_NUM] = {0};
			struct dsaf_drv_mac_single_dest_entry mac_entry;

			/* disable receiving of this multicast address for
			 * the VF.
*/
			ether_addr_copy(mac_entry.addr, addr);
			mac_entry.in_vlan_id = dsaf_get_field(
				soft_mac_entry->tcam_key.low.bits.port_vlan,
				DSAF_TBL_TCAM_KEY_VLAN_M,
				DSAF_TBL_TCAM_KEY_VLAN_S);
			mac_entry.in_port_num = mac_id;
			mac_entry.port_num = port_num;

			if (hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry)) {
				ret = -EINVAL;
				continue;
			}

			/* disable receiving of this multicast address for
			 * the mac port if all VF are disable
			 */
			hns_dsaf_tcam_mc_get(dsaf_dev, i,
					     (struct dsaf_tbl_tcam_data *)
					     (&soft_mac_entry->tcam_key),
					     &mac_data);
			dsaf_set_bit(mac_data.tbl_mcast_port_msk[mac_id / 32],
				     mac_id % 32, 0);
			if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
				    sizeof(u32) * DSAF_PORT_MSK_NUM)) {
				mac_entry.port_num = mac_id;
				if (hns_dsaf_del_mac_mc_port(dsaf_dev,
							     &mac_entry)) {
					ret = -EINVAL;
					continue;
				}
			}
		}
	}

	return ret;
}

/**
 * hns_dsaf_alloc_dev - allocate a dsaf device plus driver-private area
 * @dev: generic device backing the fabric
 * @sizeof_priv: extra bytes to allocate after struct dsaf_device
 *
 * Returns the new device (also published as drvdata of @dev) or
 * ERR_PTR(-ENOMEM); memory is devm-managed, so no explicit free is needed.
 */
static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
					      size_t sizeof_priv)
{
	struct dsaf_device *dsaf_dev;

	dsaf_dev = devm_kzalloc(dev,
				sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
	if (unlikely(!dsaf_dev)) {
		dsaf_dev = ERR_PTR(-ENOMEM);
	} else {
		dsaf_dev->dev = dev;
		dev_set_drvdata(dev, dsaf_dev);
	}

	return dsaf_dev;
}

/**
 * hns_dsaf_free_dev - undo the drvdata link set up by hns_dsaf_alloc_dev
 * @dsaf_dev: dsaf device (the allocation itself is devm-managed)
 */
static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
{
	(void)dev_set_drvdata(dsaf_dev->dev, NULL);
}

/**
 * hns_dsaf_pfc_unit_cnt - set pfc unit count for one MAC
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_id: MAC index the per-port register offset is derived from
 * @rate: port rate mode selecting the unit count constant
 */
static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
				  enum dsaf_port_rate_mode rate)
{
	u32 unit_cnt;

	switch (rate) {
	case DSAF_PORT_RATE_10000:
		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
		break;
	case DSAF_PORT_RATE_1000:
		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
		break;
	case DSAF_PORT_RATE_2500:
		/* 2.5G deliberately shares the 1G unit count */
		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
		break;
	default:
		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
	}

	dsaf_set_dev_field(dsaf_dev,
			   (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
			   DSAF_PFC_UNINT_CNT_M,
DSAF_PFC_UNINT_CNT_S, unit_cnt);
}

/**
 * hns_dsaf_port_work_rate_cfg - configure port work mode (XGE vs GE)
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_id: MAC index selecting the per-port register
 * @rate_mode: selected port rate; 10G sets the XGE work-mode bit
 */
void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
				 enum dsaf_port_rate_mode rate_mode)
{
	u32 port_work_mode;

	port_work_mode = dsaf_read_dev(
		dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);

	if (rate_mode == DSAF_PORT_RATE_10000)
		dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
	else
		dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);

	dsaf_write_dev(dsaf_dev,
		       DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
		       port_work_mode);

	/* the PFC unit count must track the selected rate */
	hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
}

/**
 * hns_dsaf_fix_mac_mode - derive and apply the port rate from the PHY mode
 * @mac_cb: mac control block
 *
 * Only service ports are touched; XGMII maps to 10G, anything else to 1G.
 */
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
{
	enum dsaf_port_rate_mode mode;
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
	int mac_id = mac_cb->mac_id;

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return;
	if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
		mode = DSAF_PORT_RATE_10000;
	else
		mode = DSAF_PORT_RATE_1000;
	hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
}

/* Compute the input-priority-pause register address for priority @index;
 * registers are laid out in zones of DSAF_REG_PER_ZONE entries.
 */
static u32 hns_dsaf_get_inode_prio_reg(int index)
{
	int base_index, offset;
	u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;

	base_index = (index + 1) / DSAF_REG_PER_ZONE;
	offset = (index + 1) % DSAF_REG_PER_ZONE;

	return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
		DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
}

/* Accumulate the hardware counters of node @node_num into the driver's
 * running hw_stats (read-and-add; counters are not reset here).
 */
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
	struct dsaf_hw_stats *hw_stats = &dsaf_dev->hw_stats[node_num];
	bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
	int i;
	u32 reg_tmp;

	hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
		DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
	hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
		DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
	hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
		DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
	hw_stats->rx_pkt_id +=
dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num); reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num); hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->crc_false += dsaf_read_dev(dsaf_dev, DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->bp_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev, DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num); hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); /* pfc pause frame statistics stored in dsaf inode*/ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { for (i = 0; i < DSAF_PRIO_NR; i++) { reg_tmp = hns_dsaf_get_inode_prio_reg(i); hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev, reg_tmp + 0x4 * (u64)node_num); hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev, DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG + DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i + 0xF0 * (u64)node_num); } } hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev, DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num); } /** *hns_dsaf_get_regs - dump dsaf regs *@dsaf_dev: dsaf device *@data:data for value of regs */ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) { u32 i = 0; u32 j; u32 *p = data; u32 reg_tmp; bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); /* dsaf common registers */ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG); p[1] = dsaf_read_dev(ddev, 
DSAF_CFG_0_REG); p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG); p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG); p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG); p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG); p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG); p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG); p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG); p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4); p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4); p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4); p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4); p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4); p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4); p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4); p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4); p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4); p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4); p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4); p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4); p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4); p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4); p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4); for (i = 0; i < DSAF_SW_PORT_NUM; i++) p[24 + i] = dsaf_read_dev(ddev, DSAF_SW_PORT_TYPE_0_REG + i * 4); p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4); for (i = 0; i < DSAF_SW_PORT_NUM; i++) p[33 + i] = dsaf_read_dev(ddev, DSAF_PORT_DEF_VLAN_0_REG + i * 4); for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) p[41 + i] = dsaf_read_dev(ddev, DSAF_VM_DEF_VLAN_0_REG + i * 4); /* dsaf inode registers */ p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG); p[171] = dsaf_read_dev(ddev, DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80); for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) { j = i * 
DSAF_COMM_CHN + port; p[172 + i] = dsaf_read_dev(ddev, DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80); p[175 + i] = dsaf_read_dev(ddev, DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80); p[178 + i] = dsaf_read_dev(ddev, DSAF_INODE_BP_STATUS_0_REG + j * 0x80); p[181 + i] = dsaf_read_dev(ddev, DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80); p[184 + i] = dsaf_read_dev(ddev, DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80); p[187 + i] = dsaf_read_dev(ddev, DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80); p[190 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80); reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80); p[196 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80); p[199 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80); p[202 + i] = dsaf_read_dev(ddev, DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80); p[205 + i] = dsaf_read_dev(ddev, DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80); p[208 + i] = dsaf_read_dev(ddev, DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80); p[211 + i] = dsaf_read_dev(ddev, DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80); p[214 + i] = dsaf_read_dev(ddev, DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80); p[217 + i] = dsaf_read_dev(ddev, DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4); p[220 + i] = dsaf_read_dev(ddev, DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4); p[223 + i] = dsaf_read_dev(ddev, DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4); p[224 + i] = dsaf_read_dev(ddev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4); } p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[228 + i] = dsaf_read_dev(ddev, DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4); } p[231] = dsaf_read_dev(ddev, DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); /* dsaf inode registers */ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[232 + i] = 
dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); p[235 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80); p[238 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80); p[241 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80); p[244 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80); p[245 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80); p[248 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_0_0_REG + j * 0x80); p[251 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_1_0_REG + j * 0x80); p[254 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_2_0_REG + j * 0x80); p[257 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_3_0_REG + j * 0x80); p[260 + i] = dsaf_read_dev(ddev, DSAF_SBM_INER_ST_0_REG + j * 0x80); p[263 + i] = dsaf_read_dev(ddev, DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80); p[266 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80); p[269 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80); p[272 + i] = dsaf_read_dev(ddev, DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80); p[275 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80); p[278 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80); p[281 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80); p[284 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80); p[287 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80); p[290 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80); p[293 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80); p[296 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80); p[299 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80); p[302 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80); p[305 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80); p[308 + i] = dsaf_read_dev(ddev, 
DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80); } /* dsaf onode registers */ for (i = 0; i < DSAF_XOD_NUM; i++) { p[311 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90); p[319 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90); p[327 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90); p[335 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90); p[343 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90); p[351 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90); } p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90); p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[362 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_L_0_REG + j * 0x90); p[365 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_H_0_REG + j * 0x90); p[368 + i] = dsaf_read_dev(ddev, DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90); p[371 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90); p[374 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90); p[377 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90); p[380 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90); p[383 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90); p[386 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90); p[389 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90); } p[392] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90); p[393] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90); p[394] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90); p[395] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90); p[396] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90); p[397] = 
dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90); p[398] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90); p[399] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90); p[400] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90); p[401] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90); p[402] = dsaf_read_dev(ddev, DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90); p[403] = dsaf_read_dev(ddev, DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90); p[404] = dsaf_read_dev(ddev, DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90); /* dsaf voq registers */ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) { j = (i * DSAF_COMM_CHN + port) * 0x90; p[405 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_INVERT_EN_0_REG + j); p[408 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SRAM_PKT_NUM_0_REG + j); p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); p[414 + i] = dsaf_read_dev(ddev, DSAF_VOQ_OUT_PKT_NUM_0_REG + j); p[417 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_ERR_ADDR_0_REG + j); p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j); p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); p[426 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j); p[429 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j); p[432 + i] = dsaf_read_dev(ddev, DSAF_VOQ_PPE_XOD_REQ_0_REG + j); p[435 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j); p[438 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_ALL_THRD_0_REG + j); } /* dsaf tbl registers */ p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG); p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); p[449] = dsaf_read_dev(ddev, 
DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); for (i = 0; i < DSAF_SW_PORT_NUM; i++) { j = i * 0x8; p[464 + 2 * i] = dsaf_read_dev(ddev, DSAF_TBL_DA0_MIS_INFO1_0_REG + j); p[465 + 2 * i] = dsaf_read_dev(ddev, DSAF_TBL_DA0_MIS_INFO0_0_REG + j); } p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG); p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); /* dsaf other registers */ p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); p[494] = dsaf_read_dev(ddev, 
DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); p[495] = dsaf_read_dev(ddev, DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4); p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); if (!is_ver1) p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); /* mark end of dsaf regs */ for (i = 499; i < 504; i++) p[i] = 0xdddddddd; } static char *hns_dsaf_get_node_stats_strings(char *data, int node, struct dsaf_device *dsaf_dev) { char *buff = data; int i; bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node); buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); buff += ETH_GSTRING_LEN; if (node < DSAF_SERVICE_NW_NUM && !is_ver1) { for (i = 0; i < DSAF_PRIO_NR; i++) { snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR, ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts", node, i); snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR, 
ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts", node, i); buff += ETH_GSTRING_LEN; } buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN; } snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); buff += ETH_GSTRING_LEN; return buff; } static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, int node_num) { u64 *p = data; int i; struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num]; bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); p[0] = hw_stats->pad_drop; p[1] = hw_stats->man_pkts; p[2] = hw_stats->rx_pkts; p[3] = hw_stats->rx_pkt_id; p[4] = hw_stats->rx_pause_frame; p[5] = hw_stats->release_buf_num; p[6] = hw_stats->sbm_drop; p[7] = hw_stats->crc_false; p[8] = hw_stats->bp_drop; p[9] = hw_stats->rslt_drop; p[10] = hw_stats->local_addr_false; p[11] = hw_stats->vlan_drop; p[12] = hw_stats->stp_drop; if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) { for (i = 0; i < DSAF_PRIO_NR; i++) { p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i]; p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i]; } p[29] = hw_stats->tx_pkts; return &p[30]; } p[13] = hw_stats->tx_pkts; return &p[14]; } /** *hns_dsaf_get_stats - get dsaf statistic *@ddev: dsaf device *@data:statistic value *@port: port num */ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port) { u64 *p = data; int node_num = port; /* for ge/xge node info */ p = hns_dsaf_get_node_stats(ddev, p, node_num); /* for ppe node info */ node_num = port + DSAF_PPE_INODE_BASE; (void)hns_dsaf_get_node_stats(ddev, p, node_num); } /** *hns_dsaf_get_sset_count - get dsaf string set count *@stringset: type of values in data *return dsaf string name count */ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset) { bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); if (stringset == ETH_SS_STATS) { if (is_ver1) return DSAF_STATIC_NUM; else return DSAF_V2_STATIC_NUM; } return 0; } /** *hns_dsaf_get_strings - get dsaf string set *@stringset:srting set index *@data:strings name value *@port:port index */ void 
hns_dsaf_get_strings(int stringset, u8 *data, int port, struct dsaf_device *dsaf_dev) { char *buff = (char *)data; int node = port; if (stringset != ETH_SS_STATS) return; /* for ge/xge node info */ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); /* for ppe node info */ node = port + DSAF_PPE_INODE_BASE; (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); } /** *hns_dsaf_get_sset_count - get dsaf regs count *return dsaf regs count */ int hns_dsaf_get_regs_count(void) { return DSAF_DUMP_REGS_NUM; } /* Reserve the last TCAM entry for promisc support */ #define dsaf_promisc_tcam_entry(port) \ (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port)) void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, u32 port, bool enable) { struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; u16 entry_index; struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask; struct dsaf_tbl_tcam_mcast_cfg mac_data = {0}; if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev)) return; /* find the tcam entry index for promisc */ entry_index = dsaf_promisc_tcam_entry(port); memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); /* config key mask */ if (enable) { dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, 0xf); /* SUB_QID */ dsaf_set_bit(mac_data.tbl_mcast_port_msk[0], DSAF_SERVICE_NW_NUM, true); mac_data.tbl_mcast_item_vld = true; /* item_vld bit */ } else { mac_data.tbl_mcast_item_vld = false; /* item_vld bit */ } dev_dbg(dsaf_dev->dev, "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, tbl_tcam_data.high.val, tbl_tcam_data.low.val, entry_index); /* config promisc entry with mask */ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, (struct 
dsaf_tbl_tcam_data *)&tbl_tcam_data, (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask, &mac_data); /* config software entry */ soft_mac_entry += entry_index; soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; } /** * dsaf_probe - probo dsaf dev * @pdev: dasf platform device * retuen 0 - success , negative --fail */ static int hns_dsaf_probe(struct platform_device *pdev) { struct dsaf_device *dsaf_dev; int ret; dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv)); if (IS_ERR(dsaf_dev)) { ret = PTR_ERR(dsaf_dev); dev_err(&pdev->dev, "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret); return ret; } ret = hns_dsaf_get_cfg(dsaf_dev); if (ret) goto free_dev; ret = hns_dsaf_init(dsaf_dev); if (ret) goto free_dev; ret = hns_mac_init(dsaf_dev); if (ret) goto uninit_dsaf; ret = hns_ppe_init(dsaf_dev); if (ret) goto uninit_mac; ret = hns_dsaf_ae_init(dsaf_dev); if (ret) goto uninit_ppe; return 0; uninit_ppe: hns_ppe_uninit(dsaf_dev); uninit_mac: hns_mac_uninit(dsaf_dev); uninit_dsaf: hns_dsaf_free(dsaf_dev); free_dev: hns_dsaf_free_dev(dsaf_dev); return ret; } /** * dsaf_remove - remove dsaf dev * @pdev: dasf platform device */ static int hns_dsaf_remove(struct platform_device *pdev) { struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev); hns_dsaf_ae_uninit(dsaf_dev); hns_ppe_uninit(dsaf_dev); hns_mac_uninit(dsaf_dev); hns_dsaf_free(dsaf_dev); hns_dsaf_free_dev(dsaf_dev); return 0; } static const struct of_device_id g_dsaf_match[] = { {.compatible = "hisilicon,hns-dsaf-v1"}, {.compatible = "hisilicon,hns-dsaf-v2"}, {} }; MODULE_DEVICE_TABLE(of, g_dsaf_match); static struct platform_driver g_dsaf_driver = { .probe = hns_dsaf_probe, .remove = hns_dsaf_remove, .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, .acpi_match_table = hns_dsaf_acpi_match, }, }; module_platform_driver(g_dsaf_driver); /** * hns_dsaf_roce_reset - reset dsaf and roce * @dsaf_fwnode: Pointer to framework node for the dasf * @enable: false - 
request reset , true - drop reset * retuen 0 - success , negative -fail */ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) { struct dsaf_device *dsaf_dev; struct platform_device *pdev; u32 mp; u32 sl; u32 credit; int i; const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, }; const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, }; /* find the platform device corresponding to fwnode */ if (is_of_node(dsaf_fwnode)) { pdev = of_find_device_by_node(to_of_node(dsaf_fwnode)); } else if (is_acpi_device_node(dsaf_fwnode)) { pdev = hns_dsaf_find_platform_device(dsaf_fwnode); } else { pr_err("fwnode is neither OF or ACPI type\n"); return -EINVAL; } /* check if we were a success in fetching pdev */ if (!pdev) { pr_err("couldn't find platform device for node\n"); return -ENODEV; } /* retrieve the dsaf_device from the driver data */ dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); return -ENODEV; } /* now, make sure we are running on compatible SoC */ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support 
RoCE!\n", dsaf_dev->ae_dev.name); return -ENODEV; } /* do reset or de-reset according to the flag */ if (!dereset) { /* reset rocee-channels in dsaf and rocee */ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false); dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false); } else { /* configure dsaf tx roce correspond to port map and sl map */ mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG); for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) dsaf_set_field(mp, 7 << i * 3, i * 3, port_map[i][DSAF_ROCE_6PORT_MODE]); dsaf_set_field(mp, 3 << i * 3, i * 3, 0); dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp); sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG); for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) dsaf_set_field(sl, 3 << i * 2, i * 2, sl_map[i][DSAF_ROCE_6PORT_MODE]); dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl); /* de-reset rocee-channels in dsaf and rocee */ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true); msleep(SRST_TIME_INTERVAL); dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true); /* enable dsaf channel rocee credit */ credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG); dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_DESCRIPTION("HNS DSAF driver"); MODULE_VERSION(DSAF_MOD_VERSION);
989886.c
/*
   american fuzzy lop++ - GCC wrapper for GCC plugin
   ------------------------------------------------

   Written by Austin Seipp <[email protected]> and
              Laszlo Szekeres <[email protected]> and
              Michal Zalewski

   GCC integration design is based on the LLVM design, which comes
   from Laszlo Szekeres.

   Copyright 2015 Google Inc. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This program is a drop-in replacement for gcc, similar in most respects
   to ../afl-gcc, but with compiler instrumentation through a plugin. It
   tries to figure out compilation mode, adds a bunch of flags, and then
   calls the real compiler.

 */

#define AFL_MAIN

#include "config.h"
#include "types.h"
#include "debug.h"
#include "common.h"
#include "alloc-inl.h"

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>

static u8 * obj_path;       /* Path to runtime libraries         */
static u8 **cc_params;      /* Parameters passed to the real CC  */
static u32  cc_par_cnt = 1; /* Param count, including argv0      */
u8          use_stdin = 0;  /* dummy */
u8          be_quiet;

/* Try to find the runtime libraries. If that fails, abort.

   Search order: $AFL_PATH, then the directory argv0 lives in, then the
   compiled-in AFL_PATH. On success, sets the file-scope obj_path.
   NOTE(review): obj_path may alias the getenv() result or AFL_PATH (not
   owned), or a ck_strdup'd buffer (owned) — it is never freed; fine for a
   short-lived exec wrapper. */

static void find_obj(u8 *argv0) {

  u8 *afl_path = getenv("AFL_PATH");
  u8 *slash, *tmp;

  if (afl_path) {

    tmp = alloc_printf("%s/afl-gcc-rt.o", afl_path);

    if (!access(tmp, R_OK)) {

      obj_path = afl_path;
      ck_free(tmp);
      return;

    }

    ck_free(tmp);

  }

  /* Fall back to the directory the binary was invoked from; argv0 is
     temporarily truncated at the last '/' to extract it. */
  slash = strrchr(argv0, '/');

  if (slash) {

    u8 *dir;

    *slash = 0;
    dir = ck_strdup(argv0);
    *slash = '/';

    tmp = alloc_printf("%s/afl-gcc-rt.o", dir);

    if (!access(tmp, R_OK)) {

      obj_path = dir;
      ck_free(tmp);
      return;

    }

    ck_free(tmp);
    ck_free(dir);

  }

  /* Last resort: the build-time install prefix. */
  if (!access(AFL_PATH "/afl-gcc-rt.o", R_OK)) {

    obj_path = AFL_PATH;
    return;

  }

  FATAL(
      "Unable to find 'afl-gcc-rt.o' or 'afl-gcc-pass.so'. Please set "
      "AFL_PATH");

}

/* Copy argv to cc_params, making the necessary edits.
*/ static void edit_params(u32 argc, char **argv) { u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1; u8 *name; cc_params = ck_alloc((argc + 128) * sizeof(u8 *)); name = strrchr(argv[0], '/'); if (!name) name = argv[0]; else ++name; if (!strcmp(name, "afl-g++-fast")) { u8 *alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8 *)AFL_GCC_CXX; } else { u8 *alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? alt_cc : (u8 *)AFL_GCC_CC; } char *fplugin_arg = alloc_printf("-fplugin=%s/afl-gcc-pass.so", obj_path); cc_params[cc_par_cnt++] = fplugin_arg; /* Detect stray -v calls from ./configure scripts. */ if (argc == 1 && !strcmp(argv[1], "-v")) maybe_linking = 0; while (--argc) { u8 *cur = *(++argv); #if defined(__x86_64__) if (!strcmp(cur, "-m32")) FATAL("-m32 is not supported"); #endif if (!strcmp(cur, "-x")) x_set = 1; if (!strcmp(cur, "-c") || !strcmp(cur, "-S") || !strcmp(cur, "-E") || !strcmp(cur, "-v")) maybe_linking = 0; if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) asan_set = 1; if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; if (!strcmp(cur, "-shared")) maybe_linking = 0; cc_params[cc_par_cnt++] = cur; } if (getenv("AFL_HARDEN")) { cc_params[cc_par_cnt++] = "-fstack-protector-all"; if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; } if (!asan_set) { if (getenv("AFL_USE_ASAN")) { if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("ASAN and AFL_HARDEN are mutually exclusive"); cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE"; cc_params[cc_par_cnt++] = "-fsanitize=address"; } else if (getenv("AFL_USE_MSAN")) { if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("MSAN and AFL_HARDEN are mutually exclusive"); cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE"; cc_params[cc_par_cnt++] = "-fsanitize=memory"; } } if (getenv("AFL_USE_UBSAN")) { cc_params[cc_par_cnt++] = 
"-fsanitize=undefined"; cc_params[cc_par_cnt++] = "-fsanitize-undefined-trap-on-error"; cc_params[cc_par_cnt++] = "-fno-sanitize-recover=all"; } if (!getenv("AFL_DONT_OPTIMIZE")) { cc_params[cc_par_cnt++] = "-g"; cc_params[cc_par_cnt++] = "-O3"; cc_params[cc_par_cnt++] = "-funroll-loops"; } if (getenv("AFL_NO_BUILTIN")) { cc_params[cc_par_cnt++] = "-fno-builtin-strcmp"; cc_params[cc_par_cnt++] = "-fno-builtin-strncmp"; cc_params[cc_par_cnt++] = "-fno-builtin-strcasecmp"; cc_params[cc_par_cnt++] = "-fno-builtin-strncasecmp"; cc_params[cc_par_cnt++] = "-fno-builtin-memcmp"; cc_params[cc_par_cnt++] = "-fno-builtin-bcmp"; cc_params[cc_par_cnt++] = "-fno-builtin-strstr"; cc_params[cc_par_cnt++] = "-fno-builtin-strcasestr"; } #ifdef USEMMAP cc_params[cc_par_cnt++] = "-lrt"; #endif cc_params[cc_par_cnt++] = "-D__AFL_HAVE_MANUAL_CONTROL=1"; cc_params[cc_par_cnt++] = "-D__AFL_COMPILER=1"; cc_params[cc_par_cnt++] = "-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION=1"; /* When the user tries to use persistent or deferred forkserver modes by appending a single line to the program, we want to reliably inject a signature into the binary (to be picked up by afl-fuzz) and we want to call a function from the runtime .o file. This is unnecessarily painful for three reasons: 1) We need to convince the compiler not to optimize out the signature. This is done with __attribute__((used)). 2) We need to convince the linker, when called with -Wl,--gc-sections, not to do the same. This is done by forcing an assignment to a 'volatile' pointer. 3) We need to declare __afl_persistent_loop() in the global namespace, but doing this within a method in a class is hard - :: and extern "C" are forbidden and __attribute__((alias(...))) doesn't work. Hence the __asm__ aliasing trick. 
*/ cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)=" "({ static volatile char *_B __attribute__((used)); " " _B = (char*)\"" PERSIST_SIG "\"; " #ifdef __APPLE__ "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); " #else "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " #endif /* ^__APPLE__ */ "_L(_A); })"; cc_params[cc_par_cnt++] = "-D__AFL_INIT()=" "do { static volatile char *_A __attribute__((used)); " " _A = (char*)\"" DEFER_SIG "\"; " #ifdef __APPLE__ "void _I(void) __asm__(\"___afl_manual_init\"); " #else "void _I(void) __asm__(\"__afl_manual_init\"); " #endif /* ^__APPLE__ */ "_I(); } while (0)"; if (maybe_linking) { if (x_set) { cc_params[cc_par_cnt++] = "-x"; cc_params[cc_par_cnt++] = "none"; } cc_params[cc_par_cnt++] = alloc_printf("%s/afl-gcc-rt.o", obj_path); } cc_params[cc_par_cnt] = NULL; } /* Main entry point */ int main(int argc, char **argv, char **envp) { if (argc < 2 || strcmp(argv[1], "-h") == 0) { printf( cCYA "afl-gcc-fast" VERSION cRST " initially by <[email protected]>, maintainer: hexcoder-\n" "\n" "afl-gcc-fast [options]\n" "\n" "This is a helper application for afl-fuzz. It serves as a drop-in " "replacement\n" "for gcc, letting you recompile third-party code with the required " "runtime\n" "instrumentation. 
A common use pattern would be one of the " "following:\n\n" " CC=%s/afl-gcc-fast ./configure\n" " CXX=%s/afl-g++-fast ./configure\n\n" "In contrast to the traditional afl-gcc tool, this version is " "implemented as\n" "a GCC plugin and tends to offer improved performance with slow " "programs\n" "(similarly to the LLVM plugin used by afl-clang-fast).\n\n" "Environment variables used:\n" "AFL_CC: path to the C compiler to use\n" "AFL_CXX: path to the C++ compiler to use\n" "AFL_PATH: path to instrumenting pass and runtime (afl-gcc-rt.*o)\n" "AFL_DONT_OPTIMIZE: disable optimization instead of -O3\n" "AFL_NO_BUILTIN: compile for use with libtokencap.so\n" "AFL_INST_RATIO: percentage of branches to instrument\n" "AFL_QUIET: suppress verbose output\n" "AFL_DEBUG: enable developer debugging output\n" "AFL_HARDEN: adds code hardening to catch memory bugs\n" "AFL_USE_ASAN: activate address sanitizer\n" "AFL_USE_MSAN: activate memory sanitizer\n" "AFL_USE_UBSAN: activate undefined behaviour sanitizer\n" "AFL_GCC_WHITELIST: enable whitelisting (selective instrumentation)\n" "\nafl-gcc-fast was built for gcc %s with the gcc binary path of " "\"%s\".\n\n", BIN_PATH, BIN_PATH, GCC_VERSION, GCC_BINDIR); exit(1); } else if ((isatty(2) && !getenv("AFL_QUIET")) || getenv("AFL_DEBUG") != NULL) { SAYF(cCYA "afl-gcc-fast" VERSION cRST " initially by <[email protected]>, maintainer: hexcoder-\n"); if (getenv("AFL_GCC_WHITELIST") == NULL) { SAYF(cYEL "Warning:" cRST " using afl-gcc-fast without using AFL_GCC_WHITELIST currently " "produces worse results than afl-gcc. Even better, use " "llvm_mode for now.\n"); } } else be_quiet = 1; check_environment_vars(envp); find_obj(argv[0]); edit_params(argc, argv); /*if (isatty(2) && !getenv("AFL_QUIET")) { printf("Calling \"%s\" with:\n", cc_params[0]); for(int i=1; i<cc_par_cnt; i++) printf("%s\n", cc_params[i]); } */ execvp(cc_params[0], (char **)cc_params); FATAL("Oops, failed to execute '%s' - check your PATH", cc_params[0]); return 0; }
612020.c
#include <stdio.h>

/*
 * Reads two integers from stdin and prints their sum, difference,
 * product and quotient.
 *
 * Fixes over the original: scanf() return values are checked (a
 * non-numeric entry previously left the variables uninitialized — UB),
 * and the division is guarded against num2 == 0 (previously a crash /
 * undefined behavior).
 *
 * Returns 0 on success, 1 on invalid input.
 */
int main() {
	int num1, num2, result;

	printf("Please enter a number: ");
	if (scanf("%d", &num1) != 1) {
		fprintf(stderr, "Invalid input: expected an integer.\n");
		return 1;
	}

	printf("Please enter another number: ");
	if (scanf("%d", &num2) != 1) {
		fprintf(stderr, "Invalid input: expected an integer.\n");
		return 1;
	}

	result = num1 + num2;
	printf("%d + %d = %d\n", num1, num2, result);

	result = num1 - num2;
	printf("%d - %d = %d\n", num1, num2, result);

	result = num1 * num2;
	printf("%d * %d = %d\n", num1, num2, result);

	/* Integer division by zero is undefined behavior — guard it. */
	if (num2 == 0) {
		printf("%d / %d is undefined (division by zero)\n", num1, num2);
	} else {
		result = num1 / num2;
		printf("%d / %d = %d\n", num1, num2, result);
	}

	return 0;
}
292484.c
// vim: syntax=c tabstop=4 softtabstop=0 noexpandtab laststatus=1 ruler

/**
 * wrappers/iol_functions.c
 *
 * Functions for iol_wrapper.
 *
 * @author Andrea Dainese <[email protected]>
 * @copyright 2014-2016 Andrea Dainese
 * @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
 * @link http://www.unetlab.com/
 * @version 20160719
 */

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <unistd.h>

#include "include/functions.h"

// Linux API
#include <linux/if.h>
#include <linux/if_tun.h>

#include "include/afsocket.h"
#include "include/cmd.h"
#include "include/functions.h"
#include "include/serial2udp.h"
#include "include/tap.h"
#include "include/ts.h"

#include "include/log.h"
#include "include/params.h"

extern int device_id;
extern int child_eth;
extern int child_ser;
extern int tenant_id;
extern int tsclients_socket[];

// Print usage help and exit(1); never returns.
void usage(const char *bin) {
	printf("Usage: %s <standard options> <specific options>\n", bin);
	printf("Standard Options:\n");
	printf("-T <n> *Tenant ID\n");
	printf("-D <n> *Device ID\n");
	printf("-d <n> Delayed start in seconds (default 0)\n");
	printf("-t <desc> Window (xterm) title\n");
	printf("Specific Options:\n");
	printf("-F <n> *IOL Image\n");
	printf("-r <n> Size of RAM (default 256)\n");
	printf("-o <n> Size of ROM (default 4)\n");
	printf("-n <n> Size of NVRAM (default 128)\n");
	printf("-e <n> Number of Ethernet portgroups (default 2, max 16 included serials)\n");
	printf("-s <n> Number of Serial portgroups (default 2, max 16 included ethernets)\n");
	printf("-l <n> Ethernet/Serial link end-point (g.e. 0:0:tap:vunl0_6_0)\n");
	printf("-c <name> Startup configuration file name\n");
	printf("* Mandatory option\n");
	printf("WARNING: use the above parameter order!\n");
	exit(1);
}

// Creating NETMAP: writes one "iol_id:port wrapper_id:port" line per port
// (64 ports); wrapper pseudo-device ID is iol_id + 512.
// Returns 0 on success, 1/2 on file errors.
int mk_netmap() {
	FILE *f_iol_netmap;
	int d = 0;
	int iol_id = device_id;
	int rc = 0;
	int wrapper_id = iol_id + 512;

	// Remove a stale NETMAP before recreating it.
	if (access("NETMAP", F_OK) != -1 && remove("NETMAP") != 0) {
		rc = 1;
		UNLLog(LLERROR, "Cannot create NETMAP file (access). ERR: %s (%i)\n", strerror(errno), rc);
		return rc;
	}

	f_iol_netmap = fopen("NETMAP", "a");
	if (f_iol_netmap == NULL) {
		rc = 2;
		UNLLog(LLERROR, "Cannot create NETMAP file (fopen). ERR: %s (%i).\n", strerror(errno), rc);
		return rc;
	}

	for (d = 0; d < 64; d++) {
		fprintf(f_iol_netmap, "%u:%u %u:%u\n", iol_id, d, wrapper_id, d);
	}

	fclose(f_iol_netmap);
	UNLLog(LLINFO, "NETMAP file created.\n");
	return 0;
}

// Creating AF sockets for child communication.
// Builds /tmp/netio<uid>/<id> socket paths and calls afsocket_listen().
// NOTE(review): several strncat() calls below bound by sizeof(*buf) — that
// is 1 (size of a char), not the buffer size; sizeof(buf) was presumably
// intended. Left untouched; verify against the original upstream.
int mk_afsocket(int *wrapper_socket, int *iol_socket) {
	char iol_socketfile[100];                       // Store AF_UNIX socket
	memset(&iol_socketfile, 0, sizeof(iol_socketfile));
	char wrapper_socketfile[100];                   // Store AF_UNIX socket filename
	memset(&wrapper_socketfile, 0, sizeof(wrapper_socketfile));
	char tmp_netio[100];
	memset(&tmp_netio, 0, sizeof(tmp_netio));
	char tmp[100];
	memset(&tmp, 0, sizeof(tmp));
	int iol_id = device_id;
	int wrapper_id = iol_id + 512;
	int rc = -1;

	// Creating netio directory (/tmp/netio<uid>)
	strncpy(tmp_netio, "/tmp/netio", sizeof(tmp_netio));
	sprintf(tmp, "%u", getuid());
	strcat(tmp_netio, tmp);
	strncat(tmp_netio, "\0", sizeof(*tmp_netio));
	mkdir(tmp_netio, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);

	// Setting AF_UNIX (wrapper) socket and lock
	strncpy(wrapper_socketfile, tmp_netio, sizeof(wrapper_socketfile));
	strncat(wrapper_socketfile, "/", sizeof(*wrapper_socketfile));
	sprintf(tmp, "%u", wrapper_id);
	strcat(wrapper_socketfile, tmp);

	// Close a previously opened wrapper socket before rebinding.
	if ( *wrapper_socket != -1 ) {
		close(*wrapper_socket);
		*wrapper_socket = -1;
	}

	if (access(wrapper_socketfile, F_OK) != -1 && remove(wrapper_socketfile) != 0) {
		rc = 1;
		UNLLog(LLERROR, "Cannot access AF_UNIX (%s). ERR: %s (%i).\n", tmp, strerror(errno), rc);
		return rc;
	}

	// Setting AF_UNIX (child) socket
	strncpy(iol_socketfile, tmp_netio, sizeof(iol_socketfile));
	strncat(iol_socketfile, "/", sizeof(*iol_socketfile));
	sprintf(tmp, "%u", iol_id);
	strcat(iol_socketfile, tmp);
	strncat(iol_socketfile, "\0", sizeof(*iol_socketfile));

	// Creating sockets
	if ((rc = afsocket_listen(wrapper_socketfile, iol_socketfile, wrapper_socket, iol_socket)) != 0) {
		UNLLog(LLERROR, "Cannot listen at AF_UNIX (%s). ERR: Cannot open AF_UNIX sockets (%i).\n", tmp, rc);
		return 2;
	}

	return 0;
}

// Creating TAP interfaces: one vunl<T>_<D>_<i+16j> interface per ethernet
// port (interface id = port + 16 * portgroup). Missing TAPs are skipped,
// not fatal. Always returns 0.
int mk_tap(int child_eth, int *iol_tap) {
	char tap_name[20];
	memset(&tap_name, 0, sizeof(tap_name));
	int i = 0;
	int j = 0;
	int rc = -1;
	int tap_fd = -1;

	// Create all interfaces
	for (i = 0; i < child_eth; i++) {
		for (j = 0; j <= 3; j++) {
			sprintf(tap_name, "vunl%u_%u_%u", tenant_id, device_id, i + 16 * j);
			if ((rc = tap_listen(tap_name, &tap_fd)) != 0) {
				rc = 1;
				UNLLog(LLVERBOSE, "Skipping TAP (%s) interface (%i).\n", tap_name, rc);
			} else {
				// Add TAP interface to the active ethernet list
				iol_tap[i + 16 * j] = tap_fd;
			}
		}
	}
	return 0;
}

// Check if a given interface is ethernet (0) or serial (1)
int is_eth(int i) {
	// i = x/y -> i = x + y * 16 -> x = i - y * 16 = i % 16
	if (i % 16 < child_eth) {
		return 0;
	} else {
		return 1;
	}
}

// Receiving packet from AF_UNIX: demultiplexes a frame coming from the IOL
// process to either a TAP interface (ethernet) or a UDP tunnel (serial),
// rewriting the 8-byte header into the UNL format for the UDP case.
// Returns 0 on success (or on a non-fatal drop), non-zero on read/TAP errors.
int packet_af(int af_socket, int *iol_fd, int *udp_fd, int *remote_id, int *remote_if) {
	// char *iol_frame;
	// char eth_frame[1518];
	// memset(&eth_frame, 0, sizeof(eth_frame));
	// char ser_frame[1526];
	// memset(&ser_frame, 0, sizeof(ser_frame));
	char tmp_frame[BUFFER];
	memset(&tmp_frame, 0, sizeof(tmp_frame));
	int iol_ifid = -1;
	int length = -1;
	int rc = -1;

	/*
	 * IOL 64 bit header:
	 * - 16 bits for the destination IOL ID
	 * - 16 bits for the source IOL ID
	 * - 8 bits for the destination interface (z = x/y -> z = x + 3 * 16)
	 * - 8 bits for the source interface (z = x/y -> z = x + y * 16)
	 * - 16 bits equals to 0x0100
	 * Destination TAP interface is: vunlT_U_Z (T = tenant_id, U = device_id, Z = interface_id)
	 */

	/*
	 * UNL 64 bit header:
	 * - 8 bits for the destination Tenant ID
	 * - 8 bits for the source Tenant ID
	 * - 16 bits for the destination Device ID
	 * - 16 bits for the source Device ID
	 * - 8 bits for the destination interface
	 * - 8 bits for the source interface
	 */

	if ((length = afsocket_receive(&tmp_frame, af_socket,sizeof(tmp_frame))) <= 0) {
		// Read error
		rc = 1;
		UNLLog(LLERROR, "Failed to receive packet from AF_UNIX (%i). ERR: %s (%i). \n", length, strerror(errno), rc);
		return rc;
	} else {
		//memcpy(tmp_frame, &iol_frame, length);
		// Byte 5 of the IOL header carries the source interface ID.
		iol_ifid = (int) tmp_frame[5];
		if (is_eth(iol_ifid) == 0) {
			// Ethernet: packet to TAP (payload starts after the 8-byte header)
			//memcpy(eth_frame, &tmp_frame[8], length - 8);
			if (iol_fd[iol_ifid] != 0 && write(iol_fd[iol_ifid], &tmp_frame[8], length - 8) < 0) {
				// If TAP interface is configured, send packet through it
				rc = 3;
				UNLLog(LLERROR, "Failed to send a packet to TAP (src: vunl%u_%u_%u). ERR: %s (%i).\n", tenant_id, device_id, iol_ifid, strerror(errno), rc);
				return rc;
			}
			UNLLog(LLVERBOSE, "Sent TAP frame (dst: %02x%02x.%02x%02x.%02x%02x, src: %02x%02x.%02x%02x.%02x%02x, length: %i) to vunl%u_%u_%u.\n",
				tmp_frame[8] & 0xff, tmp_frame[9] & 0xff, tmp_frame[10] & 0xff, tmp_frame[11] & 0xff, tmp_frame[12] & 0xff, tmp_frame[13] & 0xff,
				tmp_frame[14] & 0xff, tmp_frame[15] & 0xff, tmp_frame[16] & 0xff, tmp_frame[17] & 0xff, tmp_frame[18] & 0xff, tmp_frame[19] & 0xff,
				length - 8, tenant_id, device_id, iol_ifid);
			return 0;
		} else {
			if (udp_fd[iol_ifid] == 0) {
				UNLLog(LLERROR, "Failed to send a packet to unconfigured UDP (src: vunl%u_%u_%u).\n", tenant_id, device_id, iol_ifid);
				return 0;
			}
			// Now send packet via UDP
			// TODO: Intra Tenant Link
			int dst_tenant_id = tenant_id;
			// Rewrite the IOL header in place into the UNL header layout.
			//memcpy(ser_frame, &tmp_frame, length); // size(header(IOL)) == size(header(UNETLAB))
			tmp_frame[0] = dst_tenant_id;                  // Destination Tenant ID (TODO Intra Tenant Link)
			tmp_frame[1] = tenant_id;                      // Source Tenant ID
			tmp_frame[2] = remote_id[iol_ifid] >> 8;       // Destination Device ID (TODO)
			tmp_frame[3] = remote_id[iol_ifid] & 255;
			tmp_frame[4] = device_id >> 8;                 // Source Device ID
			tmp_frame[5] = device_id & 255;
			tmp_frame[6] = remote_if[iol_ifid];            // Destination Interface ID
			tmp_frame[7] = iol_ifid;                       // Source Interface ID (TODO)
			UNLLog(LLVERBOSE, "Received IOL packet from device %u:%u:%u to device %u:%u:%u\n", tenant_id, device_id, iol_ifid, dst_tenant_id, remote_id[iol_ifid], remote_if[iol_ifid]);
			if (write(udp_fd[iol_ifid], tmp_frame, length) < 0) {
				// Sometimes packets cannot be delivered if end point is not active (Connection refused)
				UNLLog(LLERROR, "Failed to send a packet to UDP (src: vunl%u_%u_%u).\n", tenant_id, device_id, iol_ifid);
				return 0;
			}
			UNLLog(LLVERBOSE, "Sent UDP (s=%i, l=%i)\n", udp_fd[iol_ifid], length);
			return 0;
		}
	}
}

// Receiving packet from TAP: prepends an 8-byte IOL header and forwards
// the ethernet frame to the IOL process over AF_UNIX.
// Returns 0 on success/drop, non-zero on read/write errors.
int packet_tap(int tap_socket, int af_socket, int iol_ifid) {
	int iol_id = device_id;
	int length = -1;
	int rc = -1;
	int wrapper_id = iol_id + 512;
	// char *eth_frame;
	// char iol_frame[1522];
	// memset(&iol_frame, 0, sizeof(iol_frame));
	char tmp_frame[BUFFER];
	memset(&tmp_frame, 0, sizeof(tmp_frame));

	/*
	 * IOL 64 bit header:
	 * - 16 bits for the destination IOL ID
	 * - 16 bits for the source IOL ID
	 * - 8 bits for the destination interface (z = x/y -> z = x + 3 * 16)
	 * - 8 bits for the source interface (z = x/y -> z = x + y * 16)
	 * - 16 bits equals to 0x0100
	 * Destination TAP interface is: vunlT_U_Z (T = tenant_id, U = device_id, Z = interface_id)
	 */

	// Payload is read directly after the 8 header bytes we will fill in.
	if ((length = tap_receive(&tmp_frame[8], tap_socket,sizeof(tmp_frame)-8)) <= 0) {
		// Read error
		rc = 1;
		UNLLog(LLERROR, "Failed to receive packet from TAP (%i, %i). ERR: %s (%i).\n", tap_socket, length, strerror(errno), rc);
		return rc;
	} else if (length > 1514) {
		UNLLog(LLWARNING, "Ignoring frame from TAP (%i) because too long (%i).\n", tap_socket, length);
		return 0; // The wrapper will continue to work
	} else {
		//memcpy(tmp_frame, &eth_frame, length);
		// Now send packet to AF_UNIX
		tmp_frame[0] = iol_id >> 8;      // IOL device ID
		tmp_frame[1] = iol_id & 255;
		tmp_frame[2] = wrapper_id >> 8;  // WRAPPER device ID
		tmp_frame[3] = wrapper_id & 255;
		tmp_frame[4] = iol_ifid;         // IOL device ID
		tmp_frame[5] = iol_ifid;         // WRAPPER device ID
		tmp_frame[6] = 1;
		tmp_frame[7] = 0;
		//memcpy(&iol_frame[8], &tmp_frame, length);
		if ((write(af_socket, tmp_frame, length + 8)) < 0) {
			rc = 3;
			UNLLog(LLERROR, "Failed forwarding data to AF_UNIX (%i) socket. ERR: %s (%i).\n", af_socket, strerror(errno), rc);
			return rc;
		} else {
			UNLLog(LLVERBOSE, "Sent eth frame (dst: %02x%02x.%02x%02x.%02x%02x, src: %02x%02x.%02x%02x.%02x%02x, length: %i) to AF_UNIX\n",
				tmp_frame[8] & 0xff, tmp_frame[9] & 0xff, tmp_frame[10] & 0xff, tmp_frame[11] & 0xff, tmp_frame[12] & 0xff, tmp_frame[13] & 0xff,
				tmp_frame[14] & 0xff, tmp_frame[15] & 0xff, tmp_frame[16] & 0xff, tmp_frame[17] & 0xff, tmp_frame[18] & 0xff, tmp_frame[19] & 0xff,
				length);
			UNLLog(LLVERBOSE, "Sent eth frame to AF_UNIX (dst: %u:%u, src: %u:%u\n", 256 * (int) tmp_frame[0] + (int) tmp_frame[1], (int) tmp_frame[4], 256 * (int) tmp_frame[2] + (int) tmp_frame[3], (int) tmp_frame[5]);
			return 0;
		}
	}
	return 1; // Dummy return for stupid gcc
}

// Receiving packet from UDP: validates the UNL header (tenant/device must
// match this wrapper), rewrites it into the IOL header layout and forwards
// the serial frame to the IOL process over AF_UNIX.
// Returns 0 on success/drop, non-zero on read/write errors.
int packet_udp(int udp_socket, int af_socket) {
	int iol_id = device_id;
	int length = -1;
	int rc = -1;
	int wrapper_id = iol_id + 512;
	char ser_frame[BUFFER];
	memset(ser_frame, 0, sizeof(ser_frame));
	int dst_tenant_id = 0;
	int dst_device_id = 0;
	int dst_device_if = 0;
	int src_tenant_id = 0;
	int src_device_id = 0;
	int src_device_if = 0;

	/*
	 * IOL 64 bit header:
	 * - 16 bits for the destination IOL ID
	 * - 16 bits for the source IOL ID
	 * - 8 bits for the destination interface (z = x/y -> z = x + 3 * 16)
	 * - 8 bits for the source interface (z = x/y -> z = x + y * 16)
	 * - 16 bits equals to 0x0100
	 * Destination TAP interface is: vunlT_U_Z (T = tenant_id, U = device_id, Z = interface_id)
	 */

	/*
	 * UNL 64 bit header:
	 * - 8 bits for the destination Tenant ID (TT)
	 * - 8 bits for the source Tenant ID (tt)
	 * - 16 bits for the destination Device ID (DDDD)
	 * - 16 bits for the source Device ID (dddd)
	 * - 8 bits for the destination interface (II)
	 * - 8 bits for the source interface (ii)
	 * # tcpdump -i lo -X -nn udp
	 * 14:20:37.096862 IP6 ::1.37773 > ::1.32770: UDP, length 309
	 *   0x0000:  6000 0000 013d 1140 0000 0000 0000 0000  `....=.@........
	 *   0x0010:  0000 0000 0000 0001 0000 0000 0000 0000  ................
	 *   0x0020:  0000 0000 0000 0001 938d 8002 013d 12e7  .............=..
	 *   0x0030:  TTtt DDDD dddd IIii 8f00 2000 02b4 151c  ................
	 *   [...]
	 */

	if ((length = serial2udp_receive(ser_frame, udp_socket, sizeof(ser_frame))) <= 0) {
		// Read error
		rc = 1;
		UNLLog(LLERROR, "Failed to receive packet from UDP (%i). ERR: %s (%i).\n", length, strerror(errno), rc);
		return rc;
	} else if (length > 1522) {
		UNLLog(LLERROR, "Ignoring frame from UDP because too long (%i).\n", length);
		return 0;
	} else if (length < 8) {
		// Shorter than the 8-byte UNL header: cannot be valid.
		UNLLog(LLERROR, "Ignoring frame from UDP because too short (%i).\n", length);
		return 0;
	} else {
		dst_tenant_id = ser_frame[0];
		src_tenant_id = ser_frame[1];
		dst_device_id = (ser_frame[2] << 8) + ser_frame[3];
		src_device_id = (ser_frame[4] << 8) + ser_frame[5];
		dst_device_if = ser_frame[6];
		src_device_if = ser_frame[7];
		if (dst_tenant_id != tenant_id) {
			UNLLog(LLERROR, "Ignoring frame from UDP because wrong tenant_id (%i).\n", dst_tenant_id);
			return 0;
		}
		if (dst_device_id != device_id) {
			UNLLog(LLERROR, "Ignoring frame from UDP because wrong device_id (%i).\n", dst_device_id);
			return 0;
		}
		// Now send packet to AF_UNIX
		ser_frame[0] = dst_device_id >> 8;  // IOL device ID
		ser_frame[1] = dst_device_id & 255;
		ser_frame[2] = wrapper_id >> 8;     // WRAPPER device ID
		ser_frame[3] = wrapper_id & 255;
		ser_frame[4] = dst_device_if;       // IOL device ID
		ser_frame[5] = dst_device_if;       // WRAPPER device ID
		ser_frame[6] = 1;
		ser_frame[7] = 0;
		UNLLog(LLVERBOSE, "Received UDP packet from device %u:%u:%u to device %u:%u:%u\n", src_tenant_id, src_device_id, src_device_if, dst_tenant_id, dst_device_id, dst_device_if);
		if ((write(af_socket, ser_frame, length)) < 0) {
			rc = 3;
			UNLLog(LLERROR, "Failed forwarding data to AF_UNIX (%i) socket. ERR: %s (%i). \n", af_socket, strerror(errno), rc);
			return rc;
		} else {
			UNLLog(LLVERBOSE, "Sent ser frame to AF_UNIX (dst: %u:%u, src: %u:%u)\n", 256 * (int) ser_frame[0] + (int) ser_frame[1], (int) ser_frame[4], 256 * (int) ser_frame[2] + (int) ser_frame[3], (int) ser_frame[5]);
			return 0;
		}
		return 0;
	}
}
938200.c
/* Copyright 2021 Simon Willcocks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "inkernel.h"

// Placeholder OS_WriteC handler: parks the core in a wfi loop.
// NOTE(review): naked + never returns — presumably a "not yet implemented"
// trap; confirm intended behavior before relying on it.
void __attribute__(( naked )) default_os_writec( uint32_t r0, uint32_t r1, uint32_t r2 )
{
  for (;;) asm ( "wfi" );
}

// OS_ChangedBox: r0 = 0 disable / 1 enable / 2 reset box (other values:
// query only). On exit r0 holds the PREVIOUS enabled state (captured
// before the switch) and r1 points at the ChangedBox structure.
bool do_OS_ChangedBox( svc_registers *regs )
{
  // Snapshot the current enable state so it can be returned as "previous".
  workspace.vdu.ChangedBox.enabled = workspace.vdu.changed_box_tracking_enabled;
  switch (regs->r[0]) {
  case 0: workspace.vdu.changed_box_tracking_enabled = 0; break;
  case 1: workspace.vdu.changed_box_tracking_enabled = 1; break;
  case 2:
    // Reset the box to empty.
    workspace.vdu.ChangedBox.left = 0;
    workspace.vdu.ChangedBox.bottom = 0;
    workspace.vdu.ChangedBox.right = 0;
    workspace.vdu.ChangedBox.top = 0;
    break;
  }
  regs->r[1] = (uint32_t) &workspace.vdu.ChangedBox;
  regs->r[0] = workspace.vdu.ChangedBox.enabled;
  return true;
}

// OS_ReadVduVariables: r0 = list of variable numbers terminated by -1,
// r1 = output array. Supported ranges: 0-12 (mode vars), 128-172 (VDU
// vars), 256-257 (text window); anything else parks the core (wfi loop —
// unimplemented trap).
bool do_OS_ReadVduVariables( svc_registers *regs )
{
  uint32_t *var = (void*) regs->r[0];
  uint32_t *val = (void*) regs->r[1];
  while (*var != -1) {
    switch (*var) {
    case 0 ... 12: *val = workspace.vdu.modevars[*var]; break;
    case 128 ... 172: *val = workspace.vdu.vduvars[*var - 128]; break;
    case 256 ... 257: *val = workspace.vdu.textwindow[*var - 256]; break;
    default: for (;;) { asm( "wfi" ); }  // unsupported variable: halt
    }
    var++;
    val++;
  }
  return true;
}

// OS_ReadModeVariable: only the current mode (r0 == -1) is supported;
// any other mode parks the core. Result goes in r2.
bool do_OS_ReadModeVariable( svc_registers *regs )
{
  if (regs->r[0] != -1) { for (;;) { asm ( "wfi" ); } }
  regs->r[2] = workspace.vdu.modevars[regs->r[1]];
  return true;
}

// OS_ReadPoint: unimplemented — parks the core.
bool do_OS_ReadPoint( svc_registers *regs )
{
  for (;;) { asm( "wfi" ); }
}

bool do_OS_RemoveCursors( svc_registers *regs ) { return true; } // What cursors? FIXME
bool do_OS_RestoreCursors( svc_registers *regs ) { return true; } // What cursors?

// Initial mode variable values (indices 0-12) used at startup.
static const uint32_t initial_mode_vars[13] = { 0x40, 0xef, 0x86, -1, 1, 1, 0x1e00, 0x7e9000, 0, 5, 5, 0x77f, 0x437 };

// Installs the initial mode variables and maps a page at 0x4000 for the
// draw module, filling it with this core's number (word-by-word).
void SetInitialVduVars()
{
  memcpy( workspace.vdu.modevars, initial_mode_vars, sizeof( workspace.vdu.modevars ) );

  uint32_t for_drawmod = Kernel_allocate_pages( 4096, 4096 );
  MMU_map_at( (void*) 0x4000, for_drawmod, 4096 );
  for (int i = 0; i < 4096; i+=4) {
    *(uint32_t*)(0x4000+i) = workspace.core_number;
  }
}
385635.c
/**
 * @file
 * @brief POSIX threads compatibility layer over the native thread API.
 *
 * Note: contrary to POSIX (which returns positive error numbers), this
 * implementation follows the surrounding codebase's convention of
 * returning negative errno values on failure and ENOERR (0) on success.
 *
 * @date 27.03.2013
 * @author Anton Bulychev
 */

#include <errno.h>
#include <pthread.h>

#include <kernel/thread.h>

#include <util/err.h>

/* No dynamically allocated state in attrs — nothing to release. */
int pthread_attr_destroy(pthread_attr_t *attr) {
	return ENOERR;
}

/* Reports the raw flag bit, not PTHREAD_CREATE_DETACHED/JOINABLE:
 * *detachstate is THREAD_FLAG_DETACHED when detached, 0 otherwise. */
int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate) {
	*detachstate = attr->flags & THREAD_FLAG_DETACHED;
	return ENOERR;
}

/*
int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize) {
	return -ENOSYS;
}
*/

/* As above: *inheritsched receives the raw flag bit. */
int pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched) {
	*inheritsched = attr->flags & THREAD_FLAG_PRIORITY_INHERIT;
	return ENOERR;
}

int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) {
	param->sched_priority = attr->sched_param.sched_priority;
	return ENOERR;
}

int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) {
	*policy = attr->policy;
	return ENOERR;
}

/*
int pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope) {
	return -ENOSYS;
}

int pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr) {
	return -ENOSYS;
}

int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize) {
	return -ENOSYS;
}
*/

/* Defaults: joinable, priority-inheritance enabled, SCHED_OTHER at
 * normal priority. */
int pthread_attr_init(pthread_attr_t *attr) {
	attr->flags = 0;

	if (pthread_attr_setdetachstate(attr, 0)) {
		return -EINVAL;
	}
	if (pthread_attr_setinheritsched(attr, THREAD_FLAG_PRIORITY_INHERIT)) {
		return -EINVAL;
	}

	attr->policy = SCHED_OTHER;
	attr->sched_param.sched_priority = SCHED_PRIORITY_NORMAL;

	return ENOERR;
}

/* Any non-zero detachstate marks the thread detached. */
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) {
	if (detachstate) {
		attr->flags |= THREAD_FLAG_DETACHED;
	} else {
		attr->flags &= ~THREAD_FLAG_DETACHED;
	}
	return ENOERR;
}

/*
int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize) {
	return -ENOSYS;
}
*/

int pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched) {
	if (inheritsched) {
		attr->flags |= THREAD_FLAG_PRIORITY_INHERIT;
	} else {
		attr->flags &= ~THREAD_FLAG_PRIORITY_INHERIT;
	}
	return ENOERR;
}

/* NOTE(review): stores the priority but still returns -ENOSYS — the TODO
 * suggests this is intentionally unfinished; callers must not rely on the
 * return value meaning failure-to-store. */
int pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param) {
	//TODO move copy to other place
	attr->sched_param.sched_priority = param->sched_priority;
	return -ENOSYS;
}

int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy) {
	attr->policy = policy;
	return ENOERR;
}

/*
int pthread_attr_setscope(pthread_attr_t *attr, int contentionscope) {
	return -ENOSYS;
}

int pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr) {
	return -ENOSYS;
}

int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) {
	return -ENOSYS;
}
*/

/* Creates the thread suspended, applies scheduling policy/priority from
 * the (possibly default) attributes, then launches it. */
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg) {
	struct thread *t;
	pthread_attr_t def_attr;
	const pthread_attr_t *pattr;
	unsigned int flags;
	int detached, inherit, policy;
	struct sched_param sched_param;

	if (!start_routine) {
		return -EAGAIN;
	}
	if (NULL == attr) {
		/* No attributes supplied: use defaults. */
		pthread_attr_init(&def_attr);
		pattr = &def_attr;
	} else {
		pattr = attr;
	}
	if (0 != pthread_attr_getdetachstate(pattr, &detached)) {
		return -EINVAL;
	}
	if (0 != pthread_attr_getinheritsched(pattr, &inherit)) {
		return -EINVAL;
	}
	/* detached/inherit already carry the raw flag bits (see getters). */
	flags = detached | inherit | THREAD_FLAG_SUSPENDED;

	t = thread_create(flags, start_routine, arg);
	if (err(t)) {
		/*
		 * The pthread_create() function will fail if:
		 *
		 * [EAGAIN]
		 *     The system lacked the necessary resources to create another thread, or the system-imposed limit on the total number of threads in a process PTHREAD_THREADS_MAX would be exceeded.
		 * [EINVAL]
		 *     The value specified by attr is invalid.
		 * [EPERM]
		 *     The caller does not have appropriate permission to set the required scheduling parameters or scheduling policy.
		 *
		 * The pthread_create() function will not return an error code of [EINTR].
		 */
		return -EAGAIN;
	}
	pthread_attr_getschedpolicy(pattr, &policy);
	pthread_attr_getschedparam(pattr, &sched_param);
	pthread_setschedparam(t, policy, &sched_param);

	thread_launch(t);

	*thread = t;

	return ENOERR;
}

int pthread_detach(pthread_t thread) {
	return thread_detach(thread);
}

int pthread_equal(pthread_t t1, pthread_t t2) {
	return t1 == t2;
}

void pthread_exit(void *value_ptr) {
	thread_exit(value_ptr);
}

/*
int pthread_getconcurrency(void) {
	return -ENOSYS;
}
*/

int pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param) {
	*policy = thread->policy;
	param->sched_priority = schedee_priority_get(&thread->schedee);
	return ENOERR;
}

int pthread_join(pthread_t thread, void **value_ptr) {
	return thread_join(thread, value_ptr);
}

/* NOTE(review): implemented via pthread_mutex_trylock on the once_control
 * object — assumes pthread_once_t is mutex-compatible here; losers of the
 * race return immediately WITHOUT waiting for init_routine to finish,
 * which is weaker than POSIX pthread_once. Verify callers tolerate this. */
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) {
	if((NULL == init_routine) || (NULL == once_control)) {
		return -EINVAL;
	}
	if(pthread_mutex_trylock(once_control)) {
		return 0;
	}
	init_routine();
	return 0;
}

pthread_t pthread_self(void) {
	return thread_self();
}

/*
int pthread_setconcurrency(int new_level) {
	return -ENOSYS;
}
*/

/* Realtime policies (SCHED_FIFO/SCHED_RR) require priority >= 200 in this
 * implementation (enforced by the two-argument kernel assert below). */
int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param) {
	assert((policy != SCHED_FIFO && policy != SCHED_RR)
			|| param->sched_priority >= 200, "In current realization you must "
			"use SCHED_FIFO and SCHED_RR only with priority more or equal 200");
	thread->policy = policy;
	return schedee_priority_set(&thread->schedee, param->sched_priority);
}

int pthread_setschedprio(pthread_t thread, int prio) {
	return schedee_priority_set(&thread->schedee, prio);
}
562785.c
#include <brutal/io/write.h>

/* Write one byte to the writer; forwards io_write's result unchanged. */
IoWriteResult io_put(IoWriter *writer, uint8_t c)
{
    uint8_t byte = c;
    return io_write(writer, &byte, 1);
}

/* Write every byte of str to the writer (no terminator is emitted);
 * forwards io_write's result unchanged. */
IoWriteResult io_print(IoWriter *writer, Str str)
{
    uint8_t const *bytes = (uint8_t *)str.buffer;
    return io_write(writer, bytes, str.len);
}
705291.c
/*******************************************************************************
  UART2 PLIB

  Company:
    Microchip Technology Inc.

  File Name:
    plib_uart2.c

  Summary:
    UART2 PLIB Implementation File

  Description:
    Interrupt-driven UART2 driver with software ring buffers for RX and TX
    and optional threshold-notification callbacks.
*******************************************************************************/

/*******************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*******************************************************************************/

#include "device.h"
#include "plib_uart2.h"

// *****************************************************************************
// *****************************************************************************
// Section: UART2 Implementation
// *****************************************************************************
// *****************************************************************************

/* Single driver instance: indices, callbacks, and notification settings. */
UART_RING_BUFFER_OBJECT uart2Obj;

/* One slot of each ring buffer is sacrificed to distinguish full from empty,
 * so usable capacity is SIZE - 1. */
#define UART2_READ_BUFFER_SIZE 64
#define UART2_RX_INT_DISABLE() IEC1CLR = _IEC1_U2RXIE_MASK;
#define UART2_RX_INT_ENABLE() IEC1SET = _IEC1_U2RXIE_MASK;

static uint8_t UART2_ReadBuffer[UART2_READ_BUFFER_SIZE];

#define UART2_WRITE_BUFFER_SIZE 2048
#define UART2_TX_INT_DISABLE() IEC1CLR = _IEC1_U2TXIE_MASK;
#define UART2_TX_INT_ENABLE() IEC1SET = _IEC1_U2TXIE_MASK;

static uint8_t UART2_WriteBuffer[UART2_WRITE_BUFFER_SIZE];

/* Clear overrun/parity/framing errors and the error/RX interrupt flags,
 * flushing at most one FIFO's worth of bad bytes. */
void static UART2_ErrorClear( void )
{
    /* rxBufferLen = (FIFO level + RX register) */
    uint8_t rxBufferLen = UART_RXFIFO_DEPTH;
    uint8_t dummyData = 0u;

    /* If it's a overrun error then clear it to flush FIFO */
    if(U2STA & _U2STA_OERR_MASK)
    {
        U2STACLR = _U2STA_OERR_MASK;
    }

    /* Read existing error bytes from FIFO to clear parity and framing error flags */
    while(U2STA & (_U2STA_FERR_MASK | _U2STA_PERR_MASK))
    {
        dummyData = (uint8_t )(U2RXREG );
        rxBufferLen--;

        /* Try to flush error bytes for one full FIFO and exit instead of
         * blocking here if more error bytes are received */
        if(rxBufferLen == 0u)
        {
            break;
        }
    }

    // Ignore the warning
    (void)dummyData;

    /* Clear error interrupt flag */
    IFS1CLR = _IFS1_U2EIF_MASK;

    /* Clear up the receive interrupt flag so that RX interrupt is not
     * triggered for error bytes */
    IFS1CLR = _IFS1_U2RXIF_MASK;

    return;
}

/* Configure UART2 registers and reset the driver instance state; leaves the
 * peripheral enabled with error and RX interrupts on (TX interrupt off until
 * there is data to send). */
void UART2_Initialize( void )
{
    /* Set up UxMODE bits */
    /* STSEL = 0 */
    /* PDSEL = 0 */
    /* BRGH = 1 */
    /* RXINV = 0 */
    /* ABAUD = 0 */
    /* LPBACK = 0 */
    /* WAKE = 0 */
    /* SIDL = 0 */
    /* RUNOVF = 0 */
    /* CLKSEL = 0 */
    /* SLPEN = 0 */
    U2MODE = 0x8;

    /* Enable UART2 Receiver, Transmitter and TX Interrupt selection */
    U2STASET = (_U2STA_UTXEN_MASK | _U2STA_URXEN_MASK | _U2STA_UTXISEL1_MASK);

    /* BAUD Rate register Setup */
    U2BRG = 108;

    /* TX interrupt stays disabled until UART2_Write() queues data. */
    IEC1CLR = _IEC1_U2TXIE_MASK;

    /* Initialize instance object */
    uart2Obj.rdCallback = NULL;
    uart2Obj.rdInIndex = 0;
    uart2Obj.rdOutIndex = 0;
    uart2Obj.isRdNotificationEnabled = false;
    uart2Obj.isRdNotifyPersistently = false;
    uart2Obj.rdThreshold = 0;
    uart2Obj.wrCallback = NULL;
    uart2Obj.wrInIndex = 0;
    uart2Obj.wrOutIndex = 0;
    uart2Obj.isWrNotificationEnabled = false;
    uart2Obj.isWrNotifyPersistently = false;
    uart2Obj.wrThreshold = 0;

    /* Turn ON UART2 */
    U2MODESET = _U2MODE_ON_MASK;

    /* Enable UART2_FAULT Interrupt */
    IEC1SET = _IEC1_U2EIE_MASK;

    /* Enable UART2_RX Interrupt */
    IEC1SET = _IEC1_U2RXIE_MASK;
}

/* Reconfigure baud rate, data width, parity and stop bits at runtime.
 * Returns false for a NULL setup, zero baud, an unreachable baud value, or
 * 9-bit data combined with parity. The UART is turned off during the update
 * and re-enabled before returning true. */
bool UART2_SerialSetup( UART_SERIAL_SETUP *setup, uint32_t srcClkFreq )
{
    bool status = false;
    uint32_t baud;
    int32_t brgValHigh = 0;
    int32_t brgValLow = 0;
    uint32_t brgVal = 0;
    uint32_t uartMode;

    if (setup != NULL)
    {
        baud = setup->baudRate;

        if (baud == 0)
        {
            return status;
        }

        /* Turn OFF UART2 */
        U2MODECLR = _U2MODE_ON_MASK;

        if(srcClkFreq == 0)
        {
            srcClkFreq = UART2_FrequencyGet();
        }

        /* Calculate BRG value (rounded) for both divider modes */
        brgValLow = (((srcClkFreq >> 4) + (baud >> 1)) / baud ) - 1;
        brgValHigh = (((srcClkFreq >> 2) + (baud >> 1)) / baud ) - 1;

        /* Check if the baud value can be set with low baud settings */
        if((brgValLow >= 0) && (brgValLow <= UINT16_MAX))
        {
            brgVal = brgValLow;
            U2MODECLR = _U2MODE_BRGH_MASK;
        }
        else if ((brgValHigh >= 0) && (brgValHigh <= UINT16_MAX))
        {
            brgVal = brgValHigh;
            U2MODESET = _U2MODE_BRGH_MASK;
        }
        else
        {
            return status;
        }

        if(setup->dataWidth == UART_DATA_9_BIT)
        {
            if(setup->parity != UART_PARITY_NONE)
            {
                /* 9-bit mode cannot be combined with parity */
                return status;
            }
            else
            {
                /* Configure UART2 mode */
                uartMode = U2MODE;
                uartMode &= ~_U2MODE_PDSEL_MASK;
                U2MODE = uartMode | setup->dataWidth;
            }
        }
        else
        {
            /* Configure UART2 mode */
            uartMode = U2MODE;
            uartMode &= ~_U2MODE_PDSEL_MASK;
            U2MODE = uartMode | setup->parity ;
        }

        /* Configure UART2 mode */
        uartMode = U2MODE;
        uartMode &= ~_U2MODE_STSEL_MASK;
        U2MODE = uartMode | setup->stopBits ;

        /* Configure UART2 Baud Rate */
        U2BRG = brgVal;

        U2MODESET = _U2MODE_ON_MASK;

        status = true;
    }

    return status;
}

/* Push one received byte into the RX ring buffer; on a full buffer the
 * application callback gets one chance to drain it before the byte is
 * dropped. Returns true if the byte was stored. */
/* This routine is only called from ISR. Hence do not disable/enable USART interrupts. */
static inline bool UART2_RxPushByte(uint8_t rdByte)
{
    uint32_t tempInIndex;
    bool isSuccess = false;

    tempInIndex = uart2Obj.rdInIndex + 1;

    if (tempInIndex >= UART2_READ_BUFFER_SIZE)
    {
        tempInIndex = 0;
    }

    if (tempInIndex == uart2Obj.rdOutIndex)
    {
        /* Queue is full - Report it to the application. Application gets a chance to free up space by reading data out from the RX ring buffer */
        if(uart2Obj.rdCallback != NULL)
        {
            uart2Obj.rdCallback(UART_EVENT_READ_BUFFER_FULL, uart2Obj.rdContext);

            /* Read the indices again in case application has freed up space in RX ring buffer */
            tempInIndex = uart2Obj.rdInIndex + 1;

            if (tempInIndex >= UART2_READ_BUFFER_SIZE)
            {
                tempInIndex = 0;
            }
        }
    }

    if (tempInIndex != uart2Obj.rdOutIndex)
    {
        UART2_ReadBuffer[uart2Obj.rdInIndex] = rdByte;
        uart2Obj.rdInIndex = tempInIndex;
        isSuccess = true;
    }
    else
    {
        /* Queue is full. Data will be lost. */
    }

    return isSuccess;
}

/* Fire the read-threshold callback if notifications are enabled: every byte
 * at/above the threshold in persistent mode, exactly at the threshold
 * otherwise. */
/* This routine is only called from ISR. Hence do not disable/enable USART interrupts. */
static void UART2_ReadNotificationSend(void)
{
    uint32_t nUnreadBytesAvailable;

    if (uart2Obj.isRdNotificationEnabled == true)
    {
        nUnreadBytesAvailable = UART2_ReadCountGet();

        if(uart2Obj.rdCallback != NULL)
        {
            if (uart2Obj.isRdNotifyPersistently == true)
            {
                if (nUnreadBytesAvailable >= uart2Obj.rdThreshold)
                {
                    uart2Obj.rdCallback(UART_EVENT_READ_THRESHOLD_REACHED, uart2Obj.rdContext);
                }
            }
            else
            {
                if (nUnreadBytesAvailable == uart2Obj.rdThreshold)
                {
                    uart2Obj.rdCallback(UART_EVENT_READ_THRESHOLD_REACHED, uart2Obj.rdContext);
                }
            }
        }
    }
}

/* Copy up to `size` bytes out of the RX ring buffer into pRdBuffer.
 * RX interrupt is masked around each index read to stay consistent with the
 * ISR producer. Returns the number of bytes actually copied. */
size_t UART2_Read(uint8_t* pRdBuffer, const size_t size)
{
    size_t nBytesRead = 0;
    uint32_t rdOutIndex;
    uint32_t rdInIndex;

    while (nBytesRead < size)
    {
        UART2_RX_INT_DISABLE();

        rdOutIndex = uart2Obj.rdOutIndex;
        rdInIndex = uart2Obj.rdInIndex;

        if (rdOutIndex != rdInIndex)
        {
            pRdBuffer[nBytesRead++] = UART2_ReadBuffer[uart2Obj.rdOutIndex++];

            if (uart2Obj.rdOutIndex >= UART2_READ_BUFFER_SIZE)
            {
                uart2Obj.rdOutIndex = 0;
            }
            UART2_RX_INT_ENABLE();
        }
        else
        {
            UART2_RX_INT_ENABLE();
            break;
        }
    }

    return nBytesRead;
}

/* Number of unread bytes currently held in the RX ring buffer. */
size_t UART2_ReadCountGet(void)
{
    size_t nUnreadBytesAvailable;
    uint32_t rdInIndex;
    uint32_t rdOutIndex;

    /* Take a snapshot of indices to avoid creation of critical section */
    rdInIndex = uart2Obj.rdInIndex;
    rdOutIndex = uart2Obj.rdOutIndex;

    if ( rdInIndex >= rdOutIndex)
    {
        nUnreadBytesAvailable = rdInIndex - rdOutIndex;
    }
    else
    {
        nUnreadBytesAvailable = (UART2_READ_BUFFER_SIZE - rdOutIndex) + rdInIndex;
    }

    return nUnreadBytesAvailable;
}

/* Free space remaining in the RX ring buffer (capacity is SIZE - 1). */
size_t UART2_ReadFreeBufferCountGet(void)
{
    return (UART2_READ_BUFFER_SIZE - 1) - UART2_ReadCountGet();
}

/* Usable capacity of the RX ring buffer. */
size_t UART2_ReadBufferSizeGet(void)
{
    return (UART2_READ_BUFFER_SIZE - 1);
}

/* Enable/disable read-threshold notifications; returns the previous enable
 * state. */
bool UART2_ReadNotificationEnable(bool isEnabled, bool isPersistent)
{
    bool previousStatus = uart2Obj.isRdNotificationEnabled;

    uart2Obj.isRdNotificationEnabled = isEnabled;

    uart2Obj.isRdNotifyPersistently = isPersistent;

    return previousStatus;
}

/* Set the read-notification threshold; zero is ignored. */
void UART2_ReadThresholdSet(uint32_t nBytesThreshold)
{
    if (nBytesThreshold > 0)
    {
        uart2Obj.rdThreshold = nBytesThreshold;
    }
}

/* Register the application callback (and its context) for read events. */
void UART2_ReadCallbackRegister( UART_RING_BUFFER_CALLBACK callback, uintptr_t context)
{
    uart2Obj.rdCallback = callback;

    uart2Obj.rdContext = context;
}

/* Pop one byte from the TX ring buffer into *pWrByte; returns false when
 * the buffer is empty. */
/* This routine is only called from ISR. Hence do not disable/enable USART interrupts. */
static bool UART2_TxPullByte(uint8_t* pWrByte)
{
    bool isSuccess = false;
    uint32_t wrOutIndex = uart2Obj.wrOutIndex;
    uint32_t wrInIndex = uart2Obj.wrInIndex;

    if (wrOutIndex != wrInIndex)
    {
        *pWrByte = UART2_WriteBuffer[uart2Obj.wrOutIndex++];

        if (uart2Obj.wrOutIndex >= UART2_WRITE_BUFFER_SIZE)
        {
            uart2Obj.wrOutIndex = 0;
        }
        isSuccess = true;
    }

    return isSuccess;
}

/* Push one byte into the TX ring buffer; returns false when full. */
static inline bool UART2_TxPushByte(uint8_t wrByte)
{
    uint32_t tempInIndex;
    bool isSuccess = false;

    tempInIndex = uart2Obj.wrInIndex + 1;

    if (tempInIndex >= UART2_WRITE_BUFFER_SIZE)
    {
        tempInIndex = 0;
    }
    if (tempInIndex != uart2Obj.wrOutIndex)
    {
        UART2_WriteBuffer[uart2Obj.wrInIndex] = wrByte;
        uart2Obj.wrInIndex = tempInIndex;
        isSuccess = true;
    }
    else
    {
        /* Queue is full. Report Error. */
    }

    return isSuccess;
}

/* Fire the write-threshold callback if notifications are enabled, based on
 * free space in the TX ring buffer (>= threshold in persistent mode, ==
 * threshold otherwise). */
/* This routine is only called from ISR. Hence do not disable/enable USART interrupts. */
static void UART2_WriteNotificationSend(void)
{
    uint32_t nFreeWrBufferCount;

    if (uart2Obj.isWrNotificationEnabled == true)
    {
        nFreeWrBufferCount = UART2_WriteFreeBufferCountGet();

        if(uart2Obj.wrCallback != NULL)
        {
            if (uart2Obj.isWrNotifyPersistently == true)
            {
                if (nFreeWrBufferCount >= uart2Obj.wrThreshold)
                {
                    uart2Obj.wrCallback(UART_EVENT_WRITE_THRESHOLD_REACHED, uart2Obj.wrContext);
                }
            }
            else
            {
                if (nFreeWrBufferCount == uart2Obj.wrThreshold)
                {
                    uart2Obj.wrCallback(UART_EVENT_WRITE_THRESHOLD_REACHED, uart2Obj.wrContext);
                }
            }
        }
    }
}

/* Bytes queued in the TX ring buffer but not yet sent to the hardware. */
static size_t UART2_WritePendingBytesGet(void)
{
    size_t nPendingTxBytes;

    /* Take a snapshot of indices to avoid creation of critical section */
    uint32_t wrOutIndex = uart2Obj.wrOutIndex;
    uint32_t wrInIndex = uart2Obj.wrInIndex;

    if ( wrInIndex >= wrOutIndex)
    {
        nPendingTxBytes = wrInIndex - wrOutIndex;
    }
    else
    {
        nPendingTxBytes = (UART2_WRITE_BUFFER_SIZE - wrOutIndex) + wrInIndex;
    }

    return nPendingTxBytes;
}

/* Public wrapper around UART2_WritePendingBytesGet(). */
size_t UART2_WriteCountGet(void)
{
    size_t nPendingTxBytes;

    nPendingTxBytes = UART2_WritePendingBytesGet();

    return nPendingTxBytes;
}

/* Queue up to `size` bytes for transmission; stops early if the TX ring
 * buffer fills. Re-enables the TX interrupt when data is pending so the ISR
 * drains the buffer. Returns the number of bytes queued. */
size_t UART2_Write(uint8_t* pWrBuffer, const size_t size )
{
    size_t nBytesWritten = 0;

    UART2_TX_INT_DISABLE();

    while (nBytesWritten < size)
    {
        if (UART2_TxPushByte(pWrBuffer[nBytesWritten]) == true)
        {
            nBytesWritten++;
        }
        else
        {
            /* Queue is full, exit the loop */
            break;
        }
    }

    /* Check if any data is pending for transmission */
    if (UART2_WritePendingBytesGet() > 0)
    {
        /* Enable TX interrupt as data is pending for transmission */
        UART2_TX_INT_ENABLE();
    }

    return nBytesWritten;
}

/* Free space remaining in the TX ring buffer (capacity is SIZE - 1). */
size_t UART2_WriteFreeBufferCountGet(void)
{
    return (UART2_WRITE_BUFFER_SIZE - 1) - UART2_WriteCountGet();
}

/* Usable capacity of the TX ring buffer. */
size_t UART2_WriteBufferSizeGet(void)
{
    return (UART2_WRITE_BUFFER_SIZE - 1);
}

/* Enable/disable write-threshold notifications; returns the previous enable
 * state. */
bool UART2_WriteNotificationEnable(bool isEnabled, bool isPersistent)
{
    bool previousStatus = uart2Obj.isWrNotificationEnabled;

    uart2Obj.isWrNotificationEnabled = isEnabled;

    uart2Obj.isWrNotifyPersistently = isPersistent;

    return previousStatus;
}

/* Set the write-notification threshold; zero is ignored. */
void UART2_WriteThresholdSet(uint32_t nBytesThreshold)
{
    if (nBytesThreshold > 0)
    {
        uart2Obj.wrThreshold = nBytesThreshold;
    }
}

/* Register the application callback (and its context) for write events. */
void UART2_WriteCallbackRegister( UART_RING_BUFFER_CALLBACK callback, uintptr_t context)
{
    uart2Obj.wrCallback = callback;

    uart2Obj.wrContext = context;
}

/* Return (and clear) the current overrun/framing/parity error state. */
UART_ERROR UART2_ErrorGet( void )
{
    UART_ERROR errors = UART_ERROR_NONE;
    uint32_t status = U2STA;

    errors = (UART_ERROR)(status & (_U2STA_OERR_MASK | _U2STA_FERR_MASK | _U2STA_PERR_MASK));

    if(errors != UART_ERROR_NONE)
    {
        UART2_ErrorClear();
    }

    /* All errors are cleared, but send the previous error state */
    return errors;
}

/* True while hardware auto-baud detection is armed (ABAUD bit set). */
bool UART2_AutoBaudQuery( void )
{
    if(U2MODE & _U2MODE_ABAUD_MASK)
        return true;
    else
        return false;
}

/* Arm auto-baud detection. Disabling is intentionally not supported here. */
void UART2_AutoBaudSet( bool enable )
{
    if( enable == true )
    {
        U2MODESET = _U2MODE_ABAUD_MASK;
    }
    /* Turning off ABAUD if it was on can lead to unpredictable behavior, so
       direction of control is not allowed in this function. */
}

/* Error ISR: masks error/RX interrupts and reports UART_EVENT_READ_ERROR;
 * the client must call UART2_ErrorGet() to clear and re-arm. */
void UART2_FAULT_InterruptHandler (void)
{
    /* Disable the fault interrupt */
    IEC1CLR = _IEC1_U2EIE_MASK;

    /* Disable the receive interrupt */
    IEC1CLR = _IEC1_U2RXIE_MASK;

    /* Client must call UARTx_ErrorGet() function to clear the errors */
    if( uart2Obj.rdCallback != NULL )
    {
        uart2Obj.rdCallback(UART_EVENT_READ_ERROR, uart2Obj.rdContext);
    }
}

/* RX ISR: drain the hardware FIFO into the RX ring buffer, notifying per
 * byte; bytes are silently dropped when the ring buffer stays full. */
void UART2_RX_InterruptHandler (void)
{
    /* Clear UART2 RX Interrupt flag */
    IFS1CLR = _IFS1_U2RXIF_MASK;

    /* Keep reading until there is a character available in the RX FIFO */
    while((U2STA & _U2STA_URXDA_MASK) == _U2STA_URXDA_MASK)
    {
        if (UART2_RxPushByte( (uint8_t )(U2RXREG) ) == true)
        {
            UART2_ReadNotificationSend();
        }
        else
        {
            /* UART RX buffer is full */
        }
    }
}

/* TX ISR: move bytes from the TX ring buffer into the hardware FIFO until
 * either is exhausted; disables itself once nothing is left to send. */
void UART2_TX_InterruptHandler (void)
{
    uint8_t wrByte;

    /* Check if any data is pending for transmission */
    if (UART2_WritePendingBytesGet() > 0)
    {
        /* Clear UART2TX Interrupt flag */
        IFS1CLR = _IFS1_U2TXIF_MASK;

        /* Keep writing to the TX FIFO as long as there is space */
        while(!(U2STA & _U2STA_UTXBF_MASK))
        {
            if (UART2_TxPullByte(&wrByte) == true)
            {
                U2TXREG = wrByte;

                /* Send notification */
                UART2_WriteNotificationSend();
            }
            else
            {
                /* Nothing to transmit. Disable the data register empty interrupt. */
                UART2_TX_INT_DISABLE();
                break;
            }
        }
    }
    else
    {
        /* Nothing to transmit. Disable the data register empty interrupt. */
        UART2_TX_INT_DISABLE();
    }
}
204944.c
/*
 * Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2011      Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2013      Los Alamos National Security, LLC.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "orte_config.h"
#include "orte/constants.h"

#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/mca/base/mca_base_component_repository.h"

#include "orte/mca/ess/base/base.h"

/*
 * Run the MCA selection logic over the opened "ess" components and install
 * the winning module as the global `orte_ess` (copied by value -- there is
 * no global component structure to save).
 *
 * Returns ORTE_SUCCESS on success, or ORTE_ERR_SILENT when selection fails
 * (mca_base_select() has already printed the error).
 */
int orte_ess_base_select(void)
{
    orte_ess_base_component_t *best_component = NULL;
    orte_ess_base_module_t *best_module = NULL;

    /*
     * Select the best component
     */
    if( OPAL_SUCCESS != mca_base_select("ess", orte_ess_base_framework.framework_output,
                                        &orte_ess_base_framework.framework_components,
                                        (mca_base_module_t **) &best_module,
                                        (mca_base_component_t **) &best_component) ) {
        /* error message emitted by fn above */
        return ORTE_ERR_SILENT;
    }

    /* Save the winner */
    /* No global component structure */
    orte_ess = *best_module;

    return ORTE_SUCCESS;
}
831275.c
#include "kernel/types.h"
#include "kernel/stat.h"
#include "user.h"
#include "kernel/fcntl.h"

#define BLOCKSIZE 1024
#define NBLOCKS 140

/*
 * Create (or truncate) `filename` and write `count` blocks of `blocksize`
 * bytes from `buf` into it, incrementing *totalblocks once per block so the
 * caller can track overall disk usage. Exits the process on any I/O error
 * (which is the expected way for this disk-filling test to stop).
 */
void
createfile(char *filename, char *buf, int blocksize, int count, int *totalblocks)
{
  int fd;
  int i;
  int rv;

  fd = open(filename, O_CREATE | O_WRONLY);
  if(fd < 0) {
    printf("diskbomb: open() failed, exiting.\n");
    exit(0);
  }

  for(i = 0; i < count; i++) {
    rv = write(fd, buf, blocksize);
    if(rv < 0) {
      printf("diskbomb: write() failed, exiting.\n");
      exit(0);
    }
    *totalblocks = *totalblocks + 1;
    printf("diskbomb: total blocks written: %d\n", *totalblocks);
  }

  close(fd);
}

/*
 * Build the name of the i-th file in place: "DB" plus one byte derived from
 * i (i + 21 -- distinct per file but not guaranteed printable).
 *
 * BUG FIX: the name was never NUL-terminated, so open() previously saw
 * whatever stack garbage followed the first three bytes of the caller's
 * uninitialized filename[] array.
 */
void
setfilename(char *filename, int i)
{
  filename[0] = 'D';
  filename[1] = 'B';
  filename[2] = i + 21;
  filename[3] = '\0';   /* terminate: filename is an uninitialized stack buffer */
}

int
main(int argc, char *argv[])
{
  int i = 0;
  int totalblocks = 0;
  int totalfiles = 0;
  char filename[16];
  char buf[BLOCKSIZE];

  /* Initialize buf with 'a' */
  for (i = 0; i < BLOCKSIZE; i++) {
    buf[i] = 'a';
  }

  /* Keep creating files until the disk fills; createfile() exits the
   * process once an open() or write() fails. */
  while(1) {
    setfilename(filename, totalfiles);
    printf("diskbomb: creating %s\n", filename);
    createfile(filename, buf, BLOCKSIZE, NBLOCKS, &totalblocks);
    totalfiles += 1;
    printf("diskbomb: total files created: %d\n", totalfiles);
  }

  exit(0);
}
816533.c
/* * Secret Labs' Regular Expression Engine * * regular expression matching engine * * partial history: * 1999-10-24 fl created (based on existing template matcher code) * 2000-03-06 fl first alpha, sort of * 2000-08-01 fl fixes for 1.6b1 * 2000-08-07 fl use PyOS_CheckStack() if available * 2000-09-20 fl added expand method * 2001-03-20 fl lots of fixes for 2.1b2 * 2001-04-15 fl export copyright as Python attribute, not global * 2001-04-28 fl added __copy__ methods (work in progress) * 2001-05-14 fl fixes for 1.5.2 compatibility * 2001-07-01 fl added BIGCHARSET support (from Martin von Loewis) * 2001-10-18 fl fixed group reset issue (from Matthew Mueller) * 2001-10-20 fl added split primitive; reenable unicode for 1.6/2.0/2.1 * 2001-10-21 fl added sub/subn primitive * 2001-10-24 fl added finditer primitive (for 2.2 only) * 2001-12-07 fl fixed memory leak in sub/subn (Guido van Rossum) * 2002-11-09 fl fixed empty sub/subn return type * 2003-04-18 mvl fully support 4-byte codes * 2003-10-17 gn implemented non recursive scheme * * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. * * This version of the SRE library can be redistributed under CNRI's * Python 1.6 license. For any other use, please contact Secret Labs * AB ([email protected]). * * Portions of this engine have been developed in cooperation with * CNRI. Hewlett-Packard provided funding for 1.6 integration and * other compatibility work. 
*/ #ifndef SRE_RECURSIVE static char copyright[] = " SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB "; #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" /* offsetof */ #include "sre.h" #include <ctype.h> /* name of this module, minus the leading underscore */ #if !defined(SRE_MODULE) #define SRE_MODULE "sre" #endif #define SRE_PY_MODULE "re" /* defining this one enables tracing */ #undef VERBOSE #if PY_VERSION_HEX >= 0x01060000 #if PY_VERSION_HEX < 0x02020000 || defined(Py_USING_UNICODE) /* defining this enables unicode support (default under 1.6a1 and later) */ #define HAVE_UNICODE #endif #endif /* -------------------------------------------------------------------- */ /* optional features */ /* enables fast searching */ #define USE_FAST_SEARCH /* enables aggressive inlining (always on for Visual C) */ #undef USE_INLINE /* enables copy/deepcopy handling (work in progress) */ #undef USE_BUILTIN_COPY #if PY_VERSION_HEX < 0x01060000 #define PyObject_DEL(op) PyMem_DEL((op)) #endif /* -------------------------------------------------------------------- */ #if defined(_MSC_VER) #pragma optimize("agtw", on) /* doesn't seem to make much difference... 
*/ #pragma warning(disable: 4710) /* who cares if functions are not inlined ;-) */ /* fastest possible local call under MSVC */ #define LOCAL(type) static __inline type __fastcall #elif defined(USE_INLINE) #define LOCAL(type) static inline type #else #define LOCAL(type) static type #endif /* error codes */ #define SRE_ERROR_ILLEGAL -1 /* illegal opcode */ #define SRE_ERROR_STATE -2 /* illegal state */ #define SRE_ERROR_RECURSION_LIMIT -3 /* runaway recursion */ #define SRE_ERROR_MEMORY -9 /* out of memory */ #define SRE_ERROR_INTERRUPTED -10 /* signal handler raised exception */ #if defined(VERBOSE) #define TRACE(v) printf v #else #define TRACE(v) #endif /* -------------------------------------------------------------------- */ /* search engine state */ /* default character predicates (run sre_chars.py to regenerate tables) */ #define SRE_DIGIT_MASK 1 #define SRE_SPACE_MASK 2 #define SRE_LINEBREAK_MASK 4 #define SRE_ALNUM_MASK 8 #define SRE_WORD_MASK 16 /* FIXME: this assumes ASCII. create tables in init_sre() instead */ static char sre_char_info[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 }; static char sre_char_lower[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 }; #define SRE_IS_DIGIT(ch)\ ((ch) < 128 ? (sre_char_info[(ch)] & SRE_DIGIT_MASK) : 0) #define SRE_IS_SPACE(ch)\ ((ch) < 128 ? (sre_char_info[(ch)] & SRE_SPACE_MASK) : 0) #define SRE_IS_LINEBREAK(ch)\ ((ch) < 128 ? (sre_char_info[(ch)] & SRE_LINEBREAK_MASK) : 0) #define SRE_IS_ALNUM(ch)\ ((ch) < 128 ? (sre_char_info[(ch)] & SRE_ALNUM_MASK) : 0) #define SRE_IS_WORD(ch)\ ((ch) < 128 ? (sre_char_info[(ch)] & SRE_WORD_MASK) : 0) static unsigned int sre_lower(unsigned int ch) { return ((ch) < 128 ? (unsigned int)sre_char_lower[ch] : ch); } /* locale-specific character predicates */ /* !(c & ~N) == (c < N+1) for any unsigned c, this avoids * warnings when c's type supports only numbers < N+1 */ #define SRE_LOC_IS_DIGIT(ch) (!((ch) & ~255) ? isdigit((ch)) : 0) #define SRE_LOC_IS_SPACE(ch) (!((ch) & ~255) ? isspace((ch)) : 0) #define SRE_LOC_IS_LINEBREAK(ch) ((ch) == '\n') #define SRE_LOC_IS_ALNUM(ch) (!((ch) & ~255) ? isalnum((ch)) : 0) #define SRE_LOC_IS_WORD(ch) (SRE_LOC_IS_ALNUM((ch)) || (ch) == '_') static unsigned int sre_lower_locale(unsigned int ch) { return ((ch) < 256 ? 
(unsigned int)tolower((ch)) : ch); } /* unicode-specific character predicates */ #if defined(HAVE_UNICODE) #define SRE_UNI_IS_DIGIT(ch) Py_UNICODE_ISDECIMAL((Py_UNICODE)(ch)) #define SRE_UNI_IS_SPACE(ch) Py_UNICODE_ISSPACE((Py_UNICODE)(ch)) #define SRE_UNI_IS_LINEBREAK(ch) Py_UNICODE_ISLINEBREAK((Py_UNICODE)(ch)) #define SRE_UNI_IS_ALNUM(ch) Py_UNICODE_ISALNUM((Py_UNICODE)(ch)) #define SRE_UNI_IS_WORD(ch) (SRE_UNI_IS_ALNUM((ch)) || (ch) == '_') static unsigned int sre_lower_unicode(unsigned int ch) { return (unsigned int) Py_UNICODE_TOLOWER((Py_UNICODE)(ch)); } #endif LOCAL(int) sre_category(SRE_CODE category, unsigned int ch) { switch (category) { case SRE_CATEGORY_DIGIT: return SRE_IS_DIGIT(ch); case SRE_CATEGORY_NOT_DIGIT: return !SRE_IS_DIGIT(ch); case SRE_CATEGORY_SPACE: return SRE_IS_SPACE(ch); case SRE_CATEGORY_NOT_SPACE: return !SRE_IS_SPACE(ch); case SRE_CATEGORY_WORD: return SRE_IS_WORD(ch); case SRE_CATEGORY_NOT_WORD: return !SRE_IS_WORD(ch); case SRE_CATEGORY_LINEBREAK: return SRE_IS_LINEBREAK(ch); case SRE_CATEGORY_NOT_LINEBREAK: return !SRE_IS_LINEBREAK(ch); case SRE_CATEGORY_LOC_WORD: return SRE_LOC_IS_WORD(ch); case SRE_CATEGORY_LOC_NOT_WORD: return !SRE_LOC_IS_WORD(ch); #if defined(HAVE_UNICODE) case SRE_CATEGORY_UNI_DIGIT: return SRE_UNI_IS_DIGIT(ch); case SRE_CATEGORY_UNI_NOT_DIGIT: return !SRE_UNI_IS_DIGIT(ch); case SRE_CATEGORY_UNI_SPACE: return SRE_UNI_IS_SPACE(ch); case SRE_CATEGORY_UNI_NOT_SPACE: return !SRE_UNI_IS_SPACE(ch); case SRE_CATEGORY_UNI_WORD: return SRE_UNI_IS_WORD(ch); case SRE_CATEGORY_UNI_NOT_WORD: return !SRE_UNI_IS_WORD(ch); case SRE_CATEGORY_UNI_LINEBREAK: return SRE_UNI_IS_LINEBREAK(ch); case SRE_CATEGORY_UNI_NOT_LINEBREAK: return !SRE_UNI_IS_LINEBREAK(ch); #else case SRE_CATEGORY_UNI_DIGIT: return SRE_IS_DIGIT(ch); case SRE_CATEGORY_UNI_NOT_DIGIT: return !SRE_IS_DIGIT(ch); case SRE_CATEGORY_UNI_SPACE: return SRE_IS_SPACE(ch); case SRE_CATEGORY_UNI_NOT_SPACE: return !SRE_IS_SPACE(ch); case SRE_CATEGORY_UNI_WORD: return 
SRE_LOC_IS_WORD(ch); case SRE_CATEGORY_UNI_NOT_WORD: return !SRE_LOC_IS_WORD(ch); case SRE_CATEGORY_UNI_LINEBREAK: return SRE_IS_LINEBREAK(ch); case SRE_CATEGORY_UNI_NOT_LINEBREAK: return !SRE_IS_LINEBREAK(ch); #endif } return 0; } /* helpers */ static void data_stack_dealloc(SRE_STATE* state) { if (state->data_stack) { PyMem_FREE(state->data_stack); state->data_stack = NULL; } state->data_stack_size = state->data_stack_base = 0; } static int data_stack_grow(SRE_STATE* state, Py_ssize_t size) { Py_ssize_t minsize, cursize; minsize = state->data_stack_base+size; cursize = state->data_stack_size; if (cursize < minsize) { void* stack; cursize = minsize+minsize/4+1024; TRACE(("allocate/grow stack %d\n", cursize)); stack = PyMem_REALLOC(state->data_stack, cursize); if (!stack) { data_stack_dealloc(state); return SRE_ERROR_MEMORY; } state->data_stack = (char *)stack; state->data_stack_size = cursize; } return 0; } /* generate 8-bit version */ #define SRE_CHAR unsigned char #define SRE_AT sre_at #define SRE_COUNT sre_count #define SRE_CHARSET sre_charset #define SRE_INFO sre_info #define SRE_MATCH sre_match #define SRE_MATCH_CONTEXT sre_match_context #define SRE_SEARCH sre_search #define SRE_LITERAL_TEMPLATE sre_literal_template #if defined(HAVE_UNICODE) #define SRE_RECURSIVE #include "_sre.c" #undef SRE_RECURSIVE #undef SRE_LITERAL_TEMPLATE #undef SRE_SEARCH #undef SRE_MATCH #undef SRE_MATCH_CONTEXT #undef SRE_INFO #undef SRE_CHARSET #undef SRE_COUNT #undef SRE_AT #undef SRE_CHAR /* generate 16-bit unicode version */ #define SRE_CHAR Py_UNICODE #define SRE_AT sre_uat #define SRE_COUNT sre_ucount #define SRE_CHARSET sre_ucharset #define SRE_INFO sre_uinfo #define SRE_MATCH sre_umatch #define SRE_MATCH_CONTEXT sre_umatch_context #define SRE_SEARCH sre_usearch #define SRE_LITERAL_TEMPLATE sre_uliteral_template #endif #endif /* SRE_RECURSIVE */ /* -------------------------------------------------------------------- */ /* String matching engine */ /* the following section is 
compiled twice, with different character settings */ LOCAL(int) SRE_AT(SRE_STATE* state, SRE_CHAR* ptr, SRE_CODE at) { /* check if pointer is at given position */ Py_ssize_t thisp, thatp; switch (at) { case SRE_AT_BEGINNING: case SRE_AT_BEGINNING_STRING: return ((void*) ptr == state->beginning); case SRE_AT_BEGINNING_LINE: return ((void*) ptr == state->beginning || SRE_IS_LINEBREAK((int) ptr[-1])); case SRE_AT_END: return (((void*) (ptr+1) == state->end && SRE_IS_LINEBREAK((int) ptr[0])) || ((void*) ptr == state->end)); case SRE_AT_END_LINE: return ((void*) ptr == state->end || SRE_IS_LINEBREAK((int) ptr[0])); case SRE_AT_END_STRING: return ((void*) ptr == state->end); case SRE_AT_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? SRE_IS_WORD((int) ptr[0]) : 0; return thisp != thatp; case SRE_AT_NON_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? SRE_IS_WORD((int) ptr[0]) : 0; return thisp == thatp; case SRE_AT_LOC_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_LOC_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? SRE_LOC_IS_WORD((int) ptr[0]) : 0; return thisp != thatp; case SRE_AT_LOC_NON_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_LOC_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? SRE_LOC_IS_WORD((int) ptr[0]) : 0; return thisp == thatp; #if defined(HAVE_UNICODE) case SRE_AT_UNI_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_UNI_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? 
SRE_UNI_IS_WORD((int) ptr[0]) : 0; return thisp != thatp; case SRE_AT_UNI_NON_BOUNDARY: if (state->beginning == state->end) return 0; thatp = ((void*) ptr > state->beginning) ? SRE_UNI_IS_WORD((int) ptr[-1]) : 0; thisp = ((void*) ptr < state->end) ? SRE_UNI_IS_WORD((int) ptr[0]) : 0; return thisp == thatp; #endif } return 0; } LOCAL(int) SRE_CHARSET(SRE_CODE* set, SRE_CODE ch) { /* check if character is a member of the given set */ int ok = 1; for (;;) { switch (*set++) { case SRE_OP_FAILURE: return !ok; case SRE_OP_LITERAL: /* <LITERAL> <code> */ if (ch == set[0]) return ok; set++; break; case SRE_OP_CATEGORY: /* <CATEGORY> <code> */ if (sre_category(set[0], (int) ch)) return ok; set += 1; break; case SRE_OP_CHARSET: if (sizeof(SRE_CODE) == 2) { /* <CHARSET> <bitmap> (16 bits per code word) */ if (ch < 256 && (set[ch >> 4] & (1 << (ch & 15)))) return ok; set += 16; } else { /* <CHARSET> <bitmap> (32 bits per code word) */ if (ch < 256 && (set[ch >> 5] & (1 << (ch & 31)))) return ok; set += 8; } break; case SRE_OP_RANGE: /* <RANGE> <lower> <upper> */ if (set[0] <= ch && ch <= set[1]) return ok; set += 2; break; case SRE_OP_NEGATE: ok = !ok; break; case SRE_OP_BIGCHARSET: /* <BIGCHARSET> <blockcount> <256 blockindices> <blocks> */ { Py_ssize_t count, block; count = *(set++); if (sizeof(SRE_CODE) == 2) { block = ((unsigned char*)set)[ch >> 8]; set += 128; if (set[block*16 + ((ch & 255)>>4)] & (1 << (ch & 15))) return ok; set += count*16; } else { /* !(c & ~N) == (c < N+1) for any unsigned c, this avoids * warnings when c's type supports only numbers < N+1 */ if (!(ch & ~65535)) block = ((unsigned char*)set)[ch >> 8]; else block = -1; set += 64; if (block >=0 && (set[block*8 + ((ch & 255)>>5)] & (1 << (ch & 31)))) return ok; set += count*8; } break; } default: /* internal error -- there's not much we can do about it here, so let's just pretend it didn't match... 
*/
            return 0;
        }
    }
}

LOCAL(Py_ssize_t)
SRE_MATCH(SRE_STATE* state, SRE_CODE* pattern);

/* Count how many consecutive matches of a single-width pattern element
   start at state->ptr, consuming at most maxcount characters (a maxcount
   of 65535 is treated as "no limit" by the clamp below).  Used by the
   REPEAT_ONE / MIN_REPEAT_ONE operators.  Returns the number of
   characters matched (>= 0), or a negative SRE error code propagated
   from SRE_MATCH in the general-subpattern case. */
LOCAL(Py_ssize_t)
SRE_COUNT(SRE_STATE* state, SRE_CODE* pattern, Py_ssize_t maxcount)
{
    SRE_CODE chr;
    SRE_CHAR* ptr = (SRE_CHAR *)state->ptr;
    SRE_CHAR* end = (SRE_CHAR *)state->end;
    Py_ssize_t i;

    /* adjust end: never scan further than maxcount characters ahead
       (65535 is the "unbounded" sentinel and leaves end untouched) */
    if (maxcount < end - ptr && maxcount != 65535)
        end = ptr + maxcount;

    switch (pattern[0]) {

    case SRE_OP_IN:
        /* repeated set; set data starts at pattern+2 (skipping the
           opcode's skip field) */
        TRACE(("|%p|%p|COUNT IN\n", pattern, ptr));
        while (ptr < end && SRE_CHARSET(pattern + 2, *ptr))
            ptr++;
        break;

    case SRE_OP_ANY:
        /* repeated dot wildcard. */
        TRACE(("|%p|%p|COUNT ANY\n", pattern, ptr));
        while (ptr < end && !SRE_IS_LINEBREAK(*ptr))
            ptr++;
        break;

    case SRE_OP_ANY_ALL:
        /* repeated dot wildcard.  skip to the end of the target
           string, and backtrack from there */
        TRACE(("|%p|%p|COUNT ANY_ALL\n", pattern, ptr));
        ptr = end;
        break;

    case SRE_OP_LITERAL:
        /* repeated literal */
        chr = pattern[1];
        TRACE(("|%p|%p|COUNT LITERAL %d\n", pattern, ptr, chr));
        while (ptr < end && (SRE_CODE) *ptr == chr)
            ptr++;
        break;

    case SRE_OP_LITERAL_IGNORE:
        /* repeated literal, case-insensitive: chr was lowered at
           compile time, so only the input character is folded here */
        chr = pattern[1];
        TRACE(("|%p|%p|COUNT LITERAL_IGNORE %d\n", pattern, ptr, chr));
        while (ptr < end && (SRE_CODE) state->lower(*ptr) == chr)
            ptr++;
        break;

    case SRE_OP_NOT_LITERAL:
        /* repeated non-literal */
        chr = pattern[1];
        TRACE(("|%p|%p|COUNT NOT_LITERAL %d\n", pattern, ptr, chr));
        while (ptr < end && (SRE_CODE) *ptr != chr)
            ptr++;
        break;

    case SRE_OP_NOT_LITERAL_IGNORE:
        /* repeated non-literal, case-insensitive */
        chr = pattern[1];
        TRACE(("|%p|%p|COUNT NOT_LITERAL_IGNORE %d\n", pattern, ptr, chr));
        while (ptr < end && (SRE_CODE) state->lower(*ptr) != chr)
            ptr++;
        break;

    default:
        /* repeated single character pattern: fall back to the full
           matcher.  SRE_MATCH advances state->ptr on each success, so
           the local ptr is left behind as the loop's start marker. */
        TRACE(("|%p|%p|COUNT SUBPATTERN\n", pattern, ptr));
        while ((SRE_CHAR*) state->ptr < end) {
            i = SRE_MATCH(state, pattern);
            if (i < 0)
                return i;       /* propagate engine error */
            if (!i)
                break;          /* no further match */
        }
        TRACE(("|%p|%p|COUNT %d\n", pattern, ptr,
               (SRE_CHAR*) state->ptr - ptr));
        return (SRE_CHAR*) state->ptr - ptr;
    }

    /* fast-path cases advanced the local ptr only; state->ptr still
       marks where counting began */
    TRACE(("|%p|%p|COUNT %d\n", pattern, ptr,
           ptr - (SRE_CHAR*) state->ptr));
    return ptr - (SRE_CHAR*) state->ptr;
}

#if 0 /* not used in this release */
LOCAL(int)
SRE_INFO(SRE_STATE* state, SRE_CODE* pattern)
{
    /* check if an SRE_OP_INFO block matches at the current position.
       returns the number of SRE_CODE objects to skip if successful, 0
       if no match */

    SRE_CHAR* end = state->end;
    SRE_CHAR* ptr = state->ptr;
    Py_ssize_t i;

    /* check minimal length */
    if (pattern[3] && (end - ptr) < pattern[3])
        return 0;

    /* check known prefix */
    if (pattern[2] & SRE_INFO_PREFIX && pattern[5] > 1) {
        /* <length> <skip> <prefix data> <overlap data> */
        for (i = 0; i < pattern[5]; i++)
            if ((SRE_CODE) ptr[i] != pattern[7 + i])
                return 0;
        return pattern[0] + 2 * pattern[6];
    }
    return pattern[0];
}
#endif

/* The macros below should be used to protect recursive SRE_MATCH()
 * calls that *failed* and do *not* return immediately (IOW, those
 * that will backtrack). Explaining:
 *
 * - Recursive SRE_MATCH() returned true: that's usually a success
 *   (besides atypical cases like ASSERT_NOT), therefore there's no
 *   reason to restore lastmark;
 *
 * - Recursive SRE_MATCH() returned false but the current SRE_MATCH()
 *   is returning to the caller: If the current SRE_MATCH() is the
 *   top function of the recursion, returning false will be a matching
 *   failure, and it doesn't matter where lastmark is pointing to.
 * If it's *not* the top function, it will be a recursive SRE_MATCH()
 * failure by itself, and the calling SRE_MATCH() will have to deal
 * with the failure by the same rules explained here (it will restore
 * lastmark by itself if necessary);
 *
 * - Recursive SRE_MATCH() returned false, and will continue the
 * outside 'for' loop: must be protected when breaking, since the next
 * OP could potentially depend on lastmark;
 *
 * - Recursive SRE_MATCH() returned false, and will be called again
 * inside a local for/while loop: must be protected between each
 * loop iteration, since the recursive SRE_MATCH() could do anything,
 * and could potentially depend on lastmark.
 *
 * For more information, check the discussion at SF patch #712900.
 */

/* Snapshot/restore the mark bookkeeping (lastmark/lastindex) into the
   current match context; used around backtracking points per the
   rules described above.  NOTE: these expand references to the local
   variables `ctx` and `state` of SRE_MATCH(). */
#define LASTMARK_SAVE()     \
    do { \
        ctx->lastmark = state->lastmark; \
        ctx->lastindex = state->lastindex; \
    } while (0)
#define LASTMARK_RESTORE()  \
    do { \
        state->lastmark = ctx->lastmark; \
        state->lastindex = ctx->lastindex; \
    } while (0)

/* Exit helpers for SRE_MATCH(): RETURN_ERROR leaves immediately;
   FAILURE/SUCCESS set `ret` and jump to the common `exit` label so the
   simulated recursion can unwind through the context stack. */
#define RETURN_ERROR(i) do { return i; } while(0)
#define RETURN_FAILURE do { ret = 0; goto exit; } while(0)
#define RETURN_SUCCESS do { ret = 1; goto exit; } while(0)

#define RETURN_ON_ERROR(i) \
    do { if (i < 0) RETURN_ERROR(i); } while (0)
#define RETURN_ON_SUCCESS(i) \
    do { RETURN_ON_ERROR(i); if (i > 0) RETURN_SUCCESS; } while (0)
#define RETURN_ON_FAILURE(i) \
    do { RETURN_ON_ERROR(i); if (i == 0) RETURN_FAILURE; } while (0)

/* stringize helper for TRACE output */
#define SFY(x) #x

/* Reserve sizeof(type) bytes on the state's data stack and point `ptr`
   at them.  Growing the stack may reallocate (and thus move) it, so
   the current context pointer is re-derived from its offset `ctx_pos`
   via DATA_STACK_LOOKUP_AT whenever a grow happens.  Expands
   references to SRE_MATCH() locals `alloc_pos` and `ctx_pos`. */
#define DATA_STACK_ALLOC(state, type, ptr) \
do { \
    alloc_pos = state->data_stack_base; \
    TRACE(("allocating %s in %d (%d)\n", \
           SFY(type), alloc_pos, sizeof(type))); \
    if (state->data_stack_size < alloc_pos+sizeof(type)) { \
        int j = data_stack_grow(state, sizeof(type)); \
        if (j < 0) return j; \
        if (ctx_pos != -1) \
            DATA_STACK_LOOKUP_AT(state, SRE_MATCH_CONTEXT, ctx, ctx_pos); \
    } \
    ptr = (type*)(state->data_stack+alloc_pos); \
    state->data_stack_base += sizeof(type); \
} while (0)

/* Recompute a typed pointer into the data stack from a stable offset
   (safe across reallocations of the stack). */
#define DATA_STACK_LOOKUP_AT(state, type, ptr, pos) \
do { \
    TRACE(("looking up %s at %d\n", SFY(type), pos)); \
    ptr = (type*)(state->data_stack+pos); \
} while (0)

/* Copy `size` raw bytes onto / off the data stack.  PUSH may grow the
   stack and therefore also refreshes `ctx` like DATA_STACK_ALLOC.
   POP with discard=0 peeks without shrinking the stack. */
#define DATA_STACK_PUSH(state, data, size) \
do { \
    TRACE(("copy data in %p to %d (%d)\n", \
           data, state->data_stack_base, size)); \
    if (state->data_stack_size < state->data_stack_base+size) { \
        int j = data_stack_grow(state, size); \
        if (j < 0) return j; \
        if (ctx_pos != -1) \
            DATA_STACK_LOOKUP_AT(state, SRE_MATCH_CONTEXT, ctx, ctx_pos); \
    } \
    memcpy(state->data_stack+state->data_stack_base, data, size); \
    state->data_stack_base += size; \
} while (0)

#define DATA_STACK_POP(state, data, size, discard) \
do { \
    TRACE(("copy data to %p from %d (%d)\n", \
           data, state->data_stack_base-size, size)); \
    memcpy(data, state->data_stack+state->data_stack_base-size, size); \
    if (discard) \
        state->data_stack_base -= size; \
} while (0)

#define DATA_STACK_POP_DISCARD(state, size) \
do { \
    TRACE(("discard data from %d (%d)\n", \
           state->data_stack_base-size, size)); \
    state->data_stack_base -= size; \
} while(0)

/* Typed single-object convenience wrappers over the raw stack ops. */
#define DATA_PUSH(x) \
    DATA_STACK_PUSH(state, (x), sizeof(*(x)))
#define DATA_POP(x) \
    DATA_STACK_POP(state, (x), sizeof(*(x)), 1)
#define DATA_POP_DISCARD(x) \
    DATA_STACK_POP_DISCARD(state, sizeof(*(x)))
#define DATA_ALLOC(t,p) \
    DATA_STACK_ALLOC(state, t, p)
#define DATA_LOOKUP_AT(t,p,pos) \
    DATA_STACK_LOOKUP_AT(state,t,p,pos)

/* Save/restore the first (lastmark+1) entries of state->mark on the
   data stack; no-ops when lastmark <= 0.  MARK_PUSH copies lastmark
   into the local `i` first because ctx->lastmark may move if the data
   stack is reallocated mid-push. */
#define MARK_PUSH(lastmark) \
    do if (lastmark > 0) { \
        i = lastmark; /* ctx->lastmark may change if reallocated */ \
        DATA_STACK_PUSH(state, state->mark, (i+1)*sizeof(void*)); \
    } while (0)
#define MARK_POP(lastmark) \
    do if (lastmark > 0) { \
        DATA_STACK_POP(state, state->mark, (lastmark+1)*sizeof(void*), 1); \
    } while (0)
#define MARK_POP_KEEP(lastmark) \
    do if (lastmark > 0) { \
        DATA_STACK_POP(state, state->mark, (lastmark+1)*sizeof(void*), 0); \
    } while (0)
#define MARK_POP_DISCARD(lastmark) \
    do if (lastmark > 0) { \
        DATA_STACK_POP_DISCARD(state, (lastmark+1)*sizeof(void*)); \
    } while (0)

/* Jump codes stored in each context: where to resume in SRE_MATCH()
   when the simulated recursion unwinds (see the switch after `exit`). */
#define JUMP_NONE            0
#define JUMP_MAX_UNTIL_1     1
#define JUMP_MAX_UNTIL_2     2
#define JUMP_MAX_UNTIL_3 3 #define JUMP_MIN_UNTIL_1 4 #define JUMP_MIN_UNTIL_2 5 #define JUMP_MIN_UNTIL_3 6 #define JUMP_REPEAT 7 #define JUMP_REPEAT_ONE_1 8 #define JUMP_REPEAT_ONE_2 9 #define JUMP_MIN_REPEAT_ONE 10 #define JUMP_BRANCH 11 #define JUMP_ASSERT 12 #define JUMP_ASSERT_NOT 13 #define DO_JUMP(jumpvalue, jumplabel, nextpattern) \ DATA_ALLOC(SRE_MATCH_CONTEXT, nextctx); \ nextctx->last_ctx_pos = ctx_pos; \ nextctx->jump = jumpvalue; \ nextctx->pattern = nextpattern; \ ctx_pos = alloc_pos; \ ctx = nextctx; \ goto entrance; \ jumplabel: \ while (0) /* gcc doesn't like labels at end of scopes */ \ typedef struct { Py_ssize_t last_ctx_pos; Py_ssize_t jump; SRE_CHAR* ptr; SRE_CODE* pattern; Py_ssize_t count; Py_ssize_t lastmark; Py_ssize_t lastindex; union { SRE_CODE chr; SRE_REPEAT* rep; } u; } SRE_MATCH_CONTEXT; /* check if string matches the given pattern. returns <0 for error, 0 for failure, and 1 for success */ LOCAL(Py_ssize_t) SRE_MATCH(SRE_STATE* state, SRE_CODE* pattern) { SRE_CHAR* end = (SRE_CHAR *)state->end; Py_ssize_t alloc_pos, ctx_pos = -1; Py_ssize_t i, ret = 0; Py_ssize_t jump; unsigned int sigcount=0; SRE_MATCH_CONTEXT* ctx; SRE_MATCH_CONTEXT* nextctx; TRACE(("|%p|%p|ENTER\n", pattern, state->ptr)); DATA_ALLOC(SRE_MATCH_CONTEXT, ctx); ctx->last_ctx_pos = -1; ctx->jump = JUMP_NONE; ctx->pattern = pattern; ctx_pos = alloc_pos; entrance: ctx->ptr = (SRE_CHAR *)state->ptr; if (ctx->pattern[0] == SRE_OP_INFO) { /* optimization info block */ /* <INFO> <1=skip> <2=flags> <3=min> ... 
*/ if (ctx->pattern[3] && (end - ctx->ptr) < ctx->pattern[3]) { TRACE(("reject (got %d chars, need %d)\n", (end - ctx->ptr), ctx->pattern[3])); RETURN_FAILURE; } ctx->pattern += ctx->pattern[1] + 1; } for (;;) { ++sigcount; if ((0 == (sigcount & 0xfff)) && PyErr_CheckSignals()) RETURN_ERROR(SRE_ERROR_INTERRUPTED); switch (*ctx->pattern++) { case SRE_OP_MARK: /* set mark */ /* <MARK> <gid> */ TRACE(("|%p|%p|MARK %d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); i = ctx->pattern[0]; if (i & 1) state->lastindex = i/2 + 1; if (i > state->lastmark) { /* state->lastmark is the highest valid index in the state->mark array. If it is increased by more than 1, the intervening marks must be set to NULL to signal that these marks have not been encountered. */ Py_ssize_t j = state->lastmark + 1; while (j < i) state->mark[j++] = NULL; state->lastmark = i; } state->mark[i] = ctx->ptr; ctx->pattern++; break; case SRE_OP_LITERAL: /* match literal string */ /* <LITERAL> <code> */ TRACE(("|%p|%p|LITERAL %d\n", ctx->pattern, ctx->ptr, *ctx->pattern)); if (ctx->ptr >= end || (SRE_CODE) ctx->ptr[0] != ctx->pattern[0]) RETURN_FAILURE; ctx->pattern++; ctx->ptr++; break; case SRE_OP_NOT_LITERAL: /* match anything that is not literal character */ /* <NOT_LITERAL> <code> */ TRACE(("|%p|%p|NOT_LITERAL %d\n", ctx->pattern, ctx->ptr, *ctx->pattern)); if (ctx->ptr >= end || (SRE_CODE) ctx->ptr[0] == ctx->pattern[0]) RETURN_FAILURE; ctx->pattern++; ctx->ptr++; break; case SRE_OP_SUCCESS: /* end of pattern */ TRACE(("|%p|%p|SUCCESS\n", ctx->pattern, ctx->ptr)); state->ptr = ctx->ptr; RETURN_SUCCESS; case SRE_OP_AT: /* match at given position */ /* <AT> <code> */ TRACE(("|%p|%p|AT %d\n", ctx->pattern, ctx->ptr, *ctx->pattern)); if (!SRE_AT(state, ctx->ptr, *ctx->pattern)) RETURN_FAILURE; ctx->pattern++; break; case SRE_OP_CATEGORY: /* match at given category */ /* <CATEGORY> <code> */ TRACE(("|%p|%p|CATEGORY %d\n", ctx->pattern, ctx->ptr, *ctx->pattern)); if (ctx->ptr >= end || 
!sre_category(ctx->pattern[0], ctx->ptr[0])) RETURN_FAILURE; ctx->pattern++; ctx->ptr++; break; case SRE_OP_ANY: /* match anything (except a newline) */ /* <ANY> */ TRACE(("|%p|%p|ANY\n", ctx->pattern, ctx->ptr)); if (ctx->ptr >= end || SRE_IS_LINEBREAK(ctx->ptr[0])) RETURN_FAILURE; ctx->ptr++; break; case SRE_OP_ANY_ALL: /* match anything */ /* <ANY_ALL> */ TRACE(("|%p|%p|ANY_ALL\n", ctx->pattern, ctx->ptr)); if (ctx->ptr >= end) RETURN_FAILURE; ctx->ptr++; break; case SRE_OP_IN: /* match set member (or non_member) */ /* <IN> <skip> <set> */ TRACE(("|%p|%p|IN\n", ctx->pattern, ctx->ptr)); if (ctx->ptr >= end || !SRE_CHARSET(ctx->pattern + 1, *ctx->ptr)) RETURN_FAILURE; ctx->pattern += ctx->pattern[0]; ctx->ptr++; break; case SRE_OP_LITERAL_IGNORE: TRACE(("|%p|%p|LITERAL_IGNORE %d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); if (ctx->ptr >= end || state->lower(*ctx->ptr) != state->lower(*ctx->pattern)) RETURN_FAILURE; ctx->pattern++; ctx->ptr++; break; case SRE_OP_NOT_LITERAL_IGNORE: TRACE(("|%p|%p|NOT_LITERAL_IGNORE %d\n", ctx->pattern, ctx->ptr, *ctx->pattern)); if (ctx->ptr >= end || state->lower(*ctx->ptr) == state->lower(*ctx->pattern)) RETURN_FAILURE; ctx->pattern++; ctx->ptr++; break; case SRE_OP_IN_IGNORE: TRACE(("|%p|%p|IN_IGNORE\n", ctx->pattern, ctx->ptr)); if (ctx->ptr >= end || !SRE_CHARSET(ctx->pattern+1, (SRE_CODE)state->lower(*ctx->ptr))) RETURN_FAILURE; ctx->pattern += ctx->pattern[0]; ctx->ptr++; break; case SRE_OP_JUMP: case SRE_OP_INFO: /* jump forward */ /* <JUMP> <offset> */ TRACE(("|%p|%p|JUMP %d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); ctx->pattern += ctx->pattern[0]; break; case SRE_OP_BRANCH: /* alternation */ /* <BRANCH> <0=skip> code <JUMP> ... 
<NULL> */ TRACE(("|%p|%p|BRANCH\n", ctx->pattern, ctx->ptr)); LASTMARK_SAVE(); ctx->u.rep = state->repeat; if (ctx->u.rep) MARK_PUSH(ctx->lastmark); for (; ctx->pattern[0]; ctx->pattern += ctx->pattern[0]) { if (ctx->pattern[1] == SRE_OP_LITERAL && (ctx->ptr >= end || (SRE_CODE) *ctx->ptr != ctx->pattern[2])) continue; if (ctx->pattern[1] == SRE_OP_IN && (ctx->ptr >= end || !SRE_CHARSET(ctx->pattern + 3, (SRE_CODE) *ctx->ptr))) continue; state->ptr = ctx->ptr; DO_JUMP(JUMP_BRANCH, jump_branch, ctx->pattern+1); if (ret) { if (ctx->u.rep) MARK_POP_DISCARD(ctx->lastmark); RETURN_ON_ERROR(ret); RETURN_SUCCESS; } if (ctx->u.rep) MARK_POP_KEEP(ctx->lastmark); LASTMARK_RESTORE(); } if (ctx->u.rep) MARK_POP_DISCARD(ctx->lastmark); RETURN_FAILURE; case SRE_OP_REPEAT_ONE: /* match repeated sequence (maximizing regexp) */ /* this operator only works if the repeated item is exactly one character wide, and we're not already collecting backtracking points. for other cases, use the MAX_REPEAT operator */ /* <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail */ TRACE(("|%p|%p|REPEAT_ONE %d %d\n", ctx->pattern, ctx->ptr, ctx->pattern[1], ctx->pattern[2])); if (ctx->ptr + ctx->pattern[1] > end) RETURN_FAILURE; /* cannot match */ state->ptr = ctx->ptr; ret = SRE_COUNT(state, ctx->pattern+3, ctx->pattern[2]); RETURN_ON_ERROR(ret); DATA_LOOKUP_AT(SRE_MATCH_CONTEXT, ctx, ctx_pos); ctx->count = ret; ctx->ptr += ctx->count; /* when we arrive here, count contains the number of matches, and ctx->ptr points to the tail of the target string. check if the rest of the pattern matches, and backtrack if not. */ if (ctx->count < (Py_ssize_t) ctx->pattern[1]) RETURN_FAILURE; if (ctx->pattern[ctx->pattern[0]] == SRE_OP_SUCCESS) { /* tail is empty. we're finished */ state->ptr = ctx->ptr; RETURN_SUCCESS; } LASTMARK_SAVE(); if (ctx->pattern[ctx->pattern[0]] == SRE_OP_LITERAL) { /* tail starts with a literal. 
skip positions where the rest of the pattern cannot possibly match */ ctx->u.chr = ctx->pattern[ctx->pattern[0]+1]; for (;;) { while (ctx->count >= (Py_ssize_t) ctx->pattern[1] && (ctx->ptr >= end || *ctx->ptr != ctx->u.chr)) { ctx->ptr--; ctx->count--; } if (ctx->count < (Py_ssize_t) ctx->pattern[1]) break; state->ptr = ctx->ptr; DO_JUMP(JUMP_REPEAT_ONE_1, jump_repeat_one_1, ctx->pattern+ctx->pattern[0]); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } LASTMARK_RESTORE(); ctx->ptr--; ctx->count--; } } else { /* general case */ while (ctx->count >= (Py_ssize_t) ctx->pattern[1]) { state->ptr = ctx->ptr; DO_JUMP(JUMP_REPEAT_ONE_2, jump_repeat_one_2, ctx->pattern+ctx->pattern[0]); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } ctx->ptr--; ctx->count--; LASTMARK_RESTORE(); } } RETURN_FAILURE; case SRE_OP_MIN_REPEAT_ONE: /* match repeated sequence (minimizing regexp) */ /* this operator only works if the repeated item is exactly one character wide, and we're not already collecting backtracking points. for other cases, use the MIN_REPEAT operator */ /* <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail */ TRACE(("|%p|%p|MIN_REPEAT_ONE %d %d\n", ctx->pattern, ctx->ptr, ctx->pattern[1], ctx->pattern[2])); if (ctx->ptr + ctx->pattern[1] > end) RETURN_FAILURE; /* cannot match */ state->ptr = ctx->ptr; if (ctx->pattern[1] == 0) ctx->count = 0; else { /* count using pattern min as the maximum */ ret = SRE_COUNT(state, ctx->pattern+3, ctx->pattern[1]); RETURN_ON_ERROR(ret); DATA_LOOKUP_AT(SRE_MATCH_CONTEXT, ctx, ctx_pos); if (ret < (Py_ssize_t) ctx->pattern[1]) /* didn't match minimum number of times */ RETURN_FAILURE; /* advance past minimum matches of repeat */ ctx->count = ret; ctx->ptr += ctx->count; } if (ctx->pattern[ctx->pattern[0]] == SRE_OP_SUCCESS) { /* tail is empty. 
we're finished */ state->ptr = ctx->ptr; RETURN_SUCCESS; } else { /* general case */ LASTMARK_SAVE(); while ((Py_ssize_t)ctx->pattern[2] == 65535 || ctx->count <= (Py_ssize_t)ctx->pattern[2]) { state->ptr = ctx->ptr; DO_JUMP(JUMP_MIN_REPEAT_ONE,jump_min_repeat_one, ctx->pattern+ctx->pattern[0]); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } state->ptr = ctx->ptr; ret = SRE_COUNT(state, ctx->pattern+3, 1); RETURN_ON_ERROR(ret); DATA_LOOKUP_AT(SRE_MATCH_CONTEXT, ctx, ctx_pos); if (ret == 0) break; assert(ret == 1); ctx->ptr++; ctx->count++; LASTMARK_RESTORE(); } } RETURN_FAILURE; case SRE_OP_REPEAT: /* create repeat context. all the hard work is done by the UNTIL operator (MAX_UNTIL, MIN_UNTIL) */ /* <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail */ TRACE(("|%p|%p|REPEAT %d %d\n", ctx->pattern, ctx->ptr, ctx->pattern[1], ctx->pattern[2])); /* install new repeat context */ ctx->u.rep = (SRE_REPEAT*) PyObject_MALLOC(sizeof(*ctx->u.rep)); if (!ctx->u.rep) { PyErr_NoMemory(); RETURN_FAILURE; } ctx->u.rep->count = -1; ctx->u.rep->pattern = ctx->pattern; ctx->u.rep->prev = state->repeat; ctx->u.rep->last_ptr = NULL; state->repeat = ctx->u.rep; state->ptr = ctx->ptr; DO_JUMP(JUMP_REPEAT, jump_repeat, ctx->pattern+ctx->pattern[0]); state->repeat = ctx->u.rep->prev; PyObject_FREE(ctx->u.rep); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } RETURN_FAILURE; case SRE_OP_MAX_UNTIL: /* maximizing repeat */ /* <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail */ /* FIXME: we probably need to deal with zero-width matches in here... 
*/ ctx->u.rep = state->repeat; if (!ctx->u.rep) RETURN_ERROR(SRE_ERROR_STATE); state->ptr = ctx->ptr; ctx->count = ctx->u.rep->count+1; TRACE(("|%p|%p|MAX_UNTIL %d\n", ctx->pattern, ctx->ptr, ctx->count)); if (ctx->count < ctx->u.rep->pattern[1]) { /* not enough matches */ ctx->u.rep->count = ctx->count; DO_JUMP(JUMP_MAX_UNTIL_1, jump_max_until_1, ctx->u.rep->pattern+3); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } ctx->u.rep->count = ctx->count-1; state->ptr = ctx->ptr; RETURN_FAILURE; } if ((ctx->count < ctx->u.rep->pattern[2] || ctx->u.rep->pattern[2] == 65535) && state->ptr != ctx->u.rep->last_ptr) { /* we may have enough matches, but if we can match another item, do so */ ctx->u.rep->count = ctx->count; LASTMARK_SAVE(); MARK_PUSH(ctx->lastmark); /* zero-width match protection */ DATA_PUSH(&ctx->u.rep->last_ptr); ctx->u.rep->last_ptr = state->ptr; DO_JUMP(JUMP_MAX_UNTIL_2, jump_max_until_2, ctx->u.rep->pattern+3); DATA_POP(&ctx->u.rep->last_ptr); if (ret) { MARK_POP_DISCARD(ctx->lastmark); RETURN_ON_ERROR(ret); RETURN_SUCCESS; } MARK_POP(ctx->lastmark); LASTMARK_RESTORE(); ctx->u.rep->count = ctx->count-1; state->ptr = ctx->ptr; } /* cannot match more repeated items here. 
make sure the tail matches */ state->repeat = ctx->u.rep->prev; DO_JUMP(JUMP_MAX_UNTIL_3, jump_max_until_3, ctx->pattern); RETURN_ON_SUCCESS(ret); state->repeat = ctx->u.rep; state->ptr = ctx->ptr; RETURN_FAILURE; case SRE_OP_MIN_UNTIL: /* minimizing repeat */ /* <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail */ ctx->u.rep = state->repeat; if (!ctx->u.rep) RETURN_ERROR(SRE_ERROR_STATE); state->ptr = ctx->ptr; ctx->count = ctx->u.rep->count+1; TRACE(("|%p|%p|MIN_UNTIL %d %p\n", ctx->pattern, ctx->ptr, ctx->count, ctx->u.rep->pattern)); if (ctx->count < ctx->u.rep->pattern[1]) { /* not enough matches */ ctx->u.rep->count = ctx->count; DO_JUMP(JUMP_MIN_UNTIL_1, jump_min_until_1, ctx->u.rep->pattern+3); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } ctx->u.rep->count = ctx->count-1; state->ptr = ctx->ptr; RETURN_FAILURE; } LASTMARK_SAVE(); /* see if the tail matches */ state->repeat = ctx->u.rep->prev; DO_JUMP(JUMP_MIN_UNTIL_2, jump_min_until_2, ctx->pattern); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } state->repeat = ctx->u.rep; state->ptr = ctx->ptr; LASTMARK_RESTORE(); if (ctx->count >= ctx->u.rep->pattern[2] && ctx->u.rep->pattern[2] != 65535) RETURN_FAILURE; ctx->u.rep->count = ctx->count; DO_JUMP(JUMP_MIN_UNTIL_3,jump_min_until_3, ctx->u.rep->pattern+3); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; } ctx->u.rep->count = ctx->count-1; state->ptr = ctx->ptr; RETURN_FAILURE; case SRE_OP_GROUPREF: /* match backreference */ TRACE(("|%p|%p|GROUPREF %d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); i = ctx->pattern[0]; { Py_ssize_t groupref = i+i; if (groupref >= state->lastmark) { RETURN_FAILURE; } else { SRE_CHAR* p = (SRE_CHAR*) state->mark[groupref]; SRE_CHAR* e = (SRE_CHAR*) state->mark[groupref+1]; if (!p || !e || e < p) RETURN_FAILURE; while (p < e) { if (ctx->ptr >= end || *ctx->ptr != *p) RETURN_FAILURE; p++; ctx->ptr++; } } } ctx->pattern++; break; case SRE_OP_GROUPREF_IGNORE: /* match backreference */ TRACE(("|%p|%p|GROUPREF_IGNORE 
%d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); i = ctx->pattern[0]; { Py_ssize_t groupref = i+i; if (groupref >= state->lastmark) { RETURN_FAILURE; } else { SRE_CHAR* p = (SRE_CHAR*) state->mark[groupref]; SRE_CHAR* e = (SRE_CHAR*) state->mark[groupref+1]; if (!p || !e || e < p) RETURN_FAILURE; while (p < e) { if (ctx->ptr >= end || state->lower(*ctx->ptr) != state->lower(*p)) RETURN_FAILURE; p++; ctx->ptr++; } } } ctx->pattern++; break; case SRE_OP_GROUPREF_EXISTS: TRACE(("|%p|%p|GROUPREF_EXISTS %d\n", ctx->pattern, ctx->ptr, ctx->pattern[0])); /* <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ... */ i = ctx->pattern[0]; { Py_ssize_t groupref = i+i; if (groupref >= state->lastmark) { ctx->pattern += ctx->pattern[1]; break; } else { SRE_CHAR* p = (SRE_CHAR*) state->mark[groupref]; SRE_CHAR* e = (SRE_CHAR*) state->mark[groupref+1]; if (!p || !e || e < p) { ctx->pattern += ctx->pattern[1]; break; } } } ctx->pattern += 2; break; case SRE_OP_ASSERT: /* assert subpattern */ /* <ASSERT> <skip> <back> <pattern> */ TRACE(("|%p|%p|ASSERT %d\n", ctx->pattern, ctx->ptr, ctx->pattern[1])); state->ptr = ctx->ptr - ctx->pattern[1]; if (state->ptr < state->beginning) RETURN_FAILURE; DO_JUMP(JUMP_ASSERT, jump_assert, ctx->pattern+2); RETURN_ON_FAILURE(ret); ctx->pattern += ctx->pattern[0]; break; case SRE_OP_ASSERT_NOT: /* assert not subpattern */ /* <ASSERT_NOT> <skip> <back> <pattern> */ TRACE(("|%p|%p|ASSERT_NOT %d\n", ctx->pattern, ctx->ptr, ctx->pattern[1])); state->ptr = ctx->ptr - ctx->pattern[1]; if (state->ptr >= state->beginning) { DO_JUMP(JUMP_ASSERT_NOT, jump_assert_not, ctx->pattern+2); if (ret) { RETURN_ON_ERROR(ret); RETURN_FAILURE; } } ctx->pattern += ctx->pattern[0]; break; case SRE_OP_FAILURE: /* immediate failure */ TRACE(("|%p|%p|FAILURE\n", ctx->pattern, ctx->ptr)); RETURN_FAILURE; default: TRACE(("|%p|%p|UNKNOWN %d\n", ctx->pattern, ctx->ptr, ctx->pattern[-1])); RETURN_ERROR(SRE_ERROR_ILLEGAL); } } exit: ctx_pos = ctx->last_ctx_pos; jump = 
ctx->jump; DATA_POP_DISCARD(ctx); if (ctx_pos == -1) return ret; DATA_LOOKUP_AT(SRE_MATCH_CONTEXT, ctx, ctx_pos); switch (jump) { case JUMP_MAX_UNTIL_2: TRACE(("|%p|%p|JUMP_MAX_UNTIL_2\n", ctx->pattern, ctx->ptr)); goto jump_max_until_2; case JUMP_MAX_UNTIL_3: TRACE(("|%p|%p|JUMP_MAX_UNTIL_3\n", ctx->pattern, ctx->ptr)); goto jump_max_until_3; case JUMP_MIN_UNTIL_2: TRACE(("|%p|%p|JUMP_MIN_UNTIL_2\n", ctx->pattern, ctx->ptr)); goto jump_min_until_2; case JUMP_MIN_UNTIL_3: TRACE(("|%p|%p|JUMP_MIN_UNTIL_3\n", ctx->pattern, ctx->ptr)); goto jump_min_until_3; case JUMP_BRANCH: TRACE(("|%p|%p|JUMP_BRANCH\n", ctx->pattern, ctx->ptr)); goto jump_branch; case JUMP_MAX_UNTIL_1: TRACE(("|%p|%p|JUMP_MAX_UNTIL_1\n", ctx->pattern, ctx->ptr)); goto jump_max_until_1; case JUMP_MIN_UNTIL_1: TRACE(("|%p|%p|JUMP_MIN_UNTIL_1\n", ctx->pattern, ctx->ptr)); goto jump_min_until_1; case JUMP_REPEAT: TRACE(("|%p|%p|JUMP_REPEAT\n", ctx->pattern, ctx->ptr)); goto jump_repeat; case JUMP_REPEAT_ONE_1: TRACE(("|%p|%p|JUMP_REPEAT_ONE_1\n", ctx->pattern, ctx->ptr)); goto jump_repeat_one_1; case JUMP_REPEAT_ONE_2: TRACE(("|%p|%p|JUMP_REPEAT_ONE_2\n", ctx->pattern, ctx->ptr)); goto jump_repeat_one_2; case JUMP_MIN_REPEAT_ONE: TRACE(("|%p|%p|JUMP_MIN_REPEAT_ONE\n", ctx->pattern, ctx->ptr)); goto jump_min_repeat_one; case JUMP_ASSERT: TRACE(("|%p|%p|JUMP_ASSERT\n", ctx->pattern, ctx->ptr)); goto jump_assert; case JUMP_ASSERT_NOT: TRACE(("|%p|%p|JUMP_ASSERT_NOT\n", ctx->pattern, ctx->ptr)); goto jump_assert_not; case JUMP_NONE: TRACE(("|%p|%p|RETURN %d\n", ctx->pattern, ctx->ptr, ret)); break; } return ret; /* should never get here */ } LOCAL(Py_ssize_t) SRE_SEARCH(SRE_STATE* state, SRE_CODE* pattern) { SRE_CHAR* ptr = (SRE_CHAR *)state->start; SRE_CHAR* end = (SRE_CHAR *)state->end; Py_ssize_t status = 0; Py_ssize_t prefix_len = 0; Py_ssize_t prefix_skip = 0; SRE_CODE* prefix = NULL; SRE_CODE* charset = NULL; SRE_CODE* overlap = NULL; int flags = 0; if (pattern[0] == SRE_OP_INFO) { /* optimization 
info block */ /* <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info> */ flags = pattern[2]; if (pattern[3] > 1) { /* adjust end point (but make sure we leave at least one character in there, so literal search will work) */ end -= pattern[3]-1; if (end <= ptr) end = ptr+1; } if (flags & SRE_INFO_PREFIX) { /* pattern starts with a known prefix */ /* <length> <skip> <prefix data> <overlap data> */ prefix_len = pattern[5]; prefix_skip = pattern[6]; prefix = pattern + 7; overlap = prefix + prefix_len - 1; } else if (flags & SRE_INFO_CHARSET) /* pattern starts with a character from a known set */ /* <charset> */ charset = pattern + 5; pattern += 1 + pattern[1]; } TRACE(("prefix = %p %d %d\n", prefix, prefix_len, prefix_skip)); TRACE(("charset = %p\n", charset)); #if defined(USE_FAST_SEARCH) if (prefix_len > 1) { /* pattern starts with a known prefix. use the overlap table to skip forward as fast as we possibly can */ Py_ssize_t i = 0; end = (SRE_CHAR *)state->end; while (ptr < end) { for (;;) { if ((SRE_CODE) ptr[0] != prefix[i]) { if (!i) break; else i = overlap[i]; } else { if (++i == prefix_len) { /* found a potential match */ TRACE(("|%p|%p|SEARCH SCAN\n", pattern, ptr)); state->start = ptr + 1 - prefix_len; state->ptr = ptr + 1 - prefix_len + prefix_skip; if (flags & SRE_INFO_LITERAL) return 1; /* we got all of it */ status = SRE_MATCH(state, pattern + 2*prefix_skip); if (status != 0) return status; /* close but no cigar -- try again */ i = overlap[i]; } break; } } ptr++; } return 0; } #endif if (pattern[0] == SRE_OP_LITERAL) { /* pattern starts with a literal character. 
this is used for short prefixes, and if fast search is disabled */ SRE_CODE chr = pattern[1]; end = (SRE_CHAR *)state->end; for (;;) { while (ptr < end && (SRE_CODE) ptr[0] != chr) ptr++; if (ptr >= end) return 0; TRACE(("|%p|%p|SEARCH LITERAL\n", pattern, ptr)); state->start = ptr; state->ptr = ++ptr; if (flags & SRE_INFO_LITERAL) return 1; /* we got all of it */ status = SRE_MATCH(state, pattern + 2); if (status != 0) break; } } else if (charset) { /* pattern starts with a character from a known set */ end = (SRE_CHAR *)state->end; for (;;) { while (ptr < end && !SRE_CHARSET(charset, ptr[0])) ptr++; if (ptr >= end) return 0; TRACE(("|%p|%p|SEARCH CHARSET\n", pattern, ptr)); state->start = ptr; state->ptr = ptr; status = SRE_MATCH(state, pattern); if (status != 0) break; ptr++; } } else /* general case */ while (ptr <= end) { TRACE(("|%p|%p|SEARCH\n", pattern, ptr)); state->start = state->ptr = ptr++; status = SRE_MATCH(state, pattern); if (status != 0) break; } return status; } LOCAL(int) SRE_LITERAL_TEMPLATE(SRE_CHAR* ptr, Py_ssize_t len) { /* check if given string is a literal template (i.e. 
no escapes) */ while (len-- > 0) if (*ptr++ == '\\') return 0; return 1; } #if !defined(SRE_RECURSIVE) /* -------------------------------------------------------------------- */ /* factories and destructors */ /* see sre.h for object declarations */ static PyObject*pattern_new_match(PatternObject*, SRE_STATE*, int); static PyObject*pattern_scanner(PatternObject*, PyObject*); static PyObject * sre_codesize(PyObject* self, PyObject *unused) { return Py_BuildValue("l", sizeof(SRE_CODE)); } static PyObject * sre_getlower(PyObject* self, PyObject* args) { int character, flags; if (!PyArg_ParseTuple(args, "ii", &character, &flags)) return NULL; if (flags & SRE_FLAG_LOCALE) return Py_BuildValue("i", sre_lower_locale(character)); if (flags & SRE_FLAG_UNICODE) #if defined(HAVE_UNICODE) return Py_BuildValue("i", sre_lower_unicode(character)); #else return Py_BuildValue("i", sre_lower_locale(character)); #endif return Py_BuildValue("i", sre_lower(character)); } LOCAL(void) state_reset(SRE_STATE* state) { /* FIXME: dynamic! */ /*memset(state->mark, 0, sizeof(*state->mark) * SRE_MARK_SIZE);*/ state->lastmark = -1; state->lastindex = -1; state->repeat = NULL; data_stack_dealloc(state); } static void* getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize) { /* given a python object, return a data pointer, a length (in characters), and a character size. 
return NULL if the object is not a string (or not compatible) */ PyBufferProcs *buffer; Py_ssize_t size, bytes; int charsize; void* ptr; #if defined(HAVE_UNICODE) if (PyUnicode_Check(string)) { /* unicode strings doesn't always support the buffer interface */ ptr = (void*) PyUnicode_AS_DATA(string); /* bytes = PyUnicode_GET_DATA_SIZE(string); */ size = PyUnicode_GET_SIZE(string); charsize = sizeof(Py_UNICODE); } else { #endif /* get pointer to string buffer */ buffer = Py_TYPE(string)->tp_as_buffer; if (!buffer || !buffer->bf_getreadbuffer || !buffer->bf_getsegcount || buffer->bf_getsegcount(string, NULL) != 1) { PyErr_SetString(PyExc_TypeError, "expected string or buffer"); return NULL; } /* determine buffer size */ bytes = buffer->bf_getreadbuffer(string, 0, &ptr); if (bytes < 0) { PyErr_SetString(PyExc_TypeError, "buffer has negative size"); return NULL; } /* determine character size */ #if PY_VERSION_HEX >= 0x01060000 size = PyObject_Size(string); #else size = PyObject_Length(string); #endif if (PyString_Check(string) || bytes == size) charsize = 1; #if defined(HAVE_UNICODE) else if (bytes == (Py_ssize_t) (size * sizeof(Py_UNICODE))) charsize = sizeof(Py_UNICODE); #endif else { PyErr_SetString(PyExc_TypeError, "buffer size mismatch"); return NULL; } #if defined(HAVE_UNICODE) } #endif *p_length = size; *p_charsize = charsize; return ptr; } LOCAL(PyObject*) state_init(SRE_STATE* state, PatternObject* pattern, PyObject* string, Py_ssize_t start, Py_ssize_t end) { /* prepare state object */ Py_ssize_t length; int charsize; void* ptr; memset(state, 0, sizeof(SRE_STATE)); state->lastmark = -1; state->lastindex = -1; ptr = getstring(string, &length, &charsize); if (!ptr) return NULL; /* adjust boundaries */ if (start < 0) start = 0; else if (start > length) start = length; if (end < 0) end = 0; else if (end > length) end = length; state->charsize = charsize; state->beginning = ptr; state->start = (void*) ((char*) ptr + start * state->charsize); state->end = (void*) 
((char*) ptr + end * state->charsize); Py_INCREF(string); state->string = string; state->pos = start; state->endpos = end; if (pattern->flags & SRE_FLAG_LOCALE) state->lower = sre_lower_locale; else if (pattern->flags & SRE_FLAG_UNICODE) #if defined(HAVE_UNICODE) state->lower = sre_lower_unicode; #else state->lower = sre_lower_locale; #endif else state->lower = sre_lower; return string; } LOCAL(void) state_fini(SRE_STATE* state) { Py_XDECREF(state->string); data_stack_dealloc(state); } /* calculate offset from start of string */ #define STATE_OFFSET(state, member)\ (((char*)(member) - (char*)(state)->beginning) / (state)->charsize) LOCAL(PyObject*) state_getslice(SRE_STATE* state, Py_ssize_t index, PyObject* string, int empty) { Py_ssize_t i, j; index = (index - 1) * 2; if (string == Py_None || index >= state->lastmark || !state->mark[index] || !state->mark[index+1]) { if (empty) /* want empty string */ i = j = 0; else { Py_INCREF(Py_None); return Py_None; } } else { i = STATE_OFFSET(state, state->mark[index]); j = STATE_OFFSET(state, state->mark[index+1]); } return PySequence_GetSlice(string, i, j); } static void pattern_error(int status) { switch (status) { case SRE_ERROR_RECURSION_LIMIT: PyErr_SetString( PyExc_RuntimeError, "maximum recursion limit exceeded" ); break; case SRE_ERROR_MEMORY: PyErr_NoMemory(); break; case SRE_ERROR_INTERRUPTED: /* An exception has already been raised, so let it fly */ break; default: /* other error codes indicate compiler/engine bugs */ PyErr_SetString( PyExc_RuntimeError, "internal error in regular expression engine" ); } } static void pattern_dealloc(PatternObject* self) { if (self->weakreflist != NULL) PyObject_ClearWeakRefs((PyObject *) self); Py_XDECREF(self->pattern); Py_XDECREF(self->groupindex); Py_XDECREF(self->indexgroup); PyObject_DEL(self); } static PyObject* pattern_match(PatternObject* self, PyObject* args, PyObject* kw) { SRE_STATE state; int status; PyObject* string; Py_ssize_t start = 0; Py_ssize_t end = 
PY_SSIZE_T_MAX; static char* kwlist[] = { "pattern", "pos", "endpos", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:match", kwlist, &string, &start, &end)) return NULL; string = state_init(&state, self, string, start, end); if (!string) return NULL; state.ptr = state.start; TRACE(("|%p|%p|MATCH\n", PatternObject_GetCode(self), state.ptr)); if (state.charsize == 1) { status = sre_match(&state, PatternObject_GetCode(self)); } else { #if defined(HAVE_UNICODE) status = sre_umatch(&state, PatternObject_GetCode(self)); #endif } TRACE(("|%p|%p|END\n", PatternObject_GetCode(self), state.ptr)); if (PyErr_Occurred()) return NULL; state_fini(&state); return pattern_new_match(self, &state, status); } static PyObject* pattern_search(PatternObject* self, PyObject* args, PyObject* kw) { SRE_STATE state; int status; PyObject* string; Py_ssize_t start = 0; Py_ssize_t end = PY_SSIZE_T_MAX; static char* kwlist[] = { "pattern", "pos", "endpos", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:search", kwlist, &string, &start, &end)) return NULL; string = state_init(&state, self, string, start, end); if (!string) return NULL; TRACE(("|%p|%p|SEARCH\n", PatternObject_GetCode(self), state.ptr)); if (state.charsize == 1) { status = sre_search(&state, PatternObject_GetCode(self)); } else { #if defined(HAVE_UNICODE) status = sre_usearch(&state, PatternObject_GetCode(self)); #endif } TRACE(("|%p|%p|END\n", PatternObject_GetCode(self), state.ptr)); state_fini(&state); if (PyErr_Occurred()) return NULL; return pattern_new_match(self, &state, status); } static PyObject* call(char* module, char* function, PyObject* args) { PyObject* name; PyObject* mod; PyObject* func; PyObject* result; if (!args) return NULL; name = PyString_FromString(module); if (!name) return NULL; mod = PyImport_Import(name); Py_DECREF(name); if (!mod) return NULL; func = PyObject_GetAttrString(mod, function); Py_DECREF(mod); if (!func) return NULL; result = PyObject_CallObject(func, args); 
Py_DECREF(func); Py_DECREF(args); return result; } #ifdef USE_BUILTIN_COPY static int deepcopy(PyObject** object, PyObject* memo) { PyObject* copy; copy = call( "copy", "deepcopy", PyTuple_Pack(2, *object, memo) ); if (!copy) return 0; Py_DECREF(*object); *object = copy; return 1; /* success */ } #endif static PyObject* join_list(PyObject* list, PyObject* string) { /* join list elements */ PyObject* joiner; #if PY_VERSION_HEX >= 0x01060000 PyObject* function; PyObject* args; #endif PyObject* result; joiner = PySequence_GetSlice(string, 0, 0); if (!joiner) return NULL; if (PyList_GET_SIZE(list) == 0) { Py_DECREF(list); return joiner; } #if PY_VERSION_HEX >= 0x01060000 function = PyObject_GetAttrString(joiner, "join"); if (!function) { Py_DECREF(joiner); return NULL; } args = PyTuple_New(1); if (!args) { Py_DECREF(function); Py_DECREF(joiner); return NULL; } PyTuple_SET_ITEM(args, 0, list); result = PyObject_CallObject(function, args); Py_DECREF(args); /* also removes list */ Py_DECREF(function); #else result = call( "string", "join", PyTuple_Pack(2, list, joiner) ); #endif Py_DECREF(joiner); return result; } static PyObject* pattern_findall(PatternObject* self, PyObject* args, PyObject* kw) { SRE_STATE state; PyObject* list; int status; Py_ssize_t i, b, e; PyObject* string; Py_ssize_t start = 0; Py_ssize_t end = PY_SSIZE_T_MAX; static char* kwlist[] = { "source", "pos", "endpos", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:findall", kwlist, &string, &start, &end)) return NULL; string = state_init(&state, self, string, start, end); if (!string) return NULL; list = PyList_New(0); if (!list) { state_fini(&state); return NULL; } while (state.start <= state.end) { PyObject* item; state_reset(&state); state.ptr = state.start; if (state.charsize == 1) { status = sre_search(&state, PatternObject_GetCode(self)); } else { #if defined(HAVE_UNICODE) status = sre_usearch(&state, PatternObject_GetCode(self)); #endif } if (PyErr_Occurred()) goto error; if (status <= 
0) { if (status == 0) break; pattern_error(status); goto error; } /* don't bother to build a match object */ switch (self->groups) { case 0: b = STATE_OFFSET(&state, state.start); e = STATE_OFFSET(&state, state.ptr); item = PySequence_GetSlice(string, b, e); if (!item) goto error; break; case 1: item = state_getslice(&state, 1, string, 1); if (!item) goto error; break; default: item = PyTuple_New(self->groups); if (!item) goto error; for (i = 0; i < self->groups; i++) { PyObject* o = state_getslice(&state, i+1, string, 1); if (!o) { Py_DECREF(item); goto error; } PyTuple_SET_ITEM(item, i, o); } break; } status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; if (state.ptr == state.start) state.start = (void*) ((char*) state.ptr + state.charsize); else state.start = state.ptr; } state_fini(&state); return list; error: Py_DECREF(list); state_fini(&state); return NULL; } #if PY_VERSION_HEX >= 0x02020000 static PyObject* pattern_finditer(PatternObject* pattern, PyObject* args) { PyObject* scanner; PyObject* search; PyObject* iterator; scanner = pattern_scanner(pattern, args); if (!scanner) return NULL; search = PyObject_GetAttrString(scanner, "search"); Py_DECREF(scanner); if (!search) return NULL; iterator = PyCallIter_New(search, Py_None); Py_DECREF(search); return iterator; } #endif static PyObject* pattern_split(PatternObject* self, PyObject* args, PyObject* kw) { SRE_STATE state; PyObject* list; PyObject* item; int status; Py_ssize_t n; Py_ssize_t i; void* last; PyObject* string; Py_ssize_t maxsplit = 0; static char* kwlist[] = { "source", "maxsplit", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "O|n:split", kwlist, &string, &maxsplit)) return NULL; string = state_init(&state, self, string, 0, PY_SSIZE_T_MAX); if (!string) return NULL; list = PyList_New(0); if (!list) { state_fini(&state); return NULL; } n = 0; last = state.start; while (!maxsplit || n < maxsplit) { state_reset(&state); state.ptr = state.start; if (state.charsize == 
1) { status = sre_search(&state, PatternObject_GetCode(self)); } else { #if defined(HAVE_UNICODE) status = sre_usearch(&state, PatternObject_GetCode(self)); #endif } if (PyErr_Occurred()) goto error; if (status <= 0) { if (status == 0) break; pattern_error(status); goto error; } if (state.start == state.ptr) { if (last == state.end) break; /* skip one character */ state.start = (void*) ((char*) state.ptr + state.charsize); continue; } /* get segment before this match */ item = PySequence_GetSlice( string, STATE_OFFSET(&state, last), STATE_OFFSET(&state, state.start) ); if (!item) goto error; status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; /* add groups (if any) */ for (i = 0; i < self->groups; i++) { item = state_getslice(&state, i+1, string, 0); if (!item) goto error; status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; } n = n + 1; last = state.start = state.ptr; } /* get segment following last match (even if empty) */ item = PySequence_GetSlice( string, STATE_OFFSET(&state, last), state.endpos ); if (!item) goto error; status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; state_fini(&state); return list; error: Py_DECREF(list); state_fini(&state); return NULL; } static PyObject* pattern_subx(PatternObject* self, PyObject* ptemplate, PyObject* string, Py_ssize_t count, Py_ssize_t subn) { SRE_STATE state; PyObject* list; PyObject* item; PyObject* filter; PyObject* args; PyObject* match; void* ptr; int status; Py_ssize_t n; Py_ssize_t i, b, e; int bint; int filter_is_callable; if (PyCallable_Check(ptemplate)) { /* sub/subn takes either a function or a template */ filter = ptemplate; Py_INCREF(filter); filter_is_callable = 1; } else { /* if not callable, check if it's a literal string */ int literal; ptr = getstring(ptemplate, &n, &bint); b = bint; if (ptr) { if (b == 1) { literal = sre_literal_template((unsigned char *)ptr, n); } else { #if defined(HAVE_UNICODE) literal = 
sre_uliteral_template((Py_UNICODE *)ptr, n); #endif } } else { PyErr_Clear(); literal = 0; } if (literal) { filter = ptemplate; Py_INCREF(filter); filter_is_callable = 0; } else { /* not a literal; hand it over to the template compiler */ filter = call( SRE_PY_MODULE, "_subx", PyTuple_Pack(2, self, ptemplate) ); if (!filter) return NULL; filter_is_callable = PyCallable_Check(filter); } } string = state_init(&state, self, string, 0, PY_SSIZE_T_MAX); if (!string) { Py_DECREF(filter); return NULL; } list = PyList_New(0); if (!list) { Py_DECREF(filter); state_fini(&state); return NULL; } n = i = 0; while (!count || n < count) { state_reset(&state); state.ptr = state.start; if (state.charsize == 1) { status = sre_search(&state, PatternObject_GetCode(self)); } else { #if defined(HAVE_UNICODE) status = sre_usearch(&state, PatternObject_GetCode(self)); #endif } if (PyErr_Occurred()) goto error; if (status <= 0) { if (status == 0) break; pattern_error(status); goto error; } b = STATE_OFFSET(&state, state.start); e = STATE_OFFSET(&state, state.ptr); if (i < b) { /* get segment before this match */ item = PySequence_GetSlice(string, i, b); if (!item) goto error; status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; } else if (i == b && i == e && n > 0) /* ignore empty match on latest position */ goto next; if (filter_is_callable) { /* pass match object through filter */ match = pattern_new_match(self, &state, 1); if (!match) goto error; args = PyTuple_Pack(1, match); if (!args) { Py_DECREF(match); goto error; } item = PyObject_CallObject(filter, args); Py_DECREF(args); Py_DECREF(match); if (!item) goto error; } else { /* filter is literal string */ item = filter; Py_INCREF(item); } /* add to list */ if (item != Py_None) { status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; } i = e; n = n + 1; next: /* move on */ if (state.ptr == state.start) state.start = (void*) ((char*) state.ptr + state.charsize); else state.start = 
state.ptr; } /* get segment following last match */ if (i < state.endpos) { item = PySequence_GetSlice(string, i, state.endpos); if (!item) goto error; status = PyList_Append(list, item); Py_DECREF(item); if (status < 0) goto error; } state_fini(&state); Py_DECREF(filter); /* convert list to single string (also removes list) */ item = join_list(list, string); if (!item) return NULL; if (subn) return Py_BuildValue("Ni", item, n); return item; error: Py_DECREF(list); state_fini(&state); Py_DECREF(filter); return NULL; } static PyObject* pattern_sub(PatternObject* self, PyObject* args, PyObject* kw) { PyObject* ptemplate; PyObject* string; Py_ssize_t count = 0; static char* kwlist[] = { "repl", "string", "count", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|n:sub", kwlist, &ptemplate, &string, &count)) return NULL; return pattern_subx(self, ptemplate, string, count, 0); } static PyObject* pattern_subn(PatternObject* self, PyObject* args, PyObject* kw) { PyObject* ptemplate; PyObject* string; Py_ssize_t count = 0; static char* kwlist[] = { "repl", "string", "count", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|n:subn", kwlist, &ptemplate, &string, &count)) return NULL; return pattern_subx(self, ptemplate, string, count, 1); } static PyObject* pattern_copy(PatternObject* self, PyObject *unused) { #ifdef USE_BUILTIN_COPY PatternObject* copy; int offset; copy = PyObject_NEW_VAR(PatternObject, &Pattern_Type, self->codesize); if (!copy) return NULL; offset = offsetof(PatternObject, groups); Py_XINCREF(self->groupindex); Py_XINCREF(self->indexgroup); Py_XINCREF(self->pattern); memcpy((char*) copy + offset, (char*) self + offset, sizeof(PatternObject) + self->codesize * sizeof(SRE_CODE) - offset); copy->weakreflist = NULL; return (PyObject*) copy; #else PyErr_SetString(PyExc_TypeError, "cannot copy this pattern object"); return NULL; #endif } static PyObject* pattern_deepcopy(PatternObject* self, PyObject* memo) { #ifdef USE_BUILTIN_COPY PatternObject* 
copy; copy = (PatternObject*) pattern_copy(self); if (!copy) return NULL; if (!deepcopy(&copy->groupindex, memo) || !deepcopy(&copy->indexgroup, memo) || !deepcopy(&copy->pattern, memo)) { Py_DECREF(copy); return NULL; } #else PyErr_SetString(PyExc_TypeError, "cannot deepcopy this pattern object"); return NULL; #endif } PyDoc_STRVAR(pattern_match_doc, "match(string[, pos[, endpos]]) --> match object or None.\n\ Matches zero or more characters at the beginning of the string"); PyDoc_STRVAR(pattern_search_doc, "search(string[, pos[, endpos]]) --> match object or None.\n\ Scan through string looking for a match, and return a corresponding\n\ MatchObject instance. Return None if no position in the string matches."); PyDoc_STRVAR(pattern_split_doc, "split(string[, maxsplit = 0]) --> list.\n\ Split string by the occurrences of pattern."); PyDoc_STRVAR(pattern_findall_doc, "findall(string[, pos[, endpos]]) --> list.\n\ Return a list of all non-overlapping matches of pattern in string."); PyDoc_STRVAR(pattern_finditer_doc, "finditer(string[, pos[, endpos]]) --> iterator.\n\ Return an iterator over all non-overlapping matches for the \n\ RE pattern in string. 
For each match, the iterator returns a\n\ match object."); PyDoc_STRVAR(pattern_sub_doc, "sub(repl, string[, count = 0]) --> newstring\n\ Return the string obtained by replacing the leftmost non-overlapping\n\ occurrences of pattern in string by the replacement repl."); PyDoc_STRVAR(pattern_subn_doc, "subn(repl, string[, count = 0]) --> (newstring, number of subs)\n\ Return the tuple (new_string, number_of_subs_made) found by replacing\n\ the leftmost non-overlapping occurrences of pattern with the\n\ replacement repl."); PyDoc_STRVAR(pattern_doc, "Compiled regular expression objects"); static PyMethodDef pattern_methods[] = { {"match", (PyCFunction) pattern_match, METH_VARARGS|METH_KEYWORDS, pattern_match_doc}, {"search", (PyCFunction) pattern_search, METH_VARARGS|METH_KEYWORDS, pattern_search_doc}, {"sub", (PyCFunction) pattern_sub, METH_VARARGS|METH_KEYWORDS, pattern_sub_doc}, {"subn", (PyCFunction) pattern_subn, METH_VARARGS|METH_KEYWORDS, pattern_subn_doc}, {"split", (PyCFunction) pattern_split, METH_VARARGS|METH_KEYWORDS, pattern_split_doc}, {"findall", (PyCFunction) pattern_findall, METH_VARARGS|METH_KEYWORDS, pattern_findall_doc}, #if PY_VERSION_HEX >= 0x02020000 {"finditer", (PyCFunction) pattern_finditer, METH_VARARGS, pattern_finditer_doc}, #endif {"scanner", (PyCFunction) pattern_scanner, METH_VARARGS}, {"__copy__", (PyCFunction) pattern_copy, METH_NOARGS}, {"__deepcopy__", (PyCFunction) pattern_deepcopy, METH_O}, {NULL, NULL} }; #define PAT_OFF(x) offsetof(PatternObject, x) static PyMemberDef pattern_members[] = { {"pattern", T_OBJECT, PAT_OFF(pattern), READONLY}, {"flags", T_INT, PAT_OFF(flags), READONLY}, {"groups", T_PYSSIZET, PAT_OFF(groups), READONLY}, {"groupindex", T_OBJECT, PAT_OFF(groupindex), READONLY}, {NULL} /* Sentinel */ }; statichere PyTypeObject Pattern_Type = { PyObject_HEAD_INIT(NULL) 0, "_" SRE_MODULE ".SRE_Pattern", sizeof(PatternObject), sizeof(SRE_CODE), (destructor)pattern_dealloc, /*tp_dealloc*/ 0, /* tp_print */ 0, /* 
tp_getattrn */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ pattern_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ offsetof(PatternObject, weakreflist), /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ pattern_methods, /* tp_methods */ pattern_members, /* tp_members */ }; static int _validate(PatternObject *self); /* Forward */ static PyObject * _compile(PyObject* self_, PyObject* args) { /* "compile" pattern descriptor to pattern object */ PatternObject* self; Py_ssize_t i, n; PyObject* pattern; int flags = 0; PyObject* code; Py_ssize_t groups = 0; PyObject* groupindex = NULL; PyObject* indexgroup = NULL; if (!PyArg_ParseTuple(args, "OiO!|nOO", &pattern, &flags, &PyList_Type, &code, &groups, &groupindex, &indexgroup)) return NULL; n = PyList_GET_SIZE(code); /* coverity[ampersand_in_size] */ self = PyObject_NEW_VAR(PatternObject, &Pattern_Type, n); if (!self) return NULL; self->weakreflist = NULL; self->pattern = NULL; self->groupindex = NULL; self->indexgroup = NULL; self->codesize = n; for (i = 0; i < n; i++) { PyObject *o = PyList_GET_ITEM(code, i); unsigned long value = PyInt_Check(o) ? 
(unsigned long)PyInt_AsLong(o) : PyLong_AsUnsignedLong(o); self->code[i] = (SRE_CODE) value; if ((unsigned long) self->code[i] != value) { PyErr_SetString(PyExc_OverflowError, "regular expression code size limit exceeded"); break; } } if (PyErr_Occurred()) { Py_DECREF(self); return NULL; } Py_INCREF(pattern); self->pattern = pattern; self->flags = flags; self->groups = groups; Py_XINCREF(groupindex); self->groupindex = groupindex; Py_XINCREF(indexgroup); self->indexgroup = indexgroup; self->weakreflist = NULL; if (!_validate(self)) { Py_DECREF(self); return NULL; } return (PyObject*) self; } /* -------------------------------------------------------------------- */ /* Code validation */ /* To learn more about this code, have a look at the _compile() function in Lib/sre_compile.py. The validation functions below checks the code array for conformance with the code patterns generated there. The nice thing about the generated code is that it is position-independent: all jumps are relative jumps forward. Also, jumps don't cross each other: the target of a later jump is always earlier than the target of an earlier jump. IOW, this is okay: J---------J-------T--------T \ \_____/ / \______________________/ but this is not: J---------J-------T--------T \_________\_____/ / \____________/ It also helps that SRE_CODE is always an unsigned type, either 2 bytes or 4 bytes wide (the latter if Python is compiled for "wide" unicode support). 
*/

/* Defining this one enables tracing of the validator */
#undef VVERBOSE

/* Trace macro for the validator */
#if defined(VVERBOSE)
#define VTRACE(v) printf v
#else
#define VTRACE(v) do {} while(0)  /* do nothing */
#endif

/* Report failure */
#define FAIL do { VTRACE(("FAIL: %d\n", __LINE__)); return 0; } while (0)

/* Extract opcode, argument, or skip count from code array */
/* NOTE(review): these macros rely on `code`, `end`, and the relevant
   of `op`/`arg`/`skip` being in scope at the expansion site, and FAIL
   returns 0 from the *enclosing* function when the code array is
   truncated. */
#define GET_OP \
    do { \
        VTRACE(("%p: ", code)); \
        if (code >= end) FAIL; \
        op = *code++; \
        VTRACE(("%lu (op)\n", (unsigned long)op)); \
    } while (0)
#define GET_ARG \
    do { \
        VTRACE(("%p= ", code)); \
        if (code >= end) FAIL; \
        arg = *code++; \
        VTRACE(("%lu (arg)\n", (unsigned long)arg)); \
    } while (0)
#define GET_SKIP_ADJ(adj) \
    do { \
        VTRACE(("%p= ", code)); \
        if (code >= end) FAIL; \
        skip = *code; \
        VTRACE(("%lu (skip to %p)\n", \
               (unsigned long)skip, code+skip)); \
        if (code+skip-adj < code || code+skip-adj > end)\
            FAIL; \
        code++; \
    } while (0)
#define GET_SKIP GET_SKIP_ADJ(0)

/* Validate a charset sub-program in [code, end): every opcode must be
   a legal charset element and every embedded table must fit inside the
   code array.  Returns 1 if valid, 0 (via FAIL) otherwise. */
static int
_validate_charset(SRE_CODE *code, SRE_CODE *end)
{
    /* Some variables are manipulated by the macros above */
    SRE_CODE op;
    SRE_CODE arg;
    SRE_CODE offset;
    int i;

    while (code < end) {
        GET_OP;
        switch (op) {

        case SRE_OP_NEGATE:
            break;

        case SRE_OP_LITERAL:
            GET_ARG;
            break;

        case SRE_OP_RANGE:
            /* two args: low and high end of the range */
            GET_ARG;
            GET_ARG;
            break;

        case SRE_OP_CHARSET:
            offset = 32/sizeof(SRE_CODE); /* 32-byte bitmap */
            /* the `code+offset < code` half guards against wraparound */
            if (code+offset < code || code+offset > end)
                FAIL;
            code += offset;
            break;

        case SRE_OP_BIGCHARSET:
            GET_ARG; /* Number of blocks */
            offset = 256/sizeof(SRE_CODE); /* 256-byte table */
            if (code+offset < code || code+offset > end)
                FAIL;
            /* Make sure that each byte points to a valid block */
            for (i = 0; i < 256; i++) {
                if (((unsigned char *)code)[i] >= arg)
                    FAIL;
            }
            code += offset;
            offset = arg * 32/sizeof(SRE_CODE); /* 32-byte bitmap times arg */
            if (code+offset < code || code+offset > end)
                FAIL;
            code += offset;
            break;

        case SRE_OP_CATEGORY:
            GET_ARG;
            switch (arg) {
            case SRE_CATEGORY_DIGIT:
            case SRE_CATEGORY_NOT_DIGIT:
            case
SRE_CATEGORY_SPACE: case SRE_CATEGORY_NOT_SPACE: case SRE_CATEGORY_WORD: case SRE_CATEGORY_NOT_WORD: case SRE_CATEGORY_LINEBREAK: case SRE_CATEGORY_NOT_LINEBREAK: case SRE_CATEGORY_LOC_WORD: case SRE_CATEGORY_LOC_NOT_WORD: case SRE_CATEGORY_UNI_DIGIT: case SRE_CATEGORY_UNI_NOT_DIGIT: case SRE_CATEGORY_UNI_SPACE: case SRE_CATEGORY_UNI_NOT_SPACE: case SRE_CATEGORY_UNI_WORD: case SRE_CATEGORY_UNI_NOT_WORD: case SRE_CATEGORY_UNI_LINEBREAK: case SRE_CATEGORY_UNI_NOT_LINEBREAK: break; default: FAIL; } break; default: FAIL; } } return 1; } static int _validate_inner(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups) { /* Some variables are manipulated by the macros above */ SRE_CODE op; SRE_CODE arg; SRE_CODE skip; VTRACE(("code=%p, end=%p\n", code, end)); if (code > end) FAIL; while (code < end) { GET_OP; switch (op) { case SRE_OP_MARK: /* We don't check whether marks are properly nested; the sre_match() code is robust even if they don't, and the worst you can get is nonsensical match results. 
*/ GET_ARG; if (arg > 2*groups+1) { VTRACE(("arg=%d, groups=%d\n", (int)arg, (int)groups)); FAIL; } break; case SRE_OP_LITERAL: case SRE_OP_NOT_LITERAL: case SRE_OP_LITERAL_IGNORE: case SRE_OP_NOT_LITERAL_IGNORE: GET_ARG; /* The arg is just a character, nothing to check */ break; case SRE_OP_SUCCESS: case SRE_OP_FAILURE: /* Nothing to check; these normally end the matching process */ break; case SRE_OP_AT: GET_ARG; switch (arg) { case SRE_AT_BEGINNING: case SRE_AT_BEGINNING_STRING: case SRE_AT_BEGINNING_LINE: case SRE_AT_END: case SRE_AT_END_LINE: case SRE_AT_END_STRING: case SRE_AT_BOUNDARY: case SRE_AT_NON_BOUNDARY: case SRE_AT_LOC_BOUNDARY: case SRE_AT_LOC_NON_BOUNDARY: case SRE_AT_UNI_BOUNDARY: case SRE_AT_UNI_NON_BOUNDARY: break; default: FAIL; } break; case SRE_OP_ANY: case SRE_OP_ANY_ALL: /* These have no operands */ break; case SRE_OP_IN: case SRE_OP_IN_IGNORE: GET_SKIP; /* Stop 1 before the end; we check the FAILURE below */ if (!_validate_charset(code, code+skip-2)) FAIL; if (code[skip-2] != SRE_OP_FAILURE) FAIL; code += skip-1; break; case SRE_OP_INFO: { /* A minimal info field is <INFO> <1=skip> <2=flags> <3=min> <4=max>; If SRE_INFO_PREFIX or SRE_INFO_CHARSET is in the flags, more follows. 
*/ SRE_CODE flags, i; SRE_CODE *newcode; GET_SKIP; newcode = code+skip-1; GET_ARG; flags = arg; GET_ARG; /* min */ GET_ARG; /* max */ /* Check that only valid flags are present */ if ((flags & ~(SRE_INFO_PREFIX | SRE_INFO_LITERAL | SRE_INFO_CHARSET)) != 0) FAIL; /* PREFIX and CHARSET are mutually exclusive */ if ((flags & SRE_INFO_PREFIX) && (flags & SRE_INFO_CHARSET)) FAIL; /* LITERAL implies PREFIX */ if ((flags & SRE_INFO_LITERAL) && !(flags & SRE_INFO_PREFIX)) FAIL; /* Validate the prefix */ if (flags & SRE_INFO_PREFIX) { SRE_CODE prefix_len; GET_ARG; prefix_len = arg; GET_ARG; /* prefix skip */ /* Here comes the prefix string */ if (code+prefix_len < code || code+prefix_len > newcode) FAIL; code += prefix_len; /* And here comes the overlap table */ if (code+prefix_len < code || code+prefix_len > newcode) FAIL; /* Each overlap value should be < prefix_len */ for (i = 0; i < prefix_len; i++) { if (code[i] >= prefix_len) FAIL; } code += prefix_len; } /* Validate the charset */ if (flags & SRE_INFO_CHARSET) { if (!_validate_charset(code, newcode-1)) FAIL; if (newcode[-1] != SRE_OP_FAILURE) FAIL; code = newcode; } else if (code != newcode) { VTRACE(("code=%p, newcode=%p\n", code, newcode)); FAIL; } } break; case SRE_OP_BRANCH: { SRE_CODE *target = NULL; for (;;) { GET_SKIP; if (skip == 0) break; /* Stop 2 before the end; we check the JUMP below */ if (!_validate_inner(code, code+skip-3, groups)) FAIL; code += skip-3; /* Check that it ends with a JUMP, and that each JUMP has the same target */ GET_OP; if (op != SRE_OP_JUMP) FAIL; GET_SKIP; if (target == NULL) target = code+skip-1; else if (code+skip-1 != target) FAIL; } } break; case SRE_OP_REPEAT_ONE: case SRE_OP_MIN_REPEAT_ONE: { SRE_CODE min, max; GET_SKIP; GET_ARG; min = arg; GET_ARG; max = arg; if (min > max) FAIL; #ifdef Py_UNICODE_WIDE if (max > 65535) FAIL; #endif if (!_validate_inner(code, code+skip-4, groups)) FAIL; code += skip-4; GET_OP; if (op != SRE_OP_SUCCESS) FAIL; } break; case SRE_OP_REPEAT: { 
SRE_CODE min, max; GET_SKIP; GET_ARG; min = arg; GET_ARG; max = arg; if (min > max) FAIL; #ifdef Py_UNICODE_WIDE if (max > 65535) FAIL; #endif if (!_validate_inner(code, code+skip-3, groups)) FAIL; code += skip-3; GET_OP; if (op != SRE_OP_MAX_UNTIL && op != SRE_OP_MIN_UNTIL) FAIL; } break; case SRE_OP_GROUPREF: case SRE_OP_GROUPREF_IGNORE: GET_ARG; if (arg >= groups) FAIL; break; case SRE_OP_GROUPREF_EXISTS: /* The regex syntax for this is: '(?(group)then|else)', where 'group' is either an integer group number or a group name, 'then' and 'else' are sub-regexes, and 'else' is optional. */ GET_ARG; if (arg >= groups) FAIL; GET_SKIP_ADJ(1); code--; /* The skip is relative to the first arg! */ /* There are two possibilities here: if there is both a 'then' part and an 'else' part, the generated code looks like: GROUPREF_EXISTS <group> <skipyes> ...then part... JUMP <skipno> (<skipyes> jumps here) ...else part... (<skipno> jumps here) If there is only a 'then' part, it looks like: GROUPREF_EXISTS <group> <skip> ...then part... (<skip> jumps here) There is no direct way to decide which it is, and we don't want to allow arbitrary jumps anywhere in the code; so we just look for a JUMP opcode preceding our skip target. 
*/ if (skip >= 3 && code+skip-3 >= code && code[skip-3] == SRE_OP_JUMP) { VTRACE(("both then and else parts present\n")); if (!_validate_inner(code+1, code+skip-3, groups)) FAIL; code += skip-2; /* Position after JUMP, at <skipno> */ GET_SKIP; if (!_validate_inner(code, code+skip-1, groups)) FAIL; code += skip-1; } else { VTRACE(("only a then part present\n")); if (!_validate_inner(code+1, code+skip-1, groups)) FAIL; code += skip-1; } break; case SRE_OP_ASSERT: case SRE_OP_ASSERT_NOT: GET_SKIP; GET_ARG; /* 0 for lookahead, width for lookbehind */ code--; /* Back up over arg to simplify math below */ if (arg & 0x80000000) FAIL; /* Width too large */ /* Stop 1 before the end; we check the SUCCESS below */ if (!_validate_inner(code+1, code+skip-2, groups)) FAIL; code += skip-2; GET_OP; if (op != SRE_OP_SUCCESS) FAIL; break; default: FAIL; } } VTRACE(("okay\n")); return 1; } static int _validate_outer(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups) { if (groups < 0 || groups > 100 || code >= end || end[-1] != SRE_OP_SUCCESS) FAIL; if (groups == 0) /* fix for simplejson */ groups = 100; /* 100 groups should always be safe */ return _validate_inner(code, end-1, groups); } static int _validate(PatternObject *self) { if (!_validate_outer(self->code, self->code+self->codesize, self->groups)) { PyErr_SetString(PyExc_RuntimeError, "invalid SRE code"); return 0; } else VTRACE(("Success!\n")); return 1; } /* -------------------------------------------------------------------- */ /* match methods */ static void match_dealloc(MatchObject* self) { Py_XDECREF(self->regs); Py_XDECREF(self->string); Py_DECREF(self->pattern); PyObject_DEL(self); } static PyObject* match_getslice_by_index(MatchObject* self, Py_ssize_t index, PyObject* def) { if (index < 0 || index >= self->groups) { /* raise IndexError if we were given a bad group number */ PyErr_SetString( PyExc_IndexError, "no such group" ); return NULL; } index *= 2; if (self->string == Py_None || self->mark[index] < 0) { /* 
return default value if the string or group is undefined */ Py_INCREF(def); return def; } return PySequence_GetSlice( self->string, self->mark[index], self->mark[index+1] ); } static Py_ssize_t match_getindex(MatchObject* self, PyObject* index) { Py_ssize_t i; if (PyInt_Check(index)) return PyInt_AsSsize_t(index); i = -1; if (self->pattern->groupindex) { index = PyObject_GetItem(self->pattern->groupindex, index); if (index) { if (PyInt_Check(index) || PyLong_Check(index)) i = PyInt_AsSsize_t(index); Py_DECREF(index); } else PyErr_Clear(); } return i; } static PyObject* match_getslice(MatchObject* self, PyObject* index, PyObject* def) { return match_getslice_by_index(self, match_getindex(self, index), def); } static PyObject* match_expand(MatchObject* self, PyObject* ptemplate) { /* delegate to Python code */ return call( SRE_PY_MODULE, "_expand", PyTuple_Pack(3, self->pattern, self, ptemplate) ); } static PyObject* match_group(MatchObject* self, PyObject* args) { PyObject* result; Py_ssize_t i, size; size = PyTuple_GET_SIZE(args); switch (size) { case 0: result = match_getslice(self, Py_False, Py_None); break; case 1: result = match_getslice(self, PyTuple_GET_ITEM(args, 0), Py_None); break; default: /* fetch multiple items */ result = PyTuple_New(size); if (!result) return NULL; for (i = 0; i < size; i++) { PyObject* item = match_getslice( self, PyTuple_GET_ITEM(args, i), Py_None ); if (!item) { Py_DECREF(result); return NULL; } PyTuple_SET_ITEM(result, i, item); } break; } return result; } static PyObject* match_groups(MatchObject* self, PyObject* args, PyObject* kw) { PyObject* result; Py_ssize_t index; PyObject* def = Py_None; static char* kwlist[] = { "default", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "|O:groups", kwlist, &def)) return NULL; result = PyTuple_New(self->groups-1); if (!result) return NULL; for (index = 1; index < self->groups; index++) { PyObject* item; item = match_getslice_by_index(self, index, def); if (!item) { Py_DECREF(result); 
return NULL; } PyTuple_SET_ITEM(result, index-1, item); } return result; } static PyObject* match_groupdict(MatchObject* self, PyObject* args, PyObject* kw) { PyObject* result; PyObject* keys; Py_ssize_t index; PyObject* def = Py_None; static char* kwlist[] = { "default", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kw, "|O:groupdict", kwlist, &def)) return NULL; result = PyDict_New(); if (!result || !self->pattern->groupindex) return result; keys = PyMapping_Keys(self->pattern->groupindex); if (!keys) goto failed; for (index = 0; index < PyList_GET_SIZE(keys); index++) { int status; PyObject* key; PyObject* value; key = PyList_GET_ITEM(keys, index); if (!key) goto failed; value = match_getslice(self, key, def); if (!value) { Py_DECREF(key); goto failed; } status = PyDict_SetItem(result, key, value); Py_DECREF(value); if (status < 0) goto failed; } Py_DECREF(keys); return result; failed: Py_XDECREF(keys); Py_DECREF(result); return NULL; } static PyObject* match_start(MatchObject* self, PyObject* args) { Py_ssize_t index; PyObject* index_ = Py_False; /* zero */ if (!PyArg_UnpackTuple(args, "start", 0, 1, &index_)) return NULL; index = match_getindex(self, index_); if (index < 0 || index >= self->groups) { PyErr_SetString( PyExc_IndexError, "no such group" ); return NULL; } /* mark is -1 if group is undefined */ return Py_BuildValue("i", self->mark[index*2]); } static PyObject* match_end(MatchObject* self, PyObject* args) { Py_ssize_t index; PyObject* index_ = Py_False; /* zero */ if (!PyArg_UnpackTuple(args, "end", 0, 1, &index_)) return NULL; index = match_getindex(self, index_); if (index < 0 || index >= self->groups) { PyErr_SetString( PyExc_IndexError, "no such group" ); return NULL; } /* mark is -1 if group is undefined */ return Py_BuildValue("i", self->mark[index*2+1]); } LOCAL(PyObject*) _pair(Py_ssize_t i1, Py_ssize_t i2) { PyObject* pair; PyObject* item; pair = PyTuple_New(2); if (!pair) return NULL; item = PyInt_FromSsize_t(i1); if (!item) goto error; 
PyTuple_SET_ITEM(pair, 0, item); item = PyInt_FromSsize_t(i2); if (!item) goto error; PyTuple_SET_ITEM(pair, 1, item); return pair; error: Py_DECREF(pair); return NULL; } static PyObject* match_span(MatchObject* self, PyObject* args) { Py_ssize_t index; PyObject* index_ = Py_False; /* zero */ if (!PyArg_UnpackTuple(args, "span", 0, 1, &index_)) return NULL; index = match_getindex(self, index_); if (index < 0 || index >= self->groups) { PyErr_SetString( PyExc_IndexError, "no such group" ); return NULL; } /* marks are -1 if group is undefined */ return _pair(self->mark[index*2], self->mark[index*2+1]); } static PyObject* match_regs(MatchObject* self) { PyObject* regs; PyObject* item; Py_ssize_t index; regs = PyTuple_New(self->groups); if (!regs) return NULL; for (index = 0; index < self->groups; index++) { item = _pair(self->mark[index*2], self->mark[index*2+1]); if (!item) { Py_DECREF(regs); return NULL; } PyTuple_SET_ITEM(regs, index, item); } Py_INCREF(regs); self->regs = regs; return regs; } static PyObject* match_copy(MatchObject* self, PyObject *unused) { #ifdef USE_BUILTIN_COPY MatchObject* copy; Py_ssize_t slots, offset; slots = 2 * (self->pattern->groups+1); copy = PyObject_NEW_VAR(MatchObject, &Match_Type, slots); if (!copy) return NULL; /* this value a constant, but any compiler should be able to figure that out all by itself */ offset = offsetof(MatchObject, string); Py_XINCREF(self->pattern); Py_XINCREF(self->string); Py_XINCREF(self->regs); memcpy((char*) copy + offset, (char*) self + offset, sizeof(MatchObject) + slots * sizeof(Py_ssize_t) - offset); return (PyObject*) copy; #else PyErr_SetString(PyExc_TypeError, "cannot copy this match object"); return NULL; #endif } static PyObject* match_deepcopy(MatchObject* self, PyObject* memo) { #ifdef USE_BUILTIN_COPY MatchObject* copy; copy = (MatchObject*) match_copy(self); if (!copy) return NULL; if (!deepcopy((PyObject**) &copy->pattern, memo) || !deepcopy(&copy->string, memo) || !deepcopy(&copy->regs, 
memo)) { Py_DECREF(copy); return NULL; } #else PyErr_SetString(PyExc_TypeError, "cannot deepcopy this match object"); return NULL; #endif } static struct PyMethodDef match_methods[] = { {"group", (PyCFunction) match_group, METH_VARARGS}, {"start", (PyCFunction) match_start, METH_VARARGS}, {"end", (PyCFunction) match_end, METH_VARARGS}, {"span", (PyCFunction) match_span, METH_VARARGS}, {"groups", (PyCFunction) match_groups, METH_VARARGS|METH_KEYWORDS}, {"groupdict", (PyCFunction) match_groupdict, METH_VARARGS|METH_KEYWORDS}, {"expand", (PyCFunction) match_expand, METH_O}, {"__copy__", (PyCFunction) match_copy, METH_NOARGS}, {"__deepcopy__", (PyCFunction) match_deepcopy, METH_O}, {NULL, NULL} }; static PyObject * match_lastindex_get(MatchObject *self) { if (self->lastindex >= 0) return Py_BuildValue("i", self->lastindex); Py_INCREF(Py_None); return Py_None; } static PyObject * match_lastgroup_get(MatchObject *self) { if (self->pattern->indexgroup && self->lastindex >= 0) { PyObject* result = PySequence_GetItem( self->pattern->indexgroup, self->lastindex ); if (result) return result; PyErr_Clear(); } Py_INCREF(Py_None); return Py_None; } static PyObject * match_regs_get(MatchObject *self) { if (self->regs) { Py_INCREF(self->regs); return self->regs; } else return match_regs(self); } static PyGetSetDef match_getset[] = { {"lastindex", (getter)match_lastindex_get, (setter)NULL}, {"lastgroup", (getter)match_lastgroup_get, (setter)NULL}, {"regs", (getter)match_regs_get, (setter)NULL}, {NULL} }; #define MATCH_OFF(x) offsetof(MatchObject, x) static PyMemberDef match_members[] = { {"string", T_OBJECT, MATCH_OFF(string), READONLY}, {"re", T_OBJECT, MATCH_OFF(pattern), READONLY}, {"pos", T_PYSSIZET, MATCH_OFF(pos), READONLY}, {"endpos", T_PYSSIZET, MATCH_OFF(endpos), READONLY}, {NULL} }; /* FIXME: implement setattr("string", None) as a special case (to detach the associated string, if any */ static PyTypeObject Match_Type = { PyVarObject_HEAD_INIT(NULL, 0) "_" SRE_MODULE 
".SRE_Match", sizeof(MatchObject), sizeof(Py_ssize_t), (destructor)match_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ match_methods, /* tp_methods */ match_members, /* tp_members */ match_getset, /* tp_getset */ }; static PyObject* pattern_new_match(PatternObject* pattern, SRE_STATE* state, int status) { /* create match object (from state object) */ MatchObject* match; Py_ssize_t i, j; char* base; int n; if (status > 0) { /* create match object (with room for extra group marks) */ /* coverity[ampersand_in_size] */ match = PyObject_NEW_VAR(MatchObject, &Match_Type, 2*(pattern->groups+1)); if (!match) return NULL; Py_INCREF(pattern); match->pattern = pattern; Py_INCREF(state->string); match->string = state->string; match->regs = NULL; match->groups = pattern->groups+1; /* fill in group slices */ base = (char*) state->beginning; n = state->charsize; match->mark[0] = ((char*) state->start - base) / n; match->mark[1] = ((char*) state->ptr - base) / n; for (i = j = 0; i < pattern->groups; i++, j+=2) if (j+1 <= state->lastmark && state->mark[j] && state->mark[j+1]) { match->mark[j+2] = ((char*) state->mark[j] - base) / n; match->mark[j+3] = ((char*) state->mark[j+1] - base) / n; } else match->mark[j+2] = match->mark[j+3] = -1; /* undefined */ match->pos = state->pos; match->endpos = state->endpos; match->lastindex = state->lastindex; return (PyObject*) match; } else if (status == 0) { /* no match */ Py_INCREF(Py_None); return Py_None; } /* internal error */ pattern_error(status); return NULL; } /* 
-------------------------------------------------------------------- */ /* scanner methods (experimental) */ static void scanner_dealloc(ScannerObject* self) { state_fini(&self->state); Py_XDECREF(self->pattern); PyObject_DEL(self); } static PyObject* scanner_match(ScannerObject* self, PyObject *unused) { SRE_STATE* state = &self->state; PyObject* match; int status; state_reset(state); state->ptr = state->start; if (state->charsize == 1) { status = sre_match(state, PatternObject_GetCode(self->pattern)); } else { #if defined(HAVE_UNICODE) status = sre_umatch(state, PatternObject_GetCode(self->pattern)); #endif } if (PyErr_Occurred()) return NULL; match = pattern_new_match((PatternObject*) self->pattern, state, status); if (status == 0 || state->ptr == state->start) state->start = (void*) ((char*) state->ptr + state->charsize); else state->start = state->ptr; return match; } static PyObject* scanner_search(ScannerObject* self, PyObject *unused) { SRE_STATE* state = &self->state; PyObject* match; int status; state_reset(state); state->ptr = state->start; if (state->charsize == 1) { status = sre_search(state, PatternObject_GetCode(self->pattern)); } else { #if defined(HAVE_UNICODE) status = sre_usearch(state, PatternObject_GetCode(self->pattern)); #endif } if (PyErr_Occurred()) return NULL; match = pattern_new_match((PatternObject*) self->pattern, state, status); if (status == 0 || state->ptr == state->start) state->start = (void*) ((char*) state->ptr + state->charsize); else state->start = state->ptr; return match; } static PyMethodDef scanner_methods[] = { {"match", (PyCFunction) scanner_match, METH_NOARGS}, {"search", (PyCFunction) scanner_search, METH_NOARGS}, {NULL, NULL} }; #define SCAN_OFF(x) offsetof(ScannerObject, x) static PyMemberDef scanner_members[] = { {"pattern", T_OBJECT, SCAN_OFF(pattern), READONLY}, {NULL} /* Sentinel */ }; statichere PyTypeObject Scanner_Type = { PyObject_HEAD_INIT(NULL) 0, "_" SRE_MODULE ".SRE_Scanner", sizeof(ScannerObject), 0, 
(destructor)scanner_dealloc, /*tp_dealloc*/ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ scanner_methods, /* tp_methods */ scanner_members, /* tp_members */ 0, /* tp_getset */ }; static PyObject* pattern_scanner(PatternObject* pattern, PyObject* args) { /* create search state object */ ScannerObject* self; PyObject* string; Py_ssize_t start = 0; Py_ssize_t end = PY_SSIZE_T_MAX; if (!PyArg_ParseTuple(args, "O|nn:scanner", &string, &start, &end)) return NULL; /* create scanner object */ self = PyObject_NEW(ScannerObject, &Scanner_Type); if (!self) return NULL; self->pattern = NULL; string = state_init(&self->state, pattern, string, start, end); if (!string) { Py_DECREF(self); return NULL; } Py_INCREF(pattern); self->pattern = (PyObject*) pattern; return (PyObject*) self; } static PyMethodDef _functions[] = { {"compile", _compile, METH_VARARGS}, {"getcodesize", sre_codesize, METH_NOARGS}, {"getlower", sre_getlower, METH_VARARGS}, {NULL, NULL} }; #if PY_VERSION_HEX < 0x02030000 DL_EXPORT(void) init_sre(void) #else #ifdef __GNUC__ extern __attribute__((visibility("default"))) #else extern __declspec(dllexport) #endif PyMODINIT_FUNC init_sre(void) #endif { PyObject* m; PyObject* d; PyObject* x; /* Patch object types */ if (PyType_Ready(&Pattern_Type) || PyType_Ready(&Match_Type) || PyType_Ready(&Scanner_Type)) return; m = Py_InitModule("_" SRE_MODULE, _functions); if (m == NULL) return; d = PyModule_GetDict(m); x = PyInt_FromLong(SRE_MAGIC); if (x) { PyDict_SetItemString(d, "MAGIC", x); Py_DECREF(x); } x = PyInt_FromLong(sizeof(SRE_CODE)); if (x) { 
PyDict_SetItemString(d, "CODESIZE", x); Py_DECREF(x); } x = PyString_FromString(copyright); if (x) { PyDict_SetItemString(d, "copyright", x); Py_DECREF(x); } } #endif /* !defined(SRE_RECURSIVE) */ /* vim:ts=4:sw=4:et */
342435.c
// This test file may used by other test to test children behaviour spawned. #include <sys/types.h> #include <signal.h> #include <assert.h> #include <stdlib.h> #include <features.h> #include <sys/stat.h> #include <pthread.h> #include <unistd.h> #include "test.h" char **g_argv; void sigio_handler(int sig) { printf("[child] SIGIO is caught in child!\n"); } void sigabort_handler(int sig) { printf("[child] sigabort is caught in child! This shouldn't happen!\n"); exit(-1); } // Parent process has set the sigmask of this child process to block SIGABORT by inheritage or posix_spawnattr_t int test_spawn_attribute_sigmask() { printf("[child] Run a child process with pid = %d and ppid = %d\n", getpid(), getppid()); #ifndef __GLIBC__ // musl can perform extra checks struct __sigset_t current_block_sigmask; struct __sigset_t test; #else sigset_t current_block_sigmask, test; #endif sigprocmask(0, NULL, &current_block_sigmask); sigemptyset(&test); sigaddset(&test, SIGABRT); #ifndef __GLIBC__ if (current_block_sigmask.__bits[0] != test.__bits[0]) { THROW_ERROR("[child] signask in child process is wrong"); } #endif signal(SIGIO, sigio_handler); signal(SIGABRT, sigabort_handler); raise(SIGIO); raise(SIGABRT); printf("[child] child test_spawn_attribute_sigmask - [Ok]\n"); return 0; } // Parent process will set the sigaction of SIGALRM and SIGILL to SIG_IGN and SIGIO to user-defined handler. Then use posix_spawn attribute to set // SIGALRM to SIG_DEF. // Child process should inherit the ignore action of SIGILL and change SIGALRM and SIGIO sigaction to SIG_DEF. 
int test_spawn_attribute_sigdef() { struct sigaction action; sigaction(SIGALRM, NULL, &action); if (action.sa_handler != SIG_DFL) { THROW_ERROR("[child] sig handler of SIGALRM is wrong"); } sigaction(SIGIO, NULL, &action); if (action.sa_handler != SIG_DFL) { THROW_ERROR("[child] sig handler of SIGIO is wrong"); } sigaction(SIGILL, NULL, &action); if (action.sa_handler != SIG_IGN) { THROW_ERROR("[child] sig handler of SIGILL is wrong"); } printf("[child] child test_spawn_attribute_sigdef - [Ok]\n"); return 0; } int test_ioctl_fioclex() { int regular_file_fd = atoi(g_argv[3]); int pipe_reader_fd = atoi(g_argv[4]); int pipe_writer_fd = atoi(g_argv[5]); // regular file is set with ioctl FIONCLEX struct stat stat_buf; int ret = fstat(regular_file_fd, &stat_buf); if (ret != 0 || !S_ISREG(stat_buf.st_mode)) { THROW_ERROR("fstat regular file fd error"); } // pipe reader is set with ioctl FIOCLEX ret = fstat(pipe_reader_fd, &stat_buf); if (ret != -1 || errno != EBADF) { THROW_ERROR("fstat pipe reader fd error"); } // pipe writer is set with default and should inherit by child ret = fstat(pipe_writer_fd, &stat_buf); if (ret != 0 || !S_ISFIFO(stat_buf.st_mode)) { THROW_ERROR("fstat pipe writer fd error"); } return 0; } // This child process will first create multiple threads which are waiting on a condition // and then a child thread will call execve and all threads should be destroyed except the // main thread. Use "pthread" test case as reference. 
#define NTHREADS (5) #define WAIT_ROUND (100000) struct thread_cond_arg { int ti; volatile unsigned int *val; volatile int *exit_thread_count; pthread_cond_t *cond_val; pthread_mutex_t *mutex; }; static void *thread_cond_wait(void *_arg) { struct thread_cond_arg *arg = _arg; printf("Thread #%d: start to wait on condition variable.\n", arg->ti); for (unsigned int i = 0; i < WAIT_ROUND; ++i) { pthread_mutex_lock(arg->mutex); // execve on a child thread with mutex if (arg->ti == NTHREADS - 4) { char *args[] = {"/bin/getpid", NULL}; if (execve("/bin/getpid", args, NULL) < 0) { printf("execve failed with errno: %d", errno); exit(errno); } } while (*(arg->val) == 0) { pthread_cond_wait(arg->cond_val, arg->mutex); } pthread_mutex_unlock(arg->mutex); } (*arg->exit_thread_count)++; printf("Thread #%d: exited.\n", arg->ti); return NULL; } int test_execve_child_thread() { volatile unsigned int val = 0; volatile int exit_thread_count = 0; pthread_t threads[NTHREADS]; struct thread_cond_arg thread_args[NTHREADS]; pthread_cond_t cond_val = PTHREAD_COND_INITIALIZER; pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* * Start the threads waiting on the condition variable */ for (int ti = 0; ti < NTHREADS; ti++) { struct thread_cond_arg *thread_arg = &thread_args[ti]; thread_arg->ti = ti; thread_arg->val = &val; thread_arg->exit_thread_count = &exit_thread_count; thread_arg->cond_val = &cond_val; thread_arg->mutex = &mutex; if (pthread_create(&threads[ti], NULL, thread_cond_wait, thread_arg) < 0) { printf("ERROR: pthread_create failed (ti = %d)\n", ti); return -1; } } /* * Unblock all threads currently waiting on the condition variable */ while (exit_thread_count < NTHREADS) { pthread_mutex_lock(&mutex); val = 1; pthread_cond_broadcast(&cond_val); pthread_mutex_unlock(&mutex); pthread_mutex_lock(&mutex); val = 0; pthread_mutex_unlock(&mutex); } // wait for all threads to finish for (int ti = 0; ti < NTHREADS; ti++) { if (pthread_join(threads[ti], NULL) < 0) { printf("ERROR: 
pthread_join failed (ti = %d)\n", ti); return -1; } } THROW_ERROR("This should never be reached!"); return -1; } // /bin/naughty_child -t vfork reader_fd writer_fd // pipe_reader should remain open becuase it is inherited. // pipe_writer should be closed already before execve naughty_child. int test_vfork_child() { int pipe_reader_fd = atoi(g_argv[3]); int pipe_writer_fd = atoi(g_argv[4]); char buf[30] = {0}; struct stat stat_buf; int ret = read(pipe_reader_fd, buf, sizeof(buf)); if (ret < 0) { THROW_ERROR("[child] read from pipe error"); } // Check pipe reader if (fstat(pipe_reader_fd, &stat_buf) < 0 ) { THROW_ERROR("[child] fstat pipe files error"); } if (!S_ISFIFO(stat_buf.st_mode)) { THROW_ERROR("failed to check the pipe reader st_mode"); } // Check pipe writer which should be closed already ret = fstat(pipe_writer_fd, &stat_buf); if (ret >= 0 || errno != EBADF) { THROW_ERROR("failed to check the pipe writer which should be closed"); } printf("[child] received mesg: %s", buf); return 0; } // ============================================================================ // Test suite // ============================================================================ #define TEST_NAME_MAX 20 int start_test(const char *test_name) { if (strcmp(test_name, "sigmask") == 0) { return test_spawn_attribute_sigmask(); } else if (strcmp(test_name, "sigdef") == 0) { return test_spawn_attribute_sigdef(); } else if (strcmp(test_name, "fioclex") == 0) { return test_ioctl_fioclex(); } else if (strcmp(test_name, "execve_thread") == 0) { return test_execve_child_thread(); } else if (strcmp(test_name, "vfork") == 0) { return test_vfork_child(); } else { fprintf(stderr, "[child] test case not found\n"); return -1; } } void print_usage() { fprintf(stderr, "Usage:\n naughty_child [-t testcase1] [-t testcase2] ...\n\n"); fprintf(stderr, " Now support testcase: <sigmask, sigdef, fioclex, execve_thread, vfork>\n"); } int main(int argc, char *argv[]) { if (argc <= 1) { print_usage(); return 0; 
} g_argv = argv; int opt; char *testcase_name = calloc(1, TEST_NAME_MAX); while ((opt = getopt(argc, argv, "t:")) != -1) { switch (opt) { case 't': { int len = strlen(optarg); if (len >= TEST_NAME_MAX) { THROW_ERROR("[child] test case name too long"); } memset(testcase_name, 0, TEST_NAME_MAX); strncpy(testcase_name, optarg, len + 1); printf("[child] start testcase: %s\n", testcase_name); int ret = start_test(testcase_name); if (ret != 0) { THROW_ERROR("[child] test case failure"); } } break; default: print_usage(); exit(-1); } } free(testcase_name); return 0; }
950842.c
/** * Copyright (C) 2013-2015 * * @author [email protected] * @date 2013-11-19 * * @file HttpRequestHandlers.h * * @remark * */ #include <tiny_str_equal.h> #include <tiny_str_split.h> #include <StringArray.h> #include <tiny_log.h> #include "HttpRequestHandlers.h" #define TAG "HttpRequestHandlers" static void _OnItemRemoved (void * data, void *ctx) { HttpRequestHandlerContext *item = (HttpRequestHandlerContext *)data; HttpRequestHandlerContext_Delete(item); } TINY_LOR HttpRequestHandlers * HttpRequestHandlers_New(void) { return TinyList_New(_OnItemRemoved, NULL); } TINY_LOR void HttpRequestHandlers_Delete(HttpRequestHandlers *thiz) { TinyList_Delete(thiz); } TINY_LOR TinyRet HttpRequestHandlers_Put(HttpRequestHandlers *thiz, const char *method, const char *uri, HttpRequestHandler handler, void *ctx) { TinyRet ret = TINY_RET_OK; HttpRequestHandlerContext *context = NULL; RETURN_VAL_IF_FAIL(thiz, TINY_RET_E_ARG_NULL); RETURN_VAL_IF_FAIL(method, TINY_RET_E_ARG_NULL); RETURN_VAL_IF_FAIL(uri, TINY_RET_E_ARG_NULL); RETURN_VAL_IF_FAIL(handler, TINY_RET_E_ARG_NULL); LOG_I(TAG, "HttpRequestHandlers_Put: %s %s", method, uri); for (uint32_t i = 0; i < thiz->size; ++i) { HttpRequestHandlerContext *item = (HttpRequestHandlerContext *) TinyList_GetAt(thiz, i); if (str_equal(item->method, method, true) && str_equal(item->uri, uri, true)) { return TINY_RET_E_ITEM_EXIST; } } context = HttpRequestHandlerContext_New(method, uri, handler, ctx); if (context == NULL) { return TINY_RET_E_NEW; } ret = TinyList_AddTail(thiz, context); if (RET_FAILED(ret)) { HttpRequestHandlerContext_Delete(context); } return ret; } TINY_LOR TinyRet HttpRequestHandlers_PutDefaultHandler(HttpRequestHandlers *thiz, HttpRequestHandler handler, void *ctx) { return HttpRequestHandlers_Put(thiz, "*", "*", handler, ctx); } TINY_LOR static bool uriMatched(const char *pattern, const char *uri) { bool ret = true; do { if (str_equal(pattern, uri, true)) { break; } StringArray *p = StringArray_NewString(pattern, "/"); 
StringArray *u = StringArray_NewString(uri, "/"); if (p->values.size != u->values.size) { StringArray_Delete(p); StringArray_Delete(u); ret = false; break; } for (uint32_t i = 0; i < p->values.size; ++i) { const char *pp = TinyList_GetAt(&p->values, i); const char *uu = TinyList_GetAt(&u->values, i); LOG_D(TAG, "pp: %s == %s", pp, uu); if (pp[0] == ':') { LOG_D(TAG, "pp[0] = :, skip it"); continue; } if (! str_equal(pp, uu, true)) { ret = false; break; } } } while (false); return ret; } TINY_LOR static HttpMessage * handleRequest(HttpRequestHandlers * thiz, HttpMessage *request) { HttpRequestHandlerContext *c = HttpRequestHandlers_Get(thiz, request->request_line.method, request->request_line.uri); if (c == NULL) { LOG_I(TAG, "handleRequest FAILED, handler not found: %s %s", request->request_line.method, request->request_line.uri); return NULL; } return c->handler(request, c->ctx); } TINY_LOR static HttpMessage * handleDefault(HttpRequestHandlers * thiz, HttpMessage *request) { HttpRequestHandlerContext *c = HttpRequestHandlers_Get(thiz, "*", "*"); if (c == NULL) { LOG_I(TAG, "handleDefault FAILED, handler not found!"); return NULL; } return c->handler(request, c->ctx); } TINY_LOR HttpMessage *HttpRequestHandlers_HandleRequest(HttpRequestHandlers *thiz, HttpMessage *request) { HttpMessage * response = NULL; do { response = handleRequest(thiz, request); if (response != NULL) { break; } response = handleDefault(thiz, request); if (response != NULL) { break; } response = HttpMessage_NewHttpResponse(404, "NOT FOUND", NULL, NULL, 0); } while (false); return response; } TINY_LOR HttpRequestHandlerContext * HttpRequestHandlers_Get(HttpRequestHandlers *thiz, const char *method, const char *uri) { RETURN_VAL_IF_FAIL(thiz, NULL); RETURN_VAL_IF_FAIL(method, NULL); RETURN_VAL_IF_FAIL(uri, NULL); for (uint32_t i = 0; i < thiz->size; ++i) { HttpRequestHandlerContext *item = (HttpRequestHandlerContext *) TinyList_GetAt(thiz, i); if (str_equal(item->method, method, true) && 
uriMatched(item->uri, uri)) { return item; } } return NULL; }
365373.c
// mubanlu.c // Modified by Winder June.25 2000 inherit ROOM; void create() { set("short", "木板路"); set("long", @LONG 這是一條用木板鋪成的小道,雖然不是很長,但是卻看得出鋪得十 分細心,連一些細微的疏忽也注意到了。道路兩旁種着一些奇花異樹, 使過路人有心曠神怡的感覺。 LONG ); set("exits", ([ "south" : __DIR__"muwu3", "north" : __DIR__"xiaodao4", ])); set("no_clean_up", 0); set("outdoors", "xiaoyao"); set("coor/x", 90); set("coor/y", -510); set("coor/z", 0); setup(); replace_program(ROOM); }
125121.c
/* * Keyboard: Ergodox * Keymap: replicaJunction * Version: 1.2 * * This keymap is designed to complement my Atreus keyboard layout, found in keyboards/atreus. * The Atreus keyboard is a 40% board whose design was heavily influenced by the Ergodox, and I now * have both keyboards, so I've designed these layouts in an effort to make switching between the * two as easy as possible. * * I've also tried to make use of the extra keys on the Ergodox in as logical of a manner as possible, * adding to the layers in the Atreus config without disturbing what's there already. This allows for * things like F11-F20, the Application (Menu) key, and better media key placement. * * The default key layout in this keymap is Colemak-ModDH. Information on that layout can be found * here: https://colemakmods.github.io/mod-dh/ */ #include QMK_KEYBOARD_H #include "debug.h" #include "action_layer.h" #define _CO 0 // Colemak #define _QW 1 // QWERTY #define _ME 2 // media keys #define _NU 3 // numpad #define _EX 4 // extend #define _GA 5 // mouse overlay for gaming // Some quick aliases, just to make it look pretty #define _______ KC_TRNS #define KCX_CGR LCTL(KC_GRV) #define KX_STAB LSFT(KC_TAB) #define KX_COPY LCTL(KC_C) #define KX_CUT LCTL(KC_X) #define KX_PAST LCTL(KC_V) #define KX_UNDO LCTL(KC_Z) ; // This doesn't do anything. It's just for VSCode because its syntax highlighting is weird for the above #define statements. const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = { /* * Keymap: Colemak-ModDH * * ,--------------------------------------------------. ,--------------------------------------------------. 
* | ` | 1 | 2 | 3 | 4 | 5 | Esc | | | 6 | 7 | 8 | 9 | 0 | = | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | LCtrl | Q | W | F | P | B | Home | | BkSp | J | L | U | Y | ; | - | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | Tab | A | R | S | T | G |------| |------| M | N | E | I | O | ' | * |--------+------+------+------+------+------| Hyper| | \ |------+------+------+------+------+--------| * | LShft | Z | X | C | D | V | | | | K | H | , | , | / | RShft | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | LGui | [ | ] |CtlShf| LAlt | | _EX | - | ' | = | \ | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * | LCtrl| ~GA | | Left | Right| * ,------|------|------| |------+------+------. * |LCtrl/| LAlt/| Home | | Up | Alt/| _NU/ | * | BkSp | Del |------| |------| Enter| Space| * | | | _NU | | Down | | | * `--------------------' `--------------------' */ [_CO] = LAYOUT_ergodox( // left hand KC_GRV, KC_1, KC_2, KC_3, KC_4, KC_5, KC_ESC, KC_LCTL,KC_Q, KC_W, KC_F, KC_P, KC_B, KC_HOME, KC_TAB, KC_A, KC_R, KC_S, KC_T, KC_G, KC_LSFT,KC_Z, KC_X, KC_C, KC_D, KC_V, ALL_T(KC_NO), KC_LGUI,KC_LBRC,KC_RBRC, LCTL(KC_LSFT), KC_LALT, KC_LCTL, TG(_GA), KC_HOME, CTL_T(KC_BSPC), ALT_T(KC_DEL), MO(_NU), // right hand KC_ESC, KC_6, KC_7, KC_8, KC_9, KC_0, KC_EQL, KC_BSPC, KC_J, KC_L, KC_U, KC_Y, KC_SCLN,KC_MINS, KC_M, KC_N, KC_E, KC_I, KC_O, KC_QUOT, KC_BSLS, KC_K, KC_H, KC_COMM,KC_DOT, KC_SLSH,KC_RSFT, MO(_EX),KC_MINS,KC_QUOT,KC_EQL, KC_BSLS, KC_LEFT, KC_RGHT, KC_UP, KC_DOWN, ALT_T(KC_ENT), LT(_NU,KC_SPC) ), /* * Keymap: QWERTY layout. * * This is optimized for gaming, not typing, so there aren't as many macros * as the Dvorak layer. 
Some of the keys have also been moved to "game- * like" locations, such as making the spacebar available to the left thumb, * and repositioning the arrow keys at the bottom right corner. * * ,--------------------------------------------------. ,--------------------------------------------------. * | ` | 1 | 2 | 3 | 4 | 5 | Esc | | | 6 | 7 | 8 | 9 | 0 | = | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | LCtrl | Q | W | E | R | T | Home | | BkSp | Y | U | I | O | P | - | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | Tab | A | S | D | F | G |------| |------| H | J | K | L | ; | ' | * |--------+------+------+------+------+------| Hyper| | \ |------+------+------+------+------+--------| * | LShft | Z | X | C | V | B | | | | N | M | , | . | / | RShft | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | LGui | ` | \ |CtlShf| _NU | | _EX | - | ' | = | \ | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * | LCtrl| ~GA | | Left | Right| * ,------|------|------| |------+------+------. 
* |LCtrl/| LAlt/| Home | | Up | Alt/| _NU/ | * | BkSp | Del |------| |------| Enter| Space| * | | | _NU | | Down | | | * `--------------------' `--------------------' */ [_QW] = LAYOUT_ergodox( // Layer1: QWERTY // left hand KC_GRV, KC_1, KC_2, KC_3, KC_4, KC_5, KC_ESC, KC_LCTL,KC_Q, KC_W, KC_E, KC_R, KC_T, KC_HOME, KC_TAB, KC_A, KC_S, KC_D, KC_F, KC_G, KC_LSFT,KC_Z, KC_X, KC_C, KC_V, KC_B, ALL_T(KC_NO), KC_LGUI,KC_GRV, KC_SLSH,LCTL(KC_LSFT), MO(_NU), KC_LCTL,TG(_GA), KC_HOME, CTL_T(KC_BSPC), ALT_T(KC_DEL), MO(_NU), // right hand KC_ESC, KC_6, KC_7, KC_8, KC_9, KC_0, KC_EQL, KC_BSPC, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_MINS, KC_H, KC_J, KC_K, KC_L, KC_SCLN,KC_QUOT, KC_BSLS, KC_N, KC_M, KC_COMM,KC_DOT, KC_SLSH,KC_RSFT, MO(_EX),KC_MINS,KC_QUOT,KC_EQL, KC_BSLS, KC_LEFT, KC_RGHT, KC_UP, KC_DOWN, ALT_T(KC_ENT), LT(_NU,KC_SPC) ), /* * Keymap: Numbers and symbols * * Note that the number keys here are actually numpad keystrokes. This often doesn't matter, but it may be relevant in a few cases. * That's why the Num Lock key exists on this layer - just in case. * * This layer also contains the layout switches. * * ,--------------------------------------------------. ,--------------------------------------------------. * | | F1 | F2 | F3 | F4 | F5 | | | | F6 | F7 | F8 | F9 | F10 | | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | | ! | @ | { | } | & | | | | / | 7 | 8 | 9 | * | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | # | $ | ( | ) | ~ |------| |------| | | 4 | 5 | 6 | - | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | % | ^ | [ | ] | ` | | | | \ | 1 | 2 | 3 | + | | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | |QWERTY|Colemk| | | | 0 | . 
| = | | | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * |NumLck| RESET| | | | * ,------|------|------| |------+------+------. * | | | | | | | | * | | |------| |------| | | * | | | | | | | | * `--------------------' `--------------------' */ [_NU] = LAYOUT_ergodox( // left hand _______, KC_F1, KC_F2, KC_F3, KC_F4, KC_F5, _______, _______, KC_EXLM, KC_AT, KC_LCBR, KC_RCBR, KC_AMPR, _______, _______, KC_HASH, KC_DLR, KC_LPRN, KC_RPRN, KC_TILD, _______, KC_PERC, KC_CIRC, KC_LBRC, KC_RBRC, KC_GRV, _______, _______, DF(_QW), DF(_CO), _______, _______, KC_NLCK,RESET, _______, _______,_______,_______, // right hand _______, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, _______, _______, KC_SLSH, KC_P7, KC_P8, KC_P9, KC_PAST, _______, KC_PIPE, KC_P4, KC_P5, KC_P6, KC_PMNS, _______, _______, KC_BSLS, KC_P1, KC_P2, KC_P3, KC_PPLS, _______, KC_P0, KC_PDOT, KC_EQL, _______, _______, _______, _______, _______, _______, _______, _______ ), /* * Keymap: Extend * * ,--------------------------------------------------. ,--------------------------------------------------. * | | F11 | F12 | F13 | F14 | F15 | Mute | | | F16 | F17 | F18 | F19 | F20 | | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | | | | | | Ctrl`| Vol | | | PgUp | Home | Up | End | Del | | * |--------+------+------+------+------+------| Up | | |------+------+------+------+------+--------| * | | | Gui | Alt | Ctrl | |------| |------| PgDn | Left | Down | Right| BkSp | Menu | * |--------+------+------+------+------+------| Vol | | |------+------+------+------+------+--------| * | | Undo | Cut | Copy | | Paste| Down | | | | ^Tab | Tab | |Insert| PrntScr| * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | | | | | | | | | | | | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. 
* | | | | | | * ,------|------|------| |------+------+------. * | | | | | | | | * | | |------| |------| | | * | | | | | | | | * `--------------------' `--------------------' * * Ctrl+` is a keyboard shortcut for the program ConEmu, which provides a Quake-style drop-down command prompt. * */ [_EX] = LAYOUT_ergodox( // left hand _______, KC_F11, KC_F12, KC_F13, KC_F14, KC_F15, KC_MUTE, _______, _______, _______, _______, _______, KCX_CGR, KC_VOLU, _______, _______, KC_LGUI, KC_LALT, KC_LCTL, _______, _______, KX_UNDO, KX_CUT, KX_COPY, _______, KX_PAST, KC_VOLD, _______, _______, _______, _______, _______, _______,_______, _______, _______,_______,_______, // right hand _______, KC_F16, KC_F17, KC_F18, KC_F19, KC_F20, _______, _______, KC_PGUP, KC_HOME, KC_UP, KC_END, KC_DEL, _______, KC_PGDN, KC_LEFT, KC_DOWN, KC_RGHT, KC_BSPC, KC_MENU, _______, _______, KX_STAB, KC_TAB, _______, KC_INS, KC_PSCR, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______ ), /* * Keymap: Gaming * * Provides a mouse overlay for the right hand, and also moves some "gamer friendly" keys to the left, such as space. * This layer also removes a lot of dual-role keys, as when gaming, it's nicer not to need to keep track of those. * * ,--------------------------------------------------. ,--------------------------------------------------. 
* | | | | | | | | | | | | | | | | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | | | | | | | | | | |WhlUp | MsUp |WhlDn | | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | | | | | |------| |------| |MsLeft|MsDown|MsRght| | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | | | | | | | | | | | | | | | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | LCtrl| | | | | | | | | | | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * | | ~_GA | | |MClick| * ,------|------|------| |------+------+------. * | | | | | | | | * | Space| |------| |------|RClick|LClick| * | | | | | | | | * `--------------------' `--------------------' */ [_GA] = LAYOUT_ergodox( // left hand _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, KC_LCTL, _______, _______, _______, _______, _______,_______, _______, KC_SPC, _______,_______, // right hand _______, _______, _______, _______, _______, _______, _______, _______, _______, KC_WH_U, KC_MS_U, KC_WH_D, _______, _______, _______, KC_MS_L, KC_MS_D, KC_MS_R, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, KC_BTN3, _______, _______, KC_BTN2, KC_BTN1 ), }; const uint16_t PROGMEM fn_actions[] = { [1] = ACTION_LAYER_TAP_TOGGLE(_NU) // FN1 - Momentary Layer 1 (Numbers and symbols) }; const macro_t *action_get_macro(keyrecord_t *record, uint8_t id, uint8_t opt) { // MACRODOWN only works in this function switch(id) { case 0: if (record->event.pressed) { register_code(KC_RSFT); } else { 
unregister_code(KC_RSFT); } break; } return MACRO_NONE; }; // Runs just one time when the keyboard initializes. void matrix_init_user(void) { }; // Runs constantly in the background, in a loop. void matrix_scan_user(void) { uint8_t layer = biton32(layer_state); // uint8_t default_layer = biton32(layer_state); ergodox_board_led_off(); ergodox_right_led_1_off(); ergodox_right_led_2_off(); ergodox_right_led_3_off(); switch (layer) { case _CO: ergodox_right_led_1_on(); break; case _QW: ergodox_right_led_2_on(); break; case _NU: ergodox_right_led_3_on(); break; case _GA: ergodox_right_led_1_on(); ergodox_right_led_2_on(); default: // none break; } };
939188.c
/* * se ts=4;se expandtab * * vrouter.c -- virtual router * * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. */ #include <vr_os.h> #if defined(__linux__) #include <linux/version.h> #endif #include "vr_types.h" #include "vr_sandesh.h" #include "vr_message.h" #include "vr_cpuid.h" #include <vr_packet.h> #include <vr_interface.h> #include <vr_nexthop.h> #include <vr_route.h> #include <vr_mpls.h> #include <vr_flow.h> #include <vr_bridge.h> #include <vr_packet.h> #include <vr_mirror.h> #include <vr_vrf_table.h> #include <vr_vxlan.h> #include <vr_qos.h> #include <vr_offloads_dp.h> static struct vrouter router; struct host_os *vrouter_host; struct vr_offload_ops *offload_ops; void (*vr_init_cpuid)(struct vr_cpu_type_t *vr_cpu_type) = NULL; struct vr_cpu_type_t vr_cpu_type = {0}; extern struct host_os *vrouter_get_host(void); extern int vr_stats_init(struct vrouter *); extern void vr_stats_exit(struct vrouter *, bool); extern unsigned int vr_flow_entries; extern unsigned int vr_oflow_entries; extern unsigned int vr_bridge_entries; extern unsigned int vr_bridge_oentries; extern unsigned int vif_bridge_entries; extern unsigned int vif_bridge_oentries; extern unsigned int vr_pkt_droplog_bufsz; extern unsigned int vr_pkt_droplog_buf_en; extern unsigned int vr_pkt_droplog_sysctl_en; extern const char *ContrailBuildInfo; void vrouter_exit(bool); volatile bool vr_not_ready = true; /* Below hugepage req recv and resp variables are added for debug purpose */ int vr_hpage_req_recv = 0; int vr_hpage_req_resp = 0; unsigned int vr_memory_alloc_checks = 0; unsigned int vr_priority_tagging = 0; struct vr_module { char *mod_name; int error; const char *error_func; int error_line; int error_data; int (*init)(struct vrouter *); void (*exit)(struct vrouter *, bool); void (*shut)(struct vrouter *); int (*mem)(struct vrouter *); }; struct vr_module *module_under_init; static struct vr_module modules[] = { { .mod_name = "Stats", .init = vr_stats_init, .exit = vr_stats_exit, }, { 
.mod_name = "Interface", .init = vr_interface_init, .exit = vr_interface_exit, .shut = vr_interface_shut, }, { .mod_name = "Nexthop", .init = vr_nexthop_init, .exit = vr_nexthop_exit, }, { .mod_name = "Fib", .init = vr_fib_init, .mem = vr_fib_mem, .exit = vr_fib_exit, }, { .mod_name = "Mpls", .init = vr_mpls_init, .exit = vr_mpls_exit, }, { .mod_name = "Flow", .init = vr_flow_init, .mem = vr_flow_mem, .exit = vr_flow_exit, }, { .mod_name = "Mirror", .init = vr_mirror_init, .exit = vr_mirror_exit, }, { .mod_name = "Vxlan", .init = vr_vxlan_init, .exit = vr_vxlan_exit, }, { .mod_name = "QOS", .init = vr_qos_init, .exit = vr_qos_exit, }, { .mod_name = "Offloads", .init = vr_offloads_init, .exit = vr_offloads_exit, }, { .mod_name = "Vrf", .init = vr_vrf_table_init, .exit = vr_vrf_table_exit, }, }; #define VR_NUM_MODULES (sizeof(modules) / sizeof(modules[0])) /* * TODO For BSD we turn off all performance tweaks for now, it will * be implemented later. */ /* * Enable changes for better performance */ #if defined(__linux__) int vr_perfr = 1; /* GRO */ int vr_perfs = 1; /* segmentation in software */ #elif defined(__FreeBSD__) int vr_perfr = 0; /* GRO */ int vr_perfs = 0; /* segmentation in software */ #endif /* * Enable MPLS over UDP globally */ int vr_mudp = 0; /* * TCP MSS adjust settings */ #if defined(__linux__) int vr_from_vm_mss_adj = 1; /* adjust TCP MSS on packets from VM */ int vr_to_vm_mss_adj = 1; /* adjust TCP MSS on packet sent to VM */ #elif defined(__FreeBSD__) int vr_from_vm_mss_adj = 0; /* adjust TCP MSS on packets from VM */ int vr_to_vm_mss_adj = 0; /* adjust TCP MSS on packet sent to VM */ #endif /* * Following sysctls are to enable RPS. Based on empirical results, * performing RPS immediately after packets arrive on a physical interface * gives the best bidirectional throughput (as opposed to performing * RPS after pulling inner headers on the CPU core which received the * interrupt from the physical interface). 
This is probably because * more work can happen in parallel on multiple cores in the former case. * So, vr_perfr1 is set to 0 and vr_perfr3 is set to 1. vr_perfr2 is always * set as the vhost thread is usually scheduled on the CPU which received * the packet after GRO. Setting vr_perfr2 allows us to influence the * scheduling of the vhost thread to some extent (otherwise the scheduler * can sometimes make sub-optimal choices such as scheduling it on the CPU * which receives interrupts from the physical interface). vrouter ensures * that the receive processing happens on multiple cores which are in the * same NUMA node as the physical interface i.e. the CPU core which receives * packets from the physical interface is different from the CPU core which * does vrouter RX processing. A third CPU core does GRO processing (assuming * that enough cores are available). Also, hyper-thread siblings of the * above 3 cores are not used by vrouter for RX processing. */ #if defined(__linux__) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) int vr_perfr1 = 0; /* RPS after pulling inner headers */ int vr_perfr2 = 1; /* RPS after GRO on pkt1 interface */ int vr_perfr3 = 1; /* RPS from physical interface rx handler */ int vr_perfp = 1; /* pull inner headers, faster version */ int vr_use_linux_br = 0; /* nop if netdev_rx_handler_register() is used */ #else #if defined(RHEL_MAJOR) && defined(RHEL_MINOR) && \ (RHEL_MAJOR == 6) && (RHEL_MINOR >= 4) int vr_perfr1 = 0; int vr_perfr2 = 1; int vr_perfr3 = 1; int vr_perfp = 1; int vr_use_linux_br = 0; /* Centos 6.4 and higher */ #else int vr_perfr1 = 0; int vr_perfr2 = 0; int vr_perfr3 = 0; int vr_perfp = 0; int vr_use_linux_br = 1; /* Xen */ #endif #endif #elif defined(__FreeBSD__) int vr_perfp = 0; #endif /* * Following sysctls can be set if vrouter shouldn't pick a CPU for RPS * core based on a hash of the received packet. Turned off by default. 
 */
int vr_perfq1 = 0; /* CPU to send packets to if vr_perfr1 is 1 */
int vr_perfq2 = 0; /* CPU to send packets to if vr_perfr2 is 1 */
int vr_perfq3 = 0; /* CPU to send packets to if vr_perfr3 is 1 */

/* Should NIC perform checksum offload for outer UDP header? */
int vr_udp_coff = 0;

/*
 * Record an init-time error against the module currently being initialized
 * (module_under_init). Only the FIRST error is recorded; later calls keep
 * the original error data so that the root cause is not overwritten by
 * follow-on failures. Always returns 'error' unchanged so callers can write
 * "return vr_module_error(err, __FUNCTION__, __LINE__, data);".
 */
int
vr_module_error(int error, const char *func, int line, int mod_specific)
{
    struct vr_module *module = module_under_init;

    /*
     * set the error only if it was not set earlier. sometimes, the module
     * init can call functions which fail and set the error. In those cases,
     * for the sake of consistency, the module init should also be able to
     * call this function without overwriting the set error data
     */
    if (module && !module->error) {
        module->error = error;
        module->error_func = func;
        module->error_line = line;
        module->error_data = mod_specific;
    }

    return error;
}

/*
 * Print the error recorded by vr_module_error() for the module whose init
 * failed, then clear the recorded error state. No-op if no module is
 * currently under init.
 */
static void
vr_module_debug_dump(void)
{
    struct vr_module *module = module_under_init;

    if (!module)
        return;

    vr_printf("vrouter (%s): Init failed at %s:%d with return %d (data %d)\n",
            module->mod_name, module->error_func, module->error_line,
            module->error, module->error_data);

    /* reset error data */
    module->error = 0;
    module->error_func = NULL;
    module->error_line = -1;
    module->error_data = 0;

    return;
}

/*
 * Return the router instance. There is only one (the file-scope 'router'),
 * so vr_id is ignored; the parameter exists for interface uniformity.
 */
struct vrouter *
vrouter_get(unsigned int vr_id)
{
    return &router;
}

/* Bump and return the router's generation number (not atomic). */
unsigned int
vrouter_generation_num_get(struct vrouter *router)
{
    return ++router->vr_generation_num;
}

/*
 * Free a vrouter_ops response object allocated by vrouter_ops_get(),
 * including its embedded build-info string. Safe to call with NULL.
 */
static void
vrouter_ops_destroy(vrouter_ops *req)
{
    if (!req)
        return;

    if (req->vo_build_info) {
        vr_free(req->vo_build_info, VR_BUILD_INFO_OBJECT);
        req->vo_build_info = NULL;
    }
    vr_free(req, VR_VROUTER_REQ_OBJECT);

    return;
}

/*
 * Allocate a zeroed vrouter_ops response object with a build-info buffer
 * sized for ContrailBuildInfo (plus NUL). Returns NULL on allocation
 * failure; on partial failure the outer object is released first.
 */
static vrouter_ops *
vrouter_ops_get(void)
{
    vrouter_ops *req;

    req = vr_zalloc(sizeof(*req), VR_VROUTER_REQ_OBJECT);
    if (!req)
        return NULL;

    req->vo_build_info = vr_zalloc(strlen(ContrailBuildInfo) + 1,
            VR_BUILD_INFO_OBJECT);
    if (!req->vo_build_info) {
        vr_free(req, VR_VROUTER_REQ_OBJECT);
        return NULL;
    }

    return req;
}

/*
 * Handler for a SANDESH_OP_GET vrouter_ops request: fills a response with
 * the router's startup sizing and current runtime tunables.
 */
void
vrouter_ops_get_process(void *s_req)
{
    int ret = 0;
    struct vrouter *router;
    vrouter_ops *req = (vrouter_ops *)s_req;
    vrouter_ops *resp = NULL;

    /* Only GET is handled here; other ops get -EOPNOTSUPP. */
    if (req->h_op != SANDESH_OP_GET) {
        ret = -EOPNOTSUPP;
        goto generate_response;
    }

    router = vrouter_get(req->vo_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_response;
    }

    /* Allocate the response (includes the build-info buffer). */
    resp = vrouter_ops_get();
    if (!resp) {
        ret = -ENOMEM;
        goto generate_response;
    }

    /* Startup command line parameters */
    resp->vo_interfaces = router->vr_max_interfaces;
    resp->vo_vrfs = router->vr_max_vrfs;
    resp->vo_mpls_labels = router->vr_max_labels;
    resp->vo_nexthops = router->vr_max_nexthops;
    resp->vo_bridge_entries = vr_bridge_entries;
    resp->vo_oflow_bridge_entries = vr_bridge_oentries;
    resp->vo_flow_entries = vr_flow_entries;
    resp->vo_oflow_entries = vr_oflow_entries;
    resp->vo_mirror_entries = router->vr_max_mirror_indices;
    resp->vo_vif_bridge_entries = vif_bridge_entries;
    resp->vo_vif_oflow_bridge_entries = vif_bridge_oentries;
    resp->vo_pkt_droplog_bufsz = vr_pkt_droplog_bufsz;
    resp->vo_pkt_droplog_buf_en = vr_pkt_droplog_buf_en;

    /* Runtime parameters adjustable via sysctl or the vrouter utility */
    resp->vo_perfr = vr_perfr;
    resp->vo_perfs = vr_perfs;
    resp->vo_from_vm_mss_adj = vr_from_vm_mss_adj;
    resp->vo_to_vm_mss_adj = vr_to_vm_mss_adj;
    resp->vo_perfr1 = vr_perfr1;
    resp->vo_perfr2 = vr_perfr2;
    resp->vo_perfr3 = vr_perfr3;
    resp->vo_perfp = vr_perfp;
    resp->vo_perfq1 = vr_perfq1;
    resp->vo_perfq2 = vr_perfq2;
    resp->vo_perfq3 = vr_perfq3;
    resp->vo_udp_coff = vr_udp_coff;
    resp->vo_flow_hold_limit = vr_flow_hold_limit;
    resp->vo_mudp = vr_mudp;
    /* Default 0 if the optional packet-dump query hook is absent. */
    resp->vo_packet_dump = 0;
    resp->vo_pkt_droplog_en = vr_pkt_droplog_sysctl_en;
    resp->vo_pkt_droplog_min_en = vr_pkt_droplog_min_sysctl_en;

    if (vr_get_dump_packets != NULL) {
        resp->vo_packet_dump = vr_get_dump_packets();
    }

    /*
     * Build info. The bound is the SOURCE length, not the destination
     * size, but vo_build_info was allocated exactly
     * strlen(ContrailBuildInfo) + 1 bytes in vrouter_ops_get(), so the
     * copy fits and is NUL-terminated.
     */
    strncpy(resp->vo_build_info, ContrailBuildInfo,
            strlen(ContrailBuildInfo) + 1);

    /* Logging entries */
    resp->vo_log_level = vr_get_log_level();
    resp->vo_log_type_enable =
        vr_get_enabled_log_types(&resp->vo_log_type_enable_size);

    /* Used entries */
    resp->vo_flow_used_entries = vr_flow_table_used_total_entries(router);
    resp->vo_flow_used_oentries = vr_flow_table_used_oflow_entries(router);
    resp->vo_bridge_used_entries = vr_bridge_table_used_total_entries(router);
    resp->vo_bridge_used_oentries = vr_bridge_table_used_oflow_entries(router);

    vr_flow_get_burst_params(router, &resp->vo_burst_tokens,
            &resp->vo_burst_interval, &resp->vo_burst_step);

    resp->vo_memory_alloc_checks = vr_memory_alloc_checks;
    resp->vo_priority_tagging = vr_priority_tagging;

    /* Success: send the filled response object. */
    req = resp;

generate_response:
    /* On any error send a NULL payload with the error code. */
    if (ret)
        req = NULL;
    vr_message_response(VR_VROUTER_OPS_OBJECT_ID, req, ret, false);

    /* Response data has been serialized by now; free our copy. */
    if (resp)
        vrouter_ops_destroy(resp);

    return;
}

/**
 * A handler for control messages.
 *
 * Currently logging control and runtime parameters are supported.
 * Setting runtime parameters is also possible via sysctl.
 *
 * @param s_req Received request to be processed.
 */
void
vrouter_ops_add_process(void *s_req)
{
    int i;
    vrouter_ops *req = (vrouter_ops *)s_req;

    /* Log levels: 0 means "leave unchanged". */
    if (req->vo_log_level)
        vr_set_log_level(req->vo_log_level);

    if (req->vo_log_type_enable_size)
        for (i = 0; i < req->vo_log_type_enable_size; ++i)
            vr_set_log_type(req->vo_log_type_enable[i], 1);

    if (req->vo_log_type_disable_size)
        for (i = 0; i < req->vo_log_type_disable_size; ++i)
            vr_set_log_type(req->vo_log_type_disable[i], 0);

    /* Runtime parameters: -1 is the "not set" sentinel for each field. */
    if (req->vo_packet_dump != -1 && vr_set_dump_packets != NULL)
        vr_set_dump_packets(req->vo_packet_dump);
    if (req->vo_perfr != -1)
        vr_perfr = req->vo_perfr;
    if (req->vo_perfs != -1)
        vr_perfs = req->vo_perfs;
    if (req->vo_from_vm_mss_adj != -1)
        vr_from_vm_mss_adj = req->vo_from_vm_mss_adj;
    if (req->vo_to_vm_mss_adj != -1)
        vr_to_vm_mss_adj = req->vo_to_vm_mss_adj;
    if (req->vo_perfr1 != -1)
        vr_perfr1 = req->vo_perfr1;
    if (req->vo_perfr2 != -1)
        vr_perfr2 = req->vo_perfr2;
    if (req->vo_perfr3 != -1)
        vr_perfr3 = req->vo_perfr3;
    if (req->vo_perfp != -1)
        vr_perfp = req->vo_perfp;
    if (req->vo_perfq1 != -1)
        vr_perfq1 = req->vo_perfq1;
    if (req->vo_perfq2 != -1)
        vr_perfq2 = req->vo_perfq2;
    if (req->vo_perfq3 != -1)
        vr_perfq3 = req->vo_perfq3;
    if (req->vo_udp_coff != -1)
        vr_udp_coff = req->vo_udp_coff;
    if (req->vo_flow_hold_limit != -1)
        vr_flow_hold_limit = (unsigned int)req->vo_flow_hold_limit;
    if (req->vo_mudp != -1)
        vr_mudp = req->vo_mudp;

    vr_flow_set_burst_params(vrouter_get(req->vo_rid),
            req->vo_burst_tokens, req->vo_burst_interval, req->vo_burst_step);

    /*
     * NOTE(review): unlike the fields above, this assignment has no
     * -1 "not set" guard, so every ADD request overwrites
     * vr_priority_tagging unconditionally — confirm this is intended.
     */
    vr_priority_tagging = req->vo_priority_tagging;

    /* Neither of currently called functions signals an error. Just send OK
     * response here for now. */
    vr_send_response(0);
}

/*
 * Tear down all vrouter modules in reverse init order. Called both on
 * unload (soft_reset == false) and as the first half of a soft reset
 * (soft_reset == true), in which case the router struct is preserved.
 */
void
vrouter_exit(bool soft_reset)
{
    int i;

    /* First give every module a chance to quiesce ("shut") ... */
    for (i = 0; i < (int)VR_NUM_MODULES; i++)
        if (modules[i].shut)
            modules[i].shut(&router);

    /* Mark that vrouter is no more ready as shut is already done */
    vr_not_ready = true;

    /* Flush the previous asynchronous events, before init */
    if (vr_soft_reset)
        vr_soft_reset(&router);

    /* ... then exit modules in reverse of their init order. */
    for (i = VR_NUM_MODULES - 1; i >= 0; --i) {
        modules[i].exit(&router, soft_reset);
    }

    /*
     * This is necessary on operating systems that don't
     * unload the binary on exit.
     * When soft reset is happening we must not reinitialize
     * vrouter struct.
     */
    if (!soft_reset) {
        memset(&router, 0, sizeof(router));
    }

    return;
}

/*
 * Initialize all vrouter modules in table order. On any failure the
 * already-initialized modules are torn down via vrouter_exit(false) and
 * the recorded module error is dumped. Returns 0 on success, negative
 * errno on failure. Clears vr_not_ready when fully up.
 */
int
vrouter_init(void)
{
    unsigned int i;
    int ret;

    /* init CPU id struct*/
    if (vr_init_cpuid != NULL)
        vr_init_cpuid(&vr_cpu_type);

    vrouter_host = vrouter_get_host();
    /* The assignment inside the condition sets ret for the failure path. */
    if (!vrouter_host && (ret = -ENOMEM))
        goto init_fail;

    for (i = 0; i < VR_NUM_MODULES; i++) {
        /* Track which module is initializing for vr_module_error(). */
        module_under_init = &modules[i];
        ret = modules[i].init(&router);
        if (ret) {
            vr_printf("vrouter module %u init error (%d)\n", i, ret);
            goto init_fail;
        }
        /*
         * When huge pages are not configured, allocate module memory now;
         * otherwise it is deferred to vr_hugepage_config_process().
         */
        if (!vr_huge_page_config) {
            if (modules[i].mem) {
                ret = modules[i].mem(&router);
                if (ret) {
                    vr_printf("vrouter module %u mem error (%d)\n", i, ret);
                    goto init_fail;
                }
            }
        }
    }

    module_under_init = NULL;
    vr_not_ready = false;

    return 0;

init_fail:
    vrouter_exit(false);
    vr_module_debug_dump();
    module_under_init = NULL;

    return ret;
}

/* Full teardown + re-init keeping the router struct, then re-sync offloads. */
static int
vrouter_soft_reset(void)
{
    int ret = 0;

    vrouter_exit(true);
    ret = vrouter_init();
    if (!ret)
        ret = vr_offload_soft_reset();

    return ret;
}

/*
 * Top-level sandesh dispatcher for vrouter_ops messages. GET and ADD
 * handlers send their own responses; RESET and unsupported ops are
 * answered here.
 */
void
vrouter_ops_process(void *s_req)
{
    int ret;
    vrouter_ops *ops = (vrouter_ops *)s_req;

    switch (ops->h_op) {
    case SANDESH_OP_RESET:
        vr_printf("vrouter soft reset start\n");
        ret = vrouter_soft_reset();
        vr_printf("vrouter soft reset done (%d)\n", ret);
        break;

    case SANDESH_OP_GET:
        vrouter_ops_get_process(s_req);
        return;

    case SANDESH_OP_ADD:
        vrouter_ops_add_process(s_req);
        return;

    default:
        ret = -EOPNOTSUPP;
    }

    vr_send_response(ret);
    return;
}

/*
 * Handler for a huge-page configuration request (ADD only). Applies the
 * huge-page segments if a platform hook exists, then (re)allocates each
 * module's table memory. Responds with a vr_hugepage_config carrying a
 * detailed VR_HPAGE_CFG_RESP_* code.
 */
void
vr_hugepage_config_process(void *s_req)
{
    /* ret starts at -EEXIST: "huge pages already/not configured here". */
    int i, ret = -EEXIST, mret = 0;
    vr_hugepage_config hcfg_resp;
    vr_hugepage_config *req = (vr_hugepage_config *)s_req;
    struct vrouter *router = vrouter_get(0);

    /* Debug purpose: Increment below variable when we get hugepage req from agent */
    vr_hpage_req_recv++;

    /* Only addition of huge pages is supported */
    if (req->vhp_op != SANDESH_OP_ADD) {
        vr_send_response(-EOPNOTSUPP);
        return;
    }

    /* Invoke huge page configuration only if a segment exists */
    if (vr_huge_page_config) {
        ret = vr_huge_page_config(req->vhp_mem, req->vhp_mem_size,
                req->vhp_psize,
                req->vhp_psize_size,
                req->vhp_mem_sz, req->vhp_mem_sz_size);
    }

    /*
     * Unless memory was already initialized (-EEXIST), allocate each
     * module's table memory; mret records the first module failure.
     */
    if (ret != -EEXIST) {
        for (i = 0; i < (int)VR_NUM_MODULES; i++) {
            if (modules[i].mem) {
                mret = modules[i].mem(router);
                if (mret)
                    break;
            }
        }
    }

    memset(&hcfg_resp, 0, sizeof(hcfg_resp));
    hcfg_resp.vhp_op = req->vhp_op;

    /*
     * If we fail to allocate memory both in huge pages and as well
     * regular respond with VR_HPAGE_CFG_RESP_MEM_FAILURE. Return status
     * in vrouter_response would be an error value. If memory allocation
     * succeeds in either the huge pages or regular, return status is
     * going to be "0" and response code gives the details of the
     * allocation
     */
    switch (ret) {
    case 0:
        /* Huge pages configured; success if module memory also came up. */
        if (!mret)
            hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_HPAGE_SUCCESS;
        break;
    case -EINVAL:
        /* Bad huge-page args, but regular memory worked: report OK. */
        if (!mret) {
            hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_INVALID_ARG_MEM_INITED;
            ret = 0;
        }
        break;
    case -EEXIST:
        hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_MEM_ALREADY_INITED;
        break;
    case -ENOMEM:
        /* Huge pages failed, but regular memory worked: report OK. */
        if (!mret) {
            hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_HPAGE_FAILURE_MEM_INITED;
            ret = 0;
        }
        break;
    case -E2BIG:
        /* Only some huge-page segments configured. */
        if (!mret) {
            hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_HPAGE_PARTIAL_SUCCESS;
            ret = 0;
        }
        break;
    default:
        ret = -EINVAL;
        break;
    }

    /* Module memory failure trumps everything else. */
    if (mret) {
        hcfg_resp.vhp_resp = VR_HPAGE_CFG_RESP_MEM_FAILURE;
        ret = mret;
    }

    /* Debug purpose: Increment below variable before sending response */
    vr_hpage_req_resp++;

    vr_printf("Huge page req_recv_cntr:%d resp_cntr:%d resp: %d ret: %d\n",
            vr_hpage_req_recv, vr_hpage_req_resp, hcfg_resp.vhp_resp, ret);

    vr_message_response(VR_HPAGE_CFG_OBJECT_ID, &hcfg_resp, ret, false);

    return;
}
102667.c
/** PIN MANAGER Generated Driver File @Company: Microchip Technology Inc. @File Name: pin_manager.c @Summary: This is the generated manager file for the PIC24 / dsPIC33 / PIC32MM MCUs device. This manager configures the pins direction, initial state, analog setting. The peripheral pin select, PPS, configuration is also handled by this manager. @Description: This source file provides implementations for PIN MANAGER. Generation Information : Product Revision : PIC24 / dsPIC33 / PIC32MM MCUs - 1.125 Device : PIC24FJ64GB002 The generated drivers are tested against the following: Compiler : XC16 v1.36B MPLAB : MPLAB X v5.20 */ /* (c) 2016 Microchip Technology Inc. and its subsidiaries. You may use this software and any derivatives exclusively with Microchip products. THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE, OR ITS INTERACTION WITH MICROCHIP PRODUCTS, COMBINATION WITH ANY OTHER PRODUCTS, OR USE IN ANY APPLICATION. IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE. MICROCHIP PROVIDES THIS SOFTWARE CONDITIONALLY UPON YOUR ACCEPTANCE OF THESE TERMS. 
*/

/**
 Section: Includes
*/

#include <xc.h>
#include <stdio.h>
#include "pin_manager.h"

/**
 Section: File specific functions
*/

/**
 Section: Driver Interface Function Definitions
*/

/*
 * One-shot GPIO configuration for the PIC24FJ64GB002: output latches,
 * pin directions, pull-ups/downs, open-drain, analog/digital selection,
 * and the peripheral pin select (PPS) mapping for UART1.
 */
void PIN_MANAGER_Initialize (void)
{
    /****************************************************************************
     * Setting the Output Latch SFR(s)
     ***************************************************************************/
    LATA = 0x0000;
    LATB = 0x0100;      /* only bit 8 (RB8, mapped to U1TX below) starts high */

    /****************************************************************************
     * Setting the GPIO Direction SFR(s) (1 = input, 0 = output)
     ***************************************************************************/
    TRISA = 0x000C;
    TRISB = 0x0E13;     /* includes RB9 as input for U1RX */

    /****************************************************************************
     * Setting the Weak Pull Up and Weak Pull Down SFR(s) — all disabled
     ***************************************************************************/
    CNPU1 = 0x0000;
    CNPU2 = 0x0000;

    /****************************************************************************
     * Setting the Open Drain SFR(s) — all push-pull
     ***************************************************************************/
    ODCA = 0x0000;
    ODCB = 0x0000;

    /****************************************************************************
     * Setting the Analog/Digital Configuration SFR(s)
     * (per datasheet, each AD1PCFG bit selects digital mode for its ANx
     * pin — confirm value 0x000C against the pin mapping in use)
     ***************************************************************************/
    AD1PCFG = 0x000C;

    /****************************************************************************
     * Set the PPS (must be done between the unlock/lock sequences)
     ***************************************************************************/
    __builtin_write_OSCCONL(OSCCON & 0xbf); // unlock PPS

    RPINR18bits.U1RXR = 0x0009;    //RB9->UART1:U1RX
    RPOR4bits.RP8R = 0x0003;       //RB8->UART1:U1TX

    __builtin_write_OSCCONL(OSCCON | 0x40); // lock PPS
}
704119.c
/******************************************************************************* * * Project: Realtek Ameba flash loader project * * Description: Memory-specific routines for Flash Loader. * * Copyright by Diolan Ltd. All rights reserved. * *******************************************************************************/ #include <string.h> #include <stdlib.h> #include "flash_loader.h" #include "flash_loader_extra.h" #include "rtl8195a.h" //#include "rtl8195a/hal_misc.h" //#include "rtl8195a/hal_spi_flash.h" //#include "rtl8195a/core_cm3.h" extern VOID HalReInitPlatformLogUart( VOID ); extern VOID SpicLoadInitParaFromClockRtl8195A ( IN u8 CpuClkMode, IN u8 BaudRate, IN PSPIC_INIT_PARA pSpicInitPara ); extern VOID SpicWaitBusyDoneRtl8195A(); extern VOID SpicWaitWipDoneRtl8195A ( IN SPIC_INIT_PARA SpicInitPara ); extern VOID SpicTxCmdRtl8195A ( IN u8 cmd, IN SPIC_INIT_PARA SpicInitPara ); extern u8 SpicGetFlashStatusRtl8195A ( IN SPIC_INIT_PARA SpicInitPara ); __no_init unsigned int flash_loc; __no_init unsigned int erase_loc; __no_init unsigned int is_cascade; __no_init unsigned int is_head; __no_init unsigned int is_dbgmsg; __no_init unsigned int is_erasecal; __no_init unsigned int img2_addr; int rest_count; int first_write; SPIC_INIT_PARA SpicInitPara; #define PATTERN_1 0x96969999 #define PATTERN_2 0xFC66CC3F #define PATTERN_3 0x03CC33C0 #define PATTERN_4 0x6231DCE5 #define DBGPRINT(fmt, arg...) 
do{ if( is_dbgmsg ) DiagPrintf(fmt, ##arg);}while(0)

//unsigned int fw_head[4] = {PATTERN_1, PATTERN_2, PATTERN_3, PATTERN_4};
/* Per-segment header written ahead of the image data by FlashWrite(). */
unsigned int seg_head[4] = {0,0,0,0};

extern SPIC_INIT_PARA SpicInitCPUCLK[4];

/* Dump the first 16 bytes of flash (the firmware header) when --dbgmsg is set. */
void dump_flash_header(void)
{
    uint32_t data;

    data = HAL_READ32(SPI_FLASH_BASE, 0);
    DBGPRINT("\n\r 0: %x", data);
    data = HAL_READ32(SPI_FLASH_BASE, 4);
    DBGPRINT("\n\r 4: %x", data);
    data = HAL_READ32(SPI_FLASH_BASE, 8);
    DBGPRINT("\n\r 8: %x", data);
    data = HAL_READ32(SPI_FLASH_BASE, 12);
    DBGPRINT("\n\r 12: %x", data);
}

/*
 * Linear scan of argv for 'option' (exact strcmp match).
 * withValue == 0: return the matching argv entry itself (non-NULL flag).
 * withValue != 0: return the NEXT argv entry as the option's value, or
 * NULL if the option is the last argument. Returns NULL when not found.
 */
const char* find_option(char* option, int withValue, int argc, char const* argv[])
{
    int i;
    for (i = 0; i < argc; i++) {
        if (strcmp(option, argv[i]) == 0){
            if (withValue) {
                if (i + 1 < argc) {
                    // The next argument is the value.
                    return argv[i + 1];
                } else {
                    // The option was found but there is no value to return.
                    return 0;
                }
            } else {
                // Return the flag argument itself just to get a non-zero pointer.
                return argv[i];
            }
        }
    }
    return 0;
}

/* Enable ROM-code debug error prints via its globals (misc errors only). */
static VOID FlashDownloadHalInitialROMCodeGlobalVar(VOID)
{
    // to initial ROM code using global variable
    ConfigDebugErr  = _DBG_MISC_;
    ConfigDebugInfo = 0x0;
    ConfigDebugWarn = 0x0;
}

/* Silence all ROM-code debug prints. */
static VOID FlashDownloadHalCleanROMCodeGlobalVar(VOID)
{
    ConfigDebugErr  = 0x0;
    ConfigDebugInfo = 0x0;
    ConfigDebugWarn = 0x0;
}

// Please clean this Array
extern SPIC_INIT_PARA SpicInitParaAllClk[3][CPU_CLK_TYPE_NO];
u8 FlashType;

/*
 * Flash-loader entry point: parse the debugger-supplied --flags, bring up
 * the SPI flash controller, and prepare the segment header state used by
 * FlashWrite()/FlashErase(). Returns RESULT_OK on success.
 */
uint32_t FlashInit(void *base_of_flash,
                   uint32_t image_size,
                   uint32_t link_address,
                   uint32_t flags,
                   int argc, char const *argv[])
{
    u8 CpuClk;
    u8 Value8, InitBaudRate;
    char *addr;
    /* SPIC init parameters per CPU clock; copied over the ROM table below. */
    SPIC_INIT_PARA InitCPUCLK[4] = {
        {0x1,0x1,0x5E,0},    //default cpu 41, baud 1
        {0x1,0x1,0x0,0},     //cpu 20.8 , baud 1
        {0x1,0x2,0x23,0},    //cpu 83.3, baud 1
        {0x1,0x5,0x5,0},
    };
    memcpy(SpicInitCPUCLK, InitCPUCLK, sizeof(InitCPUCLK));
    memset(SpicInitParaAllClk, 0, sizeof(SPIC_INIT_PARA)*3*CPU_CLK_TYPE_NO);

    SpicInitPara.BaudRate = 0;
    SpicInitPara.DelayLine = 0;
    SpicInitPara.RdDummyCyle = 0;
    SpicInitPara.Rsvd = 0;

    /* Command-line flags from the flash-loader invocation. */
    if(find_option( "--erase_cal", 0, argc,
                    argv ))
        is_erasecal = 1;
    else
        is_erasecal = 0;
    if(find_option( "--cascade", 0, argc, argv ))
        is_cascade = 1;
    else
        is_cascade = 0;
    if(find_option( "--head", 0, argc, argv ))
        is_head = 1;
    else
        is_head = 0;
    if(find_option( "--dbgmsg", 0, argc, argv ))
        is_dbgmsg = 1;
    else
        is_dbgmsg = 0;
    /* --img2_addr takes a value: image2 start address, stored as /1024 offset. */
    if( (addr = (char*)find_option( "--img2_addr", 1, argc, argv))){
        img2_addr = strtod(addr, NULL)/1024;
        DBG_8195A(" image2 start address = %s, offset = %x\n\r", addr, img2_addr);
    }else
        img2_addr = 0;

    /* Clear a fixed SRAM scratch region (0x10000300..0x10000bc0). */
    memset((void *) 0x10000300, 0, 0xbc0-0x300);

    // Load Efuse Setting
    Value8 = ((HAL_READ32(SYSTEM_CTRL_BASE, REG_SYS_EFUSE_SYSCFG6) & 0xFF000000) >> 24);
    InitBaudRate = ((Value8 & 0xC)>>2);
    // Make sure InitBaudRate != 0
    if (!InitBaudRate) {
        InitBaudRate +=1;
    }
    CpuClk = ((HAL_READ32(SYSTEM_CTRL_BASE, REG_SYS_CLK_CTRL1) & (0x70)) >> 4);
    SpicLoadInitParaFromClockRtl8195A(CpuClk, InitBaudRate, &SpicInitPara);

    // Reset to low speed
    HAL_WRITE32(SYSTEM_CTRL_BASE, REG_SYS_CLK_CTRL1, 0x21);

    FlashDownloadHalInitialROMCodeGlobalVar();

    //2 Need Modify
    VectorTableInitRtl8195A(0x1FFFFFFC);

    //3 Initial Log Uart
    PatchHalInitPlatformLogUart();

    //3 Initial hardware timer
    PatchHalInitPlatformTimer();

    DBG_8195A("\r\n===> Flash Init \n\r");

    //4 Initialize the flash first
    /*
     * NOTE(review): HAL_READ32 is used elsewhere in this file as
     * (base, offset); here it is passed (REG_SOC_FUNC_EN, BIT_SOC_FLASH_EN),
     * which looks like a swapped/incorrect argument pair — verify against
     * the rtl8195a HAL definition before changing.
     */
    if (HAL_READ32(REG_SOC_FUNC_EN,BIT_SOC_FLASH_EN) & BIT_SOC_FLASH_EN) {
        FLASH_FCTRL(OFF);
    }
    FLASH_FCTRL(ON);
    ACTCK_FLASH_CCTRL(ON);
    SLPCK_FLASH_CCTRL(ON);
    PinCtrl(SPI_FLASH,S0,ON);
    PatchSpicInitRtl8195A(SpicInitPara.BaudRate, SpicOneBitMode);
    SpicFlashInitRtl8195A(SpicOneBitMode);

    FlashType = SpicInitParaAllClk[SpicOneBitMode][0].flashtype;
    char* vendor[] = {"Others", "MXIC", "Winbond", "Micron"};
    DBG_8195A("\r\n===> Flash Init Done, vendor: \x1b[32m%s\x1b[m \n\r", vendor[FlashType]);

    /* Arm FlashWrite(): first call writes the segment (and FW) header. */
    first_write = 1;
    rest_count = theFlashParams.block_size;
    seg_head[0] = theFlashParams.block_size;
    seg_head[1] = theFlashParams.offset_into_block;
    if(is_head){
        seg_head[2] = 0xFFFF0000|img2_addr;
        seg_head[3] = 0xFFFFFFFF;
    }else{
        if(is_cascade==0){
            // Image2 signature
            seg_head[2] = 0x35393138; //8195
            seg_head[3] = 0x31313738; //8711
        }else{
            seg_head[2] = 0xFFFFFFFF;
            seg_head[3] = 0xFFFFFFFF;
        }
    }
    //DBG_8195A("link_address = %08x, flags = %08x ...\n\r", link_address, flags);

    /* Standalone image2 download: invalidate any existing OTA partition 2. */
    if(is_cascade==0 && is_head==0){
        // mark partition 2 to old if existing
        unsigned int ota_addr = HAL_READ32(SPI_FLASH_BASE, 0x9000);
        //check OTA address valid
        if( ota_addr == 0xFFFFFFFF || ota_addr > 64*1024*1024 ){
            DBG_8195A("\r\n\x1b[31mOTA addr %8x is invalid\x1b[m\n\r", ota_addr );
            DBG_8195A("\x1b[31mOTA addr %8x is invalid\x1b[m\n\r", ota_addr );
            DBG_8195A("\x1b[31mOTA addr %8x is invalid\x1b[m\n\r", ota_addr );
            DBG_8195A("continue downloading...\n\r" );
            return RESULT_OK;
        }else{
            DBG_8195A("\x1b[36mOTA addr is %x \x1b[m\n\r", ota_addr );
        }
        /* Signature words at ota_addr+8/+12 identify a valid image2. */
        int sig0 = HAL_READ32(SPI_FLASH_BASE, ota_addr+8);
        int sig1 = HAL_READ32(SPI_FLASH_BASE, ota_addr+12);
        if(sig0==0x35393138 && sig1==0x31313738){
            DBG_8195A("\r\n>>>> mark parition 2 as older \n\r" );
            HAL_WRITE32(SPI_FLASH_BASE, ota_addr+8, 0x35393130); // mark to older version
            // wait spic busy done
            SpicWaitBusyDoneRtl8195A();
            // wait flash busy done (wip=0)
            if(FlashType == FLASH_MICRON)
                SpicWaitOperationDoneRtl8195A(SpicInitPara);
            else
                SpicWaitWipDoneRefinedRtl8195A(SpicInitPara);
        }
    }

    dump_flash_header();
    //SpicEraseFlashRtl8195A();
    return RESULT_OK;
}

/*
 * Write one 32-bit word at the current flash cursor (flash_loc), wait for
 * the SPIC controller and the flash device to finish, then advance the
 * cursor by 4 bytes.
 */
void write_spi_flash(uint32_t data)
{
    HAL_WRITE32(SPI_FLASH_BASE, flash_loc, data);
    // wait spic busy done
    SpicWaitBusyDoneRtl8195A();
    // wait flash busy done (wip=0)
    if(FlashType == FLASH_MICRON)
        SpicWaitOperationDoneRtl8195A(SpicInitPara);
    else
        SpicWaitWipDoneRefinedRtl8195A(SpicInitPara);
    flash_loc+=4;
}

/*
 * Flash-loader write callback: on the first call, emit the firmware header
 * (when --head) and the segment header, then stream 'count' bytes from
 * 'buffer' word-by-word. 'count' is assumed to be a multiple of 4 here.
 */
uint32_t FlashWrite(void *block_start,
                    uint32_t offset_into_block,
                    uint32_t count,
                    char const *buffer)
{
    int write_cnt=0;
    uint32_t* buffer32 = (uint32_t*)buffer;

    DBG_8195A("\r\n===> Flash Write, start %x, addr %x, offset %d, count %d, buf %x\n\r", block_start, flash_loc, offset_into_block, count, buffer);

    if(first_write){
        /* In non-cascade mode, the write cursor tracks the block address. */
        if(!is_cascade){
            flash_loc = (unsigned
int)block_start;
        }
        if(is_head){
            /* Firmware header: four magic pattern words at flash offset 0. */
            unsigned int fw_head[4] = {PATTERN_1, PATTERN_2, PATTERN_3, PATTERN_4};
            DBGPRINT("Write FW header....");
            flash_loc=0;
            write_spi_flash(fw_head[0]);
            write_spi_flash(fw_head[1]);
            write_spi_flash(fw_head[2]);
            write_spi_flash(fw_head[3]);
            DBGPRINT("Write FW header.... %x %x %x %x --> Done\n\r", fw_head[0], fw_head[1], fw_head[2], fw_head[3]);
        }
        /* Segment header prepared in FlashInit(): size, offset, signature. */
        DBGPRINT("Write SEG header....");
        first_write = 0;
        write_spi_flash(seg_head[0]);
        write_spi_flash(seg_head[1]);
        write_spi_flash(seg_head[2]);
        write_spi_flash(seg_head[3]);
        DBGPRINT("Write SEG header.... %x %x %x %x --> Done\n\r", seg_head[0], seg_head[1], seg_head[2], seg_head[3]);
    }

    /*
     * Clamp to the bytes remaining in this segment.
     * NOTE(review): rest_count is a signed int compared against the
     * unsigned 'count'; if rest_count ever went negative this comparison
     * would promote it to a huge unsigned value — confirm callers keep it
     * non-negative.
     */
    if(rest_count < count)
        count = rest_count;

    // DO Write Here
    DBG_8195A("Write Binary....");
    while (write_cnt < count) {
        write_spi_flash(*buffer32);
        write_cnt += 4;
        buffer32++;
    }
    DBG_8195A("Write Binary....Done\n\r");

    rest_count-=count;
    DBG_8195A("\r\n<=== Flash Write Done %x\n\r", flash_loc);
    DBGPRINT("first 4 bytes %2x %2x %2x %2x\n\r", buffer[0],buffer[1],buffer[2],buffer[3]);
    /* NOTE(review): buffer[count-4] underflows when count < 4 — debug-only path. */
    DBGPRINT("last 4 bytes %2x %2x %2x %2x\n\r", buffer[count-4],buffer[count-3],buffer[count-2],buffer[count-1]);
    return RESULT_OK;
}

/*
 * Flash-loader erase callback: erase one 4 KB sector at the current erase
 * cursor, then advance it. Sector 0xa000 (calibration data) is skipped
 * unless --erase_cal was given.
 */
uint32_t FlashErase(void *block_start, uint32_t block_size)
{
    if(is_head == 1)
        erase_loc = 0;
    if(!is_cascade)
        erase_loc = (unsigned int)block_start;

    if(erase_loc != 0xa000){
        SpicSectorEraseFlashRtl8195A(erase_loc);
        DBGPRINT("@erase %x, size %d, fw offset %x\n\r", erase_loc, block_size, block_start);
    }else{
        if(is_erasecal){
            SpicSectorEraseFlashRtl8195A(erase_loc);
            DBGPRINT("@erase %x, size %d, fw offset %x\n\r", erase_loc, block_size, block_start);
        }
    }
    /* NOTE(review): advances 4096 regardless of block_size — confirm intended. */
    erase_loc += 4096;
    return 0;
}
859837.c
/* USER CODE BEGIN Header */ /** ****************************************************************************** * @file : usb_device.c * @version : v1.0_Cube * @brief : This file implements the USB Device ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2020 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under Ultimate Liberty license * SLA0044, the "License"; You may not use this file except in compliance with * the License. You may obtain a copy of the License at: * www.st.com/SLA0044 * ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "usb_device.h" #include "usbd_core.h" #include "usbd_desc.h" #include "usbd_dfu.h" #include "usbd_dfu_if.h" /* USER CODE BEGIN Includes */ /* USER CODE END Includes */ /* USER CODE BEGIN PV */ /* Private variables ---------------------------------------------------------*/ /* USER CODE END PV */ /* USER CODE BEGIN PFP */ /* Private function prototypes -----------------------------------------------*/ /* USER CODE END PFP */ /* USB Device Core handle declaration. */ USBD_HandleTypeDef hUsbDeviceFS; /* * -- Insert your variables declaration here -- */ /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /* * -- Insert your external function declaration here -- */ /* USER CODE BEGIN 1 */ /* USER CODE END 1 */ /** * Init USB device Library, add supported class and start the library * @retval None */ void MX_USB_DEVICE_Init(void) { /* USER CODE BEGIN USB_DEVICE_Init_PreTreatment */ /* USER CODE END USB_DEVICE_Init_PreTreatment */ /* Init Device Library, add supported class and start the library. 
  */
  /* Create the device core instance using the FS descriptors. */
  if (USBD_Init(&hUsbDeviceFS, &FS_Desc, DEVICE_FS) != USBD_OK)
  {
    Error_Handler();
  }
  /* Attach the DFU (firmware upgrade) class to the device core. */
  if (USBD_RegisterClass(&hUsbDeviceFS, &USBD_DFU) != USBD_OK)
  {
    Error_Handler();
  }
  /* Hook up the application's DFU media read/write callbacks. */
  if (USBD_DFU_RegisterMedia(&hUsbDeviceFS, &USBD_DFU_fops_FS) != USBD_OK)
  {
    Error_Handler();
  }
  /* Connect to the bus and start enumeration. */
  if (USBD_Start(&hUsbDeviceFS) != USBD_OK)
  {
    Error_Handler();
  }

  /* USER CODE BEGIN USB_DEVICE_Init_PostTreatment */

  /* USER CODE END USB_DEVICE_Init_PostTreatment */
}

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
687270.c
/******************************************************************************* Serial Communication Interface Inter-Integrated Circuit (SERCOM I2C) Library Source File Company: Microchip Technology Inc. File Name: plib_sercom2_i2c.c Summary: SERCOM I2C PLIB Implementation file Description: This file defines the interface to the SERCOM I2C peripheral library. This library provides access to and control of the associated peripheral instance. *******************************************************************************/ // DOM-IGNORE-BEGIN /******************************************************************************* * Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries. * * Subject to your compliance with these terms, you may use Microchip software * and any derivatives exclusively with Microchip products. It is your * responsibility to comply with third party license terms applicable to your * use of third party software (including open source software) that may * accompany Microchip software. * * THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER * EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A * PARTICULAR PURPOSE. * * IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, * INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND * WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS * BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE * FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN * ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, * THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE. 
*******************************************************************************/ // DOM-IGNORE-END // ***************************************************************************** // ***************************************************************************** // Section: Included Files // ***************************************************************************** // ***************************************************************************** #include "plib_sercom2_i2c.h" // ***************************************************************************** // ***************************************************************************** // Section: Global Data // ***************************************************************************** // ***************************************************************************** /* SERCOM2 I2C baud value for 400 Khz baud rate */ #define SERCOM2_I2CM_BAUD_VALUE (52U) static SERCOM_I2C_OBJ sercom2I2CObj; // ***************************************************************************** // ***************************************************************************** // Section: SERCOM2 I2C Implementation // ***************************************************************************** // ***************************************************************************** // ***************************************************************************** // ***************************************************************************** /* Function: void SERCOM2_I2C_Initialize(void) Summary: Initializes the instance of the SERCOM peripheral operating in I2C mode. Description: This function initializes the given instance of the SERCOM I2C peripheral as configured by the user from the MHC. Remarks: Refer plib_sercom2_i2c.h for more information. 
*/ void SERCOM2_I2C_Initialize(void) { /* Enable smart mode enable */ SERCOM2_REGS->I2CM.SERCOM_CTRLB = SERCOM_I2CM_CTRLB_SMEN_Msk; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Baud rate - Master Baud Rate*/ SERCOM2_REGS->I2CM.SERCOM_BAUD = SERCOM_I2CM_BAUD_BAUD(SERCOM2_I2CM_BAUD_VALUE); /* Set Operation Mode (Master), SDA Hold time, run in stand by and i2c master enable */ SERCOM2_REGS->I2CM.SERCOM_CTRLA = SERCOM_I2CM_CTRLA_MODE_I2C_MASTER | SERCOM_I2CM_CTRLA_SDAHOLD_75NS | SERCOM_I2CM_CTRLA_ENABLE_Msk ; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Initial Bus State: IDLE */ SERCOM2_REGS->I2CM.SERCOM_STATUS = SERCOM_I2CM_STATUS_BUSSTATE(0x01); /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Initialize the SERCOM2 PLib Object */ sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE; sercom2I2CObj.state = SERCOM_I2C_STATE_IDLE; /* Enable all Interrupts */ SERCOM2_REGS->I2CM.SERCOM_INTENSET = SERCOM_I2CM_INTENSET_Msk; } bool SERCOM2_I2C_TransferSetup(SERCOM_I2C_TRANSFER_SETUP* setup, uint32_t srcClkFreq ) { uint32_t baudValue; uint32_t i2cClkSpeed; if (setup == NULL) { return false; } i2cClkSpeed = setup->clkSpeed; if( srcClkFreq == 0) { srcClkFreq = 47972352UL; } /* Reference clock frequency must be atleast two times the baud rate */ if (srcClkFreq < (2*i2cClkSpeed)) { return false; } baudValue = ((((float)srcClkFreq)/i2cClkSpeed) - ((((float)srcClkFreq) * (100/1000000000.0)) + 10))/2.0; /* BAUD.BAUD must be non-zero */ if (baudValue == 0) { return false; } /* Disable the I2C before changing the I2C clock speed */ SERCOM2_REGS->I2CM.SERCOM_CTRLA &= ~SERCOM_I2CM_CTRLA_ENABLE_Msk; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & 
SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Baud rate - Master Baud Rate*/ SERCOM2_REGS->I2CM.SERCOM_BAUD = SERCOM_I2CM_BAUD_BAUD(baudValue); /* Re-enable the I2C module */ SERCOM2_REGS->I2CM.SERCOM_CTRLA |= SERCOM_I2CM_CTRLA_ENABLE_Msk; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Since the I2C module was disabled, re-initialize the bus state to IDLE */ SERCOM2_REGS->I2CM.SERCOM_STATUS = SERCOM_I2CM_STATUS_BUSSTATE(0x01); /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); return true; } // ***************************************************************************** /* Function: void SERCOM2_I2C_InitiateRead(uint16_t address) Summary: Intiates I2C Read Description: Remarks: Refer plib_sercom2_i2c.h for more information. */ static void SERCOM2_I2C_InitiateRead(uint16_t address) { sercom2I2CObj.state = SERCOM_I2C_STATE_TRANSFER_READ; SERCOM2_REGS->I2CM.SERCOM_ADDR = (address << 1) | I2C_TRANSFER_READ; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); } // ***************************************************************************** /* Function: void SERCOM2_I2C_CallbackRegister(SERCOM_I2C_CALLBACK callback, uintptr_t context) Summary: Sets the pointer to the function (and it's context) to be called when the given SERCOM I2C's transfer events occur. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. Parameters: callback - A pointer to a function with a calling signature defined by the SERCOM_I2C_CALLBACK data type. context - A value (usually a pointer) passed (unused) into the function identified by the callback parameter. Returns: None. 
*/ void SERCOM2_I2C_CallbackRegister(SERCOM_I2C_CALLBACK callback, uintptr_t contextHandle) { sercom2I2CObj.callback = callback; sercom2I2CObj.context = contextHandle; } /// ***************************************************************************** /* Function: void SERCOM2_2C_InitiateTransfer(uint16_t address, bool type) Summary: Send the 7-bit or 10-bit slave address. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. Parameters: address - 7-bit / 10-bit slave address. type - Read / Write Remarks: None. */ static void SERCOM2_I2C_InitiateTransfer(uint16_t address, bool type) { sercom2I2CObj.writeCount = 0; sercom2I2CObj.readCount = 0; /* Clear all flags */ SERCOM2_REGS->I2CM.SERCOM_INTFLAG = SERCOM_I2CM_INTFLAG_Msk; /* Smart mode enabled - ACK is set to send while receiving the data */ SERCOM2_REGS->I2CM.SERCOM_CTRLB &= ~SERCOM_I2CM_CTRLB_ACKACT_Msk; /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); /* Reset Error Information */ sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE; if(type) { sercom2I2CObj.state = SERCOM_I2C_STATE_TRANSFER_READ; /* Write 7bit address with direction (ADDR.ADDR[0]) equal to 1*/ SERCOM2_REGS->I2CM.SERCOM_ADDR = (address << 1) | I2C_TRANSFER_READ; } else { sercom2I2CObj.state = SERCOM_I2C_STATE_TRANSFER_WRITE; /* Write 7bit address with direction (ADDR.ADDR[0]) equal to 0*/ SERCOM2_REGS->I2CM.SERCOM_ADDR = (address << 1) | I2C_TRANSFER_WRITE; } /* Wait for synchronization */ while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk); } // ***************************************************************************** /* Function: bool SERCOM2_I2C_Read(uint16_t address, uint8_t *pdata, uint32_t length) Summary: Reads data from the slave. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. 
Parameters: address - 7-bit / 10-bit slave address. pdata - pointer to destination data buffer length - length of data buffer in number of bytes. Returns: Request status. True - Request was successful. False - Request has failed. */ bool SERCOM2_I2C_Read(uint16_t address, uint8_t *pdata, uint32_t length) { /* Check for ongoing transfer */ if(sercom2I2CObj.state != SERCOM_I2C_STATE_IDLE) { return false; } sercom2I2CObj.address = address; sercom2I2CObj.readBuffer = pdata; sercom2I2CObj.readSize = length; sercom2I2CObj.writeBuffer = NULL; sercom2I2CObj.writeSize = 0; sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE; SERCOM2_I2C_InitiateTransfer(address, true); return true; } // ***************************************************************************** /* Function: bool SERCOM2_I2C_Write(uint16_t address, uint8_t *pdata, uint32_t length) Summary: Writes data onto the slave. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. Parameters: address - 7-bit / 10-bit slave address. pdata - pointer to source data buffer length - length of data buffer in number of bytes. Returns: Request status. True - Request was successful. False - Request has failed. */ bool SERCOM2_I2C_Write(uint16_t address, uint8_t *pdata, uint32_t length) { /* Check for ongoing transfer */ if(sercom2I2CObj.state != SERCOM_I2C_STATE_IDLE) { return false; } sercom2I2CObj.address = address; sercom2I2CObj.readBuffer = NULL; sercom2I2CObj.readSize = 0; sercom2I2CObj.writeBuffer = pdata; sercom2I2CObj.writeSize = length; sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE; SERCOM2_I2C_InitiateTransfer(address, false); return true; } // ***************************************************************************** /* Function: bool SERCOM2_I2C_WriteRead(uint16_t address, uint8_t *wdata, uint32_t wlength, uint8_t *rdata, uint32_t rlength) Summary: Write and Read data from Slave. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. 
Parameters: address - 7-bit / 10-bit slave address. wdata - pointer to write data buffer wlength - write data length in bytes. rdata - pointer to read data buffer. rlength - read data length in bytes. Returns: Request status. True - Request was successful. False - Request has failed. */ bool SERCOM2_I2C_WriteRead(uint16_t address, uint8_t *wdata, uint32_t wlength, uint8_t *rdata, uint32_t rlength) { /* Check for ongoing transfer */ if(sercom2I2CObj.state != SERCOM_I2C_STATE_IDLE) { return false; } sercom2I2CObj.address = address; sercom2I2CObj.readBuffer = rdata; sercom2I2CObj.readSize = rlength; sercom2I2CObj.writeBuffer = wdata; sercom2I2CObj.writeSize = wlength; sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE; SERCOM2_I2C_InitiateTransfer(address, false); return true; } // ***************************************************************************** /* Function: bool SERCOM2_I2C_IsBusy(void) Summary: Returns the Peripheral busy status. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. Parameters: None. Returns: true - Busy. false - Not busy. */ bool SERCOM2_I2C_IsBusy(void) { if((sercom2I2CObj.state == SERCOM_I2C_STATE_IDLE) && ((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_BUSSTATE_Msk) == SERCOM_I2CM_STATUS_BUSSTATE(0x01))) { return false; } else { return true; } } // ***************************************************************************** /* Function: SERCOM_I2C_ERROR SERCOM2_I2C_ErrorGet(void) Summary: Returns the error during transfer. Precondition: SERCOMx_I2C_Initialize must have been called for the associated SERCOM instance. Parameters: None. Returns: Error during transfer. */ SERCOM_I2C_ERROR SERCOM2_I2C_ErrorGet(void) { return sercom2I2CObj.error; } // ***************************************************************************** /* Function: void SERCOM2_I2C_InterruptHandler(void) Summary: SERCOM2 I2C Peripheral Interrupt Handler. 
   Description:
    This function is the SERCOM2 I2C Peripheral Interrupt Handler and will be
    called on every SERCOM2 I2C interrupt.

   Precondition:
    None.

   Parameters:
    None.

   Returns:
    None.

   Remarks:
    The function is called as peripheral instance's interrupt handler if the
    instance interrupt is enabled. If peripheral instance's interrupt is not
    enabled user need to call it from the main while loop of the application.
*/

void SERCOM2_I2C_InterruptHandler(void)
{
    /* Only service the peripheral when at least one interrupt source is
     * enabled (i.e. the driver actually expects events). */
    if(SERCOM2_REGS->I2CM.SERCOM_INTENSET != 0)
    {
        /* Checks if the arbitration lost in multi-master scenario */
        if((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_ARBLOST_Msk) == SERCOM_I2CM_STATUS_ARBLOST_Msk)
        {
            /*
             * Re-initiate the transfer if arbitration is lost
             * in between of the transfer
             */
            sercom2I2CObj.state = SERCOM_I2C_REINITIATE_TRANSFER;
        }
        /* Check for Bus Error during transmission */
        else if((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_BUSERR_Msk) == SERCOM_I2CM_STATUS_BUSERR_Msk)
        {
            /* Set Error status */
            sercom2I2CObj.state = SERCOM_I2C_STATE_ERROR;
            sercom2I2CObj.error = SERCOM_I2C_ERROR_BUS;
        }
        /* Checks slave acknowledge for address or data */
        else if((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_RXNACK_Msk) == SERCOM_I2CM_STATUS_RXNACK_Msk)
        {
            /* Slave NACKed the address or a data byte */
            sercom2I2CObj.state = SERCOM_I2C_STATE_ERROR;
            sercom2I2CObj.error = SERCOM_I2C_ERROR_NAK;
        }
        else
        {
            /* No bus-level error: advance the transfer state machine */
            switch(sercom2I2CObj.state)
            {
                case SERCOM_I2C_REINITIATE_TRANSFER:
                {
                    /* Arbitration was lost earlier; restart the whole request
                     * in the direction implied by the buffered sizes. */
                    if (sercom2I2CObj.writeSize != 0)
                    {
                        /* Initiate Write transfer */
                        SERCOM2_I2C_InitiateTransfer(sercom2I2CObj.address, false);
                    }
                    else
                    {
                        /* Initiate Read transfer */
                        SERCOM2_I2C_InitiateTransfer(sercom2I2CObj.address, true);
                    }

                    break;
                }

                case SERCOM_I2C_STATE_IDLE:
                {
                    /* Spurious interrupt while idle: nothing to do */
                    break;
                }

                case SERCOM_I2C_STATE_TRANSFER_WRITE:
                {
                    if (sercom2I2CObj.writeCount == (sercom2I2CObj.writeSize))
                    {
                        /* All bytes written; either turn around into a read
                         * phase (write-read request) or issue STOP (CMD = 3). */
                        if(sercom2I2CObj.readSize != 0)
                        {
                            SERCOM2_I2C_InitiateRead(sercom2I2CObj.address);
                        }
                        else
                        {
                            SERCOM2_REGS->I2CM.SERCOM_CTRLB |= SERCOM_I2CM_CTRLB_CMD(3);

                            /* Wait for synchronization */
                            while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk);

                            sercom2I2CObj.state = SERCOM_I2C_STATE_TRANSFER_DONE;
                        }
                    }
                    /* Write next byte */
                    else
                    {
                        SERCOM2_REGS->I2CM.SERCOM_DATA = sercom2I2CObj.writeBuffer[sercom2I2CObj.writeCount++];
                    }

                    break;
                }

                case SERCOM_I2C_STATE_TRANSFER_READ:
                {
                    /* NOTE(review): assumes readSize >= 1 — with readSize == 0
                     * the subtraction underflows and readBuffer[0] is written
                     * unconditionally; callers must reject zero-length reads. */
                    if(sercom2I2CObj.readCount == (sercom2I2CObj.readSize - 1))
                    {
                        /* Set NACK and send stop condition to the slave from master */
                        SERCOM2_REGS->I2CM.SERCOM_CTRLB |= SERCOM_I2CM_CTRLB_ACKACT_Msk | SERCOM_I2CM_CTRLB_CMD(3);

                        /* Wait for synchronization */
                        while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk);

                        sercom2I2CObj.state = SERCOM_I2C_STATE_TRANSFER_DONE;
                    }

                    /* Read the received data (also reached for the final byte,
                     * after the NACK/STOP command has been queued above) */
                    sercom2I2CObj.readBuffer[sercom2I2CObj.readCount++] = SERCOM2_REGS->I2CM.SERCOM_DATA;

                    break;
                }

                default:
                {
                    break;
                }
            }
        }

        /* Error Status */
        if(sercom2I2CObj.state == SERCOM_I2C_STATE_ERROR)
        {
            /* Reset the PLib objects and Interrupts */
            sercom2I2CObj.state = SERCOM_I2C_STATE_IDLE;

            /* Generate STOP condition */
            SERCOM2_REGS->I2CM.SERCOM_CTRLB |= SERCOM_I2CM_CTRLB_CMD(3);

            /* Wait for synchronization */
            while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_SYNCBUSY_Msk) & SERCOM_I2CM_STATUS_SYNCBUSY_Msk);

            SERCOM2_REGS->I2CM.SERCOM_INTFLAG = SERCOM_I2CM_INTFLAG_Msk;

            /* sercom2I2CObj.error still holds the failure reason for the callback */
            if (sercom2I2CObj.callback != NULL)
            {
                sercom2I2CObj.callback(sercom2I2CObj.context);
            }
        }
        /* Transfer Complete */
        else if(sercom2I2CObj.state == SERCOM_I2C_STATE_TRANSFER_DONE)
        {
            /* Reset the PLib objects and interrupts */
            sercom2I2CObj.state = SERCOM_I2C_STATE_IDLE;
            sercom2I2CObj.error = SERCOM_I2C_ERROR_NONE;

            SERCOM2_REGS->I2CM.SERCOM_INTFLAG = SERCOM_I2CM_INTFLAG_Msk;

            /* Wait for the NAK and STOP bit to be transmitted out and I2C state machine to rest in IDLE state */
            /* NOTE(review): this busy-waits inside the interrupt handler until
             * BUSSTATE returns to IDLE — confirm the added ISR latency is
             * acceptable for the application. */
            while((SERCOM2_REGS->I2CM.SERCOM_STATUS & SERCOM_I2CM_STATUS_BUSSTATE_Msk) != SERCOM_I2CM_STATUS_BUSSTATE(0x01));

            if(sercom2I2CObj.callback != NULL)
            {
                sercom2I2CObj.callback(sercom2I2CObj.context);
            }
        }
    }

    return;
}
468860.c
/* Microsoft Reference Implementation for TPM 2.0 * * The copyright in this software is being made available under the BSD License, * included below. This software may be subject to other third party and * contributor rights, including patent rights, and no such rights are granted * under this license. * * Copyright (c) Microsoft Corporation * * All rights reserved. * * BSD License * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "Tpm.h" #include "_TPM_Init_fp.h" // This function is used to process a _TPM_Init indication. 
LIB_EXPORT void _TPM_Init( void ) { g_powerWasLost = g_powerWasLost | _plat__WasPowerLost(); #if SIMULATION && !defined NDEBUG // If power was lost and this was a simulation, put canary in RAM used by NV // so that uninitialized memory can be detected more easily if(g_powerWasLost) { memset(&gc, 0xbb, sizeof(gc)); memset(&gr, 0xbb, sizeof(gr)); memset(&gp, 0xbb, sizeof(gp)); memset(&go, 0xbb, sizeof(go)); } #endif #if SIMULATION // Clear the flag that forces failure on self-test g_forceFailureMode = FALSE; #endif // Set initialization state TPMInit(); // Set g_DRTMHandle as unassigned g_DRTMHandle = TPM_RH_UNASSIGNED; // No H-CRTM, yet. g_DrtmPreStartup = FALSE; // Initialize the NvEnvironment. g_nvOk = NvPowerOn(); // Initialize cryptographic functions g_inFailureMode = (CryptInit() == FALSE); if(!g_inFailureMode) { // Load the persistent data NvReadPersistent(); // Load the orderly data (clock and DRBG state). // If this is not done here, things break NvRead(&go, NV_ORDERLY_DATA, sizeof(go)); // Start clock. Need to do this after NV has been restored. TimePowerOn(); } return; }
561815.c
/* * Board setup routines for the Buffalo Linkstation / Kurobox Platform. * * Copyright (C) 2006 G. Liakhovetski ([email protected]) * * Based on sandpoint.c by Mark A. Greer * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of * any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> #include "mpc10x.h" static const struct of_device_id of_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "simple-bus", }, {}, }; static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(linkstation, declare_of_platform_devices); static int __init linkstation_add_bridge(struct device_node *dev) { #ifdef CONFIG_PCI int len; struct pci_controller *hose; const int *bus_range; printk("Adding PCI host bridge %pOF\n", dev); bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) printk(KERN_WARNING "Can't get bus-range for %pOF, assume" " bus 0\n", dev); hose = pcibios_alloc_controller(dev); if (hose == NULL) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, 1); #endif return 0; } static void __init linkstation_setup_arch(void) { struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") linkstation_add_bridge(np); printk(KERN_INFO "BUFFALO Network Attached Storage Series\n"); printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n"); } /* * Interrupt setup and service. 
Interrupts on the linkstation come * from the four PCI slots plus onboard 8241 devices: I2C, DUART. */ static void __init linkstation_init_IRQ(void) { struct mpic *mpic; mpic = mpic_alloc(NULL, 0, 0, 4, 0, " EPIC "); BUG_ON(mpic == NULL); /* PCI IRQs */ mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200); /* I2C */ mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000); /* ttyS0, ttyS1 */ mpic_assign_isu(mpic, 2, mpic->paddr + 0x11100); mpic_init(mpic); } extern void avr_uart_configure(void); extern void avr_uart_send(const char); static void __noreturn linkstation_restart(char *cmd) { local_irq_disable(); /* Reset system via AVR */ avr_uart_configure(); /* Send reboot command */ avr_uart_send('C'); for(;;) /* Spin until reset happens */ avr_uart_send('G'); /* "kick" */ } static void __noreturn linkstation_power_off(void) { local_irq_disable(); /* Power down system via AVR */ avr_uart_configure(); /* send shutdown command */ avr_uart_send('E'); for(;;) /* Spin until power-off happens */ avr_uart_send('G'); /* "kick" */ /* NOTREACHED */ } static void __noreturn linkstation_halt(void) { linkstation_power_off(); /* NOTREACHED */ } static void linkstation_show_cpuinfo(struct seq_file *m) { seq_printf(m, "vendor\t\t: Buffalo Technology\n"); seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n"); } static int __init linkstation_probe(void) { if (!of_machine_is_compatible("linkstation")) return 0; pm_power_off = linkstation_power_off; return 1; } define_machine(linkstation){ .name = "Buffalo Linkstation", .probe = linkstation_probe, .setup_arch = linkstation_setup_arch, .init_IRQ = linkstation_init_IRQ, .show_cpuinfo = linkstation_show_cpuinfo, .get_irq = mpic_get_irq, .restart = linkstation_restart, .halt = linkstation_halt, .calibrate_decr = generic_calibrate_decr, };
16865.c
//***************************************************************************** // // fonthandwriting26.c - Font definition for the 26pt Handwriting font. // //***************************************************************************** //***************************************************************************** // // Original font source: http://helloalan.com/projects/breip/ // // This Font Software is Copyright (c) 2008, Alan Hussey and Adam Breipohl. // All Rights Reserved. // // "Breip" is a Reserved Font Name for this Font Software. // // This Font Software is licensed under the SIL Open Font License, // Version 1.0. No modification of the license is permitted, only verbatim // copy is allowed. This license is copied below, and is also available with // a FAQ at: http://scripts.sil.org/OFL // ------------------------------------------------------------------------ // // SIL OPEN FONT LICENSE // // Version 1.0 - 22 November 2005 // PREAMBLE // // The goals of the Open Font License (OFL) are to stimulate worldwide // development of cooperative font projects, to support the font creation // efforts of academic and linguistic communities, and to provide an open // framework in which fonts may be shared and improved in partnership with // others. // // The OFL allows the licensed fonts to be used, studied, modified and // redistributed freely as long as they are not sold by themselves. The // fonts, including any derivative works, can be bundled, embedded, // redistributed and sold with any software provided that the font names of // derivative works are changed. The fonts and derivatives, however, cannot // be released under any other type of license. // // DEFINITIONS // // "Font Software" refers to any and all of the following: // // * font files // * data files // * source code // * build scripts // * documentation // // "Reserved Font Name" refers to the Font Software name as seen by users // and any other names as specified after the copyright statement. 
// // "Standard Version" refers to the collection of Font Software components // as distributed by the Copyright Holder. // // "Modified Version" refers to any derivative font software made by adding // to, deleting, or substituting -- in part or in whole -- any of the // components of the Standard Version, by changing formats or by porting // the Font Software to a new environment. // // "Author" refers to any designer, engineer, programmer, technical writer // or other person who contributed to the Font Software. // // PERMISSION & CONDITIONS // // Permission is hereby granted, free of charge, to any person obtaining a // copy of the Font Software, to use, study, copy, merge, embed, modify, // redistribute, and sell modified and unmodified copies of the Font // Software, subject to the following conditions: // // 1) Neither the Font Software nor any of its individual components, in // Standard or Modified Versions, may be sold by itself. // // 2) Standard or Modified Versions of the Font Software may be bundled, // redistributed and sold with any software, provided that each copy // contains the above copyright notice and this license. These can be // included either as stand-alone text files, human-readable headers or in // the appropriate machine-readable metadata fields within text or binary // files as long as those fields can be easily viewed by the user. // // 3) No Modified Version of the Font Software may use the Reserved Font // Name(s), in part or in whole, unless explicit written permission is // granted by the Copyright Holder. This restriction applies to all // references stored in the Font Software, such as the font menu name and // other font description fields, which are used to differentiate the font // from others. 
// // 4) The name(s) of the Copyright Holder or the Author(s) of the Font // Software shall not be used to promote, endorse or advertise any Modified // Version, except to acknowledge the contribution(s) of the Copyright Holder // and the Author(s) or with their explicit written permission. // // 5) The Font Software, modified or unmodified, in part or in whole, must // be distributed using this license, and may not be distributed under any // other license. // // TERMINATION // // This license becomes null and void if any of the above conditions are // not met. // // DISCLAIMER // // THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT // OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE // COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, // INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL // DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER // DEALINGS IN THE FONT SOFTWARE. //***************************************************************************** //***************************************************************************** // // This file is generated by ftrasterize; DO NOT EDIT BY HAND! 
// //***************************************************************************** #include <stdint.h> #include <stdbool.h> #include "grlib/grlib.h" //***************************************************************************** // // Details of this font: // Characters: 32 to 255 inclusive // Style: handwriting // Size: 26 point // Bold: no // Italic: no // Memory usage: 6080 bytes // //***************************************************************************** //***************************************************************************** // // The compressed data for the 26 point Handwriting font. // Contains characters 32 to 255 inclusive. // //***************************************************************************** static const uint8_t g_pui8Handwriting26Data[5877] = { 5, 10, 0, 37, 64, 24, 6, 240, 97, 66, 66, 66, 66, 51, 51, 51, 50, 66, 66, 65, 81, 240, 129, 81, 66, 66, 0, 5, 96, 16, 6, 240, 145, 81, 33, 33, 33, 33, 18, 33, 33, 81, 0, 15, 32, 44, 20, 0, 7, 97, 240, 65, 240, 65, 145, 146, 114, 161, 114, 127, 129, 114, 34, 97, 114, 161, 114, 161, 114, 161, 114, 161, 114, 161, 114, 127, 129, 130, 19, 81, 130, 130, 145, 130, 145, 129, 0, 22, 16, 52, 14, 240, 240, 225, 65, 129, 65, 129, 37, 99, 33, 33, 82, 49, 115, 49, 113, 17, 49, 97, 33, 49, 97, 33, 49, 113, 17, 49, 137, 97, 49, 49, 81, 49, 49, 81, 65, 33, 81, 67, 97, 51, 119, 68, 81, 209, 225, 209, 0, 10, 112, 50, 19, 0, 11, 117, 17, 65, 113, 66, 49, 148, 81, 240, 33, 240, 49, 240, 49, 240, 33, 240, 49, 240, 33, 240, 49, 67, 161, 65, 49, 145, 49, 209, 65, 81, 113, 65, 81, 98, 65, 81, 97, 97, 49, 113, 100, 129, 0, 18, 48, 53, 17, 0, 6, 84, 193, 50, 161, 81, 161, 81, 161, 81, 161, 81, 177, 50, 177, 49, 209, 33, 225, 17, 97, 129, 97, 146, 81, 145, 17, 65, 129, 49, 49, 129, 65, 33, 114, 83, 113, 114, 113, 113, 17, 97, 97, 50, 65, 82, 165, 0, 13, 112, 10, 3, 193, 33, 33, 33, 33, 0, 8, 16, 31, 10, 240, 240, 97, 114, 114, 129, 129, 129, 145, 130, 129, 145, 145, 145, 145, 145, 145, 145, 145, 145, 97, 49, 65, 66, 
18, 114, 0, 8, 16, 26, 6, 240, 49, 82, 81, 97, 81, 82, 66, 81, 81, 81, 81, 81, 81, 81, 81, 65, 81, 81, 65, 81, 0, 5, 96, 13, 6, 240, 65, 81, 17, 35, 50, 82, 65, 0, 16, 32, 24, 13, 0, 8, 65, 194, 193, 194, 178, 178, 179, 178, 107, 114, 178, 178, 178, 178, 178, 178, 193, 0, 13, 96, 11, 3, 0, 7, 65, 33, 33, 33, 33, 33, 224, 8, 14, 0, 24, 76, 0, 26, 64, 9, 5, 0, 13, 99, 34, 240, 240, 48, 27, 15, 0, 7, 17, 210, 194, 209, 210, 194, 209, 210, 194, 209, 210, 209, 209, 210, 209, 210, 209, 209, 210, 209, 0, 14, 112, 52, 20, 0, 8, 18, 240, 21, 226, 49, 225, 65, 210, 209, 50, 225, 50, 241, 33, 240, 17, 33, 240, 17, 33, 240, 17, 33, 240, 17, 33, 240, 17, 33, 240, 17, 34, 226, 49, 225, 66, 194, 81, 178, 98, 146, 130, 99, 168, 241, 0, 16, 64, 24, 4, 193, 50, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 49, 49, 240, 240, 46, 21, 0, 11, 3, 240, 22, 226, 65, 210, 81, 240, 81, 240, 81, 240, 65, 240, 81, 240, 81, 240, 66, 240, 65, 193, 113, 162, 129, 130, 161, 114, 177, 98, 193, 82, 209, 65, 240, 17, 33, 240, 50, 0, 19, 112, 39, 18, 0, 7, 72, 115, 99, 67, 161, 50, 193, 240, 18, 242, 242, 242, 244, 240, 19, 240, 34, 240, 34, 240, 18, 240, 33, 240, 33, 240, 33, 240, 18, 242, 167, 225, 0, 16, 80, 49, 21, 0, 9, 18, 240, 36, 240, 21, 242, 34, 226, 50, 210, 66, 194, 82, 178, 98, 161, 130, 145, 146, 67, 33, 146, 35, 66, 133, 139, 240, 50, 240, 66, 240, 66, 240, 66, 240, 66, 240, 66, 240, 81, 240, 81, 0, 16, 112, 34, 12, 240, 240, 181, 54, 97, 177, 177, 177, 177, 177, 177, 21, 83, 65, 49, 17, 81, 50, 97, 50, 97, 177, 177, 161, 177, 177, 81, 65, 116, 0, 11, 32, 31, 14, 0, 7, 113, 209, 209, 193, 193, 209, 193, 193, 209, 193, 193, 209, 193, 209, 114, 65, 146, 33, 161, 33, 161, 49, 114, 88, 0, 12, 96, 27, 14, 0, 7, 4, 232, 209, 209, 194, 193, 209, 193, 209, 194, 193, 209, 193, 209, 194, 193, 194, 193, 209, 209, 0, 11, 64, 42, 12, 240, 97, 177, 177, 177, 177, 114, 18, 82, 49, 66, 81, 65, 82, 49, 82, 65, 81, 82, 50, 97, 49, 131, 162, 145, 19, 113, 50, 81, 81, 81, 81, 81, 81, 81, 
50, 116, 0, 11, 32, 52, 17, 0, 9, 7, 17, 99, 81, 18, 66, 131, 65, 147, 50, 145, 17, 50, 129, 33, 65, 114, 33, 66, 82, 49, 82, 50, 65, 100, 97, 240, 17, 240, 17, 240, 17, 240, 17, 240, 17, 240, 17, 240, 17, 240, 17, 240, 18, 240, 17, 0, 13, 12, 4, 240, 240, 98, 34, 240, 178, 34, 0, 5, 96, 15, 4, 0, 6, 2, 34, 240, 240, 17, 49, 49, 49, 34, 240, 48, 21, 14, 0, 11, 113, 179, 148, 147, 147, 147, 163, 162, 196, 195, 210, 211, 195, 211, 210, 0, 16, 11, 13, 0, 17, 123, 0, 6, 107, 0, 21, 48, 21, 15, 0, 13, 19, 227, 212, 212, 211, 227, 226, 180, 163, 178, 179, 179, 163, 178, 0, 18, 48, 28, 12, 0, 6, 53, 88, 50, 98, 33, 129, 33, 129, 33, 129, 33, 129, 33, 114, 161, 147, 145, 0, 11, 66, 162, 0, 9, 112, 71, 23, 0, 9, 54, 240, 17, 98, 194, 130, 162, 161, 161, 178, 129, 209, 129, 83, 81, 114, 66, 17, 81, 65, 33, 65, 145, 65, 33, 50, 98, 17, 65, 33, 49, 114, 17, 65, 33, 33, 115, 17, 65, 33, 33, 98, 17, 17, 50, 34, 17, 82, 34, 65, 65, 34, 19, 145, 66, 35, 162, 82, 210, 115, 162, 163, 99, 216, 0, 21, 16, 44, 14, 0, 5, 114, 179, 177, 18, 161, 33, 146, 33, 145, 49, 145, 50, 114, 50, 114, 65, 113, 81, 113, 82, 97, 82, 91, 50, 97, 81, 113, 81, 114, 50, 114, 50, 129, 49, 145, 49, 145, 0, 12, 80, 41, 13, 0, 6, 74, 49, 145, 49, 129, 49, 114, 49, 98, 65, 66, 97, 34, 132, 129, 68, 65, 114, 49, 130, 33, 145, 33, 145, 33, 130, 33, 114, 177, 177, 162, 97, 35, 131, 0, 10, 112, 37, 18, 0, 9, 100, 211, 226, 242, 242, 240, 17, 240, 18, 240, 17, 240, 33, 240, 18, 240, 17, 240, 33, 240, 33, 240, 34, 240, 18, 240, 34, 193, 66, 101, 103, 0, 19, 47, 16, 0, 6, 20, 199, 145, 53, 113, 100, 81, 131, 65, 162, 49, 177, 49, 177, 49, 177, 49, 177, 49, 177, 49, 162, 49, 162, 49, 161, 50, 146, 50, 145, 66, 17, 98, 66, 33, 66, 81, 50, 34, 180, 0, 14, 112, 34, 15, 0, 7, 113, 225, 24, 81, 225, 210, 210, 209, 210, 115, 50, 69, 66, 36, 98, 50, 130, 210, 210, 129, 66, 130, 50, 115, 67, 37, 88, 0, 15, 96, 42, 18, 0, 9, 17, 89, 57, 146, 240, 18, 240, 18, 240, 18, 240, 18, 115, 98, 22, 146, 240, 18, 240, 18, 240, 18, 240, 
18, 240, 18, 240, 18, 240, 18, 242, 240, 18, 240, 17, 0, 17, 112, 36, 17, 0, 7, 33, 242, 226, 226, 226, 226, 242, 226, 242, 226, 242, 162, 50, 177, 50, 193, 34, 193, 34, 193, 34, 193, 34, 178, 49, 162, 66, 115, 120, 0, 15, 80, 49, 16, 0, 7, 33, 82, 129, 82, 114, 82, 129, 82, 130, 66, 130, 66, 130, 66, 115, 66, 98, 17, 66, 81, 49, 66, 50, 65, 66, 34, 81, 69, 98, 50, 146, 50, 161, 50, 161, 50, 161, 50, 161, 50, 177, 34, 0, 15, 96, 25, 5, 240, 82, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 65, 65, 65, 65, 66, 65, 240, 240, 112, 37, 17, 0, 9, 84, 119, 119, 240, 240, 19, 240, 19, 244, 243, 240, 18, 240, 18, 240, 17, 240, 17, 240, 18, 241, 240, 17, 97, 130, 113, 98, 145, 66, 181, 0, 15, 80, 46, 18, 0, 9, 2, 240, 18, 240, 18, 162, 66, 147, 66, 131, 82, 115, 98, 99, 114, 83, 130, 82, 146, 66, 162, 50, 178, 50, 178, 49, 194, 50, 178, 51, 162, 68, 145, 104, 50, 240, 18, 240, 33, 0, 15, 48, 42, 18, 0, 9, 1, 240, 33, 240, 33, 240, 33, 240, 33, 240, 33, 240, 33, 240, 33, 240, 33, 240, 33, 240, 34, 240, 18, 240, 33, 240, 34, 240, 33, 178, 66, 147, 83, 83, 151, 0, 18, 96, 63, 23, 0, 11, 114, 240, 98, 162, 146, 148, 115, 145, 33, 115, 130, 33, 113, 18, 113, 50, 97, 33, 98, 65, 97, 33, 97, 81, 82, 34, 66, 81, 81, 66, 34, 98, 65, 84, 114, 65, 240, 33, 65, 240, 33, 65, 240, 33, 65, 240, 33, 50, 240, 34, 34, 240, 34, 49, 240, 49, 0, 23, 32, 52, 26, 0, 15, 98, 98, 240, 18, 99, 226, 115, 226, 115, 226, 116, 210, 113, 18, 210, 113, 33, 194, 114, 34, 178, 114, 34, 178, 114, 50, 146, 130, 50, 131, 130, 66, 114, 146, 67, 67, 146, 104, 162, 147, 193, 240, 131, 240, 130, 0, 25, 96, 50, 21, 0, 8, 81, 240, 51, 240, 34, 240, 50, 163, 97, 210, 66, 210, 65, 242, 49, 242, 34, 242, 34, 240, 17, 34, 240, 17, 34, 240, 17, 34, 242, 49, 241, 66, 225, 81, 209, 98, 178, 114, 146, 131, 99, 184, 243, 0, 17, 16, 34, 10, 240, 240, 21, 65, 67, 33, 97, 33, 97, 33, 97, 33, 82, 33, 66, 49, 34, 84, 97, 145, 145, 145, 145, 146, 130, 130, 130, 130, 130, 0, 9, 96, 42, 18, 0, 7, 82, 243, 226, 242, 242, 240, 18, 
242, 242, 193, 50, 193, 49, 209, 34, 209, 34, 209, 34, 194, 33, 210, 34, 113, 50, 50, 113, 34, 67, 97, 18, 99, 83, 139, 150, 36, 0, 16, 43, 18, 0, 9, 26, 114, 129, 113, 130, 113, 114, 129, 98, 145, 82, 161, 66, 177, 66, 177, 66, 177, 66, 177, 66, 177, 82, 161, 99, 129, 131, 97, 180, 33, 240, 33, 240, 33, 240, 33, 0, 17, 112, 29, 15, 0, 6, 114, 180, 133, 117, 147, 178, 210, 210, 226, 227, 227, 212, 212, 211, 211, 210, 210, 81, 98, 97, 67, 118, 0, 13, 112, 34, 17, 0, 7, 115, 149, 134, 135, 242, 242, 242, 240, 17, 240, 18, 242, 242, 240, 18, 242, 242, 240, 18, 242, 240, 18, 242, 240, 17, 240, 33, 0, 15, 64, 47, 22, 0, 16, 82, 241, 66, 241, 65, 240, 18, 34, 240, 18, 34, 240, 18, 34, 240, 18, 34, 240, 18, 34, 240, 18, 34, 240, 18, 49, 242, 66, 226, 66, 210, 98, 194, 98, 163, 130, 131, 163, 52, 230, 0, 20, 64, 44, 24, 0, 20, 81, 240, 113, 240, 114, 49, 240, 34, 66, 240, 17, 98, 226, 98, 210, 130, 194, 130, 178, 162, 146, 178, 146, 194, 114, 211, 82, 242, 82, 240, 18, 50, 240, 35, 18, 240, 68, 0, 22, 80, 64, 24, 0, 14, 49, 240, 129, 240, 130, 240, 129, 240, 129, 240, 130, 179, 145, 33, 117, 129, 33, 113, 50, 113, 49, 82, 66, 97, 34, 82, 81, 97, 34, 81, 98, 81, 49, 81, 114, 65, 49, 81, 114, 65, 49, 66, 130, 34, 50, 50, 145, 34, 65, 34, 162, 18, 66, 18, 179, 99, 209, 0, 21, 64, 41, 15, 0, 9, 1, 33, 162, 34, 146, 50, 114, 66, 114, 82, 82, 114, 50, 130, 50, 146, 18, 180, 179, 195, 196, 162, 18, 147, 34, 130, 66, 98, 82, 98, 98, 82, 113, 81, 0, 12, 96, 44, 18, 0, 8, 66, 240, 18, 242, 50, 178, 66, 161, 98, 130, 115, 98, 149, 18, 213, 240, 18, 240, 17, 240, 18, 240, 18, 240, 17, 240, 18, 240, 18, 240, 17, 240, 33, 240, 33, 240, 33, 0, 16, 96, 44, 29, 0, 15, 58, 203, 66, 240, 179, 240, 178, 240, 179, 240, 163, 240, 178, 240, 179, 240, 178, 240, 178, 240, 194, 240, 178, 240, 194, 240, 194, 240, 194, 240, 194, 196, 179, 88, 236, 240, 54, 0, 26, 112, 31, 15, 0, 5, 113, 130, 66, 83, 87, 129, 209, 225, 225, 225, 225, 225, 225, 209, 225, 225, 225, 225, 225, 225, 81, 129, 37, 117, 161, 
0, 13, 25, 14, 0, 7, 1, 209, 225, 210, 210, 209, 225, 210, 209, 225, 210, 209, 210, 210, 209, 225, 209, 225, 0, 14, 32, 28, 14, 240, 240, 203, 194, 194, 194, 194, 194, 209, 209, 209, 209, 209, 209, 209, 210, 209, 209, 209, 209, 209, 74, 66, 0, 11, 96, 19, 9, 240, 240, 17, 114, 99, 97, 17, 81, 33, 65, 49, 65, 65, 145, 0, 21, 80, 8, 12, 0, 33, 10, 0, 10, 96, 10, 7, 240, 97, 114, 113, 113, 0, 20, 48, 27, 12, 0, 21, 65, 161, 161, 161, 65, 97, 49, 17, 81, 34, 17, 65, 49, 49, 49, 33, 65, 52, 81, 49, 0, 10, 32, 30, 11, 0, 8, 49, 161, 146, 146, 146, 146, 146, 146, 146, 146, 146, 50, 66, 82, 34, 82, 49, 82, 50, 65, 66, 50, 85, 114, 0, 9, 19, 11, 0, 17, 18, 115, 114, 130, 145, 146, 146, 146, 161, 163, 35, 84, 0, 10, 16, 38, 14, 0, 6, 49, 210, 194, 194, 194, 194, 194, 194, 194, 194, 209, 98, 81, 81, 113, 65, 129, 49, 130, 49, 130, 49, 130, 49, 115, 50, 82, 17, 71, 34, 68, 0, 11, 64, 21, 16, 0, 28, 101, 162, 50, 145, 50, 145, 35, 103, 209, 241, 241, 242, 240, 24, 0, 12, 32, 24, 13, 0, 10, 24, 66, 178, 178, 178, 178, 193, 193, 152, 131, 162, 178, 178, 193, 193, 193, 193, 0, 12, 48, 28, 12, 0, 19, 114, 131, 145, 162, 161, 177, 177, 99, 49, 51, 17, 68, 49, 177, 177, 161, 177, 113, 49, 129, 17, 146, 240, 32, 38, 17, 0, 21, 33, 240, 17, 240, 33, 240, 17, 240, 17, 240, 17, 240, 17, 99, 129, 66, 33, 113, 49, 65, 113, 49, 81, 97, 33, 97, 113, 17, 98, 98, 130, 0, 15, 16, 16, 4, 240, 240, 162, 225, 49, 49, 49, 49, 49, 49, 49, 65, 240, 240, 28, 13, 0, 15, 34, 162, 177, 193, 240, 240, 129, 194, 194, 194, 194, 194, 194, 193, 193, 193, 193, 33, 130, 34, 83, 71, 240, 48, 32, 13, 0, 6, 66, 178, 178, 178, 178, 178, 178, 82, 66, 66, 82, 50, 98, 34, 114, 18, 132, 147, 162, 179, 180, 146, 23, 50, 178, 193, 0, 11, 25, 5, 240, 82, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 65, 66, 50, 240, 240, 112, 27, 14, 0, 23, 83, 113, 52, 86, 33, 82, 18, 49, 49, 17, 113, 49, 17, 113, 51, 129, 50, 129, 50, 194, 0, 13, 80, 23, 12, 0, 22, 65, 51, 81, 36, 84, 34, 83, 50, 66, 66, 66, 81, 66, 82, 
50, 177, 0, 10, 16, 19, 12, 0, 23, 1, 162, 145, 161, 82, 50, 97, 50, 113, 49, 98, 56, 0, 10, 112, 23, 8, 0, 16, 20, 50, 33, 49, 50, 33, 50, 33, 49, 49, 34, 52, 66, 97, 113, 113, 113, 113, 240, 29, 15, 0, 26, 67, 178, 209, 209, 225, 82, 113, 82, 113, 66, 130, 18, 17, 147, 18, 210, 210, 210, 210, 81, 130, 35, 148, 240, 64, 20, 11, 0, 16, 115, 118, 66, 81, 35, 130, 146, 146, 146, 146, 146, 146, 146, 0, 9, 48, 18, 9, 0, 15, 18, 67, 82, 114, 131, 130, 145, 130, 49, 34, 68, 0, 8, 48, 26, 14, 0, 10, 97, 193, 210, 194, 194, 209, 210, 194, 194, 68, 71, 84, 210, 194, 194, 209, 210, 194, 209, 0, 11, 64, 19, 11, 0, 22, 17, 146, 97, 34, 97, 34, 97, 49, 82, 50, 50, 85, 129, 0, 9, 21, 9, 0, 15, 49, 129, 114, 113, 49, 65, 49, 50, 50, 34, 65, 33, 84, 98, 0, 8, 64, 29, 13, 0, 25, 65, 49, 130, 33, 130, 33, 81, 34, 33, 67, 18, 33, 50, 17, 18, 33, 49, 36, 49, 18, 50, 82, 0, 10, 112, 18, 9, 0, 16, 113, 66, 49, 33, 84, 98, 114, 100, 81, 33, 145, 0, 8, 32, 22, 12, 0, 25, 17, 177, 34, 97, 65, 97, 83, 49, 131, 177, 177, 161, 177, 177, 177, 161, 240, 48, 19, 14, 0, 23, 83, 119, 83, 66, 65, 113, 193, 209, 193, 209, 194, 214, 0, 12, 64, 28, 16, 0, 6, 58, 82, 99, 97, 241, 241, 226, 211, 194, 226, 242, 242, 242, 241, 241, 241, 241, 241, 241, 241, 250, 0, 14, 32, 24, 4, 193, 50, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 49, 34, 49, 240, 240, 28, 15, 240, 240, 248, 226, 209, 225, 225, 225, 225, 225, 227, 227, 226, 179, 178, 209, 225, 225, 225, 210, 99, 21, 131, 0, 14, 48, 17, 13, 0, 21, 49, 113, 49, 18, 65, 49, 50, 33, 65, 67, 0, 21, 96, 25, 5, 240, 18, 50, 50, 240, 65, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 35, 49, 240, 240, 48, 36, 10, 240, 240, 34, 130, 130, 130, 146, 113, 17, 113, 17, 100, 82, 18, 81, 49, 81, 49, 81, 50, 66, 34, 67, 17, 18, 68, 145, 145, 146, 145, 145, 0, 9, 16, 45, 19, 0, 7, 115, 227, 18, 194, 81, 177, 240, 49, 240, 49, 240, 49, 240, 65, 240, 49, 240, 49, 240, 49, 240, 24, 225, 240, 49, 240, 49, 240, 49, 240, 49, 229, 210, 36, 180, 51, 240, 56, 0, 
14, 64, 22, 12, 0, 16, 65, 113, 71, 98, 34, 97, 18, 17, 102, 98, 34, 81, 20, 17, 193, 0, 16, 96, 44, 18, 0, 8, 21, 36, 146, 240, 17, 97, 146, 98, 129, 129, 129, 145, 97, 162, 81, 177, 81, 193, 49, 210, 33, 216, 193, 17, 225, 33, 215, 241, 240, 33, 240, 33, 240, 33, 240, 33, 217, 0, 13, 112, 20, 4, 240, 81, 49, 49, 49, 49, 49, 49, 242, 34, 34, 34, 34, 34, 34, 240, 240, 64, 38, 15, 0, 6, 99, 164, 133, 132, 162, 194, 18, 162, 34, 146, 66, 131, 50, 131, 50, 147, 49, 163, 33, 181, 179, 226, 210, 81, 114, 81, 98, 97, 67, 119, 161, 0, 12, 48, 11, 12, 0, 6, 3, 67, 35, 66, 0, 36, 48, 46, 18, 0, 12, 50, 211, 17, 193, 240, 17, 240, 17, 240, 17, 240, 17, 82, 161, 65, 177, 65, 145, 33, 65, 145, 33, 65, 145, 33, 65, 145, 33, 70, 65, 33, 209, 49, 193, 81, 162, 99, 83, 150, 0, 16, 96, 18, 9, 240, 240, 129, 113, 113, 35, 49, 33, 17, 49, 17, 49, 49, 0, 23, 48, 35, 17, 0, 15, 113, 81, 146, 66, 115, 51, 114, 67, 114, 66, 115, 66, 114, 82, 130, 67, 147, 66, 162, 66, 162, 66, 162, 66, 162, 66, 162, 66, 0, 19, 48, 16, 13, 0, 16, 42, 179, 178, 193, 193, 193, 193, 193, 193, 0, 18, 16, 7, 10, 0, 17, 72, 0, 19, 52, 18, 0, 10, 18, 211, 225, 240, 17, 240, 17, 240, 17, 22, 145, 33, 50, 145, 33, 49, 145, 49, 33, 161, 49, 33, 113, 33, 49, 33, 113, 33, 49, 49, 81, 49, 49, 67, 33, 49, 49, 145, 49, 49, 129, 81, 161, 114, 98, 166, 0, 19, 8, 11, 0, 9, 89, 0, 30, 64, 17, 10, 240, 240, 50, 114, 113, 130, 81, 33, 97, 49, 81, 69, 0, 25, 48, 23, 13, 0, 11, 98, 193, 193, 194, 193, 193, 137, 129, 193, 193, 193, 193, 193, 240, 240, 59, 0, 11, 80, 21, 11, 240, 240, 67, 113, 33, 161, 145, 161, 161, 161, 66, 65, 49, 97, 18, 129, 0, 24, 32, 16, 9, 240, 213, 49, 65, 129, 113, 130, 145, 129, 113, 98, 0, 20, 96, 9, 6, 240, 97, 65, 50, 0, 18, 64, 40, 19, 0, 20, 65, 145, 129, 145, 145, 129, 145, 114, 145, 114, 146, 99, 130, 99, 116, 66, 17, 99, 17, 65, 50, 35, 49, 65, 82, 113, 49, 225, 49, 225, 49, 240, 33, 0, 18, 112, 51, 20, 0, 7, 95, 2, 240, 225, 65, 181, 49, 131, 50, 49, 114, 82, 49, 113, 98, 49, 113, 98, 49, 129, 
82, 49, 132, 19, 49, 193, 33, 49, 241, 65, 225, 240, 66, 240, 50, 240, 50, 240, 50, 240, 50, 240, 50, 249, 0, 18, 32, 11, 6, 0, 9, 33, 67, 36, 50, 0, 10, 112, 12, 5, 0, 12, 1, 65, 66, 50, 34, 240, 240, 48, 13, 3, 145, 33, 33, 33, 33, 33, 33, 33, 33, 0, 7, 17, 10, 240, 240, 65, 114, 113, 145, 81, 33, 97, 49, 81, 69, 0, 25, 48, 40, 19, 0, 12, 97, 178, 82, 179, 67, 163, 67, 178, 82, 178, 82, 178, 82, 178, 82, 178, 82, 147, 51, 146, 67, 146, 82, 146, 82, 146, 82, 146, 82, 146, 82, 146, 82, 0, 20, 32, 49, 16, 0, 8, 17, 130, 81, 129, 97, 113, 113, 98, 113, 97, 129, 81, 145, 66, 145, 65, 161, 49, 226, 65, 145, 51, 130, 34, 17, 129, 33, 49, 113, 33, 65, 98, 17, 84, 49, 55, 66, 129, 81, 145, 241, 241, 0, 12, 80, 45, 15, 0, 7, 81, 225, 113, 97, 98, 97, 97, 113, 81, 129, 65, 145, 65, 145, 49, 161, 33, 177, 33, 209, 34, 146, 17, 17, 145, 65, 129, 81, 129, 65, 129, 81, 81, 129, 50, 145, 33, 194, 0, 13, 112, 56, 20, 0, 7, 85, 113, 97, 65, 98, 177, 97, 177, 98, 177, 97, 209, 65, 241, 34, 241, 33, 241, 33, 240, 17, 18, 50, 161, 33, 49, 17, 193, 49, 33, 193, 18, 49, 177, 33, 65, 162, 17, 85, 97, 56, 113, 145, 145, 145, 240, 65, 240, 81, 0, 18, 16, 29, 12, 0, 6, 66, 162, 0, 11, 82, 130, 161, 113, 34, 113, 33, 129, 33, 129, 33, 129, 34, 98, 49, 82, 71, 115, 0, 9, 96, 43, 13, 240, 241, 209, 209, 162, 178, 177, 17, 145, 33, 145, 33, 145, 34, 114, 49, 113, 65, 113, 65, 113, 66, 82, 81, 81, 24, 49, 97, 81, 97, 66, 98, 50, 113, 49, 129, 49, 129, 0, 11, 96, 44, 13, 240, 240, 226, 162, 193, 179, 161, 17, 161, 17, 145, 34, 129, 49, 129, 49, 114, 49, 113, 66, 97, 66, 97, 81, 82, 81, 90, 49, 98, 65, 113, 50, 113, 50, 113, 49, 129, 49, 129, 0, 10, 16, 42, 12, 0, 6, 66, 145, 18, 240, 97, 178, 145, 17, 145, 18, 129, 33, 113, 49, 113, 49, 113, 50, 97, 65, 82, 65, 89, 49, 81, 81, 82, 50, 97, 49, 113, 49, 113, 49, 113, 0, 9, 48, 44, 12, 240, 240, 130, 81, 49, 33, 49, 131, 240, 82, 145, 17, 145, 17, 145, 33, 129, 33, 113, 49, 113, 49, 113, 50, 82, 65, 81, 81, 89, 49, 81, 81, 97, 50, 97, 49, 113, 49, 
113, 0, 10, 112, 43, 13, 240, 240, 178, 49, 193, 161, 178, 177, 17, 146, 17, 145, 33, 145, 34, 114, 49, 114, 49, 113, 65, 113, 66, 82, 81, 90, 49, 97, 81, 97, 66, 98, 50, 113, 49, 129, 49, 129, 0, 11, 96, 43, 12, 240, 240, 163, 132, 132, 240, 240, 50, 145, 17, 145, 17, 145, 33, 114, 33, 113, 49, 113, 49, 113, 65, 82, 65, 81, 81, 81, 23, 49, 81, 81, 97, 50, 97, 49, 113, 49, 113, 0, 9, 48, 47, 21, 0, 11, 81, 240, 75, 146, 17, 240, 33, 33, 240, 18, 33, 240, 17, 49, 242, 49, 242, 49, 226, 65, 226, 73, 97, 81, 217, 193, 52, 194, 97, 193, 113, 178, 113, 178, 113, 177, 129, 161, 152, 212, 0, 16, 80, 28, 14, 0, 7, 98, 162, 177, 194, 193, 193, 209, 193, 209, 209, 209, 209, 210, 210, 84, 70, 193, 209, 210, 209, 178, 0, 11, 48, 36, 13, 240, 240, 225, 209, 209, 240, 81, 193, 38, 66, 36, 81, 193, 193, 178, 177, 99, 49, 53, 50, 34, 114, 177, 193, 129, 50, 113, 50, 83, 72, 115, 0, 10, 80, 35, 14, 0, 6, 49, 193, 178, 240, 113, 209, 218, 66, 193, 209, 194, 194, 193, 99, 65, 51, 98, 49, 129, 209, 209, 129, 66, 114, 50, 98, 88, 131, 0, 9, 96, 34, 13, 0, 7, 33, 177, 17, 240, 240, 49, 193, 23, 65, 193, 193, 178, 177, 114, 49, 67, 66, 34, 114, 177, 193, 194, 113, 50, 83, 71, 145, 0, 10, 96, 37, 14, 0, 5, 97, 49, 145, 49, 240, 97, 209, 38, 88, 98, 193, 209, 194, 194, 114, 49, 84, 65, 51, 98, 49, 129, 209, 209, 210, 114, 50, 98, 88, 117, 0, 11, 48, 23, 5, 241, 82, 209, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 50, 65, 65, 240, 240, 112, 24, 5, 240, 18, 33, 225, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 81, 65, 65, 240, 240, 128, 26, 5, 240, 17, 49, 17, 129, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 81, 65, 65, 65, 240, 240, 112, 25, 7, 240, 209, 34, 240, 33, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 98, 97, 97, 0, 6, 80, 45, 21, 0, 13, 117, 240, 33, 20, 241, 68, 193, 99, 177, 130, 161, 146, 145, 161, 145, 161, 40, 161, 145, 161, 145, 146, 145, 145, 146, 145, 146, 130, 146, 129, 162, 17, 81, 177, 49, 50, 177, 53, 0, 19, 16, 53, 22, 0, 10, 33, 195, 81, 193, 49, 49, 
240, 51, 240, 193, 97, 225, 83, 194, 83, 194, 83, 194, 84, 177, 100, 177, 97, 33, 162, 97, 34, 146, 97, 49, 130, 98, 50, 114, 98, 65, 98, 114, 68, 19, 129, 117, 130, 240, 82, 240, 66, 0, 19, 45, 18, 240, 240, 209, 240, 49, 240, 49, 0, 6, 2, 242, 241, 131, 82, 162, 65, 193, 50, 194, 34, 209, 34, 209, 33, 225, 34, 209, 34, 194, 49, 193, 66, 162, 81, 161, 98, 114, 130, 67, 181, 0, 14, 64, 42, 18, 0, 7, 114, 241, 240, 240, 18, 242, 241, 131, 82, 162, 65, 193, 50, 194, 34, 209, 34, 209, 33, 225, 34, 209, 34, 209, 49, 193, 66, 162, 81, 161, 98, 114, 130, 67, 181, 0, 16, 96, 44, 18, 0, 7, 97, 240, 19, 241, 240, 240, 34, 242, 241, 131, 82, 162, 65, 193, 50, 194, 34, 209, 34, 209, 34, 209, 34, 209, 34, 209, 49, 193, 66, 162, 81, 161, 98, 114, 130, 82, 181, 0, 14, 64, 44, 18, 0, 7, 51, 65, 145, 49, 33, 242, 178, 242, 241, 131, 82, 162, 65, 193, 50, 194, 34, 209, 34, 209, 34, 209, 34, 209, 34, 194, 49, 193, 66, 162, 81, 161, 98, 114, 130, 82, 181, 0, 16, 96, 44, 18, 0, 7, 18, 130, 98, 130, 240, 17, 130, 242, 241, 131, 82, 162, 65, 193, 50, 194, 34, 209, 34, 209, 34, 209, 34, 209, 34, 194, 49, 193, 66, 162, 81, 161, 98, 114, 130, 82, 181, 0, 16, 96, 26, 11, 0, 11, 1, 113, 34, 81, 66, 65, 82, 33, 98, 18, 115, 146, 132, 98, 33, 97, 50, 65, 81, 177, 0, 14, 58, 20, 0, 9, 17, 163, 82, 146, 113, 146, 113, 19, 82, 114, 50, 65, 129, 81, 50, 113, 98, 34, 98, 98, 34, 97, 129, 34, 81, 145, 34, 66, 145, 34, 65, 161, 34, 49, 162, 49, 49, 161, 66, 17, 162, 67, 177, 98, 161, 115, 114, 129, 19, 36, 213, 0, 18, 80, 45, 19, 0, 7, 113, 240, 66, 240, 50, 0, 6, 1, 240, 49, 225, 34, 225, 34, 225, 33, 241, 33, 241, 33, 241, 33, 241, 34, 225, 34, 210, 49, 209, 66, 178, 82, 146, 114, 99, 147, 20, 212, 0, 15, 48, 43, 19, 0, 8, 33, 240, 33, 240, 18, 0, 6, 33, 209, 50, 225, 34, 225, 33, 241, 33, 241, 33, 241, 33, 241, 33, 241, 34, 210, 49, 209, 66, 178, 81, 162, 98, 130, 130, 83, 182, 0, 17, 80, 46, 19, 0, 8, 17, 240, 35, 240, 17, 17, 241, 240, 240, 33, 240, 34, 225, 34, 225, 34, 225, 33, 241, 33, 241, 
33, 241, 33, 241, 34, 210, 34, 210, 49, 209, 66, 178, 82, 146, 114, 99, 152, 211, 0, 15, 64, 44, 20, 0, 8, 18, 82, 178, 82, 0, 5, 113, 240, 66, 225, 49, 241, 34, 241, 34, 241, 34, 241, 34, 241, 34, 241, 34, 241, 49, 226, 50, 209, 66, 194, 82, 162, 114, 130, 131, 83, 183, 0, 18, 64, 36, 17, 0, 7, 50, 225, 241, 240, 130, 241, 50, 162, 66, 146, 82, 129, 115, 82, 133, 19, 194, 17, 242, 242, 241, 242, 241, 240, 17, 242, 241, 240, 17, 0, 15, 112, 31, 12, 0, 6, 10, 241, 177, 177, 177, 177, 22, 65, 98, 49, 98, 49, 98, 49, 82, 65, 20, 97, 177, 177, 177, 177, 177, 184, 0, 10, 112, 45, 14, 240, 240, 203, 49, 146, 49, 145, 49, 145, 49, 115, 49, 99, 65, 52, 97, 50, 129, 49, 145, 51, 113, 83, 81, 114, 65, 129, 65, 130, 49, 130, 49, 130, 49, 129, 50, 17, 82, 98, 35, 132, 0, 13, 29, 13, 0, 15, 17, 194, 209, 209, 240, 240, 66, 162, 177, 177, 51, 97, 49, 17, 81, 49, 34, 65, 33, 65, 68, 67, 35, 0, 11, 27, 11, 0, 15, 113, 130, 129, 240, 97, 145, 161, 145, 50, 65, 49, 17, 65, 33, 49, 49, 33, 49, 51, 81, 0, 9, 112, 28, 11, 0, 14, 33, 162, 129, 33, 240, 225, 145, 161, 145, 50, 65, 49, 17, 65, 33, 49, 49, 33, 49, 51, 81, 0, 9, 112, 30, 11, 0, 14, 1, 145, 17, 65, 33, 52, 240, 209, 145, 161, 145, 50, 65, 49, 17, 65, 33, 49, 49, 33, 49, 51, 81, 0, 9, 112, 26, 11, 0, 16, 98, 34, 240, 209, 145, 161, 145, 50, 65, 49, 17, 65, 33, 49, 49, 33, 49, 51, 81, 0, 9, 112, 27, 11, 0, 14, 20, 116, 116, 240, 226, 130, 145, 145, 50, 81, 33, 18, 49, 49, 33, 49, 33, 49, 51, 81, 0, 9, 112, 34, 19, 0, 32, 34, 242, 34, 209, 50, 113, 65, 20, 113, 38, 145, 97, 177, 35, 17, 161, 49, 17, 17, 161, 33, 49, 24, 33, 18, 51, 161, 0, 16, 48, 21, 9, 0, 16, 34, 82, 98, 113, 114, 113, 130, 130, 49, 68, 97, 129, 145, 99, 240, 240, 16, 24, 14, 0, 18, 17, 225, 225, 240, 240, 162, 162, 33, 129, 65, 129, 19, 101, 177, 209, 225, 232, 0, 10, 96, 24, 15, 0, 17, 97, 194, 0, 7, 52, 146, 50, 129, 50, 129, 35, 102, 193, 225, 225, 226, 248, 0, 11, 64, 26, 15, 0, 17, 82, 211, 177, 33, 240, 240, 196, 161, 65, 129, 66, 129, 20, 86, 193, 
225, 241, 225, 248, 0, 11, 64, 22, 15, 0, 21, 17, 49, 0, 7, 5, 145, 66, 129, 20, 86, 193, 225, 225, 241, 248, 0, 11, 64, 20, 6, 0, 6, 98, 82, 97, 240, 17, 81, 81, 81, 81, 81, 81, 81, 97, 240, 240, 224, 20, 6, 0, 7, 2, 34, 240, 129, 81, 81, 81, 81, 81, 81, 81, 97, 81, 240, 240, 144, 19, 6, 0, 7, 82, 49, 33, 241, 81, 81, 81, 81, 81, 81, 81, 97, 240, 240, 240, 19, 7, 0, 10, 65, 49, 97, 177, 97, 97, 97, 97, 97, 97, 97, 113, 240, 240, 240, 22, 12, 0, 19, 101, 145, 240, 145, 178, 193, 97, 81, 65, 98, 33, 114, 34, 97, 86, 0, 9, 64, 31, 12, 0, 15, 33, 97, 49, 17, 65, 49, 50, 17, 161, 240, 98, 97, 36, 84, 33, 99, 34, 82, 65, 82, 66, 66, 81, 66, 0, 11, 80, 24, 12, 0, 16, 113, 193, 193, 240, 240, 66, 146, 145, 161, 82, 50, 97, 50, 113, 49, 98, 71, 0, 9, 48, 23, 12, 0, 17, 17, 146, 240, 240, 82, 146, 145, 161, 82, 50, 97, 50, 113, 49, 98, 71, 0, 10, 112, 25, 12, 0, 15, 81, 162, 145, 33, 240, 240, 50, 146, 145, 161, 82, 50, 97, 50, 113, 49, 98, 71, 0, 10, 112, 28, 12, 0, 16, 97, 161, 17, 65, 49, 50, 17, 161, 240, 98, 146, 145, 162, 81, 65, 97, 65, 113, 50, 82, 71, 0, 9, 48, 25, 12, 0, 16, 98, 49, 177, 240, 240, 34, 131, 130, 161, 97, 50, 98, 34, 113, 34, 113, 56, 99, 0, 9, 96, 16, 13, 0, 16, 97, 194, 240, 240, 59, 240, 240, 33, 193, 0, 18, 112, 29, 11, 0, 17, 49, 146, 145, 116, 98, 17, 98, 18, 82, 33, 33, 49, 34, 49, 33, 33, 65, 34, 17, 65, 55, 81, 0, 9, 32, 23, 11, 0, 15, 65, 177, 177, 240, 194, 145, 113, 33, 113, 34, 97, 34, 82, 50, 51, 70, 113, 0, 9, 23, 11, 0, 15, 113, 130, 129, 240, 226, 145, 113, 33, 113, 34, 97, 34, 82, 50, 51, 70, 113, 0, 9, 25, 11, 0, 15, 81, 145, 17, 113, 33, 240, 194, 145, 113, 33, 113, 34, 97, 34, 82, 50, 51, 70, 113, 0, 9, 26, 13, 0, 19, 98, 50, 177, 240, 65, 178, 113, 50, 114, 34, 114, 34, 114, 50, 83, 51, 51, 87, 131, 0, 10, 64, 26, 12, 0, 20, 17, 146, 240, 240, 161, 161, 49, 113, 65, 97, 82, 49, 130, 17, 177, 161, 177, 177, 161, 177, 240, 48, 27, 9, 0, 6, 103, 177, 129, 129, 133, 65, 51, 33, 81, 33, 81, 33, 81, 38, 49, 129, 129, 129, 129, 129, 
135, 0, 7, 25, 11, 0, 18, 17, 49, 240, 240, 241, 33, 113, 49, 97, 65, 65, 101, 161, 145, 161, 161, 145, 161, 161, 240, 16, }; //***************************************************************************** // // The glyph offset table for the 26 point Handwriting font. // //***************************************************************************** const uint16_t g_pui16FontOffsetHandwriting26[] = { 0, 5, 29, 45, 89, 141, 191, 244, 254, 285, 311, 324, 348, 359, 367, 376, 403, 455, 479, 525, 564, 613, 647, 678, 705, 747, 799, 811, 826, 847, 858, 879, 907, 978, 1022, 1063, 1100, 1147, 1181, 1223, 1259, 1308, 1333, 1370, 1416, 1458, 1521, 1573, 1623, 1657, 1699, 1742, 1771, 1805, 1852, 1896, 1960, 2001, 2045, 2089, 2120, 2145, 2173, 2192, 2200, 2210, 2237, 2267, 2286, 2324, 2345, 2369, 2397, 2435, 2451, 2479, 2511, 2536, 2563, 2586, 2605, 2628, 2657, 2677, 2695, 2721, 2740, 2761, 2790, 2808, 2830, 2849, 2877, 2901, 2929, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2946, 2971, 3007, 3052, 3074, 3118, 3138, 3176, 3187, 3233, 3251, 3286, 3302, 3309, 3361, 3369, 3386, 3409, 3430, 3446, 3455, 3495, 3546, 3557, 3569, 3582, 3599, 3639, 3688, 3733, 3789, 3818, 3861, 3905, 3947, 3991, 4034, 4077, 4124, 4152, 4188, 4223, 4257, 4294, 4317, 4341, 4367, 4392, 4437, 4490, 4535, 4577, 4621, 4665, 4709, 4735, 4793, 4838, 4881, 4927, 4971, 5007, 5038, 5083, 5112, 5139, 5167, 5197, 5223, 5250, 5284, 5305, 5329, 5353, 5379, 5401, 5421, 5441, 5460, 5479, 5501, 5532, 5556, 5579, 5604, 5632, 5657, 5673, 5702, 5725, 5748, 5773, 5799, 5825, 5852, }; //***************************************************************************** // // The font definition for the 26 point Handwriting font. // //***************************************************************************** const tFontEx g_sFontExHandwriting26 = { // // The format of the font. // FONT_FMT_EX_PIXEL_RLE, // // The maximum width of the font. 
// 26, // // The height of the font. // 30, // // The baseline of the font. // 24, // // The first encoded character in the font. // 32, // // The last encoded character in the font. // 255, // // A pointer to the character offset table. // g_pui16FontOffsetHandwriting26, // // A pointer to the actual font data // g_pui8Handwriting26Data };
249858.c
/************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ #include "miniz.h" typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 
1 : -1]; /* tail of the mz_validate_uint64 compile-time size check begun above
            (a negative array size forces a compile error if sizeof(mz_uint64) != 8) */

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- zlib-style API's */

/* mz_adler32(): computes the Adler-32 checksum of buf_len bytes at ptr,
 * continuing from the running checksum 'adler' (low 16 bits hold s1, high
 * 16 bits hold s2; the result packs them back the same way).
 * Passing ptr == NULL ignores buf_len and returns MZ_ADLER32_INIT, the seed
 * for the first call — the same contract as zlib's adler32(adler, Z_NULL, 0). */
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
{
    mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
    /* 5552 is zlib's NMAX: the largest number of bytes that can be summed
     * before the 32-bit accumulators must be reduced modulo 65521. */
    size_t block_len = buf_len % 5552;
    if (!ptr)
        return MZ_ADLER32_INIT;
    while (buf_len)
    {
        /* Inner loop unrolled to process 8 bytes per iteration. */
        for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
        {
            s1 += ptr[0], s2 += s1;
            s1 += ptr[1], s2 += s1;
            s1 += ptr[2], s2 += s1;
            s1 += ptr[3], s2 += s1;
            s1 += ptr[4], s2 += s1;
            s1 += ptr[5], s2 += s1;
            s1 += ptr[6], s2 += s1;
            s1 += ptr[7], s2 += s1;
        }
        /* Remaining 0-7 bytes of this block. */
        for (; i < block_len; ++i)
            s1 += *ptr++, s2 += s1;
        /* 65521 is the Adler-32 modulus (largest prime below 2^16). */
        s1 %= 65521U, s2 %= 65521U;
        buf_len -= block_len;
        block_len = 5552;
    }
    return (s2 << 16) + s1;
}

/* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
   implementation that balances processor cache usage against speed":
   http://www.geocities.com/malbrain/ */
#if 0
/* Disabled reference implementation: processes 4 bits at a time through a
 * 16-entry table — small cache footprint, slower than the 256-entry
 * table-driven variant selected in the #else branch below. */
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
{
    static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
                                           0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
    mz_uint32 crcu32 = (mz_uint32)crc;
    if (!ptr)
        return MZ_CRC32_INIT;
    crcu32 = ~crcu32;
    while (buf_len--)
    {
        mz_uint8 b = *ptr++;
        crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
        crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
    }
    return ~crcu32;
}
#else
/* Faster, but larger CPU cache footprint.
*/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; mz_uint32 crc32 = (mz_uint32)crc ^ 0xFFFFFFFF; const mz_uint8 *pByte_buf = (const mz_uint8 *)ptr; while (buf_len >= 4) { crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF]; crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF]; crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF]; pByte_buf += 4; buf_len -= 4; } while (buf_len) { crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; ++pByte_buf; --buf_len; } return ~crc32; } #endif void mz_free(void *p) { MZ_FREE(p); } void *miniz_def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } void miniz_def_free_func(void *opaque, void *address) { 
    (void)opaque, (void)address;
    MZ_FREE(address);
}
/* Default zlib-style realloc callback.
   NOTE(review): items * size is not overflow-checked here - presumably
   acceptable for internal callers; confirm before exposing to untrusted
   sizes. */
void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size)
{
    (void)opaque, (void)address, (void)items, (void)size;
    return MZ_REALLOC(address, items * size);
}

/* Returns the miniz version string (MZ_VERSION). */
const char *mz_version(void)
{
    return MZ_VERSION;
}

#ifndef MINIZ_NO_ZLIB_APIS

/* deflateInit() equivalent: default window bits, mem level 9. */
int mz_deflateInit(mz_streamp pStream, int level)
{
    return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
}

/* deflateInit2() equivalent. Only MZ_DEFLATED and +/-MZ_DEFAULT_WINDOW_BITS
   are accepted (negative window bits = raw deflate, no zlib header).
   Allocates the tdefl compressor via the stream's zalloc (installing the
   default allocator pair if the caller supplied none). */
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy)
{
    tdefl_compressor *pComp;
    mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
    if (!pStream)
        return MZ_STREAM_ERROR;
    if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
        return MZ_PARAM_ERROR;
    pStream->data_type = 0;
    pStream->adler = MZ_ADLER32_INIT;
    pStream->msg = NULL;
    pStream->reserved = 0;
    pStream->total_in = 0;
    pStream->total_out = 0;
    if (!pStream->zalloc)
        pStream->zalloc = miniz_def_alloc_func;
    if (!pStream->zfree)
        pStream->zfree = miniz_def_free_func;
    pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor));
    if (!pComp)
        return MZ_MEM_ERROR;
    pStream->state = (struct mz_internal_state *)pComp;
    if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY)
    {
        mz_deflateEnd(pStream);
        return MZ_PARAM_ERROR;
    }
    return MZ_OK;
}

/* deflateReset() equivalent: zeroes the totals and re-inits the compressor
   with its previously configured flags. */
int mz_deflateReset(mz_streamp pStream)
{
    if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree))
        return MZ_STREAM_ERROR;
    pStream->total_in = pStream->total_out = 0;
    tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags);
    return MZ_OK;
}

/* deflate() equivalent: compresses avail_in bytes from next_in into
   next_out, advancing the pointers/counters and updating the running
   adler. Returns MZ_STREAM_END when the stream is finished, MZ_OK when more
   calls are needed, MZ_BUF_ERROR when no forward progress is possible. */
int mz_deflate(mz_streamp pStream, int flush)
{
    size_t in_bytes, out_bytes;
    mz_ulong orig_total_in, orig_total_out;
    int mz_status = MZ_OK;
    if ((!pStream) ||
        (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out))
        return MZ_STREAM_ERROR;
    if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    /* Partial flush is treated as a sync flush. */
    if (flush == MZ_PARTIAL_FLUSH)
        flush = MZ_SYNC_FLUSH;
    /* Once tdefl reported DONE, only MZ_FINISH may follow. */
    if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE)
        return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
    orig_total_in = pStream->total_in;
    orig_total_out = pStream->total_out;
    for (;;)
    {
        tdefl_status defl_status;
        in_bytes = pStream->avail_in;
        out_bytes = pStream->avail_out;
        defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush);
        /* in_bytes/out_bytes now hold how much tdefl actually consumed/produced. */
        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
        pStream->next_out += (mz_uint)out_bytes;
        pStream->avail_out -= (mz_uint)out_bytes;
        pStream->total_out += (mz_uint)out_bytes;
        if (defl_status < 0)
        {
            mz_status = MZ_STREAM_ERROR;
            break;
        }
        else if (defl_status == TDEFL_STATUS_DONE)
        {
            mz_status = MZ_STREAM_END;
            break;
        }
        else if (!pStream->avail_out)
            break;
        else if ((!pStream->avail_in) && (flush != MZ_FINISH))
        {
            if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out))
                break;
            return MZ_BUF_ERROR; /* Can't make forward progress without some input.
 */
        }
    }
    return mz_status;
}

/* deflateEnd() equivalent: releases the compressor via the stream's zfree. */
int mz_deflateEnd(mz_streamp pStream)
{
    if (!pStream)
        return MZ_STREAM_ERROR;
    if (pStream->state)
    {
        pStream->zfree(pStream->opaque, pStream->state);
        pStream->state = NULL;
    }
    return MZ_OK;
}

/* deflateBound() equivalent: worst-case compressed size for source_len
   input bytes. */
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len)
{
    (void)pStream;
    /* This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.)
 */
    return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

/* compress2() equivalent: one-shot compression of pSource into pDest.
   On entry *pDest_len is the output capacity; on success it is updated to
   the compressed size. */
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level)
{
    int status;
    mz_stream stream;
    memset(&stream, 0, sizeof(stream));
    /* In case mz_ulong is 64-bits (argh I hate longs). */
    if ((source_len | *pDest_len) > 0xFFFFFFFFU)
        return MZ_PARAM_ERROR;
    stream.next_in = pSource;
    stream.avail_in = (mz_uint32)source_len;
    stream.next_out = pDest;
    stream.avail_out = (mz_uint32)*pDest_len;
    status = mz_deflateInit(&stream, level);
    if (status != MZ_OK)
        return status;
    status = mz_deflate(&stream, MZ_FINISH);
    if (status != MZ_STREAM_END)
    {
        mz_deflateEnd(&stream);
        /* MZ_OK here means the output buffer was too small for MZ_FINISH. */
        return (status == MZ_OK) ? MZ_BUF_ERROR : status;
    }
    *pDest_len = stream.total_out;
    return mz_deflateEnd(&stream);
}

/* compress() equivalent: mz_compress2() at the default level. */
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
{
    return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION);
}

/* compressBound() equivalent. */
mz_ulong mz_compressBound(mz_ulong source_len)
{
    return mz_deflateBound(NULL, source_len);
}

/* Per-stream inflate state: the tinfl core plus a circular LZ dictionary
   used when the caller's output buffer cannot hold the whole stream. */
typedef struct
{
    tinfl_decompressor m_decomp;
    mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
    int m_window_bits;
    mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
    tinfl_status m_last_status;
} inflate_state;

/* inflateInit2() equivalent. Only +/-MZ_DEFAULT_WINDOW_BITS are accepted
   (negative = raw deflate, no zlib header). Allocates inflate_state via the
   stream's zalloc, installing the default allocator pair if needed. */
int mz_inflateInit2(mz_streamp pStream, int window_bits)
{
    inflate_state *pDecomp;
    if (!pStream)
        return MZ_STREAM_ERROR;
    if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))
        return MZ_PARAM_ERROR;
    pStream->data_type = 0;
    pStream->adler = 0;
    pStream->msg = NULL;
    pStream->total_in = 0;
    pStream->total_out = 0;
    pStream->reserved = 0;
    if (!pStream->zalloc)
        pStream->zalloc = miniz_def_alloc_func;
    if (!pStream->zfree)
        pStream->zfree = miniz_def_free_func;
    pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state));
    if (!pDecomp)
        return MZ_MEM_ERROR;
    pStream->state = (struct mz_internal_state
                         *)pDecomp;
    tinfl_init(&pDecomp->m_decomp);
    pDecomp->m_dict_ofs = 0;
    pDecomp->m_dict_avail = 0;
    pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
    pDecomp->m_first_call = 1;
    pDecomp->m_has_flushed = 0;
    pDecomp->m_window_bits = window_bits;
    return MZ_OK;
}

/* inflateInit() equivalent: default window bits (zlib header expected). */
int mz_inflateInit(mz_streamp pStream)
{
    return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

/* inflate() equivalent: decompresses from next_in into next_out. Only
   0/MZ_SYNC_FLUSH/MZ_FINISH are accepted (MZ_PARTIAL_FLUSH is mapped to
   sync). Returns MZ_STREAM_END at end of stream, MZ_OK when more calls are
   needed, MZ_DATA_ERROR on corruption, MZ_BUF_ERROR when it cannot make
   progress. */
int mz_inflate(mz_streamp pStream, int flush)
{
    inflate_state *pState;
    mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
    size_t in_bytes, out_bytes, orig_avail_in;
    tinfl_status status;
    if ((!pStream) || (!pStream->state))
        return MZ_STREAM_ERROR;
    if (flush == MZ_PARTIAL_FLUSH)
        flush = MZ_SYNC_FLUSH;
    if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
        return MZ_STREAM_ERROR;
    pState = (inflate_state *)pStream->state;
    /* Positive window bits were configured => a zlib header is expected. */
    if (pState->m_window_bits > 0)
        decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
    orig_avail_in = pStream->avail_in;
    first_call = pState->m_first_call;
    pState->m_first_call = 0;
    if (pState->m_last_status < 0)
        return MZ_DATA_ERROR;
    if (pState->m_has_flushed && (flush != MZ_FINISH))
        return MZ_STREAM_ERROR;
    pState->m_has_flushed |= (flush == MZ_FINISH);
    if ((flush == MZ_FINISH) && (first_call))
    {
        /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file.
 */
        /* Single-shot path: decompress directly into the caller's buffer,
           bypassing the internal dictionary entirely. */
        decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        in_bytes = pStream->avail_in;
        out_bytes = pStream->avail_out;
        status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags);
        pState->m_last_status = status;
        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tinfl_get_adler32(&pState->m_decomp);
        pStream->next_out += (mz_uint)out_bytes;
        pStream->avail_out -= (mz_uint)out_bytes;
        pStream->total_out += (mz_uint)out_bytes;
        if (status < 0)
            return MZ_DATA_ERROR;
        else if (status != TINFL_STATUS_DONE)
        {
            /* Didn't finish in one shot: poison the state so further calls fail. */
            pState->m_last_status = TINFL_STATUS_FAILED;
            return MZ_BUF_ERROR;
        }
        return MZ_STREAM_END;
    }
    /* flush != MZ_FINISH then we must assume there's more input. */
    if (flush != MZ_FINISH)
        decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
    /* Drain any output still buffered in the dictionary from a prior call. */
    if (pState->m_dict_avail)
    {
        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
        return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ?
                   MZ_STREAM_END :
                   MZ_OK;
    }
    /* Streaming path: decompress into the circular dictionary, then copy as
       much as the caller's buffer will take. */
    for (;;)
    {
        in_bytes = pStream->avail_in;
        out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
        status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
        pState->m_last_status = status;
        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tinfl_get_adler32(&pState->m_decomp);
        pState->m_dict_avail = (mz_uint)out_bytes;
        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
        if (status < 0)
            return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */
        else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
            return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */
        else if (flush == MZ_FINISH)
        {
            /* The output buffer MUST be large to hold the remaining uncompressed data when flush==MZ_FINISH. */
            if (status == TINFL_STATUS_DONE)
                return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
            /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */
            else if (!pStream->avail_out)
                return MZ_BUF_ERROR;
        }
        else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail))
            break;
    }
    return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ?
               MZ_STREAM_END :
               MZ_OK;
}

/* inflateEnd() equivalent: releases the inflate state via the stream's
   zfree. */
int mz_inflateEnd(mz_streamp pStream)
{
    if (!pStream)
        return MZ_STREAM_ERROR;
    if (pStream->state)
    {
        pStream->zfree(pStream->opaque, pStream->state);
        pStream->state = NULL;
    }
    return MZ_OK;
}

/* uncompress() equivalent: one-shot decompression of pSource into pDest.
   On entry *pDest_len is the output capacity; on success it becomes the
   decompressed size. */
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
{
    mz_stream stream;
    int status;
    memset(&stream, 0, sizeof(stream));
    /* In case mz_ulong is 64-bits (argh I hate longs). */
    if ((source_len | *pDest_len) > 0xFFFFFFFFU)
        return MZ_PARAM_ERROR;
    stream.next_in = pSource;
    stream.avail_in = (mz_uint32)source_len;
    stream.next_out = pDest;
    stream.avail_out = (mz_uint32)*pDest_len;
    status = mz_inflateInit(&stream);
    if (status != MZ_OK)
        return status;
    status = mz_inflate(&stream, MZ_FINISH);
    if (status != MZ_STREAM_END)
    {
        mz_inflateEnd(&stream);
        /* BUF_ERROR with all input consumed means the stream was truncated. */
        return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status;
    }
    *pDest_len = stream.total_out;
    return mz_inflateEnd(&stream);
}

/* zError() equivalent: maps an MZ_* status code to a short description, or
   NULL for an unknown code. */
const char *mz_error(int err)
{
    static struct
    {
        int m_err;
        const char *m_pDesc;
    } s_error_descs[] =
        {
          { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" }
        };
    mz_uint i;
    for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
        if (s_error_descs[i].m_err == err)
            return s_error_descs[i].m_pDesc;
    return NULL;
}
#endif /*MINIZ_NO_ZLIB_APIS */

#ifdef __cplusplus
}
#endif

/* This is free and unencumbered software released into the public domain.
   Anyone is free to copy, modify, publish, use, compile, sell, or distribute
   this software, either in source code form or as a compiled binary, for any
   purpose, commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ /************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- Low-level Compression (independent from all decompression API's) */

/* Purposely making these tables static for faster init and thread safety. */

/* Maps (match length - 3), i.e. 0..255, to its DEFLATE length symbol
   (257..285, RFC 1951 section 3.2.5). */
static const mz_uint16 s_tdefl_len_sym[256] =
    {
      257, 258, 259, 260, 261, 262, 263, 264,
      265, 265, 266, 266, 267, 267, 268, 268,
      269, 269, 269, 269, 270, 270, 270, 270,
      271, 271, 271, 271, 272, 272, 272, 272,
      273, 273, 273, 273, 273, 273, 273, 273,
      274, 274, 274, 274, 274, 274, 274, 274,
      275, 275, 275, 275, 275, 275, 275, 275,
      276, 276, 276, 276, 276, 276, 276, 276,
      277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
      278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
      279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
      280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
      281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
      281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
      282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
      282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
      283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
      283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
      284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
      284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
    };

/* Extra bits carried by each (match length - 3); the last entry (length 258,
   symbol 285) has no extra bits. */
static const mz_uint8 s_tdefl_len_extra[256] =
    {
      0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
    };

/* Distance symbol for match distances < 512, indexed by (dist - 1) & 511. */
static const mz_uint8 s_tdefl_small_dist_sym[512] =
    {
      0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
      8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
      12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
      12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
      15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
      15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
      15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
    };

/* Extra bits for small distances, same indexing as above. */
static const mz_uint8 s_tdefl_small_dist_extra[512] =
    {
      0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
    };

/* Distance symbol for distances >= 512, indexed by (dist - 1) >> 8. */
static const mz_uint8 s_tdefl_large_dist_sym[128] =
    {
      0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
      24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
      26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
      27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
      28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
      28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
      29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
      29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
    };

/* Extra bits for large distances, same indexing as above. */
static const mz_uint8 s_tdefl_large_dist_extra[128] =
    {
      0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
      12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
      12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
    };

/* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values.
 */
/* (symbol frequency, symbol index) pair used while building Huffman tables. */
typedef struct
{
    mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
/* Two-pass byte-wise LSD radix sort of num_syms entries by m_key, ping-
   ponging between pSyms0 and pSyms1. The high-byte pass is skipped when
   every key fits in 8 bits (all entries land in hist[256], i.e. high byte
   zero). Returns whichever buffer holds the sorted result. */
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1)
{
    mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
    tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
    MZ_CLEAR_OBJ(hist);
    /* Build low-byte and high-byte histograms in one sweep. */
    for (i = 0; i < num_syms; i++)
    {
        mz_uint freq = pSyms0[i].m_key;
        hist[freq & 0xFF]++;
        hist[256 + ((freq >> 8) & 0xFF)]++;
    }
    while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
        total_passes--;
    for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
    {
        const mz_uint32 *pHist = &hist[pass << 8];
        mz_uint offsets[256], cur_ofs = 0;
        /* Prefix-sum the histogram into starting offsets. */
        for (i = 0; i < 256; i++)
        {
            offsets[i] = cur_ofs;
            cur_ofs += pHist[i];
        }
        for (i = 0; i < num_syms; i++)
            pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
        {
            tdefl_sym_freq *t = pCur_syms;
            pCur_syms = pNew_syms;
            pNew_syms = t;
        }
    }
    return pCur_syms;
}
/* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
 */
/* In-place Huffman code-length computation (Moffat/Katajainen boundary
   package-merge). On entry A[0..n-1].m_key holds symbol frequencies sorted
   ascending; on exit each m_key holds that symbol's code length in bits.
   The three phases below reuse A[] for tree parents, internal-node depths,
   and finally leaf depths - statement order is load-bearing. */
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
{
    int root, leaf, next, avbl, used, dpth;
    if (n == 0)
        return;
    else if (n == 1)
    {
        A[0].m_key = 1;
        return;
    }
    /* Phase 1: build the Huffman tree, storing parent indices in m_key. */
    A[0].m_key += A[1].m_key;
    root = 0;
    leaf = 2;
    for (next = 1; next < n - 1; next++)
    {
        if (leaf >= n || A[root].m_key < A[leaf].m_key)
        {
            A[next].m_key = A[root].m_key;
            A[root++].m_key = (mz_uint16)next;
        }
        else
            A[next].m_key = A[leaf++].m_key;
        if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key))
        {
            A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
            A[root++].m_key = (mz_uint16)next;
        }
        else
            A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
    }
    /* Phase 2: convert parent links into internal-node depths. */
    A[n - 2].m_key = 0;
    for (next = n - 3; next >= 0; next--)
        A[next].m_key = A[A[next].m_key].m_key + 1;
    /* Phase 3: assign leaf depths (= code lengths) from internal depths. */
    avbl = 1;
    used = dpth = 0;
    root = n - 2;
    next = n - 1;
    while (avbl > 0)
    {
        while (root >= 0 && (int)A[root].m_key == dpth)
        {
            used++;
            root--;
        }
        while (avbl > used)
        {
            A[next--].m_key = (mz_uint16)(dpth);
            avbl--;
        }
        avbl = 2 * used;
        dpth++;
        used = 0;
    }
}
/* Limits canonical Huffman code table's max code size.
 */
enum
{
    TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32
};
/* Clamps code lengths to max_code_size, then repairs the Kraft sum
   (sum of 2^(max-len) over all codes) back to exactly 2^max so the code
   set stays complete and decodable. */
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
{
    int i;
    mz_uint32 total = 0;
    if (code_list_len <= 1)
        return;
    for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
        pNum_codes[max_code_size] += pNum_codes[i];
    for (i = max_code_size; i > 0; i--)
        total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
    while (total != (1UL << max_code_size))
    {
        /* Move one code from the deepest level to one level shallower. */
        pNum_codes[max_code_size]--;
        for (i = max_code_size - 1; i > 0; i--)
            if (pNum_codes[i])
            {
                pNum_codes[i]--;
                pNum_codes[i + 1] += 2;
                break;
            }
        total--;
    }
}
/* Builds the canonical Huffman code for table_num (0=lit/len, 1=dist,
   2=code-length) from either the fixed code sizes already stored in
   d->m_huff_code_sizes (static_table) or the gathered symbol counts in
   d->m_huff_count (dynamic). Fills m_huff_code_sizes and the bit-reversed
   codes in m_huff_codes. */
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
{
    int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
    mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
    MZ_CLEAR_OBJ(num_codes);
    if (static_table)
    {
        for (i = 0; i < table_len; i++)
            num_codes[d->m_huff_code_sizes[table_num][i]]++;
    }
    else
    {
        tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
        int num_used_syms = 0;
        const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
        /* Collect only the symbols that actually occur. */
        for (i = 0; i < table_len; i++)
            if (pSym_count[i])
            {
                syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
                syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
            }
        pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
        tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
        for (i = 0; i < num_used_syms; i++)
            num_codes[pSyms[i].m_key]++;
        tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
        MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
        MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
        /* Hand out code sizes shortest-first; pSyms is sorted by frequency,
           so walk it backwards (most frequent symbol -> shortest code). */
        for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
            for (l = num_codes[i]; l > 0; l--)
                d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
    }
    /* Canonical code assignment (RFC 1951 3.2.2), then bit-reverse each
       code since DEFLATE emits Huffman codes MSB-first within LSB-first
       bytes. */
    next_code[1] = 0;
    for (j = 0, i = 2; i <= code_size_limit; i++)
        next_code[i] = j = ((j + num_codes[i - 1]) << 1);
    for (i = 0; i < table_len; i++)
    {
        mz_uint rev_code = 0, code, code_size;
        if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
            continue;
        code = next_code[code_size]++;
        for (l = code_size; l > 0; l--, code >>= 1)
            rev_code = (rev_code << 1) | (code & 1);
        d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
    }
}

/* Appends l bits of b to the output bit buffer, flushing whole bytes to
   d->m_pOutput_buf as they complete. */
#define TDEFL_PUT_BITS(b, l)                                       \
    do                                                             \
    {                                                              \
        mz_uint bits = b;                                          \
        mz_uint len = l;                                           \
        MZ_ASSERT(bits <= ((1U << len) - 1U));                     \
        d->m_bit_buffer |= (bits << d->m_bits_in);                 \
        d->m_bits_in += len;                                       \
        while (d->m_bits_in >= 8)                                  \
        {                                                          \
            if (d->m_pOutput_buf < d->m_pOutput_buf_end)           \
                *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
            d->m_bit_buffer >>= 8;                                 \
            d->m_bits_in -= 8;                                     \
        }                                                          \
    }                                                              \
    MZ_MACRO_END

/* Flushes a pending run of repeated non-zero code sizes, either as
   literal sizes (run < 3) or as repeat symbol 16 + 2-bit count. */
#define TDEFL_RLE_PREV_CODE_SIZE()                                                                                       \
    {                                                                                                                    \
        if (rle_repeat_count)                                                                                            \
        {                                                                                                                \
            if (rle_repeat_count < 3)                                                                                    \
            {                                                                                                            \
                d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
                while (rle_repeat_count--)                                                                               \
                    packed_code_sizes[num_packed_code_sizes++] = prev_code_size;                                         \
            }                                                                                                            \
            else                                                                                                         \
            {                                                                                                            \
                d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);                                        \
                packed_code_sizes[num_packed_code_sizes++] = 16;                                                         \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3);                           \
            }                                                                                                            \
            rle_repeat_count = 0;                                                                                        \
        }                                                                                                                \
    }

/* Flushes a pending run of zero code sizes: literally (run < 3), via
   symbol 17 (3..10 zeros) or symbol 18 (11..138 zeros). */
#define TDEFL_RLE_ZERO_CODE_SIZE()                                                         \
    {                                                                                      \
        if (rle_z_count)                                                                   \
        {                                                                                  \
            if (rle_z_count < 3)                                                           \
            {                                                                              \
                d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);  \
                while (rle_z_count--)                                                      \
                    packed_code_sizes[num_packed_code_sizes++] = 0;                        \
            }                                                                              \
            else if (rle_z_count <= 10)                                                    \
            {                                                                              \
                d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);          \
                packed_code_sizes[num_packed_code_sizes++] = 17;                           \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3);  \
            }                                                                              \
            else                                                                           \
            {                                                                              \
                d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);          \
                packed_code_sizes[num_packed_code_sizes++] = 18;                           \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
            }                                                                              \
            rle_z_count = 0;                                                               \
        }                                                                                  \
    }

static mz_uint8
s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, 
TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; 
pLZ_codes < pLZ_code_buf_end; flags >>= 1)
    {
        if (flags == 1)
            flags = *pLZ_codes++ | 0x100; /* reload next 8-entry flag byte; bit 8 marks the group boundary */

        if (flags & 1)
        {
            /* Match record: 1 length byte + 2-byte little-endian distance. */
            mz_uint s0, s1, n0, n1, sym, num_extra_bits;
            mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
            pLZ_codes += 3;

            MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);

            /* This sequence coaxes MSVC into using cmov's vs. jmp's. */
            s0 = s_tdefl_small_dist_sym[match_dist & 511];
            n0 = s_tdefl_small_dist_extra[match_dist & 511];
            s1 = s_tdefl_large_dist_sym[match_dist >> 8];
            n1 = s_tdefl_large_dist_extra[match_dist >> 8];
            sym = (match_dist < 512) ? s0 : s1;
            num_extra_bits = (match_dist < 512) ? n0 : n1;

            MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
        }
        else
        {
            /* Literal; opportunistically emit up to two more literals from the
             * same flag byte to amortize the per-iteration flush. */
            mz_uint lit = *pLZ_codes++;
            MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);

            if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
            {
                flags >>= 1;
                lit = *pLZ_codes++;
                MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
                TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);

                if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
                {
                    flags >>= 1;
                    lit = *pLZ_codes++;
                    MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
                    TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
                }
            }
        }

        if (pOutput_buf >= d->m_pOutput_buf_end)
            return MZ_FALSE;

        /* Flush the whole bytes of the bit buffer with one unaligned 64-bit store. */
        *(mz_uint64 *)pOutput_buf = bit_buffer;
        pOutput_buf += (bits_in >> 3);
        bit_buffer >>= (bits_in & ~7);
        bits_in &= 7;
    }

#undef TDEFL_PUT_BITS_FAST

    d->m_pOutput_buf = pOutput_buf;
    d->m_bits_in = 0;
    d->m_bit_buffer = 0;

    /* Push any remaining partial bits through the normal TDEFL_PUT_BITS path. */
    while (bits_in)
    {
        mz_uint32 n = MZ_MIN(bits_in, 16);
        TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
        bit_buffer >>= n;
        bits_in -= n;
    }

    TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); /* end-of-block symbol */

    return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
/* Portable path: Huffman-encode the buffered LZ codes one code at a time. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
{
    mz_uint flags;
    mz_uint8 *pLZ_codes;

    flags = 1;
    for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
    {
        if (flags == 1)
            flags = *pLZ_codes++ | 0x100; /* reload next 8-entry flag byte */
        if (flags & 1)
        {
            /* Match record: 1 length byte + 2-byte little-endian distance. */
            mz_uint sym, num_extra_bits;
            mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
            pLZ_codes += 3;

            MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);

            if (match_dist < 512)
            {
                sym = s_tdefl_small_dist_sym[match_dist];
                num_extra_bits = s_tdefl_small_dist_extra[match_dist];
            }
            else
            {
                sym = s_tdefl_large_dist_sym[match_dist >> 8];
                num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
            }
            MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
        }
        else
        {
            mz_uint lit = *pLZ_codes++;
            MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
            TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
        }
    }

    TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); /* end-of-block symbol */

    return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS */

/* Write one complete DEFLATE block: header (static or dynamic) followed by
 * the buffered LZ codes. */
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
{
    if (static_block)
        tdefl_start_static_block(d);
    else
        tdefl_start_dynamic_block(d);
    return tdefl_compress_lz_codes(d);
}

/* Flush the buffered LZ codes as one DEFLATE block, rolling back to a stored
 * (raw) block if compression expanded the data, then hand finished bytes to
 * the user's callback or output buffer. Returns the number of flush bytes
 * still pending, or a negative status on failure. */
static int tdefl_flush_block(tdefl_compressor *d, int flush)
{
    mz_uint saved_bit_buf, saved_bits_in;
    mz_uint8
*pSaved_output_buf;
    mz_bool comp_block_succeeded = MZ_FALSE;
    int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
    /* Write straight into the caller's buffer when it has room for a full
     * block; otherwise stage into the internal output buffer. */
    mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;

    d->m_pOutput_buf = pOutput_buf_start;
    d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

    MZ_ASSERT(!d->m_output_flush_remaining);
    d->m_output_flush_ofs = 0;
    d->m_output_flush_remaining = 0;

    /* Finalize the current (possibly partial) LZ flag byte. */
    *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
    d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

    if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
    {
        /* Two-byte zlib stream header before the first block. */
        TDEFL_PUT_BITS(0x78, 8);
        TDEFL_PUT_BITS(0x01, 8);
    }

    TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); /* BFINAL bit */

    /* Remember the bitstream position so the block can be rolled back if the
     * compressed form turns out larger than a stored block. */
    pSaved_output_buf = d->m_pOutput_buf;
    saved_bit_buf = d->m_bit_buffer;
    saved_bits_in = d->m_bits_in;

    if (!use_raw_block)
        comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));

    /* If the block gets expanded, forget the current contents of the output
     * buffer and send a raw block instead. */
    if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
        ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size))
    {
        mz_uint i;
        d->m_pOutput_buf = pSaved_output_buf;
        d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
        TDEFL_PUT_BITS(0, 2); /* BTYPE = 0: stored block */
        if (d->m_bits_in)
        {
            TDEFL_PUT_BITS(0, 8 - d->m_bits_in); /* stored blocks are byte aligned */
        }
        /* LEN followed by its one's complement NLEN. */
        for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
        {
            TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
        }
        for (i = 0; i < d->m_total_lz_bytes; ++i)
        {
            TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
        }
    }
    /* Check for the extremely unlikely (if not impossible) case of the compressed
     * block not fitting into the output buffer when using dynamic codes. */
    else if (!comp_block_succeeded)
    {
        d->m_pOutput_buf = pSaved_output_buf;
        d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
        tdefl_compress_block(d, MZ_TRUE);
    }

    if (flush)
    {
        if (flush == TDEFL_FINISH)
        {
            if (d->m_bits_in)
            {
                TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
            }
            if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER)
            {
                /* Trailing big-endian Adler-32 of the uncompressed data. */
                mz_uint i, a = d->m_adler32;
                for (i = 0; i < 4; i++)
                {
                    TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
                    a <<= 8;
                }
            }
        }
        else
        {
            /* Non-FINISH flush: emit an empty stored block so the decompressor
             * can consume everything produced so far. */
            mz_uint i, z = 0;
            TDEFL_PUT_BITS(0, 3);
            if (d->m_bits_in)
            {
                TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
            }
            for (i = 2; i; --i, z ^= 0xFFFF)
            {
                TDEFL_PUT_BITS(z & 0xFFFF, 16);
            }
        }
    }

    MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

    /* Reset per-block state for the next block. */
    memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
    memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
    d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
    d->m_pLZ_flags = d->m_lz_code_buf;
    d->m_num_flags_left = 8;
    d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
    d->m_total_lz_bytes = 0;
    d->m_block_index++;

    if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
    {
        if (d->m_pPut_buf_func)
        {
            *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
            if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
                return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
        }
        else if (pOutput_buf_start == d->m_output_buf)
        {
            /* Copy what fits into the user buffer; the remainder stays pending
             * in m_output_buf until a later call drains it. */
            int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
            memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
            d->m_out_buf_ofs += bytes_to_copy;
            if ((n -= bytes_to_copy) != 0)
            {
                d->m_output_flush_ofs = bytes_to_copy;
                d->m_output_flush_remaining = n;
            }
        }
        else
        {
            d->m_out_buf_ofs += n;
        }
    }

    return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#ifdef MINIZ_UNALIGNED_USE_MEMCPY
/* memcpy-based unaligned 16-bit loads (safe on strict-alignment targets). */
static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p)
{
    mz_uint16 ret;
    memcpy(&ret, p, sizeof(mz_uint16));
    return ret;
}
static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
{
    mz_uint16 ret;
    memcpy(&ret, p, sizeof(mz_uint16));
    return ret;
}
#else
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
#define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p)
#endif
/* Search the dictionary's hash chain for the longest match at lookahead_pos,
 * comparing two bytes at a time. *pMatch_dist / *pMatch_len are updated only
 * when a match longer than the incoming *pMatch_len is found. */
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
{
    mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
    mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
    const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
    /* c01: the two bytes a candidate must show at the current match end. */
    mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s);
    MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
    if (max_match_len <= match_len)
        return;
    for (;;)
    {
        for (;;)
        {
            if (--num_probes_left == 0)
                return;
/* Walk one hash-chain link; return from the whole search when the chain ends
 * or leaves the window, break to the length comparison on a 2-byte hit. */
#define TDEFL_PROBE                                                                             \
    next_probe_pos = d->m_next[probe_pos];                                                      \
    if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
        return;                                                                                 \
    probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                                       \
    if \
(TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
        break;
            TDEFL_PROBE;
            TDEFL_PROBE;
            TDEFL_PROBE;
        }
        if (!dist)
            break;
        /* Cheap reject: candidate must also match the first two bytes. */
        q = (const mz_uint16 *)(d->m_dict + probe_pos);
        if (TDEFL_READ_UNALIGNED_WORD2(q) != s01)
            continue;
        p = s;
        probe_len = 32;
        /* Extend the match 2 bytes per test, 4 tests per iteration, for at
         * most 32 iterations. */
        do
        {
        } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
        if (!probe_len)
        {
            /* Ran the full compare loop: treat as a maximum-length match. */
            *pMatch_dist = dist;
            *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN);
            break;
        }
        else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len)
        {
            *pMatch_dist = dist;
            if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len)
                break;
            /* New bytes-to-beat at the longer match end. */
            c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
        }
    }
}
#else
/* Portable byte-at-a-time variant of the match finder. Same contract as the
 * unaligned version above. */
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
{
    mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
    mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
    const mz_uint8 *s = d->m_dict + pos, *p, *q;
    mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
    MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
    if (max_match_len <= match_len)
        return;
    for (;;)
    {
        for (;;)
        {
            if (--num_probes_left == 0)
                return;
/* Walk one hash-chain link; return when the chain ends or leaves the window,
 * break to the full comparison on a 2-byte hit. */
#define TDEFL_PROBE                                                                               \
    next_probe_pos = d->m_next[probe_pos];                                                        \
    if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))   \
        return;                                                                                   \
    probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                                         \
    if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) \
        break;
            TDEFL_PROBE;
            TDEFL_PROBE;
            TDEFL_PROBE;
        }
        if (!dist)
            break;
        p = s;
        q = d->m_dict + probe_pos;
        for (probe_len = 0; probe_len < max_match_len; probe_len++)
            if (*p++ != *q++)
                break;
        if (probe_len > match_len)
        {
            *pMatch_dist = dist;
            if ((*pMatch_len = match_len = probe_len) == max_match_len)
                return;
            /* New bytes-to-beat at the longer match end. */
            c0 = d->m_dict[pos + match_len];
            c1 = d->m_dict[pos + match_len - 1];
        }
    }
}
#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES */

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d)
{
    /* Faster, minimally featured LZRW1-style match+parse loop with better
     * register utilization. Intended for applications where raw throughput is
     * valued more highly than ratio. */
    mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
    mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
    mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;

    while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
    {
        const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
        mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
        mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
        d->m_src_buf_left -= num_bytes_to_process;
        lookahead_size += num_bytes_to_process;

        /* Copy input into the circular dictionary, mirroring the first
         * TDEFL_MAX_MATCH_LEN-1 bytes past its end so match comparisons can
         * run off the edge without wrapping. */
        while (num_bytes_to_process)
        {
            mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
            memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
            if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
            d->m_pSrc += n;
            dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
            num_bytes_to_process -= n;
        }

        dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
        if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
            break;

        while (lookahead_size >= 4)
        {
            mz_uint
cur_match_dist, cur_match_len = 1;
            mz_uint8 *pCur_dict = d->m_dict + cur_pos;
            /* Hash the next 3 bytes (one trigram) into the level-1 hash table. */
            mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
            mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
            mz_uint probe_pos = d->m_hash[hash];
            d->m_hash[hash] = (mz_uint16)lookahead_pos;

            if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
            {
                const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
                const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
                mz_uint32 probe_len = 32;
                /* Extend the match 2 bytes per test, 4 tests per iteration,
                 * for at most 32 iterations. */
                do
                {
                } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
                         (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
                cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
                if (!probe_len)
                    cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;

                if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)))
                {
                    /* Too short (or a minimum-length far match): a literal is cheaper. */
                    cur_match_len = 1;
                    *pLZ_code_buf++ = (mz_uint8)first_trigram;
                    *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
                    d->m_huff_count[0][(mz_uint8)first_trigram]++;
                }
                else
                {
                    mz_uint32 s0, s1;
                    cur_match_len = MZ_MIN(cur_match_len, lookahead_size);

                    MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));

                    cur_match_dist--;

                    /* LZ record: 1 length byte + 2-byte little-endian distance. */
                    pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
                    *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
                    pLZ_code_buf += 3;
                    *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);

                    s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
                    s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
                    d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;

                    d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
                }
            }
            else
            {
                /* No trigram hit: emit a literal. */
                *pLZ_code_buf++ = (mz_uint8)first_trigram;
                *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
                d->m_huff_count[0][(mz_uint8)first_trigram]++;
            }

            if (--num_flags_left == 0)
            {
                num_flags_left = 8;
                pLZ_flags = pLZ_code_buf++;
            }

            total_lz_bytes += cur_match_len;
            lookahead_pos += cur_match_len;
            dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE);
            cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
            MZ_ASSERT(lookahead_size >= cur_match_len);
            lookahead_size -= cur_match_len;

            /* Flush the LZ code buffer before it can overflow. */
            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
            {
                int n;
                d->m_lookahead_pos = lookahead_pos;
                d->m_lookahead_size = lookahead_size;
                d->m_dict_size = dict_size;
                d->m_total_lz_bytes = total_lz_bytes;
                d->m_pLZ_code_buf = pLZ_code_buf;
                d->m_pLZ_flags = pLZ_flags;
                d->m_num_flags_left = num_flags_left;
                if ((n = tdefl_flush_block(d, 0)) != 0)
                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
                total_lz_bytes = d->m_total_lz_bytes;
                pLZ_code_buf = d->m_pLZ_code_buf;
                pLZ_flags = d->m_pLZ_flags;
                num_flags_left = d->m_num_flags_left;
            }
        }

        /* Emit any remaining tail (fewer than 4 bytes) as literals. */
        while (lookahead_size)
        {
            mz_uint8 lit = d->m_dict[cur_pos];

            total_lz_bytes++;
            *pLZ_code_buf++ = lit;
            *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
            if (--num_flags_left == 0)
            {
                num_flags_left = 8;
                pLZ_flags = pLZ_code_buf++;
            }

            d->m_huff_count[0][lit]++;

            lookahead_pos++;
            dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE);
            cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
            lookahead_size--;

            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
            {
                int n;
                d->m_lookahead_pos = lookahead_pos;
                d->m_lookahead_size = lookahead_size;
                d->m_dict_size = dict_size;
                d->m_total_lz_bytes = total_lz_bytes;
                d->m_pLZ_code_buf = pLZ_code_buf;
                d->m_pLZ_flags = pLZ_flags;
                d->m_num_flags_left = num_flags_left;
                if ((n = tdefl_flush_block(d, 0)) != 0)
                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
                total_lz_bytes = d->m_total_lz_bytes;
                pLZ_code_buf = d->m_pLZ_code_buf;
                pLZ_flags = d->m_pLZ_flags;
                num_flags_left = d->m_num_flags_left;
            }
        }
    }

    d->m_lookahead_pos = lookahead_pos;
    d->m_lookahead_size = lookahead_size;
    d->m_dict_size = dict_size;
    d->m_total_lz_bytes = total_lz_bytes;
    d->m_pLZ_code_buf = pLZ_code_buf;
    d->m_pLZ_flags = pLZ_flags;
    d->m_num_flags_left = num_flags_left;
    return MZ_TRUE;
}
#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */

/* Append one literal byte to the LZ code buffer and update its histogram. */
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
{
    d->m_total_lz_bytes++;
    *d->m_pLZ_code_buf++ = lit;
    *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
    if (--d->m_num_flags_left == 0)
    {
        d->m_num_flags_left = 8;
        d->m_pLZ_flags = d->m_pLZ_code_buf++;
    }
    d->m_huff_count[0][lit]++;
}

/* Append one match (length, distance) to the LZ code buffer and update the
 * length/distance symbol histograms. */
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
{
    mz_uint32 s0, s1;

    MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));

    d->m_total_lz_bytes +=
match_len;

    /* LZ record: 1 length byte + 2-byte little-endian distance. */
    d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);

    match_dist -= 1;
    d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
    d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
    d->m_pLZ_code_buf += 3;

    *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
    if (--d->m_num_flags_left == 0)
    {
        d->m_num_flags_left = 8;
        d->m_pLZ_flags = d->m_pLZ_code_buf++;
    }

    s0 = s_tdefl_small_dist_sym[match_dist & 511];
    s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
    d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;

    if (match_len >= TDEFL_MIN_MATCH_LEN)
        d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

/* General-purpose compression loop: fills the dictionary and hash chains,
 * finds matches, and performs simple greedy/lazy parsing. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d)
{
    const mz_uint8 *pSrc = d->m_pSrc;
    size_t src_buf_left = d->m_src_buf_left;
    tdefl_flush flush = d->m_flush;

    while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
    {
        mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
        /* Update dictionary and hash chains. Keeps the lookahead size equal to
         * TDEFL_MAX_MATCH_LEN. */
        if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
        {
            /* Bulk path: enough context exists to hash as bytes are copied in. */
            mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
            mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
            mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
            const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
            src_buf_left -= num_bytes_to_process;
            d->m_lookahead_size += num_bytes_to_process;
            while (pSrc != pSrc_end)
            {
                mz_uint8 c = *pSrc++;
                d->m_dict[dst_pos] = c;
                /* Mirror the window's first bytes past its end so matches can
                 * run off the edge without wrapping. */
                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
                hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
                d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
                d->m_hash[hash] = (mz_uint16)(ins_pos);
                dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
                ins_pos++;
            }
        }
        else
        {
            /* Startup path: byte at a time until 3 bytes of context exist. */
            while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
            {
                mz_uint8 c = *pSrc++;
                mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
                src_buf_left--;
                d->m_dict[dst_pos] = c;
                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
                if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
                {
                    mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
                    mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
                    d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
                    d->m_hash[hash] = (mz_uint16)(ins_pos);
                }
            }
        }
        d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
        if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
            break;

        /* Simple lazy/greedy parsing state machine. */
        len_to_move = 1;
        cur_match_dist = 0;
        cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
        cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
        if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
        {
            /* RLE mode: only look for runs repeating the previous byte (dist 1). */
            if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
            {
                mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
                cur_match_len = 0;
                while (cur_match_len < d->m_lookahead_size)
                {
                    if (d->m_dict[cur_pos + cur_match_len] != c)
                        break;
                    cur_match_len++;
                }
                if (cur_match_len < TDEFL_MIN_MATCH_LEN)
                    cur_match_len = 0;
                else
                    cur_match_dist = 1;
            }
        }
        else
        {
            tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
        }
        /* Drop matches that would cost more than literals: minimum-length far
         * matches, dist == pos cases, and (in filter mode) short matches. */
        if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
        {
            cur_match_dist = cur_match_len = 0;
        }
        if (d->m_saved_match_len)
        {
            /* Lazy parse: a match was deferred at the previous position. */
            if (cur_match_len > d->m_saved_match_len)
            {
                tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
                if (cur_match_len >= 128)
                {
                    tdefl_record_match(d, cur_match_len, cur_match_dist);
                    d->m_saved_match_len = 0;
                    len_to_move = cur_match_len;
                }
                else
                {
                    d->m_saved_lit = d->m_dict[cur_pos];
                    d->m_saved_match_dist = cur_match_dist;
                    d->m_saved_match_len = cur_match_len;
                }
            }
            else
            {
                tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
                len_to_move = d->m_saved_match_len - 1;
                d->m_saved_match_len = 0;
            }
        }
        else if (!cur_match_dist)
            tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
        else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
        {
            /* Greedy: take the match immediately (long matches always taken). */
            tdefl_record_match(d, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len;
        }
        else
        {
            /* Defer the match one position in case a longer one starts there. */
            d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
            d->m_saved_match_dist = cur_match_dist;
            d->m_saved_match_len = cur_match_len;
        }
        /* Move the lookahead forward by len_to_move bytes.
*/
        d->m_lookahead_pos += len_to_move;
        MZ_ASSERT(d->m_lookahead_size >= len_to_move);
        d->m_lookahead_size -= len_to_move;
        d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
        /* Check if it's time to flush the current LZ codes to the internal
         * output buffer. */
        if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
            ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))))
        {
            int n;
            d->m_pSrc = pSrc;
            d->m_src_buf_left = src_buf_left;
            if ((n = tdefl_flush_block(d, 0)) != 0)
                return (n < 0) ? MZ_FALSE : MZ_TRUE;
        }
    }

    d->m_pSrc = pSrc;
    d->m_src_buf_left = src_buf_left;
    return MZ_TRUE;
}

/* Drain pending bytes from the internal staging buffer into the caller's
 * output buffer and update the reported in/out sizes. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
{
    if (d->m_pIn_buf_size)
    {
        *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
    }

    if (d->m_pOut_buf_size)
    {
        size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
        memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
        d->m_output_flush_ofs += (mz_uint)n;
        d->m_output_flush_remaining -= (mz_uint)n;
        d->m_out_buf_ofs += n;

        *d->m_pOut_buf_size = d->m_out_buf_ofs;
    }

    return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
}

/* Main streaming entry point: compress up to *pIn_buf_size input bytes into
 * up to *pOut_buf_size output bytes. Both sizes are updated on return to the
 * amounts actually consumed/produced. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
{
    if (!d)
    {
        if (pIn_buf_size)
            *pIn_buf_size = 0;
        if (pOut_buf_size)
            *pOut_buf_size = 0;
        return TDEFL_STATUS_BAD_PARAM;
    }

    d->m_pIn_buf = pIn_buf;
    d->m_pIn_buf_size = pIn_buf_size;
    d->m_pOut_buf = pOut_buf;
    d->m_pOut_buf_size = pOut_buf_size;
    d->m_pSrc = (const mz_uint8 *)(pIn_buf);
    d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
    d->m_out_buf_ofs = 0;
    d->m_flush = flush;

    /* Callback mode and buffer mode are mutually exclusive; also reject calls
     * after a prior error or once FINISH has been requested. */
    if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
        (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf))
    {
        if (pIn_buf_size)
            *pIn_buf_size = 0;
        if (pOut_buf_size)
            *pOut_buf_size = 0;
        return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
    }
    d->m_wants_to_finish |= (flush == TDEFL_FINISH);

    if ((d->m_output_flush_remaining) || (d->m_finished))
        return (d->m_prev_return_status = tdefl_flush_output_buffer(d));

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
    /* Single-probe, greedy, no-filter settings can take the fast path. */
    if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
        ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
        ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
    {
        if (!tdefl_compress_fast(d))
            return d->m_prev_return_status;
    }
    else
#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
    {
        if (!tdefl_compress_normal(d))
            return d->m_prev_return_status;
    }

    if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
        d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);

    if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
    {
        if (tdefl_flush_block(d, flush) < 0)
            return d->m_prev_return_status;
        d->m_finished = (flush == TDEFL_FINISH);
        if (flush == TDEFL_FULL_FLUSH)
        {
            /* A full flush resets the dictionary so subsequent output does not
             * reference earlier data. */
            MZ_CLEAR_OBJ(d->m_hash);
            MZ_CLEAR_OBJ(d->m_next);
            d->m_dict_size = 0;
        }
    }

    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

/* Callback-mode convenience wrapper around tdefl_compress(). */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
{
    MZ_ASSERT(d->m_pPut_buf_func);
    return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

/* Initialize a compressor; the low 12 bits of flags select the max number of
 * hash-chain probes, the remaining bits are TDEFL_* option flags. */
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    d->m_pPut_buf_func = pPut_buf_func;
    d->m_pPut_buf_user = pPut_buf_user;
    d->m_flags = (mz_uint)(flags);
    /* Low 12 bits of flags select the maximum hash-probe count. */
    d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
    d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
    d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
        MZ_CLEAR_OBJ(d->m_hash);
    d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
    d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
    d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
    d->m_pLZ_flags = d->m_lz_code_buf;
    d->m_num_flags_left = 8;
    d->m_pOutput_buf = d->m_output_buf;
    d->m_pOutput_buf_end = d->m_output_buf;
    d->m_prev_return_status = TDEFL_STATUS_OKAY;
    d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
    d->m_adler32 = 1;
    d->m_pIn_buf = NULL;
    d->m_pOut_buf = NULL;
    d->m_pIn_buf_size = NULL;
    d->m_pOut_buf_size = NULL;
    d->m_flush = TDEFL_NO_FLUSH;
    d->m_pSrc = NULL;
    d->m_src_buf_left = 0;
    d->m_out_buf_ofs = 0;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
        MZ_CLEAR_OBJ(d->m_dict);
    memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
    memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
    return TDEFL_STATUS_OKAY;
}

/* Status of the previous tdefl_compress()/tdefl_compress_buffer() call. */
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
{
    return d->m_prev_return_status;
}

/* Adler-32 of all source data consumed so far. */
mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
{
    return d->m_adler32;
}

/* One-shot: compress pBuf[0..buf_len) through the user's output callback. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    tdefl_compressor *pComp;
    mz_bool succeeded;
    if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
        return MZ_FALSE;
    pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
    if (!pComp)
        return MZ_FALSE;
    succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
    succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
    MZ_FREE(pComp);
    return succeeded;
}

/* Growable (or fixed-capacity) destination buffer used by the mem-to-heap and
 * mem-to-mem helpers below. */
typedef struct
{
    size_t m_size, m_capacity;
    mz_uint8 *m_pBuf;
    mz_bool m_expandable;
} tdefl_output_buffer;

/* tdefl_put_buf_func_ptr adapter appending to a tdefl_output_buffer, doubling
 * its capacity as needed when expandable. */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
{
    tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
    size_t new_size = p->m_size + len;
    if (new_size > p->m_capacity)
    {
        size_t new_capacity = p->m_capacity;
        mz_uint8 *pNew_buf;
        if (!p->m_expandable)
            return MZ_FALSE;
        do
        {
            new_capacity = MZ_MAX(128U, new_capacity << 1U);
        } while (new_size > new_capacity);
        pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
        if (!pNew_buf)
            return MZ_FALSE;
        p->m_pBuf = pNew_buf;
        p->m_capacity = new_capacity;
    }
    memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
    p->m_size = new_size;
    return MZ_TRUE;
}

/* One-shot: compress into a heap-allocated buffer. Caller owns (and frees)
 * the returned pointer; *pOut_len receives its size. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
{
    tdefl_output_buffer out_buf;
    MZ_CLEAR_OBJ(out_buf);
    if (!pOut_len)
        return MZ_FALSE;
    else
        *pOut_len = 0;
    out_buf.m_expandable = MZ_TRUE;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
        return NULL;
    *pOut_len = out_buf.m_size;
    return out_buf.m_pBuf;
}

/* One-shot: compress into a caller-provided buffer. Returns the compressed
 * size, or 0 on failure (including insufficient room). */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
{
    tdefl_output_buffer out_buf;
    MZ_CLEAR_OBJ(out_buf);
    if (!pOut_buf)
        return 0;
    out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
    out_buf.m_capacity = out_buf_len;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
        return 0;
    return out_buf.m_size;
}

static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };

/* level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression
and it's fine if throughput to fall off a cliff on some files). */ mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) */ #endif /* Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck. */ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { /* Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined. 
 */
    static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
    tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
    tdefl_output_buffer out_buf;
    int i, bpl = w * num_chans, y, z;
    mz_uint32 c;
    *pLen_out = 0;
    if (!pComp)
        return NULL;
    MZ_CLEAR_OBJ(out_buf);
    out_buf.m_expandable = MZ_TRUE;
    /* 57 = 41-byte header placeholder + 16-byte IDAT-CRC/IEND footer. */
    out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
    if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity)))
    {
        MZ_FREE(pComp);
        return NULL;
    }
    /* write dummy header */
    for (z = 41; z; --z)
        tdefl_output_buffer_putter(&z, 1, &out_buf);
    /* compress image data */
    tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
    for (y = 0; y < h; ++y)
    {
        /* Each scanline is preceded by a PNG filter-type byte; z is 0 here
           (the dummy-header loop above exits when z reaches 0). */
        tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
        tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH);
    }
    if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE)
    {
        MZ_FREE(pComp);
        MZ_FREE(out_buf.m_pBuf);
        return NULL;
    }
    /* write real header */
    *pLen_out = out_buf.m_size - 41;
    {
        /* PNG color-type byte per channel count (1=gray, 2=gray+alpha, 3=RGB, 4=RGBA). */
        static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
        mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x44, 0x41, 0x54 };
        pnghdr[18] = (mz_uint8)(w >> 8);
        pnghdr[19] = (mz_uint8)w;
        pnghdr[22] = (mz_uint8)(h >> 8);
        pnghdr[23] = (mz_uint8)h;
        pnghdr[25] = chans[num_chans];
        pnghdr[33] = (mz_uint8)(*pLen_out >> 24);
        pnghdr[34] = (mz_uint8)(*pLen_out >> 16);
        pnghdr[35] = (mz_uint8)(*pLen_out >> 8);
        pnghdr[36] = (mz_uint8)*pLen_out;
        /* IHDR CRC-32 covers the 17 bytes starting at the chunk-type field. */
        c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
        for (i = 0; i < 4; ++i, c <<= 8)
            ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
        memcpy(out_buf.m_pBuf, pnghdr, 41);
    }
    /* write footer (IDAT CRC-32,
 followed by IEND chunk) */
    if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf))
    {
        *pLen_out = 0;
        MZ_FREE(pComp);
        MZ_FREE(out_buf.m_pBuf);
        return NULL;
    }
    /* IDAT CRC-32 covers the chunk type plus the zlib stream written above. */
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4);
    for (i = 0; i < 4; ++i, c <<= 8)
        (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
    /* compute final size of file, grab compressed data buffer and return */
    *pLen_out += 57;
    MZ_FREE(pComp);
    return out_buf.m_pBuf;
}

/* Convenience wrapper: PNG-encode at level 6 with no vertical flip. */
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out)
{
    /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's where #defined out) */
    return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE);
}

/* Allocate the tdefl_compressor and tinfl_decompressor structures in C so that */
/* non-C language bindings to tdefL_ and tinfl_ API don't need to worry about */
/* structure size and allocation mechanism. */
tdefl_compressor *tdefl_compressor_alloc()
{
    return (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
}

void tdefl_compressor_free(tdefl_compressor *pComp)
{
    MZ_FREE(pComp);
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#ifdef __cplusplus
}
#endif
/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- Low-level Decompression (completely independent from all compression API's) */

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

/* tinfl_decompress() is written as a coroutine: r->m_state records the resume
   point, TINFL_CR_RETURN saves it and jumps to common_exit, and the switch
   opened by TINFL_CR_BEGIN jumps back to the matching case label on re-entry. */
#define TINFL_CR_BEGIN  \
    switch (r->m_state) \
    {                   \
        case 0:
#define TINFL_CR_RETURN(state_index, result) \
    do                                       \
    {                                        \
        status = result;                     \
        r->m_state = state_index;            \
        goto common_exit;                    \
        case state_index:;                   \
    }                                        \
    MZ_MACRO_END
/* Terminal return: once entered, every subsequent call re-returns the same result. */
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
    do                                               \
    {                                                \
        for (;;)                                     \
        {                                            \
            TINFL_CR_RETURN(state_index, result);    \
        }                                            \
    }                                                \
    MZ_MACRO_END
#define TINFL_CR_FINISH }

/* Fetch the next input byte, suspending the coroutine when input is exhausted. */
#define TINFL_GET_BYTE(state_index, c)                                                                                                                           \
    do                                                                                                                                                           \
    {                                                                                                                                                            \
        while (pIn_buf_cur >= pIn_buf_end)                                                                                                                       \
        {                                                                                                                                                        \
            TINFL_CR_RETURN(state_index, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); \
        }                                                                                                                                                        \
        c = *pIn_buf_cur++;                                                                                                                                      \
    }                                                                                                                                                            \
    MZ_MACRO_END

/* Refill the bit buffer one byte at a time until at least n bits are present. */
#define TINFL_NEED_BITS(state_index, n)                \
    do                                                 \
    {                                                  \
        mz_uint c;                                     \
        TINFL_GET_BYTE(state_index, c);                \
        bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
        num_bits += 8;                                 \
    } while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n)      \
    do                                       \
    {                                        \
        if (num_bits < (mz_uint)(n))         \
        {                                    \
            TINFL_NEED_BITS(state_index, n); \
        }                                    \
        bit_buf >>= (n);                     \
        num_bits -= (n);                     \
    }                                        \
    MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n)    \
    do                                       \
    {                                        \
        if (num_bits < (mz_uint)(n))         \
        {                                    \
            TINFL_NEED_BITS(state_index, n); \
        }                                    \
        b = bit_buf & ((1 << (n)) - 1);      \
        bit_buf >>= (n);                     \
        num_bits -= (n);                     \
    }                                        \
    MZ_MACRO_END

/* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. */
/* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */
/* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */
/* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                             \
    do                                                                         \
    {                                                                          \
        temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)];     \
        if (temp >= 0)                                                         \
        {                                                                      \
            code_len = temp >> 9;                                              \
            if ((code_len) && (num_bits >= code_len))                          \
                break;                                                         \
        }                                                                      \
        else if (num_bits > TINFL_FAST_LOOKUP_BITS)                            \
        {                                                                      \
            code_len = TINFL_FAST_LOOKUP_BITS;                                 \
            do                                                                 \
            {                                                                  \
                temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
            } while ((temp < 0) && (num_bits >= (code_len + 1)));              \
            if (temp >= 0)                                                     \
                break;                                                         \
        }                                                                      \
        TINFL_GET_BYTE(state_index, c);                                        \
        bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                         \
        num_bits += 8;                                                         \
    } while (num_bits < 15);

/* TINFL_HUFF_DECODE() decodes the next Huffman coded symbol.
   It's more complex than you would initially expect because the zlib API expects the decompressor to never read */
/* beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully */
/* decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. */
/* The slow path is only executed at the very end of the input buffer. */
/* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */
/* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                                                                                  \
    do                                                                                                                              \
    {                                                                                                                               \
        int temp;                                                                                                                   \
        mz_uint code_len, c;                                                                                                        \
        if (num_bits < 15)                                                                                                          \
        {                                                                                                                           \
            if ((pIn_buf_end - pIn_buf_cur) < 2)                                                                                    \
            {                                                                                                                       \
                TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                                                                         \
            }                                                                                                                       \
            else                                                                                                                    \
            {                                                                                                                       \
                bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
                pIn_buf_cur += 2;                                                                                                   \
                num_bits += 16;                                                                                                     \
            }                                                                                                                       \
        }                                                                                                                           \
        if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)                                               \
            code_len = temp >> 9, temp &= 511;                                                                                      \
        else                                                                                                                        \
        {                                                                                                                           \
            code_len = TINFL_FAST_LOOKUP_BITS;                                                                                      \
            do                                                                                                                      \
            {                                                                                                                       \
                temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];                                                      \
            } while (temp < 0);                                                                                                     \
        }                                                                                                                           \
        sym = temp;                                                                                                                 \
        bit_buf >>= code_len;                                                                                                       \
        num_bits -= code_len;                                                                                                       \
    }                                                                                                                               \
    MZ_MACRO_END

/* Incremental (resumable) raw-deflate/zlib decompressor. Consumes up to
   *pIn_buf_size input bytes and writes up to *pOut_buf_size output bytes,
   updating both with the amounts actually used; the tinfl_status return
   tells the caller whether to supply more input, drain output, or stop. */
tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
{
    /* Length-code base values/extra bits per RFC 1951 (codes 257..285). */
    static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
    static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
    /* Distance-code base values/extra bits and the code-length-code reorder
       table, straight from RFC 1951. */
    static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
    static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
    static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
    /* Minimum symbol counts for the lit/len, distance, and code-length tables. */
    static const int s_min_table_sizes[3] = { 257, 1, 4 };
    tinfl_status status = TINFL_STATUS_FAILED;
    mz_uint32 num_bits, dist, counter, num_extra;
    tinfl_bit_buf_t bit_buf;
    const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
    mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
    /* Mask for wrapping match reads/writes in a circular dictionary buffer;
       all-ones when the caller promises a non-wrapping output buffer. */
    size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;

    /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter).
 */
    if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start))
    {
        *pIn_buf_size = *pOut_buf_size = 0;
        return TINFL_STATUS_BAD_PARAM;
    }

    /* Restore the coroutine's saved registers from the decompressor state. */
    num_bits = r->m_num_bits;
    bit_buf = r->m_bit_buf;
    dist = r->m_dist;
    counter = r->m_counter;
    num_extra = r->m_num_extra;
    dist_from_out_buf_start = r->m_dist_from_out_buf_start;
    TINFL_CR_BEGIN

    bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
    r->m_z_adler32 = r->m_check_adler32 = 1;
    if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
    {
        /* Validate the 2-byte zlib header: CMF*256+FLG divisible by 31, no
           preset dictionary bit, compression method 8 (deflate), and (for a
           wrapping output buffer) a window that fits the buffer. */
        TINFL_GET_BYTE(1, r->m_zhdr0);
        TINFL_GET_BYTE(2, r->m_zhdr1);
        counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
        if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
            counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
        if (counter)
        {
            TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
        }
    }

    do
    {
        /* 3-bit block header: bit 0 = final block, bits 1-2 = block type. */
        TINFL_GET_BITS(3, r->m_final, 3);
        r->m_type = r->m_final >> 1;
        if (r->m_type == 0)
        {
            /* Stored (uncompressed) block: skip to a byte boundary, then read
               and cross-check the LEN/NLEN fields. */
            TINFL_SKIP_BITS(5, num_bits & 7);
            for (counter = 0; counter < 4; ++counter)
            {
                if (num_bits)
                    TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
                else
                    TINFL_GET_BYTE(7, r->m_raw_header[counter]);
            }
            if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8))))
            {
                TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
            }
            /* Drain any whole bytes still sitting in the bit buffer first. */
            while ((counter) && (num_bits))
            {
                TINFL_GET_BITS(51, dist, 8);
                while (pOut_buf_cur >= pOut_buf_end)
                {
                    TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
                }
                *pOut_buf_cur++ = (mz_uint8)dist;
                counter--;
            }
            /* Then bulk-copy straight from input to output. */
            while (counter)
            {
                size_t n;
                while (pOut_buf_cur >= pOut_buf_end)
                {
                    TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
                }
                while (pIn_buf_cur >= pIn_buf_end)
                {
                    TINFL_CR_RETURN(38, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ?
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS);
                }
                n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
                TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
                pIn_buf_cur += n;
                pOut_buf_cur += n;
                counter -= (mz_uint)n;
            }
        }
        else if (r->m_type == 3)
        {
            /* Block type 3 is reserved/invalid. */
            TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
        }
        else
        {
            if (r->m_type == 1)
            {
                /* Static Huffman block: fixed code lengths from RFC 1951 3.2.6. */
                mz_uint8 *p = r->m_tables[0].m_code_size;
                mz_uint i;
                r->m_table_sizes[0] = 288;
                r->m_table_sizes[1] = 32;
                TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
                for (i = 0; i <= 143; ++i)
                    *p++ = 8;
                for (; i <= 255; ++i)
                    *p++ = 9;
                for (; i <= 279; ++i)
                    *p++ = 7;
                for (; i <= 287; ++i)
                    *p++ = 8;
            }
            else
            {
                /* Dynamic Huffman block: read HLIT/HDIST/HCLEN (5/5/4 bits),
                   then the code-length code sizes in s_length_dezigzag order. */
                for (counter = 0; counter < 3; counter++)
                {
                    TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
                    r->m_table_sizes[counter] += s_min_table_sizes[counter];
                }
                MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
                for (counter = 0; counter < r->m_table_sizes[2]; counter++)
                {
                    mz_uint s;
                    TINFL_GET_BITS(14, s, 3);
                    r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
                }
                r->m_table_sizes[2] = 19;
            }
            /* Build decode tables: the code-length table first (index 2), then
               for dynamic blocks the literal/length (0) and distance (1) tables. */
            for (; (int)r->m_type >= 0; r->m_type--)
            {
                int tree_next, tree_cur;
                tinfl_huff_table *pTable;
                mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16];
                pTable = &r->m_tables[r->m_type];
                MZ_CLEAR_OBJ(total_syms);
                MZ_CLEAR_OBJ(pTable->m_look_up);
                MZ_CLEAR_OBJ(pTable->m_tree);
                /* Canonical Huffman: count codes per length, derive the first
                   code of each length, and reject over/under-subscribed trees. */
                for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
                    total_syms[pTable->m_code_size[i]]++;
                used_syms = 0, total = 0;
                next_code[0] = next_code[1] = 0;
                for (i = 1; i <= 15; ++i)
                {
                    used_syms += total_syms[i];
                    next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
                }
                if ((65536 != total) && (used_syms > 1))
                {
                    TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
                }
                for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
                {
                    mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index];
                    if (!code_size)
                        continue;
                    cur_code = next_code[code_size]++;
                    /* Deflate codes are transmitted LSB-first: bit-reverse. */
                    for (l
= code_size; l > 0; l--, cur_code >>= 1)
                        rev_code = (rev_code << 1) | (cur_code & 1);
                    /* Short codes: replicate the (length<<9)|symbol entry into
                       every fast-lookup slot sharing this bit suffix. */
                    if (code_size <= TINFL_FAST_LOOKUP_BITS)
                    {
                        mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
                        while (rev_code < TINFL_FAST_LOOKUP_SIZE)
                        {
                            pTable->m_look_up[rev_code] = k;
                            rev_code += (1 << code_size);
                        }
                        continue;
                    }
                    /* Long codes: extend the binary tree stored in m_tree
                       (negative entries are internal-node references). */
                    if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
                    {
                        pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
                        tree_cur = tree_next;
                        tree_next -= 2;
                    }
                    rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
                    for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
                    {
                        tree_cur -= ((rev_code >>= 1) & 1);
                        if (!pTable->m_tree[-tree_cur - 1])
                        {
                            pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
                            tree_cur = tree_next;
                            tree_next -= 2;
                        }
                        else
                            tree_cur = pTable->m_tree[-tree_cur - 1];
                    }
                    tree_cur -= ((rev_code >>= 1) & 1);
                    pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
                }
                if (r->m_type == 2)
                {
                    /* Decode the combined lit/len + distance code lengths with
                       the just-built code-length table; 16/17/18 are repeat codes. */
                    for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);)
                    {
                        mz_uint s;
                        TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
                        if (dist < 16)
                        {
                            r->m_len_codes[counter++] = (mz_uint8)dist;
                            continue;
                        }
                        if ((dist == 16) && (!counter))
                        {
                            /* "Repeat previous" with no previous length. */
                            TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
                        }
                        num_extra = "\02\03\07"[dist - 16];
                        TINFL_GET_BITS(18, s, num_extra);
                        s += "\03\03\013"[dist - 16];
                        TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ?
r->m_len_codes[counter - 1] : 0, s);
                        counter += s;
                    }
                    if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
                    {
                        TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
                    }
                    TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]);
                    TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
                }
            }
            /* Main symbol decode loop: inner loop handles literals, outer loop
               handles length/distance matches. */
            for (;;)
            {
                mz_uint8 *pSrc;
                for (;;)
                {
                    /* Slow path near the end of input/output: decode a single
                       literal at a time with full bounds checks. */
                    if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
                    {
                        TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
                        if (counter >= 256)
                            break;
                        while (pOut_buf_cur >= pOut_buf_end)
                        {
                            TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
                        }
                        *pOut_buf_cur++ = (mz_uint8)counter;
                    }
                    else
                    {
                        /* Fast path: enough slack to decode two literals per
                           iteration without per-byte checks. */
                        int sym2;
                        mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
                        if (num_bits < 30)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 4;
                            num_bits += 32;
                        }
#else
                        if (num_bits < 15)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 2;
                            num_bits += 16;
                        }
#endif
                        if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
                            code_len = sym2 >> 9;
                        else
                        {
                            code_len = TINFL_FAST_LOOKUP_BITS;
                            do
                            {
                                sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
                            } while (sym2 < 0);
                        }
                        counter = sym2;
                        bit_buf >>= code_len;
                        num_bits -= code_len;
                        if (counter & 256)
                            break; /* length code (or end-of-block) */

#if !TINFL_USE_64BIT_BITBUF
                        if (num_bits < 15)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 2;
                            num_bits += 16;
                        }
#endif
                        /* Decode a second symbol before writing either byte. */
                        if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
                            code_len = sym2 >> 9;
                        else
                        {
                            code_len = TINFL_FAST_LOOKUP_BITS;
                            do
                            {
                                sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
                            } while (sym2 < 0);
                        }
                        bit_buf >>= code_len;
                        num_bits -= code_len;

                        pOut_buf_cur[0] = (mz_uint8)counter;
                        if (sym2 & 256)
                        {
                            pOut_buf_cur++;
                            counter = sym2;
                            break;
                        }
                        pOut_buf_cur[1] = (mz_uint8)sym2;
                        pOut_buf_cur += 2;
                    }
                }
                if ((counter &= 511) == 256)
                    break; /* end-of-block symbol */

                num_extra =
s_length_extra[counter - 257];
                counter = s_length_base[counter - 257];
                if (num_extra)
                {
                    mz_uint extra_bits;
                    TINFL_GET_BITS(25, extra_bits, num_extra);
                    counter += extra_bits;
                }

                /* Decode the distance code and its extra bits. */
                TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
                num_extra = s_dist_extra[dist];
                dist = s_dist_base[dist];
                if (num_extra)
                {
                    mz_uint extra_bits;
                    TINFL_GET_BITS(27, extra_bits, num_extra);
                    dist += extra_bits;
                }

                /* A distance reaching before the start of output is only legal
                   when the output buffer wraps (circular dictionary). */
                dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
                if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
                {
                    TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
                }

                pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);

                if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
                {
                    /* Match would run past the output end: copy byte-by-byte,
                       suspending whenever the output buffer fills. */
                    while (counter--)
                    {
                        while (pOut_buf_cur >= pOut_buf_end)
                        {
                            TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
                        }
                        *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
                    }
                    continue;
                }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
                else if ((counter >= 9) && (counter <= dist))
                {
                    /* Non-overlapping match: copy 8 bytes per iteration. */
                    const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
                    do
                    {
                        ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
                        ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
                        pOut_buf_cur += 8;
                    } while ((pSrc += 8) < pSrc_end);
                    if ((counter &= 7) < 3)
                    {
                        if (counter)
                        {
                            pOut_buf_cur[0] = pSrc[0];
                            if (counter > 1)
                                pOut_buf_cur[1] = pSrc[1];
                            pOut_buf_cur += counter;
                        }
                        continue;
                    }
                }
#endif
                /* Generic overlap-safe copy, 3 bytes at a time plus tail. */
                while(counter>2)
                {
                    pOut_buf_cur[0] = pSrc[0];
                    pOut_buf_cur[1] = pSrc[1];
                    pOut_buf_cur[2] = pSrc[2];
                    pOut_buf_cur += 3;
                    pSrc += 3;
                    counter -= 3;
                }
                if (counter > 0)
                {
                    pOut_buf_cur[0] = pSrc[0];
                    if (counter > 1)
                        pOut_buf_cur[1] = pSrc[1];
                    pOut_buf_cur += counter;
                }
            }
        }
    } while (!(r->m_final & 1));

    /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
    /* I'm being super conservative here.
   A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */
    TINFL_SKIP_BITS(32, num_bits & 7);
    while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
    {
        --pIn_buf_cur;
        num_bits -= 8;
    }
    bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
    MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */

    if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
    {
        /* Read the big-endian adler-32 trailer that follows a zlib stream. */
        for (counter = 0; counter < 4; ++counter)
        {
            mz_uint s;
            if (num_bits)
                TINFL_GET_BITS(41, s, 8);
            else
                TINFL_GET_BYTE(42, s);
            r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
        }
    }
    TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);

    TINFL_CR_FINISH

common_exit:
    /* As long as we aren't telling the caller that we NEED more input to make forward progress: */
    /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
    /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop.
 */
    if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS))
    {
        while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
        {
            --pIn_buf_cur;
            num_bits -= 8;
        }
    }
    /* Save the coroutine's registers so the next call can resume here. */
    r->m_num_bits = num_bits;
    r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
    r->m_dist = dist;
    r->m_counter = counter;
    r->m_num_extra = num_extra;
    r->m_dist_from_out_buf_start = dist_from_out_buf_start;
    *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
    *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
    if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
    {
        /* Incrementally fold the bytes just written into the running adler-32
           (unrolled by 8, with the modulo taken every 5552 bytes). */
        const mz_uint8 *ptr = pOut_buf_next;
        size_t buf_len = *pOut_buf_size;
        mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16;
        size_t block_len = buf_len % 5552;
        while (buf_len)
        {
            for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
            {
                s1 += ptr[0], s2 += s1;
                s1 += ptr[1], s2 += s1;
                s1 += ptr[2], s2 += s1;
                s1 += ptr[3], s2 += s1;
                s1 += ptr[4], s2 += s1;
                s1 += ptr[5], s2 += s1;
                s1 += ptr[6], s2 += s1;
                s1 += ptr[7], s2 += s1;
            }
            for (; i < block_len; ++i)
                s1 += *ptr++, s2 += s1;
            s1 %= 65521U, s2 %= 65521U;
            buf_len -= block_len;
            block_len = 5552;
        }
        r->m_check_adler32 = (s2 << 16) + s1;
        if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32))
            status = TINFL_STATUS_ADLER32_MISMATCH;
    }
    return status;
}

/* Higher level helper functions. */
/* Decompress pSrc_buf into a heap buffer grown as needed; caller frees.
   Returns NULL on any failure (including truncated input). */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
{
    tinfl_decompressor decomp;
    void *pBuf = NULL, *pNew_buf;
    size_t src_buf_ofs = 0, out_buf_capacity = 0;
    *pOut_len = 0;
    tinfl_init(&decomp);
    for (;;)
    {
        size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
        tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ?
(mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
        if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
        {
            /* Hard failure, or the stream wants input we don't have. */
            MZ_FREE(pBuf);
            *pOut_len = 0;
            return NULL;
        }
        src_buf_ofs += src_buf_size;
        *pOut_len += dst_buf_size;
        if (status == TINFL_STATUS_DONE)
            break;
        /* Output full but stream not finished: double the buffer (min 128 bytes). */
        new_out_buf_capacity = out_buf_capacity * 2;
        if (new_out_buf_capacity < 128)
            new_out_buf_capacity = 128;
        pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
        if (!pNew_buf)
        {
            MZ_FREE(pBuf);
            *pOut_len = 0;
            return NULL;
        }
        pBuf = pNew_buf;
        out_buf_capacity = new_out_buf_capacity;
    }
    return pBuf;
}

/* Single-call decompression into a caller-provided buffer. Returns the number
   of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED. */
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
{
    tinfl_decompressor decomp;
    tinfl_status status;
    tinfl_init(&decomp);
    status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    return (status != TINFL_STATUS_DONE) ?
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
}

/* Streaming decompression through a callback, using a TINFL_LZ_DICT_SIZE
   circular dictionary. Returns 1 on success, 0 when the callback aborts or
   the stream is bad; TINFL_STATUS_FAILED on dictionary allocation failure. */
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    int result = 0;
    tinfl_decompressor decomp;
    mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
    size_t in_buf_ofs = 0, dict_ofs = 0;
    if (!pDict)
        return TINFL_STATUS_FAILED;
    tinfl_init(&decomp);
    for (;;)
    {
        size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
        tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
        in_buf_ofs += in_buf_size;
        /* Hand the newly produced bytes to the caller's sink. */
        if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
            break;
        if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
        {
            result = (status == TINFL_STATUS_DONE);
            break;
        }
        dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
    }
    MZ_FREE(pDict);
    *pIn_buf_size = in_buf_ofs;
    return result;
}

/* Heap-allocate an initialized decompressor (for bindings that can't size the struct). */
tinfl_decompressor *tinfl_decompressor_alloc()
{
    tinfl_decompressor *pDecomp = (tinfl_decompressor *)MZ_MALLOC(sizeof(tinfl_decompressor));
    if (pDecomp)
        tinfl_init(pDecomp);
    return pDecomp;
}

void tinfl_decompressor_free(tinfl_decompressor *pDecomp)
{
    MZ_FREE(pDecomp);
}

#ifdef __cplusplus
}
#endif
/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * Copyright 2016 Martin Raiber
 * All Rights Reserved.
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 *
 **************************************************************************/

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- .ZIP archive reading */

/* Per-platform file I/O shims: every backend maps the MZ_F* macros onto its
   64-bit-capable fopen/ftell/fseek/stat family so large archives work. */
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <sys/stat.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
/* MSVC / MinGW-w64: wrap the *_s variants behind plain fopen/freopen signatures. */
static FILE *mz_fopen(const char *pFilename, const char *pMode)
{
    FILE *pFile = NULL;
    fopen_s(&pFile, pFilename, pMode);
    return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
{
    FILE *pFile = NULL;
    if (freopen_s(&pFile, pPath, pMode, pStream))
        return NULL;
    return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
/* TinyCC: plain C stdio only (no 64-bit offset variants). */
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && _LARGEFILE64_SOURCE
/* glibc with LFS64: explicit 64-bit interfaces. */
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__APPLE__)
/* macOS: off_t is always 64-bit, so the plain interfaces suffice. */
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen(p, m, s)
#define MZ_DELETE_FILE remove

#else
#pragma message("Using fopen, ftello, fseeko, stat() etc. path for file I/O - this path may not support large files.")
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#ifdef __STRICT_ANSI__
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#else
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#endif
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif /* #ifdef _MSC_VER */
#endif /* #ifdef MINIZ_NO_STDIO */

#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

/* Various ZIP archive enums. To completely avoid cross platform compiler alignment and platform endian issues, miniz.c doesn't use structs for any of this stuff.
 */
enum
{
    /* ZIP archive identifiers and record sizes */
    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
    MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
    MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
    MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
    MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,

    /* ZIP64 archive identifier and record sizes */
    MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06064b50,
    MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG = 0x07064b50,
    MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE = 56,
    MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE = 20,
    MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID = 0x0001,
    MZ_ZIP_DATA_DESCRIPTOR_ID = 0x08074b50,
    MZ_ZIP_DATA_DESCRIPTER_SIZE64 = 24,
    MZ_ZIP_DATA_DESCRIPTER_SIZE32 = 16,

    /* Central directory header record offsets */
    MZ_ZIP_CDH_SIG_OFS = 0,
    MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
    MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
    MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
    MZ_ZIP_CDH_METHOD_OFS = 10,
    MZ_ZIP_CDH_FILE_TIME_OFS = 12,
    MZ_ZIP_CDH_FILE_DATE_OFS = 14,
    MZ_ZIP_CDH_CRC32_OFS = 16,
    MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
    MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
    MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
    MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
    MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
    MZ_ZIP_CDH_DISK_START_OFS = 34,
    MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
    MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
    MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,

    /* Local directory header offsets */
    MZ_ZIP_LDH_SIG_OFS = 0,
    MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
    MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
    MZ_ZIP_LDH_METHOD_OFS = 8,
    MZ_ZIP_LDH_FILE_TIME_OFS = 10,
    MZ_ZIP_LDH_FILE_DATE_OFS = 12,
    MZ_ZIP_LDH_CRC32_OFS = 14,
    MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
    MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
    MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
    MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
    MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR = 1 << 3,

    /* End of central directory offsets */
    MZ_ZIP_ECDH_SIG_OFS = 0,
    MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
    MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
    MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
    MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
    MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, /* ZIP64 End of central directory locator offsets */ MZ_ZIP64_ECDL_SIG_OFS = 0, /* 4 bytes */ MZ_ZIP64_ECDL_NUM_DISK_CDIR_OFS = 4, /* 4 bytes */ MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS = 8, /* 8 bytes */ MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS = 16, /* 4 bytes */ /* ZIP64 End of central directory header offsets */ MZ_ZIP64_ECDH_SIG_OFS = 0, /* 4 bytes */ MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS = 4, /* 8 bytes */ MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS = 12, /* 2 bytes */ MZ_ZIP64_ECDH_VERSION_NEEDED_OFS = 14, /* 2 bytes */ MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS = 16, /* 4 bytes */ MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS = 20, /* 4 bytes */ MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 24, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS = 32, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_SIZE_OFS = 40, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_OFS_OFS = 48, /* 8 bytes */ MZ_ZIP_VERSION_MADE_BY_DOS_FILESYSTEM_ID = 0, MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG = 0x10, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED = 1, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG = 32, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION = 64, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED = 8192, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8 = 1 << 11 }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; /* The flags passed in when the archive is initially opened. */ uint32_t m_init_flags; /* MZ_TRUE if the archive has a zip64 end of central directory headers, etc. */ mz_bool m_zip64; /* MZ_TRUE if we found zip64 extended info in the central directory (m_zip64 will also be slammed to true too, even if we didn't find a zip64 end of central dir header, etc.) */ mz_bool m_zip64_has_extended_info_fields; /* These fields are used by the file, FILE, memory, and memory/heap read/write helpers. 
*/
    MZ_FILE *m_pFile;                   /* backing FILE* for file/cfile archives */
    mz_uint64 m_file_archive_start_ofs; /* offset of the archive start within m_pFile */
    void *m_pMem;                       /* backing block for in-memory archives */
    size_t m_mem_size;
    size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) (array_ptr)->m_element_size = element_size

#if defined(DEBUG) || defined(_DEBUG) || defined(NDEBUG)
/* Checked element access: asserts the index is within the array.
   NOTE(review): this branch is also selected when NDEBUG is defined —
   presumably harmless since MZ_ASSERT compiles out then, but worth confirming
   the condition is intentional. */
static MZ_FORCEINLINE mz_uint mz_zip_array_range_check(const mz_zip_array *pArray, mz_uint index)
{
    MZ_ASSERT(index < pArray->m_size);
    return index;
}
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[mz_zip_array_range_check(array_ptr, index)]
#else
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[index]
#endif

/* Zero the array struct and record the element size. */
static MZ_FORCEINLINE void mz_zip_array_init(mz_zip_array *pArray, mz_uint32 element_size)
{
    memset(pArray, 0, sizeof(mz_zip_array));
    pArray->m_element_size = element_size;
}

/* Free the array's storage (via the archive's allocator) and reset it. */
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray)
{
    pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
    memset(pArray, 0, sizeof(mz_zip_array));
}

/* Grow capacity to at least min_new_capacity elements. When 'growing' is
   non-zero the capacity doubles until it fits, amortizing repeated pushes. */
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing)
{
    void *pNew_p;
    size_t new_capacity = min_new_capacity;
    MZ_ASSERT(pArray->m_element_size);
    if (pArray->m_capacity >= min_new_capacity)
        return MZ_TRUE;
    if (growing)
    {
        new_capacity = MZ_MAX(1, pArray->m_capacity);
        while (new_capacity < min_new_capacity)
            new_capacity *= 2;
    }
    if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity)))
        return MZ_FALSE;
    pArray->m_p = pNew_p;
    pArray->m_capacity = new_capacity;
    return MZ_TRUE;
}

/* Reserve capacity without changing the element count. */
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing)
{
    if (new_capacity > pArray->m_capacity)
    {
        if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
            return MZ_FALSE;
    }
    return MZ_TRUE;
}

/* Resize to new_size elements, growing capacity as needed; new elements are
   left uninitialized. */
static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing)
{
    if (new_size > pArray->m_capacity)
    {
        if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
            return MZ_FALSE;
    }
    pArray->m_size = new_size;
    return MZ_TRUE;
}

/* Ensure there is room for n more elements past the current size. */
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n)
{
    return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

/* Append n elements copied from pElements. */
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n)
{
    size_t orig_size = pArray->m_size;
    if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
        return MZ_FALSE;
    memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size);
    return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
/* Convert MS-DOS packed time/date fields to a time_t via mktime()
   (interpreted as local time). */
static MZ_TIME_T mz_zip_dos_to_time_t(int dos_time, int dos_date)
{
    struct tm tm;
    memset(&tm, 0, sizeof(tm));
    tm.tm_isdst = -1;
    tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
    tm.tm_mon = ((dos_date >> 5) & 15) - 1;
    tm.tm_mday = dos_date & 31;
    tm.tm_hour = (dos_time >> 11) & 31;
    tm.tm_min = (dos_time >> 5) & 63;
    tm.tm_sec = (dos_time << 1) & 62; /* DOS stores seconds divided by 2 */
    return mktime(&tm);
}

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
/* Convert a time_t to MS-DOS packed time/date; both outputs are zeroed if
   the MSVC localtime_s conversion fails. */
static void mz_zip_time_t_to_dos_time(MZ_TIME_T time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date)
{
#ifdef _MSC_VER
    struct tm tm_struct;
    struct tm *tm = &tm_struct;
    errno_t err = localtime_s(tm, &time);
    if (err)
    {
        *pDOS_date = 0;
        *pDOS_time = 0;
        return;
    }
#else
    struct tm *tm = localtime(&time);
#endif /* #ifdef _MSC_VER */

    *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1));
    *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif /* MINIZ_NO_ARCHIVE_WRITING_APIS */

#ifndef MINIZ_NO_STDIO
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
/* Look up a file's modification time with MZ_FILE_STAT. */
static mz_bool mz_zip_get_file_modified_time(const char *pFilename, MZ_TIME_T *pTime)
{
    struct
MZ_FILE_STAT_STRUCT file_stat;

    /* On Linux with x86 glibc, this call will fail on large files (I think >= 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. */
    if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
        return MZ_FALSE;

    *pTime = file_stat.st_mtime;
    return MZ_TRUE;
}
#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS*/

/* Set a file's access/modification times via utime(); returns MZ_TRUE on
   success. */
static mz_bool mz_zip_set_file_times(const char *pFilename, MZ_TIME_T access_time, MZ_TIME_T modified_time)
{
    struct utimbuf t;

    memset(&t, 0, sizeof(t));
    t.actime = access_time;
    t.modtime = modified_time;

    return !utime(pFilename, &t);
}
#endif /* #ifndef MINIZ_NO_STDIO */
#endif /* #ifndef MINIZ_NO_TIME */

/* Record err_num as the archive's last error (when pZip is non-NULL) and
   return MZ_FALSE, so callers can write 'return mz_zip_set_error(...);'. */
static MZ_FORCEINLINE mz_bool mz_zip_set_error(mz_zip_archive *pZip, mz_zip_error err_num)
{
    if (pZip)
        pZip->m_last_error = err_num;
    return MZ_FALSE;
}

/* Common reader setup: installs default allocator callbacks, resets the
   archive counters, and allocates + zeroes the internal state block. */
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint flags)
{
    (void)flags;
    /* Reject reuse of an already-initialized archive object. */
    if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!pZip->m_pAlloc)
        pZip->m_pAlloc = miniz_def_alloc_func;
    if (!pZip->m_pFree)
        pZip->m_pFree = miniz_def_free_func;
    if (!pZip->m_pRealloc)
        pZip->m_pRealloc = miniz_def_realloc_func;

    pZip->m_archive_size = 0;
    pZip->m_central_directory_file_ofs = 0;
    pZip->m_total_files = 0;
    pZip->m_last_error = MZ_ZIP_NO_ERROR;

    if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));
    pZip->m_pState->m_init_flags = flags;
    pZip->m_pState->m_zip64 = MZ_FALSE;
    pZip->m_pState->m_zip64_has_extended_info_fields = MZ_FALSE;

    pZip->m_zip_mode = MZ_ZIP_MODE_READING;

    return MZ_TRUE;
}

/* Case-insensitive "less than" for two central-directory entry filenames,
   each selected by an index into the offsets array. Used by the heap sort
   below. */
static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index)
{
    const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE;
    const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
    mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    mz_uint8 l = 0, r = 0;
    /* The filename immediately follows the fixed-size header. */
    pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pE = pL + MZ_MIN(l_len, r_len);
    while (pL < pE)
    {
        if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
            break;
        pL++;
        pR++;
    }
    /* Equal prefixes: the shorter name sorts first. */
    return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
    do                       \
    {                        \
        mz_uint32 t = a;     \
        a = b;               \
        b = t;               \
    }                        \
    MZ_MACRO_END

/* Heap sort of lowercased filenames, used to help accelerate plain central directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), but it could allocate memory.)
*/
static void mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
    const mz_zip_array *pCentral_dir = &pState->m_central_dir;
    mz_uint32 *pIndices;
    mz_uint32 start, end;
    const mz_uint32 size = pZip->m_total_files;

    if (size <= 1U)
        return;

    pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0);

    /* Phase 1: build a max-heap over the index array. */
    start = (size - 2U) >> 1U;
    for (;;)
    {
        mz_uint64 child, root = start;
        for (;;)
        {
            if ((child = (root << 1U) + 1U) >= size)
                break;
            /* Pick the larger of the two children. */
            child += (((child + 1U) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U])));
            if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
                break;
            MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
            root = child;
        }
        if (!start)
            break;
        start--;
    }

    /* Phase 2: repeatedly move the max to the end and re-sift the root. */
    end = size - 1;
    while (end > 0)
    {
        mz_uint64 child, root = 0;
        MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
        for (;;)
        {
            if ((child = (root << 1U) + 1U) >= end)
                break;
            child += (((child + 1U) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U]));
            if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
                break;
            MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
            root = child;
        }
        end--;
    }
}

/* Scan backwards from the end of the archive for a record whose signature is
   record_sig and which has at least record_size bytes after it. On success,
   the record's file offset is written to *pOfs. */
static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip, mz_uint32 record_sig, mz_uint32 record_size, mz_int64 *pOfs)
{
    mz_int64 cur_file_ofs;
    mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
    mz_uint8 *pBuf = (mz_uint8 *)buf_u32;

    /* Basic sanity checks - reject files which are too small */
    if (pZip->m_archive_size < record_size)
        return MZ_FALSE;

    /* Find the record by scanning the file from the end towards the beginning.
*/
    cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
    for (;;)
    {
        int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);

        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
            return MZ_FALSE;

        /* Search this buffer backwards for the signature. */
        for (i = n - 4; i >= 0; --i)
        {
            mz_uint s = MZ_READ_LE32(pBuf + i);
            if (s == record_sig)
            {
                if ((pZip->m_archive_size - (cur_file_ofs + i)) >= record_size)
                    break;
            }
        }

        if (i >= 0)
        {
            cur_file_ofs += i;
            break;
        }

        /* Give up if we've searched the entire file, or we've gone back "too far" (~64kb) */
        if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (MZ_UINT16_MAX + record_size)))
            return MZ_FALSE;

        /* Step back one buffer, overlapping by 3 bytes so a signature split
           across buffer boundaries is still found. */
        cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
    }

    *pOfs = cur_file_ofs;
    return MZ_TRUE;
}

/* Locate, read, and validate the end-of-central-directory record (and the
   ZIP64 locator/header when present), then load the whole central directory
   into memory, index every entry, and sanity-check each record. */
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flags)
{
    mz_uint cdir_size = 0, cdir_entries_on_this_disk = 0, num_this_disk = 0, cdir_disk_index = 0;
    mz_uint64 cdir_ofs = 0;
    mz_int64 cur_file_ofs = 0;
    const mz_uint8 *p;

    mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
    mz_uint8 *pBuf = (mz_uint8 *)buf_u32;

    mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);

    mz_uint32 zip64_end_of_central_dir_locator_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pZip64_locator = (mz_uint8 *)zip64_end_of_central_dir_locator_u32;

    mz_uint32 zip64_end_of_central_dir_header_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pZip64_end_of_central_dir = (mz_uint8 *)zip64_end_of_central_dir_header_u32;

    mz_uint64 zip64_end_of_central_dir_ofs = 0;

    /* Basic sanity checks - reject files which are too small, and check the first 4 bytes of the file to make sure a local header is there. */
    if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    if (!mz_zip_reader_locate_header_sig(pZip, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE, &cur_file_ofs))
        return mz_zip_set_error(pZip, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR);

    /* Read and verify the end of central directory record. */
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    /* If there's room before the EOCD record, probe for the ZIP64 locator and
       the ZIP64 end-of-central-directory header it points at. */
    if (cur_file_ofs >= (MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE))
    {
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs - MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE, pZip64_locator, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE)
        {
            if (MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG)
            {
                zip64_end_of_central_dir_ofs = MZ_READ_LE64(pZip64_locator + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS);
                if (zip64_end_of_central_dir_ofs > (pZip->m_archive_size - MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE))
                    return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

                if (pZip->m_pRead(pZip->m_pIO_opaque, zip64_end_of_central_dir_ofs, pZip64_end_of_central_dir, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)
                {
                    if (MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG)
                    {
                        pZip->m_pState->m_zip64 = MZ_TRUE;
                    }
                }
            }
        }
    }

    /* Pull the classic 16/32-bit EOCD fields first. */
    pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS);
    cdir_entries_on_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
    num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
    cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
    cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS);
    cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);

    if (pZip->m_pState->m_zip64)
    {
        /* ZIP64: the 64-bit record's fields supersede the EOCD values. */
        mz_uint32 zip64_total_num_of_disks = MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS);
        mz_uint64 zip64_cdir_total_entries = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS);
        mz_uint64 zip64_cdir_total_entries_on_this_disk = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
        mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS);
        mz_uint64 zip64_size_of_central_directory = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS);

        if (zip64_size_of_end_of_central_dir_record < (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12))
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

        if (zip64_total_num_of_disks != 1U)
            return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

        /* Check for miniz's practical limits */
        if (zip64_cdir_total_entries > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        pZip->m_total_files = (mz_uint32)zip64_cdir_total_entries;

        if (zip64_cdir_total_entries_on_this_disk > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        cdir_entries_on_this_disk = (mz_uint32)zip64_cdir_total_entries_on_this_disk;

        /* Check for miniz's current practical limits (sorry, this should be enough for millions of files) */
        if (zip64_size_of_central_directory > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

        cdir_size = (mz_uint32)zip64_size_of_central_directory;

        num_this_disk = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS);
        cdir_disk_index = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS);
        cdir_ofs = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_OFS_OFS);
    }

    /* Multi-disk archives aren't supported. */
    if (pZip->m_total_files != cdir_entries_on_this_disk)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

    if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1)))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

    if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    pZip->m_central_directory_file_ofs = cdir_ofs;

    if (pZip->m_total_files)
    {
        mz_uint i, n;
        /* Read the entire central directory into a heap block, and allocate another heap block to hold the unsorted central dir file record offsets, and possibly another to hold the sorted indices. */
        if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) ||
            (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        if (sort_central_dir)
        {
            if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE))
                return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

        /* Now create an index into the central directory file records, do some basic sanity checking on each record */
        p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
        for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i)
        {
            mz_uint total_header_size, disk_index, bit_flags, filename_size, ext_data_size;
            mz_uint64 comp_size, decomp_size, local_header_ofs;

            if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);

            if (sort_central_dir)
                MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i;

            comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
            decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
            local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
            filename_size = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
            ext_data_size = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS);

            /* 0xFFFFFFFF in any size/offset field means "see the zip64
               extended information extra field". */
            if ((!pZip->m_pState->m_zip64_has_extended_info_fields) &&
                (ext_data_size) &&
                (MZ_MAX(MZ_MAX(comp_size, decomp_size), local_header_ofs) == MZ_UINT32_MAX))
            {
                /* Attempt to find zip64 extended information field in the entry's extra data */
                mz_uint32 extra_size_remaining = ext_data_size;

                if (extra_size_remaining)
                {
                    const mz_uint8 *pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size;

                    do
                    {
                        mz_uint32 field_id;
                        mz_uint32 field_data_size;

                        if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        field_id = MZ_READ_LE16(pExtra_data);
                        field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));

                        if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining)
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
                        {
                            /* Ok, the archive didn't have any zip64 headers but it uses a zip64 extended information field so mark it as zip64 anyway (this can occur with infozip's zip util when it reads compresses files from stdin). */
                            pZip->m_pState->m_zip64 = MZ_TRUE;
                            pZip->m_pState->m_zip64_has_extended_info_fields = MZ_TRUE;
                            break;
                        }

                        pExtra_data += sizeof(mz_uint16) * 2 + field_data_size;
                        extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size;
                    } while (extra_size_remaining);
                }
            }

            /* I've seen archives that aren't marked as zip64 that uses zip64 ext data, argh */
            if ((comp_size != MZ_UINT32_MAX) && (decomp_size != MZ_UINT32_MAX))
            {
                /* Stored (method 0) entries must have equal sizes; a nonzero
                   decompressed size with zero compressed size is bogus. */
                if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size))
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
            if ((disk_index == MZ_UINT16_MAX) || ((disk_index != num_this_disk) && (disk_index != 1)))
                return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

            if (comp_size != MZ_UINT32_MAX)
            {
                if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            bit_flags = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
            if (bit_flags & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED)
                return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

            if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            n -= total_header_size;
            p += total_header_size;
        }
    }

    if (sort_central_dir)
        mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

    return MZ_TRUE;
}

/* Zero an mz_zip_archive struct; safe to call with NULL. */
void mz_zip_zero_struct(mz_zip_archive *pZip)
{
    if (pZip)
        MZ_CLEAR_OBJ(*pZip);
}

/* Tear down reader state: frees the central-directory arrays, closes the
   backing FILE when this archive owns it (type FILE), frees the state block,
   and marks the archive invalid. Errors are recorded only when
   set_last_error is true. */
static mz_bool mz_zip_reader_end_internal(mz_zip_archive *pZip, mz_bool set_last_error)
{
    mz_bool status = MZ_TRUE;

    if (!pZip)
        return MZ_FALSE;

    if ((!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode !=
MZ_ZIP_MODE_READING))
    {
        if (set_last_error)
            pZip->m_last_error = MZ_ZIP_INVALID_PARAMETER;

        return MZ_FALSE;
    }

    if (pZip->m_pState)
    {
        mz_zip_internal_state *pState = pZip->m_pState;
        pZip->m_pState = NULL;
        mz_zip_array_clear(pZip, &pState->m_central_dir);
        mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
        mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
        if (pState->m_pFile)
        {
            /* Only close the FILE if this archive opened it itself. */
            if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
            {
                if (MZ_FCLOSE(pState->m_pFile) == EOF)
                {
                    if (set_last_error)
                        pZip->m_last_error = MZ_ZIP_FILE_CLOSE_FAILED;
                    status = MZ_FALSE;
                }
            }
            pState->m_pFile = NULL;
        }
#endif /* #ifndef MINIZ_NO_STDIO */

        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
    }
    pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;

    return status;
}

/* Public reader teardown; failures are recorded in m_last_error. */
mz_bool mz_zip_reader_end(mz_zip_archive *pZip)
{
    return mz_zip_reader_end_internal(pZip, MZ_TRUE);
}

/* Initialize a reader that pulls archive bytes through the caller-supplied
   m_pRead callback; 'size' is the total archive size in bytes. */
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint flags)
{
    if ((!pZip) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_USER;
    pZip->m_archive_size = size;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

/* m_pRead implementation for in-memory archives: a bounds-clamped memcpy
   from the caller's archive block. */
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
    memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
    return s;
}

/* Initialize a reader over an existing memory block. The block is used in
   place (not copied) and the caller retains ownership. */
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint flags)
{
    if (!pMem)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_MEMORY;
    pZip->m_archive_size = size;
    pZip->m_pRead = mz_zip_mem_read_func;
    pZip->m_pIO_opaque = pZip;
    pZip->m_pNeeds_keepalive = NULL;

#ifdef __cplusplus
    pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
    pZip->m_pState->m_pMem = (void *)pMem;
#endif

    pZip->m_pState->m_mem_size = size;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* m_pRead implementation for FILE-backed archives: seeks only when the
   current position differs from the requested offset, then freads. */
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);

    /* Translate from archive-relative to file-relative offset. */
    file_ofs += pZip->m_pState->m_file_archive_start_ofs;

    if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
        return 0;

    return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

/* Open the named file and read it as an archive (whole file, offset 0). */
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags)
{
    return mz_zip_reader_init_file_v2(pZip, pFilename, flags, 0, 0);
}

/* Like mz_zip_reader_init_file(), but the archive may start at
   file_start_ofs within the file, limited to archive_size bytes
   (archive_size == 0 means "use the file's size"). */
mz_bool mz_zip_reader_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags, mz_uint64 file_start_ofs, mz_uint64 archive_size)
{
    mz_uint64 file_size;
    MZ_FILE *pFile;

    if ((!pZip) || (!pFilename) || ((archive_size) && (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pFile = MZ_FOPEN(pFilename, "rb");
    if (!pFile)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    file_size = archive_size;
    if (!file_size)
    {
        /* Determine the file size by seeking to the end. */
        if (MZ_FSEEK64(pFile, 0, SEEK_END))
        {
            MZ_FCLOSE(pFile);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);
        }

        file_size = MZ_FTELL64(pFile);
    }

    /* TODO: Better sanity check archive_size and the # of actual remaining bytes */

    if (file_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    {
        MZ_FCLOSE(pFile);
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
    }

    if (!mz_zip_reader_init_internal(pZip, flags))
    {
        MZ_FCLOSE(pFile);
        return MZ_FALSE;
    }

    pZip->m_zip_type = MZ_ZIP_TYPE_FILE;
    pZip->m_pRead = mz_zip_file_read_func;
    pZip->m_pIO_opaque = pZip;
    pZip->m_pState->m_pFile = pFile;
    pZip->m_archive_size = file_size;
    pZip->m_pState->m_file_archive_start_ofs = file_start_ofs;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

/* Read an archive from an already-open FILE*, starting at its current
   position. The FILE is NOT closed by mz_zip_reader_end() (type CFILE). */
mz_bool mz_zip_reader_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint64 archive_size, mz_uint flags)
{
    mz_uint64 cur_file_ofs;

    if ((!pZip) || (!pFile))
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    cur_file_ofs = MZ_FTELL64(pFile);

    if (!archive_size)
    {
        /* Archive runs from the current position to the end of the FILE. */
        if (MZ_FSEEK64(pFile, 0, SEEK_END))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);

        archive_size = MZ_FTELL64(pFile) - cur_file_ofs;

        if (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
    }

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_CFILE;
    pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;
    pZip->m_pState->m_pFile = pFile;
    pZip->m_archive_size = archive_size;
    pZip->m_pState->m_file_archive_start_ofs = cur_file_ofs;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

#endif /* #ifndef MINIZ_NO_STDIO */

/* Return a pointer to the central directory header record for file_index,
   or NULL when the archive state or index is invalid. */
static MZ_FORCEINLINE const mz_uint8 *mz_zip_get_cdh(mz_zip_archive *pZip, mz_uint file_index)
{
    if ((!pZip)
|| (!pZip->m_pState) || (file_index >= pZip->m_total_files))
        return NULL;
    return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
}

/* Report whether the entry's general-purpose bit flags indicate encryption
   (classic or "strong"). */
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint m_bit_flag;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
    return (m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION)) != 0;
}

/* Report whether the entry uses only features this reader supports:
   stored or deflated, not encrypted, no compressed-patch data. */
mz_bool mz_zip_reader_is_file_supported(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint bit_flag;
    mz_uint method;

    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
    bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);

    if ((method != 0) && (method != MZ_DEFLATED))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);
        return MZ_FALSE;
    }

    if (bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);
        return MZ_FALSE;
    }

    if (bit_flag & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG)
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

/* Report whether the entry is a directory: filename ends in '/', or the DOS
   directory bit is set in the external attributes. */
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint filename_len, attribute_mapping_id, external_attr;

    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    if (filename_len)
    {
        if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
            return MZ_TRUE;
    }

    /* Bugfix: This code was also checking if the internal attribute was non-zero, which wasn't correct. */
    /* Most/all zip writers (hopefully) set DOS file/directory attributes in the low 16-bits, so check for the DOS directory flag and ignore the source OS ID in the created by field. */
    /* FIXME: Remove this check? Is it necessary - we already check the filename. */
    attribute_mapping_id = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS) >> 8;
    (void)attribute_mapping_id;

    external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
    if ((external_attr & MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG) != 0)
    {
        return MZ_TRUE;
    }

    return MZ_FALSE;
}

/* Fill *pStat from the entry's central directory record, then consume zip64
   extended-information extra fields when the 32-bit size/offset fields are
   saturated (0xFFFFFFFF). Optionally reports via *pFound_zip64_extra_data
   whether zip64 extra data was present. */
static mz_bool mz_zip_file_stat_internal(mz_zip_archive *pZip, mz_uint file_index, const mz_uint8 *pCentral_dir_header, mz_zip_archive_file_stat *pStat, mz_bool *pFound_zip64_extra_data)
{
    mz_uint n;
    const mz_uint8 *p = pCentral_dir_header;

    if (pFound_zip64_extra_data)
        *pFound_zip64_extra_data = MZ_FALSE;

    if ((!p) || (!pStat))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* Extract fields from the central directory record. */
    pStat->m_file_index = file_index;
    pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
    pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
    pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
    pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
    pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
    pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
    pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
    pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
    pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
    pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
    pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
    pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);

    /* Copy as much of the filename and comment as possible. */
    n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
    memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pStat->m_filename[n] = '\0';

    n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
    n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
    pStat->m_comment_size = n;
    /* The comment follows the filename and the extra data. */
    memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n);
    pStat->m_comment[n] = '\0';

    /* Set some flags for convienance */
    pStat->m_is_directory = mz_zip_reader_is_file_a_directory(pZip, file_index);
    pStat->m_is_encrypted = mz_zip_reader_is_file_encrypted(pZip, file_index);
    pStat->m_is_supported = mz_zip_reader_is_file_supported(pZip, file_index);

    /* See if we need to read any zip64 extended information fields. */
    /* Confusingly, these zip64 fields can be present even on non-zip64 archives (Debian zip on a huge files from stdin piped to stdout creates them). */
    if (MZ_MAX(MZ_MAX(pStat->m_comp_size, pStat->m_uncomp_size), pStat->m_local_header_ofs) == MZ_UINT32_MAX)
    {
        /* Attempt to find zip64 extended information field in the entry's extra data */
        mz_uint32 extra_size_remaining = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS);

        if (extra_size_remaining)
        {
            const mz_uint8 *pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);

            do
            {
                mz_uint32 field_id;
                mz_uint32 field_data_size;

                if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                field_id = MZ_READ_LE16(pExtra_data);
                field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));

                if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining)
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
                {
                    const mz_uint8 *pField_data = pExtra_data + sizeof(mz_uint16) * 2;
                    mz_uint32 field_data_remaining = field_data_size;

                    if (pFound_zip64_extra_data)
                        *pFound_zip64_extra_data = MZ_TRUE;

                    /* Each saturated 32-bit field is replaced, in order, by a
                       64-bit value from the extra field. */
                    if (pStat->m_uncomp_size == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_uncomp_size = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    if (pStat->m_comp_size == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_comp_size = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    if (pStat->m_local_header_ofs == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_local_header_ofs = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    break;
                }

                pExtra_data += sizeof(mz_uint16) * 2 + field_data_size;
                extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size;
            } while (extra_size_remaining);
        }
    }

    return MZ_TRUE;
}

/* Compare len bytes of two names; case-sensitive only when
   MZ_ZIP_FLAG_CASE_SENSITIVE is set in flags. */
static MZ_FORCEINLINE mz_bool mz_zip_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags)
{
    mz_uint i;
    if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
        return 0 == memcmp(pA, pB, len);
    for (i = 0; i < len; ++i)
        if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
            return MZ_FALSE;
    return MZ_TRUE;
}

/* Case-insensitive comparison of central-dir entry l_index's filename
   against pR/r_len. (Definition continues past this chunk.) */
static MZ_FORCEINLINE int mz_zip_filename_compare(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len)
{
    const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE;
    mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    mz_uint8 l = 0, r = 0;
    pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pE = pL + MZ_MIN(l_len, r_len);
    while (pL < pE)
    {
        if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
            break;
        pL++;
        pR++;
    }
    return (pL == pE) ?
(int)(l_len - r_len) : (l - r);
}

/* Binary search over m_sorted_central_dir_offsets for pFilename (full path,
   case-insensitive).  On success stores the file index in *pIndex (optional)
   and returns MZ_TRUE; otherwise sets MZ_ZIP_FILE_NOT_FOUND. */
static mz_bool mz_zip_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename, mz_uint32 *pIndex)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
    const mz_zip_array *pCentral_dir = &pState->m_central_dir;
    mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
    const uint32_t size = pZip->m_total_files;
    const mz_uint filename_len = (mz_uint)strlen(pFilename);

    if (pIndex)
        *pIndex = 0;

    if (size)
    {
        /* yes I could use uint32_t's, but then we would have to add some special
           case checks in the loop, argh, and */
        /* honestly the major expense here on 32-bit CPU's will still be the
           filename compare */
        mz_int64 l = 0, h = (mz_int64)size - 1;

        while (l <= h)
        {
            mz_int64 m = l + ((h - l) >> 1); /* overflow-safe midpoint */
            uint32_t file_index = pIndices[(uint32_t)m];
            int comp = mz_zip_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len);
            if (!comp)
            {
                if (pIndex)
                    *pIndex = file_index;
                return MZ_TRUE;
            }
            else if (comp < 0)
                l = m + 1;
            else
                h = m - 1;
        }
    }

    return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND);
}

/* Legacy locate API: returns the file index, or -1 if not found. */
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags)
{
    mz_uint32 index;
    if (!mz_zip_reader_locate_file_v2(pZip, pName, pComment, flags, &index))
        return -1;
    else
        return (int)index;
}

/* Locates an entry by name (and optionally by comment).  Uses the sorted-index
   binary search when possible, otherwise linearly scans the whole central
   directory.  Honors MZ_ZIP_FLAG_IGNORE_PATH and MZ_ZIP_FLAG_CASE_SENSITIVE. */
mz_bool mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags, mz_uint32 *pIndex)
{
    mz_uint file_index;
    size_t name_len, comment_len;

    if (pIndex)
        *pIndex = 0;

    if ((!pZip) || (!pZip->m_pState) || (!pName))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* See if we can use a binary search: only valid for full-path,
       case-insensitive lookups with no comment filter, on a reader that built
       the sorted offset table. */
    if (((pZip->m_pState->m_init_flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0) &&
        (pZip->m_zip_mode == MZ_ZIP_MODE_READING) &&
        ((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
        (!pComment) &&
        (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    {
        return mz_zip_locate_file_binary_search(pZip, pName, pIndex);
    }

    /* Locate the entry by scanning the entire central directory */
    name_len = strlen(pName);
    if (name_len > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    comment_len = pComment ? strlen(pComment) : 0;
    if (comment_len > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    for (file_index = 0; file_index < pZip->m_total_files; file_index++)
    {
        const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
        mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
        const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
        if (filename_len < name_len)
            continue;
        if (comment_len)
        {
            mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
            const char *pFile_comment = pFilename + filename_len + file_extra_len;
            if ((file_comment_len != comment_len) || (!mz_zip_string_equal(pComment, pFile_comment, file_comment_len, flags)))
                continue;
        }
        if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len))
        {
            /* Strip any leading directory components ('/', '\\' or a drive
               ':') so only the basename is compared. */
            int ofs = filename_len - 1;
            do
            {
                if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':'))
                    break;
            } while (--ofs >= 0);
            ofs++;
            pFilename += ofs;
            filename_len -= ofs;
        }
        if ((filename_len == name_len) && (mz_zip_string_equal(pName, pFilename, filename_len, flags)))
        {
            if (pIndex)
                *pIndex = file_index;
            return MZ_TRUE;
        }
    }

    return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND);
}

/* Extracts entry file_index into the caller's buffer pBuf (no allocation for
   the output; an optional user-supplied read buffer may be used for file I/O).
   Supports stored and deflate entries only; verifies the CRC32 unless the raw
   compressed data was requested or CRC checks are compiled out. */
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
    int status = TINFL_STATUS_DONE;
    mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
              read_buf_size, read_buf_ofs = 0, read_buf_avail;
    mz_zip_archive_file_stat file_stat;
    void *pRead_buf;
    /* Local header staged in a mz_uint32 array to guarantee alignment. */
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    tinfl_decompressor inflator;

    if ((!pZip) || (!pZip->m_pState) || ((buf_size) && (!pBuf)) || ((user_read_buf_size) && (!pUser_read_buf)) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_comp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    /* Ensure supplied output buffer is large enough. */
    needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size;
    if (buf_size < needed_size)
        return mz_zip_set_error(pZip, MZ_ZIP_BUF_TOO_SMALL);

    /* Read and parse the local directory entry. */
    cur_file_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    /* Data begins after the local header plus its filename and extra data
       (their lengths come from the local header, which may differ from the
       central dir copy). */
    cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed data. */
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) == 0)
        {
            if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)
                return mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED);
        }
#endif

        return MZ_TRUE;
    }

    /* Decompress the file either directly from memory or from a file input
       buffer. */
    tinfl_init(&inflator);

    if (pZip->m_pState->m_pMem)
    {
        /* Read directly from the archive in memory. */
        pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
        read_buf_size = read_buf_avail = file_stat.m_comp_size;
        comp_remaining = 0;
    }
    else if (pUser_read_buf)
    {
        /* Use a user provided read buffer. */
        if (!user_read_buf_size)
            return MZ_FALSE;
        pRead_buf = (mz_uint8 *)pUser_read_buf;
        read_buf_size = user_read_buf_size;
        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }
    else
    {
        /* Temporarily allocate a read buffer. */
        read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
        /* On 32-bit size_t, refuse requests that would overflow the cast. */
        if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }

    do
    {
        /* The size_t cast here should be OK because we've verified that the
           output buffer is >= file_stat.m_uncomp_size above */
        size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem))
        {
            /* Refill the read buffer from the archive file. */
            read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
            if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
            {
                status = TINFL_STATUS_FAILED;
                mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                break;
            }
            cur_file_ofs += read_buf_avail;
            comp_remaining -= read_buf_avail;
            read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        /* Output goes straight into the caller's buffer, so the inflater can
           use the non-wrapping output path. */
        status = tinfl_decompress(&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        out_buf_ofs += out_buf_size;
    } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

    if (status == TINFL_STATUS_DONE)
    {
        /* Make sure the entire file was decompressed, and check its CRC. */
        if (out_buf_ofs != file_stat.m_uncomp_size)
        {
            mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE);
            status = TINFL_STATUS_FAILED;
        }
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        else if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED);
            status = TINFL_STATUS_FAILED;
        }
#endif
    }

    /* Only free the read buffer when this function allocated it. */
    if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

    return status == TINFL_STATUS_DONE;
}

/* Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc(). */
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return MZ_FALSE;
    return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size);
}

/* Extracts by index into a caller buffer (no user read buffer). */
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags)
{
    return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0);
}

/* Extracts by name into a caller buffer (no user read buffer). */
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags)
{
    return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0);
}

/* Extracts entry file_index into a freshly allocated buffer; returns the
   buffer (caller frees with the archive's m_pFree / mz_free) and stores its
   size in *pSize, or returns NULL with the archive error set. */
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags)
{
    mz_uint64 comp_size, uncomp_size, alloc_size;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    void *pBuf;

    if (pSize)
        *pSize = 0;

    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return NULL;
    }

    /* NOTE(review): sizes are read from the 32-bit central-dir fields only —
       presumably zip64 entries (0xFFFFFFFF here) take a different path;
       verify against the zip64 handling in mz_zip_file_stat_internal. */
    comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
    uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

    alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
                 comp_size : uncomp_size;
    /* On 32-bit size_t, refuse allocations that would overflow the cast. */
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
    {
        mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
        return NULL;
    }

    if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    {
        mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        return NULL;
    }

    if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return NULL;
    }

    if (pSize)
        *pSize = (size_t)alloc_size;
    return pBuf;
}

/* Name-based wrapper around mz_zip_reader_extract_to_heap(). */
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
    {
        if (pSize)
            *pSize = 0;
        /* NOTE(review): MZ_FALSE (0) returned from a pointer-returning
           function — behaves as NULL, but NULL would be clearer. */
        return MZ_FALSE;
    }
    return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

/* Streams entry file_index through pCallback(pOpaque, ofs, data, len) —
   either the raw compressed bytes (MZ_ZIP_FLAG_COMPRESSED_DATA / stored
   entries) or the inflated data, decompressed through a TINFL_LZ_DICT_SIZE
   sliding-window buffer.  CRC-checks the output unless compiled out. */
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags)
{
    int status = TINFL_STATUS_DONE;
    mz_uint file_crc32 = MZ_CRC32_INIT;
    mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs;
    mz_zip_archive_file_stat file_stat;
    void *pRead_buf = NULL;
    void *pWrite_buf = NULL;
    /* Local header staged in a mz_uint32 array to guarantee alignment. */
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

    if ((!pZip) || (!pZip->m_pState) || (!pCallback) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_comp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    /* Read and do some minimal validation of the local directory entry (this
       doesn't crack the zip64 stuff, which we already have from the central
       dir) */
    cur_file_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    /* Decompress the file either directly from memory or from a file input
       buffer. */
    if (pZip->m_pState->m_pMem)
    {
        pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
        read_buf_size = read_buf_avail = file_stat.m_comp_size;
        comp_remaining = 0;
    }
    else
    {
        read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
        if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }

    if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed
           data. */
        if (pZip->m_pState->m_pMem)
        {
            /* Whole entry already in memory: hand it to the callback in one
               shot. */
            if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > MZ_UINT32_MAX))
                return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

            if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
            {
                mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                status = TINFL_STATUS_FAILED;
            }
            else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
            {
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size);
#endif
            }

            cur_file_ofs += file_stat.m_comp_size;
            out_buf_ofs += file_stat.m_comp_size;
            comp_remaining = 0;
        }
        else
        {
            /* Stream the entry through the read buffer chunk by chunk. */
            while (comp_remaining)
            {
                read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
                if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                    status = TINFL_STATUS_FAILED;
                    break;
                }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
                {
                    file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
                }
#endif

                if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                    status = TINFL_STATUS_FAILED;
                    break;
                }

                cur_file_ofs += read_buf_avail;
                out_buf_ofs += read_buf_avail;
                comp_remaining -= read_buf_avail;
            }
        }
    }
    else
    {
        tinfl_decompressor inflator;
        tinfl_init(&inflator);

        if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            status = TINFL_STATUS_FAILED;
        }
        else
        {
            do
            {
                /* The write buffer is used as a power-of-two ring: position
                   and space-to-end are derived by masking the running output
                   offset. */
                mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
                size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
                if ((!read_buf_avail) && (!pZip->m_pState->m_pMem))
                {
                    read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
                    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }
                    cur_file_ofs += read_buf_avail;
                    comp_remaining -= read_buf_avail;
                    read_buf_ofs = 0;
                }

                in_buf_size = (size_t)read_buf_avail;
                status = tinfl_decompress(&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
                read_buf_avail -= in_buf_size;
                read_buf_ofs += in_buf_size;

                if (out_buf_size)
                {
                    if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                    file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
#endif
                    /* Guard against output exceeding the declared
                       uncompressed size (corrupt/hostile archive). */
                    if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }
                }
            } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT));
        }
    }

    if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)))
    {
        /* Make sure the entire file was decompressed, and check its CRC. */
        if (out_buf_ofs != file_stat.m_uncomp_size)
        {
            mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE);
            status = TINFL_STATUS_FAILED;
        }
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        else if (file_crc32 != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
            status = TINFL_STATUS_FAILED;
        }
#endif
    }

    if (!pZip->m_pState->m_pMem)
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

    if (pWrite_buf)
        pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

    return status == TINFL_STATUS_DONE;
}

/* Name-based wrapper around mz_zip_reader_extract_to_callback(). */
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags);
}

/* Creates a pull-style extraction iterator for entry file_index; data is then
   fetched with mz_zip_reader_extract_iter_read() and the iterator released
   with mz_zip_reader_extract_iter_free().  Returns NULL (with the archive
   error set) on any validation or allocation failure. */
mz_zip_reader_extract_iter_state* mz_zip_reader_extract_iter_new(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags)
{
    mz_zip_reader_extract_iter_state *pState;
    /* Local header staged in a mz_uint32 array to guarantee alignment. */
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

    /* Argument sanity check */
    if ((!pZip) || (!pZip->m_pState))
        return NULL;

    /* Allocate an iterator status structure */
    pState = (mz_zip_reader_extract_iter_state*)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_reader_extract_iter_state));
    if (!pState)
    {
        mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        return NULL;
    }

    /* Fetch file details */
    if (!mz_zip_reader_file_stat(pZip, file_index, &pState->file_stat))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Encryption and patch files are not supported.
    */
    if (pState->file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (pState->file_stat.m_method != 0) && (pState->file_stat.m_method != MZ_DEFLATED))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Init state - save args */
    pState->pZip = pZip;
    pState->flags = flags;

    /* Init state - reset variables to defaults */
    pState->status = TINFL_STATUS_DONE;
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
    pState->file_crc32 = MZ_CRC32_INIT;
#endif
    pState->read_buf_ofs = 0;
    pState->out_buf_ofs = 0;
    pState->pRead_buf = NULL;
    pState->pWrite_buf = NULL;
    pState->out_blk_remain = 0;

    /* Read and parse the local directory entry. */
    pState->cur_file_ofs = pState->file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, pState->cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Skip past the local header's filename and extra data to the entry's
       payload, then sanity-check it fits inside the archive. */
    pState->cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((pState->cur_file_ofs + pState->file_stat.m_comp_size) > pZip->m_archive_size)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Decompress the file either directly from memory or from a file input
       buffer. */
    if (pZip->m_pState->m_pMem)
    {
        pState->pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + pState->cur_file_ofs;
        pState->read_buf_size = pState->read_buf_avail = pState->file_stat.m_comp_size;
        pState->comp_remaining = pState->file_stat.m_comp_size;
    }
    else
    {
        if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method)))
        {
            /* Decompression required, therefore intermediate read buffer required */
            pState->read_buf_size = MZ_MIN(pState->file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
            if (NULL == (pState->pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)pState->read_buf_size)))
            {
                mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
                pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
                return NULL;
            }
        }
        else
        {
            /* Decompression not required - we will be reading directly into
               user buffer, no temp buf required */
            pState->read_buf_size = 0;
        }
        pState->read_buf_avail = 0;
        pState->comp_remaining = pState->file_stat.m_comp_size;
    }

    if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method)))
    {
        /* Decompression required, init decompressor */
        tinfl_init( &pState->inflator );

        /* Allocate write buffer */
        if (NULL == (pState->pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            if (pState->pRead_buf)
                pZip->m_pFree(pZip->m_pAlloc_opaque, pState->pRead_buf);
            pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
            return NULL;
        }
    }

    return pState;
}

/* Name-based wrapper around mz_zip_reader_extract_iter_new(). */
mz_zip_reader_extract_iter_state* mz_zip_reader_extract_file_iter_new(mz_zip_archive *pZip, const char *pFilename, mz_uint flags)
{
    mz_uint32 file_index;

    /* Locate file index by name */
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return NULL;

    /* Construct iterator */
    return mz_zip_reader_extract_iter_new(pZip, file_index, flags);
}

/* Reads up to buf_size bytes of the entry's (possibly decompressed) data into
   pvBuf, advancing the iterator.  Returns the number of bytes produced; 0 on
   error or end of data (pState->status records failures). */
size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state* pState, void* pvBuf, size_t buf_size)
{
    size_t copied_to_caller = 0;

    /* Argument sanity check */
    if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState) || (!pvBuf))
        return 0;

    if ((pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed data,
           calc amount to return. */
        copied_to_caller = (size_t)MZ_MIN( buf_size, pState->comp_remaining );

        /* Zip is in memory....or requires reading from a file? */
        if (pState->pZip->m_pState->m_pMem)
        {
            /* Copy data to caller's buffer */
            memcpy( pvBuf, pState->pRead_buf, copied_to_caller );
            pState->pRead_buf = ((mz_uint8*)pState->pRead_buf) + copied_to_caller;
        }
        else
        {
            /* Read directly into caller's buffer */
            if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pvBuf, copied_to_caller) != copied_to_caller)
            {
                /* Failed to read all that was asked for, flag failure and alert user */
                mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED);
                pState->status = TINFL_STATUS_FAILED;
                copied_to_caller = 0;
            }
        }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        /* Compute CRC if not returning compressed data only */
        if (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
            pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, (const mz_uint8 *)pvBuf, copied_to_caller);
#endif

        /* Advance offsets, dec counters */
        pState->cur_file_ofs += copied_to_caller;
        pState->out_buf_ofs += copied_to_caller;
        pState->comp_remaining -= copied_to_caller;
    }
    else
    {
        do
        {
            /* Calc ptr to write buffer - given current output pos and block size */
            mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pState->pWrite_buf + (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));

            /* Calc max output size - given current output pos and block size */
            size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));

            if (!pState->out_blk_remain)
            {
                /* Read more data from file if none available (and reading from file) */
                if ((!pState->read_buf_avail) && (!pState->pZip->m_pState->m_pMem))
                {
                    /* Calc read size */
                    pState->read_buf_avail = MZ_MIN(pState->read_buf_size, pState->comp_remaining);
                    if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pState->pRead_buf, (size_t)pState->read_buf_avail) != pState->read_buf_avail)
                    {
                        mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED);
                        pState->status = TINFL_STATUS_FAILED;
                        break;
                    }

                    /* Advance offsets, dec counters */
                    pState->cur_file_ofs += pState->read_buf_avail;
                    pState->comp_remaining -= pState->read_buf_avail;
                    pState->read_buf_ofs = 0;
                }

                /* Perform decompression */
                in_buf_size = (size_t)pState->read_buf_avail;
                pState->status = tinfl_decompress(&pState->inflator, (const mz_uint8 *)pState->pRead_buf + pState->read_buf_ofs, &in_buf_size, (mz_uint8 *)pState->pWrite_buf, pWrite_buf_cur, &out_buf_size, pState->comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
                pState->read_buf_avail -= in_buf_size;
                pState->read_buf_ofs += in_buf_size;

                /* Update current output block size remaining */
                pState->out_blk_remain = out_buf_size;
            }

            if (pState->out_blk_remain)
            {
                /* Calc amount to return. */
                size_t to_copy = MZ_MIN( (buf_size - copied_to_caller), pState->out_blk_remain );

                /* Copy data to caller's buffer */
                memcpy( (uint8_t*)pvBuf + copied_to_caller, pWrite_buf_cur, to_copy );

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                /* Perform CRC */
                pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, pWrite_buf_cur, to_copy);
#endif

                /* Decrement data consumed from block */
                pState->out_blk_remain -= to_copy;

                /* Inc output offset, while performing sanity check */
                if ((pState->out_buf_ofs += to_copy) > pState->file_stat.m_uncomp_size)
                {
                    mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                    pState->status = TINFL_STATUS_FAILED;
                    break;
                }

                /* Increment counter of data copied to caller */
                copied_to_caller += to_copy;
            }
        } while ( (copied_to_caller < buf_size) && ((pState->status == TINFL_STATUS_NEEDS_MORE_INPUT) || (pState->status == TINFL_STATUS_HAS_MORE_OUTPUT)) );
    }

    /* Return how many bytes were copied into user buffer */
    return copied_to_caller;
}

/* Finishes an extraction iterator: verifies size/CRC of a fully decompressed
   entry, frees its buffers and the iterator itself.  Returns MZ_TRUE only if
   the whole entry was produced without error. */
mz_bool mz_zip_reader_extract_iter_free(mz_zip_reader_extract_iter_state* pState)
{
    int status;

    /* Argument sanity check */
    if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState))
        return MZ_FALSE;

    /* Was decompression completed and requested?
*/ if ((pState->status == TINFL_STATUS_DONE) && (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { /* Make sure the entire file was decompressed, and check its CRC. */ if (pState->out_buf_ofs != pState->file_stat.m_uncomp_size) { mz_zip_set_error(pState->pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE); pState->status = TINFL_STATUS_FAILED; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS else if (pState->file_crc32 != pState->file_stat.m_crc32) { mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED); pState->status = TINFL_STATUS_FAILED; } #endif } /* Free buffers */ if (!pState->pZip->m_pState->m_pMem) pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pRead_buf); if (pState->pWrite_buf) pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pWrite_buf); /* Save status */ status = pState->status; /* Free context */ pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState); return status == TINFL_STATUS_DONE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; if ((file_stat.m_is_directory) || (!file_stat.m_is_supported)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE); pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); status = mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) { if (status) mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED); status = MZ_FALSE; } #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO) if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif 
    return status;
}

/* Locate pArchive_filename in the central directory and extract it to a new
   disk file pDst_filename. */
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}

/* Extract the entry at file_index into a FILE* the caller already opened.
   The caller keeps ownership of pFile. */
mz_bool mz_zip_reader_extract_to_cfile(mz_zip_archive *pZip, mz_uint file_index, MZ_FILE *pFile, mz_uint flags)
{
    mz_zip_archive_file_stat file_stat;

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    if ((file_stat.m_is_directory) || (!file_stat.m_is_supported))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);

    return mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags);
}

/* Name-based variant of mz_zip_reader_extract_to_cfile(). */
mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, const char *pArchive_filename, MZ_FILE *pFile, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_cfile(pZip, file_index, pFile, flags);
}
#endif /* #ifndef MINIZ_NO_STDIO */

/* Extraction callback that folds the produced bytes into a running CRC32
   accumulator; pOpaque points at the mz_uint32 accumulator. */
static size_t mz_zip_compute_crc32_callback(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_uint32 *p = (mz_uint32 *)pOpaque;
    (void)file_ofs;
    *p = (mz_uint32)mz_crc32(*p, (const mz_uint8 *)pBuf, n);
    return n;
}

/* Validate a single archive entry: cross-check the local directory header
   against the central-directory record (filename, sizes, CRC32, optional Zip64
   extended-information field, and any trailing data descriptor), and — unless
   MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY is set — fully decompress the entry and
   verify its CRC32. Only stored and DEFLATE entries are supported. */
mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags)
{
    mz_zip_archive_file_stat file_stat;
    mz_zip_internal_state *pState;
    const mz_uint8 *pCentral_dir_header;
    mz_bool found_zip64_ext_data_in_cdir = MZ_FALSE;
    mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE;
    /* mz_uint32 array guarantees alignment for the raw local-header bytes. */
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    mz_uint64 local_header_ofs = 0;
    mz_uint32 local_header_filename_len, local_header_extra_len, local_header_crc32;
    mz_uint64 local_header_comp_size, local_header_uncomp_size;
    mz_uint32 uncomp_crc32 = MZ_CRC32_INIT;
    mz_bool has_data_descriptor;
    mz_uint32 local_header_bit_flags;

    mz_zip_array file_data_array;
    mz_zip_array_init(&file_data_array, 1);

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* NOTE(review): '>' looks like an off-by-one ('>=' expected, since valid
       indices are 0..m_total_files-1). Benign in practice: for
       file_index == m_total_files mz_zip_get_cdh() presumably returns NULL and
       the stat call below fails — confirm against mz_zip_get_cdh(). */
    if (file_index > pZip->m_total_files)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    pCentral_dir_header = mz_zip_get_cdh(pZip, file_index);

    if (!mz_zip_file_stat_internal(pZip, file_index, pCentral_dir_header, &file_stat, &found_zip64_ext_data_in_cdir))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_uncomp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_is_encrypted)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports stored and deflate. */
    if ((file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    if (!file_stat.m_is_supported)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);

    /* Read and parse the local directory entry. */
    local_header_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    local_header_filename_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS);
    local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    local_header_comp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS);
    local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS);
    local_header_crc32 = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_CRC32_OFS);
    local_header_bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
    /* Bit 3 of the general-purpose flags: sizes/CRC follow the data in a
       trailing data descriptor. */
    has_data_descriptor = (local_header_bit_flags & 8) != 0;

    if (local_header_filename_len != strlen(file_stat.m_filename))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    /* One scratch buffer reused for both the filename and the extra field. */
    if (!mz_zip_array_resize(pZip, &file_data_array, MZ_MAX(local_header_filename_len, local_header_extra_len), MZ_FALSE))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    if (local_header_filename_len)
    {
        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE, file_data_array.m_p, local_header_filename_len) != local_header_filename_len)
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        /* I've seen 1 archive that had the same pathname, but used backslashes in
           the local dir and forward slashes in the central dir. Do we care about
           this? For now, this case will fail validation. */
        if (memcmp(file_stat.m_filename, file_data_array.m_p, local_header_filename_len) != 0)
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }

    /* When the 32-bit size fields are saturated, the real sizes live in the
       Zip64 extended-information extra field. */
    if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX)))
    {
        mz_uint32 extra_size_remaining = local_header_extra_len;
        const mz_uint8 *pExtra_data = (const mz_uint8 *)file_data_array.m_p;

        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len, file_data_array.m_p, local_header_extra_len) != local_header_extra_len)
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        do
        {
            mz_uint32 field_id, field_data_size, field_total_size;

            /* NOTE(review): this early return (and the one below) bypasses the
               handle_failure cleanup, leaking file_data_array's heap block on a
               malformed extra field — should be 'goto handle_failure'. */
            if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            field_id = MZ_READ_LE16(pExtra_data);
            field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
            field_total_size = field_data_size + sizeof(mz_uint16) * 2;

            if (field_total_size > extra_size_remaining)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
            {
                const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32);

                if (field_data_size < sizeof(mz_uint64) * 2)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
                    goto handle_failure;
                }

                local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data);
                local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64));

                found_zip64_ext_data_in_ldir = MZ_TRUE;
                break;
            }

            pExtra_data += field_total_size;
            extra_size_remaining -= field_total_size;
        } while (extra_size_remaining);
    }

    /* TODO: parse local header extra data when local_header_comp_size is
       0xFFFFFFFF! (big_descriptor.zip) */
    /* I've seen zips in the wild with the data descriptor bit set, but proper
       local header values and bogus data descriptors */
    if ((has_data_descriptor) && (!local_header_comp_size) && (!local_header_crc32))
    {
        mz_uint8 descriptor_buf[32];
        mz_bool has_id;
        const mz_uint8 *pSrc;
        mz_uint32 file_crc32;
        mz_uint64 comp_size = 0, uncomp_size = 0;

        /* Zip64 descriptors carry 64-bit sizes (6 uint32 words incl. optional
           signature); classic descriptors carry 32-bit sizes (4 words). */
        mz_uint32 num_descriptor_uint32s = ((pState->m_zip64) || (found_zip64_ext_data_in_ldir)) ? 6 : 4;

        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size, descriptor_buf, sizeof(mz_uint32) * num_descriptor_uint32s) != (sizeof(mz_uint32) * num_descriptor_uint32s))
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        /* The 0x08074b50 signature word is optional per the ZIP spec. */
        has_id = (MZ_READ_LE32(descriptor_buf) == MZ_ZIP_DATA_DESCRIPTOR_ID);
        pSrc = has_id ? (descriptor_buf + sizeof(mz_uint32)) : descriptor_buf;

        file_crc32 = MZ_READ_LE32(pSrc);

        if ((pState->m_zip64) || (found_zip64_ext_data_in_ldir))
        {
            comp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32));
            uncomp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32) + sizeof(mz_uint64));
        }
        else
        {
            comp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32));
            uncomp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32) + sizeof(mz_uint32));
        }

        if ((file_crc32 != file_stat.m_crc32) || (comp_size != file_stat.m_comp_size) || (uncomp_size != file_stat.m_uncomp_size))
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }
    else
    {
        if ((local_header_crc32 != file_stat.m_crc32) || (local_header_comp_size != file_stat.m_comp_size) || (local_header_uncomp_size != file_stat.m_uncomp_size))
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }

    mz_zip_array_clear(pZip, &file_data_array);

    if ((flags & MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY) == 0)
    {
        if (!mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_compute_crc32_callback, &uncomp_crc32, 0))
            return MZ_FALSE;

        /* 1 more check to be sure, although the extract checks too. */
        if (uncomp_crc32 != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            return MZ_FALSE;
        }
    }

    return MZ_TRUE;

handle_failure:
    mz_zip_array_clear(pZip, &file_data_array);

    return MZ_FALSE;
}

/* Validate every entry in the archive (optionally re-checking that each name
   locates back to the same index via MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG),
   after basic zip64/non-zip64 size sanity checks. */
mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags)
{
    mz_zip_internal_state *pState;
    uint32_t i;

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    /* Basic sanity checks */
    if (!pState->m_zip64)
    {
        if (pZip->m_total_files > MZ_UINT16_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (pZip->m_archive_size > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }
    else
    {
        if (pZip->m_total_files >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (pState->m_central_dir.m_size >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }

    for (i = 0; i < pZip->m_total_files; i++)
    {
        if (MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG & flags)
        {
            mz_uint32 found_index;
            mz_zip_archive_file_stat stat;

            if (!mz_zip_reader_file_stat(pZip, i, &stat))
                return MZ_FALSE;

            if (!mz_zip_reader_locate_file_v2(pZip, stat.m_filename, NULL, 0, &found_index))
                return MZ_FALSE;

            /* This check can fail if there are duplicate filenames in the archive
               (which we don't check for when writing - that's up to the user) */
            if (found_index != i)
                return mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
        }

        if (!mz_zip_validate_file(pZip, i, flags))
            return MZ_FALSE;
    }

    return MZ_TRUE;
}

/* Open an in-memory archive, validate it, and tear it down again; reports the
   first error through *pErr when provided. */
mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr)
{
    mz_bool success = MZ_TRUE;
    mz_zip_archive zip;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    if ((!pMem) || (!size))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    mz_zip_zero_struct(&zip);

    if (!mz_zip_reader_init_mem(&zip, pMem, size, flags))
    {
        if (pErr)
            *pErr =
zip.m_last_error;
        return MZ_FALSE;
    }

    if (!mz_zip_validate_archive(&zip, flags))
    {
        actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (!mz_zip_reader_end_internal(&zip, success))
    {
        /* Keep the first error seen; end_internal may also fail. */
        if (!actual_err)
            actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (pErr)
        *pErr = actual_err;

    return success;
}

#ifndef MINIZ_NO_STDIO
/* Open an archive file from disk, validate it, and tear it down again; reports
   the first error through *pErr when provided. */
mz_bool mz_zip_validate_file_archive(const char *pFilename, mz_uint flags, mz_zip_error *pErr)
{
    mz_bool success = MZ_TRUE;
    mz_zip_archive zip;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    if (!pFilename)
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    mz_zip_zero_struct(&zip);

    if (!mz_zip_reader_init_file_v2(&zip, pFilename, flags, 0, 0))
    {
        if (pErr)
            *pErr = zip.m_last_error;
        return MZ_FALSE;
    }

    if (!mz_zip_validate_archive(&zip, flags))
    {
        actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (!mz_zip_reader_end_internal(&zip, success))
    {
        if (!actual_err)
            actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (pErr)
        *pErr = actual_err;

    return success;
}
#endif /* #ifndef MINIZ_NO_STDIO */

/* ------------------- .ZIP archive writing */

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

/* Endian-independent little-endian store helpers for header serialization. */
static MZ_FORCEINLINE void mz_write_le16(mz_uint8 *p, mz_uint16 v)
{
    p[0] = (mz_uint8)v;
    p[1] = (mz_uint8)(v >> 8);
}
static MZ_FORCEINLINE void mz_write_le32(mz_uint8 *p, mz_uint32 v)
{
    p[0] = (mz_uint8)v;
    p[1] = (mz_uint8)(v >> 8);
    p[2] = (mz_uint8)(v >> 16);
    p[3] = (mz_uint8)(v >> 24);
}
static MZ_FORCEINLINE void mz_write_le64(mz_uint8 *p, mz_uint64 v)
{
    mz_write_le32(p, (mz_uint32)v);
    mz_write_le32(p + sizeof(mz_uint32), (mz_uint32)(v >> 32));
}

#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
#define MZ_WRITE_LE64(p, v) mz_write_le64((mz_uint8 *)(p), (mz_uint64)(v))

/* Write callback backing heap-based archives: grows the state's memory block
   (doubling capacity) as needed and copies n bytes in at file_ofs. */
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_zip_internal_state *pState = pZip->m_pState;
    mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);

    if (!n)
        return 0;

    /* An allocation this big is likely to just fail on 32-bit systems, so don't even go there. */
    if ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
        return 0;
    }

    if (new_size > pState->m_mem_capacity)
    {
        void *pNew_block;
        size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);

        /* Geometric growth keeps amortized append cost O(1). */
        while (new_capacity < new_size)
            new_capacity *= 2;

        if (NULL == (pNew_block = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            return 0;
        }

        pState->m_pMem = pNew_block;
        pState->m_mem_capacity = new_capacity;
    }
    memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
    pState->m_mem_size = (size_t)new_size;
    return n;
}

/* Tear down a writer: free the central-directory arrays, close the output file
   (only when this archive owns it, i.e. MZ_ZIP_TYPE_FILE), free the heap block
   for heap-backed archives, and free the state. Returns MZ_FALSE on parameter
   errors or if closing the file failed. */
static mz_bool mz_zip_writer_end_internal(mz_zip_archive *pZip, mz_bool set_last_error)
{
    mz_zip_internal_state *pState;
    mz_bool status = MZ_TRUE;

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
    {
        if (set_last_error)
            mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile)
    {
        if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
        {
            if (MZ_FCLOSE(pState->m_pFile) == EOF)
            {
                if (set_last_error)
                    mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED);
                status = MZ_FALSE;
            }
        }

        pState->m_pFile = NULL;
    }
#endif /* #ifndef MINIZ_NO_STDIO */

    if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
        pState->m_pMem = NULL;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
    pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
    return status;
}

/* Core writer initialization: validates callbacks and alignment, installs the
   default allocators, allocates/zeroes the internal state, and puts the archive
   in MZ_ZIP_MODE_WRITING. existing_size reserves space already present at the
   front of the output (the archive proper starts after it). */
mz_bool mz_zip_writer_init_v2(mz_zip_archive *pZip, mz_uint64 existing_size, mz_uint flags)
{
    mz_bool zip64 = (flags & MZ_ZIP_FLAG_WRITE_ZIP64) != 0;

    if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
    {
        if (!pZip->m_pRead)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    if (pZip->m_file_offset_alignment)
    {
        /* Ensure user specified file offset alignment is a power of 2. */
        if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    if (!pZip->m_pAlloc)
        pZip->m_pAlloc = miniz_def_alloc_func;
    if (!pZip->m_pFree)
        pZip->m_pFree = miniz_def_free_func;
    if (!pZip->m_pRealloc)
        pZip->m_pRealloc = miniz_def_realloc_func;

    pZip->m_archive_size = existing_size;
    pZip->m_central_directory_file_ofs = 0;
    pZip->m_total_files = 0;

    if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));

    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));

    pZip->m_pState->m_zip64 = zip64;
    pZip->m_pState->m_zip64_has_extended_info_fields = zip64;

    pZip->m_zip_type = MZ_ZIP_TYPE_USER;
    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;

    return MZ_TRUE;
}

/* Convenience wrapper: mz_zip_writer_init_v2() with no flags. */
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size)
{
    return mz_zip_writer_init_v2(pZip, existing_size, 0);
}

/* Initialize a writer that builds the archive in a growable heap block
   (written via mz_zip_heap_write_func). An initial capacity of at least
   size_to_reserve_at_beginning is pre-allocated when requested. */
mz_bool mz_zip_writer_init_heap_v2(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size, mz_uint flags)
{
    pZip->m_pWrite = mz_zip_heap_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_mem_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_HEAP;

    if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning)))
    {
        if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, initial_allocation_size)))
        {
            mz_zip_writer_end_internal(pZip, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }
        pZip->m_pState->m_mem_capacity = initial_allocation_size;
    }

    return MZ_TRUE;
}

/* Convenience wrapper: heap writer with no flags. */
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size)
{
    return mz_zip_writer_init_heap_v2(pZip, size_to_reserve_at_beginning, initial_allocation_size, 0);
}

#ifndef MINIZ_NO_STDIO
/* Write callback backing file-based archives: seeks only when the current
   position differs from the requested (start-offset adjusted) target, then
   writes n bytes. Returns bytes written (0 signals failure to the caller). */
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);

    file_ofs += pZip->m_pState->m_file_archive_start_ofs;

    if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);
        return 0;
    }

    return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

/* Convenience wrapper: file writer with no flags. */
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning)
{
    return mz_zip_writer_init_file_v2(pZip, pFilename, size_to_reserve_at_beginning, 0);
}

/* Initialize a writer that creates (truncates) pFilename on disk; "w+b" is used
   when reading back is requested. Optionally pre-writes a zero-filled region of
   size_to_reserve_at_beginning bytes at the start of the file. */
mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning, mz_uint flags)
{
    MZ_FILE *pFile;

    pZip->m_pWrite = mz_zip_file_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags))
        return MZ_FALSE;

    if (NULL == (pFile = MZ_FOPEN(pFilename, (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) ? "w+b" : "wb")))
    {
        mz_zip_writer_end(pZip);
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);
    }

    pZip->m_pState->m_pFile = pFile;
    pZip->m_zip_type = MZ_ZIP_TYPE_FILE;

    if (size_to_reserve_at_beginning)
    {
        mz_uint64 cur_ofs = 0;
        char buf[4096];

        MZ_CLEAR_OBJ(buf);

        do
        {
            size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
            if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n)
            {
                mz_zip_writer_end(pZip);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
            }
            cur_ofs += n;
            size_to_reserve_at_beginning -= n;
        } while (size_to_reserve_at_beginning);
    }

    return MZ_TRUE;
}

/* Initialize a writer on an already-open FILE*; the archive starts at the
   stream's current position and the caller keeps ownership of pFile. */
mz_bool mz_zip_writer_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint flags)
{
    pZip->m_pWrite = mz_zip_file_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, 0, flags))
        return MZ_FALSE;

    pZip->m_pState->m_pFile = pFile;
    pZip->m_pState->m_file_archive_start_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
    pZip->m_zip_type = MZ_ZIP_TYPE_CFILE;

    return MZ_TRUE;
}
#endif /* #ifndef MINIZ_NO_STDIO */

/* Convert an archive opened for reading into one that can be appended to.
   Handles the three backing stores (stdio file — reopened "r+b", heap memory
   block, or user callbacks) and switches the mode to writing. */
mz_bool mz_zip_writer_init_from_reader_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags)
{
    mz_zip_internal_state *pState;

    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (flags & MZ_ZIP_FLAG_WRITE_ZIP64)
    {
        /* We don't support converting a non-zip64 file to zip64 - this seems like
           more trouble than it's worth. (What about the existing 32-bit data
           descriptors that could follow the compressed data?) */
        if (!pZip->m_pState->m_zip64)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    /* No sense in trying to write to an archive that's already at the support max size */
    if (pZip->m_pState->m_zip64)
    {
        if (pZip->m_total_files == MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if (pZip->m_total_files == MZ_UINT16_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        if ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
    }

    pState = pZip->m_pState;

    if (pState->m_pFile)
    {
#ifdef MINIZ_NO_STDIO
        (void)pFilename;
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
#else
        if (pZip->m_pIO_opaque != pZip)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

        if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
        {
            if (!pFilename)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

            /* Archive is being read from stdio and was originally opened only for
               reading. Try to reopen as writable. */
            if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile)))
            {
                /* The mz_zip_archive is now in a bogus state because pState->m_pFile
                   is NULL, so just close it. */
                mz_zip_reader_end_internal(pZip, MZ_FALSE);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);
            }
        }

        pZip->m_pWrite = mz_zip_file_write_func;
        pZip->m_pNeeds_keepalive = NULL;
#endif /* #ifdef MINIZ_NO_STDIO */
    }
    else if (pState->m_pMem)
    {
        /* Archive lives in a memory block. Assume it's from the heap that we can
           resize using the realloc callback. */
        if (pZip->m_pIO_opaque != pZip)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

        pState->m_mem_capacity = pState->m_mem_size;
        pZip->m_pWrite = mz_zip_heap_write_func;
        pZip->m_pNeeds_keepalive = NULL;
    }
    /* Archive is being read via a user provided read function - make sure the
       user has specified a write function too. */
    else if (!pZip->m_pWrite)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* Start writing new files at the archive's current central directory
       location. (The existing central directory will be rewritten later.) */
    /* TODO: We could add a flag that lets the user start writing immediately
       AFTER the existing central dir - this would be safer. */
    pZip->m_archive_size = pZip->m_central_directory_file_ofs;
    pZip->m_central_directory_file_ofs = 0;

    /* Clear the sorted central dir offsets, they aren't useful or maintained now. */
    /* Even though we're now in write mode, files can still be extracted and
       verified, but file locates will be slow. */
    /* TODO: We could easily maintain the sorted central directory offsets. */
    mz_zip_array_clear(pZip, &pZip->m_pState->m_sorted_central_dir_offsets);

    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;

    return MZ_TRUE;
}

/* Convenience wrapper: mz_zip_writer_init_from_reader_v2() with no flags. */
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename)
{
    return mz_zip_writer_init_from_reader_v2(pZip, pFilename, 0);
}

/* TODO: pArchive_name is a terrible name here! */
/* Add a memory buffer to the archive under pArchive_name with default metadata
   (no comment, current time, auto CRC). */
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags)
{
    return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0);
}

/* Per-add bookkeeping threaded through the tdefl put-buf callback: target
   archive, current write offset, and compressed bytes emitted so far. */
typedef struct
{
    mz_zip_archive *m_pZip;
    mz_uint64 m_cur_archive_file_ofs;
    mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

/* tdefl output callback: forwards each compressed chunk to the archive's write
   callback and advances the offset/size counters. */
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser)
{
    mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
    if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len)
        return MZ_FALSE;

    pState->m_cur_archive_file_ofs += len;
    pState->m_comp_size += len;
    return MZ_TRUE;
}

/* Max sizes of the Zip64 extended-information extra field: the local header
   variant carries 2 u64 values, the central variant up to 3 (adds the local
   header offset). Both include the 2 u16 tag/size prefix. */
#define MZ_ZIP64_MAX_LOCAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 2)
#define MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 3)

/* Serialize a Zip64 extended-information extra field into pBuf, including only
   the values whose pointers are non-NULL (in spec order: uncompressed size,
   compressed size, local header offset). Returns the total field size. */
static mz_uint32 mz_zip_writer_create_zip64_extra_data(mz_uint8 *pBuf, mz_uint64 *pUncomp_size, mz_uint64 *pComp_size, mz_uint64 *pLocal_header_ofs)
{
    mz_uint8 *pDst = pBuf;
    mz_uint32 field_size = 0;

    MZ_WRITE_LE16(pDst + 0, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID);
    /* Data size is patched in at the end, once we know which values were written. */
    MZ_WRITE_LE16(pDst + 2, 0);
    pDst += sizeof(mz_uint16) * 2;

    if (pUncomp_size)
    {
        MZ_WRITE_LE64(pDst, *pUncomp_size);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    if (pComp_size)
    {
        MZ_WRITE_LE64(pDst, *pComp_size);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    if (pLocal_header_ofs)
    {
        MZ_WRITE_LE64(pDst, *pLocal_header_ofs);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    MZ_WRITE_LE16(pBuf + 2, field_size);

    return (mz_uint32)(pDst - pBuf);
}

/* Serialize a ZIP local directory header into pDst. 32-bit size fields are
   saturated to MZ_UINT32_MAX (signalling Zip64 extra data carries the truth). */
static mz_bool mz_zip_writer_create_local_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date)
{
    (void)pZip;
    memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
    /* "Version needed": 2.0 for deflate, any for stored. */
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
    return MZ_TRUE;
}

/* Serialize a ZIP central directory file header into pDst. As with the local
   header, 32-bit size/offset fields saturate to MZ_UINT32_MAX for Zip64. */
static mz_bool mz_zip_writer_create_central_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes)
{
    (void)pZip;
    memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_MIN(local_header_ofs, MZ_UINT32_MAX));
    return MZ_TRUE;
}

/* Build a central directory record for a just-written entry and append it —
   header, filename, extra data, user extra data, comment, plus its offset —
   to the in-memory central directory arrays. Rolls the array back to its
   original size if any append fails. */
static mz_bool mz_zip_writer_add_to_central_dir(mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes, const char *user_extra_data, mz_uint user_extra_data_len)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
    size_t orig_central_dir_size = pState->m_central_dir.m_size;
    mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];

    if (!pZip->m_pState->m_zip64)
    {
        if (local_header_ofs > 0xFFFFFFFF)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
    }

    /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
    if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + user_extra_data_len + comment_size) >= MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

    if
    (!mz_zip_writer_create_central_dir_header(pZip, central_dir_header, filename_size, (mz_uint16)(extra_size + user_extra_data_len), comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes))
        return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

    if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, user_extra_data, user_extra_data_len)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1)))
    {
        /* Try to resize the central directory array back into its original state. */
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    return MZ_TRUE;
}

/* Basic ZIP archive filename validity checks: valid filenames cannot start with
   a forward slash, cannot contain a drive letter, and cannot use DOS-style
   backward slashes. */
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name)
{
    if (*pArchive_name == '/')
        return MZ_FALSE;

    while (*pArchive_name)
    {
        if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
            return MZ_FALSE;

        pArchive_name++;
    }

    return MZ_TRUE;
}

/* Number of padding bytes needed so the next entry starts on the configured
   m_file_offset_alignment boundary (which init verified is a power of two —
   hence the mask arithmetic). Returns 0 when alignment is disabled. */
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip)
{
    mz_uint32 n;
    if (!pZip->m_file_offset_alignment)
        return 0;
    n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
    return (mz_uint)((pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1));
}

/* Write n zero bytes at cur_file_ofs in 4 KiB chunks (used for alignment
   padding and reserved regions). */
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n)
{
    char buf[4096];

    memset(buf, 0, MZ_MIN(sizeof(buf), n));
    while (n)
    {
        mz_uint32 s = MZ_MIN(sizeof(buf), n);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_file_ofs += s;
        n -= s;
    }
    return MZ_TRUE;
}

/* mz_zip_writer_add_mem_ex_v2() without the mtime/user-extra-data extensions. */
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32)
{
    return mz_zip_writer_add_mem_ex_v2(pZip, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, uncomp_size, uncomp_crc32, NULL, NULL, 0, NULL, 0);
}

/* Full-featured "add buffer to archive" entry point: writes a local header,
   stores or deflates pBuf, and records a central directory entry. Supports
   pre-compressed data (MZ_ZIP_FLAG_COMPRESSED_DATA, with caller-supplied
   uncomp_size/crc32), explicit modification times, and per-entry user extra
   data for both header locations. (Function continues past this chunk.) */
mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32, MZ_TIME_T *last_modified, const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len)
{
    mz_uint16 method = 0, dos_time = 0, dos_date = 0;
    mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
    mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
    size_t archive_name_size;
    mz_uint8
local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  mz_uint8 *pExtra_data = NULL;
  mz_uint32 extra_size = 0;
  mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE];
  mz_uint16 bit_flags = 0;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;

  /* A trailing data descriptor (locator) is emitted whenever there is actual
     payload data to describe. */
  if (uncomp_size ||
      (buf_size && !(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)))
    bit_flags |= MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;

  if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
    bit_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (level > MZ_UBER_COMPRESSION))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  pState = pZip->m_pState;

  if (pState->m_zip64) {
    if (pZip->m_total_files == MZ_UINT32_MAX)
      return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
  } else {
    /* Auto-upgrade to zip64 instead of failing once the 16-bit entry count
       or 32-bit size limits would be exceeded. */
    if (pZip->m_total_files == MZ_UINT16_MAX) {
      pState->m_zip64 = MZ_TRUE;
      /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */
    }
    if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) {
      pState->m_zip64 = MZ_TRUE;
      /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
    }
  }

  /* uncomp_size is only meaningful when the caller passes pre-compressed
     data. */
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

#ifndef MINIZ_NO_TIME
  if (last_modified != NULL) {
    mz_zip_time_t_to_dos_time(*last_modified, &dos_time, &dos_date);
  } else {
    /* No explicit timestamp supplied: stamp the entry with "now". */
    MZ_TIME_T cur_time;
    time(&cur_time);
    mz_zip_time_t_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif /* #ifndef MINIZ_NO_TIME */

  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      /* Tiny payloads never benefit from deflate; store them raw. */
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > MZ_UINT16_MAX)
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
  if (((mz_uint64)pState->m_central_dir.m_size +
       MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size +
       MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX)
    return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

  if (!pState->m_zip64) {
    /* Bail early if the archive would obviously become too large */
    if ((pZip->m_archive_size + num_alignment_padding_bytes +
         MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size +
         MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size +
         user_extra_data_len + pState->m_central_dir.m_size +
         MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + user_extra_data_central_len +
         MZ_ZIP_DATA_DESCRIPTER_SIZE32) > 0xFFFFFFFF) {
      pState->m_zip64 = MZ_TRUE;
      /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
    }
  }

  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    /* Set DOS Subdirectory attribute bit. */
    ext_attributes |= MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG;

    /* Subdirectories cannot contain data. */
    if ((buf_size) || (uncomp_size))
      return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
  }

  /* Try to do any allocations before writing to the archive, so if an
     allocation fails the file remains unmodified. (A good idea if we're doing
     an in-place modification.) */
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size +
              (pState->m_zip64 ? MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE
                               : 0))) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
  }

  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }

  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs += num_alignment_padding_bytes;

  MZ_CLEAR_OBJ(local_dir_header);

  if (!store_data_uncompressed ||
      (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    method = MZ_DEFLATED;
  }

  if (pState->m_zip64) {
    if (uncomp_size >= MZ_UINT32_MAX ||
        local_dir_header_ofs >= MZ_UINT32_MAX) {
      pExtra_data = extra_data;
      /* comp_size is still 0 here; the extra field is regenerated with the
         real sizes after compression (see below). */
      extra_size = mz_zip_writer_create_zip64_extra_data(
          extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
          (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL,
          (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs
                                                  : NULL);
    }

    if (!mz_zip_writer_create_local_dir_header(
            pZip, local_dir_header, (mz_uint16)archive_name_size,
            (mz_uint16)(extra_size + user_extra_data_len), 0, 0, 0, method,
            bit_flags, dos_time, dos_date))
      return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
    /* NOTE(review): pComp appears to be leaked on this and the following
       header/extra-data write failure paths; only the filename-write failure
       frees it. Confirm against upstream before changing. */

    if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                       local_dir_header, sizeof(local_dir_header)) !=
        sizeof(local_dir_header))
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += sizeof(local_dir_header);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                       archive_name_size) != archive_name_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += archive_name_size;

    if (pExtra_data != NULL) {
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data,
                         extra_size) != extra_size)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

      cur_archive_file_ofs += extra_size;
    }
  } else {
    if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX))
      return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    if (!mz_zip_writer_create_local_dir_header(
            pZip, local_dir_header, (mz_uint16)archive_name_size,
            (mz_uint16)user_extra_data_len, 0, 0, 0, method, bit_flags,
            dos_time, dos_date))
      return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                       local_dir_header, sizeof(local_dir_header)) !=
        sizeof(local_dir_header))
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += sizeof(local_dir_header);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                       archive_name_size) != archive_name_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += archive_name_size;
  }

  if (user_extra_data_len > 0) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       user_extra_data,
                       user_extra_data_len) != user_extra_data_len)
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += user_extra_data_len;
  }

  if (store_data_uncompressed) {
    /* Store path: write the payload verbatim. */
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
  } else if (buf_size) {
    /* Deflate path: tdefl streams compressed bytes straight into the archive
       via mz_zip_writer_add_put_buf_callback. */
    mz_zip_writer_add_state state;

    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;

    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED);
    }

    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
  }

  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;

  if (uncomp_size) {
    /* Write the data descriptor ("local dir footer") trailing the payload;
       64-bit sizes are used when a zip64 extra field is present. */
    mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64];
    mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32;

    MZ_ASSERT(bit_flags & MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR);

    MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID);
    MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32);
    if (pExtra_data == NULL) {
      if (comp_size > MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

      MZ_WRITE_LE32(local_dir_footer + 8, comp_size);
      MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size);
    } else {
      MZ_WRITE_LE64(local_dir_footer + 8, comp_size);
      MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size);
      local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64;
    }

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       local_dir_footer,
                       local_dir_footer_size) != local_dir_footer_size)
      return MZ_FALSE;

    cur_archive_file_ofs += local_dir_footer_size;
  }

  if (pExtra_data != NULL) {
    /* Regenerate the zip64 extra data now that comp_size is final. */
    extra_size = mz_zip_writer_create_zip64_extra_data(
        extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
        (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL,
        (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs
                                                : NULL);
  }

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data,
          (mz_uint16)extra_size, pComment, comment_size, uncomp_size,
          comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date,
          local_dir_header_ofs, ext_attributes, user_extra_data_central,
          user_extra_data_central_len))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* Streams size_to_add bytes from the already-positioned pSrc_file into the
   archive under pArchive_name, deflating unless the effective level is 0. */
mz_bool mz_zip_writer_add_cfile(
    mz_zip_archive *pZip, const char *pArchive_name, MZ_FILE *pSrc_file,
    mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment,
    mz_uint16 comment_size, mz_uint level_and_flags,
    const char *user_extra_data, mz_uint user_extra_data_len,
    const char *user_extra_data_central, mz_uint user_extra_data_central_len) {
  mz_uint16 gen_flags = MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs, cur_archive_file_ofs = pZip->m_archive_size,
            uncomp_size = size_to_add, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  mz_uint8 *pExtra_data = NULL;
  mz_uint32 extra_size = 0;
  mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE];
  mz_zip_internal_state *pState;

  if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
    gen_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;

  /* Sanity checks */
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return
mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  pState = pZip->m_pState;

  if ((!pState->m_zip64) && (uncomp_size > MZ_UINT32_MAX)) {
    /* Source file is too large for non-zip64 */
    /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
    pState->m_zip64 = MZ_TRUE;
  }

  /* We could support this, but why? */
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

  if (pState->m_zip64) {
    if (pZip->m_total_files == MZ_UINT32_MAX)
      return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
  } else {
    if (pZip->m_total_files == MZ_UINT16_MAX) {
      /* Auto-upgrade to zip64 rather than refusing the entry. */
      pState->m_zip64 = MZ_TRUE;
      /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */
    }
  }

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > MZ_UINT16_MAX)
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
  if (((mz_uint64)pState->m_central_dir.m_size +
       MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size +
       MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX)
    return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

  if (!pState->m_zip64) {
    /* Bail early if the archive would obviously become too large */
    if ((pZip->m_archive_size + num_alignment_padding_bytes +
         MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size +
         MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size +
         user_extra_data_len + pState->m_central_dir.m_size +
         MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 1024 +
         MZ_ZIP_DATA_DESCRIPTER_SIZE32 + user_extra_data_central_len) >
        0xFFFFFFFF) {
      pState->m_zip64 = MZ_TRUE;
      /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
    }
  }

#ifndef MINIZ_NO_TIME
  if (pFile_time) {
    mz_zip_time_t_to_dos_time(*pFile_time, &dos_time, &dos_date);
  }
#endif

  /* Tiny files are always stored raw. */
  if (uncomp_size <= 3)
    level = 0;

  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes)) {
    return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
  }

  cur_archive_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_archive_file_ofs;

  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((cur_archive_file_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }

  if (uncomp_size && level) {
    method = MZ_DEFLATED;
  }

  MZ_CLEAR_OBJ(local_dir_header);
  if (pState->m_zip64) {
    if (uncomp_size >= MZ_UINT32_MAX ||
        local_dir_header_ofs >= MZ_UINT32_MAX) {
      pExtra_data = extra_data;
      /* comp_size is 0 here; the extra field is regenerated with the final
         sizes once compression is finished (see below). */
      extra_size = mz_zip_writer_create_zip64_extra_data(
          extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
          (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL,
          (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs
                                                  : NULL);
    }

    if (!mz_zip_writer_create_local_dir_header(
            pZip, local_dir_header, (mz_uint16)archive_name_size,
            (mz_uint16)(extra_size + user_extra_data_len), 0, 0, 0, method,
            gen_flags, dos_time, dos_date))
      return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       local_dir_header, sizeof(local_dir_header)) !=
        sizeof(local_dir_header))
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += sizeof(local_dir_header);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                       archive_name_size) != archive_name_size) {
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += archive_name_size;

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data,
                       extra_size) != extra_size)
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += extra_size;
  } else {
    if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX))
      return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    if (!mz_zip_writer_create_local_dir_header(
            pZip, local_dir_header, (mz_uint16)archive_name_size,
            (mz_uint16)user_extra_data_len, 0, 0, 0, method, gen_flags,
            dos_time, dos_date))
      return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       local_dir_header, sizeof(local_dir_header)) !=
        sizeof(local_dir_header))
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += sizeof(local_dir_header);

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                       archive_name_size) != archive_name_size) {
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += archive_name_size;
  }

  if (user_extra_data_len > 0) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       user_extra_data,
                       user_extra_data_len) != user_extra_data_len)
      return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_archive_file_ofs += user_extra_data_len;
  }

  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    if (!level) {
      /* Level 0: raw copy from the source file, CRC'd on the fly. */
      while (uncomp_remaining) {
        mz_uint n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE,
                                    uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                            pRead_buf, n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      /* Deflate path: read/compress/write in MZ_ZIP_MAX_IO_BUF_SIZE chunks
         until the whole source file is consumed. */
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
      }

      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;

      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
      }

      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(
            uncomp_remaining, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        tdefl_flush flush = TDEFL_NO_FLUSH;

        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) {
          mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
          break;
        }

        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;

        /* Full-flush when the I/O layer's keepalive callback asks for
           forward progress. */
        if (pZip->m_pNeeds_keepalive != NULL &&
            pZip->m_pNeeds_keepalive(pZip->m_pIO_opaque))
          flush = TDEFL_FULL_FLUSH;

        status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size,
                                       uncomp_remaining ? flush
                                                        : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY) {
          mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED);
          break;
        }
      }

      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);

      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        return MZ_FALSE;
      }

      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }

  {
    /* Data descriptor trailing the payload; gen_flags always carries the
       locator bit in this function. */
    mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64];
    mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32;

    MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID);
    MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32);
    if (pExtra_data == NULL) {
      if (comp_size > MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

      MZ_WRITE_LE32(local_dir_footer + 8, comp_size);
      MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size);
    } else {
      MZ_WRITE_LE64(local_dir_footer + 8, comp_size);
      MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size);
      local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64;
    }

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                       local_dir_footer,
                       local_dir_footer_size) != local_dir_footer_size)
      return MZ_FALSE;

    cur_archive_file_ofs += local_dir_footer_size;
  }

  if (pExtra_data != NULL) {
    /* Regenerate the zip64 extra data now that comp_size is final. */
    extra_size = mz_zip_writer_create_zip64_extra_data(
        extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
        (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL,
        (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs
                                                : NULL);
  }

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data,
          (mz_uint16)extra_size, pComment, comment_size, uncomp_size,
          comp_size, uncomp_crc32, method, gen_flags, dos_time, dos_date,
          local_dir_header_ofs, ext_attributes, user_extra_data_central,
          user_extra_data_central_len))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}

/* Opens pSrc_filename, determines its size and (optionally) mtime, then
   delegates to mz_zip_writer_add_cfile(). */
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  MZ_FILE *pSrc_file = NULL;
  mz_uint64 uncomp_size = 0;
  MZ_TIME_T file_modified_time;
  MZ_TIME_T *pFile_time = NULL;
  mz_bool status;

  memset(&file_modified_time, 0, sizeof(file_modified_time));

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO)
  pFile_time = &file_modified_time;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &file_modified_time))
    return mz_zip_set_error(pZip, MZ_ZIP_FILE_STAT_FAILED);
#endif

  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

  /* Determine the file's size by seeking to its end, then rewind. */
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);

  status = mz_zip_writer_add_cfile(pZip, pArchive_name, pSrc_file, uncomp_size,
                                   pFile_time, pComment, comment_size,
                                   level_and_flags, NULL, 0, NULL, 0);

  MZ_FCLOSE(pSrc_file);

  return status;
}
#endif /* #ifndef MINIZ_NO_STDIO */

/* Rebuilds an entry's extra-field block into pNew_ext: writes a fresh zip64
   extended information field containing whichever of the supplied 64-bit
   values are non-NULL, then copies over every other pre-existing field from
   pExt (dropping any old zip64 field). */
static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array
*pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, uint32_t ext_len,
    mz_uint64 *pComp_size, mz_uint64 *pUncomp_size,
    mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start) {
  /* + 64 should be enough for any new zip64 data */
  if (!mz_zip_array_reserve(pZip, pNew_ext, ext_len + 64, MZ_FALSE))
    return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

  mz_zip_array_resize(pZip, pNew_ext, 0, MZ_FALSE);

  if ((pUncomp_size) || (pComp_size) || (pLocal_header_ofs) || (pDisk_start)) {
    /* Build the zip64 extended information field in a scratch buffer:
       header ID, a size patched in afterwards, then the requested values. */
    mz_uint8 new_ext_block[64];
    mz_uint8 *pDst = new_ext_block;
    mz_write_le16(pDst, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID);
    mz_write_le16(pDst + sizeof(mz_uint16), 0);
    pDst += sizeof(mz_uint16) * 2;

    if (pUncomp_size) {
      mz_write_le64(pDst, *pUncomp_size);
      pDst += sizeof(mz_uint64);
    }

    if (pComp_size) {
      mz_write_le64(pDst, *pComp_size);
      pDst += sizeof(mz_uint64);
    }

    if (pLocal_header_ofs) {
      mz_write_le64(pDst, *pLocal_header_ofs);
      pDst += sizeof(mz_uint64);
    }

    if (pDisk_start) {
      mz_write_le32(pDst, *pDisk_start);
      pDst += sizeof(mz_uint32);
    }

    /* Patch the field's data size now that its contents are known. */
    mz_write_le16(new_ext_block + sizeof(mz_uint16),
                  (mz_uint16)((pDst - new_ext_block) - sizeof(mz_uint16) * 2));

    if (!mz_zip_array_push_back(pZip, pNew_ext, new_ext_block,
                                pDst - new_ext_block))
      return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
  }

  if ((pExt) && (ext_len)) {
    /* Copy every pre-existing field except an old zip64 field, which is
       superseded by the freshly written block above. */
    mz_uint32 extra_size_remaining = ext_len;
    const mz_uint8 *pExtra_data = pExt;

    do {
      mz_uint32 field_id, field_data_size, field_total_size;

      if (extra_size_remaining < (sizeof(mz_uint16) * 2))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

      field_id = MZ_READ_LE16(pExtra_data);
      field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
      field_total_size = field_data_size + sizeof(mz_uint16) * 2;

      if (field_total_size > extra_size_remaining)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

      if (field_id != MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) {
        if (!mz_zip_array_push_back(pZip, pNew_ext, pExtra_data,
                                    field_total_size))
          return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
      }

      pExtra_data += field_total_size;
      extra_size_remaining -= field_total_size;
    } while (extra_size_remaining);
  }

  return MZ_TRUE;
}

/* TODO: This func is now pretty freakin complex due to zip64, split it up? */
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint src_file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes,
      src_central_dir_following_data_size;
  mz_uint64 src_archive_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 new_central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  mz_zip_archive_file_stat src_file_stat;
  mz_uint32 src_filename_len, src_comment_len, src_ext_len;
  mz_uint32 local_header_filename_size, local_header_extra_len;
  mz_uint64 local_header_comp_size, local_header_uncomp_size;
  mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE;

  /* Sanity checks */
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pSource_zip->m_pRead))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  pState = pZip->m_pState;

  /* Don't support copying files from zip64 archives to non-zip64, even though
     in some cases this is possible */
  if ((pSource_zip->m_pState->m_zip64) && (!pZip->m_pState->m_zip64))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  /* Get pointer to the source central dir header and crack it */
  if (NULL ==
      (pSrc_central_header = mz_zip_get_cdh(pSource_zip, src_file_index)))
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

  if (MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_SIG_OFS) !=
      MZ_ZIP_CENTRAL_DIR_HEADER_SIG)
    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

  src_filename_len =
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS); src_comment_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); src_ext_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS); src_central_dir_following_data_size = src_filename_len + src_ext_len + src_comment_len; /* TODO: We don't support central dir's >= MZ_UINT32_MAX bytes right now (+32 fudge factor in case we need to add more extra data) */ if ((pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + 32) >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); if (!pState->m_zip64) { if (pZip->m_total_files == MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { /* TODO: Our zip64 support still has some 32-bit limits that may not be worth fixing. */ if (pZip->m_total_files == MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } if (!mz_zip_file_stat_internal(pSource_zip, src_file_index, pSrc_central_header, &src_file_stat, NULL)) return MZ_FALSE; cur_src_file_ofs = src_file_stat.m_local_header_ofs; cur_dst_file_ofs = pZip->m_archive_size; /* Read the source archive's local dir header */ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; /* Compute the total size we need to copy (filename+extra data+compressed data) */ local_header_filename_size = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS); local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); local_header_comp_size = MZ_READ_LE32(pLocal_header + 
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS); local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS); src_archive_bytes_remaining = local_header_filename_size + local_header_extra_len + src_file_stat.m_comp_size; /* Try to find a zip64 extended information field */ if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX))) { mz_zip_array file_data_array; const mz_uint8 *pExtra_data; mz_uint32 extra_size_remaining = local_header_extra_len; mz_zip_array_init(&file_data_array, 1); if (!mz_zip_array_resize(pZip, &file_data_array, local_header_extra_len, MZ_FALSE)) { return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, src_file_stat.m_local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_size, file_data_array.m_p, local_header_extra_len) != local_header_extra_len) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } pExtra_data = (const mz_uint8 *)file_data_array.m_p; do { mz_uint32 field_id, field_data_size, field_total_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); field_total_size = field_data_size + sizeof(mz_uint16) * 2; if (field_total_size > extra_size_remaining) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32); if (field_data_size < sizeof(mz_uint64) * 2) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data); 
/* NOTE(review): continuation of mz_zip_writer_add_from_zip_reader() - we are
   inside the do/while loop scanning the source local header's extra data for
   a zip64 extended-information field.  The function's head is above. */
                        local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64)); /* may be 0 if there's a descriptor */

                        found_zip64_ext_data_in_ldir = MZ_TRUE;
                        break;
                    }

                    pExtra_data += field_total_size;
                    extra_size_remaining -= field_total_size;
                } while (extra_size_remaining);

                mz_zip_array_clear(pZip, &file_data_array);
            }

    if (!pState->m_zip64)
    {
        /* Try to detect if the new archive will most likely wind up too big and bail early (+(sizeof(mz_uint32) * 4) is for the optional descriptor which could be present, +64 is a fudge factor). */
        /* We also check when the archive is finalized so this doesn't need to be perfect. */
        mz_uint64 approx_new_archive_size = cur_dst_file_ofs + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + src_archive_bytes_remaining + (sizeof(mz_uint32) * 4) + pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 64;

        if (approx_new_archive_size >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }

    /* Write dest archive padding */
    if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes))
        return MZ_FALSE;
    cur_dst_file_ofs += num_alignment_padding_bytes;

    local_dir_header_ofs = cur_dst_file_ofs;
    if (pZip->m_file_offset_alignment)
    {
        MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0);
    }

    /* The original zip's local header+ext block doesn't change, even with zip64, so we can just copy it over to the dest zip */
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

    /* Copy over the source archive bytes to the dest archive, also ensure we have enough buf space to handle optional data descriptor */
    if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(32U, MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining)))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    /* Bulk-copy the (possibly compressed) file data, one IO buffer at a time. */
    while (src_archive_bytes_remaining)
    {
        n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining);
        if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        }
        cur_src_file_ofs += n;

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }
        cur_dst_file_ofs += n;

        src_archive_bytes_remaining -= n;
    }

    /* Now deal with the optional data descriptor */
    bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
    if (bit_flags & 8)
    {
        /* Copy data descriptor */
        if ((pSource_zip->m_pState->m_zip64) || (found_zip64_ext_data_in_ldir))
        {
            /* src is zip64, dest must be zip64 */

            /* name            uint32_t's */
            /* id              1 (optional in zip64?) */
            /* crc             1 */
            /* comp_size       2 */
            /* uncomp_size     2 */
            if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, (sizeof(mz_uint32) * 6)) != (sizeof(mz_uint32) * 6))
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            }

            n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID) ? 6 : 5);
        }
        else
        {
            /* src is NOT zip64 */
            mz_bool has_id;

            if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4)
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            }

            has_id = (MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID);

            if (pZip->m_pState->m_zip64)
            {
                /* dest is zip64, so upgrade the data descriptor */
                const mz_uint32 *pSrc_descriptor = (const mz_uint32 *)((const mz_uint8 *)pBuf + (has_id ? sizeof(mz_uint32) : 0));
                const mz_uint32 src_crc32 = pSrc_descriptor[0];
                const mz_uint64 src_comp_size = pSrc_descriptor[1];
                const mz_uint64 src_uncomp_size = pSrc_descriptor[2];

                mz_write_le32((mz_uint8 *)pBuf, MZ_ZIP_DATA_DESCRIPTOR_ID);
                mz_write_le32((mz_uint8 *)pBuf + sizeof(mz_uint32) * 1, src_crc32);
                mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 2, src_comp_size);
                mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 4, src_uncomp_size);

                n = sizeof(mz_uint32) * 6;
            }
            else
            {
                /* dest is NOT zip64, just copy it as-is */
                n = sizeof(mz_uint32) * (has_id ? 4 : 3);
            }
        }

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }

        cur_src_file_ofs += n;
        cur_dst_file_ofs += n;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);

    /* Finally, add the new central dir header */
    orig_central_dir_size = pState->m_central_dir.m_size;

    memcpy(new_central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);

    if (pState->m_zip64)
    {
        /* This is the painful part: We need to write a new central dir header + ext block with updated zip64 fields, and ensure the old fields (if any) are not included. */
        const mz_uint8 *pSrc_ext = pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len;
        mz_zip_array new_ext_block;

        mz_zip_array_init(&new_ext_block, sizeof(mz_uint8));

        /* 32-bit size/offset fields become the zip64 sentinel; real values go in the ext block. */
        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_UINT32_MAX);
        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_UINT32_MAX);
        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_UINT32_MAX);

        if (!mz_zip_writer_update_zip64_extension_block(&new_ext_block, pZip, pSrc_ext, src_ext_len, &src_file_stat.m_comp_size, &src_file_stat.m_uncomp_size, &local_dir_header_ofs, NULL))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            return MZ_FALSE;
        }

        MZ_WRITE_LE16(new_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS, new_ext_block.m_size);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_filename_len))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_ext_block.m_p, new_ext_block.m_size))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len + src_ext_len, src_comment_len))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        mz_zip_array_clear(pZip, &new_ext_block);
    }
    else
    {
        /* sanity checks */
        if (cur_dst_file_ofs > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (local_dir_header_ofs >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_central_dir_following_data_size))
        {
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }
    }

    /* This shouldn't trigger unless we screwed up during the initial sanity checks */
    if (pState->m_central_dir.m_size >= MZ_UINT32_MAX)
    {
        /* TODO: Support central dirs >= 32-bits in size */
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);
    }

    n = (mz_uint32)orig_central_dir_size;
    if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1))
    {
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    pZip->m_total_files++;
    pZip->m_archive_size = cur_dst_file_ofs;

    return MZ_TRUE;
}

/* Finalizes the archive: writes the central directory, the optional zip64
   end-of-central-directory records, and the end-of-central-directory record.
   After this succeeds the archive's mode becomes
   MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED and no more files may be added. */
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)
{
    mz_zip_internal_state *pState;
    mz_uint64 central_dir_ofs, central_dir_size;
    mz_uint8 hdr[256];

    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    if (pState->m_zip64)
    {
        if ((pZip->m_total_files > MZ_UINT32_MAX) || (pState->m_central_dir.m_size >= MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if ((pZip->m_total_files > MZ_UINT16_MAX) || ((pZip->m_archive_size +
            pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }

    central_dir_ofs = 0;
    central_dir_size = 0;
    if (pZip->m_total_files)
    {
        /* Write central directory */
        central_dir_ofs = pZip->m_archive_size;
        central_dir_size = pState->m_central_dir.m_size;
        pZip->m_central_directory_file_ofs = central_dir_ofs;
        if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        pZip->m_archive_size += central_dir_size;
    }

    if (pState->m_zip64)
    {
        /* Write zip64 end of central directory header */
        mz_uint64 rel_ofs_to_zip64_ecdr = pZip->m_archive_size;

        MZ_CLEAR_OBJ(hdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDH_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - sizeof(mz_uint32) - sizeof(mz_uint64));
        MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS, 0x031E); /* TODO: always Unix */
        MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_NEEDED_OFS, 0x002D);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_SIZE_OFS, central_dir_size);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_OFS_OFS, central_dir_ofs);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE;

        /* Write zip64 end of central directory locator */
        MZ_CLEAR_OBJ(hdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS, rel_ofs_to_zip64_ecdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS, 1);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE;
    }

    /* Write end of central directory record */
    MZ_CLEAR_OBJ(hdr);
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
    /* In a zip64 archive these 16/32-bit fields saturate; readers fall back to the zip64 records. */
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_size));
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_ofs));

    if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

#ifndef MINIZ_NO_STDIO
    if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED);
#endif /* #ifndef MINIZ_NO_STDIO */

    pZip->m_archive_size += MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE;

    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
    return MZ_TRUE;
}

/* Finalizes a heap-backed archive and hands the in-memory buffer to the
   caller via *ppBuf/*pSize.  Ownership transfers to the caller (the state's
   m_pMem is nulled here); presumably freed with mz_free() - confirm against
   the archive's allocator. */
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize)
{
    if ((!ppBuf) || (!pSize))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    *ppBuf = NULL;
    *pSize = 0;

    if ((!pZip) || (!pZip->m_pState))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (pZip->m_pWrite != mz_zip_heap_write_func)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_writer_finalize_archive(pZip))
        return MZ_FALSE;

    *ppBuf = pZip->m_pState->m_pMem;
    *pSize = pZip->m_pState->m_mem_size;
    pZip->m_pState->m_pMem = NULL;
    pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
    return MZ_TRUE;
}

/* Ends writing and frees all writer state (archive is NOT auto-finalized). */
mz_bool mz_zip_writer_end(mz_zip_archive *pZip)
{
    return mz_zip_writer_end_internal(pZip, MZ_TRUE);
}

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags)
{
    return mz_zip_add_mem_to_archive_file_in_place_v2(pZip_filename, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, NULL);
}

/* One-shot helper: creates pZip_filename if it doesn't exist, otherwise
   appends to it, then adds pBuf as pArchive_name and finalizes.  On failure
   of a freshly-created archive the partial file is deleted.  pErr (optional)
   receives the first error encountered. */
mz_bool mz_zip_add_mem_to_archive_file_in_place_v2(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr)
{
    mz_bool status, created_new_archive = MZ_FALSE;
    mz_zip_archive zip_archive;
    struct MZ_FILE_STAT_STRUCT file_stat;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    mz_zip_zero_struct(&zip_archive);
    if ((int)level_and_flags < 0)
        level_and_flags = MZ_DEFAULT_LEVEL;

    if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    if (!mz_zip_writer_validate_archive_name(pArchive_name))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_FILENAME;
        return MZ_FALSE;
    }

    /* Important: The regular non-64 bit version of stat() can fail here if the file is very large, which could cause the archive to be overwritten. */
    /* So be sure to compile with _LARGEFILE64_SOURCE 1 */
    if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0)
    {
        /* Create a new archive. */
        if (!mz_zip_writer_init_file_v2(&zip_archive, pZip_filename, 0, level_and_flags))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;
            return MZ_FALSE;
        }

        created_new_archive = MZ_TRUE;
    }
    else
    {
        /* Append to an existing archive. */
        if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;
            return MZ_FALSE;
        }

        if (!mz_zip_writer_init_from_reader_v2(&zip_archive, pZip_filename, level_and_flags))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;
            mz_zip_reader_end_internal(&zip_archive, MZ_FALSE);
            return MZ_FALSE;
        }
    }

    status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0);
    actual_err = zip_archive.m_last_error;

    /* Always finalize, even if adding failed for some reason, so we have a valid central directory. (This may not always succeed, but we can try.) */
    if (!mz_zip_writer_finalize_archive(&zip_archive))
    {
        if (!actual_err)
            actual_err = zip_archive.m_last_error;
        status = MZ_FALSE;
    }

    if (!mz_zip_writer_end_internal(&zip_archive, status))
    {
        if (!actual_err)
            actual_err = zip_archive.m_last_error;
        status = MZ_FALSE;
    }

    if ((!status) && (created_new_archive))
    {
        /* It's a new archive and something went wrong, so just delete it. */
        int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
        (void)ignoredStatus;
    }

    if (pErr)
        *pErr = actual_err;

    return status;
}

/* Opens pZip_filename, locates pArchive_name (optionally matching pComment),
   and extracts it into a heap buffer the caller must free.  Returns NULL on
   any failure; pErr (optional) receives the error code. */
void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr)
{
    mz_uint32 file_index;
    mz_zip_archive zip_archive;
    void *p = NULL;

    if (pSize)
        *pSize = 0;

    if ((!pZip_filename) || (!pArchive_name))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return NULL;
    }

    mz_zip_zero_struct(&zip_archive);
    if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0))
    {
        if (pErr)
            *pErr = zip_archive.m_last_error;
        return NULL;
    }

    if (mz_zip_reader_locate_file_v2(&zip_archive, pArchive_name, pComment, flags, &file_index))
    {
        p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
    }

    mz_zip_reader_end_internal(&zip_archive, p != NULL);

    if (pErr)
        *pErr = zip_archive.m_last_error;

    return p;
}

void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags)
{
    return mz_zip_extract_archive_file_to_heap_v2(pZip_filename, pArchive_name, NULL, pSize, flags, NULL);
}

#endif /* #ifndef MINIZ_NO_STDIO */

#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */

/* ------------------- Misc utils */

mz_zip_mode mz_zip_get_mode(mz_zip_archive *pZip)
{
    return pZip ? pZip->m_zip_mode : MZ_ZIP_MODE_INVALID;
}

mz_zip_type mz_zip_get_type(mz_zip_archive *pZip)
{
    return pZip ?
pZip->m_zip_type : MZ_ZIP_TYPE_INVALID; } mz_zip_error mz_zip_set_last_error(mz_zip_archive *pZip, mz_zip_error err_num) { mz_zip_error prev_err; if (!pZip) return MZ_ZIP_INVALID_PARAMETER; prev_err = pZip->m_last_error; pZip->m_last_error = err_num; return prev_err; } mz_zip_error mz_zip_peek_last_error(mz_zip_archive *pZip) { if (!pZip) return MZ_ZIP_INVALID_PARAMETER; return pZip->m_last_error; } mz_zip_error mz_zip_clear_last_error(mz_zip_archive *pZip) { return mz_zip_set_last_error(pZip, MZ_ZIP_NO_ERROR); } mz_zip_error mz_zip_get_last_error(mz_zip_archive *pZip) { mz_zip_error prev_err; if (!pZip) return MZ_ZIP_INVALID_PARAMETER; prev_err = pZip->m_last_error; pZip->m_last_error = MZ_ZIP_NO_ERROR; return prev_err; } const char *mz_zip_get_error_string(mz_zip_error mz_err) { switch (mz_err) { case MZ_ZIP_NO_ERROR: return "no error"; case MZ_ZIP_UNDEFINED_ERROR: return "undefined error"; case MZ_ZIP_TOO_MANY_FILES: return "too many files"; case MZ_ZIP_FILE_TOO_LARGE: return "file too large"; case MZ_ZIP_UNSUPPORTED_METHOD: return "unsupported method"; case MZ_ZIP_UNSUPPORTED_ENCRYPTION: return "unsupported encryption"; case MZ_ZIP_UNSUPPORTED_FEATURE: return "unsupported feature"; case MZ_ZIP_FAILED_FINDING_CENTRAL_DIR: return "failed finding central directory"; case MZ_ZIP_NOT_AN_ARCHIVE: return "not a ZIP archive"; case MZ_ZIP_INVALID_HEADER_OR_CORRUPTED: return "invalid header or archive is corrupted"; case MZ_ZIP_UNSUPPORTED_MULTIDISK: return "unsupported multidisk archive"; case MZ_ZIP_DECOMPRESSION_FAILED: return "decompression failed or archive is corrupted"; case MZ_ZIP_COMPRESSION_FAILED: return "compression failed"; case MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE: return "unexpected decompressed size"; case MZ_ZIP_CRC_CHECK_FAILED: return "CRC-32 check failed"; case MZ_ZIP_UNSUPPORTED_CDIR_SIZE: return "unsupported central directory size"; case MZ_ZIP_ALLOC_FAILED: return "allocation failed"; case MZ_ZIP_FILE_OPEN_FAILED: return "file open failed"; case 
MZ_ZIP_FILE_CREATE_FAILED: return "file create failed"; case MZ_ZIP_FILE_WRITE_FAILED: return "file write failed"; case MZ_ZIP_FILE_READ_FAILED: return "file read failed"; case MZ_ZIP_FILE_CLOSE_FAILED: return "file close failed"; case MZ_ZIP_FILE_SEEK_FAILED: return "file seek failed"; case MZ_ZIP_FILE_STAT_FAILED: return "file stat failed"; case MZ_ZIP_INVALID_PARAMETER: return "invalid parameter"; case MZ_ZIP_INVALID_FILENAME: return "invalid filename"; case MZ_ZIP_BUF_TOO_SMALL: return "buffer too small"; case MZ_ZIP_INTERNAL_ERROR: return "internal error"; case MZ_ZIP_FILE_NOT_FOUND: return "file not found"; case MZ_ZIP_ARCHIVE_TOO_LARGE: return "archive is too large"; case MZ_ZIP_VALIDATION_FAILED: return "validation failed"; case MZ_ZIP_WRITE_CALLBACK_FAILED: return "write calledback failed"; default: break; } return "unknown error"; } /* Note: Just because the archive is not zip64 doesn't necessarily mean it doesn't have Zip64 extended information extra field, argh. */ mz_bool mz_zip_is_zip64(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return MZ_FALSE; return pZip->m_pState->m_zip64; } size_t mz_zip_get_central_dir_size(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_central_dir.m_size; } mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? 
pZip->m_total_files : 0; } mz_uint64 mz_zip_get_archive_size(mz_zip_archive *pZip) { if (!pZip) return 0; return pZip->m_archive_size; } mz_uint64 mz_zip_get_archive_file_start_offset(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_file_archive_start_ofs; } MZ_FILE *mz_zip_get_cfile(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_pFile; } size_t mz_zip_read_archive_data(mz_zip_archive *pZip, mz_uint64 file_ofs, void *pBuf, size_t n) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return pZip->m_pRead(pZip->m_pIO_opaque, file_ofs, pBuf, n); } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { return mz_zip_file_stat_internal(pZip, file_index, mz_zip_get_cdh(pZip, file_index), pStat, NULL); } mz_bool mz_zip_end(mz_zip_archive *pZip) { if (!pZip) return MZ_FALSE; if (pZip->m_zip_mode == MZ_ZIP_MODE_READING) return mz_zip_reader_end(pZip); #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS else if ((pZip->m_zip_mode == MZ_ZIP_MODE_WRITING) || (pZip->m_zip_mode == MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)) return mz_zip_writer_end(pZip); #endif return MZ_FALSE; } #ifdef __cplusplus } #endif #endif /*#ifndef MINIZ_NO_ARCHIVE_APIS*/
220192.c
#include "devicetree.h" #include "uart.h" #include "utils.h" #define FDT_BEGIN_NODE 0x00000001 #define FDT_END_NODE 0x00000002 #define FDT_PROP 0x00000003 #define FDT_NOP 0x00000004 #define FDT_END 0x00000009 int fdt_indent = 0; unsigned long fdt_addr = 0x8200000; typedef struct { unsigned int magic; unsigned int totalsize; unsigned int off_dt_struct; unsigned int off_dt_strings; unsigned int off_mem_rsvmap; unsigned int version; unsigned int last_comp_version; unsigned int boot_cpuid_phys; unsigned int size_dt_strings; unsigned int size_dt_struct; } fdt_header; void fdt_address() { asm volatile("mov %0, x9 \n":"=r"(fdt_addr):); fdt_addr += 0xFFFF000000000000; } unsigned int endiantoi(void* _endian) { char* endian = _endian; unsigned int tmp = 0; for (int i=0; i<4; i++) { tmp |= endian[i] << (3-i)*8; } return tmp; } unsigned int align(unsigned int bytes, int aligned) { return (bytes + aligned - 1) & -aligned; } void initramfs_callback(int type, char *name, void *data, unsigned int size) { switch(type) { case FDT_BEGIN_NODE: for (int i=0; i<fdt_indent; i++) uart_send('\t'); printf(name); printf(": {"); uart_send('\n'); fdt_indent++; break; case FDT_END_NODE: fdt_indent--; for (int i=0; i<fdt_indent; i++) uart_send('\t'); printf("}\n"); break; case FDT_PROP: for (int i=0; i<fdt_indent; i++) uart_send('\t'); printf(name); uart_send('\n'); break; case FDT_NOP: break; case FDT_END: break; default: break; } } void cpio_callback(int type, char *name, void *data, unsigned int size) { if (type == FDT_PROP && !strcmp(name, "linux,initrd-start")) { cpio_start = (void *)((unsigned long)endiantoi(data) + 0xFFFF000000000000); } if (type == FDT_PROP && !strcmp(name, "linux,initrd-end")) { cpio_end = (void *)((unsigned long)endiantoi(data) + 0xFFFF000000000000); } } int fdt_parser(unsigned long _dt_struct, unsigned long _dt_strings, unsigned int totalsize, void (*callback)(int type, char *name, void *data, unsigned int size)) { unsigned long end = _dt_struct + totalsize; 
while(_dt_struct < end) { unsigned int type = endiantoi((char*)_dt_struct); _dt_struct += 4; switch (type) { case FDT_BEGIN_NODE: callback(FDT_BEGIN_NODE, (char*)_dt_struct, 0, 0); _dt_struct += align(strlen((char*)_dt_struct)+1, 4);////////////// break; case FDT_END_NODE: callback(FDT_END_NODE, 0, 0, 0); break; case FDT_NOP: callback(FDT_NOP, 0, 0, 0); break; case FDT_PROP: { unsigned int size = endiantoi((char*)_dt_struct); _dt_struct += 4; unsigned int name = endiantoi((char*)_dt_struct); _dt_struct += 4; callback(FDT_PROP, (char *)(_dt_strings+name), (void *)_dt_struct, size); _dt_struct += align(size, 4); break; } case FDT_END: callback(FDT_END, 0, 0, 0); return 0; default: return -1; } } return -1; } int fdt_traverse(void (*callback)(int type, char *name, void *data, unsigned int size)) { unsigned long addr = fdt_addr; fdt_header* ftd = (fdt_header*)addr; if (endiantoi(&ftd->magic) != 0xd00dfeed) return 1; unsigned int totalsize = (endiantoi(&ftd->totalsize)); unsigned long off_dt_struct = addr + (endiantoi(&ftd->off_dt_struct)); unsigned long off_dt_strings = addr + (endiantoi(&ftd->off_dt_strings)); fdt_start = (void *)(unsigned long)addr; fdt_end = (void *)(unsigned long)(addr + totalsize); return fdt_parser(off_dt_struct, off_dt_strings, totalsize, callback); }
974094.c
#include "graph.h" #include <assert.h> #include <stdio.h> #include <stdlib.h> #include "alloc-testing.h" #include "test_helper.h" static AdjacencyMatrix *init_ud_uw_graph() { AdjacencyMatrix *graph = adjacency_matrix_new(UndirectedUnweighted, 10); adjacency_matrix_link(graph, 0, 1); adjacency_matrix_link(graph, 0, 2); adjacency_matrix_link(graph, 0, 3); adjacency_matrix_link(graph, 0, 4); adjacency_matrix_link(graph, 0, 5); adjacency_matrix_link(graph, 1, 2); adjacency_matrix_link(graph, 1, 3); adjacency_matrix_link(graph, 1, 4); adjacency_matrix_link(graph, 1, 5); adjacency_matrix_link(graph, 1, 6); adjacency_matrix_link(graph, 2, 3); adjacency_matrix_link(graph, 2, 4); adjacency_matrix_link(graph, 2, 5); adjacency_matrix_link(graph, 2, 6); adjacency_matrix_link(graph, 2, 7); adjacency_matrix_link(graph, 3, 4); adjacency_matrix_link(graph, 3, 5); adjacency_matrix_link(graph, 3, 6); adjacency_matrix_link(graph, 3, 7); adjacency_matrix_link(graph, 4, 5); adjacency_matrix_link(graph, 4, 6); adjacency_matrix_link(graph, 4, 7); adjacency_matrix_link(graph, 5, 6); adjacency_matrix_link(graph, 5, 7); adjacency_matrix_link(graph, 6, 7); adjacency_matrix_link(graph, 7, 8); return graph; } void test_graph_bfs() { AdjacencyMatrix *graph = init_ud_uw_graph(); ASSERT_INT_EQ(adjacency_matrix_bfs(graph, 0, 9), -1); ASSERT_INT_EQ(adjacency_matrix_bfs(graph, 0, 8), 0); adjacency_matrix_free(graph); } void test_graph_dfs() { AdjacencyMatrix *graph = init_ud_uw_graph(); ASSERT_INT_EQ(adjacency_matrix_dfs(graph, 0, 9), -1); ASSERT_INT_EQ(adjacency_matrix_dfs(graph, 0, 8), 0); adjacency_matrix_free(graph); } void test_graph() { test_graph_bfs(); test_graph_dfs(); }
640070.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "tool/lambda/lib/blc.h" char NeedBit(FILE* f) { char b = GetBit(f); if (b == -1) Error(9, "UNEXPECTED EOF"); return b; }
667736.c
/* * Copyright (c) 2017 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <plugins/abf/abf_policy.h> #include <vlib/vlib.h> #include <vnet/plugin/plugin.h> #include <vnet/fib/fib_path_list.h> #include <vnet/fib/fib_walk.h> /** * FIB node type the attachment is registered */ fib_node_type_t abf_policy_fib_node_type; /** * Pool of ABF objects */ static abf_policy_t *abf_policy_pool; /** * DB of ABF policy objects * - policy ID to index conversion. */ static uword *abf_policy_db; abf_policy_t * abf_policy_get (u32 index) { return (pool_elt_at_index (abf_policy_pool, index)); } static u32 abf_policy_get_index (const abf_policy_t * abf) { return (abf - abf_policy_pool); } static abf_policy_t * abf_policy_find_i (u32 policy_id) { u32 api; api = abf_policy_find (policy_id); if (INDEX_INVALID != api) return (abf_policy_get (api)); return (NULL); } u32 abf_policy_find (u32 policy_id) { uword *p; p = hash_get (abf_policy_db, policy_id); if (NULL != p) return (p[0]); return (INDEX_INVALID); } int abf_policy_update (u32 policy_id, u32 acl_index, const fib_route_path_t * rpaths) { abf_policy_t *ap; u32 api; api = abf_policy_find (policy_id); if (INDEX_INVALID == api) { /* * create a new policy */ pool_get (abf_policy_pool, ap); api = ap - abf_policy_pool; fib_node_init (&ap->ap_node, abf_policy_fib_node_type); ap->ap_acl = acl_index; ap->ap_id = policy_id; ap->ap_pl = fib_path_list_create ((FIB_PATH_LIST_FLAG_SHARED | FIB_PATH_LIST_FLAG_NO_URPF), rpaths); 
      /*
       * become a child of the path list so we get poked when
       * the forwarding changes.
       */
      ap->ap_sibling = fib_path_list_child_add (ap->ap_pl,
						abf_policy_fib_node_type,
						api);

      /*
       * add this new policy to the DB
       */
      hash_set (abf_policy_db, policy_id, api);

      /*
       * take a lock on behalf of the CLI/API creation
       */
      fib_node_lock (&ap->ap_node);
    }
  else
    {
      /*
       * update an existing policy.
       * - add the path to the path-list and swap our ancestry
       * - backwalk to poke all attachments to update
       */
      fib_node_index_t old_pl;

      ap = abf_policy_get (api);
      old_pl = ap->ap_pl;

      if (ap->ap_acl != acl_index)
	{
	  /* Should change this error code to something more descriptive */
	  return (VNET_API_ERROR_INVALID_VALUE);
	}
      if (FIB_NODE_INDEX_INVALID != old_pl)
	{
	  ap->ap_pl = fib_path_list_copy_and_path_add (old_pl,
						       (FIB_PATH_LIST_FLAG_SHARED
							|
							FIB_PATH_LIST_FLAG_NO_URPF),
						       rpaths);
	  fib_path_list_child_remove (old_pl, ap->ap_sibling);
	}
      else
	{
	  ap->ap_pl = fib_path_list_create ((FIB_PATH_LIST_FLAG_SHARED |
					     FIB_PATH_LIST_FLAG_NO_URPF),
					    rpaths);
	}

      ap->ap_sibling = fib_path_list_child_add (ap->ap_pl,
						abf_policy_fib_node_type,
						api);

      fib_node_back_walk_ctx_t ctx = {
	.fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
      };

      /* poke all attachments so they restack on the new path list */
      fib_walk_sync (abf_policy_fib_node_type, api, &ctx);
    }
  return (0);
}

/* Frees a policy once its last lock is gone (called from the FIB graph). */
static void
abf_policy_destroy (abf_policy_t * ap)
{
  /*
   * this ABF should not be a sibling on the path list, since
   * that was removed when the API config went
   */
  ASSERT (ap->ap_sibling == ~0);
  ASSERT (ap->ap_pl == FIB_NODE_INDEX_INVALID);

  hash_unset (abf_policy_db, ap->ap_id);
  pool_put (abf_policy_pool, ap);
}

/* Removes paths from a policy; when the last path goes, drops the CLI/API
   lock so the policy is destroyed.  Returns 0, or
   VNET_API_ERROR_INVALID_VALUE when the policy ID is unknown. */
int
abf_policy_delete (u32 policy_id, const fib_route_path_t * rpaths)
{
  abf_policy_t *ap;
  u32 api;

  api = abf_policy_find (policy_id);

  if (INDEX_INVALID == api)
    {
      /*
       * no such policy
       */
      return (VNET_API_ERROR_INVALID_VALUE);
    }
  else
    {
      /*
       * update an existing policy.
       * - add the path to the path-list and swap our ancestry
       * - backwalk to poke all attachments to update
       */
      fib_node_index_t old_pl;

      ap = abf_policy_get (api);
      old_pl = ap->ap_pl;

      /* hold the old list alive until the swap below is complete */
      fib_path_list_lock (old_pl);
      ap->ap_pl =
	fib_path_list_copy_and_path_remove (ap->ap_pl,
					    (FIB_PATH_LIST_FLAG_SHARED |
					     FIB_PATH_LIST_FLAG_NO_URPF),
					    rpaths);

      fib_path_list_child_remove (old_pl, ap->ap_sibling);
      ap->ap_sibling = ~0;

      if (FIB_NODE_INDEX_INVALID == ap->ap_pl)
	{
	  /*
	   * no more paths on this policy. It's toast
	   * remove the CLI/API's lock
	   */
	  fib_node_unlock (&ap->ap_node);
	}
      else
	{
	  ap->ap_sibling = fib_path_list_child_add (ap->ap_pl,
						    abf_policy_fib_node_type,
						    api);

	  fib_node_back_walk_ctx_t ctx = {
	    .fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
	  };
	  fib_walk_sync (abf_policy_fib_node_type, api, &ctx);
	}
      fib_path_list_unlock (old_pl);
    }

  return (0);
}

/* CLI handler for "abf policy [add|del] id <n> acl <n> via ...". */
static clib_error_t *
abf_policy_cmd (vlib_main_t * vm,
		unformat_input_t * main_input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 acl_index, policy_id;
  fib_route_path_t *rpaths = NULL, rpath;
  u32 is_del;
  int rv = 0;

  is_del = 0;
  acl_index = INDEX_INVALID;
  policy_id = INDEX_INVALID;

  /* Get a line of input.
*/ if (!unformat_user (main_input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "acl %d", &acl_index)) ; else if (unformat (line_input, "id %d", &policy_id)) ; else if (unformat (line_input, "del")) is_del = 1; else if (unformat (line_input, "add")) is_del = 0; else if (unformat (line_input, "via %U", unformat_fib_route_path, &rpath)) vec_add1 (rpaths, rpath); else return (clib_error_return (0, "unknown input '%U'", format_unformat_error, line_input)); } if (INDEX_INVALID == policy_id) { vlib_cli_output (vm, "Specify a Policy ID"); return 0; } if (!is_del) { if (INDEX_INVALID == acl_index) { vlib_cli_output (vm, "ACL index must be set"); return 0; } rv = abf_policy_update (policy_id, acl_index, rpaths); /* Should change this error code to something more descriptive */ if (rv == VNET_API_ERROR_INVALID_VALUE) { vlib_cli_output (vm, "ACL index must match existing ACL index in policy"); return 0; } } else { abf_policy_delete (policy_id, rpaths); } unformat_free (line_input); return (NULL); } /* *INDENT-OFF* */ /** * Create an ABF policy. 
*/
VLIB_CLI_COMMAND (abf_policy_cmd_node, static) = {
  .path = "abf policy",
  .function = abf_policy_cmd,
  .short_help = "abf policy [add|del] id <index> acl <index> via ...",
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

/**
 * Format one ABF policy for CLI display: pool index, user policy id,
 * ACL index and, when a path-list is attached, its forwarding paths.
 */
static u8 *
format_abf (u8 * s, va_list * args)
{
  abf_policy_t *ap = va_arg (*args, abf_policy_t *);

  s = format (s, "abf:[%d]: policy:%d acl:%d",
	      ap - abf_policy_pool, ap->ap_id, ap->ap_acl);
  s = format (s, "\n ");
  if (FIB_NODE_INDEX_INVALID == ap->ap_pl)
    {
      s = format (s, "no forwarding");
    }
  else
    {
      s = fib_path_list_format (ap->ap_pl, s);
    }

  return (s);
}

/**
 * Walk all policies in the pool, invoking @a cb with each pool index
 * and @a ctx.  The walk stops early when the callback returns 0.
 */
void
abf_policy_walk (abf_policy_walk_cb_t cb, void *ctx)
{
  u32 api;

  /* *INDENT-OFF* */
  pool_foreach_index(api, abf_policy_pool,
  ({
    if (!cb(api, ctx))
      break;
  }));
  /* *INDENT-ON* */
}

/**
 * CLI handler for "show abf policy [<id>]".
 * With no id, dump every policy in the pool; otherwise look up the
 * policy by its user-assigned id and show it (or an error).
 */
static clib_error_t *
abf_show_policy_cmd (vlib_main_t * vm, unformat_input_t * input,
		     vlib_cli_command_t * cmd)
{
  u32 policy_id;
  abf_policy_t *ap;

  policy_id = INDEX_INVALID;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%d", &policy_id))
	;
      else
	return (clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, input));
    }

  if (INDEX_INVALID == policy_id)
    {
      /* no id given - dump them all */
      /* *INDENT-OFF* */
      pool_foreach(ap, abf_policy_pool,
      ({
        vlib_cli_output(vm, "%U", format_abf, ap);
      }));
      /* *INDENT-ON* */
    }
  else
    {
      ap = abf_policy_find_i (policy_id);

      if (NULL != ap)
	vlib_cli_output (vm, "%U", format_abf, ap);
      else
	vlib_cli_output (vm, "Invalid policy ID:%d", policy_id);
    }

  return (NULL);
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (abf_policy_show_policy_cmd_node, static) = {
  .path = "show abf policy",
  .function = abf_show_policy_cmd,
  .short_help = "show abf policy <value>",
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

/* fib-node vft: map a policy pool index to its embedded fib_node_t */
static fib_node_t *
abf_policy_get_node (fib_node_index_t index)
{
  abf_policy_t *ap = abf_policy_get (index);
  return (&(ap->ap_node));
}

/* inverse of the above: recover the policy from its embedded node */
static abf_policy_t *
abf_policy_get_from_node (fib_node_t * node)
{
  return ((abf_policy_t *) (((char *) node) -
			    STRUCT_OFFSET_OF (abf_policy_t, ap_node)));
}

/* last lock on the policy's fib node is gone - destroy the policy */
static void
abf_policy_last_lock_gone (fib_node_t * node)
{
  abf_policy_destroy (abf_policy_get_from_node (node));
}

/*
 * A back walk has reached this ABF policy
 */
static fib_node_back_walk_rc_t
abf_policy_back_walk_notify (fib_node_t * node,
			     fib_node_back_walk_ctx_t * ctx)
{
  abf_policy_t *abf = abf_policy_get_from_node (node);

  /*
   * propagate further up the graph.
   * we can do this synchronously since the fan out is small.
   */
  fib_walk_sync (abf_policy_fib_node_type, abf_policy_get_index (abf), ctx);

  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/*
 * The ABF policy's graph node virtual function table.
 * (The original comment said "BIER fmask" - a copy/paste leftover from
 * the BIER code this module was derived from.)
 */
static const fib_node_vft_t abf_policy_vft = {
  .fnv_get = abf_policy_get_node,
  .fnv_last_lock = abf_policy_last_lock_gone,
  .fnv_back_walk = abf_policy_back_walk_notify,
};

/* register the ABF policy node type with the FIB graph at init time */
static clib_error_t *
abf_policy_init (vlib_main_t * vm)
{
  abf_policy_fib_node_type = fib_node_register_new_type (&abf_policy_vft);

  return (NULL);
}

VLIB_INIT_FUNCTION (abf_policy_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/* 427564.c */
/*
 * Driver for the NXP SAA7164 PCIe bridge
 *
 * Copyright (c) 2010-2015 Steven Toth <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "saa7164.h"

#include "tda10048.h"
#include "tda18271.h"
#include "s5h1411.h"
#include "si2157.h"
#include "si2168.h"
#include "lgdt3306a.h"

#define DRIVER_NAME "saa7164"

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* addr is in the card struct, get it from there */
/*
 * TDA10048 demodulator config for the first HVR2200 port.
 * 7-bit I2C address derived from the 8-bit 0x10; serial TS output,
 * with the per-bandwidth IF and crystal-clock settings below.
 */
static struct tda10048_config hauppauge_hvr2200_1_config = {
	.demod_address    = 0x10 >> 1,
	.output_mode      = TDA10048_SERIAL_OUTPUT,
	.fwbulkwritelen   = TDA10048_BULKWRITE_200,
	.inversion        = TDA10048_INVERSION_ON,
	.dtv6_if_freq_khz = TDA10048_IF_3300,
	.dtv7_if_freq_khz = TDA10048_IF_3500,
	.dtv8_if_freq_khz = TDA10048_IF_4000,
	.clk_freq_khz     = TDA10048_CLK_16000,
};

/* Same settings as port 1 but at 8-bit I2C address 0x12 (second port). */
static struct tda10048_config hauppauge_hvr2200_2_config = {
	.demod_address    = 0x12 >> 1,
	.output_mode      = TDA10048_SERIAL_OUTPUT,
	.fwbulkwritelen   = TDA10048_BULKWRITE_200,
	.inversion        = TDA10048_INVERSION_ON,
	.dtv6_if_freq_khz = TDA10048_IF_3300,
	.dtv7_if_freq_khz = TDA10048_IF_3500,
	.dtv8_if_freq_khz = TDA10048_IF_4000,
	.clk_freq_khz     = TDA10048_CLK_16000,
};

/* Custom TDA18271 standard map: IF/AGC settings for ATSC-6 and QAM-6. */
static struct tda18271_std_map hauppauge_tda18271_std_map = {
	.atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 3,
		    .if_lvl = 6, .rfagc_top = 0x37 },
	.qam_6  = { .if_freq = 4000, .agc_mode = 3, .std = 0,
		    .if_lvl = 6, .rfagc_top = 0x37 },
};

/* TDA18271 tuner config for the master tuner on HVR22x0 boards. */
static struct tda18271_config hauppauge_hvr22x0_tuner_config = {
	.std_map = &hauppauge_tda18271_std_map,
	.gate    = TDA18271_GATE_ANALOG,
	.role    = TDA18271_MASTER,
};
static struct tda18271_config hauppauge_hvr22x0s_tuner_config = { .std_map = &hauppauge_tda18271_std_map, .gate = TDA18271_GATE_ANALOG, .role = TDA18271_SLAVE, .output_opt = TDA18271_OUTPUT_LT_OFF, .rf_cal_on_startup = 1 }; static struct s5h1411_config hauppauge_s5h1411_config = { .output_mode = S5H1411_SERIAL_OUTPUT, .gpio = S5H1411_GPIO_ON, .qam_if = S5H1411_IF_4000, .vsb_if = S5H1411_IF_3250, .inversion = S5H1411_INVERSION_ON, .status_mode = S5H1411_DEMODLOCKING, .mpeg_timing = S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK, }; static struct lgdt3306a_config hauppauge_hvr2255a_config = { .i2c_addr = 0xb2 >> 1, .qam_if_khz = 4000, .vsb_if_khz = 3250, .deny_i2c_rptr = 1, /* Disabled */ .spectral_inversion = 0, /* Disabled */ .mpeg_mode = LGDT3306A_MPEG_SERIAL, .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE, .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH, .xtalMHz = 25, /* 24 or 25 */ }; static struct lgdt3306a_config hauppauge_hvr2255b_config = { .i2c_addr = 0x1c >> 1, .qam_if_khz = 4000, .vsb_if_khz = 3250, .deny_i2c_rptr = 1, /* Disabled */ .spectral_inversion = 0, /* Disabled */ .mpeg_mode = LGDT3306A_MPEG_SERIAL, .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE, .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH, .xtalMHz = 25, /* 24 or 25 */ }; static struct si2157_config hauppauge_hvr2255_tuner_config = { .inversion = 1, .if_port = 1, }; static int si2157_attach(struct saa7164_port *port, struct i2c_adapter *adapter, struct dvb_frontend *fe, u8 addr8bit, struct si2157_config *cfg) { struct i2c_board_info bi; struct i2c_client *tuner; cfg->fe = fe; memset(&bi, 0, sizeof(bi)); strscpy(bi.type, "si2157", I2C_NAME_SIZE); bi.platform_data = cfg; bi.addr = addr8bit >> 1; request_module(bi.type); tuner = i2c_new_device(adapter, &bi); if (tuner == NULL || tuner->dev.driver == NULL) return -ENODEV; if (!try_module_get(tuner->dev.driver->owner)) { i2c_unregister_device(tuner); return -ENODEV; } port->i2c_client_tuner = tuner; return 0; } static int saa7164_dvb_stop_port(struct 
saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Stopped\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_acquire_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_pause_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); ret = 0; } return ret; } /* Firmware is very windows centric, meaning you have to transition * the part through AVStream / KS Windows stages, forwards or backwards. * States are: stopped, acquired (h/w), paused, started. 
*/ static int saa7164_dvb_stop_streaming(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; struct list_head *p, *q; int ret; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); ret = saa7164_dvb_pause_port(port); ret = saa7164_dvb_acquire_port(port); ret = saa7164_dvb_stop_port(port); /* Mark the hardware buffers as free */ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(p, q, &port->dmaqueue.list) { buf = list_entry(p, struct saa7164_buffer, list); buf->flags = SAA7164_BUFFER_FREE; } mutex_unlock(&port->dmaqueue_lock); return ret; } static int saa7164_dvb_start_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret = 0, result; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); saa7164_buffer_cfg_port(port); /* Acquire the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); /* Pause the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); /* Start the hardware */ result = 
saa7164_api_transition_port(port, SAA_DMASTATE_RUN); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run transition failed, result = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n", __func__, result); } ret = -EIO; } else dprintk(DBGLVL_DVB, "%s() Running\n", __func__); out: return ret; } static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (!demux->dmx.frontend) return -EINVAL; if (dvb) { mutex_lock(&dvb->lock); if (dvb->feeding++ == 0) { /* Start transport */ ret = saa7164_dvb_start_port(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (dvb) { mutex_lock(&dvb->lock); if (--dvb->feeding == 0) { /* Stop transport */ ret = saa7164_dvb_stop_streaming(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int dvb_register(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; int result, i; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (port->type != SAA7164_MPEG_DVB) BUG(); /* 
Sanity check that the PCI configuration space is active */ if (port->hwcfg.BARLocation == 0) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), NO PCI configuration\n", DRIVER_NAME, result); goto fail_adapter; } /* Init and establish defaults */ port->hw_streamingparams.bitspersample = 8; port->hw_streamingparams.samplesperline = 188; port->hw_streamingparams.numberoflines = (SAA7164_TS_NUMBER_OF_LINES * 188) / 188; port->hw_streamingparams.pitch = 188; port->hw_streamingparams.linethreshold = 0; port->hw_streamingparams.pagetablelistvirt = NULL; port->hw_streamingparams.pagetablelistphys = NULL; port->hw_streamingparams.numpagetables = 2 + ((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE); port->hw_streamingparams.numpagetableentries = port->hwcfg.buffercount; /* Allocate the PCI resources */ for (i = 0; i < port->hwcfg.buffercount; i++) { buf = saa7164_buffer_alloc(port, port->hw_streamingparams.numberoflines * port->hw_streamingparams.pitch); if (!buf) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), unable to allocate buffers\n", DRIVER_NAME, result); goto fail_adapter; } mutex_lock(&port->dmaqueue_lock); list_add_tail(&buf->list, &port->dmaqueue.list); mutex_unlock(&port->dmaqueue_lock); } /* register adapter */ result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE, &dev->pci->dev, adapter_nr); if (result < 0) { printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d)\n", DRIVER_NAME, result); goto fail_adapter; } dvb->adapter.priv = port; /* register frontend */ result = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (result < 0) { printk(KERN_ERR "%s: dvb_register_frontend failed (errno = %d)\n", DRIVER_NAME, result); goto fail_frontend; } /* register demux stuff */ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; dvb->demux.priv = port; dvb->demux.filternum = 256; dvb->demux.feednum = 256; dvb->demux.start_feed = 
saa7164_dvb_start_feed; dvb->demux.stop_feed = saa7164_dvb_stop_feed; result = dvb_dmx_init(&dvb->demux); if (result < 0) { printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmx; } dvb->dmxdev.filternum = 256; dvb->dmxdev.demux = &dvb->demux.dmx; dvb->dmxdev.capabilities = 0; result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); if (result < 0) { printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmxdev; } dvb->fe_hw.source = DMX_FRONTEND_0; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_hw; } dvb->fe_mem.source = DMX_MEMORY_FE; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_mem; } result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n", DRIVER_NAME, result); goto fail_fe_conn; } /* register network adapter */ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); return 0; fail_fe_conn: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); fail_fe_mem: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); fail_fe_hw: dvb_dmxdev_release(&dvb->dmxdev); fail_dmxdev: dvb_dmx_release(&dvb->demux); fail_dmx: dvb_unregister_frontend(dvb->frontend); fail_frontend: dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); fail_adapter: return result; } int saa7164_dvb_unregister(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *b; struct list_head *c, *n; struct i2c_client *client; dprintk(DBGLVL_DVB, "%s()\n", __func__); if (port->type != SAA7164_MPEG_DVB) BUG(); /* Remove any allocated buffers 
*/ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(c, n, &port->dmaqueue.list) { b = list_entry(c, struct saa7164_buffer, list); list_del(c); saa7164_buffer_dealloc(b); } mutex_unlock(&port->dmaqueue_lock); if (dvb->frontend == NULL) return 0; /* remove I2C client for tuner */ client = port->i2c_client_tuner; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } /* remove I2C client for demodulator */ client = port->i2c_client_demod; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } dvb_net_release(&dvb->net); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); dvb_unregister_frontend(dvb->frontend); dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); return 0; } /* All the DVB attach calls go here, this function get's modified * for each new card. */ int saa7164_dvb_register(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_i2c *i2c_bus = NULL; struct si2168_config si2168_config; struct si2157_config si2157_config; struct i2c_adapter *adapter; struct i2c_board_info info; struct i2c_client *client_demod; struct i2c_client *client_tuner; int ret; dprintk(DBGLVL_DVB, "%s()\n", __func__); /* init frontend */ switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: i2c_bus = &dev->i2c_bus[port->nr + 1]; switch (port->nr) { case 0: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_1_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } 
break; case 1: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_2_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } break; } break; case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: i2c_bus = &dev->i2c_bus[port->nr + 1]; port->dvb.frontend = dvb_attach(s5h1411_attach, &hauppauge_s5h1411_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { if (port->nr == 0) { /* Master TDA18271 */ /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } else { /* Slave TDA18271 */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } } break; case SAA7164_BOARD_HAUPPAUGE_HVR2255proto: case SAA7164_BOARD_HAUPPAUGE_HVR2255: i2c_bus = &dev->i2c_bus[2]; if (port->nr == 0) { port->dvb.frontend = dvb_attach(lgdt3306a_attach, &hauppauge_hvr2255a_config, &i2c_bus->i2c_adap); } else { port->dvb.frontend = dvb_attach(lgdt3306a_attach, &hauppauge_hvr2255b_config, &i2c_bus->i2c_adap); } if (port->dvb.frontend != NULL) { if (port->nr == 0) { si2157_attach(port, &dev->i2c_bus[0].i2c_adap, port->dvb.frontend, 0xc0, &hauppauge_hvr2255_tuner_config); } else { si2157_attach(port, &dev->i2c_bus[1].i2c_adap, port->dvb.frontend, 0xc0, &hauppauge_hvr2255_tuner_config); } } break; case SAA7164_BOARD_HAUPPAUGE_HVR2205: if (port->nr == 0) { /* attach frontend */ memset(&si2168_config, 0, sizeof(si2168_config)); si2168_config.i2c_adapter = &adapter; si2168_config.fe = &port->dvb.frontend; si2168_config.ts_mode = SI2168_TS_SERIAL; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "si2168", I2C_NAME_SIZE); info.addr = 0xc8 >> 1; info.platform_data = &si2168_config; 
request_module(info.type); client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap, &info); if (!client_demod || !client_demod->dev.driver) goto frontend_detach; if (!try_module_get(client_demod->dev.driver->owner)) { i2c_unregister_device(client_demod); goto frontend_detach; } port->i2c_client_demod = client_demod; /* attach tuner */ memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.if_port = 1; si2157_config.fe = port->dvb.frontend; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "si2157", I2C_NAME_SIZE); info.addr = 0xc0 >> 1; info.platform_data = &si2157_config; request_module(info.type); client_tuner = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info); if (!client_tuner || !client_tuner->dev.driver) { module_put(client_demod->dev.driver->owner); i2c_unregister_device(client_demod); goto frontend_detach; } if (!try_module_get(client_tuner->dev.driver->owner)) { i2c_unregister_device(client_tuner); module_put(client_demod->dev.driver->owner); i2c_unregister_device(client_demod); goto frontend_detach; } port->i2c_client_tuner = client_tuner; } else { /* attach frontend */ memset(&si2168_config, 0, sizeof(si2168_config)); si2168_config.i2c_adapter = &adapter; si2168_config.fe = &port->dvb.frontend; si2168_config.ts_mode = SI2168_TS_SERIAL; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "si2168", I2C_NAME_SIZE); info.addr = 0xcc >> 1; info.platform_data = &si2168_config; request_module(info.type); client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap, &info); if (!client_demod || !client_demod->dev.driver) goto frontend_detach; if (!try_module_get(client_demod->dev.driver->owner)) { i2c_unregister_device(client_demod); goto frontend_detach; } port->i2c_client_demod = client_demod; /* attach tuner */ memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = port->dvb.frontend; si2157_config.if_port = 1; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "si2157", I2C_NAME_SIZE); 
info.addr = 0xc0 >> 1; info.platform_data = &si2157_config; request_module(info.type); client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info); if (!client_tuner || !client_tuner->dev.driver) { module_put(client_demod->dev.driver->owner); i2c_unregister_device(client_demod); goto frontend_detach; } if (!try_module_get(client_tuner->dev.driver->owner)) { i2c_unregister_device(client_tuner); module_put(client_demod->dev.driver->owner); i2c_unregister_device(client_demod); goto frontend_detach; } port->i2c_client_tuner = client_tuner; } break; default: printk(KERN_ERR "%s: The frontend isn't supported\n", dev->name); break; } if (NULL == dvb->frontend) { printk(KERN_ERR "%s() Frontend initialization failed\n", __func__); return -1; } /* register everything */ ret = dvb_register(port); if (ret < 0) { if (dvb->frontend->ops.release) dvb->frontend->ops.release(dvb->frontend); return ret; } return 0; frontend_detach: printk(KERN_ERR "%s() Frontend/I2C initialization failed\n", __func__); return -1; }
/* 400222.c */
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (c) 2012 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or modify it under the * terms of the version 2.1 (or later) of the GNU Lesser General Public License * as published by the Free Software Foundation; or version 2.0 of the Apache * License as published by the Apache Software Foundation. See the LICENSE files * for more details. * * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the LICENSE files for more details. * * You should have received a copy of the GNU Lesser General Public or the * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/> * or <https://www.apache.org/licenses/>. */ /** * @file * * Benchmarks for elliptic curves defined over extensions of prime fields. 
* * @ingroup bench */ #include <stdio.h> #include "relic.h" #include "relic_bench.h" static void memory2(void) { ep4_t a[BENCH]; BENCH_FEW("ep4_null", ep4_null(a[i]), 1); BENCH_FEW("ep4_new", ep4_new(a[i]), 1); for (int i = 0; i < BENCH; i++) { ep4_free(a[i]); } for (int i = 0; i < BENCH; i++) { ep4_new(a[i]); } BENCH_FEW("ep4_free", ep4_free(a[i]), 1); (void)a; } static void util2(void) { ep2_t p, q, t[2]; uint8_t bin[4 * RLC_FP_BYTES + 1]; int l; ep2_null(p); ep2_null(q); ep2_null(t[0]); ep2_null(t[1]); ep2_new(p); ep2_new(q); ep2_new(t[0]); ep2_new(t[1]); BENCH_RUN("ep2_is_infty") { ep2_rand(p); BENCH_ADD(ep2_is_infty(p)); } BENCH_END; BENCH_RUN("ep2_set_infty") { ep2_rand(p); BENCH_ADD(ep2_set_infty(p)); } BENCH_END; BENCH_RUN("ep2_copy") { ep2_rand(p); ep2_rand(q); BENCH_ADD(ep2_copy(p, q)); } BENCH_END; BENCH_RUN("ep2_cmp") { ep2_rand(p); ep2_dbl(p, p); ep2_rand(q); ep2_dbl(q, q); BENCH_ADD(ep2_cmp(p, q)); } BENCH_END; BENCH_RUN("ep2_norm") { ep2_rand(p); ep2_dbl(p, p); BENCH_ADD(ep2_norm(p, p)); } BENCH_END; BENCH_RUN("ep2_norm_sim (2)") { ep2_rand(t[0]); ep2_rand(t[1]); ep2_dbl(t[0], t[0]); ep2_dbl(t[1], t[1]); BENCH_ADD(ep2_norm_sim(t, t, 2)); } BENCH_END; BENCH_RUN("ep2_cmp (1 norm)") { ep2_rand(p); ep2_dbl(p, p); ep2_rand(q); BENCH_ADD(ep2_cmp(p, q)); } BENCH_END; BENCH_RUN("ep2_cmp (2 norm)") { ep2_rand(p); ep2_rand(q); BENCH_ADD(ep2_cmp(p, q)); } BENCH_END; BENCH_RUN("ep2_rand") { BENCH_ADD(ep2_rand(p)); } BENCH_END; BENCH_RUN("ep2_blind") { BENCH_ADD(ep2_blind(p, p)); } BENCH_END; BENCH_RUN("ep2_on_curve") { ep2_rand(p); BENCH_ADD(ep2_on_curve(p)); } BENCH_END; BENCH_RUN("ep2_size_bin (0)") { ep2_rand(p); BENCH_ADD(ep2_size_bin(p, 0)); } BENCH_END; BENCH_RUN("ep2_size_bin (1)") { ep2_rand(p); BENCH_ADD(ep2_size_bin(p, 1)); } BENCH_END; BENCH_RUN("ep2_write_bin (0)") { ep2_rand(p); l = ep2_size_bin(p, 0); BENCH_ADD(ep2_write_bin(bin, l, p, 0)); } BENCH_END; BENCH_RUN("ep2_write_bin (1)") { ep2_rand(p); l = ep2_size_bin(p, 1); 
BENCH_ADD(ep2_write_bin(bin, l, p, 1)); } BENCH_END; BENCH_RUN("ep2_read_bin (0)") { ep2_rand(p); l = ep2_size_bin(p, 0); ep2_write_bin(bin, l, p, 0); BENCH_ADD(ep2_read_bin(p, bin, l)); } BENCH_END; BENCH_RUN("ep2_read_bin (1)") { ep2_rand(p); l = ep2_size_bin(p, 1); ep2_write_bin(bin, l, p, 1); BENCH_ADD(ep2_read_bin(p, bin, l)); } BENCH_END; ep2_free(p); ep2_free(q); ep2_free(t[0]); ep2_free(t[1]); } static void arith2(void) { ep2_t p, q, r, t[RLC_EPX_TABLE_MAX]; bn_t k, n, l; fp2_t s; ep2_null(p); ep2_null(q); ep2_null(r); bn_null(k); bn_null(n); fp2_null(s); for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) { ep2_null(t[i]); } ep2_new(p); ep2_new(q); ep2_new(r); bn_new(k); bn_new(n); bn_new(l); fp2_new(s); ep2_curve_get_ord(n); BENCH_RUN("ep2_add") { ep2_rand(p); ep2_rand(q); ep2_add(p, p, q); ep2_rand(q); ep2_rand(p); ep2_add(q, q, p); BENCH_ADD(ep2_add(r, p, q)); } BENCH_END; #if EP_ADD == BASIC || !defined(STRIP) BENCH_RUN("ep2_add_basic") { ep2_rand(p); ep2_rand(q); BENCH_ADD(ep2_add_basic(r, p, q)); } BENCH_END; BENCH_RUN("ep2_add_slp_basic") { ep2_rand(p); ep2_rand(q); BENCH_ADD(ep2_add_slp_basic(r, s, p, q)); } BENCH_END; #endif #if EP_ADD == PROJC || !defined(STRIP) BENCH_RUN("ep2_add_projc") { ep2_rand(p); ep2_rand(q); ep2_add_projc(p, p, q); ep2_rand(q); ep2_rand(p); ep2_add_projc(q, q, p); BENCH_ADD(ep2_add_projc(r, p, q)); } BENCH_END; BENCH_RUN("ep2_add_projc (z2 = 1)") { ep2_rand(p); ep2_rand(q); ep2_add_projc(p, p, q); ep2_rand(q); ep2_norm(q, q); BENCH_ADD(ep2_add_projc(r, p, q)); } BENCH_END; BENCH_RUN("ep2_add_projc (z1,z2 = 1)") { ep2_rand(p); ep2_norm(p, p); ep2_rand(q); ep2_norm(q, q); BENCH_ADD(ep2_add_projc(r, p, q)); } BENCH_END; #endif BENCH_RUN("ep2_sub") { ep2_rand(p); ep2_rand(q); ep2_add(p, p, q); ep2_rand(q); ep2_rand(p); ep2_add(q, q, p); BENCH_ADD(ep2_sub(r, p, q)); } BENCH_END; BENCH_RUN("ep2_dbl") { ep2_rand(p); ep2_rand(q); ep2_add(p, p, q); BENCH_ADD(ep2_dbl(r, p)); } BENCH_END; #if EP_ADD == BASIC || !defined(STRIP) 
BENCH_RUN("ep2_dbl_basic") { ep2_rand(p); BENCH_ADD(ep2_dbl_basic(r, p)); } BENCH_END; BENCH_RUN("ep2_dbl_slp_basic") { ep2_rand(p); BENCH_ADD(ep2_dbl_slp_basic(r, s, p)); } BENCH_END; #endif #if EP_ADD == PROJC || !defined(STRIP) BENCH_RUN("ep2_dbl_projc") { ep2_rand(p); ep2_rand(q); ep2_add_projc(p, p, q); BENCH_ADD(ep2_dbl_projc(r, p)); } BENCH_END; BENCH_RUN("ep2_dbl_projc (z1 = 1)") { ep2_rand(p); ep2_norm(p, p); BENCH_ADD(ep2_dbl_projc(r, p)); } BENCH_END; #endif BENCH_RUN("ep2_neg") { ep2_rand(p); ep2_rand(q); ep2_add(p, p, q); BENCH_ADD(ep2_neg(r, p)); } BENCH_END; BENCH_RUN("ep2_mul") { bn_rand_mod(k, n); BENCH_ADD(ep2_mul(q, p, k)); } BENCH_END; #if EP_MUL == BASIC || !defined(STRIP) BENCH_RUN("ep2_mul_basic") { bn_rand_mod(k, n); BENCH_ADD(ep2_mul_basic(q, p, k)); } BENCH_END; #endif #if EP_MUL == SLIDE || !defined(STRIP) BENCH_RUN("ep2_mul_slide") { bn_rand_mod(k, n); ep2_rand(p); BENCH_ADD(ep2_mul_slide(q, p, k)); } BENCH_END; #endif #if EP_MUL == MONTY || !defined(STRIP) BENCH_RUN("ep2_mul_monty") { bn_rand_mod(k, n); ep2_rand(p); BENCH_ADD(ep2_mul_monty(q, p, k)); } BENCH_END; #endif #if EP_MUL == LWNAF || !defined(STRIP) BENCH_RUN("ep2_mul_lwnaf") { bn_rand_mod(k, n); ep2_rand(p); BENCH_ADD(ep2_mul_lwnaf(q, p, k)); } BENCH_END; #endif BENCH_RUN("ep2_mul_gen") { bn_rand_mod(k, n); BENCH_ADD(ep2_mul_gen(q, k)); } BENCH_END; BENCH_RUN("ep2_mul_dig") { bn_rand(k, RLC_POS, RLC_DIG); bn_rand_mod(k, n); BENCH_ADD(ep2_mul_dig(p, q, k->dp[0])); } BENCH_END; for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) { ep2_new(t[i]); } BENCH_RUN("ep2_mul_pre") { ep2_rand(p); BENCH_ADD(ep2_mul_pre(t, p)); } BENCH_END; BENCH_RUN("ep2_mul_fix") { bn_rand_mod(k, n); ep2_rand(p); ep2_mul_pre(t, p); BENCH_ADD(ep2_mul_fix(q, t, k)); } BENCH_END; for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) { ep2_free(t[i]); } #if EP_FIX == BASIC || !defined(STRIP) for (int i = 0; i < RLC_EPX_TABLE_BASIC; i++) { ep2_new(t[i]); } BENCH_RUN("ep2_mul_pre_basic") { ep2_rand(p); 
		BENCH_ADD(ep2_mul_pre_basic(t, p));
	} BENCH_END;

	BENCH_RUN("ep2_mul_fix_basic") {
		bn_rand_mod(k, n);
		ep2_rand(p);
		ep2_mul_pre_basic(t, p);
		BENCH_ADD(ep2_mul_fix_basic(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_BASIC; i++) {
		ep2_free(t[i]);
	}
#endif

#if EP_FIX == COMBS || !defined(STRIP)
	/* Fixed-point multiplication using the single-table comb method. */
	for (int i = 0; i < RLC_EPX_TABLE_COMBS; i++) {
		ep2_new(t[i]);
	}
	BENCH_RUN("ep2_mul_pre_combs") {
		ep2_rand(p);
		BENCH_ADD(ep2_mul_pre_combs(t, p));
	} BENCH_END;

	BENCH_RUN("ep2_mul_fix_combs") {
		bn_rand_mod(k, n);
		ep2_rand(p);
		ep2_mul_pre_combs(t, p);
		BENCH_ADD(ep2_mul_fix_combs(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_COMBS; i++) {
		ep2_free(t[i]);
	}
#endif

#if EP_FIX == COMBD || !defined(STRIP)
	/* Fixed-point multiplication using the double-table comb method. */
	for (int i = 0; i < RLC_EPX_TABLE_COMBD; i++) {
		ep2_new(t[i]);
	}
	BENCH_RUN("ep2_mul_pre_combd") {
		BENCH_ADD(ep2_mul_pre_combd(t, p));
	} BENCH_END;

	BENCH_RUN("ep2_mul_fix_combd") {
		bn_rand_mod(k, n);
		ep2_mul_pre_combd(t, p);
		BENCH_ADD(ep2_mul_fix_combd(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_COMBD; i++) {
		ep2_free(t[i]);
	}
#endif

#if EP_FIX == LWNAF || !defined(STRIP)
	/* Fixed-point multiplication using left-to-right w-NAF. */
	for (int i = 0; i < RLC_EPX_TABLE_LWNAF; i++) {
		ep2_new(t[i]);
	}
	BENCH_RUN("ep2_mul_pre_lwnaf") {
		ep2_rand(p);
		BENCH_ADD(ep2_mul_pre_lwnaf(t, p));
	} BENCH_END;

	BENCH_RUN("ep2_mul_fix_lwnaf") {
		bn_rand_mod(k, n);
		ep2_rand(p);
		ep2_mul_pre_lwnaf(t, p);
		BENCH_ADD(ep2_mul_fix_lwnaf(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_LWNAF; i++) {
		ep2_free(t[i]);
	}
#endif

	/* Simultaneous multiplication r = k*p + l*q, generic entry point then
	 * one benchmark per compiled-in algorithm. */
	BENCH_RUN("ep2_mul_sim") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(p);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim(r, p, k, q, l));
	} BENCH_END;

#if EP_SIM == BASIC || !defined(STRIP)
	BENCH_RUN("ep2_mul_sim_basic") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(p);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim_basic(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == TRICK || !defined(STRIP)
	BENCH_RUN("ep2_mul_sim_trick") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(p);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim_trick(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == INTER || !defined(STRIP)
	BENCH_RUN("ep2_mul_sim_inter") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(p);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim_inter(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == JOINT || !defined(STRIP)
	BENCH_RUN("ep2_mul_sim_joint") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(p);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim_joint(r, p, k, q, l));
	} BENCH_END;
#endif

	BENCH_RUN("ep2_mul_sim_gen") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep2_rand(q);
		BENCH_ADD(ep2_mul_sim_gen(r, k, q, l));
	} BENCH_END;

	/* Frobenius endomorphism, hash-to-curve, point compression/decompression. */
	BENCH_RUN("ep2_frb") {
		ep2_rand(q);
		BENCH_ADD(ep2_frb(r, q, 1));
	} BENCH_END;

	BENCH_RUN("ep2_map") {
		uint8_t msg[5];
		rand_bytes(msg, 5);
		BENCH_ADD(ep2_map(p, msg, 5));
	} BENCH_END;

	BENCH_RUN("ep2_pck") {
		ep2_rand(p);
		BENCH_ADD(ep2_pck(q, p));
	} BENCH_END;

	BENCH_RUN("ep2_upk") {
		ep2_rand(p);
		BENCH_ADD(ep2_upk(q, p));
	} BENCH_END;

	ep2_free(p);
	ep2_free(q);
	ep2_free(r);
	bn_free(k);
	bn_free(n);
	bn_free(l);
	fp2_free(s);
}

/* Benchmarks allocation/deallocation of E(F_{p^4}) points. */
static void memory4(void) {
	ep4_t a[BENCH];

	BENCH_FEW("ep4_null", ep4_null(a[i]), 1);

	BENCH_FEW("ep4_new", ep4_new(a[i]), 1);
	for (int i = 0; i < BENCH; i++) {
		ep4_free(a[i]);
	}

	for (int i = 0; i < BENCH; i++) {
		ep4_new(a[i]);
	}
	BENCH_FEW("ep4_free", ep4_free(a[i]), 1);

	(void)a;
}

/* Benchmarks utility routines (copy, compare, normalize, serialize) for
 * points over the quartic extension. */
static void util4(void) {
	ep4_t p, q, t[2];
	uint8_t bin[4 * RLC_FP_BYTES + 1];
	int l;

	ep4_null(p);
	ep4_null(q);
	ep4_null(t[0]);
	ep4_null(t[1]);

	ep4_new(p);
	ep4_new(q);
	ep4_new(t[0]);
	ep4_new(t[1]);

	BENCH_RUN("ep4_is_infty") {
		ep4_rand(p);
		BENCH_ADD(ep4_is_infty(p));
	} BENCH_END;

	BENCH_RUN("ep4_set_infty") {
		ep4_rand(p);
		BENCH_ADD(ep4_set_infty(p));
	} BENCH_END;

	BENCH_RUN("ep4_copy") {
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_copy(p, q));
	} BENCH_END;

	/* ep4_dbl leaves points in projective form, so the "cmp" variants below
	 * exercise comparison with 0, 1 and 2 normalized operands. */
	BENCH_RUN("ep4_cmp") {
		ep4_rand(p);
		ep4_dbl(p, p);
		ep4_rand(q);
		ep4_dbl(q, q);
		BENCH_ADD(ep4_cmp(p, q));
	} BENCH_END;

	BENCH_RUN("ep4_norm") {
		ep4_rand(p);
		ep4_dbl(p, p);
		BENCH_ADD(ep4_norm(p, p));
	} BENCH_END;

	BENCH_RUN("ep4_norm_sim (2)") {
		ep4_rand(t[0]);
		ep4_rand(t[1]);
		ep4_dbl(t[0], t[0]);
		ep4_dbl(t[1], t[1]);
		BENCH_ADD(ep4_norm_sim(t, t, 2));
	} BENCH_END;

	BENCH_RUN("ep4_cmp (1 norm)") {
		ep4_rand(p);
		ep4_dbl(p, p);
		ep4_rand(q);
		BENCH_ADD(ep4_cmp(p, q));
	} BENCH_END;

	BENCH_RUN("ep4_cmp (2 norm)") {
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_cmp(p, q));
	} BENCH_END;

	BENCH_RUN("ep4_rand") {
		BENCH_ADD(ep4_rand(p));
	} BENCH_END;

	BENCH_RUN("ep4_blind") {
		BENCH_ADD(ep4_blind(p, p));
	} BENCH_END;

	BENCH_RUN("ep4_on_curve") {
		ep4_rand(p);
		BENCH_ADD(ep4_on_curve(p));
	} BENCH_END;

	BENCH_RUN("ep4_size_bin") {
		ep4_rand(p);
		BENCH_ADD(ep4_size_bin(p, 0));
	} BENCH_END;

	BENCH_RUN("ep4_write_bin") {
		ep4_rand(p);
		l = ep4_size_bin(p, 0);
		BENCH_ADD(ep4_write_bin(bin, l, p, 0));
	} BENCH_END;

	BENCH_RUN("ep4_read_bin") {
		ep4_rand(p);
		l = ep4_size_bin(p, 0);
		ep4_write_bin(bin, l, p, 0);
		BENCH_ADD(ep4_read_bin(p, bin, l));
	} BENCH_END;

	ep4_free(p);
	ep4_free(q);
	ep4_free(t[0]);
	ep4_free(t[1]);
}

/* Benchmarks arithmetic (add/dbl/mul and variants) over E(F_{p^4}). */
static void arith4(void) {
	ep4_t p, q, r, t[RLC_EPX_TABLE_MAX];
	bn_t k, n, l;
	fp4_t s;

	/* NOTE(review): l is bn_new'd and bn_free'd below but never bn_null'd
	 * here, unlike k and n — looks like an oversight; confirm against the
	 * RELIC convention used elsewhere in this file. */
	ep4_null(p);
	ep4_null(q);
	ep4_null(r);
	bn_null(k);
	bn_null(n);
	fp4_null(s);

	for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) {
		ep4_null(t[i]);
	}

	ep4_new(p);
	ep4_new(q);
	ep4_new(r);
	bn_new(k);
	bn_new(n);
	bn_new(l);
	fp4_new(s);

	ep4_curve_get_ord(n);

	BENCH_RUN("ep4_add") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add(p, p, q);
		ep4_rand(q);
		ep4_rand(p);
		ep4_add(q, q, p);
		BENCH_ADD(ep4_add(r, p, q));
	} BENCH_END;

#if EP_ADD == BASIC || !defined(STRIP)
	BENCH_RUN("ep4_add_basic") {
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_add_basic(r, p, q));
	} BENCH_END;

	BENCH_RUN("ep4_add_slp_basic") {
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_add_slp_basic(r, s, p, q));
	} BENCH_END;
#endif

#if EP_ADD == PROJC || !defined(STRIP)
	/* The pre-additions below force projective coordinates; the (z2 = 1)
	 * and (z1,z2 = 1) variants measure the mixed/affine special cases. */
	BENCH_RUN("ep4_add_projc") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add_projc(p, p, q);
		ep4_rand(q);
		ep4_rand(p);
		ep4_add_projc(q, q, p);
		BENCH_ADD(ep4_add_projc(r, p, q));
	} BENCH_END;

	BENCH_RUN("ep4_add_projc (z2 = 1)") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add_projc(p, p, q);
		ep4_rand(q);
		ep4_norm(q, q);
		BENCH_ADD(ep4_add_projc(r, p, q));
	} BENCH_END;

	BENCH_RUN("ep4_add_projc (z1,z2 = 1)") {
		ep4_rand(p);
		ep4_norm(p, p);
		ep4_rand(q);
		ep4_norm(q, q);
		BENCH_ADD(ep4_add_projc(r, p, q));
	} BENCH_END;
#endif

	BENCH_RUN("ep4_sub") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add(p, p, q);
		ep4_rand(q);
		ep4_rand(p);
		ep4_add(q, q, p);
		BENCH_ADD(ep4_sub(r, p, q));
	} BENCH_END;

	BENCH_RUN("ep4_dbl") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add(p, p, q);
		BENCH_ADD(ep4_dbl(r, p));
	} BENCH_END;

#if EP_ADD == BASIC || !defined(STRIP)
	BENCH_RUN("ep4_dbl_basic") {
		ep4_rand(p);
		BENCH_ADD(ep4_dbl_basic(r, p));
	} BENCH_END;

	BENCH_RUN("ep4_dbl_slp_basic") {
		ep4_rand(p);
		BENCH_ADD(ep4_dbl_slp_basic(r, s, p));
	} BENCH_END;
#endif

#if EP_ADD == PROJC || !defined(STRIP)
	BENCH_RUN("ep4_dbl_projc") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add_projc(p, p, q);
		BENCH_ADD(ep4_dbl_projc(r, p));
	} BENCH_END;

	BENCH_RUN("ep4_dbl_projc (z1 = 1)") {
		ep4_rand(p);
		ep4_norm(p, p);
		BENCH_ADD(ep4_dbl_projc(r, p));
	} BENCH_END;
#endif

	BENCH_RUN("ep4_neg") {
		ep4_rand(p);
		ep4_rand(q);
		ep4_add(p, p, q);
		BENCH_ADD(ep4_neg(r, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul") {
		bn_rand_mod(k, n);
		BENCH_ADD(ep4_mul(q, p, k));
	} BENCH_END;

#if EP_MUL == BASIC || !defined(STRIP)
	BENCH_RUN("ep4_mul_basic") {
		bn_rand_mod(k, n);
		BENCH_ADD(ep4_mul_basic(q, p, k));
	} BENCH_END;
#endif

#if EP_MUL == SLIDE || !defined(STRIP)
	BENCH_RUN("ep4_mul_slide") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		BENCH_ADD(ep4_mul_slide(q, p, k));
	} BENCH_END;
#endif

#if EP_MUL == MONTY || !defined(STRIP)
	BENCH_RUN("ep4_mul_monty") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		BENCH_ADD(ep4_mul_monty(q, p, k));
	} BENCH_END;
#endif

#if EP_MUL == LWNAF || !defined(STRIP)
	BENCH_RUN("ep4_mul_lwnaf") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		BENCH_ADD(ep4_mul_lwnaf(q, p, k));
	} BENCH_END;
#endif

	BENCH_RUN("ep4_mul_gen") {
		bn_rand_mod(k, n);
		BENCH_ADD(ep4_mul_gen(q, k));
	} BENCH_END;

	BENCH_RUN("ep4_mul_dig") {
		bn_rand(k, RLC_POS, RLC_DIG);
		bn_rand_mod(k, n);
		BENCH_ADD(ep4_mul_dig(p, q, k->dp[0]));
	} BENCH_END;

	/* Fixed-base precomputation + multiplication, generic then per-method. */
	for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) {
		ep4_new(t[i]);
	}

	BENCH_RUN("ep4_mul_pre") {
		ep4_rand(p);
		BENCH_ADD(ep4_mul_pre(t, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul_fix") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		ep4_mul_pre(t, p);
		BENCH_ADD(ep4_mul_fix(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_MAX; i++) {
		ep4_free(t[i]);
	}

#if EP_FIX == BASIC || !defined(STRIP)
	for (int i = 0; i < RLC_EPX_TABLE_BASIC; i++) {
		ep4_new(t[i]);
	}
	BENCH_RUN("ep4_mul_pre_basic") {
		ep4_rand(p);
		BENCH_ADD(ep4_mul_pre_basic(t, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul_fix_basic") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		ep4_mul_pre_basic(t, p);
		BENCH_ADD(ep4_mul_fix_basic(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_BASIC; i++) {
		ep4_free(t[i]);
	}
#endif

#if EP_FIX == COMBS || !defined(STRIP)
	for (int i = 0; i < RLC_EPX_TABLE_COMBS; i++) {
		ep4_new(t[i]);
	}
	BENCH_RUN("ep4_mul_pre_combs") {
		ep4_rand(p);
		BENCH_ADD(ep4_mul_pre_combs(t, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul_fix_combs") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		ep4_mul_pre_combs(t, p);
		BENCH_ADD(ep4_mul_fix_combs(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_COMBS; i++) {
		ep4_free(t[i]);
	}
#endif

#if EP_FIX == COMBD || !defined(STRIP)
	for (int i = 0; i < RLC_EPX_TABLE_COMBD; i++) {
		ep4_new(t[i]);
	}
	BENCH_RUN("ep4_mul_pre_combd") {
		BENCH_ADD(ep4_mul_pre_combd(t, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul_fix_combd") {
		bn_rand_mod(k, n);
		ep4_mul_pre_combd(t, p);
		BENCH_ADD(ep4_mul_fix_combd(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_COMBD; i++) {
		ep4_free(t[i]);
	}
#endif

#if EP_FIX == LWNAF || !defined(STRIP)
	for (int i = 0; i < RLC_EPX_TABLE_LWNAF; i++) {
		ep4_new(t[i]);
	}
	BENCH_RUN("ep4_mul_pre_lwnaf") {
		ep4_rand(p);
		BENCH_ADD(ep4_mul_pre_lwnaf(t, p));
	} BENCH_END;

	BENCH_RUN("ep4_mul_fix_lwnaf") {
		bn_rand_mod(k, n);
		ep4_rand(p);
		ep4_mul_pre_lwnaf(t, p);
		BENCH_ADD(ep4_mul_fix_lwnaf(q, t, k));
	} BENCH_END;

	for (int i = 0; i < RLC_EPX_TABLE_LWNAF; i++) {
		ep4_free(t[i]);
	}
#endif

	BENCH_RUN("ep4_mul_sim") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim(r, p, k, q, l));
	} BENCH_END;

#if EP_SIM == BASIC || !defined(STRIP)
	BENCH_RUN("ep4_mul_sim_basic") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim_basic(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == TRICK || !defined(STRIP)
	BENCH_RUN("ep4_mul_sim_trick") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim_trick(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == INTER || !defined(STRIP)
	BENCH_RUN("ep4_mul_sim_inter") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim_inter(r, p, k, q, l));
	} BENCH_END;
#endif

#if EP_SIM == JOINT || !defined(STRIP)
	BENCH_RUN("ep4_mul_sim_joint") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(p);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim_joint(r, p, k, q, l));
	} BENCH_END;
#endif

	BENCH_RUN("ep4_mul_sim_gen") {
		bn_rand_mod(k, n);
		bn_rand_mod(l, n);
		ep4_rand(q);
		BENCH_ADD(ep4_mul_sim_gen(r, k, q, l));
	} BENCH_END;

	BENCH_RUN("ep4_frb") {
		ep4_rand(q);
		BENCH_ADD(ep4_frb(r, q, 1));
	} BENCH_END;

	BENCH_RUN("ep4_map") {
		uint8_t msg[5];
		rand_bytes(msg, 5);
		BENCH_ADD(ep4_map(p, msg, 5));
	} BENCH_END;

	ep4_free(p);
	ep4_free(q);
	ep4_free(r);
	bn_free(k);
	bn_free(n);
	bn_free(l);
	fp4_free(s);
}

/* Entry point: sets up a pairing-friendly curve and runs the quadratic
 * (ep2) and/or quartic (ep4) twist benchmarks, whichever the configured
 * curve supports. Returns 0 on success or when no suitable curve exists. */
int main(void) {
	int r0, r1;

	if (core_init() != RLC_OK) {
		core_clean();
		return 1;
	}

	conf_print();

	util_banner("Benchmarks for the EPX module:", 0);

	if (ep_param_set_any_pairf() != RLC_OK) {
		RLC_THROW(ERR_NO_CURVE);
		core_clean();
		return 0;
	}

	if ((r0 = ep2_curve_is_twist())) {
		ep_param_print();
		util_banner("Utilities:", 1);
		memory2();
		util2();
		util_banner("Arithmetic:", 1);
		arith2();
	}

	if ((r1 = ep4_curve_is_twist())) {
		ep_param_print();
		util_banner("Utilities:", 1);
		memory4();
		util4();
		util_banner("Arithmetic:", 1);
		arith4();
	}

	if (!r0 && !r1) {
		RLC_THROW(ERR_NO_CURVE);
		core_clean();
		return 0;
	}

	core_clean();
	return 0;
}
59561.c
/* * Copyright 2014-2022 Real Logic Limited. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if defined(__linux__) #define _BSD_SOURCE #define _GNU_SOURCE #endif #include <string.h> #include "aeron_socket.h" #include "uri/aeron_uri.h" #include "aeron_driver_sender.h" #include "aeron_driver_context.h" #include "aeron_alloc.h" #include "aeron_position.h" #include "aeron_timestamps.h" #if !defined(HAVE_STRUCT_MMSGHDR) struct mmsghdr { struct msghdr msg_hdr; unsigned int msg_len; }; #endif int aeron_send_channel_endpoint_create( aeron_send_channel_endpoint_t **endpoint, aeron_udp_channel_t *channel, aeron_driver_context_t *context, aeron_counters_manager_t *counters_manager, int64_t registration_id) { aeron_send_channel_endpoint_t *_endpoint = NULL; char bind_addr_and_port[AERON_NETUTIL_FORMATTED_MAX_LENGTH]; int bind_addr_and_port_length; if (aeron_alloc((void **)&_endpoint, sizeof(aeron_send_channel_endpoint_t)) < 0) { return -1; } _endpoint->destination_tracker = NULL; _endpoint->data_paths = &context->sender_proxy->sender->data_paths; struct sockaddr_storage *connect_addr = NULL; if (channel->has_explicit_control || channel->is_dynamic_control_mode || channel->is_manual_control_mode) { if (aeron_alloc((void **)&_endpoint->destination_tracker, sizeof(aeron_udp_destination_tracker_t)) < 0 || aeron_udp_destination_tracker_init( _endpoint->destination_tracker, _endpoint->data_paths, context->sender_cached_clock, channel->is_manual_control_mode, 
AERON_UDP_DESTINATION_TRACKER_DESTINATION_TIMEOUT_NS) < 0) { return -1; } } else if (context->connect_enabled) { connect_addr = &channel->remote_data; } _endpoint->conductor_fields.refcnt = 0; _endpoint->conductor_fields.udp_channel = channel; _endpoint->conductor_fields.managed_resource.incref = aeron_send_channel_endpoint_incref; _endpoint->conductor_fields.managed_resource.decref = aeron_send_channel_endpoint_decref; _endpoint->conductor_fields.managed_resource.clientd = _endpoint; _endpoint->conductor_fields.managed_resource.registration_id = -1; _endpoint->conductor_fields.status = AERON_SEND_CHANNEL_ENDPOINT_STATUS_ACTIVE; _endpoint->conductor_fields.socket_sndbuf = 0 != channel->socket_sndbuf_length ? channel->socket_sndbuf_length : context->socket_sndbuf; _endpoint->conductor_fields.socket_rcvbuf = 0 != channel->socket_rcvbuf_length ? channel->socket_rcvbuf_length : context->socket_rcvbuf; _endpoint->transport.fd = -1; _endpoint->channel_status.counter_id = -1; _endpoint->local_sockaddr_indicator.counter_id = -1; _endpoint->transport_bindings = context->udp_channel_transport_bindings; _endpoint->data_paths = &context->sender_proxy->sender->data_paths; _endpoint->transport.data_paths = _endpoint->data_paths; if (context->udp_channel_transport_bindings->init_func( &_endpoint->transport, channel->is_multicast ? &channel->remote_control : &channel->local_control, channel->is_multicast ? &channel->local_control : &channel->remote_control, connect_addr, channel->interface_index, 0 != channel->multicast_ttl ? 
channel->multicast_ttl : context->multicast_ttl, _endpoint->conductor_fields.socket_rcvbuf, _endpoint->conductor_fields.socket_sndbuf, false, context, AERON_UDP_CHANNEL_TRANSPORT_AFFINITY_SENDER) < 0) { AERON_APPEND_ERR("uri=%s", channel->original_uri); aeron_send_channel_endpoint_delete(counters_manager, _endpoint); return -1; } if (aeron_udp_channel_is_channel_snd_timestamps_enabled(channel)) { _endpoint->transport.timestamp_flags |= AERON_UDP_CHANNEL_TRANSPORT_CHANNEL_SND_TIMESTAMP; } if (aeron_int64_to_ptr_hash_map_init( &_endpoint->publication_dispatch_map, 8, AERON_MAP_DEFAULT_LOAD_FACTOR) < 0) { aeron_send_channel_endpoint_delete(counters_manager, _endpoint); return -1; } if ((bind_addr_and_port_length = aeron_send_channel_endpoint_bind_addr_and_port( _endpoint, bind_addr_and_port, sizeof(bind_addr_and_port))) < 0) { aeron_send_channel_endpoint_delete(counters_manager, _endpoint); return -1; } _endpoint->transport.dispatch_clientd = _endpoint; _endpoint->has_sender_released = false; _endpoint->channel_status.counter_id = aeron_counter_send_channel_status_allocate( counters_manager, registration_id, channel->uri_length, channel->original_uri); _endpoint->channel_status.value_addr = aeron_counters_manager_addr( counters_manager, _endpoint->channel_status.counter_id); if (_endpoint->channel_status.counter_id < 0) { aeron_send_channel_endpoint_delete(counters_manager, _endpoint); return -1; } // TODO: Remove the update and just create in a single shot. 
aeron_channel_endpoint_status_update_label( counters_manager, _endpoint->channel_status.counter_id, AERON_COUNTER_SEND_CHANNEL_STATUS_NAME, channel->uri_length, channel->original_uri, bind_addr_and_port_length, bind_addr_and_port); _endpoint->local_sockaddr_indicator.counter_id = aeron_counter_local_sockaddr_indicator_allocate( counters_manager, AERON_COUNTER_SND_LOCAL_SOCKADDR_NAME, registration_id, _endpoint->channel_status.counter_id, bind_addr_and_port); _endpoint->local_sockaddr_indicator.value_addr = aeron_counters_manager_addr( counters_manager, _endpoint->local_sockaddr_indicator.counter_id); if (_endpoint->local_sockaddr_indicator.counter_id < 0) { aeron_send_channel_endpoint_delete(counters_manager, _endpoint); return -1; } aeron_counter_set_ordered( _endpoint->local_sockaddr_indicator.value_addr, AERON_COUNTER_CHANNEL_ENDPOINT_STATUS_ACTIVE); _endpoint->sender_proxy = context->sender_proxy; _endpoint->cached_clock = context->sender_cached_clock; _endpoint->time_of_last_sm_ns = aeron_clock_cached_nano_time(_endpoint->cached_clock); memcpy(&_endpoint->current_data_addr, &channel->remote_data, sizeof(_endpoint->current_data_addr)); *endpoint = _endpoint; return 0; } int aeron_send_channel_endpoint_delete( aeron_counters_manager_t *counters_manager, aeron_send_channel_endpoint_t *endpoint) { if (NULL != counters_manager) { if (-1 != endpoint->channel_status.counter_id) { aeron_counters_manager_free(counters_manager, endpoint->channel_status.counter_id); } if (-1 != endpoint->local_sockaddr_indicator.counter_id) { aeron_counters_manager_free(counters_manager, endpoint->local_sockaddr_indicator.counter_id); } } aeron_int64_to_ptr_hash_map_delete(&endpoint->publication_dispatch_map); aeron_udp_channel_delete(endpoint->conductor_fields.udp_channel); endpoint->transport_bindings->close_func(&endpoint->transport); if (NULL != endpoint->destination_tracker) { aeron_udp_destination_tracker_close(endpoint->destination_tracker); 
aeron_free(endpoint->destination_tracker); } aeron_free(endpoint); return 0; } void aeron_send_channel_endpoint_incref(void *clientd) { aeron_send_channel_endpoint_t *endpoint = (aeron_send_channel_endpoint_t *)clientd; endpoint->conductor_fields.refcnt++; } void aeron_send_channel_endpoint_decref(void *clientd) { aeron_send_channel_endpoint_t *endpoint = (aeron_send_channel_endpoint_t *)clientd; if (0 == --endpoint->conductor_fields.refcnt) { /* mark as CLOSING to be aware not to use again (to be receiver_released and deleted) */ endpoint->conductor_fields.status = AERON_SEND_CHANNEL_ENDPOINT_STATUS_CLOSING; aeron_driver_sender_proxy_on_remove_endpoint(endpoint->sender_proxy, endpoint); } } static void aeron_send_channel_apply_timestamps( aeron_send_channel_endpoint_t *endpoint, struct iovec *iov, size_t iov_length) { if (AERON_UDP_CHANNEL_TRANSPORT_CHANNEL_SND_TIMESTAMP & endpoint->transport.timestamp_flags) { struct timespec send_timestamp; if (0 == aeron_clock_gettime_realtime(&send_timestamp)) { int32_t offset = endpoint->conductor_fields.udp_channel->channel_snd_timestamp_offset; for (size_t i = 0; i < iov_length; i++) { aeron_timestamps_set_timestamp( &send_timestamp, offset, (uint8_t *)iov[0].iov_base, iov[0].iov_len); } } } } int aeron_send_channel_send( aeron_send_channel_endpoint_t *endpoint, struct iovec *iov, size_t iov_length, int64_t *bytes_sent) { int result; aeron_send_channel_apply_timestamps(endpoint, iov, iov_length); if (NULL == endpoint->destination_tracker) { result = endpoint->data_paths->send_func( endpoint->data_paths, &endpoint->transport, &endpoint->current_data_addr, iov, iov_length, bytes_sent); } else { result = aeron_udp_destination_tracker_send( endpoint->destination_tracker, &endpoint->transport, iov, iov_length, bytes_sent); } return result; } int aeron_send_channel_endpoint_add_publication( aeron_send_channel_endpoint_t *endpoint, aeron_network_publication_t *publication) { int64_t key_value = 
        aeron_map_compound_key(publication->stream_id, publication->session_id);
    int result = aeron_int64_to_ptr_hash_map_put(&endpoint->publication_dispatch_map, key_value, publication);

    if (result < 0)
    {
        AERON_APPEND_ERR("%s", "Failed to add publication to publication_dispatch_map");
    }

    return result;
}

/* Unregisters a publication from inbound frame dispatch. Always returns 0. */
int aeron_send_channel_endpoint_remove_publication(
    aeron_send_channel_endpoint_t *endpoint, aeron_network_publication_t *publication)
{
    int64_t key_value = aeron_map_compound_key(publication->stream_id, publication->session_id);

    aeron_int64_to_ptr_hash_map_remove(&endpoint->publication_dispatch_map, key_value);

    return 0;
}

/*
 * Transport receive callback: validates the frame header, then routes NAK,
 * Status Message and RTT Measurement frames to their handlers. Undersized or
 * wrong-version frames bump the invalid-frames counter; unknown frame types
 * are silently ignored. data_paths, destination_clientd and media_timestamp
 * are unused here (required by the callback signature).
 */
void aeron_send_channel_endpoint_dispatch(
    aeron_udp_channel_data_paths_t *data_paths,
    aeron_udp_channel_transport_t *transport,
    void *sender_clientd,
    void *endpoint_clientd,
    void *destination_clientd,
    uint8_t *buffer,
    size_t length,
    struct sockaddr_storage *addr,
    struct timespec *media_timestamp)
{
    aeron_driver_sender_t *sender = (aeron_driver_sender_t *)sender_clientd;
    aeron_frame_header_t *frame_header = (aeron_frame_header_t *)buffer;
    aeron_send_channel_endpoint_t *endpoint = (aeron_send_channel_endpoint_t *)endpoint_clientd;

    /* Length is checked before the header is read (short-circuit). */
    if ((length < sizeof(aeron_frame_header_t)) || (frame_header->version != AERON_FRAME_HEADER_VERSION))
    {
        aeron_counter_increment(sender->invalid_frames_counter, 1);
        return;
    }

    switch (frame_header->type)
    {
        case AERON_HDR_TYPE_NAK:
            if (length >= sizeof(aeron_nak_header_t))
            {
                aeron_send_channel_endpoint_on_nak(endpoint, buffer, length, addr);
                aeron_counter_ordered_increment(sender->nak_messages_received_counter, 1);
            }
            else
            {
                aeron_counter_increment(sender->invalid_frames_counter, 1);
            }
            break;

        case AERON_HDR_TYPE_SM:
            if (length >= sizeof(aeron_status_message_header_t))
            {
                aeron_send_channel_endpoint_on_status_message(endpoint, buffer, length, addr);
                aeron_counter_ordered_increment(sender->status_messages_received_counter, 1);
            }
            else
            {
                aeron_counter_increment(sender->invalid_frames_counter, 1);
            }
            break;

        case AERON_HDR_TYPE_RTTM:
            if (length >= sizeof(aeron_rttm_header_t))
            {
                aeron_send_channel_endpoint_on_rttm(endpoint, buffer, length, addr);
            }
            else
            {
                aeron_counter_increment(sender->invalid_frames_counter, 1);
            }
            break;

        default:
            break;
    }
}

/* Routes a NAK to the matching publication (if any) so it can retransmit. */
void aeron_send_channel_endpoint_on_nak(
    aeron_send_channel_endpoint_t *endpoint, uint8_t *buffer, size_t length, struct sockaddr_storage *addr)
{
    aeron_nak_header_t *nak_header = (aeron_nak_header_t *)buffer;
    int64_t key_value = aeron_map_compound_key(nak_header->stream_id, nak_header->session_id);
    aeron_network_publication_t *publication = aeron_int64_to_ptr_hash_map_get(
        &endpoint->publication_dispatch_map, key_value);

    if (NULL != publication)
    {
        aeron_network_publication_on_nak(publication, nak_header->term_id, nak_header->term_offset, nak_header->length);
    }
}

/* Hash-map for-each callback: triggers a setup frame on every publication.
 * clientd and key are unused (imposed by the for-each signature). */
void aeron_send_channel_endpoint_publication_trigger_send_setup_frame(void *clientd, int64_t key, void *value)
{
    aeron_network_publication_t *publication = (aeron_network_publication_t *)value;
    aeron_network_publication_trigger_send_setup_frame(publication);
}

/*
 * Handles an inbound Status Message: feeds the destination tracker (MDC), and
 * dispatches to the matching publication. A session_id/stream_id of 0/0 with
 * the SEND_SETUP flag is a wildcard that triggers setup frames on all
 * publications. Refreshes time_of_last_sm_ns when a publication matched.
 */
void aeron_send_channel_endpoint_on_status_message(
    aeron_send_channel_endpoint_t *endpoint, uint8_t *buffer, size_t length, struct sockaddr_storage *addr)
{
    aeron_status_message_header_t *sm_header = (aeron_status_message_header_t *)buffer;
    int64_t key_value = aeron_map_compound_key(sm_header->stream_id, sm_header->session_id);
    aeron_network_publication_t *publication = aeron_int64_to_ptr_hash_map_get(
        &endpoint->publication_dispatch_map, key_value);

    if (NULL != endpoint->destination_tracker)
    {
        aeron_udp_destination_tracker_on_status_message(endpoint->destination_tracker, buffer, length, addr);

        if (0 == sm_header->session_id &&
            0 == sm_header->stream_id &&
            (sm_header->frame_header.flags & AERON_STATUS_MESSAGE_HEADER_SEND_SETUP_FLAG))
        {
            aeron_int64_to_ptr_hash_map_for_each(
                &endpoint->publication_dispatch_map,
                aeron_send_channel_endpoint_publication_trigger_send_setup_frame,
                endpoint);
        }
    }

    if (NULL != publication)
    {
        if (sm_header->frame_header.flags & AERON_STATUS_MESSAGE_HEADER_SEND_SETUP_FLAG)
        {
            aeron_network_publication_trigger_send_setup_frame(publication);
        }
        else
        {
            aeron_network_publication_on_status_message(publication, buffer, length, addr);
        }

        endpoint->time_of_last_sm_ns = aeron_clock_cached_nano_time(endpoint->cached_clock);
    }
}

/* Routes an RTT Measurement frame to the matching publication (if any). */
void aeron_send_channel_endpoint_on_rttm(
    aeron_send_channel_endpoint_t *endpoint, uint8_t *buffer, size_t length, struct sockaddr_storage *addr)
{
    aeron_rttm_header_t *rttm_header = (aeron_rttm_header_t *)buffer;
    int64_t key_value = aeron_map_compound_key(rttm_header->stream_id, rttm_header->session_id);
    aeron_network_publication_t *publication = aeron_int64_to_ptr_hash_map_get(
        &endpoint->publication_dispatch_map, key_value);

    if (NULL != publication)
    {
        aeron_network_publication_on_rttm(publication, buffer, length, addr);
    }
}

/*
 * Periodic check: for manual-MDC channels, delegates to the destination
 * tracker; for unicast channels with an explicit endpoint, requests a DNS
 * re-resolution via the conductor once no SM has arrived within the
 * destination timeout. Always returns 0.
 */
int aeron_send_channel_endpoint_check_for_re_resolution(
    aeron_send_channel_endpoint_t *endpoint, int64_t now_ns, aeron_driver_conductor_proxy_t *conductor_proxy)
{
    if (endpoint->conductor_fields.udp_channel->is_manual_control_mode)
    {
        aeron_udp_destination_tracker_check_for_re_resolution(
            endpoint->destination_tracker, endpoint, now_ns, conductor_proxy);
    }
    else if (!endpoint->conductor_fields.udp_channel->is_multicast &&
        endpoint->conductor_fields.udp_channel->has_explicit_endpoint &&
        now_ns > (endpoint->time_of_last_sm_ns + AERON_SEND_CHANNEL_ENDPOINT_DESTINATION_TIMEOUT_NS))
    {
        const char *endpoint_name = endpoint->conductor_fields.udp_channel->uri.params.udp.endpoint;

        aeron_driver_conductor_proxy_on_re_resolve_endpoint(
            conductor_proxy, endpoint_name, endpoint, &endpoint->current_data_addr);
    }

    return 0;
}

/*
 * Applies a re-resolved address: forwarded to the destination tracker when
 * present, otherwise updates current_data_addr and reconnects the transport.
 * Returns 0 on success, -1 when the reconnect fails.
 */
int aeron_send_channel_endpoint_resolution_change(
    aeron_driver_context_t *context,
    aeron_send_channel_endpoint_t *endpoint,
    const char *endpoint_name,
    struct sockaddr_storage *new_addr)
{
    if (NULL != endpoint->destination_tracker)
    {
        aeron_udp_destination_tracker_resolution_change(endpoint->destination_tracker, endpoint_name, new_addr);
    }
    else
    {
        memcpy(&endpoint->current_data_addr, new_addr, sizeof(endpoint->current_data_addr));
        if (context->udp_channel_transport_bindings->reconnect_func(
            &endpoint->transport, &endpoint->current_data_addr) < 0)
        {
            char addr_str[AERON_NETUTIL_FORMATTED_MAX_LENGTH];
            aeron_format_source_identity(addr_str, sizeof(addr_str), &endpoint->current_data_addr);
            AERON_APPEND_ERR("failed to reconnect transport with re-resolved address: %s", addr_str);
            return -1;
        }
    }

    return 0;
}

/* Extern instantiations of the header's inline helpers. */
extern void aeron_send_channel_endpoint_sender_release(aeron_send_channel_endpoint_t *endpoint);
extern bool aeron_send_channel_endpoint_has_sender_released(aeron_send_channel_endpoint_t *endpoint);
extern int aeron_send_channel_endpoint_add_destination(
    aeron_send_channel_endpoint_t *endpoint, aeron_uri_t *uri, struct sockaddr_storage *addr);
extern int aeron_send_channel_endpoint_remove_destination(
    aeron_send_channel_endpoint_t *endpoint, struct sockaddr_storage *addr, aeron_uri_t **removed_uri);
extern bool aeron_send_channel_endpoint_tags_match(
    aeron_send_channel_endpoint_t *endpoint, aeron_udp_channel_t *channel);
extern int aeron_send_channel_endpoint_bind_addr_and_port(
    aeron_send_channel_endpoint_t *endpoint, char *buffer, size_t length);
509257.c
/*
Copyright 2010-2013 SourceGear, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/**
 *
 * @file sg_jsondb.c
 *
 * @details Implementation for storing json objects in a sql store for speedier access to elements.
 *
 */

#include <sg.h>

#define MY_BUSY_TIMEOUT_MS      (30000)

/* Per-object handle: the sqlite connection plus the currently selected
 * json object (name and its row id in the store). */
typedef struct
{
	sqlite3* psql;
	const char* pszObjectName;
	SG_int64 objectId;
} _jsondb_handle;

/* State threaded through the add-foreach callbacks when inserting the
 * members of a vhash/varray recursively. */
struct _add_foreach_state
{
	_jsondb_handle* pJsonDb;
	const char* pszParentPath;
	SG_uint32 iVarrayIdxOffset;
};
typedef struct _add_foreach_state add_foreach_state;

//////////////////////////////////////////////////////////////////////////

#define STACK_SIZE 1024

/* Fixed-size explicit stack used for iterative sub-tree traversal; each
 * entry records the container pointer and whether it is a vhash (vs varray). */
struct _subTreeStack
{
	void* stack[STACK_SIZE];
	SG_bool isVhash[STACK_SIZE];
	SG_uint32 ndx;
};
typedef struct _subTreeStack subTreeStack;

/* Pushes a container onto the traversal stack.
 * Note: slot 0 is never used (ndx is pre-incremented), so effective
 * capacity is STACK_SIZE - 1; throws SG_ERR_LIMIT_EXCEEDED when full. */
static void _push(SG_context* pCtx, subTreeStack* pStack, SG_bool isVhash, void* p)
{
	if (pStack->ndx >= (STACK_SIZE - 1))
		SG_ERR_THROW_RETURN(SG_ERR_LIMIT_EXCEEDED);
	pStack->stack[++pStack->ndx] = p;
	pStack->isVhash[pStack->ndx] = isVhash;
}

/* Pops the top container; throws SG_ERR_LIMIT_EXCEEDED on underflow
 * (empty stack, i.e. ndx == 0). */
static void _pop(SG_context* pCtx, subTreeStack* pStack, SG_bool* pIsVhash, void** pp)
{
	if (pStack->ndx == 0)
		SG_ERR_THROW_RETURN(SG_ERR_LIMIT_EXCEEDED);
	*pIsVhash = pStack->isVhash[pStack->ndx];
	*pp = pStack->stack[pStack->ndx--];
}

//////////////////////////////////////////////////////////////////////////

static void _insertVariant(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	const char* pszNewNodePath,
	SG_bool bAddRecursive,
	SG_bool bVariantIndexOk,
	const SG_variant* pv);

/**
 * Splits a node path into an optional scrubbed copy (trailing slash removed),
 * its parent path, and its leaf name. Rejects paths containing "//".
 * A backslash before a trailing slash escapes it. Output pointers may be NULL
 * when that piece is not wanted; on success the caller owns the returned
 * strings. Don't call this on the root path. It won't work.
 */
static void _pathHelper(
	SG_context* pCtx,
	const char* pszInPath,
	SG_uint32 lenPath,
	char** ppszScrubbedPath,
	char** ppszParentPath,
	char** ppszLeafName)
{
	SG_uint32 i;
	char* pszScrubbedPath = NULL;
	char* pszParentPath = NULL;
	char* pszLeafName = NULL;

	/* lenPath == 0 means "compute it for me". */
	if (!lenPath)
		lenPath = SG_STRLEN(pszInPath);
	SG_ASSERT(lenPath > 1);

	// Look for double-slashes
	for (i = 1; i < lenPath; i++)
	{
		if ( (pszInPath[i] == '/') && (pszInPath[i-1] == '/') )
			SG_ERR_THROW_RETURN(SG_ERR_JSONDB_INVALID_PATH);
	}

	// Ignore trailing slash.
	if (pszInPath[lenPath - 1] == '/')
	{
		// As long as it's not escaped.
		if ( (lenPath < 2) || (pszInPath[lenPath-2] != '\\') )
			lenPath--;
	}

	if (ppszScrubbedPath)
	{
		SG_ERR_CHECK(  SG_allocN(pCtx,lenPath+1,pszScrubbedPath)  );
		memcpy(pszScrubbedPath, pszInPath, lenPath);
		pszScrubbedPath[lenPath] = 0;
	}

	/* Scan backwards for the last unescaped '/' separating parent and leaf. */
	for (i = lenPath - 1; SG_TRUE; i--)
	{
		if ( (pszInPath[i] == '/') && ((i < 2) || (pszInPath[i-1] != '\\')) )
		{
			if (ppszParentPath)
			{
				// Remove the trailing slash except when it's the root node.
				SG_uint32 lenDestBuf = (i ? i+1 : 2);
				SG_ERR_CHECK(  SG_allocN(pCtx, lenDestBuf, pszParentPath)  );
				// We expect strcpy to truncate the child path to create the parent path.
				SG_strcpy(pCtx, pszParentPath, lenDestBuf, pszInPath);
				SG_ASSERT(SG_context__err_equals(pCtx, SG_ERR_BUFFERTOOSMALL));
				SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_BUFFERTOOSMALL);
			}
			if (ppszLeafName)
			{
				const char* pszLeaf = &pszInPath[i] + 1;
				SG_uint32 len = SG_STRLEN(pszLeaf);

				// remove trailing slash
				if (pszLeaf[len-1] == '/')
				{
					// As long as it's not escaped.
					if ( (len < 2) || (pszLeaf[len-2] != '\\') )
						len--;
				}

				/* NOTE(review): SG_ERR_CHECK_RETURN returns directly rather
				 * than jumping to fail, so pszScrubbedPath/pszParentPath leak
				 * if this allocation fails -- confirm intended. */
				SG_ERR_CHECK_RETURN(  SG_malloc(pCtx, len+1, &pszLeafName)  );
				memcpy(pszLeafName,pszLeaf,len);
				pszLeafName[len] = 0;
			}
			break;
		}
		if (0 == i)
			SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
	}

	SG_RETURN_AND_NULL(pszScrubbedPath, ppszScrubbedPath);
	SG_RETURN_AND_NULL(pszParentPath, ppszParentPath);
	SG_RETURN_AND_NULL(pszLeafName, ppszLeafName);

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszScrubbedPath);
	SG_NULLFREE(pCtx, pszParentPath);
	SG_NULLFREE(pCtx, pszLeafName);
}

/**
 * Inserts a node at the given path into the nested-set (lft/rgt) tree,
 * optionally creating missing ancestors (vhashes by default, varrays when a
 * path segment is "#"). For varray members, "#" leaf names are rewritten to
 * the next numeric index. Caller must manage sqlite tx.
 */
static void _insertNode(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	const char* pszNewNodePathGiven,
	SG_bool bAddRecursive,
	SG_uint16 type,
	SG_bool bVariantIndexOk,
	const char* pszVal,
	SG_uint32* piNewNodeRgt,
	char** ppszNewNodePath)
{
	SG_uint32 lenPath = SG_STRLEN(pszNewNodePathGiven);
	char* pszNewNodePathForVarray = NULL;
	char* pszNewNodePath = NULL;
	char* pszParentPath = NULL;
	char* pszYoungestVarraySiblingLeafName = NULL;
	char* pszNodeLeafName = NULL;
	SG_uint32 iParentNodeRgt;
	SG_bool bValidVarrayPath = SG_FALSE;
	SG_uint16 parentType;
	const char* pszYoungestVarraySiblingPath = NULL;
	char buf[11];
	sqlite3_stmt* pStmt = NULL;
#ifdef DEBUG
	SG_uint32 nrNodesUpdated;
#endif

	if (!pMe->objectId)
		SG_ERR_THROW_RETURN(  SG_ERR_JSONDB_NO_CURRENT_OBJECT  );

	if (pszNewNodePathGiven[0] != '/')
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_INVALID_PATH); // path not rooted

	if (lenPath > 1)
	{
		// Not the root node, so we need to find/create/update ancestors

		// Get parent path
		SG_ERR_CHECK(  _pathHelper(pCtx, pszNewNodePathGiven, lenPath, &pszNewNodePath, &pszParentPath, &pszNodeLeafName)  );

		// When adding an element to a varray, the node name should be "#", e.g. "/my_varray/#"
		// to signify "add this element at the end of the array".
		if ( (pszNodeLeafName[0] == '#') && (strlen(pszNodeLeafName) == 1) )
			bValidVarrayPath = SG_TRUE;

		// Get parent
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"SELECT type, rgt FROM nodes "
			"WHERE json_object_id = ? AND full_path = ? LIMIT 1")  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
		SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmt, 2, pszParentPath)  );
		sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
		if (SG_CONTEXT__HAS_ERR(pCtx))
		{
			// The parent node doesn't already exist.
			if (bAddRecursive)
			{
				// Recursively add parents, get parent rgt.
				SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_SQLITE(SQLITE_DONE));
				SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

				// Add the impliied parent node.  It will be a vhash unless '#'
				// was given for its name, in which case it will be a varray.
				if (bValidVarrayPath)
				{
					char* pTmp = NULL;
					parentType = SG_VARIANT_TYPE_VARRAY;
					// When adding to a varray, the path changes: the #'s get replaced with numbers.
					// So we have to re-set the actual parent path here.
					SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszParentPath, bAddRecursive, parentType,
						SG_FALSE, NULL, &iParentNodeRgt, &pTmp)  );
					SG_NULLFREE(pCtx, pszParentPath);
					pszParentPath = pTmp;
				}
				else
				{
					parentType = SG_VARIANT_TYPE_VHASH;
					SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszParentPath, bAddRecursive, parentType,
						SG_FALSE, NULL, &iParentNodeRgt, NULL)  );
				}
			}
			else
			{
				SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_DONE), SG_ERR_JSONDB_PARENT_DOESNT_EXIST);
				SG_context__err_set_description(pCtx, "%s", pszParentPath);
				SG_ERR_RETHROW;
			}
		}
		else
		{
			// The parent node exists, but it has to be a vhash or varray to have have descendants.
			parentType = (SG_uint16)sqlite3_column_int(pStmt, 0);
			if (parentType == SG_VARIANT_TYPE_VHASH)
			{
				// A vhash is always an acceptable parent.  All is well, do nothing.
			}
			else if (parentType == SG_VARIANT_TYPE_VARRAY)
			{
				// A varray is an acceptable parent if this node's key is "#", which we determined above.
				if (!bValidVarrayPath && !bVariantIndexOk)
					SG_ERR_THROW2(SG_ERR_JSONDB_INVALID_PATH, (pCtx, "%s", pszNewNodePathGiven));
			}
			else
				SG_ERR_THROW2(SG_ERR_JSONDB_NON_CONTAINER_ANCESTOR_EXISTS, (pCtx, "%s", pszParentPath));

			// The parent exists and is a valid container type.  Grab its rgt.
			iParentNodeRgt = sqlite3_column_int(pStmt, 1);
			SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
		}

		// If we're inserting a varray element, we need to determine its index.
		if (parentType == SG_VARIANT_TYPE_VARRAY && !bVariantIndexOk)
		{
			SG_uint32 index;
			SG_uint32 lenNewPath;

			/* The child with rgt = parent.rgt - 1 is the last (youngest)
			 * element of the varray in nested-set order. */
			SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
				"SELECT child.full_path "
				"FROM nodes AS parent, nodes AS child "
				"WHERE parent.json_object_id = ?1 AND child.json_object_id = ?1 "
				"  AND parent.rgt = ?2 AND child.rgt = parent.rgt - 1 "
				"LIMIT 1;")  );
			SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
			SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, iParentNodeRgt)  );
			sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
			if (SG_context__err_equals(pCtx, SG_ERR_SQLITE(SQLITE_DONE)))
			{
				SG_ERR_DISCARD;
				// The parent varray is empty.  New node's index should be 0.
				index = 0;
			}
			else
			{
				SG_ERR_CHECK_CURRENT;
				// The parent varray is not empty.  Get next index.
				pszYoungestVarraySiblingPath = (const char*)sqlite3_column_text(pStmt, 0);
				SG_ERR_CHECK(  _pathHelper(pCtx, pszYoungestVarraySiblingPath, 0, NULL, NULL,
					&pszYoungestVarraySiblingLeafName)  );
				SG_ERR_CHECK(  SG_uint32__parse(pCtx, &index, pszYoungestVarraySiblingLeafName)  );
				index++;
			}
			SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

			// Set path with correct index
			SG_ERR_CHECK(  SG_sprintf(pCtx, buf, 10, "%d", index)  );
			if (strcmp(pszParentPath, "/") == 0)
			{
				lenNewPath = SG_STRLEN(pszParentPath) + SG_STRLEN(buf) + 1;
				SG_ERR_CHECK(  SG_allocN(pCtx, lenNewPath, pszNewNodePathForVarray)  );
				SG_ERR_CHECK(  SG_strcpy(pCtx, pszNewNodePathForVarray, lenNewPath, pszParentPath)  );
			}
			else
			{
				lenNewPath = SG_STRLEN(pszParentPath) + SG_STRLEN(buf) + 2;
				SG_ERR_CHECK(  SG_allocN(pCtx, lenNewPath, pszNewNodePathForVarray)  );
				SG_ERR_CHECK(  SG_strcpy(pCtx, pszNewNodePathForVarray, lenNewPath, pszParentPath)  );
				SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePathForVarray, lenNewPath, "/")  );
			}
			SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePathForVarray, lenNewPath, buf)  );

			/* NOTE(review): pszNodeLeafName now aliases the stack buffer buf;
			 * presumably later cleanup must not SG_NULLFREE it in this state —
			 * verify against the rest of this function (not visible here). */
			SG_NULLFREE(pCtx, pszNodeLeafName);
			pszNodeLeafName = buf;
		}

		// Update ancestor lft/rgt values
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"UPDATE nodes "
			"   SET lft = CASE WHEN lft > ?1 THEN lft + 2 ELSE lft END, "
			"       rgt = CASE when rgt >= ?1 THEN rgt + 2 ELSE rgt END "
			"WHERE json_object_id = ?2 AND rgt >= ?1;")  );
		SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 1, iParentNodeRgt)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 2, pMe->objectId)  );
		SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );
		SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

#ifdef DEBUG
		nrNodesUpdated = 0;
		SG_ERR_CHECK(  sg_sqlite__num_changes(pCtx, pMe->psql, &nrNodesUpdated)  );
		SG_ASSERT(nrNodesUpdated);
#endif
	} // (lenPath > 1)
	else
	{
		// Inserting root node.
		SG_ERR_CHECK(  SG_STRDUP(pCtx, pszNewNodePathGiven, &pszNewNodePath)  );
		iParentNodeRgt = 1;
	}

	// Insert new node
	{
		const char* pszActualPath = (pszNewNodePathForVarray ?
pszNewNodePathForVarray : pszNewNodePath); SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt, "INSERT into nodes " " (json_object_id, full_path, leaf_name, type, val, lft, rgt) VALUES " " (?, ?, ?, ?, ?, ?, ?);") ); SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) ); SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszActualPath) ); if (pszNodeLeafName) SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 3, pszNodeLeafName) ); else SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 3, "**ROOT**") ); SG_ERR_CHECK( sg_sqlite__bind_int(pCtx, pStmt, 4, type) ); SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 5, pszVal) ); SG_ERR_CHECK( sg_sqlite__bind_int(pCtx, pStmt, 6, iParentNodeRgt) ); SG_ERR_CHECK( sg_sqlite__bind_int(pCtx, pStmt, 7, iParentNodeRgt + 1) ); sg_sqlite__step(pCtx, pStmt, SQLITE_DONE); if (SG_context__err_equals(pCtx, SG_ERR_SQLITE(SQLITE_CONSTRAINT))) SG_ERR_RESET_THROW2(SG_ERR_JSONDB_OBJECT_ALREADY_EXISTS, (pCtx, "%s", pszActualPath)); SG_ERR_CHECK_CURRENT; SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) ); } #ifdef DEBUG nrNodesUpdated = 0; SG_ERR_CHECK( sg_sqlite__num_changes(pCtx, pMe->psql, &nrNodesUpdated) ); SG_ASSERT(nrNodesUpdated == 1); #endif if (ppszNewNodePath) { if (pszNewNodePathForVarray) SG_RETURN_AND_NULL(pszNewNodePathForVarray, ppszNewNodePath); else SG_RETURN_AND_NULL(pszNewNodePath, ppszNewNodePath); } if (piNewNodeRgt) *piNewNodeRgt = iParentNodeRgt + 1; /* fall through */ fail: SG_NULLFREE(pCtx, pszNewNodePath); SG_NULLFREE(pCtx, pszParentPath); if (pszNodeLeafName != buf) SG_NULLFREE(pCtx, pszNodeLeafName); SG_NULLFREE(pCtx, pszYoungestVarraySiblingLeafName); SG_NULLFREE(pCtx, pszNewNodePathForVarray); SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) ); } /** * Caller must manage sqlite tx. 
*/

/**
 * SG_vhash__foreach callback: inserts one vhash member under
 * pState->pszParentPath by building "<parent>/<key>" and recursing
 * into _insertVariant.  ctx is an add_foreach_state*.
 */
static void _vhash_add_foreach_cb(
	SG_context* pCtx,
	void* ctx,
	const SG_vhash* pvh,
	const char* putf8Key,
	const SG_variant* pv)
{
	add_foreach_state* pState = (add_foreach_state*)ctx;
	SG_uint32 len;
	char* pszNewNodePath = NULL;

	SG_UNUSED(pvh);

	// parent + '/' + key + NUL terminator
	len = SG_STRLEN(pState->pszParentPath) + SG_STRLEN(putf8Key) + 2;
	SG_ERR_CHECK(  SG_allocN(pCtx, len, pszNewNodePath)  );
	SG_ERR_CHECK(  SG_strcpy(pCtx, pszNewNodePath, len, pState->pszParentPath)  );
	SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePath, len, "/")  );
	SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePath, len, putf8Key)  );

	SG_ERR_CHECK(  _insertVariant(pCtx, pState->pJsonDb, pszNewNodePath, SG_FALSE, SG_FALSE, pv)  );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszNewNodePath);
}

/**
 * SG_varray__foreach callback: inserts one varray element under
 * pState->pszParentPath by building "<parent>/<index>" and recursing
 * into _insertVariant (with bVariantIndexOk so numeric keys are accepted).
 * pVoidData is an add_foreach_state*.
 */
static void _varray_add_foreach_cb(
	SG_context* pCtx,
	void* pVoidData,
	const SG_varray* pva,
	SG_uint32 ndx,
	const SG_variant* pv)
{
	add_foreach_state* pState = (add_foreach_state*)pVoidData;
	SG_uint32 len;
	char* pszNewNodePath = NULL;
	char buf[11];

	SG_UNUSED(pva);

	ndx += pState->iVarrayIdxOffset;
	// NOTE(review): "%d" with an SG_uint32 argument, and buf is 11 bytes but
	// only 10 are offered to SG_sprintf — confirm SG_sprintf's size semantics.
	SG_ERR_CHECK(  SG_sprintf(pCtx, buf, 10, "%d", ndx)  );

	// parent + '/' + index + NUL terminator
	len = SG_STRLEN(pState->pszParentPath) + SG_STRLEN(buf) + 2;
	SG_ERR_CHECK(  SG_allocN(pCtx, len, pszNewNodePath)  );
	SG_ERR_CHECK(  SG_strcpy(pCtx, pszNewNodePath, len, pState->pszParentPath)  );
	SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePath, len, "/")  );
	SG_ERR_CHECK(  SG_strcat(pCtx, pszNewNodePath, len, buf)  );

	SG_ERR_CHECK(  _insertVariant(pCtx, pState->pJsonDb, pszNewNodePath, SG_FALSE, SG_TRUE, pv)  );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszNewNodePath);
}

/**
 * Inserts the variant pv at pszNewNodePath: scalars are stored as a single
 * node (numbers/bools serialized to text in buf), containers insert their
 * own node then recurse over members via the foreach callbacks above.
 * Caller must manage the sqlite tx.
 */
static void _insertVariant(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	const char* pszNewNodePath,
	SG_bool bAddRecursive,
	SG_bool bVariantIndexOk,
	const SG_variant* pv)
{
	char buf[256];
	add_foreach_state state;
	char* pszParentPath = NULL;

	switch (pv->type)
	{
	case SG_VARIANT_TYPE_NULL:
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, NULL, NULL, NULL)  );
		break;
	case SG_VARIANT_TYPE_INT64:
		SG_int64_to_sz(pv->v.val_int64, buf);
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, buf, NULL, NULL)  );
		break;
	case SG_VARIANT_TYPE_DOUBLE:
		SG_ERR_CHECK(  SG_sprintf(pCtx, buf, 255, "%f", pv->v.val_double)  );
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, buf, NULL, NULL)  );
		break;
	case SG_VARIANT_TYPE_BOOL:
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, pv->v.val_bool ? "1" : "0", NULL, NULL)  );
		break;
	case SG_VARIANT_TYPE_SZ:
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, pv->v.val_sz, NULL, NULL)  );
		break;
	case SG_VARIANT_TYPE_VHASH:
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, NULL, NULL, NULL)  );
		state.pJsonDb = pMe;
		// Root is special-cased so children become "/key", not "//key".
		if (0 == strcmp(pszNewNodePath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszNewNodePath;
		SG_ERR_CHECK(  SG_vhash__foreach(pCtx, pv->v.val_vhash, _vhash_add_foreach_cb, &state)  );
		break;
	case SG_VARIANT_TYPE_VARRAY:
		// _insertNode may rewrite the path (varray '#' keys become numeric
		// indexes), so fetch the actual stored path back via pszParentPath.
		SG_ERR_CHECK(  _insertNode(pCtx, pMe, pszNewNodePath, bAddRecursive, pv->type, bVariantIndexOk, NULL, NULL, &pszParentPath)  );
		state.pJsonDb = pMe;
		state.iVarrayIdxOffset = 0;
		if (0 == strcmp(pszParentPath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszParentPath;
		SG_ERR_CHECK(  SG_varray__foreach(pCtx, pv->v.val_varray, _varray_add_foreach_cb, &state)  );
		SG_NULLFREE(pCtx, pszParentPath);
		break;
	default:
		SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);
	}

	return;

fail:
	SG_NULLFREE(pCtx, pszParentPath);
}

/** Returns the number of '/' characters in sz (i.e. the path's depth). */
static SG_uint32 _countSlashes(const char* sz)
{
	SG_uint32 count = 0;
	while (*sz)
	{
		if ('/' == *sz)
			count++;
		sz++;
	}
	return count;
}

/**
 * Deletes the nested-set subtree identified by deleteNodeId/Lft/Rgt,
 * renumbers younger varray siblings, and closes the lft/rgt gap.
 * Caller must manage the sqlite tx.
 */
static void _removeNode(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	SG_bool bDescendantsOnly, // used by update, delete subtree but not the node itself
	SG_int64 deleteNodeId,
	SG_uint32 deleteNodeLft,
	SG_uint32 deleteNodeRgt)
{
	sqlite3_stmt* pStmt = NULL;
	sqlite3_stmt* pStmtSib = NULL;
	sqlite3_stmt* pStmtSibDesc = NULL;
	SG_uint32 newLftRgt;
	char* pszParentNodeFullPath = NULL;
	char* pszDeleteNodeFullPath = NULL;
	SG_string* pstrNewSibFullPath = NULL;

	// If the deleted node's parent is a varray, and the deleted node has younger
	// siblings, we need to update their paths and the paths of their descendants.
	if (!bDescendantsOnly)
	{
		// Get parent node
		SG_uint32 parentRgt = 0;
		SG_uint16 parentType = 0;

		// Width of the removed subtree in lft/rgt units (node included).
		newLftRgt = deleteNodeRgt - deleteNodeLft + 1;

		// The nearest enclosing node (smallest rgt-child.rgt) is the parent.
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"SELECT parent.id, parent.full_path, parent.lft, parent.rgt, parent.type, child.full_path "
			"FROM nodes child, nodes parent "
			"WHERE parent.lft < child.lft AND parent.rgt > child.rgt "
			" AND child.id= ? "
			" AND parent.json_object_id = ? "
			"ORDER BY parent.rgt-child.rgt ASC "
			"LIMIT 1;")  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, deleteNodeId)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 2, pMe->objectId)  );
		sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
		if (!SG_CONTEXT__HAS_ERR(pCtx))
		{
			SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char*)sqlite3_column_text(pStmt, 1), &pszParentNodeFullPath)  );
			parentRgt = sqlite3_column_int(pStmt, 3);
			parentType = (SG_uint16)sqlite3_column_int(pStmt, 4);
			SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char*)sqlite3_column_text(pStmt, 5), &pszDeleteNodeFullPath)  );
		}
		else
		{
			// The root node has no parent, so this is ok.
			SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_SQLITE(SQLITE_DONE));
		}
		SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

		// Delete the node and all its descendants
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"DELETE FROM nodes "
			"WHERE json_object_id = ?1 "
			" AND lft >= ?2 AND rgt <= ?3")  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, deleteNodeLft)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 3, deleteNodeRgt)  );
		SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );
		SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

		if (parentType == SG_VARIANT_TYPE_VARRAY)
		{
			// The parent's a varray, so we'll update the path of any younger
			// siblings and their descendants.
			SG_int32 rc;
			SG_uint32 newSibLeafVal;
			char newSibLeafName[11];
			SG_uint32 deletedNodeSlashCount = _countSlashes(pszDeleteNodeFullPath);

			// Get the deleted node's younger siblings and their descendants.
			SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
				"SELECT id, leaf_name, lft, rgt, full_path "
				"FROM nodes "
				"WHERE lft BETWEEN ? AND ? "
				" AND json_object_id = ? "
				"ORDER BY id")  );
			SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 1, deleteNodeRgt)  );
			SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, parentRgt)  );
			SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 3, pMe->objectId)  );

			SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstrNewSibFullPath)  );

			SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmtSib,
				"UPDATE nodes "
				"SET leaf_name = ?, full_path = ? "
				"WHERE id = ?")  );
			// Rewrites a descendant's path prefix in-place via substr().
			SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmtSibDesc,
				"UPDATE nodes "
				"SET full_path = ? || substr(full_path, ?) "
				"WHERE id = ? ")  );

			while ((rc=sqlite3_step(pStmt)) == SQLITE_ROW)
			{
				// Same slash-depth as the deleted node => direct sibling;
				// deeper => a descendant of the last sibling we renamed.
				SG_uint32 thisNodeSlashCount = _countSlashes((const char*)sqlite3_column_text(pStmt, 4));
				if (thisNodeSlashCount == deletedNodeSlashCount)
				{
					/* It's a direct sibling.
					 */
					// NOTE(review): leaf_name is a TEXT column; this relies on
					// sqlite's text->int coercion of the numeric leaf name.
					newSibLeafVal = sqlite3_column_int(pStmt, 1) - 1;
					SG_ERR_CHECK(  SG_sprintf(pCtx, newSibLeafName, 11, "%d", newSibLeafVal)  );

					SG_ERR_CHECK(  SG_string__set__sz(pCtx, pstrNewSibFullPath, pszParentNodeFullPath)  );
					// Don't double the slash when the parent is the root "/".
					if (pszParentNodeFullPath[1] != 0)
						SG_ERR_CHECK(  SG_string__append__sz(pCtx, pstrNewSibFullPath, "/")  );
					SG_ERR_CHECK(  SG_string__append__sz(pCtx, pstrNewSibFullPath, newSibLeafName)  );

					SG_ERR_CHECK(  sg_sqlite__reset(pCtx, pStmtSib)  );
					SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmtSib, 1, newSibLeafName)  );
					SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmtSib, 2, SG_string__sz(pstrNewSibFullPath))  );
					SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmtSib, 3, sqlite3_column_int64(pStmt, 0))  );
					SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmtSib, SQLITE_DONE)  );
				}
				else
				{
					/* It's a sibling's descendant. */
					// NOTE(review): assumes the ORDER BY id scan always yields a
					// direct sibling before its descendants, so
					// pstrNewSibFullPath holds the right prefix here — confirm.
					SG_ERR_CHECK(  sg_sqlite__reset(pCtx, pStmtSibDesc)  );
					SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmtSibDesc, 1, SG_string__sz(pstrNewSibFullPath))  );
					SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmtSibDesc, 2, SG_string__length_in_bytes(pstrNewSibFullPath) + 1)  );
					SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmtSibDesc, 3, sqlite3_column_int64(pStmt, 0))  );
					SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmtSibDesc, SQLITE_DONE)  );
				}
			}
			if (rc != SQLITE_DONE)
				SG_ERR_THROW(  SG_ERR_SQLITE(rc)  );

			SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmtSibDesc)  );
			SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmtSib)  );
			SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
		}
	}
	else
	{
		// Width of the removed span when the node itself survives.
		newLftRgt = deleteNodeRgt - deleteNodeLft - 1;

		// Deleting only the descendants of the specified node, not the node itself.
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"DELETE FROM nodes "
			"WHERE json_object_id = ?1 "
			" AND lft > ?2 AND rgt < ?3")  );
		SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, deleteNodeLft)  );
		SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 3, deleteNodeRgt)  );
		SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );
		SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
	}

	// Close the gap left by the deleted subtree
	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"UPDATE nodes "
		"SET lft = CASE "
		" WHEN lft > ?1 THEN lft - ?4 "
		" ELSE lft END, "
		" rgt = CASE "
		" WHEN rgt > ?1 THEN rgt - ?4 "
		" ELSE rgt END "
		"WHERE json_object_id = ?3 "
		" AND (lft > ?1 OR rgt > ?1)")  );
	SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 1, deleteNodeLft)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, deleteNodeRgt)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 3, pMe->objectId)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 4, newLftRgt)  );
	SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );
	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

	/* fall through */
fail:
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmt)  );
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmtSib)  );
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmtSibDesc)  );
	SG_NULLFREE(pCtx, pszParentNodeFullPath);
	SG_NULLFREE(pCtx, pszDeleteNodeFullPath);
	SG_STRING_NULLFREE(pCtx, pstrNewSibFullPath);
}

//////////////////////////////////////////////////////////////////////////

/** Closes the sqlite handle and frees the jsondb handle itself. */
void SG_jsondb__close_free(SG_context* pCtx, SG_jsondb* pThis)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;

	if (pMe)
	{
		SG_ERR_CHECK_RETURN(  sg_sqlite__close(pCtx, pMe->psql)  );
		SG_NULLFREE(pCtx, pMe->pszObjectName);
		SG_NULLFREE(pCtx, pMe);
	}
}

//////////////////////////////////////////////////////////////////////////

/**
 * Creates a new jsondb file at pPathDbFile (WAL mode) with the
 * json_objects/nodes schema, and returns a handle with no current object.
 * On failure the partially-created database is cleaned up.
 */
void SG_jsondb__create(SG_context* pCtx, const SG_pathname* pPathDbFile, SG_jsondb** ppJsondb)
{
	_jsondb_handle* pMe =
		NULL;
	SG_jsondb* pJsondb = NULL;
	SG_bool bCreated = SG_FALSE;
	SG_bool bInTx = SG_FALSE;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pMe)  );
	pJsondb = (SG_jsondb*)pMe;

	SG_ERR_CHECK(  sg_sqlite__create__pathname(pCtx, pPathDbFile, SG_SQLITE__SYNC__NORMAL, &pMe->psql)  );
	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "PRAGMA journal_mode=WAL")  );
	bCreated = SG_TRUE;
	sqlite3_busy_timeout(pMe->psql, MY_BUSY_TIMEOUT_MS);

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;")  );
	bInTx = SG_TRUE;

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE TABLE json_objects"
		" ("
		" id INTEGER PRIMARY KEY,"
		" name VARCHAR NULL"
		" )")  );

	// One row per tree node; (lft, rgt) encode the nested-set structure.
	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE TABLE nodes"
		" ("
		" id INTEGER PRIMARY KEY,"
		" json_object_id INTEGER NOT NULL,"
		" full_path VARCHAR NOT NULL,"
		" leaf_name VARCHAR NOT NULL,"
		" type INTEGER NOT NULL,"
		" val VARCHAR NULL,"
		" lft INTEGER NOT NULL,"
		" rgt INTEGER NOT NULL"
		" )")  );

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE UNIQUE INDEX json_objects_name on json_objects ( name )")  );
	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE UNIQUE INDEX nodes_json_object_id_full_path on nodes ( json_object_id, full_path )")  );
	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE INDEX nodes_json_object_id_lft_rgt on nodes ( json_object_id, lft, rgt )")  );
	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql,
		"CREATE INDEX nodes_json_object_id_rgt on nodes ( json_object_id, rgt )")  );

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "COMMIT TRANSACTION;")  );
	bInTx = SG_FALSE;

	SG_RETURN_AND_NULL(pJsondb, ppJsondb);

	/* fall through */
fail:
	if (bInTx && SG_CONTEXT__HAS_ERR(pCtx) && pMe && pMe->psql)
		SG_ERR_IGNORE(  sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;")  );
	SG_JSONDB_NULLFREE(pCtx, pJsondb);
	if (bCreated && SG_CONTEXT__HAS_ERR(pCtx))
		// If we created the database but we're failing, clean it up.
		// NOTE(review): rmdir on a database *file* looks wrong —
		// SG_jsondb__create_or_open uses SG_fsobj__remove__pathname
		// for the same cleanup; confirm which is intended.
		SG_ERR_IGNORE(  SG_fsobj__rmdir__pathname(pCtx, pPathDbFile)  );
}

/**
 * Opens an existing jsondb file and selects pszObjectName as the current
 * object (NULL selects none).
 */
void SG_jsondb__open(
	SG_context* pCtx,
	const SG_pathname* pPathDbFile,
	const char* pszObjectName,
	SG_jsondb** ppJsondb)
{
	_jsondb_handle* pMe = NULL;
	SG_jsondb* pJsondb = NULL;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pMe)  );
	pJsondb = (SG_jsondb*)pMe;

	SG_ERR_CHECK(  sg_sqlite__open__pathname(pCtx, pPathDbFile, SG_SQLITE__SYNC__NORMAL, &pMe->psql)  );
	sqlite3_busy_timeout(pMe->psql, MY_BUSY_TIMEOUT_MS);

	SG_ERR_CHECK(  SG_jsondb__set_current_object(pCtx, pJsondb, pszObjectName)  );

	SG_RETURN_AND_NULL(pJsondb, ppJsondb);

	/* fall through */
fail:
	SG_JSONDB_NULLFREE(pCtx, pJsondb);
}

/**
 * Opens pPathDbFile if it exists (creating it otherwise) and ensures an
 * object named pszObjectName exists and is current.  If this call created
 * the file and then fails, the file is removed.
 */
void SG_jsondb__create_or_open(
	SG_context* pCtx,
	const SG_pathname* pPathDbFile,
	const char* pszObjectName,
	SG_jsondb** ppJsondb)
{
	SG_bool exists;
	SG_jsondb* pDb = NULL;
	SG_bool bCreatedFile = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pszObjectName);

	SG_ERR_CHECK_RETURN(  SG_fsobj__exists__pathname(pCtx, pPathDbFile, &exists, NULL, NULL)  );

	if (exists)
	{
		SG_ERR_CHECK(  SG_jsondb__open(pCtx, pPathDbFile, NULL, &pDb)  );
		// If the object isn't there yet, add it instead of failing.
		SG_jsondb__set_current_object(pCtx, pDb, pszObjectName);
		if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
		{
			SG_ERR_DISCARD;
			SG_ERR_CHECK(  SG_jsondb__add__object(pCtx, pDb, pszObjectName, NULL)  );
		}
		else
		{
			SG_ERR_CHECK_CURRENT;
		}
	}
	else
	{
		SG_ERR_CHECK(  SG_jsondb__create(pCtx, pPathDbFile, &pDb)  );
		bCreatedFile = SG_TRUE;
		SG_ERR_CHECK(  SG_jsondb__add__object(pCtx, pDb, pszObjectName, NULL)  );
	}

	SG_RETURN_AND_NULL(pDb, ppJsondb);

	return;

fail:
	SG_JSONDB_NULLFREE(pCtx, pDb);
	if (bCreatedFile)
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPathDbFile)  );
}

//////////////////////////////////////////////////////////////////////////

/** Returns the handle's current object name (borrowed pointer; may be NULL). */
void SG_jsondb__get_current_object_name(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char** ppszObjectName)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;

	SG_NULLARGCHECK_RETURN(pThis);
	SG_NULLARGCHECK_RETURN(ppszObjectName);

	*ppszObjectName = pMe->pszObjectName;
}

/**
 * Looks up pszObjectName in json_objects and makes it the handle's current
 * object; NULL clears the current object.  Throws SG_ERR_NOT_FOUND if no
 * such object exists.
 */
void SG_jsondb__set_current_object(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszObjectName)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;
	const char* pszOldObjectName = NULL;
	sqlite3_stmt* pStmt = NULL;

	SG_NULLARGCHECK_RETURN(pThis);

	if (!pszObjectName)
	{
		// NULL clears the current-object selection.
		pMe->objectId = 0;
		SG_NULLFREE(pCtx, pMe->pszObjectName);
		return;
	}

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT id FROM json_objects WHERE name = ? LIMIT 1")  );
	SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmt, 1, pszObjectName)  );
	sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
	if (SG_CONTEXT__HAS_ERR(pCtx))
	{
		// No row => the object doesn't exist.
		SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_DONE), SG_ERR_NOT_FOUND);
		SG_ERR_RETHROW;
	}

	pMe->objectId = (SG_int32)sqlite3_column_int(pStmt, 0);
	// Keep the old name alive until the new one is safely duplicated.
	pszOldObjectName = pMe->pszObjectName;
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszObjectName, (char**)&pMe->pszObjectName)  );

	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

	/* fall through */
fail:
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmt)  );
	SG_NULLFREE(pCtx, pszOldObjectName);
}

/** Returns the current object's entire tree ("/") as a variant. */
void SG_jsondb__get__object(
	SG_context* pCtx,
	SG_jsondb* pThis,
	SG_variant** ppvhObject)
{
	SG_jsondb__get__variant(pCtx, pThis, "/", ppvhObject);
}

/**
 * Creates a new named object (optionally populated from pvRootObject) in
 * its own transaction, then makes it the current object.  Throws
 * SG_ERR_JSONDB_OBJECT_ALREADY_EXISTS on a name collision.
 */
void SG_jsondb__add__object(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszObjectName,
	SG_variant* pvRootObject)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;
	_jsondb_handle tempMe;
	sqlite3_stmt* pStmt = NULL;
	SG_int64 lNewObjectId;
	SG_bool bInTx = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pThis);
	SG_NULLARGCHECK_RETURN(pszObjectName);

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;")  );
	bInTx = SG_TRUE;

	// Insert object
	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"INSERT INTO json_objects (name) VALUES (?)")  );
	SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmt, 1, pszObjectName)  );
	sg_sqlite__step(pCtx, pStmt, SQLITE_DONE);
	// The unique index on name turns duplicates into SQLITE_CONSTRAINT.
	SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_CONSTRAINT), SG_ERR_JSONDB_OBJECT_ALREADY_EXISTS);
	SG_ERR_CHECK_CURRENT;
	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
	SG_ERR_CHECK(  sg_sqlite__last_insert_rowid(pCtx, pMe->psql, &lNewObjectId)  );

	if (pvRootObject)
	{
		// Use a stack-local handle so the caller's current object is not
		// switched until the whole insert has committed.
		tempMe.objectId = lNewObjectId;
		tempMe.psql = pMe->psql;
		tempMe.pszObjectName = pszObjectName;
		SG_ERR_CHECK(  _insertVariant(pCtx, &tempMe, "/", SG_FALSE, SG_FALSE, pvRootObject)  );
	}

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "COMMIT TRANSACTION;")  );
	bInTx = SG_FALSE;

	// Now that everything has succeeded, set the new current object.
	pMe->objectId = lNewObjectId;
	SG_NULLFREE(pCtx, pMe->pszObjectName);
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszObjectName, (char**)&pMe->pszObjectName)  );

	return;

fail:
	SG_ERR_IGNORE(sg_sqlite__finalize(pCtx, pStmt));
	if (bInTx && pMe && pMe->psql)
		SG_ERR_IGNORE(  sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;")  );
}

/**
 * Deletes the current object and all of its nodes in one transaction,
 * then clears the current-object selection.
 */
void SG_jsondb__remove__object(
	SG_context* pCtx,
	SG_jsondb* pThis)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;
	sqlite3_stmt* pStmt = NULL;
	SG_bool bInTx = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pThis);

	if (!pMe->objectId)
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_NO_CURRENT_OBJECT);

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;")  );
	bInTx = SG_TRUE;

	// delete object
	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"DELETE FROM nodes WHERE json_object_id = ?;")  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
	SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );
	SG_ERR_CHECK(  sg_sqlite__finalize(pCtx, pStmt)  );

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"DELETE FROM json_objects WHERE id = ?;")  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
	SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );

#ifdef DEBUG
	{
		// Exactly one json_objects row should have been removed.
		SG_uint32 nrNodesUpdated = 0;
		SG_ERR_CHECK(  sg_sqlite__num_changes(pCtx, pMe->psql, &nrNodesUpdated)  );
		SG_ASSERT(nrNodesUpdated == 1);
	}
#endif

	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

	SG_ERR_CHECK(  sg_sqlite__exec(pCtx, pMe->psql, "COMMIT TRANSACTION;")  );
	bInTx = SG_FALSE;

	// Now that everything has succeeded, set the new current object.
	pMe->objectId = 0;
	SG_NULLFREE(pCtx, pMe->pszObjectName);

	return;

fail:
	SG_ERR_IGNORE(sg_sqlite__finalize(pCtx, pStmt));
	if (bInTx && pMe && pMe->psql)
		SG_ERR_IGNORE(  sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;")  );
}

//////////////////////////////////////////////////////////////////////////

/**
 * pStmt should have these columns in this order:
 * leaf_name, type, val, lft, rgt
 *
 * Decodes the current row into the out-params appropriate to the node's
 * type; throws SG_ERR_VARIANT_INVALIDTYPE if the stored type is not in the
 * expectedType bitmask.
 *
 * Note that ppszVal is pStmt's copy. You have to copy it if you want it after
 * the statement is finalized.  ppszLeafName, in contrast, is a fresh
 * unescaped copy owned by the caller.
 */
static void _getSingleNode(
	SG_context* pCtx,
	sqlite3_stmt* pStmt,
	SG_uint16 expectedType,
	SG_uint16* pActualType,
	SG_int64* plVal,
	double* pDblVal,
	SG_bool* pbVal,
	const char** ppszVal,
	char** ppszLeafName,
	SG_uint32* piLft,
	SG_uint32* piRgt)
{
	SG_uint16 fetchedType;

	fetchedType = (SG_uint16)sqlite3_column_int(pStmt, 1);
	if ((fetchedType & expectedType) != fetchedType)
		SG_ERR_THROW_RETURN(SG_ERR_VARIANT_INVALIDTYPE);

	switch (fetchedType)
	{
	case SG_VARIANT_TYPE_NULL:
		// nothing to do
		break;
	case SG_VARIANT_TYPE_INT64:
		if (plVal)
			*plVal = sqlite3_column_int64(pStmt, 2);
		break;
	case SG_VARIANT_TYPE_DOUBLE:
		if (pDblVal)
			*pDblVal = sqlite3_column_double(pStmt, 2);
		break;
	case SG_VARIANT_TYPE_BOOL:
		if (pbVal)
			*pbVal = sqlite3_column_int(pStmt, 2);
		break;
	case SG_VARIANT_TYPE_SZ:
		if (ppszVal)
			*ppszVal = (const char*)sqlite3_column_text(pStmt, 2);
		break;
	case SG_VARIANT_TYPE_VHASH:
		// nothing to do
		break;
	case SG_VARIANT_TYPE_VARRAY:
		// nothing to do
		break;
	default:
		SG_ERR_THROW_RETURN(SG_ERR_NOTIMPLEMENTED);
	}

	if (piLft)
		*piLft = sqlite3_column_int(pStmt, 3);
	if (piRgt)
		*piRgt = sqlite3_column_int(pStmt, 4);

	if (pActualType)
		*pActualType = fetchedType;

	if (ppszLeafName)
	{
		SG_ERR_CHECK_RETURN(  SG_jsondb__unescape_keyname(pCtx, (const char*)sqlite3_column_text(pStmt, 0), ppszLeafName)  );
	}
}

/**
 * Reconstructs the subtree strictly inside (root_lft, root_rgt) into
 * exactly one of root_pvh / root_pva by walking the nodes in lft order
 * and tracking container nesting with an explicit stack.
 */
static void _getSubTree(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	SG_uint32 root_lft,
	SG_uint32 root_rgt,
	SG_vhash* root_pvh,
	SG_varray* root_pva)
{
	sqlite3_stmt* pStmt = NULL;
	SG_int32 rc;
	SG_vhash* current_pvh = root_pvh;
	SG_varray* current_pva = root_pva;
	SG_vhash* pvh_temp = NULL;		// We alloc this and therefore own it
	SG_vhash* pvh_temp_ref = NULL;	// We need to refer to pvh_temp after losing ownership
	SG_varray* pva_temp = NULL;		// We alloc this and therefore own it
	SG_varray* pva_temp_ref = NULL;	// We need to refer to pva_temp after losing ownership
	SG_uint32 last_rgt = root_lft;
	char* pszLeafName = NULL;
	subTreeStack stack;

	stack.ndx = 0;

	// Exactly one of the two root containers must be provided.
	SG_ASSERT( (root_pvh == NULL) ^ (root_pva == NULL));

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT leaf_name, type, val, lft, rgt FROM nodes "
		"WHERE json_object_id = ? "
		" AND lft > ? AND rgt < ? "
		"ORDER BY lft;")  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 2, root_lft)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmt, 3, root_rgt)  );

	while ((rc=sqlite3_step(pStmt)) == SQLITE_ROW)
	{
		SG_uint16 type;
		SG_int64 lVal;
		double dblVal;
		SG_bool bVal;
		const char* pszVal;
		SG_uint32 lft, rgt;

		SG_ERR_CHECK(  _getSingleNode(pCtx, pStmt, SG_UINT16_MAX, &type, &lVal, &dblVal, &bVal, &pszVal, &pszLeafName, &lft, &rgt)  );

		// A gap between the previous node's rgt and this node's lft means
		// one or more containers closed; pop back to the right ancestor.
		for (; last_rgt+1 < lft; last_rgt++)
		{
			SG_bool isVhash = SG_FALSE;
			void* p = NULL;
			SG_ERR_CHECK(  _pop(pCtx, &stack, &isVhash, &p)  );
			if (isVhash)
			{
				current_pvh = (SG_vhash*)p;
				current_pva = NULL;
			}
			else
			{
				current_pvh = NULL;
				current_pva = (SG_varray*)p;
			}
		}

		// Leaves satisfy rgt - lft == 1; containers span a wider interval.
		switch (type)
		{
		case SG_VARIANT_TYPE_NULL:
			SG_ASSERT(rgt - lft == 1);
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__null(pCtx, current_pvh, pszLeafName)  );
			else
				SG_ERR_CHECK(  SG_varray__append__null(pCtx, current_pva)  );
			break;
		case SG_VARIANT_TYPE_INT64:
			SG_ASSERT(rgt - lft == 1);
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, current_pvh, pszLeafName, lVal)  );
			else
				SG_ERR_CHECK(  SG_varray__append__int64(pCtx, current_pva, lVal)  );
			break;
		case SG_VARIANT_TYPE_DOUBLE:
			SG_ASSERT(rgt - lft == 1);
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__double(pCtx, current_pvh, pszLeafName, dblVal)  );
			else
				SG_ERR_CHECK(  SG_varray__append__double(pCtx, current_pva, dblVal)  );
			break;
		case SG_VARIANT_TYPE_BOOL:
			SG_ASSERT(rgt - lft == 1);
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__bool(pCtx, current_pvh, pszLeafName, bVal)  );
			else
				SG_ERR_CHECK(  SG_varray__append__bool(pCtx, current_pva, bVal)  );
			break;
		case SG_VARIANT_TYPE_SZ:
			SG_ASSERT(rgt - lft == 1);
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, current_pvh, pszLeafName, pszVal)  );
			else
				SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, current_pva, pszVal)  );
			break;
		case SG_VARIANT_TYPE_VHASH:
			// Ownership of pvh_temp passes to the parent container; keep a
			// weak ref so we can descend into it below.
			SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_temp)  );
			pvh_temp_ref = pvh_temp;
			pva_temp_ref = NULL;
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, current_pvh, pszLeafName, &pvh_temp)  );
			else
				SG_ERR_CHECK(  SG_varray__append__vhash(pCtx, current_pva, &pvh_temp)  );
			break;
		case SG_VARIANT_TYPE_VARRAY:
			SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva_temp)  );
			pva_temp_ref = pva_temp;
			pvh_temp_ref = NULL;
			if (current_pvh)
				SG_ERR_CHECK(  SG_vhash__add__varray(pCtx, current_pvh, pszLeafName, &pva_temp)  );
			else
				SG_ERR_CHECK(  SG_varray__append__varray(pCtx, current_pva, &pva_temp)  );
			break;
		default:
			SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);
		}

		if (rgt - lft > 1)
		{
			// Non-empty container: push the current container and descend.
			if (current_pvh)
				SG_ERR_CHECK(  _push(pCtx, &stack, SG_TRUE, current_pvh)  );
			else
				SG_ERR_CHECK(  _push(pCtx, &stack, SG_FALSE, current_pva)  );
			current_pvh = pvh_temp_ref;
			current_pva = pva_temp_ref;
		}

		last_rgt = rgt;
		SG_NULLFREE(pCtx, pszLeafName);

	} // while (rc == SQLITE_ROW)

	if (rc != SQLITE_DONE)
		SG_ERR_THROW(  SG_ERR_SQLITE(rc)  );

	/* fall through */
fail:
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmt)  );
	SG_VHASH_NULLFREE(pCtx, pvh_temp);
	SG_VARRAY_NULLFREE(pCtx, pva_temp);
	SG_NULLFREE(pCtx, pszLeafName);
}

/**
 * Looks up the node at pszPath (scrubbed via _pathHelper), decodes it into
 * the type-appropriate out-params, and materializes container values via
 * _getSubTree.  Throws SG_ERR_NOT_FOUND if the path has no node.
 */
static void _getByPath(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	const char* pszPath,
	SG_uint16 expectedType,
	SG_uint16* pActualType,
	SG_int64* plVal,
	double* pDblVal,
	SG_bool* pbVal,
	char**
/* Tail of _getByPath() -- the signature begins on an earlier (unseen) line.
 * Looks up the node at pszPath for the current object, returning its type and
 * value through whichever out-parameters the caller supplied.  Container
 * values (vhash/varray) are materialized from the nested-set subtree.
 * Throws SG_ERR_NOT_FOUND when no row matches the path. */
ppszVal, SG_vhash** ppvhVal, SG_varray** ppvaVal)
{
	SG_uint32 lenPath;
	char* pszScrubbedPath = NULL;
	sqlite3_stmt* pStmt = NULL;
	SG_uint16 actualType;
	const char* pszVal = NULL;
	SG_uint32 lft, rgt;
	SG_vhash* pvh = NULL;
	SG_varray* pva = NULL;

	/* A current object and an absolute path are both required. */
	if (!pMe->objectId)
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_NO_CURRENT_OBJECT);
	if (pszPath[0] != '/')
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_INVALID_PATH);

	lenPath = SG_STRLEN(pszPath);
	/* "/" is left as-is; longer paths are normalized by _pathHelper. */
	if (lenPath > 1)
		SG_ERR_CHECK( _pathHelper(pCtx, pszPath, lenPath, &pszScrubbedPath, NULL, NULL) );
	else
		SG_ERR_CHECK( SG_STRDUP(pCtx, pszPath, &pszScrubbedPath) );

	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT leaf_name, type, val, lft, rgt "
		"FROM nodes "
		"WHERE json_object_id = ? AND full_path = ? "
		"LIMIT 1") );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) );
	SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszScrubbedPath) );

	/* Map "no row" (SQLITE_DONE where a row was expected) to NOT_FOUND. */
	sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
	SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_DONE), SG_ERR_NOT_FOUND);
	SG_ERR_CHECK_CURRENT;

	SG_ERR_CHECK( _getSingleNode(pCtx, pStmt, expectedType, &actualType,
		plVal, pDblVal, pbVal, &pszVal, NULL, &lft, &rgt) );

	/* Container types: rebuild the in-memory value from the subtree
	 * bounded by (lft, rgt) -- only if the caller asked for it. */
	if ( (actualType == SG_VARIANT_TYPE_VHASH) && ppvhVal )
	{
		SG_VHASH__ALLOC(pCtx, &pvh);
		SG_ERR_CHECK( _getSubTree(pCtx, pMe, lft, rgt, pvh, NULL) );
		*ppvhVal = pvh;
		pvh = NULL;   /* ownership transferred to caller */
	}
	else if ( (actualType == SG_VARIANT_TYPE_VARRAY) && ppvaVal )
	{
		SG_VARRAY__ALLOC(pCtx, &pva);
		SG_ERR_CHECK( _getSubTree(pCtx, pMe, lft, rgt, NULL, pva) );
		*ppvaVal = pva;
		pva = NULL;   /* ownership transferred to caller */
	}

	if (pActualType)
		*pActualType = actualType;
	/* pszVal points into the statement's row; copy before finalizing. */
	if (ppszVal && pszVal)
		SG_ERR_CHECK( SG_STRDUP(pCtx, pszVal, ppszVal) );

	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );
	SG_NULLFREE(pCtx, pszScrubbedPath);
	return;

fail:
	SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) );
	SG_NULLFREE(pCtx, pszScrubbedPath);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pva);
}

/* Common prologue for the read-only accessors below.  Declares pMe/bInTx and
 * opens a transaction.  Relies on the enclosing function having parameters
 * named pThis and pszPath, and expands SG_ERR_CHECK's goto to the fail: label
 * that POST_GET provides. */
#define PRE_GET _jsondb_handle* pMe = (_jsondb_handle*)pThis; \
	SG_bool bInTx = SG_FALSE; \
	SG_NULLARGCHECK_RETURN(pThis); \
	SG_NULLARGCHECK_RETURN(pszPath); \
	SG_ERR_CHECK( sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;") ); \
	bInTx = SG_TRUE

/* Common epilogue for readers: supplies the fail: label and rolls back the
 * read transaction.  Note there is no COMMIT -- readers always roll back. */
#define POST_GET fail: \
	if (bInTx && pMe && pMe->psql) \
		SG_ERR_IGNORE( sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;") )

/* Sets *pbResult to whether a node exists at pszPath (any type). */
void SG_jsondb__has(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool* pbResult)
{
	SG_bool bExists;
	PRE_GET;
	SG_NULLARGCHECK(pbResult);

	/* SG_UINT16_MAX == "any type": we only care about existence. */
	_getByPath(pCtx, pMe, pszPath, SG_UINT16_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
	if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
	{
		SG_ERR_DISCARD;
		bExists = SG_FALSE;
	}
	else
	{
		bExists = SG_TRUE;
	}
	SG_ERR_CHECK_CURRENT;   /* rethrow anything other than NOT_FOUND */

	*pbResult = bExists;

	POST_GET;
}

/* Returns the SG_VARIANT_TYPE_* of the node at pszPath in *pResult. */
void SG_jsondb__typeof(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_uint16* pResult)
{
	PRE_GET;
	SG_NULLARGCHECK(pResult);
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_UINT16_MAX,
		pResult, NULL, NULL, NULL, NULL, NULL, NULL) );
	POST_GET;
}

/* Counts the immediate children of the node at pszPath into *piResult.
 * Uses the nested-set lft/rgt bounds: a leaf has rgt-lft == 1. */
void SG_jsondb__count(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_uint32* piResult)
{
	sqlite3_stmt* pStmt = NULL;
	SG_int64 id;
	SG_uint32 lft, rgt, count;

	PRE_GET;
	SG_NULLARGCHECK(piResult);

	if (!pMe->objectId)
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_NO_CURRENT_OBJECT);
	if (pszPath[0] != '/')
		SG_ERR_THROW_RETURN(SG_ERR_JSONDB_INVALID_PATH);

	// Check that an object exists at the specified path.
	/* NOTE(review): unlike _getByPath, the raw pszPath is bound here without
	 * _pathHelper normalization -- confirm callers always pass a scrubbed path. */
	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT id, lft, rgt "
		"FROM nodes "
		"WHERE json_object_id = ? AND full_path = ? "
		"LIMIT 1") );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) );
	SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszPath) );
	sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
	SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_DONE), SG_ERR_NOT_FOUND);
	SG_ERR_CHECK_CURRENT;

	id = sqlite3_column_int64(pStmt, 0);
	lft = sqlite3_column_int(pStmt, 1);
	rgt = sqlite3_column_int(pStmt, 2);

	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

	// If the node's a leaf, we don't need to bother with the second query.
	if (!(rgt-lft-1))
	{
		*piResult = 0;
	}
	else
	{
		// The node's not a leaf so we have to count the edges.
		/* Counts descendants whose nearest enclosing ancestor is this node,
		 * i.e. direct children only. */
		SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"SELECT COUNT(child.id) "
			"FROM nodes child, nodes parent "
			"WHERE parent.id = ? "
			"AND child.lft BETWEEN parent.lft AND parent.rgt "
			"AND parent.id = (SELECT MAX(s.id) FROM nodes s WHERE s.json_object_id = ? AND s.lft < child.lft AND s.rgt > child.rgt) "
			"AND parent.json_object_id = child.json_object_id") );
		SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, id) );
		SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 2, pMe->objectId) );
		SG_ERR_CHECK( sg_sqlite__step(pCtx, pStmt, SQLITE_ROW) );
		count = sqlite3_column_int(pStmt, 0);
		SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );
		*piResult = count;
	}

	POST_GET;
	SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) );
}

/* Returns the node at pszPath as a freshly-allocated SG_variant; the caller
 * owns *ppResult.  Whatever slot matches the stored type is filled in. */
void SG_jsondb__get__variant(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_variant** ppResult)
{
	SG_variant* pResult = NULL;

	PRE_GET;
	SG_NULLARGCHECK(ppResult);

	SG_ERR_CHECK( SG_alloc1(pCtx, pResult) );
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_UINT16_MAX,
		&pResult->type,
		&pResult->v.val_int64,
		&pResult->v.val_double,
		&pResult->v.val_bool,
		(char**)&pResult->v.val_sz,
		&pResult->v.val_vhash,
		&pResult->v.val_varray) );

	*ppResult = pResult;
	pResult = NULL;   /* ownership transferred */

	POST_GET;
	SG_NULLFREE(pCtx, pResult);
}

/* Returns the string value at pszPath; caller owns *ppszValue. */
void SG_jsondb__get__sz(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	char** ppszValue)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_SZ,
		NULL, NULL, NULL, NULL, ppszValue, NULL, NULL) );
	POST_GET;
}

/* Returns the vhash value at pszPath; caller owns *ppvhObject. */
void SG_jsondb__get__vhash(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_vhash** ppvhObject)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_VHASH,
		NULL, NULL, NULL, NULL, NULL, ppvhObject, NULL) );
	POST_GET;
}

/* Like get__sz but reports absence via *pbExists instead of throwing. */
void SG_jsondb__check__sz(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool* pbExists,
	char** ppszValue)
{
	PRE_GET;
	SG_NULLARGCHECK(pbExists);

	_getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_SZ,
		NULL, NULL, NULL, NULL, ppszValue, NULL, NULL);
	if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
	{
		SG_ERR_DISCARD;
		*pbExists = SG_FALSE;
	}
	else
	{
		SG_ERR_CHECK_CURRENT;
		*pbExists = SG_TRUE;
	}

	POST_GET;
}

/* Returns the double value at pszPath in *pResult. */
void SG_jsondb__get__double(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	double* pResult)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_DOUBLE,
		NULL, NULL, pResult, NULL, NULL, NULL, NULL) );
	POST_GET;
}

/* Returns the int64 value at pszPath in *pResult. */
void SG_jsondb__get__int64(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_int64* pResult)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_INT64,
		NULL, pResult, NULL, NULL, NULL, NULL, NULL) );
	POST_GET;
}

/* Accepts either an int64 or a whole-valued double at pszPath and returns it
 * as an int64; throws SG_ERR_VARIANT_INVALIDTYPE for a non-integral double.
 * NOTE(review): the `if (dbl)` test means a stored double of exactly 0.0 is
 * indistinguishable from "no double"; presumably _getByPath fills only one of
 * i64/dbl -- confirm. */
void SG_jsondb__get__int64_or_double(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_int64* pResult)
{
	SG_int64 i64 = 0;
	double dbl = 0;

	PRE_GET;
	SG_NULLARGCHECK(pResult);

	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath,
		SG_VARIANT_TYPE_DOUBLE | SG_VARIANT_TYPE_INT64,
		NULL, &i64, &dbl, NULL, NULL, NULL, NULL) );
	if (pResult)   /* always true after the NULLARGCHECK above */
	{
		if (dbl)
		{
			if (SG_double__fits_in_int64(dbl))
				*pResult = (SG_int64)dbl;
			else
				SG_ERR_THROW( SG_ERR_VARIANT_INVALIDTYPE );
		}
		else
			*pResult = i64;
	}

	POST_GET;
}

/* Returns the int64 at pszPath narrowed to uint32, or SG_ERR_INTEGER_OVERFLOW.
 * NOTE(review): pResult is dereferenced without a NULLARGCHECK, unlike the
 * sibling getters -- confirm callers never pass NULL. */
void SG_jsondb__get__uint32(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_uint32* pResult)
{
	SG_int64 i64;

	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_INT64,
		NULL, &i64, NULL, NULL, NULL, NULL, NULL) );
	if (SG_int64__fits_in_uint32(i64))
		*pResult = (SG_uint32) i64;
	else
		SG_ERR_THROW( SG_ERR_INTEGER_OVERFLOW );
	POST_GET;
}

/* Returns the boolean value at pszPath in *pResult. */
void SG_jsondb__get__bool(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool* pResult)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_BOOL,
		NULL, NULL, NULL, pResult, NULL, NULL, NULL) );
	POST_GET;
}

/* Returns the varray value at pszPath; caller owns *ppResult. */
void SG_jsondb__get__varray(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_varray** ppResult)
{
	PRE_GET;
	SG_ERR_CHECK( _getByPath(pCtx, pMe, pszPath, SG_VARIANT_TYPE_VARRAY,
		NULL, NULL, NULL, NULL, NULL, NULL, ppResult) );
	POST_GET;
}

//////////////////////////////////////////////////////////////////////////

/* Common prologue for the mutating add/update entry points.  Identical to
 * PRE_GET: declares pMe/bInTx and opens a transaction. */
#define PRE_ADD _jsondb_handle* pMe = (_jsondb_handle*)pThis; \
	SG_bool bInTx = SG_FALSE; \
	SG_NULLARGCHECK_RETURN(pThis); \
	SG_NULLARGCHECK_RETURN(pszPath); \
	SG_ERR_CHECK( sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;") ); \
	bInTx = SG_TRUE

/* Common epilogue for writers: commits on success, and supplies the fail:
 * label that rolls back if anything above (or the commit itself) failed. */
#define POST_ADD SG_ERR_CHECK( sg_sqlite__exec(pCtx, pMe->psql, "COMMIT TRANSACTION;") ); \
	bInTx = SG_FALSE; \
	fail: \
	if (bInTx && pMe && pMe->psql) \
		SG_ERR_IGNORE( sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;") )

/* Adds a string node at pszPath; fails if the path already exists.
 * bAddRecursive creates missing intermediate containers. */
void SG_jsondb__add__string__sz(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const char* pszValue)
{
	PRE_ADD;
	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_SZ, SG_FALSE, pszValue, NULL, NULL) );
	POST_ADD;
}

/* Adds a string node, storing at most len bytes of pszValue (len==0 means
 * "whole string").  A truncated private copy is made when needed. */
void SG_jsondb__add__string__buflen(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const char* pszValue,
	SG_uint32 len)
{
	char* pszToStore = NULL;

	PRE_ADD;

	if (len == 0 || SG_STRLEN(pszValue) < len)
	{
		/* The whole string fits: store it directly. */
		SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
			SG_VARIANT_TYPE_SZ, SG_FALSE, pszValue, NULL, NULL) );
	}
	else
	{
		/* Copy the first len bytes; the expected BUFFERTOOSMALL from
		 * SG_strcpy is the truncation mechanism, so disregard it. */
		SG_ERR_CHECK( SG_allocN(pCtx, len+1, pszToStore) );
		SG_strcpy(pCtx, pszToStore, len+1, pszValue);
		SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_BUFFERTOOSMALL);
		SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
			SG_VARIANT_TYPE_SZ, SG_FALSE, pszToStore, NULL, NULL) );
	}

	POST_ADD;
	SG_NULLFREE(pCtx, pszToStore);
}

/* Adds an int64 node at pszPath (stored as its decimal string form). */
void SG_jsondb__add__int64(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	SG_int64 intValue)
{
	SG_int_to_string_buffer tmp;
	PRE_ADD;
	SG_int64_to_sz(intValue, tmp);
	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_INT64, SG_FALSE, tmp, NULL, NULL) );
	POST_ADD;
}

/* Adds a double node at pszPath (stored as its "%f" string form). */
void SG_jsondb__add__double(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	double fv)
{
	char buf[256];
	PRE_ADD;
	SG_ERR_CHECK( SG_sprintf(pCtx, buf, 255, "%f", fv) );
	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_DOUBLE, SG_FALSE, buf, NULL, NULL) );
	POST_ADD;
}

/* Adds a JSON null node at pszPath. */
void SG_jsondb__add__null(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive)
{
	PRE_ADD;
	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_NULL, SG_FALSE, NULL, NULL, NULL) );
	POST_ADD;
}

/* Adds a boolean node at pszPath (stored as "1" or "0"). */
void SG_jsondb__add__bool(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	SG_bool b)
{
	PRE_ADD;
	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_BOOL, SG_FALSE, b ? "1" : "0", NULL, NULL) );
	POST_ADD;
}

/* Adds a vhash node at pszPath, then inserts each member beneath it via the
 * _vhash_add_foreach_cb callback. */
void SG_jsondb__add__vhash(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const SG_vhash* pHashValue)
{
	struct _add_foreach_state state;

	PRE_ADD;

	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_VHASH, SG_FALSE, NULL, NULL, NULL) );

	if (pHashValue)
	{
		state.pJsonDb = pMe;
		/* Avoid a "//" prefix when inserting under the root. */
		if (0 == strcmp(pszPath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszPath;
		SG_ERR_CHECK( SG_vhash__foreach(pCtx, pHashValue, _vhash_add_foreach_cb, &state) );
	}

	POST_ADD;
}

/* Adds a varray node at pszPath, then appends each element via the
 * _varray_add_foreach_cb callback.  _insertNode returns the (possibly
 * variant-indexed) parent path it actually created in pszParentPath. */
void SG_jsondb__add__varray(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const SG_varray* pva)
{
	struct _add_foreach_state state;
	char* pszParentPath = NULL;

	PRE_ADD;

	SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_VARRAY, SG_FALSE, NULL, NULL, &pszParentPath) );

	if (pva)
	{
		state.pJsonDb = pMe;
		state.iVarrayIdxOffset = 0;
		if (0 == strcmp(pszParentPath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszParentPath;
		SG_ERR_CHECK( SG_varray__foreach(pCtx, pva, _varray_add_foreach_cb, &state) );
	}

	POST_ADD;
	SG_NULLFREE(pCtx, pszParentPath);
}

//////////////////////////////////////////////////////////////////////////

/* Upserts the node at pszPath: if it exists its subtree (if any) is removed
 * and the row is rewritten in place; otherwise a new node is inserted.  For
 * container types the contents of pvhValue/pvaValue are then inserted beneath
 * it.  Caller provides the transaction (see PRE_ADD/POST_ADD). */
static void _updateNode(
	SG_context* pCtx,
	_jsondb_handle* pMe,
	const char* pszPath,
	SG_bool bAddRecursive,
	SG_uint16 type,
	SG_bool bVariantIndexOk,
	const char* pszVal,
	const SG_vhash* pvhValue,
	const SG_varray* pvaValue)
{
	sqlite3_stmt* pStmt = NULL;
	SG_int64 existingNodeId;
	SG_uint32 existingNodeLft, existingNodeRgt;
	SG_int32 rc;
	struct _add_foreach_state state;

	// Look up the provided path.
	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT id, lft, rgt "
		"FROM nodes "
		"WHERE json_object_id = ? AND full_path = ? "
		"LIMIT 1") );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) );
	SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszPath) );

	rc = sqlite3_step(pStmt);
	if (rc == SQLITE_ROW)
	{
		// The node exists.
		existingNodeId = sqlite3_column_int64(pStmt, 0);
		existingNodeLft = sqlite3_column_int(pStmt, 1);
		existingNodeRgt = sqlite3_column_int(pStmt, 2);

		SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

		/* Non-leaf: delete the old subtree but keep the node row itself
		 * (SG_TRUE presumably means "children only" -- confirm against
		 * _removeNode). */
		if ((existingNodeRgt - existingNodeLft) > 1)
			SG_ERR_CHECK( _removeNode(pCtx, pMe, SG_TRUE,
				existingNodeId, existingNodeLft, existingNodeRgt) );

		SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
			"UPDATE nodes "
			"SET type = ?, val = ? "
			"WHERE id = ?") );
		SG_ERR_CHECK( sg_sqlite__bind_int(pCtx, pStmt, 1, type) );
		SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszVal) );
		SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 3, existingNodeId) );
		SG_ERR_CHECK( sg_sqlite__step(pCtx, pStmt, SQLITE_DONE) );
		SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

#ifdef DEBUG
		{
			SG_uint32 nrRowsUpdated = 0;
			SG_ERR_CHECK( sg_sqlite__num_changes(pCtx, pMe->psql, &nrRowsUpdated) );
			SG_ASSERT(nrRowsUpdated == 1);
		}
#endif
	}
	else if (rc == SQLITE_DONE)
	{
		// The node doesn't already exist.
		SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );
		SG_ERR_CHECK( _insertNode(pCtx, pMe, pszPath, bAddRecursive, type,
			bVariantIndexOk, pszVal, NULL, NULL) );
	}
	else
		SG_ERR_THROW(SG_ERR_SQLITE(rc));

	// The node itself has now been updated or inserted. If it was a container type, fill it in.
	if ((type == SG_VARIANT_TYPE_VHASH) && pvhValue)
	{
		state.pJsonDb = pMe;
		if (0 == strcmp(pszPath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszPath;
		SG_ERR_CHECK( SG_vhash__foreach(pCtx, pvhValue, _vhash_add_foreach_cb, &state) );
	}
	else if ((type == SG_VARIANT_TYPE_VARRAY) && pvaValue)
	{
		state.pJsonDb = pMe;
		if (0 == strcmp(pszPath, "/"))
			state.pszParentPath = "";
		else
			state.pszParentPath = pszPath;
		state.iVarrayIdxOffset = 0;
		SG_ERR_CHECK( SG_varray__foreach(pCtx, pvaValue, _varray_add_foreach_cb, &state) );
	}

	return;

fail:
	SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) );
}

/* Upsert counterpart of add__string__sz. */
void SG_jsondb__update__string__sz(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const char* pszValue)
{
	PRE_ADD;
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_SZ, SG_FALSE, pszValue, NULL, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__string__buflen. */
void SG_jsondb__update__string__buflen(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const char* pszValue, // We'll make our own copy of this.  The caller still owns it.
	SG_uint32 len)
{
	char* pszToStore = NULL;

	PRE_ADD;

	if (len == 0 || SG_STRLEN(pszValue) < len)
	{
		SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
			SG_VARIANT_TYPE_SZ, SG_FALSE, pszValue, NULL, NULL) );
	}
	else
	{
		/* Truncate to len bytes; BUFFERTOOSMALL is expected here. */
		SG_ERR_CHECK( SG_allocN(pCtx, len+1, pszToStore) );
		SG_strcpy(pCtx, pszToStore, len+1, pszValue);
		SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_BUFFERTOOSMALL);
		SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
			SG_VARIANT_TYPE_SZ, SG_FALSE, pszToStore, NULL, NULL) );
	}

	POST_ADD;
	SG_NULLFREE(pCtx, pszToStore);
}

/* Upsert counterpart of add__int64. */
void SG_jsondb__update__int64(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	SG_int64 intValue)
{
	SG_int_to_string_buffer tmp;
	PRE_ADD;
	SG_int64_to_sz(intValue, tmp);
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_INT64, SG_FALSE, tmp, NULL, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__double. */
void SG_jsondb__update__double(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	double fv)
{
	char buf[256];
	PRE_ADD;
	SG_ERR_CHECK( SG_sprintf(pCtx, buf, 255, "%f", fv) );
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_DOUBLE, SG_FALSE, buf, NULL, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__null. */
void SG_jsondb__update__null(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive)
{
	PRE_ADD;
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_NULL, SG_FALSE, NULL, NULL, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__bool. */
void SG_jsondb__update__bool(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	SG_bool b)
{
	PRE_ADD;
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_BOOL, SG_FALSE, b ? "1" : "0", NULL, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__vhash; _updateNode handles the member fill-in. */
void SG_jsondb__update__vhash(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const SG_vhash* pHashValue)
{
	PRE_ADD;
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_VHASH, SG_FALSE, NULL, pHashValue, NULL) );
	POST_ADD;
}

/* Upsert counterpart of add__varray; _updateNode handles the element fill-in. */
void SG_jsondb__update__varray(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath,
	SG_bool bAddRecursive,
	const SG_varray* pva)
{
	PRE_ADD;
	SG_ERR_CHECK( _updateNode(pCtx, pMe, pszPath, bAddRecursive,
		SG_VARIANT_TYPE_VARRAY, SG_FALSE, NULL, NULL, pva) );
	POST_ADD;
}

//////////////////////////////////////////////////////////////////////////

/* Removes the node at pszPath (and, per _removeNode's SG_FALSE flag,
 * presumably its whole subtree -- confirm) inside its own transaction.
 * Throws SG_ERR_NOT_FOUND if no node exists at the path. */
void SG_jsondb__remove(
	SG_context* pCtx,
	SG_jsondb* pThis,
	const char* pszPath)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;
	sqlite3_stmt* pStmt = NULL;
	SG_int64 deleteNodeId;
	SG_uint32 deleteNodeLft, deleteNodeRgt;
	SG_bool bInTx = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pThis);
	SG_NULLARGCHECK_RETURN(pszPath);

	SG_ERR_CHECK( sg_sqlite__exec(pCtx, pMe->psql, "BEGIN TRANSACTION;") );
	bInTx = SG_TRUE;

	// Find the node in question
	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT id, lft, rgt "
		"FROM nodes "
		"WHERE json_object_id = ? AND full_path = ? "
		"LIMIT 1") );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) );
	SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszPath) );
	sg_sqlite__step(pCtx, pStmt, SQLITE_ROW);
	SG_ERR_REPLACE(SG_ERR_SQLITE(SQLITE_DONE), SG_ERR_NOT_FOUND);
	SG_ERR_CHECK_CURRENT;

	deleteNodeId = sqlite3_column_int64(pStmt, 0);
	deleteNodeLft = sqlite3_column_int(pStmt, 1);
	deleteNodeRgt = sqlite3_column_int(pStmt, 2);
	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

	SG_ERR_CHECK( _removeNode(pCtx, pMe, SG_FALSE,
		deleteNodeId, deleteNodeLft, deleteNodeRgt) );

	SG_ERR_CHECK( sg_sqlite__exec(pCtx, pMe->psql, "COMMIT TRANSACTION;") );
	bInTx = SG_FALSE;

	return;

fail:
	SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) );
	if (bInTx && pMe && pMe->psql)
		SG_ERR_IGNORE( sg_sqlite__exec(pCtx, pMe->psql, "ROLLBACK TRANSACTION;") );
}

/* Escapes '/' and '\' in a key name by prefixing each with '\', so the key
 * can be embedded in a path.  Caller owns *ppszEscapedKeyname.
 * NOTE(review): the escaped copy never writes an explicit NUL terminator;
 * presumably SG_allocN zero-fills -- confirm. */
void SG_jsondb__escape_keyname(
	SG_context* pCtx,
	const char* pszKeyname,
	char** ppszEscapedKeyname)
{
	const char* p = pszKeyname;
	SG_uint32 padCount = 0;
	char* pszEscapedKeyname = NULL;

	SG_NONEMPTYCHECK_RETURN(pszKeyname);
	SG_NULLARGCHECK_RETURN(ppszEscapedKeyname);

	/* First pass: count the characters that need escaping. */
	while (*p)
	{
		unsigned char c = (unsigned char)*p;
		if (c == '/' || c == '\\')
			padCount++;
		p++;
	}

	if (padCount)
	{
		char* ep;
		/* p now points at the NUL, so (p - pszKeyname) is the length. */
		SG_ERR_CHECK( SG_allocN(pCtx, (SG_uint32)(p - pszKeyname + padCount + 1), pszEscapedKeyname) );
		ep = pszEscapedKeyname;
		p = pszKeyname;
		/* Second pass: copy, inserting a '\' before each special char. */
		while (*p)
		{
			unsigned char c = (unsigned char)*p;
			if (c == '/' || c == '\\')
			{
				*ep = '\\';
				ep++;
			}
			*ep = c;
			ep++;
			p++;
		}
	}
	else
	{
		SG_ERR_CHECK( SG_STRDUP(pCtx, pszKeyname, &pszEscapedKeyname) );
	}

	*ppszEscapedKeyname = pszEscapedKeyname;

	return;

fail:
	SG_NULLFREE(pCtx, pszEscapedKeyname);
}

/* Inverse of escape_keyname: strips the '\' escape before each escaped
 * character.  Caller owns *ppszUnescapedKeyname.
 * NOTE(review): unlike escape_keyname there is no NULLARGCHECK on the out
 * pointer, and a malformed input ending in a lone '\' advances p past the
 * NUL terminator in both loops (possible overread) -- confirm inputs are
 * always well-formed escaped keys. */
void SG_jsondb__unescape_keyname(
	SG_context* pCtx,
	const char* pszKeyname,
	char** ppszUnescapedKeyname)
{
	const char* p = pszKeyname;
	SG_uint32 padCount = 0;
	char* pszUnescapedKeyname = NULL;

	SG_NONEMPTYCHECK_RETURN(pszKeyname);

	/* First pass: count escape characters to be removed. */
	while (*p)
	{
		unsigned char c = (unsigned char)*p;
		if (c == '\\')
		{
			p++;
			padCount++;
		}
		p++;
	}

	if (padCount)
	{
		char* up;
		SG_ERR_CHECK( SG_allocN(pCtx, (SG_uint32)(p - pszKeyname - padCount + 1), pszUnescapedKeyname) );
		up = pszUnescapedKeyname;
		p = pszKeyname;
		/* Second pass: copy, collapsing each '\x' pair to 'x'. */
		while (*p)
		{
			unsigned char c = *p;
			if (c == '\\')
				c = *++p;
			*up = c;
			up++;
			p++;
		}
	}
	else
	{
		SG_ERR_CHECK( SG_STRDUP(pCtx, pszKeyname, &pszUnescapedKeyname) );
	}

	*ppszUnescapedKeyname = pszUnescapedKeyname;

	return;

fail:
	SG_NULLFREE(pCtx, pszUnescapedKeyname);
}

#if defined(DEBUG)
/* Debug-only consistency check: walks all nodes of the current object in lft
 * order and verifies the nested-set invariants (root shape, parent/leaf-name
 * agreement via a path stack, and that rootRgt/2 equals the node count). */
void SG_jsondb_debug__verify_tree(
	SG_context* pCtx,
	SG_jsondb* pThis)
{
	_jsondb_handle* pMe = (_jsondb_handle*)pThis;
	sqlite3_stmt* pStmt = NULL;
	SG_int32 rc;
	SG_uint32 nodeCount = 0;
	SG_uint32 rootRgt = 0;
	char* pszParsedLeafName = NULL;
	char* pszParsedParentPath = NULL;
	SG_uint32 last_rgt = 1;
	char* pszPath = NULL;
	subTreeStack stack;
	stack.ndx = 0;

	SG_NULLARGCHECK_RETURN(pThis);

	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pMe->psql, &pStmt,
		"SELECT full_path, leaf_name, type, lft, rgt FROM nodes "
		"WHERE json_object_id = ? "
		"ORDER BY lft;") );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, pMe->objectId) );

	while ((rc=sqlite3_step(pStmt)) == SQLITE_ROW)
	{
		const char* pszFullPath = (const char*)sqlite3_column_text(pStmt, 0);
		SG_uint32 lenPath = SG_STRLEN(pszFullPath);
		SG_uint32 lft = sqlite3_column_int(pStmt, 3);
		SG_uint32 rgt = sqlite3_column_int(pStmt, 4);

		//SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "[jsondb verify tree] Verifying %s\n", pszFullPath) );

		nodeCount++;

		/* A gap between the previous rgt and this lft means we have
		 * climbed out of one or more containers: pop them. */
		for (; last_rgt+1 < lft; last_rgt++)
		{
			void* p = NULL;
			SG_bool dontCare;
			SG_ERR_CHECK( _pop(pCtx, &stack, &dontCare, &p) );
			SG_NULLFREE(pCtx, p);
		}

		if (lenPath == 1)
		{
			/* The root node: must be "/", named **ROOT**, first in
			 * lft order, with nothing on the stack yet. */
			if (pszFullPath[0] != '/')
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			if (strcmp("**ROOT**", (const char*)sqlite3_column_text(pStmt, 1)) != 0)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			if (stack.ndx != 0)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			if (lft != 1)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			rootRgt = rgt;
		}
		else
		{
			char* pszParentPath;
			/* Parse the stored full_path and check it against the
			 * stored leaf_name and the parent on the stack. */
			SG_ERR_CHECK( _pathHelper(pCtx, pszFullPath, lenPath, NULL,
				&pszParsedParentPath, &pszParsedLeafName) );
			if (stack.ndx == 0)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			if (strcmp(pszParsedLeafName, (const char*)sqlite3_column_text(pStmt, 1)) != 0)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
			pszParentPath = (char*)stack.stack[stack.ndx];
			if (strcmp(pszParsedParentPath, pszParentPath) != 0)
				SG_ERR_THROW(SG_ERR_JSONDB_INVALID_PATH);
		}

		/* Non-leaf: push its path so children can verify their parent. */
		if (rgt - lft > 1)
		{
			SG_ERR_CHECK( SG_STRDUP(pCtx, pszFullPath, &pszPath) );
			SG_ERR_CHECK( _push(pCtx, &stack, SG_FALSE, pszPath) );
			pszPath = NULL;
		}

		last_rgt = rgt;
		SG_NULLFREE(pCtx, pszParsedParentPath);
		SG_NULLFREE(pCtx, pszParsedLeafName);
	}
	if (rc != SQLITE_DONE)
		SG_ERR_THROW( SG_ERR_SQLITE(rc) );

	if (!rootRgt)
		SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "No root node."));
	/* In a nested set, root's rgt is exactly twice the node count. */
	if ((rootRgt / 2) != nodeCount)
		SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "Root lft/rgt and node count disagree."));

	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

	/* fall through */
fail:
	/* Drain whatever is left on the stack (success or failure).  NOTE:
	 * SG_ERR_CHECK inside the fail block re-enters fail: on error, but
	 * _pop shrinks stack.ndx so the loop still terminates. */
	while (stack.ndx)
	{
		void* p = NULL;
		SG_bool dontCare;
		SG_ERR_CHECK( _pop(pCtx, &stack, &dontCare, &p) );
		SG_NULLFREE(pCtx, p);
	}
	SG_ERR_IGNORE( sg_sqlite__finalize(pCtx, pStmt) );
	SG_NULLFREE(pCtx, pszParsedParentPath);
	SG_NULLFREE(pCtx, pszParsedLeafName);
	SG_NULLFREE(pCtx, pszPath);
}
#else
/* Release builds: no-op stub with the same signature. */
void SG_jsondb_debug__verify_tree(
	SG_UNUSED_PARAM(SG_context* pCtx),
	SG_UNUSED_PARAM(SG_jsondb* pThis))
{
	SG_UNUSED(pCtx);
	SG_UNUSED(pThis);
}
#endif
/* ---- 749005.c : start of a second, unrelated source file (tclCompile.c) concatenated below ---- */
/* * tclCompile.c -- * * This file contains procedures that compile Tcl commands or parts of * commands (like quoted strings or nested sub-commands) into a sequence * of instructions ("bytecodes"). * * Copyright (c) 1996-1998 Sun Microsystems, Inc. * Copyright (c) 2001 by Kevin B. Kenny. All rights reserved. * * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. */ #include "tclInt.h" #include "tclCompile.h" #include <assert.h> /* * Variable that controls whether compilation tracing is enabled and, if so, * what level of tracing is desired: * 0: no compilation tracing * 1: summarize compilation of top level cmds and proc bodies * 2: display all instructions of each ByteCode compiled * This variable is linked to the Tcl variable "tcl_traceCompile". */ #ifdef TCL_COMPILE_DEBUG int tclTraceCompile = 0; static int traceInitialized = 0; #endif /* * A table describing the Tcl bytecode instructions. Entries in this table * must correspond to the instruction opcode definitions in tclCompile.h. The * names "op1" and "op4" refer to an instruction's one or four byte first * operand. Similarly, "stktop" and "stknext" refer to the topmost and next to * topmost stack elements. * * Note that the load, store, and incr instructions do not distinguish local * from global variables; the bytecode interpreter at runtime uses the * existence of a procedure call frame to distinguish these. 
*/ InstructionDesc const tclInstructionTable[] = { /* Name Bytes stackEffect #Opnds Operand types */ {"done", 1, -1, 0, {OPERAND_NONE}}, /* Finish ByteCode execution and return stktop (top stack item) */ {"push1", 2, +1, 1, {OPERAND_LIT1}}, /* Push object at ByteCode objArray[op1] */ {"push4", 5, +1, 1, {OPERAND_LIT4}}, /* Push object at ByteCode objArray[op4] */ {"pop", 1, -1, 0, {OPERAND_NONE}}, /* Pop the topmost stack object */ {"dup", 1, +1, 0, {OPERAND_NONE}}, /* Duplicate the topmost stack object and push the result */ {"strcat", 2, INT_MIN, 1, {OPERAND_UINT1}}, /* Concatenate the top op1 items and push result */ {"invokeStk1", 2, INT_MIN, 1, {OPERAND_UINT1}}, /* Invoke command named objv[0]; <objc,objv> = <op1,top op1> */ {"invokeStk4", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* Invoke command named objv[0]; <objc,objv> = <op4,top op4> */ {"evalStk", 1, 0, 0, {OPERAND_NONE}}, /* Evaluate command in stktop using Tcl_EvalObj. */ {"exprStk", 1, 0, 0, {OPERAND_NONE}}, /* Execute expression in stktop using Tcl_ExprStringObj. 
*/ {"loadScalar1", 2, 1, 1, {OPERAND_LVT1}}, /* Load scalar variable at index op1 <= 255 in call frame */ {"loadScalar4", 5, 1, 1, {OPERAND_LVT4}}, /* Load scalar variable at index op1 >= 256 in call frame */ {"loadScalarStk", 1, 0, 0, {OPERAND_NONE}}, /* Load scalar variable; scalar's name is stktop */ {"loadArray1", 2, 0, 1, {OPERAND_LVT1}}, /* Load array element; array at slot op1<=255, element is stktop */ {"loadArray4", 5, 0, 1, {OPERAND_LVT4}}, /* Load array element; array at slot op1 > 255, element is stktop */ {"loadArrayStk", 1, -1, 0, {OPERAND_NONE}}, /* Load array element; element is stktop, array name is stknext */ {"loadStk", 1, 0, 0, {OPERAND_NONE}}, /* Load general variable; unparsed variable name is stktop */ {"storeScalar1", 2, 0, 1, {OPERAND_LVT1}}, /* Store scalar variable at op1<=255 in frame; value is stktop */ {"storeScalar4", 5, 0, 1, {OPERAND_LVT4}}, /* Store scalar variable at op1 > 255 in frame; value is stktop */ {"storeScalarStk", 1, -1, 0, {OPERAND_NONE}}, /* Store scalar; value is stktop, scalar name is stknext */ {"storeArray1", 2, -1, 1, {OPERAND_LVT1}}, /* Store array element; array at op1<=255, value is top then elem */ {"storeArray4", 5, -1, 1, {OPERAND_LVT4}}, /* Store array element; array at op1>=256, value is top then elem */ {"storeArrayStk", 1, -2, 0, {OPERAND_NONE}}, /* Store array element; value is stktop, then elem, array names */ {"storeStk", 1, -1, 0, {OPERAND_NONE}}, /* Store general variable; value is stktop, then unparsed name */ {"incrScalar1", 2, 0, 1, {OPERAND_LVT1}}, /* Incr scalar at index op1<=255 in frame; incr amount is stktop */ {"incrScalarStk", 1, -1, 0, {OPERAND_NONE}}, /* Incr scalar; incr amount is stktop, scalar's name is stknext */ {"incrArray1", 2, -1, 1, {OPERAND_LVT1}}, /* Incr array elem; arr at slot op1<=255, amount is top then elem */ {"incrArrayStk", 1, -2, 0, {OPERAND_NONE}}, /* Incr array element; amount is top then elem then array names */ {"incrStk", 1, -1, 0, {OPERAND_NONE}}, /* Incr 
general variable; amount is stktop then unparsed var name */ {"incrScalar1Imm", 3, +1, 2, {OPERAND_LVT1, OPERAND_INT1}}, /* Incr scalar at slot op1 <= 255; amount is 2nd operand byte */ {"incrScalarStkImm", 2, 0, 1, {OPERAND_INT1}}, /* Incr scalar; scalar name is stktop; incr amount is op1 */ {"incrArray1Imm", 3, 0, 2, {OPERAND_LVT1, OPERAND_INT1}}, /* Incr array elem; array at slot op1 <= 255, elem is stktop, * amount is 2nd operand byte */ {"incrArrayStkImm", 2, -1, 1, {OPERAND_INT1}}, /* Incr array element; elem is top then array name, amount is op1 */ {"incrStkImm", 2, 0, 1, {OPERAND_INT1}}, /* Incr general variable; unparsed name is top, amount is op1 */ {"jump1", 2, 0, 1, {OPERAND_OFFSET1}}, /* Jump relative to (pc + op1) */ {"jump4", 5, 0, 1, {OPERAND_OFFSET4}}, /* Jump relative to (pc + op4) */ {"jumpTrue1", 2, -1, 1, {OPERAND_OFFSET1}}, /* Jump relative to (pc + op1) if stktop expr object is true */ {"jumpTrue4", 5, -1, 1, {OPERAND_OFFSET4}}, /* Jump relative to (pc + op4) if stktop expr object is true */ {"jumpFalse1", 2, -1, 1, {OPERAND_OFFSET1}}, /* Jump relative to (pc + op1) if stktop expr object is false */ {"jumpFalse4", 5, -1, 1, {OPERAND_OFFSET4}}, /* Jump relative to (pc + op4) if stktop expr object is false */ {"bitor", 1, -1, 0, {OPERAND_NONE}}, /* Bitwise or: push (stknext | stktop) */ {"bitxor", 1, -1, 0, {OPERAND_NONE}}, /* Bitwise xor push (stknext ^ stktop) */ {"bitand", 1, -1, 0, {OPERAND_NONE}}, /* Bitwise and: push (stknext & stktop) */ {"eq", 1, -1, 0, {OPERAND_NONE}}, /* Equal: push (stknext == stktop) */ {"neq", 1, -1, 0, {OPERAND_NONE}}, /* Not equal: push (stknext != stktop) */ {"lt", 1, -1, 0, {OPERAND_NONE}}, /* Less: push (stknext < stktop) */ {"gt", 1, -1, 0, {OPERAND_NONE}}, /* Greater: push (stknext > stktop) */ {"le", 1, -1, 0, {OPERAND_NONE}}, /* Less or equal: push (stknext <= stktop) */ {"ge", 1, -1, 0, {OPERAND_NONE}}, /* Greater or equal: push (stknext >= stktop) */ {"lshift", 1, -1, 0, {OPERAND_NONE}}, /* Left shift: 
push (stknext << stktop) */ {"rshift", 1, -1, 0, {OPERAND_NONE}}, /* Right shift: push (stknext >> stktop) */ {"add", 1, -1, 0, {OPERAND_NONE}}, /* Add: push (stknext + stktop) */ {"sub", 1, -1, 0, {OPERAND_NONE}}, /* Sub: push (stkext - stktop) */ {"mult", 1, -1, 0, {OPERAND_NONE}}, /* Multiply: push (stknext * stktop) */ {"div", 1, -1, 0, {OPERAND_NONE}}, /* Divide: push (stknext / stktop) */ {"mod", 1, -1, 0, {OPERAND_NONE}}, /* Mod: push (stknext % stktop) */ {"uplus", 1, 0, 0, {OPERAND_NONE}}, /* Unary plus: push +stktop */ {"uminus", 1, 0, 0, {OPERAND_NONE}}, /* Unary minus: push -stktop */ {"bitnot", 1, 0, 0, {OPERAND_NONE}}, /* Bitwise not: push ~stktop */ {"not", 1, 0, 0, {OPERAND_NONE}}, /* Logical not: push !stktop */ {"tryCvtToNumeric", 1, 0, 0, {OPERAND_NONE}}, /* Try converting stktop to first int then double if possible. */ {"break", 1, 0, 0, {OPERAND_NONE}}, /* Abort closest enclosing loop; if none, return TCL_BREAK code. */ {"continue", 1, 0, 0, {OPERAND_NONE}}, /* Skip to next iteration of closest enclosing loop; if none, return * TCL_CONTINUE code. */ {"beginCatch4", 5, 0, 1, {OPERAND_UINT4}}, /* Record start of catch with the operand's exception index. Push the * current stack depth onto a special catch stack. */ {"endCatch", 1, 0, 0, {OPERAND_NONE}}, /* End of last catch. Pop the bytecode interpreter's catch stack. */ {"pushResult", 1, +1, 0, {OPERAND_NONE}}, /* Push the interpreter's object result onto the stack. */ {"pushReturnCode", 1, +1, 0, {OPERAND_NONE}}, /* Push interpreter's return code (e.g. TCL_OK or TCL_ERROR) as a new * object onto the stack. 
*/ {"streq", 1, -1, 0, {OPERAND_NONE}}, /* Str Equal: push (stknext eq stktop) */ {"strneq", 1, -1, 0, {OPERAND_NONE}}, /* Str !Equal: push (stknext neq stktop) */ {"strcmp", 1, -1, 0, {OPERAND_NONE}}, /* Str Compare: push (stknext cmp stktop) */ {"strlen", 1, 0, 0, {OPERAND_NONE}}, /* Str Length: push (strlen stktop) */ {"strindex", 1, -1, 0, {OPERAND_NONE}}, /* Str Index: push (strindex stknext stktop) */ {"strmatch", 2, -1, 1, {OPERAND_INT1}}, /* Str Match: push (strmatch stknext stktop) opnd == nocase */ {"list", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* List: push (stk1 stk2 ... stktop) */ {"listIndex", 1, -1, 0, {OPERAND_NONE}}, /* List Index: push (listindex stknext stktop) */ {"listLength", 1, 0, 0, {OPERAND_NONE}}, /* List Len: push (listlength stktop) */ {"appendScalar1", 2, 0, 1, {OPERAND_LVT1}}, /* Append scalar variable at op1<=255 in frame; value is stktop */ {"appendScalar4", 5, 0, 1, {OPERAND_LVT4}}, /* Append scalar variable at op1 > 255 in frame; value is stktop */ {"appendArray1", 2, -1, 1, {OPERAND_LVT1}}, /* Append array element; array at op1<=255, value is top then elem */ {"appendArray4", 5, -1, 1, {OPERAND_LVT4}}, /* Append array element; array at op1>=256, value is top then elem */ {"appendArrayStk", 1, -2, 0, {OPERAND_NONE}}, /* Append array element; value is stktop, then elem, array names */ {"appendStk", 1, -1, 0, {OPERAND_NONE}}, /* Append general variable; value is stktop, then unparsed name */ {"lappendScalar1", 2, 0, 1, {OPERAND_LVT1}}, /* Lappend scalar variable at op1<=255 in frame; value is stktop */ {"lappendScalar4", 5, 0, 1, {OPERAND_LVT4}}, /* Lappend scalar variable at op1 > 255 in frame; value is stktop */ {"lappendArray1", 2, -1, 1, {OPERAND_LVT1}}, /* Lappend array element; array at op1<=255, value is top then elem */ {"lappendArray4", 5, -1, 1, {OPERAND_LVT4}}, /* Lappend array element; array at op1>=256, value is top then elem */ {"lappendArrayStk", 1, -2, 0, {OPERAND_NONE}}, /* Lappend array element; value is stktop, then 
elem, array names */ {"lappendStk", 1, -1, 0, {OPERAND_NONE}}, /* Lappend general variable; value is stktop, then unparsed name */ {"lindexMulti", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* Lindex with generalized args, operand is number of stacked objs * used: (operand-1) entries from stktop are the indices; then list to * process. */ {"over", 5, +1, 1, {OPERAND_UINT4}}, /* Duplicate the arg-th element from top of stack (TOS=0) */ {"lsetList", 1, -2, 0, {OPERAND_NONE}}, /* Four-arg version of 'lset'. stktop is old value; next is new * element value, next is the index list; pushes new value */ {"lsetFlat", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* Three- or >=5-arg version of 'lset', operand is number of stacked * objs: stktop is old value, next is new element value, next come * (operand-2) indices; pushes the new value. */ {"returnImm", 9, -1, 2, {OPERAND_INT4, OPERAND_UINT4}}, /* Compiled [return], code, level are operands; options and result * are on the stack. */ {"expon", 1, -1, 0, {OPERAND_NONE}}, /* Binary exponentiation operator: push (stknext ** stktop) */ /* * NOTE: the stack effects of expandStkTop and invokeExpanded are wrong - * but it cannot be done right at compile time, the stack effect is only * known at run time. The value for invokeExpanded is estimated better at * compile time. * See the comments further down in this file, where INST_INVOKE_EXPANDED * is emitted. 
*/ {"expandStart", 1, 0, 0, {OPERAND_NONE}}, /* Start of command with {*} (expanded) arguments */ {"expandStkTop", 5, 0, 1, {OPERAND_UINT4}}, /* Expand the list at stacktop: push its elements on the stack */ {"invokeExpanded", 1, 0, 0, {OPERAND_NONE}}, /* Invoke the command marked by the last 'expandStart' */ {"listIndexImm", 5, 0, 1, {OPERAND_IDX4}}, /* List Index: push (lindex stktop op4) */ {"listRangeImm", 9, 0, 2, {OPERAND_IDX4, OPERAND_IDX4}}, /* List Range: push (lrange stktop op4 op4) */ {"startCommand", 9, 0, 2, {OPERAND_OFFSET4, OPERAND_UINT4}}, /* Start of bytecoded command: op is the length of the cmd's code, op2 * is number of commands here */ {"listIn", 1, -1, 0, {OPERAND_NONE}}, /* List containment: push [lsearch stktop stknext]>=0) */ {"listNotIn", 1, -1, 0, {OPERAND_NONE}}, /* List negated containment: push [lsearch stktop stknext]<0) */ {"pushReturnOpts", 1, +1, 0, {OPERAND_NONE}}, /* Push the interpreter's return option dictionary as an object on the * stack. */ {"returnStk", 1, -1, 0, {OPERAND_NONE}}, /* Compiled [return]; options and result are on the stack, code and * level are in the options. */ {"dictGet", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* The top op4 words (min 1) are a key path into the dictionary just * below the keys on the stack, and all those values are replaced by * the value read out of that key-path (like [dict get]). * Stack: ... dict key1 ... keyN => ... value */ {"dictSet", 9, INT_MIN, 2, {OPERAND_UINT4, OPERAND_LVT4}}, /* Update a dictionary value such that the keys are a path pointing to * the value. op4#1 = numKeys, op4#2 = LVTindex * Stack: ... key1 ... keyN value => ... newDict */ {"dictUnset", 9, INT_MIN, 2, {OPERAND_UINT4, OPERAND_LVT4}}, /* Update a dictionary value such that the keys are not a path pointing * to any value. op4#1 = numKeys, op4#2 = LVTindex * Stack: ... key1 ... keyN => ... 
newDict */ {"dictIncrImm", 9, 0, 2, {OPERAND_INT4, OPERAND_LVT4}}, /* Update a dictionary value such that the value pointed to by key is * incremented by some value (or set to it if the key isn't in the * dictionary at all). op4#1 = incrAmount, op4#2 = LVTindex * Stack: ... key => ... newDict */ {"dictAppend", 5, -1, 1, {OPERAND_LVT4}}, /* Update a dictionary value such that the value pointed to by key has * some value string-concatenated onto it. op4 = LVTindex * Stack: ... key valueToAppend => ... newDict */ {"dictLappend", 5, -1, 1, {OPERAND_LVT4}}, /* Update a dictionary value such that the value pointed to by key has * some value list-appended onto it. op4 = LVTindex * Stack: ... key valueToAppend => ... newDict */ {"dictFirst", 5, +2, 1, {OPERAND_LVT4}}, /* Begin iterating over the dictionary, using the local scalar * indicated by op4 to hold the iterator state. The local scalar * should not refer to a named variable as the value is not wholly * managed correctly. * Stack: ... dict => ... value key doneBool */ {"dictNext", 5, +3, 1, {OPERAND_LVT4}}, /* Get the next iteration from the iterator in op4's local scalar. * Stack: ... => ... value key doneBool */ {"dictUpdateStart", 9, 0, 2, {OPERAND_LVT4, OPERAND_AUX4}}, /* Create the variables (described in the aux data referred to by the * second immediate argument) to mirror the state of the dictionary in * the variable referred to by the first immediate argument. The list * of keys (top of the stack, not popped) must be the same length as * the list of variables. * Stack: ... keyList => ... keyList */ {"dictUpdateEnd", 9, -1, 2, {OPERAND_LVT4, OPERAND_AUX4}}, /* Reflect the state of local variables (described in the aux data * referred to by the second immediate argument) back to the state of * the dictionary in the variable referred to by the first immediate * argument. The list of keys (popped from the stack) must be the same * length as the list of variables. * Stack: ... keyList => ... 
*/ {"jumpTable", 5, -1, 1, {OPERAND_AUX4}}, /* Jump according to the jump-table (in AuxData as indicated by the * operand) and the argument popped from the list. Always executes the * next instruction if no match against the table's entries was found. * Stack: ... value => ... * Note that the jump table contains offsets relative to the PC when * it points to this instruction; the code is relocatable. */ {"upvar", 5, -1, 1, {OPERAND_LVT4}}, /* finds level and otherName in stack, links to local variable at * index op1. Leaves the level on stack. */ {"nsupvar", 5, -1, 1, {OPERAND_LVT4}}, /* finds namespace and otherName in stack, links to local variable at * index op1. Leaves the namespace on stack. */ {"variable", 5, -1, 1, {OPERAND_LVT4}}, /* finds namespace and otherName in stack, links to local variable at * index op1. Leaves the namespace on stack. */ {"syntax", 9, -1, 2, {OPERAND_INT4, OPERAND_UINT4}}, /* Compiled bytecodes to signal syntax error. Equivalent to returnImm * except for the ERR_ALREADY_LOGGED flag in the interpreter. 
*/ {"reverse", 5, 0, 1, {OPERAND_UINT4}}, /* Reverse the order of the arg elements at the top of stack */ {"regexp", 2, -1, 1, {OPERAND_INT1}}, /* Regexp: push (regexp stknext stktop) opnd == nocase */ {"existScalar", 5, 1, 1, {OPERAND_LVT4}}, /* Test if scalar variable at index op1 in call frame exists */ {"existArray", 5, 0, 1, {OPERAND_LVT4}}, /* Test if array element exists; array at slot op1, element is * stktop */ {"existArrayStk", 1, -1, 0, {OPERAND_NONE}}, /* Test if array element exists; element is stktop, array name is * stknext */ {"existStk", 1, 0, 0, {OPERAND_NONE}}, /* Test if general variable exists; unparsed variable name is stktop*/ {"nop", 1, 0, 0, {OPERAND_NONE}}, /* Do nothing */ {"returnCodeBranch", 1, -1, 0, {OPERAND_NONE}}, /* Jump to next instruction based on the return code on top of stack * ERROR: +1; RETURN: +3; BREAK: +5; CONTINUE: +7; * Other non-OK: +9 */ {"unsetScalar", 6, 0, 2, {OPERAND_UINT1, OPERAND_LVT4}}, /* Make scalar variable at index op2 in call frame cease to exist; * op1 is 1 for errors on problems, 0 otherwise */ {"unsetArray", 6, -1, 2, {OPERAND_UINT1, OPERAND_LVT4}}, /* Make array element cease to exist; array at slot op2, element is * stktop; op1 is 1 for errors on problems, 0 otherwise */ {"unsetArrayStk", 2, -2, 1, {OPERAND_UINT1}}, /* Make array element cease to exist; element is stktop, array name is * stknext; op1 is 1 for errors on problems, 0 otherwise */ {"unsetStk", 2, -1, 1, {OPERAND_UINT1}}, /* Make general variable cease to exist; unparsed variable name is * stktop; op1 is 1 for errors on problems, 0 otherwise */ {"dictExpand", 1, -1, 0, {OPERAND_NONE}}, /* Probe into a dict and extract it (or a subdict of it) into * variables with matched names. Produces list of keys bound as * result. Part of [dict with]. * Stack: ... dict path => ... keyList */ {"dictRecombineStk", 1, -3, 0, {OPERAND_NONE}}, /* Map variable contents back into a dictionary in a variable. Part of * [dict with]. * Stack: ... 
dictVarName path keyList => ... */ {"dictRecombineImm", 5, -2, 1, {OPERAND_LVT4}}, /* Map variable contents back into a dictionary in the local variable * indicated by the LVT index. Part of [dict with]. * Stack: ... path keyList => ... */ {"dictExists", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* The top op4 words (min 1) are a key path into the dictionary just * below the keys on the stack, and all those values are replaced by a * boolean indicating whether it is possible to read out a value from * that key-path (like [dict exists]). * Stack: ... dict key1 ... keyN => ... boolean */ {"verifyDict", 1, -1, 0, {OPERAND_NONE}}, /* Verifies that the word on the top of the stack is a dictionary, * popping it if it is and throwing an error if it is not. * Stack: ... value => ... */ {"strmap", 1, -2, 0, {OPERAND_NONE}}, /* Simplified version of [string map] that only applies one change * string, and only case-sensitively. * Stack: ... from to string => ... changedString */ {"strfind", 1, -1, 0, {OPERAND_NONE}}, /* Find the first index of a needle string in a haystack string, * producing the index (integer) or -1 if nothing found. * Stack: ... needle haystack => ... index */ {"strrfind", 1, -1, 0, {OPERAND_NONE}}, /* Find the last index of a needle string in a haystack string, * producing the index (integer) or -1 if nothing found. * Stack: ... needle haystack => ... index */ {"strrangeImm", 9, 0, 2, {OPERAND_IDX4, OPERAND_IDX4}}, /* String Range: push (string range stktop op4 op4) */ {"strrange", 1, -2, 0, {OPERAND_NONE}}, /* String Range with non-constant arguments. * Stack: ... string idxA idxB => ... substring */ {"yield", 1, 0, 0, {OPERAND_NONE}}, /* Makes the current coroutine yield the value at the top of the * stack, and places the response back on top of the stack when it * resumes. * Stack: ... valueToYield => ... resumeValue */ {"coroName", 1, +1, 0, {OPERAND_NONE}}, /* Push the name of the interpreter's current coroutine as an object * on the stack. 
*/ {"tailcall", 2, INT_MIN, 1, {OPERAND_UINT1}}, /* Do a tailcall with the opnd items on the stack as the thing to * tailcall to; opnd must be greater than 0 for the semantics to work * right. */ {"currentNamespace", 1, +1, 0, {OPERAND_NONE}}, /* Push the name of the interpreter's current namespace as an object * on the stack. */ {"infoLevelNumber", 1, +1, 0, {OPERAND_NONE}}, /* Push the stack depth (i.e., [info level]) of the interpreter as an * object on the stack. */ {"infoLevelArgs", 1, 0, 0, {OPERAND_NONE}}, /* Push the argument words to a stack depth (i.e., [info level <n>]) * of the interpreter as an object on the stack. * Stack: ... depth => ... argList */ {"resolveCmd", 1, 0, 0, {OPERAND_NONE}}, /* Resolves the command named on the top of the stack to its fully * qualified version, or produces the empty string if no such command * exists. Never generates errors. * Stack: ... cmdName => ... fullCmdName */ {"tclooSelf", 1, +1, 0, {OPERAND_NONE}}, /* Push the identity of the current TclOO object (i.e., the name of * its current public access command) on the stack. */ {"tclooClass", 1, 0, 0, {OPERAND_NONE}}, /* Push the class of the TclOO object named at the top of the stack * onto the stack. * Stack: ... object => ... class */ {"tclooNamespace", 1, 0, 0, {OPERAND_NONE}}, /* Push the namespace of the TclOO object named at the top of the * stack onto the stack. * Stack: ... object => ... namespace */ {"tclooIsObject", 1, 0, 0, {OPERAND_NONE}}, /* Push whether the value named at the top of the stack is a TclOO * object (i.e., a boolean). Can corrupt the interpreter result * despite not throwing, so not safe for use in a post-exception * context. * Stack: ... value => ... boolean */ {"arrayExistsStk", 1, 0, 0, {OPERAND_NONE}}, /* Looks up the element on the top of the stack and tests whether it * is an array. Pushes a boolean describing whether this is the * case. Also runs the whole-array trace on the named variable, so can * throw anything. * Stack: ... 
varName => ... boolean */ {"arrayExistsImm", 5, +1, 1, {OPERAND_LVT4}}, /* Looks up the variable indexed by opnd and tests whether it is an * array. Pushes a boolean describing whether this is the case. Also * runs the whole-array trace on the named variable, so can throw * anything. * Stack: ... => ... boolean */ {"arrayMakeStk", 1, -1, 0, {OPERAND_NONE}}, /* Forces the element on the top of the stack to be the name of an * array. * Stack: ... varName => ... */ {"arrayMakeImm", 5, 0, 1, {OPERAND_LVT4}}, /* Forces the variable indexed by opnd to be an array. Does not touch * the stack. */ {"invokeReplace", 6, INT_MIN, 2, {OPERAND_UINT4,OPERAND_UINT1}}, /* Invoke command named objv[0], replacing the first two words with * the word at the top of the stack; * <objc,objv> = <op4,top op4 after popping 1> */ {"listConcat", 1, -1, 0, {OPERAND_NONE}}, /* Concatenates the two lists at the top of the stack into a single * list and pushes that resulting list onto the stack. * Stack: ... list1 list2 => ... [lconcat list1 list2] */ {"expandDrop", 1, 0, 0, {OPERAND_NONE}}, /* Drops an element from the auxiliary stack, popping stack elements * until the matching stack depth is reached. */ /* New foreach implementation */ {"foreach_start", 5, +2, 1, {OPERAND_AUX4}}, /* Initialize execution of a foreach loop. Operand is aux data index * of the ForeachInfo structure for the foreach command. It pushes 2 * elements which hold runtime params for foreach_step, they are later * dropped by foreach_end together with the value lists. NOTE that the * iterator-tracker and info reference must not be passed to bytecodes * that handle normal Tcl values. NOTE that this instruction jumps to * the foreach_step instruction paired with it; the stack info below * is only nominal. * Stack: ... listObjs... => ... listObjs... iterTracker info */ {"foreach_step", 1, 0, 0, {OPERAND_NONE}}, /* "Step" or begin next iteration of foreach loop. Assigns to foreach * iteration variables. 
May jump to straight after the foreach_start * that pushed the iterTracker and info values. MUST be followed * immediately by a foreach_end. * Stack: ... listObjs... iterTracker info => * ... listObjs... iterTracker info */ {"foreach_end", 1, 0, 0, {OPERAND_NONE}}, /* Clean up a foreach loop by dropping the info value, the tracker * value and the lists that were being iterated over. * Stack: ... listObjs... iterTracker info => ... */ {"lmap_collect", 1, -1, 0, {OPERAND_NONE}}, /* Appends the value at the top of the stack to the list located on * the stack the "other side" of the foreach-related values. * Stack: ... collector listObjs... iterTracker info value => * ... collector listObjs... iterTracker info */ {"strtrim", 1, -1, 0, {OPERAND_NONE}}, /* [string trim] core: removes the characters (designated by the value * at the top of the stack) from both ends of the string and pushes * the resulting string. * Stack: ... string charset => ... trimmedString */ {"strtrimLeft", 1, -1, 0, {OPERAND_NONE}}, /* [string trimleft] core: removes the characters (designated by the * value at the top of the stack) from the left of the string and * pushes the resulting string. * Stack: ... string charset => ... trimmedString */ {"strtrimRight", 1, -1, 0, {OPERAND_NONE}}, /* [string trimright] core: removes the characters (designated by the * value at the top of the stack) from the right of the string and * pushes the resulting string. * Stack: ... string charset => ... trimmedString */ {"concatStk", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* Wrapper round Tcl_ConcatObj(), used for [concat] and [eval]. opnd * is number of values to concatenate. * Operation: push concat(stk1 stk2 ... stktop) */ {"strcaseUpper", 1, 0, 0, {OPERAND_NONE}}, /* [string toupper] core: converts whole string to upper case using * the default (extended "C" locale) rules. * Stack: ... string => ... 
newString */
    {"strcaseLower",	  1,	0,	  0,	{OPERAND_NONE}},
	/* [string tolower] core: converts whole string to lower case using
	 * the default (extended "C" locale) rules.
	 * Stack: ... string => ... newString */
    {"strcaseTitle",	  1,	0,	  0,	{OPERAND_NONE}},
	/* [string totitle] core: converts whole string to title case using
	 * the default (extended "C" locale) rules.
	 * Stack: ... string => ... newString */
    {"strreplace",	  1,	-3,	  0,	{OPERAND_NONE}},
	/* [string replace] core: replaces a non-empty range of one string
	 * with the contents of another.
	 * Stack: ... string fromIdx toIdx replacement => ... newString */
    {"originCmd",	  1,	0,	  0,	{OPERAND_NONE}},
	/* Reports which command was the origin (via namespace import chain)
	 * of the command named on the top of the stack.
	 * Stack: ... cmdName => ... fullOriginalCmdName */
    {"tclooNext",	  2,	INT_MIN,  1,	{OPERAND_UINT1}},
	/* Call the next item on the TclOO call chain, passing opnd arguments
	 * (min 1, max 255, *includes* "next"). The result of the invoked
	 * method implementation will be pushed on the stack in place of the
	 * arguments (similar to invokeStk).
	 * Stack: ... "next" arg2 arg3 -- argN => ... result */
    {"tclooNextClass",	  2,	INT_MIN,  1,	{OPERAND_UINT1}},
	/* Call the following item on the TclOO call chain defined by class
	 * className, passing opnd arguments (min 2, max 255, *includes*
	 * "nextto" and the class name). The result of the invoked method
	 * implementation will be pushed on the stack in place of the
	 * arguments (similar to invokeStk).
	 * Stack: ... "nextto" className arg3 arg4 -- argN => ... result */
    {"yieldToInvoke",	  1,	0,	  0,	{OPERAND_NONE}},
	/* Makes the current coroutine yield the value at the top of the
	 * stack, invoking the given command/args with resolution in the given
	 * namespace (all packed into a list), and places the list of values
	 * that are the response back on top of the stack when it resumes.
	 * Stack: ... [list ns cmd arg1 ... argN] => ...
resumeList */ {"numericType", 1, 0, 0, {OPERAND_NONE}}, /* Pushes the numeric type code of the word at the top of the stack. * Stack: ... value => ... typeCode */ {"tryCvtToBoolean", 1, +1, 0, {OPERAND_NONE}}, /* Try converting stktop to boolean if possible. No errors. * Stack: ... value => ... value isStrictBool */ {"strclass", 2, 0, 1, {OPERAND_SCLS1}}, /* See if all the characters of the given string are a member of the * specified (by opnd) character class. Note that an empty string will * satisfy the class check (standard definition of "all"). * Stack: ... stringValue => ... boolean */ {"lappendList", 5, 0, 1, {OPERAND_LVT4}}, /* Lappend list to scalar variable at op4 in frame. * Stack: ... list => ... listVarContents */ {"lappendListArray", 5, -1, 1, {OPERAND_LVT4}}, /* Lappend list to array element; array at op4. * Stack: ... elem list => ... listVarContents */ {"lappendListArrayStk", 1, -2, 0, {OPERAND_NONE}}, /* Lappend list to array element. * Stack: ... arrayName elem list => ... listVarContents */ {"lappendListStk", 1, -1, 0, {OPERAND_NONE}}, /* Lappend list to general variable. * Stack: ... varName list => ... listVarContents */ {"clockRead", 2, +1, 1, {OPERAND_UINT1}}, /* Read clock out to the stack. Operand is which clock to read * 0=clicks, 1=microseconds, 2=milliseconds, 3=seconds. * Stack: ... => ... time */ {"dictGetDef", 5, INT_MIN, 1, {OPERAND_UINT4}}, /* The top word is the default, the next op4 words (min 1) are a key * path into the dictionary just below the keys on the stack, and all * those values are replaced by the value read out of that key-path * (like [dict get]) except if there is no such key, when instead the * default is pushed instead. * Stack: ... dict key1 ... keyN default => ... 
value */ {"strlt", 1, -1, 0, {OPERAND_NONE}}, /* String Less: push (stknext < stktop) */ {"strgt", 1, -1, 0, {OPERAND_NONE}}, /* String Greater: push (stknext > stktop) */ {"strle", 1, -1, 0, {OPERAND_NONE}}, /* String Less or equal: push (stknext <= stktop) */ {"strge", 1, -1, 0, {OPERAND_NONE}}, /* String Greater or equal: push (stknext >= stktop) */ {NULL, 0, 0, 0, {OPERAND_NONE}} }; /* * Prototypes for procedures defined later in this file: */ static void CleanupByteCode(ByteCode *codePtr); static ByteCode * CompileSubstObj(Tcl_Interp *interp, Tcl_Obj *objPtr, int flags); static void DupByteCodeInternalRep(Tcl_Obj *srcPtr, Tcl_Obj *copyPtr); static unsigned char * EncodeCmdLocMap(CompileEnv *envPtr, ByteCode *codePtr, unsigned char *startPtr); static void EnterCmdExtentData(CompileEnv *envPtr, int cmdNumber, int numSrcBytes, int numCodeBytes); static void EnterCmdStartData(CompileEnv *envPtr, int cmdNumber, int srcOffset, int codeOffset); static void FreeByteCodeInternalRep(Tcl_Obj *objPtr); static void FreeSubstCodeInternalRep(Tcl_Obj *objPtr); static int GetCmdLocEncodingSize(CompileEnv *envPtr); static int IsCompactibleCompileEnv(CompileEnv *envPtr); static void PreventCycle(Tcl_Obj *objPtr, CompileEnv *envPtr); #ifdef TCL_COMPILE_STATS static void RecordByteCodeStats(ByteCode *codePtr); #endif /* TCL_COMPILE_STATS */ static int SetByteCodeFromAny(Tcl_Interp *interp, Tcl_Obj *objPtr); static void StartExpanding(CompileEnv *envPtr); /* * TIP #280: Helper for building the per-word line information of all compiled * commands. */ static void EnterCmdWordData(ExtCmdLoc *eclPtr, int srcOffset, Tcl_Token *tokenPtr, const char *cmd, int numWords, int line, int *clNext, int **lines, CompileEnv *envPtr); static void ReleaseCmdWordData(ExtCmdLoc *eclPtr); /* * The structure below defines the bytecode Tcl object type by means of * procedures that can be invoked by generic object code. 
*/ const Tcl_ObjType tclByteCodeType = { "bytecode", /* name */ FreeByteCodeInternalRep, /* freeIntRepProc */ DupByteCodeInternalRep, /* dupIntRepProc */ NULL, /* updateStringProc */ SetByteCodeFromAny /* setFromAnyProc */ }; /* * The structure below defines a bytecode Tcl object type to hold the * compiled bytecode for the [subst]itution of Tcl values. */ static const Tcl_ObjType substCodeType = { "substcode", /* name */ FreeSubstCodeInternalRep, /* freeIntRepProc */ DupByteCodeInternalRep, /* dupIntRepProc - shared with bytecode */ NULL, /* updateStringProc */ NULL, /* setFromAnyProc */ }; #define SubstFlags(objPtr) (objPtr)->internalRep.twoPtrValue.ptr2 /* * Helper macros. */ #define TclIncrUInt4AtPtr(ptr, delta) \ TclStoreInt4AtPtr(TclGetUInt4AtPtr(ptr)+(delta), (ptr)) /* *---------------------------------------------------------------------- * * TclSetByteCodeFromAny -- * * Part of the bytecode Tcl object type implementation. Attempts to * generate an byte code internal form for the Tcl object "objPtr" by * compiling its string representation. This function also takes a hook * procedure that will be invoked to perform any needed post processing * on the compilation results before generating byte codes. interp is * compilation context and may not be NULL. * * Results: * The return value is a standard Tcl object result. If an error occurs * during compilation, an error message is left in the interpreter's * result. * * Side effects: * Frees the old internal representation. If no error occurs, then the * compiled code is stored as "objPtr"s bytecode representation. Also, if * debugging, initializes the "tcl_traceCompile" Tcl variable used to * trace compilations. * *---------------------------------------------------------------------- */ int TclSetByteCodeFromAny( Tcl_Interp *interp, /* The interpreter for which the code is being * compiled. Must not be NULL. */ Tcl_Obj *objPtr, /* The object to make a ByteCode object. 
*/ CompileHookProc *hookProc, /* Procedure to invoke after compilation. */ ClientData clientData) /* Hook procedure private data. */ { Interp *iPtr = (Interp *) interp; CompileEnv compEnv; /* Compilation environment structure allocated * in frame. */ size_t length; int result = TCL_OK; const char *stringPtr; Proc *procPtr = iPtr->compiledProcPtr; ContLineLoc *clLocPtr; #ifdef TCL_COMPILE_DEBUG if (!traceInitialized) { if (Tcl_LinkVar(interp, "tcl_traceCompile", &tclTraceCompile, TCL_LINK_INT) != TCL_OK) { Tcl_Panic("SetByteCodeFromAny: unable to create link for tcl_traceCompile variable"); } traceInitialized = 1; } #endif stringPtr = TclGetStringFromObj(objPtr, &length); /* * TIP #280: Pick up the CmdFrame in which the BC compiler was invoked and * use to initialize the tracking in the compiler. This information was * stored by TclCompEvalObj and ProcCompileProc. */ TclInitCompileEnv(interp, &compEnv, stringPtr, length, iPtr->invokeCmdFramePtr, iPtr->invokeWord); /* * Now we check if we have data about invisible continuation lines for the * script, and make it available to the compile environment, if so. * * It is not clear if the script Tcl_Obj* can be free'd while the compiler * is using it, leading to the release of the associated ContLineLoc * structure as well. To ensure that the latter doesn't happen we set a * lock on it. We release this lock in the function TclFreeCompileEnv(), * found in this file. The "lineCLPtr" hashtable is managed in the file * "tclObj.c". */ clLocPtr = TclContinuationsGet(objPtr); if (clLocPtr) { compEnv.clNext = &clLocPtr->loc[0]; } TclCompileScript(interp, stringPtr, length, &compEnv); /* * Successful compilation. Add a "done" instruction at the end. */ TclEmitOpcode(INST_DONE, &compEnv); /* * Check for optimizations! * * Test if the generated code is free of most hazards; if so, recompile * but with generation of INST_START_CMD disabled. This produces somewhat * faster code in some cases, and more compact code in more. 
*/ if (Tcl_GetMaster(interp) == NULL && !Tcl_LimitTypeEnabled(interp, TCL_LIMIT_COMMANDS|TCL_LIMIT_TIME) && IsCompactibleCompileEnv(&compEnv)) { TclFreeCompileEnv(&compEnv); iPtr->compiledProcPtr = procPtr; TclInitCompileEnv(interp, &compEnv, stringPtr, length, iPtr->invokeCmdFramePtr, iPtr->invokeWord); if (clLocPtr) { compEnv.clNext = &clLocPtr->loc[0]; } compEnv.atCmdStart = 2; /* The disabling magic. */ TclCompileScript(interp, stringPtr, length, &compEnv); assert (compEnv.atCmdStart > 1); TclEmitOpcode(INST_DONE, &compEnv); assert (compEnv.atCmdStart > 1); } /* * Apply some peephole optimizations that can cross specific/generic * instruction generator boundaries. */ if (iPtr->optimizer) { (iPtr->optimizer)(&compEnv); } /* * Invoke the compilation hook procedure if one exists. */ if (hookProc) { result = hookProc(interp, &compEnv, clientData); } /* * Change the object into a ByteCode object. Ownership of the literal * objects and aux data items is given to the ByteCode object. */ #ifdef TCL_COMPILE_DEBUG TclVerifyLocalLiteralTable(&compEnv); #endif /*TCL_COMPILE_DEBUG*/ if (result == TCL_OK) { (void) TclInitByteCodeObj(objPtr, &tclByteCodeType, &compEnv); #ifdef TCL_COMPILE_DEBUG if (tclTraceCompile >= 2) { TclPrintByteCodeObj(interp, objPtr); fflush(stdout); } #endif /* TCL_COMPILE_DEBUG */ } TclFreeCompileEnv(&compEnv); return result; } /* *----------------------------------------------------------------------- * * SetByteCodeFromAny -- * * Part of the bytecode Tcl object type implementation. Attempts to * generate an byte code internal form for the Tcl object "objPtr" by * compiling its string representation. * * Results: * The return value is a standard Tcl object result. If an error occurs * during compilation, an error message is left in the interpreter's * result unless "interp" is NULL. * * Side effects: * Frees the old internal representation. If no error occurs, then the * compiled code is stored as "objPtr"s bytecode representation. 
Also, if * debugging, initializes the "tcl_traceCompile" Tcl variable used to * trace compilations. * *---------------------------------------------------------------------- */ static int SetByteCodeFromAny( Tcl_Interp *interp, /* The interpreter for which the code is being * compiled. Must not be NULL. */ Tcl_Obj *objPtr) /* The object to make a ByteCode object. */ { if (interp == NULL) { return TCL_ERROR; } return TclSetByteCodeFromAny(interp, objPtr, NULL, NULL); } /* *---------------------------------------------------------------------- * * DupByteCodeInternalRep -- * * Part of the bytecode Tcl object type implementation. However, it does * not copy the internal representation of a bytecode Tcl_Obj, but * instead leaves the new object untyped (with a NULL type pointer). * Code will be compiled for the new object only if necessary. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ static void DupByteCodeInternalRep( TCL_UNUSED(Tcl_Obj *) /*srcPtr*/, TCL_UNUSED(Tcl_Obj *) /*copyPtr*/) { return; } /* *---------------------------------------------------------------------- * * FreeByteCodeInternalRep -- * * Part of the bytecode Tcl object type implementation. Frees the storage * associated with a bytecode object's internal representation unless its * code is actively being executed. * * Results: * None. * * Side effects: * The bytecode object's internal rep is marked invalid and its code gets * freed unless the code is actively being executed. In that case the * cleanup is delayed until the last execution of the code completes. * *---------------------------------------------------------------------- */ static void FreeByteCodeInternalRep( Tcl_Obj *objPtr) /* Object whose internal rep to free. 
*/ { ByteCode *codePtr; ByteCodeGetIntRep(objPtr, &tclByteCodeType, codePtr); assert(codePtr != NULL); TclReleaseByteCode(codePtr); } /* *---------------------------------------------------------------------- * * TclReleaseByteCode -- * * This procedure does all the real work of freeing up a bytecode * object's ByteCode structure. It's called only when the structure's * reference count becomes zero. * * Results: * None. * * Side effects: * Frees objPtr's bytecode internal representation and sets its type NULL * Also releases its literals and frees its auxiliary data items. * *---------------------------------------------------------------------- */ void TclPreserveByteCode( ByteCode *codePtr) { codePtr->refCount++; } void TclReleaseByteCode( ByteCode *codePtr) { if (codePtr->refCount-- > 1) { return; } /* Just dropped to refcount==0. Clean up. */ CleanupByteCode(codePtr); } static void CleanupByteCode( ByteCode *codePtr) /* Points to the ByteCode to free. */ { Tcl_Interp *interp = (Tcl_Interp *) *codePtr->interpHandle; Interp *iPtr = (Interp *) interp; int numLitObjects = codePtr->numLitObjects; int numAuxDataItems = codePtr->numAuxDataItems; Tcl_Obj **objArrayPtr, *objPtr; const AuxData *auxDataPtr; int i; #ifdef TCL_COMPILE_STATS if (interp != NULL) { ByteCodeStats *statsPtr; Tcl_Time destroyTime; int lifetimeSec, lifetimeMicroSec, log2; statsPtr = &iPtr->stats; statsPtr->numByteCodesFreed++; statsPtr->currentSrcBytes -= (double) codePtr->numSrcBytes; statsPtr->currentByteCodeBytes -= (double) codePtr->structureSize; statsPtr->currentInstBytes -= (double) codePtr->numCodeBytes; statsPtr->currentLitBytes -= (double) codePtr->numLitObjects * sizeof(Tcl_Obj *); statsPtr->currentExceptBytes -= (double) codePtr->numExceptRanges * sizeof(ExceptionRange); statsPtr->currentAuxBytes -= (double) codePtr->numAuxDataItems * sizeof(AuxData); statsPtr->currentCmdMapBytes -= (double) codePtr->numCmdLocBytes; Tcl_GetTime(&destroyTime); lifetimeSec = destroyTime.sec - 
codePtr->createTime.sec; if (lifetimeSec > 2000) { /* avoid overflow */ lifetimeSec = 2000; } lifetimeMicroSec = 1000000 * lifetimeSec + (destroyTime.usec - codePtr->createTime.usec); log2 = TclLog2(lifetimeMicroSec); if (log2 > 31) { log2 = 31; } statsPtr->lifetimeCount[log2]++; } #endif /* TCL_COMPILE_STATS */ /* * A single heap object holds the ByteCode structure and its code, object, * command location, and auxiliary data arrays. This means we only need to * 1) decrement the ref counts of the LiteralEntry's in its literal array, * 2) call the free procs for the auxiliary data items, 3) free the * localCache if it is unused, and finally 4) free the ByteCode * structure's heap object. * * The case for TCL_BYTECODE_PRECOMPILED (precompiled ByteCodes, like * those generated from tbcload) is special, as they doesn't make use of * the global literal table. They instead maintain private references to * their literals which must be decremented. * * In order to insure a proper and efficient cleanup of the literal array * when it contains non-shared literals [Bug 983660], we also distinguish * the case of an interpreter being deleted (signaled by interp == NULL). * Also, as the interp deletion will remove the global literal table * anyway, we avoid the extra cost of updating it for each literal being * released. */ if (codePtr->flags & TCL_BYTECODE_PRECOMPILED) { objArrayPtr = codePtr->objArrayPtr; for (i = 0; i < numLitObjects; i++) { objPtr = *objArrayPtr; if (objPtr) { Tcl_DecrRefCount(objPtr); } objArrayPtr++; } codePtr->numLitObjects = 0; } else { objArrayPtr = codePtr->objArrayPtr; while (numLitObjects--) { /* TclReleaseLiteral calls Tcl_DecrRefCount() for us */ TclReleaseLiteral(interp, *objArrayPtr++); } } auxDataPtr = codePtr->auxDataArrayPtr; for (i = 0; i < numAuxDataItems; i++) { if (auxDataPtr->type->freeProc != NULL) { auxDataPtr->type->freeProc(auxDataPtr->clientData); } auxDataPtr++; } /* * TIP #280. 
Release the location data associated with this byte code
 * structure, if any. NOTE: The interp we belong to may be gone already,
 * and the data with it.
 *
 * See also tclBasic.c, DeleteInterpProc
 */

    if (iPtr) {
	/*
	 * The interp stores per-ByteCode location data in the lineBCPtr
	 * hash table, keyed by the ByteCode pointer; remove and release
	 * this code's entry, if one was ever recorded.
	 */

	Tcl_HashEntry *hePtr = Tcl_FindHashEntry(iPtr->lineBCPtr,
		(char *) codePtr);

	if (hePtr) {
	    ReleaseCmdWordData((ExtCmdLoc *)Tcl_GetHashValue(hePtr));
	    Tcl_DeleteHashEntry(hePtr);
	}
    }

    /*
     * Drop our reference on the shared local-variable cache; free it when
     * this was the last reference.
     */

    if (codePtr->localCachePtr && (codePtr->localCachePtr->refCount-- <= 1)) {
	TclFreeLocalCache(interp, codePtr->localCachePtr);
    }

    TclHandleRelease(codePtr->interpHandle);
    Tcl_Free(codePtr);
}

/*
 * ---------------------------------------------------------------------
 *
 * IsCompactibleCompileEnv --
 *
 *	Checks to see if we may apply some basic compaction optimizations to a
 *	piece of bytecode. Idempotent.
 *
 * ---------------------------------------------------------------------
 */

static int
IsCompactibleCompileEnv(
    CompileEnv *envPtr)
{
    unsigned char *pc;
    int size;

    /*
     * Special: procedures in the '::tcl' namespace (or its children) are
     * considered to be well-behaved and so can have compaction applied even
     * if it would otherwise be invalid.
     */

    if (envPtr->procPtr != NULL && envPtr->procPtr->cmdPtr != NULL
	    && envPtr->procPtr->cmdPtr->nsPtr != NULL) {
	Namespace *nsPtr = envPtr->procPtr->cmdPtr->nsPtr;

	if (strcmp(nsPtr->fullName, "::tcl") == 0
		|| strncmp(nsPtr->fullName, "::tcl::", 7) == 0) {
	    return 1;
	}
    }

    /*
     * Go through and ensure that no operation involved can cause a desired
     * change of bytecode sequence during running. This comes down to ensuring
     * that there are no mapped variables (due to traces) or calls to external
     * commands (traces, [uplevel] trickery). This is actually a very
     * conservative check; it turns down a lot of code that is OK in practice.
     */

    for (pc = envPtr->codeStart ; pc < envPtr->codeNext ; pc += size) {
	switch (*pc) {
	    /* Invokes */
	case INST_INVOKE_STK1:
	case INST_INVOKE_STK4:
	case INST_INVOKE_EXPANDED:
	case INST_INVOKE_REPLACE:
	    return 0;
	    /* Runtime evals */
	case INST_EVAL_STK:
	case INST_EXPR_STK:
	case INST_YIELD:
	    return 0;
	    /* Upvars */
	case INST_UPVAR:
	case INST_NSUPVAR:
	case INST_VARIABLE:
	    return 0;
	default:
	    /* Step over this instruction using its table-declared length. */
	    size = tclInstructionTable[*pc].numBytes;
	    assert (size > 0);
	    break;
	}
    }

    return 1;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_SubstObj --
 *
 *	This function performs the substitutions specified on the given string
 *	as described in the user documentation for the "subst" Tcl command.
 *
 * Results:
 *	A Tcl_Obj* containing the substituted string, or NULL to indicate that
 *	an error occurred.
 *
 * Side effects:
 *	See the user documentation.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
Tcl_SubstObj(
    Tcl_Interp *interp,		/* Interpreter in which substitution occurs */
    Tcl_Obj *objPtr,		/* The value to be substituted. */
    int flags)			/* What substitutions to do. */
{
    NRE_callback *rootPtr = TOP_CB(interp);

    /* Run the NR-enabled subst to completion on the current callback stack. */
    if (TclNRRunCallbacks(interp, Tcl_NRSubstObj(interp, objPtr, flags),
	    rootPtr) != TCL_OK) {
	return NULL;
    }
    return Tcl_GetObjResult(interp);
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_NRSubstObj --
 *
 *	Request substitution of a Tcl value by the NR stack.
 *
 * Results:
 *	Returns TCL_OK.
 *
 * Side effects:
 *	Compiles objPtr into bytecode that performs the substitutions as
 *	governed by flags and places callbacks on the NR stack to execute
 *	the bytecode and store the result in the interp.
 *
 *----------------------------------------------------------------------
 */

int
Tcl_NRSubstObj(
    Tcl_Interp *interp,
    Tcl_Obj *objPtr,
    int flags)
{
    ByteCode *codePtr = CompileSubstObj(interp, objPtr, flags);

    /* TODO: Confirm we do not need this. */
    /* Tcl_ResetResult(interp); */

    return TclNRExecuteByteCode(interp, codePtr);
}

/*
 *----------------------------------------------------------------------
 *
 * CompileSubstObj --
 *
 *	Compile a Tcl value into ByteCode implementing its substitution, as
 *	governed by flags.
 *
 * Results:
 *	A (ByteCode *) is returned pointing to the resulting ByteCode.
 *
 * Side effects:
 *	The Tcl_ObjType of objPtr is changed to the "substcode" type, and the
 *	ByteCode and governing flags value are kept in the internal rep for
 *	faster operations the next time CompileSubstObj is called on the same
 *	value.
 *
 *----------------------------------------------------------------------
 */

static ByteCode *
CompileSubstObj(
    Tcl_Interp *interp,
    Tcl_Obj *objPtr,
    int flags)
{
    Interp *iPtr = (Interp *) interp;
    ByteCode *codePtr = NULL;

    ByteCodeGetIntRep(objPtr, &substCodeType, codePtr);

    if (codePtr != NULL) {
	Namespace *nsPtr = iPtr->varFramePtr->nsPtr;

	/*
	 * Invalidate the cached bytecode if any of the compilation context
	 * (flags, interp, epochs, namespace, local cache) has changed since
	 * it was produced.
	 */

	if (flags != PTR2INT(SubstFlags(objPtr))
		|| ((Interp *) *codePtr->interpHandle != iPtr)
		|| (codePtr->compileEpoch != iPtr->compileEpoch)
		|| (codePtr->nsPtr != nsPtr)
		|| (codePtr->nsEpoch != nsPtr->resolverEpoch)
		|| (codePtr->localCachePtr != iPtr->varFramePtr->localCachePtr)) {
	    Tcl_StoreIntRep(objPtr, &substCodeType, NULL);
	    codePtr = NULL;
	}
    }
    if (codePtr == NULL) {
	CompileEnv compEnv;
	size_t numBytes;
	const char *bytes = TclGetStringFromObj(objPtr, &numBytes);

	/* TODO: Check for more TIP 280 */
	TclInitCompileEnv(interp, &compEnv, bytes, numBytes, NULL, 0);

	TclSubstCompile(interp, bytes, numBytes, flags, 1, &compEnv);

	TclEmitOpcode(INST_DONE, &compEnv);
	codePtr = TclInitByteCodeObj(objPtr, &substCodeType, &compEnv);
	TclFreeCompileEnv(&compEnv);

	SubstFlags(objPtr) = INT2PTR(flags);
	if (iPtr->varFramePtr->localCachePtr) {
	    codePtr->localCachePtr = iPtr->varFramePtr->localCachePtr;
	    codePtr->localCachePtr->refCount++;
	}
#ifdef TCL_COMPILE_DEBUG
	if (tclTraceCompile >= 2) {
	    TclPrintByteCodeObj(interp, objPtr);
	    fflush(stdout);
	}
#endif /* TCL_COMPILE_DEBUG */
    }
    return codePtr;
}

/*
 *----------------------------------------------------------------------
 *
 * FreeSubstCodeInternalRep --
 *
 *	Part of the substcode Tcl object type implementation. Frees the
 *	storage associated with a substcode object's internal representation
 *	unless its code is actively being executed.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The substcode object's internal rep is marked invalid and its code
 *	gets freed unless the code is actively being executed. In that case
 *	the cleanup is delayed until the last execution of the code completes.
 *
 *----------------------------------------------------------------------
 */

static void
FreeSubstCodeInternalRep(
    Tcl_Obj *objPtr)		/* Object whose internal rep to free. */
{
    ByteCode *codePtr;

    ByteCodeGetIntRep(objPtr, &substCodeType, codePtr);
    assert(codePtr != NULL);

    TclReleaseByteCode(codePtr);
}

/*
 *----------------------------------------------------------------------
 *
 * ReleaseCmdWordData --
 *
 *	Free a TIP #280 extended command location structure (ExtCmdLoc) and
 *	everything it owns: the reference on the source path object (only
 *	held for TCL_LOCATION_SOURCE entries), each per-command line array,
 *	the location array itself, and finally the structure.
 *
 *----------------------------------------------------------------------
 */

static void
ReleaseCmdWordData(
    ExtCmdLoc *eclPtr)
{
    int i;

    if (eclPtr->type == TCL_LOCATION_SOURCE) {
	Tcl_DecrRefCount(eclPtr->path);
    }
    for (i=0 ; i<eclPtr->nuloc ; i++) {
	Tcl_Free(eclPtr->loc[i].line);
    }

    if (eclPtr->loc != NULL) {
	Tcl_Free(eclPtr->loc);
    }

    Tcl_Free(eclPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclInitCompileEnv --
 *
 *	Initializes a CompileEnv compilation environment structure for the
 *	compilation of a string in an interpreter.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The CompileEnv structure is initialized.
 *
 *----------------------------------------------------------------------
 */

void
TclInitCompileEnv(
    Tcl_Interp *interp,		/* The interpreter for which a CompileEnv
				 * structure is initialized. */
    CompileEnv *envPtr,		/* Points to the CompileEnv structure to
				 * initialize. */
    const char *stringPtr,	/* The source string to be compiled. */
    size_t numBytes,		/* Number of bytes in source string. */
    const CmdFrame *invoker,	/* Location context invoking the bcc */
    int word)			/* Index of the word in that context getting
				 * compiled */
{
    Interp *iPtr = (Interp *) interp;

    assert(tclInstructionTable[LAST_INST_OPCODE].name == NULL);

    envPtr->iPtr = iPtr;
    envPtr->source = stringPtr;
    envPtr->numSrcBytes = numBytes;
    /* Claim the proc being compiled (if any); the caller set it on the
     * interp and we take ownership here. */
    envPtr->procPtr = iPtr->compiledProcPtr;
    iPtr->compiledProcPtr = NULL;
    envPtr->numCommands = 0;
    envPtr->exceptDepth = 0;
    envPtr->maxExceptDepth = 0;
    envPtr->maxStackDepth = 0;
    envPtr->currStackDepth = 0;
    TclInitLiteralTable(&envPtr->localLitTable);

    /* All arrays start out pointing at in-struct static space; the
     * malloced* flags record when they are later moved to the heap. */
    envPtr->codeStart = envPtr->staticCodeSpace;
    envPtr->codeNext = envPtr->codeStart;
    envPtr->codeEnd = envPtr->codeStart + COMPILEENV_INIT_CODE_BYTES;
    envPtr->mallocedCodeArray = 0;

    envPtr->literalArrayPtr = envPtr->staticLiteralSpace;
    envPtr->literalArrayNext = 0;
    envPtr->literalArrayEnd = COMPILEENV_INIT_NUM_OBJECTS;
    envPtr->mallocedLiteralArray = 0;

    envPtr->exceptArrayPtr = envPtr->staticExceptArraySpace;
    envPtr->exceptAuxArrayPtr = envPtr->staticExAuxArraySpace;
    envPtr->exceptArrayNext = 0;
    envPtr->exceptArrayEnd = COMPILEENV_INIT_EXCEPT_RANGES;
    envPtr->mallocedExceptArray = 0;

    envPtr->cmdMapPtr = envPtr->staticCmdMapSpace;
    envPtr->cmdMapEnd = COMPILEENV_INIT_CMD_MAP_SIZE;
    envPtr->mallocedCmdMap = 0;
    envPtr->atCmdStart = 1;
    envPtr->expandCount = 0;

    /*
     * TIP #280: Set up the extended command location information, based on
     * the context invoking the byte code compiler. This structure is used to
     * keep the per-word line information for all compiled commands.
     *
     * See also tclBasic.c, TclEvalObjEx, for the equivalent code in the
     * non-compiling evaluator
     */

    envPtr->extCmdMapPtr = (ExtCmdLoc *)Tcl_Alloc(sizeof(ExtCmdLoc));
    envPtr->extCmdMapPtr->loc = NULL;
    envPtr->extCmdMapPtr->nloc = 0;
    envPtr->extCmdMapPtr->nuloc = 0;
    envPtr->extCmdMapPtr->path = NULL;

    if (invoker == NULL) {
	/*
	 * Initialize the compiler for relative counting in case of a
	 * dynamic context.
	 */

	envPtr->line = 1;
	if (iPtr->evalFlags & TCL_EVAL_FILE) {
	    iPtr->evalFlags &= ~TCL_EVAL_FILE;
	    envPtr->extCmdMapPtr->type = TCL_LOCATION_SOURCE;

	    if (iPtr->scriptFile) {
		/*
		 * Normalization here, to have the correct pwd. Should have
		 * negligible impact on performance, as the norm should have
		 * been done already by the 'source' invoking us, and it
		 * caches the result.
		 */

		Tcl_Obj *norm =
			Tcl_FSGetNormalizedPath(interp, iPtr->scriptFile);

		if (norm == NULL) {
		    /*
		     * Error message in the interp result. No place to put it.
		     * And no place to serve the error itself to either. Fake
		     * a path, empty string.
		     */

		    TclNewLiteralStringObj(envPtr->extCmdMapPtr->path, "");
		} else {
		    envPtr->extCmdMapPtr->path = norm;
		}
	    } else {
		TclNewLiteralStringObj(envPtr->extCmdMapPtr->path, "");
	    }

	    Tcl_IncrRefCount(envPtr->extCmdMapPtr->path);
	} else {
	    envPtr->extCmdMapPtr->type =
		(envPtr->procPtr ? TCL_LOCATION_PROC : TCL_LOCATION_BC);
	}
    } else {
	/*
	 * Initialize the compiler using the context, making counting absolute
	 * to that context. Note that the context can be byte code execution.
	 * In that case we have to fill out the missing pieces (line, path,
	 * ...) which may make change the type as well.
	 */

	CmdFrame *ctxPtr = (CmdFrame *)TclStackAlloc(interp, sizeof(CmdFrame));
	int pc = 0;

	*ctxPtr = *invoker;
	if (invoker->type == TCL_LOCATION_BC) {
	    /*
	     * Note: Type BC => ctx.data.eval.path is not used.
	     * ctx.data.tebc.codePtr is used instead.
	     */

	    TclGetSrcInfoForPc(ctxPtr);
	    pc = 1;
	}

	if ((ctxPtr->nline <= word) || (ctxPtr->line[word] < 0)) {
	    /*
	     * Word is not a literal, relative counting.
	     */

	    envPtr->line = 1;
	    envPtr->extCmdMapPtr->type =
		    (envPtr->procPtr ? TCL_LOCATION_PROC : TCL_LOCATION_BC);

	    if (pc && (ctxPtr->type == TCL_LOCATION_SOURCE)) {
		/*
		 * The reference made by 'TclGetSrcInfoForPc' is dead.
		 */

		Tcl_DecrRefCount(ctxPtr->data.eval.path);
	    }
	} else {
	    envPtr->line = ctxPtr->line[word];
	    envPtr->extCmdMapPtr->type = ctxPtr->type;

	    if (ctxPtr->type == TCL_LOCATION_SOURCE) {
		envPtr->extCmdMapPtr->path = ctxPtr->data.eval.path;

		if (pc) {
		    /*
		     * The reference 'TclGetSrcInfoForPc' made is transfered.
		     */

		    ctxPtr->data.eval.path = NULL;
		} else {
		    /*
		     * We have a new reference here.
		     */

		    Tcl_IncrRefCount(envPtr->extCmdMapPtr->path);
		}
	    }
	}
	TclStackFree(interp, ctxPtr);
    }

    envPtr->extCmdMapPtr->start = envPtr->line;

    /*
     * Initialize the data about invisible continuation lines as empty, i.e.
     * not used. The caller (TclSetByteCodeFromAny) will set this up, if such
     * data is available.
     */

    envPtr->clNext = NULL;

    envPtr->auxDataArrayPtr = envPtr->staticAuxDataArraySpace;
    envPtr->auxDataArrayNext = 0;
    envPtr->auxDataArrayEnd = COMPILEENV_INIT_AUX_DATA_SIZE;
    envPtr->mallocedAuxDataArray = 0;
}

/*
 *----------------------------------------------------------------------
 *
 * TclFreeCompileEnv --
 *
 *	Free the storage allocated in a CompileEnv compilation environment
 *	structure.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Allocated storage in the CompileEnv structure is freed. Note that its
 *	local literal table is not deleted and its literal objects are not
 *	released. In addition, storage referenced by its auxiliary data items
 *	is not freed. This is done so that, when compilation is successful,
 *	"ownership" of these objects and aux data items is handed over to the
 *	corresponding ByteCode structure.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeCompileEnv(
    CompileEnv *envPtr)		/* Points to the CompileEnv structure. */
{
    if (envPtr->localLitTable.buckets != envPtr->localLitTable.staticBuckets){
	Tcl_Free(envPtr->localLitTable.buckets);
	envPtr->localLitTable.buckets = envPtr->localLitTable.staticBuckets;
    }
    if (envPtr->iPtr) {
	/*
	 * We never converted to Bytecode, so free the things we would
	 * have transferred to it.
	 */

	int i;
	LiteralEntry *entryPtr = envPtr->literalArrayPtr;
	AuxData *auxDataPtr = envPtr->auxDataArrayPtr;

	for (i = 0; i < envPtr->literalArrayNext; i++) {
	    TclReleaseLiteral((Tcl_Interp *)envPtr->iPtr, entryPtr->objPtr);
	    entryPtr++;
	}

#ifdef TCL_COMPILE_DEBUG
	TclVerifyGlobalLiteralTable(envPtr->iPtr);
#endif /*TCL_COMPILE_DEBUG*/

	for (i = 0; i < envPtr->auxDataArrayNext; i++) {
	    if (auxDataPtr->type->freeProc != NULL) {
		auxDataPtr->type->freeProc(auxDataPtr->clientData);
	    }
	    auxDataPtr++;
	}
    }
    if (envPtr->mallocedCodeArray) {
	Tcl_Free(envPtr->codeStart);
    }
    if (envPtr->mallocedLiteralArray) {
	Tcl_Free(envPtr->literalArrayPtr);
    }
    if (envPtr->mallocedExceptArray) {
	Tcl_Free(envPtr->exceptArrayPtr);
	Tcl_Free(envPtr->exceptAuxArrayPtr);
    }
    if (envPtr->mallocedCmdMap) {
	Tcl_Free(envPtr->cmdMapPtr);
    }
    if (envPtr->mallocedAuxDataArray) {
	Tcl_Free(envPtr->auxDataArrayPtr);
    }
    if (envPtr->extCmdMapPtr) {
	ReleaseCmdWordData(envPtr->extCmdMapPtr);
	envPtr->extCmdMapPtr = NULL;
    }
}

/*
 *----------------------------------------------------------------------
 *
 * TclWordKnownAtCompileTime --
 *
 *	Test whether the value of a token is completely known at compile time.
 *
 * Results:
 *	Returns true if the tokenPtr argument points to a word value that is
 *	completely known at compile time. Generally, values that are known at
 *	compile time can be compiled to their values, while values that cannot
 *	be known until substitution at runtime must be compiled to bytecode
 *	instructions that perform that substitution. For several commands,
 *	whether or not arguments are known at compile time determine whether
 *	it is worthwhile to compile at all.
 *
 * Side effects:
 *	When returning true, appends the known value of the word to the
 *	unshared Tcl_Obj (*valuePtr), unless valuePtr is NULL.
 *
 *----------------------------------------------------------------------
 */

int
TclWordKnownAtCompileTime(
    Tcl_Token *tokenPtr,	/* Points to Tcl_Token we should check */
    Tcl_Obj *valuePtr)		/* If not NULL, points to an unshared Tcl_Obj
				 * to which we should append the known value
				 * of the word. */
{
    int numComponents = tokenPtr->numComponents;
    Tcl_Obj *tempPtr = NULL;

    if (tokenPtr->type == TCL_TOKEN_SIMPLE_WORD) {
	if (valuePtr != NULL) {
	    Tcl_AppendToObj(valuePtr, tokenPtr[1].start, tokenPtr[1].size);
	}
	return 1;
    }
    if (tokenPtr->type != TCL_TOKEN_WORD) {
	return 0;
    }

    /*
     * A TCL_TOKEN_WORD is known only when every component is plain text
     * or a backslash escape; accumulate the decoded value in tempPtr.
     */

    tokenPtr++;
    if (valuePtr != NULL) {
	tempPtr = Tcl_NewObj();
	Tcl_IncrRefCount(tempPtr);
    }
    while (numComponents--) {
	switch (tokenPtr->type) {
	case TCL_TOKEN_TEXT:
	    if (tempPtr != NULL) {
		Tcl_AppendToObj(tempPtr, tokenPtr->start, tokenPtr->size);
	    }
	    break;

	case TCL_TOKEN_BS:
	    if (tempPtr != NULL) {
		char utfBuf[4] = "";
		size_t length = TclParseBackslash(tokenPtr->start,
			tokenPtr->size, NULL, utfBuf);

		Tcl_AppendToObj(tempPtr, utfBuf, length);
	    }
	    break;

	default:
	    /* Any other component (command, variable) is runtime-only. */
	    if (tempPtr != NULL) {
		Tcl_DecrRefCount(tempPtr);
	    }
	    return 0;
	}
	tokenPtr++;
    }
    if (valuePtr != NULL) {
	Tcl_AppendObjToObj(valuePtr, tempPtr);
	Tcl_DecrRefCount(tempPtr);
    }
    return 1;
}

/*
 *----------------------------------------------------------------------
 *
 * TclCompileScript --
 *
 *	Compile a Tcl script in a string.
 *
 * Results:
 *	The return value is TCL_OK on a successful compilation and TCL_ERROR
 *	on failure. If TCL_ERROR is returned, then the interpreter's result
 *	contains an error message.
 *
 * Side effects:
 *	Adds instructions to envPtr to evaluate the script at runtime.
* *---------------------------------------------------------------------- */ static int ExpandRequested( Tcl_Token *tokenPtr, size_t numWords) { /* Determine whether any words of the command require expansion */ while (numWords--) { if (tokenPtr->type == TCL_TOKEN_EXPAND_WORD) { return 1; } tokenPtr = TokenAfter(tokenPtr); } return 0; } static void CompileCmdLiteral( Tcl_Interp *interp, Tcl_Obj *cmdObj, CompileEnv *envPtr) { const char *bytes; Command *cmdPtr; int cmdLitIdx, extraLiteralFlags = LITERAL_CMD_NAME; size_t length; cmdPtr = (Command *) Tcl_GetCommandFromObj(interp, cmdObj); if ((cmdPtr != NULL) && (cmdPtr->flags & CMD_VIA_RESOLVER)) { extraLiteralFlags |= LITERAL_UNSHARED; } bytes = TclGetStringFromObj(cmdObj, &length); cmdLitIdx = TclRegisterLiteral(envPtr, bytes, length, extraLiteralFlags); if (cmdPtr) { TclSetCmdNameObj(interp, TclFetchLiteral(envPtr, cmdLitIdx), cmdPtr); } TclEmitPush(cmdLitIdx, envPtr); } void TclCompileInvocation( Tcl_Interp *interp, Tcl_Token *tokenPtr, Tcl_Obj *cmdObj, size_t numWords, CompileEnv *envPtr) { size_t wordIdx = 0; int depth = TclGetStackDepth(envPtr); DefineLineInformation; if (cmdObj) { CompileCmdLiteral(interp, cmdObj, envPtr); wordIdx = 1; tokenPtr = TokenAfter(tokenPtr); } for (; wordIdx < numWords; wordIdx++, tokenPtr = TokenAfter(tokenPtr)) { int objIdx; SetLineInformation(wordIdx); if (tokenPtr->type != TCL_TOKEN_SIMPLE_WORD) { CompileTokens(envPtr, tokenPtr, interp); continue; } objIdx = TclRegisterLiteral(envPtr, tokenPtr[1].start, tokenPtr[1].size, 0); if (envPtr->clNext) { TclContinuationsEnterDerived(TclFetchLiteral(envPtr, objIdx), tokenPtr[1].start - envPtr->source, envPtr->clNext); } TclEmitPush(objIdx, envPtr); } if (wordIdx <= 255) { TclEmitInvoke(envPtr, INST_INVOKE_STK1, wordIdx); } else { TclEmitInvoke(envPtr, INST_INVOKE_STK4, wordIdx); } TclCheckStackDepth(depth+1, envPtr); } static void CompileExpanded( Tcl_Interp *interp, Tcl_Token *tokenPtr, Tcl_Obj *cmdObj, int numWords, CompileEnv *envPtr) 
{ int wordIdx = 0; DefineLineInformation; int depth = TclGetStackDepth(envPtr); StartExpanding(envPtr); if (cmdObj) { CompileCmdLiteral(interp, cmdObj, envPtr); wordIdx = 1; tokenPtr = TokenAfter(tokenPtr); } for (; wordIdx < numWords; wordIdx++, tokenPtr = TokenAfter(tokenPtr)) { int objIdx; SetLineInformation(wordIdx); if (tokenPtr->type != TCL_TOKEN_SIMPLE_WORD) { CompileTokens(envPtr, tokenPtr, interp); if (tokenPtr->type == TCL_TOKEN_EXPAND_WORD) { TclEmitInstInt4(INST_EXPAND_STKTOP, envPtr->currStackDepth, envPtr); } continue; } objIdx = TclRegisterLiteral(envPtr, tokenPtr[1].start, tokenPtr[1].size, 0); if (envPtr->clNext) { TclContinuationsEnterDerived(TclFetchLiteral(envPtr, objIdx), tokenPtr[1].start - envPtr->source, envPtr->clNext); } TclEmitPush(objIdx, envPtr); } /* * The stack depth during argument expansion can only be managed at * runtime, as the number of elements in the expanded lists is not known * at compile time. We adjust here the stack depth estimate so that it is * correct after the command with expanded arguments returns. * * The end effect of this command's invocation is that all the words of * the command are popped from the stack, and the result is pushed: the * stack top changes by (1-wordIdx). * * Note that the estimates are not correct while the command is being * prepared and run, INST_EXPAND_STKTOP is not stack-neutral in general. */ TclEmitInvoke(envPtr, INST_INVOKE_EXPANDED, wordIdx); TclCheckStackDepth(depth+1, envPtr); } static int CompileCmdCompileProc( Tcl_Interp *interp, Tcl_Parse *parsePtr, Command *cmdPtr, CompileEnv *envPtr) { int unwind = 0, incrOffset = -1; DefineLineInformation; int depth = TclGetStackDepth(envPtr); /* * Emit of the INST_START_CMD instruction is controlled by the value of * envPtr->atCmdStart: * * atCmdStart == 2 : We are not using the INST_START_CMD instruction. * atCmdStart == 1 : INST_START_CMD was the last instruction emitted. * : We do not need to emit another. 
Instead we * : increment the number of cmds started at it (except * : for the special case at the start of a script.) * atCmdStart == 0 : The last instruction was something else. We need * : to emit INST_START_CMD here. */ switch (envPtr->atCmdStart) { case 0: unwind = tclInstructionTable[INST_START_CMD].numBytes; TclEmitInstInt4(INST_START_CMD, 0, envPtr); incrOffset = envPtr->codeNext - envPtr->codeStart; TclEmitInt4(0, envPtr); break; case 1: if (envPtr->codeNext > envPtr->codeStart) { incrOffset = envPtr->codeNext - 4 - envPtr->codeStart; } break; case 2: /* Nothing to do */ ; } if (TCL_OK == TclAttemptCompileProc(interp, parsePtr, 1, cmdPtr, envPtr)) { if (incrOffset >= 0) { /* * We successfully compiled a command. Increment the number of * commands that start at the currently active INST_START_CMD. */ unsigned char *incrPtr = envPtr->codeStart + incrOffset; unsigned char *startPtr = incrPtr - 5; TclIncrUInt4AtPtr(incrPtr, 1); if (unwind) { /* We started the INST_START_CMD. Record the code length. */ TclStoreInt4AtPtr(envPtr->codeNext - startPtr, startPtr + 1); } } TclCheckStackDepth(depth+1, envPtr); return TCL_OK; } envPtr->codeNext -= unwind; /* Unwind INST_START_CMD */ /* * Throw out any line information generated by the failed compile attempt. */ while (mapPtr->nuloc - 1 > eclIndex) { mapPtr->nuloc--; Tcl_Free(mapPtr->loc[mapPtr->nuloc].line); mapPtr->loc[mapPtr->nuloc].line = NULL; } /* * Reset the index of next command. Toss out any from failed nested * partial compiles. 
*/ envPtr->numCommands = mapPtr->nuloc; return TCL_ERROR; } static int CompileCommandTokens( Tcl_Interp *interp, Tcl_Parse *parsePtr, CompileEnv *envPtr) { Interp *iPtr = (Interp *) interp; Tcl_Token *tokenPtr = parsePtr->tokenPtr; ExtCmdLoc *eclPtr = envPtr->extCmdMapPtr; Tcl_Obj *cmdObj = Tcl_NewObj(); Command *cmdPtr = NULL; int code = TCL_ERROR; int cmdKnown, expand = -1; int *wlines, wlineat; int cmdLine = envPtr->line; int *clNext = envPtr->clNext; int cmdIdx = envPtr->numCommands; int startCodeOffset = envPtr->codeNext - envPtr->codeStart; int depth = TclGetStackDepth(envPtr); assert (parsePtr->numWords > 0); /* Pre-Compile */ envPtr->numCommands++; EnterCmdStartData(envPtr, cmdIdx, parsePtr->commandStart - envPtr->source, startCodeOffset); /* * TIP #280. Scan the words and compute the extended location information. * The map first contain full per-word line information for use by the * compiler. This is later replaced by a reduced form which signals * non-literal words, stored in 'wlines'. */ EnterCmdWordData(eclPtr, parsePtr->commandStart - envPtr->source, parsePtr->tokenPtr, parsePtr->commandStart, parsePtr->numWords, cmdLine, clNext, &wlines, envPtr); wlineat = eclPtr->nuloc - 1; envPtr->line = eclPtr->loc[wlineat].line[0]; envPtr->clNext = eclPtr->loc[wlineat].next[0]; /* Do we know the command word? */ Tcl_IncrRefCount(cmdObj); tokenPtr = parsePtr->tokenPtr; cmdKnown = TclWordKnownAtCompileTime(tokenPtr, cmdObj); /* Is this a command we should (try to) compile with a compileProc ? */ if (cmdKnown && !(iPtr->flags & DONT_COMPILE_CMDS_INLINE)) { cmdPtr = (Command *) Tcl_GetCommandFromObj(interp, cmdObj); if (cmdPtr) { /* * Found a command. Test the ways we can be told not to attempt * to compile it. 
*/ if ((cmdPtr->compileProc == NULL) || (cmdPtr->nsPtr->flags & NS_SUPPRESS_COMPILATION) || (cmdPtr->flags & CMD_HAS_EXEC_TRACES)) { cmdPtr = NULL; } } if (cmdPtr && !(cmdPtr->flags & CMD_COMPILES_EXPANDED)) { expand = ExpandRequested(parsePtr->tokenPtr, parsePtr->numWords); if (expand) { /* We need to expand, but compileProc cannot. */ cmdPtr = NULL; } } } /* If cmdPtr != NULL, we will try to call cmdPtr->compileProc */ if (cmdPtr) { code = CompileCmdCompileProc(interp, parsePtr, cmdPtr, envPtr); } if (code == TCL_ERROR) { if (expand < 0) { expand = ExpandRequested(parsePtr->tokenPtr, parsePtr->numWords); } if (expand) { CompileExpanded(interp, parsePtr->tokenPtr, cmdKnown ? cmdObj : NULL, parsePtr->numWords, envPtr); } else { TclCompileInvocation(interp, parsePtr->tokenPtr, cmdKnown ? cmdObj : NULL, parsePtr->numWords, envPtr); } } Tcl_DecrRefCount(cmdObj); TclEmitOpcode(INST_POP, envPtr); EnterCmdExtentData(envPtr, cmdIdx, parsePtr->term - parsePtr->commandStart, (envPtr->codeNext-envPtr->codeStart) - startCodeOffset); /* * TIP #280: Free full form of per-word line data and insert the reduced * form now */ envPtr->line = cmdLine; envPtr->clNext = clNext; Tcl_Free(eclPtr->loc[wlineat].line); Tcl_Free(eclPtr->loc[wlineat].next); eclPtr->loc[wlineat].line = wlines; eclPtr->loc[wlineat].next = NULL; TclCheckStackDepth(depth, envPtr); return cmdIdx; } void TclCompileScript( Tcl_Interp *interp, /* Used for error and status reporting. Also * serves as context for finding and compiling * commands. May not be NULL. */ const char *script, /* The source script to compile. */ size_t numBytes, /* Number of bytes in script. If -1, the * script consists of all bytes up to the * first null character. */ CompileEnv *envPtr) /* Holds resulting instructions. */ { int lastCmdIdx = -1; /* Index into envPtr->cmdMapPtr of the last * command this routine compiles into bytecode. * Initial value of -1 indicates this routine * has not yet generated any bytecode. 
*/ const char *p = script; /* Where we are in our compile. */ int depth = TclGetStackDepth(envPtr); Interp *iPtr = (Interp *) interp; if (envPtr->iPtr == NULL) { Tcl_Panic("TclCompileScript() called on uninitialized CompileEnv"); } /* * Check depth to avoid overflow of the C execution stack by too many * nested calls of TclCompileScript (considering interp recursionlimit). * Factor 5/4 (1.25) is used to avoid too mistaken limit recognition * during "mixed" evaluation and compilation process (nested eval+compile) * and is good enough for default recursionlimit (1000). */ if (iPtr->numLevels / 5 > iPtr->maxNestingDepth / 4) { Tcl_SetObjResult(interp, Tcl_NewStringObj( "too many nested compilations (infinite loop?)", -1)); Tcl_SetErrorCode(interp, "TCL", "LIMIT", "STACK", NULL); TclCompileSyntaxError(interp, envPtr); return; } /* Each iteration compiles one command from the script. */ if (numBytes + 1 > 1) { /* * Don't use system stack (size of Tcl_Parse is ca. 400 bytes), so * many nested compilations (body enclosed in body) can cause abnormal * program termination with a stack overflow exception, bug [fec0c17d39]. */ Tcl_Parse *parsePtr = (Tcl_Parse *)Tcl_Alloc(sizeof(Tcl_Parse)); do { const char *next; if (TCL_OK != Tcl_ParseCommand(interp, p, numBytes, 0, parsePtr)) { /* * Compile bytecodes to report the parsePtr error at runtime. */ Tcl_LogCommandInfo(interp, script, parsePtr->commandStart, parsePtr->term + 1 - parsePtr->commandStart); TclCompileSyntaxError(interp, envPtr); Tcl_Free(parsePtr); return; } #ifdef TCL_COMPILE_DEBUG /* * If tracing, print a line for each top level command compiled. * TODO: Suppress when numWords == 0 ? */ if ((tclTraceCompile >= 1) && (envPtr->procPtr == NULL)) { int commandLength = parsePtr->term - parsePtr->commandStart; fprintf(stdout, " Compiling: "); TclPrintSource(stdout, parsePtr->commandStart, TclMin(commandLength, 55)); fprintf(stdout, "\n"); } #endif /* * TIP #280: Count newlines before the command start. 
* (See test info-30.33). */ TclAdvanceLines(&envPtr->line, p, parsePtr->commandStart); TclAdvanceContinuations(&envPtr->line, &envPtr->clNext, parsePtr->commandStart - envPtr->source); /* * Advance parser to the next command in the script. */ next = parsePtr->commandStart + parsePtr->commandSize; numBytes -= next - p; p = next; if (parsePtr->numWords == 0) { /* * The "command" parsed has no words. In this case we can skip * the rest of the loop body. With no words, clearly * CompileCommandTokens() has nothing to do. Since the parser * aggressively sucks up leading comment and white space, * including newlines, parsePtr->commandStart must be pointing at * either the end of script, or a command-terminating semi-colon. * In either case, the TclAdvance*() calls have nothing to do. * Finally, when no words are parsed, no tokens have been * allocated at parsePtr->tokenPtr so there's also nothing for * Tcl_FreeParse() to do. * * The advantage of this shortcut is that CompileCommandTokens() * can be written with an assumption that parsePtr->numWords > 0, with * the implication the CCT() always generates bytecode. */ continue; } /* * Avoid stack exhaustion by too many nested calls of TclCompileScript * (considering interp recursionlimit). */ iPtr->numLevels++; lastCmdIdx = CompileCommandTokens(interp, parsePtr, envPtr); iPtr->numLevels--; /* * TIP #280: Track lines in the just compiled command. */ TclAdvanceLines(&envPtr->line, parsePtr->commandStart, p); TclAdvanceContinuations(&envPtr->line, &envPtr->clNext, p - envPtr->source); Tcl_FreeParse(parsePtr); } while (numBytes > 0); Tcl_Free(parsePtr); } if (lastCmdIdx == -1) { /* * Compiling the script yielded no bytecode. The script must be all * whitespace, comments, and empty commands. Such scripts are defined * to successfully produce the empty string result, so we emit the * simple bytecode that makes that happen. */ PushStringLiteral(envPtr, ""); } else { /* * We compiled at least one command to bytecode. 
The routine * CompileCommandTokens() follows the bytecode of each compiled * command with an INST_POP, so that stack balance is maintained when * several commands are in sequence. (The result of each command is * thrown away before moving on to the next command). For the last * command compiled, we need to undo that INST_POP so that the result * of the last command becomes the result of the script. The code * here removes that trailing INST_POP. */ envPtr->cmdMapPtr[lastCmdIdx].numCodeBytes--; envPtr->codeNext--; envPtr->currStackDepth++; } TclCheckStackDepth(depth+1, envPtr); } /* *---------------------------------------------------------------------- * * TclCompileTokens -- * * Given an array of tokens parsed from a Tcl command (e.g., the tokens * that make up a word) this procedure emits instructions to evaluate the * tokens and concatenate their values to form a single result value on * the interpreter's runtime evaluation stack. * * Results: * The return value is a standard Tcl result. If an error occurs, an * error message is left in the interpreter's result. * * Side effects: * Instructions are added to envPtr to push and evaluate the tokens at * runtime. * *---------------------------------------------------------------------- */ void TclCompileVarSubst( Tcl_Interp *interp, Tcl_Token *tokenPtr, CompileEnv *envPtr) { const char *p, *name = tokenPtr[1].start; size_t i, nameBytes = tokenPtr[1].size; int localVar, localVarName = 1; /* * Determine how the variable name should be handled: if it contains any * namespace qualifiers it is not a local variable (localVarName=-1); if * it looks like an array element and the token has a single component, it * should not be created here [Bug 569438] (localVarName=0); otherwise, * the local variable can safely be created (localVarName=1). 
*/ for (i = 0, p = name; i < nameBytes; i++, p++) { if ((*p == ':') && (i < nameBytes-1) && (*(p+1) == ':')) { localVarName = -1; break; } else if ((*p == '(') && (tokenPtr->numComponents == 1) && (*(name + nameBytes - 1) == ')')) { localVarName = 0; break; } } /* * Either push the variable's name, or find its index in the array * of local variables in a procedure frame. */ localVar = -1; if (localVarName != -1) { localVar = TclFindCompiledLocal(name, nameBytes, localVarName, envPtr); } if (localVar < 0) { PushLiteral(envPtr, name, nameBytes); } /* * Emit instructions to load the variable. */ TclAdvanceLines(&envPtr->line, tokenPtr[1].start, tokenPtr[1].start + tokenPtr[1].size); if (tokenPtr->numComponents == 1) { if (localVar < 0) { TclEmitOpcode(INST_LOAD_STK, envPtr); } else if (localVar <= 255) { TclEmitInstInt1(INST_LOAD_SCALAR1, localVar, envPtr); } else { TclEmitInstInt4(INST_LOAD_SCALAR4, localVar, envPtr); } } else { TclCompileTokens(interp, tokenPtr+2, tokenPtr->numComponents-1, envPtr); if (localVar < 0) { TclEmitOpcode(INST_LOAD_ARRAY_STK, envPtr); } else if (localVar <= 255) { TclEmitInstInt1(INST_LOAD_ARRAY1, localVar, envPtr); } else { TclEmitInstInt4(INST_LOAD_ARRAY4, localVar, envPtr); } } } void TclCompileTokens( Tcl_Interp *interp, /* Used for error and status reporting. */ Tcl_Token *tokenPtr, /* Pointer to first in an array of tokens to * compile. */ int count, /* Number of tokens to consider at tokenPtr. * Must be at least 1. */ CompileEnv *envPtr) /* Holds the resulting instructions. */ { Tcl_DString textBuffer; /* Holds concatenated chars from adjacent * TCL_TOKEN_TEXT, TCL_TOKEN_BS tokens. */ char buffer[4] = ""; int i, numObjsToConcat, adjust; size_t length; unsigned char *entryCodeNext = envPtr->codeNext; #define NUM_STATIC_POS 20 int isLiteral, maxNumCL, numCL; int *clPosition = NULL; int depth = TclGetStackDepth(envPtr); /* * For the handling of continuation lines in literals we first check if * this is actually a literal. 
For if not we can forego the additional
     * processing. Otherwise we pre-allocate a small table to store the
     * locations of all continuation lines we find in this literal, if any.
     * The table is extended if needed.
     *
     * Note: Different to the equivalent code in function 'TclSubstTokens()'
     * (see file "tclParse.c") we do not seem to need the 'adjust' variable.
     * We also do not seem to need code which merges continuation line
     * information of multiple words which concat'd at runtime. Either that or
     * I have not managed to find a test case for these two possibilities yet.
     * It might be a difference between compile- versus run-time processing.
     */

    /*
     * The word is a pure literal iff it consists solely of plain-text and
     * backslash tokens; only then do we track continuation-line positions.
     */

    numCL = 0;
    maxNumCL = 0;
    isLiteral = 1;
    for (i=0 ; i < count; i++) {
	if ((tokenPtr[i].type != TCL_TOKEN_TEXT)
		&& (tokenPtr[i].type != TCL_TOKEN_BS)) {
	    isLiteral = 0;
	    break;
	}
    }

    if (isLiteral) {
	maxNumCL = NUM_STATIC_POS;
	clPosition = (int *)Tcl_Alloc(maxNumCL * sizeof(int));
    }

    adjust = 0;
    Tcl_DStringInit(&textBuffer);
    numObjsToConcat = 0;
    for ( ; count > 0; count--, tokenPtr++) {
	switch (tokenPtr->type) {
	case TCL_TOKEN_TEXT:
	    /* Plain text: accumulate into the pending literal buffer. */
	    TclDStringAppendToken(&textBuffer, tokenPtr);
	    TclAdvanceLines(&envPtr->line, tokenPtr->start,
		    tokenPtr->start + tokenPtr->size);
	    break;

	case TCL_TOKEN_BS:
	    /* Backslash sequence: decode it, then accumulate the result. */
	    length = TclParseBackslash(tokenPtr->start, tokenPtr->size,
		    NULL, buffer);
	    Tcl_DStringAppend(&textBuffer, buffer, length);

	    /*
	     * If the backslash sequence we found is in a literal, and
	     * represented a continuation line, we compute and store its
	     * location (as char offset to the beginning of the _result_
	     * script). We may have to extend the table of locations.
	     *
	     * Note that the continuation line information is relevant even if
	     * the word we are processing is not a literal, as it can affect
	     * nested commands. See the branch for TCL_TOKEN_COMMAND below,
	     * where the adjustment we are tracking here is taken into
	     * account. The good thing is that we do not need a table of
	     * everything, just the number of lines we have to add as
	     * correction.
	     */

	    if ((length == 1) && (buffer[0] == ' ')
		    && (tokenPtr->start[1] == '\n')) {
		if (isLiteral) {
		    int clPos = Tcl_DStringLength(&textBuffer);

		    if (numCL >= maxNumCL) {
			/* Table full: double its capacity. */
			maxNumCL *= 2;
			clPosition = (int *)Tcl_Realloc(clPosition,
				maxNumCL * sizeof(int));
		    }
		    clPosition[numCL] = clPos;
		    numCL ++;
		}
		adjust++;
	    }
	    break;

	case TCL_TOKEN_COMMAND:
	    /*
	     * Push any accumulated chars appearing before the command.
	     */

	    if (Tcl_DStringLength(&textBuffer) > 0) {
		int literal = TclRegisterDStringLiteral(envPtr, &textBuffer);

		TclEmitPush(literal, envPtr);
		numObjsToConcat++;
		Tcl_DStringFree(&textBuffer);

		if (numCL) {
		    TclContinuationsEnter(TclFetchLiteral(envPtr, literal),
			    numCL, clPosition);
		}
		numCL = 0;
	    }

	    /*
	     * Compile the nested script (between the brackets) with the line
	     * counter corrected by the continuation lines seen so far.
	     */

	    envPtr->line += adjust;
	    TclCompileScript(interp, tokenPtr->start+1, tokenPtr->size-2,
		    envPtr);
	    envPtr->line -= adjust;
	    numObjsToConcat++;
	    break;

	case TCL_TOKEN_VARIABLE:
	    /*
	     * Push any accumulated chars appearing before the $<var>.
	     */

	    if (Tcl_DStringLength(&textBuffer) > 0) {
		int literal;

		literal = TclRegisterDStringLiteral(envPtr, &textBuffer);
		TclEmitPush(literal, envPtr);
		numObjsToConcat++;
		Tcl_DStringFree(&textBuffer);
	    }

	    TclCompileVarSubst(interp, tokenPtr, envPtr);
	    numObjsToConcat++;
	    /* Skip the sub-tokens consumed by the variable substitution. */
	    count -= tokenPtr->numComponents;
	    tokenPtr += tokenPtr->numComponents;
	    break;

	default:
	    Tcl_Panic("Unexpected token type in TclCompileTokens: %d; %.*s",
		    tokenPtr->type, (int)tokenPtr->size, tokenPtr->start);
	}
    }

    /*
     * Push any accumulated characters appearing at the end.
     */

    if (Tcl_DStringLength(&textBuffer) > 0) {
	int literal = TclRegisterDStringLiteral(envPtr, &textBuffer);

	TclEmitPush(literal, envPtr);
	numObjsToConcat++;
	if (numCL) {
	    TclContinuationsEnter(TclFetchLiteral(envPtr, literal), numCL,
		    clPosition);
	}
	numCL = 0;
    }

    /*
     * If necessary, concatenate the parts of the word.
     */

    while (numObjsToConcat > 255) {
	/* INST_STR_CONCAT1 takes at most 255 operands. */
	TclEmitInstInt1(INST_STR_CONCAT1, 255, envPtr);
	numObjsToConcat -= 254;	/* concat pushes 1 obj, the result */
    }
    if (numObjsToConcat > 1) {
	TclEmitInstInt1(INST_STR_CONCAT1, numObjsToConcat, envPtr);
    }

    /*
     * If the tokens yielded no instructions, push an empty string.
     */

    if (envPtr->codeNext == entryCodeNext) {
	PushStringLiteral(envPtr, "");
    }
    Tcl_DStringFree(&textBuffer);

    /*
     * Release the temp table we used to collect the locations of continuation
     * lines, if any.
     */

    if (maxNumCL) {
	Tcl_Free(clPosition);
    }
    TclCheckStackDepth(depth+1, envPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclCompileCmdWord --
 *
 *	Given an array of parse tokens for a word containing one or more Tcl
 *	commands, emit inline instructions to execute them. This procedure
 *	differs from TclCompileTokens in that a simple word such as a loop
 *	body enclosed in braces is not just pushed as a string, but is itself
 *	parsed into tokens and compiled.
 *
 * Results:
 *	The return value is a standard Tcl result. If an error occurs, an
 *	error message is left in the interpreter's result.
 *
 * Side effects:
 *	Instructions are added to envPtr to execute the tokens at runtime.
 *
 *----------------------------------------------------------------------
 */

void
TclCompileCmdWord(
    Tcl_Interp *interp,		/* Used for error and status reporting. */
    Tcl_Token *tokenPtr,	/* Pointer to first in an array of tokens for
				 * a command word to compile inline. */
    int count,			/* Number of tokens to consider at tokenPtr.
				 * Must be at least 1. */
    CompileEnv *envPtr)		/* Holds the resulting instructions. */
{
    if ((count == 1) && (tokenPtr->type == TCL_TOKEN_TEXT)) {
	/*
	 * Handle the common case: if there is a single text token, compile it
	 * into an inline sequence of instructions.
	 */

	TclCompileScript(interp, tokenPtr->start, tokenPtr->size, envPtr);
    } else {
	/*
	 * Multiple tokens or the single token involves substitutions.
Emit
	 * instructions to invoke the eval command procedure at runtime on the
	 * result of evaluating the tokens.
	 */

	TclCompileTokens(interp, tokenPtr, count, envPtr);
	TclEmitInvoke(envPtr, INST_EVAL_STK);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * TclCompileExprWords --
 *
 *	Given an array of parse tokens representing one or more words that
 *	contain a Tcl expression, emit inline instructions to execute the
 *	expression. This procedure differs from TclCompileExpr in that it
 *	supports Tcl's two-level substitution semantics for expressions that
 *	appear as command words.
 *
 * Results:
 *	The return value is a standard Tcl result. If an error occurs, an
 *	error message is left in the interpreter's result.
 *
 * Side effects:
 *	Instructions are added to envPtr to execute the expression.
 *
 *----------------------------------------------------------------------
 */

void
TclCompileExprWords(
    Tcl_Interp *interp,		/* Used for error and status reporting. */
    Tcl_Token *tokenPtr,	/* Points to first in an array of word tokens
				 * tokens for the expression to compile
				 * inline. */
    int numWords,		/* Number of word tokens starting at tokenPtr.
				 * Must be at least 1. Each word token
				 * contains one or more subtokens. */
    CompileEnv *envPtr)		/* Holds the resulting instructions. */
{
    Tcl_Token *wordPtr;
    int i, concatItems;

    /*
     * If the expression is a single word that doesn't require substitutions,
     * just compile its string into inline instructions.
     */

    if ((numWords == 1) && (tokenPtr->type == TCL_TOKEN_SIMPLE_WORD)) {
	TclCompileExpr(interp, tokenPtr[1].start,tokenPtr[1].size, envPtr, 1);
	return;
    }

    /*
     * Emit code to call the expr command proc at runtime. Concatenate the
     * (already substituted once) expr tokens with a space between each.
     */

    wordPtr = tokenPtr;
    for (i = 0; i < numWords; i++) {
	CompileTokens(envPtr, wordPtr, interp);
	if (i < (numWords - 1)) {
	    /* Separate adjacent words with a single space. */
	    PushStringLiteral(envPtr, " ");
	}
	wordPtr += wordPtr->numComponents + 1;
    }
    /* numWords word results plus (numWords - 1) separator spaces. */
    concatItems = 2*numWords - 1;
    while (concatItems > 255) {
	/* INST_STR_CONCAT1 takes at most 255 operands; it pushes 1 result. */
	TclEmitInstInt1(INST_STR_CONCAT1, 255, envPtr);
	concatItems -= 254;
    }
    if (concatItems > 1) {
	TclEmitInstInt1(INST_STR_CONCAT1, concatItems, envPtr);
    }
    TclEmitOpcode(INST_EXPR_STK, envPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclCompileNoOp --
 *
 *	Function called to compile no-op's
 *
 * Results:
 *	The return value is TCL_OK, indicating successful compilation.
 *
 * Side effects:
 *	Instructions are added to envPtr to execute a no-op at runtime. No
 *	result is pushed onto the stack: the compiler has to take care of this
 *	itself if the last compiled command is a NoOp.
 *
 *----------------------------------------------------------------------
 */

int
TclCompileNoOp(
    Tcl_Interp *interp,		/* Used for error reporting. */
    Tcl_Parse *parsePtr,	/* Points to a parse structure for the command
				 * created by Tcl_ParseCommand. */
    TCL_UNUSED(Command *),
    CompileEnv *envPtr)		/* Holds resulting instructions. */
{
    Tcl_Token *tokenPtr;
    int i;

    tokenPtr = parsePtr->tokenPtr;
    for (i = 1; i < parsePtr->numWords; i++) {
	tokenPtr = tokenPtr + tokenPtr->numComponents + 1;

	/*
	 * Non-simple words may have side effects from substitution, so they
	 * are still evaluated; their results are discarded.
	 */

	if (tokenPtr->type != TCL_TOKEN_SIMPLE_WORD) {
	    CompileTokens(envPtr, tokenPtr, interp);
	    TclEmitOpcode(INST_POP, envPtr);
	}
    }
    PushStringLiteral(envPtr, "");
    return TCL_OK;
}

/*
 *----------------------------------------------------------------------
 *
 * TclInitByteCodeObj --
 *
 *	Create a ByteCode structure and initialize it from a CompileEnv
 *	compilation environment structure. The ByteCode structure is smaller
 *	and contains just that information needed to execute the bytecode
 *	instructions resulting from compiling a Tcl script. The resulting
 *	structure is placed in the specified object.
 *
 * Results:
 *	A newly constructed ByteCode object is stored in the internal
 *	representation of the objPtr.
 *
 * Side effects:
 *	A single heap object is allocated to hold the new ByteCode structure
 *	and its code, object, command location, and aux data arrays. Note that
 *	"ownership" (i.e., the pointers to) the Tcl objects and aux data items
 *	will be handed over to the new ByteCode structure from the CompileEnv
 *	structure.
 *
 *----------------------------------------------------------------------
 */

static void
PreventCycle(
    Tcl_Obj *objPtr,		/* Value whose bytecode is being built. */
    CompileEnv *envPtr)		/* Compile environment holding the literal
				 * array to scan. */
{
    int i;

    for (i = 0; i < envPtr->literalArrayNext; i++) {
	if (objPtr == TclFetchLiteral(envPtr, i)) {
	    /*
	     * Prevent circular reference where the bytecode intrep of
	     * a value contains a literal which is that same value.
	     * If this is allowed to happen, refcount decrements may not
	     * reach zero, and memory may leak. Bugs 467523, 3357771
	     *
	     * NOTE: [Bugs 3392070, 3389764] We make a copy based completely
	     * on the string value, and do not call Tcl_DuplicateObj() so we
	     * can be sure we do not have any lingering cycles hiding in
	     * the intrep.
	     */

	    size_t numBytes;
	    const char *bytes = TclGetStringFromObj(objPtr, &numBytes);
	    Tcl_Obj *copyPtr = Tcl_NewStringObj(bytes, numBytes);

	    Tcl_IncrRefCount(copyPtr);
	    TclReleaseLiteral((Tcl_Interp *)envPtr->iPtr, objPtr);
	    envPtr->literalArrayPtr[i].objPtr = copyPtr;
	}
    }
}

ByteCode *
TclInitByteCode(
    CompileEnv *envPtr)		/* Points to the CompileEnv structure from
				 * which to create a ByteCode structure. */
{
    ByteCode *codePtr;
    size_t codeBytes, objArrayBytes, exceptArrayBytes, cmdLocBytes;
    size_t auxDataArrayBytes, structureSize;
    unsigned char *p;
#ifdef TCL_COMPILE_DEBUG
    unsigned char *nextPtr;
#endif
    int numLitObjects = envPtr->literalArrayNext;
    Namespace *namespacePtr;
    int i, isNew;
    Interp *iPtr;

    if (envPtr->iPtr == NULL) {
	Tcl_Panic("TclInitByteCodeObj() called on uninitialized CompileEnv");
    }

    iPtr = envPtr->iPtr;

    codeBytes = envPtr->codeNext - envPtr->codeStart;
    objArrayBytes = envPtr->literalArrayNext * sizeof(Tcl_Obj *);
    exceptArrayBytes = envPtr->exceptArrayNext * sizeof(ExceptionRange);
    auxDataArrayBytes = envPtr->auxDataArrayNext * sizeof(AuxData);
    cmdLocBytes = GetCmdLocEncodingSize(envPtr);

    /*
     * Compute the total number of bytes needed for this bytecode:
     * a single allocation carries the ByteCode header followed by the
     * code, literal-object, exception-range, aux-data and command-location
     * arrays, each aligned as required.
     */

    structureSize = sizeof(ByteCode);
    structureSize += TCL_ALIGN(codeBytes);	  /* align object array */
    structureSize += TCL_ALIGN(objArrayBytes);	  /* align exc range arr */
    structureSize += TCL_ALIGN(exceptArrayBytes); /* align AuxData array */
    structureSize += auxDataArrayBytes;
    structureSize += cmdLocBytes;

    if (envPtr->iPtr->varFramePtr != NULL) {
	namespacePtr = envPtr->iPtr->varFramePtr->nsPtr;
    } else {
	namespacePtr = envPtr->iPtr->globalNsPtr;
    }

    p = (unsigned char *)Tcl_Alloc(structureSize);
    codePtr = (ByteCode *) p;
    codePtr->interpHandle = TclHandlePreserve(iPtr->handle);
    codePtr->compileEpoch = iPtr->compileEpoch;
    codePtr->nsPtr = namespacePtr;
    codePtr->nsEpoch = namespacePtr->resolverEpoch;
    codePtr->refCount = 0;
    TclPreserveByteCode(codePtr);
    if (namespacePtr->compiledVarResProc || iPtr->resolverPtr) {
	codePtr->flags = TCL_BYTECODE_RESOLVE_VARS;
    } else {
	codePtr->flags = 0;
    }
    codePtr->source = envPtr->source;
    codePtr->procPtr = envPtr->procPtr;

    codePtr->numCommands = envPtr->numCommands;
    codePtr->numSrcBytes = envPtr->numSrcBytes;
    codePtr->numCodeBytes = codeBytes;
    codePtr->numLitObjects = numLitObjects;
    codePtr->numExceptRanges = envPtr->exceptArrayNext;
    codePtr->numAuxDataItems = envPtr->auxDataArrayNext;
    codePtr->numCmdLocBytes = cmdLocBytes;
    codePtr->maxExceptDepth = envPtr->maxExceptDepth;
    codePtr->maxStackDepth = envPtr->maxStackDepth;

    p += sizeof(ByteCode);
    codePtr->codeStart = p;
    memcpy(p, envPtr->codeStart, codeBytes);

    p += TCL_ALIGN(codeBytes);		/* align object array */
    codePtr->objArrayPtr = (Tcl_Obj **) p;
    for (i = 0; i < numLitObjects; i++) {
	codePtr->objArrayPtr[i] = TclFetchLiteral(envPtr, i);
    }

    p += TCL_ALIGN(objArrayBytes);	/* align exception range array */
    if (exceptArrayBytes > 0) {
	codePtr->exceptArrayPtr = (ExceptionRange *) p;
	memcpy(p, envPtr->exceptArrayPtr, exceptArrayBytes);
    } else {
	codePtr->exceptArrayPtr = NULL;
    }

    p += TCL_ALIGN(exceptArrayBytes);	/* align AuxData array */
    if (auxDataArrayBytes > 0) {
	codePtr->auxDataArrayPtr = (AuxData *) p;
	memcpy(p, envPtr->auxDataArrayPtr, auxDataArrayBytes);
    } else {
	codePtr->auxDataArrayPtr = NULL;
    }

    p += auxDataArrayBytes;
#ifndef TCL_COMPILE_DEBUG
    EncodeCmdLocMap(envPtr, codePtr, (unsigned char *) p);
#else
    nextPtr = EncodeCmdLocMap(envPtr, codePtr, (unsigned char *) p);
    if (((size_t)(nextPtr - p)) != cmdLocBytes) {
	Tcl_Panic("TclInitByteCodeObj: encoded cmd location bytes %lu != expected size %lu",
		(unsigned long)(nextPtr - p), (unsigned long)cmdLocBytes);
    }
#endif

    /*
     * Record various compilation-related statistics about the new ByteCode
     * structure. Don't include overhead for statistics-related fields.
     */

#ifdef TCL_COMPILE_STATS
    codePtr->structureSize = structureSize
	    - (sizeof(size_t) + sizeof(Tcl_Time));
    Tcl_GetTime(&codePtr->createTime);

    RecordByteCodeStats(codePtr);
#endif /* TCL_COMPILE_STATS */

    /*
     * TIP #280. Associate the extended per-word line information with the
     * byte code object (internal rep), for use with the bc compiler.
     */

    Tcl_SetHashValue(Tcl_CreateHashEntry(iPtr->lineBCPtr, codePtr,
	    &isNew), envPtr->extCmdMapPtr);
    envPtr->extCmdMapPtr = NULL;

    /* We've used up the CompileEnv. Mark as uninitialized. */

    envPtr->iPtr = NULL;

    codePtr->localCachePtr = NULL;
    return codePtr;
}

ByteCode *
TclInitByteCodeObj(
    Tcl_Obj *objPtr,		/* Points object that should be initialized,
				 * and whose string rep contains the source
				 * code. */
    const Tcl_ObjType *typePtr,
    CompileEnv *envPtr)		/* Points to the CompileEnv structure from
				 * which to create a ByteCode structure. */
{
    ByteCode *codePtr;

    /* Break literal/intrep reference cycles before handing over literals. */
    PreventCycle(objPtr, envPtr);

    codePtr = TclInitByteCode(envPtr);

    /*
     * Free the old internal rep then convert the object to a bytecode object
     * by making its internal rep point to the just compiled ByteCode.
     */

    ByteCodeSetIntRep(objPtr, typePtr, codePtr);
    return codePtr;
}

/*
 *----------------------------------------------------------------------
 *
 * TclFindCompiledLocal --
 *
 *	This procedure is called at compile time to look up and optionally
 *	allocate an entry ("slot") for a variable in a procedure's array of
 *	local variables. If the variable's name is NULL, a new temporary
 *	variable is always created. (Such temporary variables can only be
 *	referenced using their slot index.)
 *
 * Results:
 *	If create is 0 and the name is non-NULL, then if the variable is
 *	found, the index of its entry in the procedure's array of local
 *	variables is returned; otherwise -1 is returned. If name is NULL, the
 *	index of a new temporary variable is returned. Finally, if create is 1
 *	and name is non-NULL, the index of a new entry is returned.
 *
 * Side effects:
 *	Creates and registers a new local variable if create is 1 and the
 *	variable is unknown, or if the name is NULL.
 *
 *----------------------------------------------------------------------
 */

int
TclFindCompiledLocal(
    const char *name,		/* Points to first character of the name of a
				 * scalar or array variable. If NULL, a
				 * temporary var should be created. */
    size_t nameBytes,		/* Number of bytes in the name. */
    int create,			/* If 1, allocate a local frame entry for the
				 * variable if it is new.
				 */
    CompileEnv *envPtr)		/* Points to the current compile environment*/
{
    CompiledLocal *localPtr;
    int localVar = -1;		/* Result: slot index, or -1 if not found. */
    int i;
    Proc *procPtr;

    /*
     * If not creating a temporary, does a local variable of the specified
     * name already exist?
     */

    procPtr = envPtr->procPtr;

    if (procPtr == NULL) {
	/*
	 * Compiling a non-body script: give it read access to the LVT in the
	 * current localCache
	 */

	LocalCache *cachePtr = envPtr->iPtr->varFramePtr->localCachePtr;
	const char *localName;
	Tcl_Obj **varNamePtr;
	size_t len;

	if (!cachePtr || !name) {
	    return -1;
	}

	/* Linear scan of the cached variable names. */
	varNamePtr = &cachePtr->varName0;
	for (i=0; i < cachePtr->numVars; varNamePtr++, i++) {
	    if (*varNamePtr) {
		localName = TclGetStringFromObj(*varNamePtr, &len);
		if ((len == nameBytes)
			&& !strncmp(name, localName, len)) {
		    return i;
		}
	    }
	}
	return -1;
    }

    if (name != NULL) {
	int localCt = procPtr->numCompiledLocals;

	/* Walk the proc's linked list of compiled locals, skipping temps. */
	localPtr = procPtr->firstLocalPtr;
	for (i = 0; i < localCt; i++) {
	    if (!TclIsVarTemporary(localPtr)) {
		char *localName = localPtr->name;

		if ((nameBytes == localPtr->nameLength) &&
			(strncmp(name,localName,nameBytes) == 0)) {
		    return i;
		}
	    }
	    localPtr = localPtr->nextPtr;
	}
    }

    /*
     * Create a new variable if appropriate.
     */

    if (create || (name == NULL)) {
	localVar = procPtr->numCompiledLocals;
	/* The name is stored inline after the struct (flexible layout). */
	localPtr = (CompiledLocal *)Tcl_Alloc(
		offsetof(CompiledLocal, name) + nameBytes + 1);
	if (procPtr->firstLocalPtr == NULL) {
	    procPtr->firstLocalPtr = procPtr->lastLocalPtr = localPtr;
	} else {
	    procPtr->lastLocalPtr->nextPtr = localPtr;
	    procPtr->lastLocalPtr = localPtr;
	}

	localPtr->nextPtr = NULL;
	localPtr->nameLength = nameBytes;
	localPtr->frameIndex = localVar;
	localPtr->flags = 0;
	if (name == NULL) {
	    localPtr->flags |= VAR_TEMPORARY;
	}
	localPtr->defValuePtr = NULL;
	localPtr->resolveInfo = NULL;

	if (name != NULL) {
	    memcpy(localPtr->name, name, nameBytes);
	}
	localPtr->name[nameBytes] = '\0';
	procPtr->numCompiledLocals++;
    }
    return localVar;
}

/*
 *----------------------------------------------------------------------
 *
 * TclExpandCodeArray --
 *
 *	Procedure that uses malloc to allocate more storage for a CompileEnv's
 *	code array.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The byte code array in *envPtr is reallocated to a new array of double
 *	the size, and if envPtr->mallocedCodeArray is non-zero the old array
 *	is freed. Byte codes are copied from the old array to the new one.
 *
 *----------------------------------------------------------------------
 */

void
TclExpandCodeArray(
    void *envArgPtr)		/* Points to the CompileEnv whose code array
				 * must be enlarged. */
{
    CompileEnv *envPtr = (CompileEnv *)envArgPtr;
				/* The CompileEnv containing the code array to
				 * be doubled in size. */

    /*
     * envPtr->codeNext is equal to envPtr->codeEnd. The currently defined
     * code bytes are stored between envPtr->codeStart and envPtr->codeNext-1
     * [inclusive].
     */

    size_t currBytes = envPtr->codeNext - envPtr->codeStart;
    size_t newBytes = 2 * (envPtr->codeEnd - envPtr->codeStart);

    if (envPtr->mallocedCodeArray) {
	envPtr->codeStart = (unsigned char *)Tcl_Realloc(envPtr->codeStart, newBytes);
    } else {
	/*
	 * envPtr->codeStart isn't a Tcl_Alloc'd pointer, so we must code a
	 * Tcl_Realloc equivalent for ourselves.
	 */

	unsigned char *newPtr = (unsigned char *)Tcl_Alloc(newBytes);

	memcpy(newPtr, envPtr->codeStart, currBytes);
	envPtr->codeStart = newPtr;
	/* From now on the array is heap-owned and may be Tcl_Realloc'd. */
	envPtr->mallocedCodeArray = 1;
    }
    envPtr->codeNext = envPtr->codeStart + currBytes;
    envPtr->codeEnd = envPtr->codeStart + newBytes;
}

/*
 *----------------------------------------------------------------------
 *
 * EnterCmdStartData --
 *
 *	Registers the starting source and bytecode location of a command. This
 *	information is used at runtime to map between instruction pc and
 *	source locations.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Inserts source and code location information into the compilation
 *	environment envPtr for the command at index cmdIndex. The compilation
 *	environment's CmdLocation array is grown if necessary.
 *
 *----------------------------------------------------------------------
 */

static void
EnterCmdStartData(
    CompileEnv *envPtr,		/* Points to the compilation environment
				 * structure in which to enter command
				 * location information. */
    int cmdIndex,		/* Index of the command whose start data is
				 * being set. */
    int srcOffset,		/* Offset of first char of the command. */
    int codeOffset)		/* Offset of first byte of command code. */
{
    CmdLocation *cmdLocPtr;

    if ((cmdIndex < 0) || (cmdIndex >= envPtr->numCommands)) {
	Tcl_Panic("EnterCmdStartData: bad command index %d", cmdIndex);
    }

    if (cmdIndex >= envPtr->cmdMapEnd) {
	/*
	 * Expand the command location array by allocating more storage from
	 * the heap. The currently allocated CmdLocation entries are stored
	 * from cmdMapPtr[0] up to cmdMapPtr[envPtr->cmdMapEnd] (inclusive).
	 */

	size_t currElems = envPtr->cmdMapEnd;
	size_t newElems = 2 * currElems;
	size_t currBytes = currElems * sizeof(CmdLocation);
	size_t newBytes = newElems * sizeof(CmdLocation);

	if (envPtr->mallocedCmdMap) {
	    envPtr->cmdMapPtr = (CmdLocation *)Tcl_Realloc(envPtr->cmdMapPtr, newBytes);
	} else {
	    /*
	     * envPtr->cmdMapPtr isn't a Tcl_Alloc'd pointer, so we must code a
	     * Tcl_Realloc equivalent for ourselves.
	     */

	    CmdLocation *newPtr = (CmdLocation *)Tcl_Alloc(newBytes);

	    memcpy(newPtr, envPtr->cmdMapPtr, currBytes);
	    envPtr->cmdMapPtr = newPtr;
	    envPtr->mallocedCmdMap = 1;
	}
	envPtr->cmdMapEnd = newElems;
    }

    if (cmdIndex > 0) {
	/* The map must stay sorted by code offset for pc->source lookup. */
	if (codeOffset < envPtr->cmdMapPtr[cmdIndex-1].codeOffset) {
	    Tcl_Panic("EnterCmdStartData: cmd map not sorted by code offset");
	}
    }

    cmdLocPtr = &envPtr->cmdMapPtr[cmdIndex];
    cmdLocPtr->codeOffset = codeOffset;
    cmdLocPtr->srcOffset = srcOffset;
    /* Lengths are filled in later by EnterCmdExtentData. */
    cmdLocPtr->numSrcBytes = -1;
    cmdLocPtr->numCodeBytes = -1;
}

/*
 *----------------------------------------------------------------------
 *
 * EnterCmdExtentData --
 *
 *	Registers the source and bytecode length for a command. This
 *	information is used at runtime to map between instruction pc and
 *	source locations.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Inserts source and code length information into the compilation
 *	environment envPtr for the command at index cmdIndex. Starting source
 *	and bytecode information for the command must already have been
 *	registered.
 *
 *----------------------------------------------------------------------
 */

static void
EnterCmdExtentData(
    CompileEnv *envPtr,		/* Points to the compilation environment
				 * structure in which to enter command
				 * location information. */
    int cmdIndex,		/* Index of the command whose source and code
				 * length data is being set. */
    int numSrcBytes,		/* Number of command source chars. */
    int numCodeBytes)		/* Offset of last byte of command code.
				 */
{
    CmdLocation *cmdLocPtr;

    if ((cmdIndex < 0) || (cmdIndex >= envPtr->numCommands)) {
	Tcl_Panic("EnterCmdExtentData: bad command index %d", cmdIndex);
    }

    if (cmdIndex > envPtr->cmdMapEnd) {
	Tcl_Panic("EnterCmdExtentData: missing start data for command %d",
		cmdIndex);
    }

    cmdLocPtr = &envPtr->cmdMapPtr[cmdIndex];
    cmdLocPtr->numSrcBytes = numSrcBytes;
    cmdLocPtr->numCodeBytes = numCodeBytes;
}

/*
 *----------------------------------------------------------------------
 * TIP #280
 *
 * EnterCmdWordData --
 *
 *	Registers the lines for the words of a command. This information is
 *	used at runtime by 'info frame'.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Inserts word location information into the compilation environment
 *	envPtr for the command at index cmdIndex. The compilation
 *	environment's ExtCmdLoc.ECL array is grown if necessary.
 *
 *----------------------------------------------------------------------
 */

static void
EnterCmdWordData(
    ExtCmdLoc *eclPtr,		/* Points to the map environment structure in
				 * which to enter command location
				 * information. */
    int srcOffset,		/* Offset of first char of the command. */
    Tcl_Token *tokenPtr,
    const char *cmd,
    int numWords,
    int line,
    int *clNext,
    int **wlines,
    CompileEnv *envPtr)
{
    ECL *ePtr;
    const char *last;
    int wordIdx, wordLine, *wwlines, *wordNext;

    if (eclPtr->nuloc >= eclPtr->nloc) {
	/*
	 * Expand the ECL array by allocating more storage from the heap. The
	 * currently allocated ECL entries are stored from eclPtr->loc[0] up
	 * to eclPtr->loc[eclPtr->nuloc-1] (inclusive).
	 */

	size_t currElems = eclPtr->nloc;
	size_t newElems = (currElems ? 2*currElems : 1);
	size_t newBytes = newElems * sizeof(ECL);

	eclPtr->loc = (ECL *)Tcl_Realloc(eclPtr->loc, newBytes);
	eclPtr->nloc = newElems;
    }

    ePtr = &eclPtr->loc[eclPtr->nuloc];
    ePtr->srcOffset = srcOffset;
    ePtr->line = (int *)Tcl_Alloc(numWords * sizeof(int));
    ePtr->next = (int **)Tcl_Alloc(numWords * sizeof(int *));
    ePtr->nline = numWords;
    wwlines = (int *)Tcl_Alloc(numWords * sizeof(int));

    /* Walk the words, advancing the line counter across each token span. */
    last = cmd;
    wordLine = line;
    wordNext = clNext;
    for (wordIdx=0 ; wordIdx<numWords;
	    wordIdx++, tokenPtr += tokenPtr->numComponents + 1) {
	TclAdvanceLines(&wordLine, last, tokenPtr->start);
	TclAdvanceContinuations(&wordLine, &wordNext,
		tokenPtr->start - envPtr->source);
	/* See Ticket 4b61afd660 */
	wwlines[wordIdx] =
		((wordIdx == 0) || TclWordKnownAtCompileTime(tokenPtr, NULL))
		? wordLine : -1;
	ePtr->line[wordIdx] = wordLine;
	ePtr->next[wordIdx] = wordNext;
	last = tokenPtr->start;
    }

    *wlines = wwlines;
    eclPtr->nuloc ++;
}

/*
 *----------------------------------------------------------------------
 *
 * TclCreateExceptRange --
 *
 *	Procedure that allocates and initializes a new ExceptionRange
 *	structure of the specified kind in a CompileEnv.
 *
 * Results:
 *	Returns the index for the newly created ExceptionRange.
 *
 * Side effects:
 *	If there is not enough room in the CompileEnv's ExceptionRange array,
 *	the array in expanded: a new array of double the size is allocated, if
 *	envPtr->mallocedExceptArray is non-zero the old array is freed, and
 *	ExceptionRange entries are copied from the old array to the new one.
 *
 *----------------------------------------------------------------------
 */

int
TclCreateExceptRange(
    ExceptionRangeType type,	/* The kind of ExceptionRange desired. */
    CompileEnv *envPtr)		/* Points to CompileEnv for which to create a
				 * new ExceptionRange structure. */
{
    ExceptionRange *rangePtr;
    ExceptionAux *auxPtr;
    int index = envPtr->exceptArrayNext;

    if (index >= envPtr->exceptArrayEnd) {
	/*
	 * Expand the ExceptionRange array.
The currently allocated entries
	 * are stored between elements 0 and (envPtr->exceptArrayNext - 1)
	 * [inclusive].
	 */

	size_t currBytes = envPtr->exceptArrayNext * sizeof(ExceptionRange);
	size_t currBytes2 = envPtr->exceptArrayNext * sizeof(ExceptionAux);
	int newElems = 2*envPtr->exceptArrayEnd;
	size_t newBytes = newElems * sizeof(ExceptionRange);
	size_t newBytes2 = newElems * sizeof(ExceptionAux);

	/* The range array and its parallel aux array grow in lock-step. */
	if (envPtr->mallocedExceptArray) {
	    envPtr->exceptArrayPtr =
		    (ExceptionRange *)Tcl_Realloc(envPtr->exceptArrayPtr, newBytes);
	    envPtr->exceptAuxArrayPtr =
		    (ExceptionAux *)Tcl_Realloc(envPtr->exceptAuxArrayPtr, newBytes2);
	} else {
	    /*
	     * envPtr->exceptArrayPtr isn't a Tcl_Alloc'd pointer, so we must
	     * code a Tcl_Realloc equivalent for ourselves.
	     */

	    ExceptionRange *newPtr = (ExceptionRange *)Tcl_Alloc(newBytes);
	    ExceptionAux *newPtr2 = (ExceptionAux *)Tcl_Alloc(newBytes2);

	    memcpy(newPtr, envPtr->exceptArrayPtr, currBytes);
	    memcpy(newPtr2, envPtr->exceptAuxArrayPtr, currBytes2);
	    envPtr->exceptArrayPtr = newPtr;
	    envPtr->exceptAuxArrayPtr = newPtr2;
	    envPtr->mallocedExceptArray = 1;
	}
	envPtr->exceptArrayEnd = newElems;
    }
    envPtr->exceptArrayNext++;

    /* -1 offsets mark fields that are still to be back-patched. */
    rangePtr = &envPtr->exceptArrayPtr[index];
    rangePtr->type = type;
    rangePtr->nestingLevel = envPtr->exceptDepth;
    rangePtr->codeOffset = -1;
    rangePtr->numCodeBytes = -1;
    rangePtr->breakOffset = -1;
    rangePtr->continueOffset = -1;
    rangePtr->catchOffset = -1;
    auxPtr = &envPtr->exceptAuxArrayPtr[index];
    auxPtr->supportsContinue = 1;
    auxPtr->stackDepth = envPtr->currStackDepth;
    auxPtr->expandTarget = envPtr->expandCount;
    auxPtr->expandTargetDepth = -1;
    auxPtr->numBreakTargets = 0;
    auxPtr->breakTargets = NULL;
    auxPtr->allocBreakTargets = 0;
    auxPtr->numContinueTargets = 0;
    auxPtr->continueTargets = NULL;
    auxPtr->allocContinueTargets = 0;
    return index;
}

/*
 * ---------------------------------------------------------------------
 *
 * TclGetInnermostExceptionRange --
 *
 *	Returns the innermost exception range that covers the current code
 *	creation point, and (optionally) the stack depth that is expected at
 *	that point. Relies on the fact that the range has a numCodeBytes = -1
 *	when it is being populated and that inner ranges come after outer
 *	ranges.
 *
 * ---------------------------------------------------------------------
 */

ExceptionRange *
TclGetInnermostExceptionRange(
    CompileEnv *envPtr,
    int returnCode,
    ExceptionAux **auxPtrPtr)
{
    int i = envPtr->exceptArrayNext;
    ExceptionRange *rangePtr = envPtr->exceptArrayPtr + i;

    /* Scan backwards so the innermost matching range is found first. */
    while (i > 0) {
	rangePtr--; i--;

	if (CurrentOffset(envPtr) >= rangePtr->codeOffset &&
		(rangePtr->numCodeBytes == -1 || CurrentOffset(envPtr) <
			rangePtr->codeOffset+rangePtr->numCodeBytes) &&
		(returnCode != TCL_CONTINUE ||
			envPtr->exceptAuxArrayPtr[i].supportsContinue)) {

	    if (auxPtrPtr) {
		*auxPtrPtr = envPtr->exceptAuxArrayPtr + i;
	    }
	    return rangePtr;
	}
    }
    return NULL;
}

/*
 * ---------------------------------------------------------------------
 *
 * TclAddLoopBreakFixup, TclAddLoopContinueFixup --
 *
 *	Adds a place that wants to break/continue to the loop exception range
 *	tracking that will be fixed up once the loop can be finalized. These
 *	functions will generate an INST_JUMP4 that will be fixed up during the
 *	loop finalization.
 *
 * ---------------------------------------------------------------------
 */

void
TclAddLoopBreakFixup(
    CompileEnv *envPtr,		/* Holds the code being generated. */
    ExceptionAux *auxPtr)	/* Aux record of the target loop range; must
				 * belong to envPtr->exceptAuxArrayPtr. */
{
    int range = auxPtr - envPtr->exceptAuxArrayPtr;

    if (envPtr->exceptArrayPtr[range].type != LOOP_EXCEPTION_RANGE) {
	Tcl_Panic("trying to add 'break' fixup to full exception range");
    }

    if (++auxPtr->numBreakTargets > auxPtr->allocBreakTargets) {
	/* Grow geometrically (2n+2), covering the initial zero capacity. */
	auxPtr->allocBreakTargets *= 2;
	auxPtr->allocBreakTargets += 2;
	if (auxPtr->breakTargets) {
	    auxPtr->breakTargets = (unsigned int *)Tcl_Realloc(auxPtr->breakTargets,
		    sizeof(int) * auxPtr->allocBreakTargets);
	} else {
	    auxPtr->breakTargets =
		    (unsigned int *)Tcl_Alloc(sizeof(int) * auxPtr->allocBreakTargets);
	}
    }
    /* Record where the placeholder jump lives, then emit it (offset 0). */
    auxPtr->breakTargets[auxPtr->numBreakTargets - 1] = CurrentOffset(envPtr);
    TclEmitInstInt4(INST_JUMP4, 0, envPtr);
}

void
TclAddLoopContinueFixup(
    CompileEnv *envPtr,		/* Holds the code being generated. */
    ExceptionAux *auxPtr)	/* Aux record of the target loop range; must
				 * belong to envPtr->exceptAuxArrayPtr. */
{
    int range = auxPtr - envPtr->exceptAuxArrayPtr;

    if (envPtr->exceptArrayPtr[range].type != LOOP_EXCEPTION_RANGE) {
	Tcl_Panic("trying to add 'continue' fixup to full exception range");
    }

    if (++auxPtr->numContinueTargets > auxPtr->allocContinueTargets) {
	/* Same 2n+2 growth policy as the break-target table above. */
	auxPtr->allocContinueTargets *= 2;
	auxPtr->allocContinueTargets += 2;
	if (auxPtr->continueTargets) {
	    auxPtr->continueTargets = (unsigned int *)Tcl_Realloc(auxPtr->continueTargets,
		    sizeof(int) * auxPtr->allocContinueTargets);
	} else {
	    auxPtr->continueTargets =
		    (unsigned int *)Tcl_Alloc(sizeof(int) * auxPtr->allocContinueTargets);
	}
    }
    auxPtr->continueTargets[auxPtr->numContinueTargets - 1] =
	    CurrentOffset(envPtr);
    TclEmitInstInt4(INST_JUMP4, 0, envPtr);
}

/*
 * ---------------------------------------------------------------------
 *
 * TclCleanupStackForBreakContinue --
 *
 *	Ditch the extra elements from the auxiliary stack and the main stack.
 *	How to do this exactly depends on whether there are any elements on
 *	the auxiliary stack to pop.
 *
 * ---------------------------------------------------------------------
 */

void
TclCleanupStackForBreakContinue(
    CompileEnv *envPtr,
    ExceptionAux *auxPtr)
{
    int savedStackDepth = envPtr->currStackDepth;
    int toPop = envPtr->expandCount - auxPtr->expandTarget;

    if (toPop > 0) {
	/* First drop any pending {*}-expansion frames. */
	while (toPop --> 0) {
	    TclEmitOpcode(INST_EXPAND_DROP, envPtr);
	}
	TclAdjustStackDepth(auxPtr->expandTargetDepth - envPtr->currStackDepth,
		envPtr);
	envPtr->currStackDepth = auxPtr->expandTargetDepth;
    }
    /* Then pop plain values down to the loop's recorded stack depth. */
    toPop = envPtr->currStackDepth - auxPtr->stackDepth;
    while (toPop --> 0) {
	TclEmitOpcode(INST_POP, envPtr);
    }
    /* Compile-time bookkeeping only; restore the tracked depth. */
    envPtr->currStackDepth = savedStackDepth;
}

/*
 * ---------------------------------------------------------------------
 *
 * StartExpanding --
 *
 *	Pushes an INST_EXPAND_START and does some additional housekeeping so
 *	that the [break] and [continue] compilers can use an exception-free
 *	issue to discard it.
 *
 * ---------------------------------------------------------------------
 */

static void
StartExpanding(
    CompileEnv *envPtr)
{
    int i;

    TclEmitOpcode(INST_EXPAND_START, envPtr);

    /*
     * Update inner exception ranges with information about the environment
     * where this expansion started.
     */

    for (i=0 ; i<envPtr->exceptArrayNext ; i++) {
	ExceptionRange *rangePtr = &envPtr->exceptArrayPtr[i];
	ExceptionAux *auxPtr = &envPtr->exceptAuxArrayPtr[i];

	/*
	 * Ignore loops unless they're still being built.
	 */

	if (rangePtr->codeOffset > CurrentOffset(envPtr)) {
	    continue;
	}
	if (rangePtr->numCodeBytes != -1) {
	    continue;
	}

	/*
	 * Adequate condition: further out loops and further in exceptions
	 * don't actually need this information.
	 */

	if (auxPtr->expandTarget == envPtr->expandCount) {
	    auxPtr->expandTargetDepth = envPtr->currStackDepth;
	}
    }

    /*
     * There's now one more expansion being processed on the auxiliary stack.
*/ envPtr->expandCount++; } /* * --------------------------------------------------------------------- * * TclFinalizeLoopExceptionRange -- * * Finalizes a loop exception range, binding the registered [break] and * [continue] implementations so that they jump to the correct place. * Note that this must only be called after *all* the exception range * target offsets have been set. * * --------------------------------------------------------------------- */ void TclFinalizeLoopExceptionRange( CompileEnv *envPtr, int range) { ExceptionRange *rangePtr = &envPtr->exceptArrayPtr[range]; ExceptionAux *auxPtr = &envPtr->exceptAuxArrayPtr[range]; int i, offset; unsigned char *site; if (rangePtr->type != LOOP_EXCEPTION_RANGE) { Tcl_Panic("trying to finalize a loop exception range"); } /* * Do the jump fixups. Note that these are always issued as INST_JUMP4 so * there is no need to fuss around with updating code offsets. */ for (i=0 ; i<auxPtr->numBreakTargets ; i++) { site = envPtr->codeStart + auxPtr->breakTargets[i]; offset = rangePtr->breakOffset - auxPtr->breakTargets[i]; TclUpdateInstInt4AtPc(INST_JUMP4, offset, site); } for (i=0 ; i<auxPtr->numContinueTargets ; i++) { site = envPtr->codeStart + auxPtr->continueTargets[i]; if (rangePtr->continueOffset == -1) { int j; /* * WTF? Can't bind, so revert to an INST_CONTINUE. Not enough * space to do anything else. */ *site = INST_CONTINUE; for (j=0 ; j<4 ; j++) { *++site = INST_NOP; } } else { offset = rangePtr->continueOffset - auxPtr->continueTargets[i]; TclUpdateInstInt4AtPc(INST_JUMP4, offset, site); } } /* * Drop the arrays we were holding the only reference to. 
*/ if (auxPtr->breakTargets) { Tcl_Free(auxPtr->breakTargets); auxPtr->breakTargets = NULL; auxPtr->numBreakTargets = 0; } if (auxPtr->continueTargets) { Tcl_Free(auxPtr->continueTargets); auxPtr->continueTargets = NULL; auxPtr->numContinueTargets = 0; } } /* *---------------------------------------------------------------------- * * TclCreateAuxData -- * * Procedure that allocates and initializes a new AuxData structure in a * CompileEnv's array of compilation auxiliary data records. These * AuxData records hold information created during compilation by * CompileProcs and used by instructions during execution. * * Results: * Returns the index for the newly created AuxData structure. * * Side effects: * If there is not enough room in the CompileEnv's AuxData array, the * AuxData array in expanded: a new array of double the size is * allocated, if envPtr->mallocedAuxDataArray is non-zero the old array * is freed, and AuxData entries are copied from the old array to the new * one. * *---------------------------------------------------------------------- */ int TclCreateAuxData( ClientData clientData, /* The compilation auxiliary data to store in * the new aux data record. */ const AuxDataType *typePtr, /* Pointer to the type to attach to this * AuxData */ CompileEnv *envPtr)/* Points to the CompileEnv for which a new * aux data structure is to be allocated. */ { int index; /* Index for the new AuxData structure. */ AuxData *auxDataPtr; /* Points to the new AuxData structure */ index = envPtr->auxDataArrayNext; if (index >= envPtr->auxDataArrayEnd) { /* * Expand the AuxData array. The currently allocated entries are * stored between elements 0 and (envPtr->auxDataArrayNext - 1) * [inclusive]. 
*/ size_t currBytes = envPtr->auxDataArrayNext * sizeof(AuxData); int newElems = 2*envPtr->auxDataArrayEnd; size_t newBytes = newElems * sizeof(AuxData); if (envPtr->mallocedAuxDataArray) { envPtr->auxDataArrayPtr = (AuxData *)Tcl_Realloc(envPtr->auxDataArrayPtr, newBytes); } else { /* * envPtr->auxDataArrayPtr isn't a Tcl_Alloc'd pointer, so we must * code a Tcl_Realloc equivalent for ourselves. */ AuxData *newPtr = (AuxData *)Tcl_Alloc(newBytes); memcpy(newPtr, envPtr->auxDataArrayPtr, currBytes); envPtr->auxDataArrayPtr = newPtr; envPtr->mallocedAuxDataArray = 1; } envPtr->auxDataArrayEnd = newElems; } envPtr->auxDataArrayNext++; auxDataPtr = &envPtr->auxDataArrayPtr[index]; auxDataPtr->clientData = clientData; auxDataPtr->type = typePtr; return index; } /* *---------------------------------------------------------------------- * * TclInitJumpFixupArray -- * * Initializes a JumpFixupArray structure to hold some number of jump * fixup entries. * * Results: * None. * * Side effects: * The JumpFixupArray structure is initialized. * *---------------------------------------------------------------------- */ void TclInitJumpFixupArray( JumpFixupArray *fixupArrayPtr) /* Points to the JumpFixupArray structure to * initialize. */ { fixupArrayPtr->fixup = fixupArrayPtr->staticFixupSpace; fixupArrayPtr->next = 0; fixupArrayPtr->end = JUMPFIXUP_INIT_ENTRIES - 1; fixupArrayPtr->mallocedArray = 0; } /* *---------------------------------------------------------------------- * * TclExpandJumpFixupArray -- * * Procedure that uses malloc to allocate more storage for a jump fixup * array. * * Results: * None. * * Side effects: * The jump fixup array in *fixupArrayPtr is reallocated to a new array * of double the size, and if fixupArrayPtr->mallocedArray is non-zero * the old array is freed. Jump fixup structures are copied from the old * array to the new one. 
* *---------------------------------------------------------------------- */ void TclExpandJumpFixupArray( JumpFixupArray *fixupArrayPtr) /* Points to the JumpFixupArray structure to * enlarge. */ { /* * The currently allocated jump fixup entries are stored from fixup[0] up * to fixup[fixupArrayPtr->fixupNext] (*not* inclusive). We assume * fixupArrayPtr->fixupNext is equal to fixupArrayPtr->fixupEnd. */ size_t currBytes = fixupArrayPtr->next * sizeof(JumpFixup); int newElems = 2*(fixupArrayPtr->end + 1); size_t newBytes = newElems * sizeof(JumpFixup); if (fixupArrayPtr->mallocedArray) { fixupArrayPtr->fixup = (JumpFixup *)Tcl_Realloc(fixupArrayPtr->fixup, newBytes); } else { /* * fixupArrayPtr->fixup isn't a Tcl_Alloc'd pointer, so we must code a * Tcl_Realloc equivalent for ourselves. */ JumpFixup *newPtr = (JumpFixup *)Tcl_Alloc(newBytes); memcpy(newPtr, fixupArrayPtr->fixup, currBytes); fixupArrayPtr->fixup = newPtr; fixupArrayPtr->mallocedArray = 1; } fixupArrayPtr->end = newElems; } /* *---------------------------------------------------------------------- * * TclFreeJumpFixupArray -- * * Free any storage allocated in a jump fixup array structure. * * Results: * None. * * Side effects: * Allocated storage in the JumpFixupArray structure is freed. * *---------------------------------------------------------------------- */ void TclFreeJumpFixupArray( JumpFixupArray *fixupArrayPtr) /* Points to the JumpFixupArray structure to * free. */ { if (fixupArrayPtr->mallocedArray) { Tcl_Free(fixupArrayPtr->fixup); } } /* *---------------------------------------------------------------------- * * TclEmitForwardJump -- * * Procedure to emit a two-byte forward jump of kind "jumpType". Since * the jump may later have to be grown to five bytes if the jump target * is more than, say, 127 bytes away, this procedure also initializes a * JumpFixup record with information about the jump. * * Results: * None. 
* * Side effects: * The JumpFixup record pointed to by "jumpFixupPtr" is initialized with * information needed later if the jump is to be grown. Also, a two byte * jump of the designated type is emitted at the current point in the * bytecode stream. * *---------------------------------------------------------------------- */ void TclEmitForwardJump( CompileEnv *envPtr, /* Points to the CompileEnv structure that * holds the resulting instruction. */ TclJumpType jumpType, /* Indicates the kind of jump: if true or * false or unconditional. */ JumpFixup *jumpFixupPtr) /* Points to the JumpFixup structure to * initialize with information about this * forward jump. */ { /* * Initialize the JumpFixup structure: * - codeOffset is offset of first byte of jump below * - cmdIndex is index of the command after the current one * - exceptIndex is the index of the first ExceptionRange after the * current one. */ jumpFixupPtr->jumpType = jumpType; jumpFixupPtr->codeOffset = envPtr->codeNext - envPtr->codeStart; jumpFixupPtr->cmdIndex = envPtr->numCommands; jumpFixupPtr->exceptIndex = envPtr->exceptArrayNext; switch (jumpType) { case TCL_UNCONDITIONAL_JUMP: TclEmitInstInt1(INST_JUMP1, 0, envPtr); break; case TCL_TRUE_JUMP: TclEmitInstInt1(INST_JUMP_TRUE1, 0, envPtr); break; default: TclEmitInstInt1(INST_JUMP_FALSE1, 0, envPtr); break; } } /* *---------------------------------------------------------------------- * * TclFixupForwardJump -- * * Procedure that updates a previously-emitted forward jump to jump a * specified number of bytes, "jumpDist". If necessary, the jump is grown * from two to five bytes; this is done if the jump distance is greater * than "distThreshold" (normally 127 bytes). The jump is described by a * JumpFixup record previously initialized by TclEmitForwardJump. * * Results: * 1 if the jump was grown and subsequent instructions had to be moved; * otherwise 0. This result is returned to allow callers to update any * additional code offsets they may hold. 
* * Side effects: * The jump may be grown and subsequent instructions moved. If this * happens, the code offsets for any commands and any ExceptionRange * records between the jump and the current code address will be updated * to reflect the moved code. Also, the bytecode instruction array in the * CompileEnv structure may be grown and reallocated. * *---------------------------------------------------------------------- */ int TclFixupForwardJump( CompileEnv *envPtr, /* Points to the CompileEnv structure that * holds the resulting instruction. */ JumpFixup *jumpFixupPtr, /* Points to the JumpFixup structure that * describes the forward jump. */ int jumpDist, /* Jump distance to set in jump instr. */ int distThreshold) /* Maximum distance before the two byte jump * is grown to five bytes. */ { unsigned char *jumpPc, *p; int firstCmd, lastCmd, firstRange, lastRange, k; size_t numBytes; if (jumpDist <= distThreshold) { jumpPc = envPtr->codeStart + jumpFixupPtr->codeOffset; switch (jumpFixupPtr->jumpType) { case TCL_UNCONDITIONAL_JUMP: TclUpdateInstInt1AtPc(INST_JUMP1, jumpDist, jumpPc); break; case TCL_TRUE_JUMP: TclUpdateInstInt1AtPc(INST_JUMP_TRUE1, jumpDist, jumpPc); break; default: TclUpdateInstInt1AtPc(INST_JUMP_FALSE1, jumpDist, jumpPc); break; } return 0; } /* * We must grow the jump then move subsequent instructions down. Note that * if we expand the space for generated instructions, code addresses might * change; be careful about updating any of these addresses held in * variables. 
*/ if ((envPtr->codeNext + 3) > envPtr->codeEnd) { TclExpandCodeArray(envPtr); } jumpPc = envPtr->codeStart + jumpFixupPtr->codeOffset; numBytes = envPtr->codeNext-jumpPc-2; p = jumpPc+2; memmove(p+3, p, numBytes); envPtr->codeNext += 3; jumpDist += 3; switch (jumpFixupPtr->jumpType) { case TCL_UNCONDITIONAL_JUMP: TclUpdateInstInt4AtPc(INST_JUMP4, jumpDist, jumpPc); break; case TCL_TRUE_JUMP: TclUpdateInstInt4AtPc(INST_JUMP_TRUE4, jumpDist, jumpPc); break; default: TclUpdateInstInt4AtPc(INST_JUMP_FALSE4, jumpDist, jumpPc); break; } /* * Adjust the code offsets for any commands and any ExceptionRange records * between the jump and the current code address. */ firstCmd = jumpFixupPtr->cmdIndex; lastCmd = envPtr->numCommands - 1; if (firstCmd < lastCmd) { for (k = firstCmd; k <= lastCmd; k++) { envPtr->cmdMapPtr[k].codeOffset += 3; } } firstRange = jumpFixupPtr->exceptIndex; lastRange = envPtr->exceptArrayNext - 1; for (k = firstRange; k <= lastRange; k++) { ExceptionRange *rangePtr = &envPtr->exceptArrayPtr[k]; rangePtr->codeOffset += 3; switch (rangePtr->type) { case LOOP_EXCEPTION_RANGE: rangePtr->breakOffset += 3; if (rangePtr->continueOffset != -1) { rangePtr->continueOffset += 3; } break; case CATCH_EXCEPTION_RANGE: rangePtr->catchOffset += 3; break; default: Tcl_Panic("TclFixupForwardJump: bad ExceptionRange type %d", rangePtr->type); } } for (k = 0 ; k < envPtr->exceptArrayNext ; k++) { ExceptionAux *auxPtr = &envPtr->exceptAuxArrayPtr[k]; int i; for (i=0 ; i<auxPtr->numBreakTargets ; i++) { if (jumpFixupPtr->codeOffset < auxPtr->breakTargets[i]) { auxPtr->breakTargets[i] += 3; } } for (i=0 ; i<auxPtr->numContinueTargets ; i++) { if (jumpFixupPtr->codeOffset < auxPtr->continueTargets[i]) { auxPtr->continueTargets[i] += 3; } } } return 1; /* the jump was grown */ } /* *---------------------------------------------------------------------- * * TclEmitInvoke -- * * Emit one of the invoke-related instructions, wrapping it if necessary * in code that ensures that 
any break or continue operation passing * through it gets the stack unwinding correct, converting it into an * internal jump if in an appropriate context. * * Results: * None * * Side effects: * Issues the jump with all correct stack management. May create another * loop exception range; pointers to ExceptionRange and ExceptionAux * structures should not be held across this call. * *---------------------------------------------------------------------- */ void TclEmitInvoke( CompileEnv *envPtr, int opcode, ...) { va_list argList; ExceptionRange *rangePtr; ExceptionAux *auxBreakPtr, *auxContinuePtr; int arg1, arg2, wordCount = 0, expandCount = 0; int loopRange = 0, breakRange = 0, continueRange = 0; int cleanup, depth = TclGetStackDepth(envPtr); /* * Parse the arguments. */ va_start(argList, opcode); switch (opcode) { case INST_INVOKE_STK1: wordCount = arg1 = cleanup = va_arg(argList, int); arg2 = 0; break; case INST_INVOKE_STK4: wordCount = arg1 = cleanup = va_arg(argList, int); arg2 = 0; break; case INST_INVOKE_REPLACE: arg1 = va_arg(argList, int); arg2 = va_arg(argList, int); wordCount = arg1 + arg2 - 1; cleanup = arg1 + 1; break; default: Tcl_Panic("unexpected opcode"); case INST_EVAL_STK: wordCount = cleanup = 1; arg1 = arg2 = 0; break; case INST_RETURN_STK: wordCount = cleanup = 2; arg1 = arg2 = 0; break; case INST_INVOKE_EXPANDED: wordCount = arg1 = cleanup = va_arg(argList, int); arg2 = 0; expandCount = 1; break; } va_end(argList); /* * Determine if we need to handle break and continue exceptions with a * special handling exception range (so that we can correctly unwind the * stack). * * These must be done separately; they can be different (especially for * calls from inside a [for] increment clause). 
*/ rangePtr = TclGetInnermostExceptionRange(envPtr, TCL_CONTINUE, &auxContinuePtr); if (rangePtr == NULL || rangePtr->type != LOOP_EXCEPTION_RANGE) { auxContinuePtr = NULL; } else if (auxContinuePtr->stackDepth == envPtr->currStackDepth-wordCount && auxContinuePtr->expandTarget == envPtr->expandCount-expandCount) { auxContinuePtr = NULL; } else { continueRange = auxContinuePtr - envPtr->exceptAuxArrayPtr; } rangePtr = TclGetInnermostExceptionRange(envPtr, TCL_BREAK, &auxBreakPtr); if (rangePtr == NULL || rangePtr->type != LOOP_EXCEPTION_RANGE) { auxBreakPtr = NULL; } else if (auxContinuePtr == NULL && auxBreakPtr->stackDepth == envPtr->currStackDepth-wordCount && auxBreakPtr->expandTarget == envPtr->expandCount-expandCount) { auxBreakPtr = NULL; } else { breakRange = auxBreakPtr - envPtr->exceptAuxArrayPtr; } if (auxBreakPtr != NULL || auxContinuePtr != NULL) { loopRange = TclCreateExceptRange(LOOP_EXCEPTION_RANGE, envPtr); ExceptionRangeStarts(envPtr, loopRange); } /* * Issue the invoke itself. */ switch (opcode) { case INST_INVOKE_STK1: TclEmitInstInt1(INST_INVOKE_STK1, arg1, envPtr); break; case INST_INVOKE_STK4: TclEmitInstInt4(INST_INVOKE_STK4, arg1, envPtr); break; case INST_INVOKE_EXPANDED: TclEmitOpcode(INST_INVOKE_EXPANDED, envPtr); envPtr->expandCount--; TclAdjustStackDepth(1 - arg1, envPtr); break; case INST_EVAL_STK: TclEmitOpcode(INST_EVAL_STK, envPtr); break; case INST_RETURN_STK: TclEmitOpcode(INST_RETURN_STK, envPtr); break; case INST_INVOKE_REPLACE: TclEmitInstInt4(INST_INVOKE_REPLACE, arg1, envPtr); TclEmitInt1(arg2, envPtr); TclAdjustStackDepth(-1, envPtr); /* Correction to stack depth calcs */ break; } /* * If we're generating a special wrapper exception range, we need to * finish that up now. 
*/ if (auxBreakPtr != NULL || auxContinuePtr != NULL) { int savedStackDepth = envPtr->currStackDepth; int savedExpandCount = envPtr->expandCount; JumpFixup nonTrapFixup; if (auxBreakPtr != NULL) { auxBreakPtr = envPtr->exceptAuxArrayPtr + breakRange; } if (auxContinuePtr != NULL) { auxContinuePtr = envPtr->exceptAuxArrayPtr + continueRange; } ExceptionRangeEnds(envPtr, loopRange); TclEmitForwardJump(envPtr, TCL_UNCONDITIONAL_JUMP, &nonTrapFixup); /* * Careful! When generating these stack unwinding sequences, the depth * of stack in the cases where they are taken is not the same as if * the exception is not taken. */ if (auxBreakPtr != NULL) { TclAdjustStackDepth(-1, envPtr); ExceptionRangeTarget(envPtr, loopRange, breakOffset); TclCleanupStackForBreakContinue(envPtr, auxBreakPtr); TclAddLoopBreakFixup(envPtr, auxBreakPtr); TclAdjustStackDepth(1, envPtr); envPtr->currStackDepth = savedStackDepth; envPtr->expandCount = savedExpandCount; } if (auxContinuePtr != NULL) { TclAdjustStackDepth(-1, envPtr); ExceptionRangeTarget(envPtr, loopRange, continueOffset); TclCleanupStackForBreakContinue(envPtr, auxContinuePtr); TclAddLoopContinueFixup(envPtr, auxContinuePtr); TclAdjustStackDepth(1, envPtr); envPtr->currStackDepth = savedStackDepth; envPtr->expandCount = savedExpandCount; } TclFinalizeLoopExceptionRange(envPtr, loopRange); TclFixupForwardJumpToHere(envPtr, &nonTrapFixup, 127); } TclCheckStackDepth(depth+1-cleanup, envPtr); } /* *---------------------------------------------------------------------- * * TclGetInstructionTable -- * * Returns a pointer to the table describing Tcl bytecode instructions. * This procedure is defined so that clients can access the pointer from * outside the TCL DLLs. * * Results: * Returns a pointer to the global instruction table, same as the * expression (&tclInstructionTable[0]). * * Side effects: * None. 
 *
 *----------------------------------------------------------------------
 */

/* Returns the address of the static bytecode instruction table so that
 * clients outside the TCL DLLs can inspect it. */
const void * /* == InstructionDesc* == */
TclGetInstructionTable(void)
{
    return &tclInstructionTable[0];
}

/*
 *----------------------------------------------------------------------
 *
 * GetCmdLocEncodingSize --
 *
 *	Computes the total number of bytes needed to encode the command
 *	location information for some compiled code.
 *
 * Results:
 *	The byte count needed to encode the compiled location information.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static int
GetCmdLocEncodingSize(
    CompileEnv *envPtr)		/* Points to compilation environment structure
				 * containing the CmdLocation structure to
				 * encode. */
{
    CmdLocation *mapPtr = envPtr->cmdMapPtr;
    int numCmds = envPtr->numCommands;
    int codeDelta, codeLen, srcDelta, srcLen;
    int codeDeltaNext, codeLengthNext, srcDeltaNext, srcLengthNext;
				/* The offsets in their respective byte
				 * sequences where the next encoded offset or
				 * length should go.
*/ int prevCodeOffset, prevSrcOffset, i; codeDeltaNext = codeLengthNext = srcDeltaNext = srcLengthNext = 0; prevCodeOffset = prevSrcOffset = 0; for (i = 0; i < numCmds; i++) { codeDelta = mapPtr[i].codeOffset - prevCodeOffset; if (codeDelta < 0) { Tcl_Panic("GetCmdLocEncodingSize: bad code offset"); } else if (codeDelta <= 127) { codeDeltaNext++; } else { codeDeltaNext += 5; /* 1 byte for 0xFF, 4 for positive delta */ } prevCodeOffset = mapPtr[i].codeOffset; codeLen = mapPtr[i].numCodeBytes; if (codeLen < 0) { Tcl_Panic("GetCmdLocEncodingSize: bad code length"); } else if (codeLen <= 127) { codeLengthNext++; } else { codeLengthNext += 5;/* 1 byte for 0xFF, 4 for length */ } srcDelta = mapPtr[i].srcOffset - prevSrcOffset; if ((-127 <= srcDelta) && (srcDelta <= 127) && (srcDelta != -1)) { srcDeltaNext++; } else { srcDeltaNext += 5; /* 1 byte for 0xFF, 4 for delta */ } prevSrcOffset = mapPtr[i].srcOffset; srcLen = mapPtr[i].numSrcBytes; if (srcLen < 0) { Tcl_Panic("GetCmdLocEncodingSize: bad source length"); } else if (srcLen <= 127) { srcLengthNext++; } else { srcLengthNext += 5; /* 1 byte for 0xFF, 4 for length */ } } return (codeDeltaNext + codeLengthNext + srcDeltaNext + srcLengthNext); } /* *---------------------------------------------------------------------- * * EncodeCmdLocMap -- * * Encode the command location information for some compiled code into a * ByteCode structure. The encoded command location map is stored as * three adjacent byte sequences. * * Results: * Pointer to the first byte after the encoded command location * information. * * Side effects: * The encoded information is stored into the block of memory headed by * codePtr. Also records pointers to the start of the four byte sequences * in fields in codePtr's ByteCode header structure. 
* *---------------------------------------------------------------------- */ static unsigned char * EncodeCmdLocMap( CompileEnv *envPtr, /* Points to compilation environment structure * containing the CmdLocation structure to * encode. */ ByteCode *codePtr, /* ByteCode in which to encode envPtr's * command location information. */ unsigned char *startPtr) /* Points to the first byte in codePtr's * memory block where the location information * is to be stored. */ { CmdLocation *mapPtr = envPtr->cmdMapPtr; int numCmds = envPtr->numCommands; unsigned char *p = startPtr; int codeDelta, codeLen, srcDelta, srcLen, prevOffset; int i; /* * Encode the code offset for each command as a sequence of deltas. */ codePtr->codeDeltaStart = p; prevOffset = 0; for (i = 0; i < numCmds; i++) { codeDelta = mapPtr[i].codeOffset - prevOffset; if (codeDelta < 0) { Tcl_Panic("EncodeCmdLocMap: bad code offset"); } else if (codeDelta <= 127) { TclStoreInt1AtPtr(codeDelta, p); p++; } else { TclStoreInt1AtPtr(0xFF, p); p++; TclStoreInt4AtPtr(codeDelta, p); p += 4; } prevOffset = mapPtr[i].codeOffset; } /* * Encode the code length for each command. */ codePtr->codeLengthStart = p; for (i = 0; i < numCmds; i++) { codeLen = mapPtr[i].numCodeBytes; if (codeLen < 0) { Tcl_Panic("EncodeCmdLocMap: bad code length"); } else if (codeLen <= 127) { TclStoreInt1AtPtr(codeLen, p); p++; } else { TclStoreInt1AtPtr(0xFF, p); p++; TclStoreInt4AtPtr(codeLen, p); p += 4; } } /* * Encode the source offset for each command as a sequence of deltas. */ codePtr->srcDeltaStart = p; prevOffset = 0; for (i = 0; i < numCmds; i++) { srcDelta = mapPtr[i].srcOffset - prevOffset; if ((-127 <= srcDelta) && (srcDelta <= 127) && (srcDelta != -1)) { TclStoreInt1AtPtr(srcDelta, p); p++; } else { TclStoreInt1AtPtr(0xFF, p); p++; TclStoreInt4AtPtr(srcDelta, p); p += 4; } prevOffset = mapPtr[i].srcOffset; } /* * Encode the source length for each command. 
*/ codePtr->srcLengthStart = p; for (i = 0; i < numCmds; i++) { srcLen = mapPtr[i].numSrcBytes; if (srcLen < 0) { Tcl_Panic("EncodeCmdLocMap: bad source length"); } else if (srcLen <= 127) { TclStoreInt1AtPtr(srcLen, p); p++; } else { TclStoreInt1AtPtr(0xFF, p); p++; TclStoreInt4AtPtr(srcLen, p); p += 4; } } return p; } #ifdef TCL_COMPILE_STATS /* *---------------------------------------------------------------------- * * RecordByteCodeStats -- * * Accumulates various compilation-related statistics for each newly * compiled ByteCode. Called by the TclInitByteCodeObj when Tcl is * compiled with the -DTCL_COMPILE_STATS flag * * Results: * None. * * Side effects: * Accumulates aggregate code-related statistics in the interpreter's * ByteCodeStats structure. Records statistics specific to a ByteCode in * its ByteCode structure. * *---------------------------------------------------------------------- */ void RecordByteCodeStats( ByteCode *codePtr) /* Points to ByteCode structure with info * to add to accumulated statistics. 
*/ { Interp *iPtr = (Interp *) *codePtr->interpHandle; ByteCodeStats *statsPtr; if (iPtr == NULL) { /* Avoid segfaulting in case we're called in a deleted interp */ return; } statsPtr = &(iPtr->stats); statsPtr->numCompilations++; statsPtr->totalSrcBytes += (double) codePtr->numSrcBytes; statsPtr->totalByteCodeBytes += (double) codePtr->structureSize; statsPtr->currentSrcBytes += (double) codePtr->numSrcBytes; statsPtr->currentByteCodeBytes += (double) codePtr->structureSize; statsPtr->srcCount[TclLog2(codePtr->numSrcBytes)]++; statsPtr->byteCodeCount[TclLog2((int) codePtr->structureSize)]++; statsPtr->currentInstBytes += (double) codePtr->numCodeBytes; statsPtr->currentLitBytes += (double) codePtr->numLitObjects * sizeof(Tcl_Obj *); statsPtr->currentExceptBytes += (double) codePtr->numExceptRanges * sizeof(ExceptionRange); statsPtr->currentAuxBytes += (double) codePtr->numAuxDataItems * sizeof(AuxData); statsPtr->currentCmdMapBytes += (double) codePtr->numCmdLocBytes; } #endif /* TCL_COMPILE_STATS */ /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * tab-width: 8 * End: */
/* ==== 197437.c — stray bare filename token left by file concatenation; commented out so the translation unit stays parseable ==== */
/* * This file is part of the Micro Python project, http://micropython.org/ * * The MIT License (MIT) * * Copyright (c) 2019 Scott Shawcroft for Adafruit Industries * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "shared-bindings/displayio/EPaperDisplay.h" #include <stdint.h> #include "lib/utils/context_manager_helpers.h" #include "py/binary.h" #include "py/objproperty.h" #include "py/objtype.h" #include "py/runtime.h" #include "shared-bindings/displayio/Group.h" #include "shared-bindings/microcontroller/Pin.h" #include "shared-bindings/util.h" #include "shared-module/displayio/__init__.h" #include "supervisor/shared/translate.h" //| class EPaperDisplay: //| """Manage updating an epaper display over a display bus //| //| This initializes an epaper display and connects it into CircuitPython. Unlike other //| objects in CircuitPython, EPaperDisplay objects live until `displayio.release_displays()` //| is called. 
This is done so that CircuitPython can use the display itself. //| //| Most people should not use this class directly. Use a specific display driver instead that will //| contain the startup and shutdown sequences at minimum.""" //| //| def __init__(self, display_bus: _DisplayBus, start_sequence: ReadableBuffer, stop_sequence: ReadableBuffer, *, width: int, height: int, ram_width: int, ram_height: int, colstart: int = 0, rowstart: int = 0, rotation: int = 0, set_column_window_command: Optional[int] = None, set_row_window_command: Optional[int] = None, single_byte_bounds: bool = False, write_black_ram_command: int, black_bits_inverted: bool = False, write_color_ram_command: Optional[int] = None, color_bits_inverted: bool = False, highlight_color: int = 0x000000, refresh_display_command: int, refresh_time: float = 40, busy_pin: Optional[microcontroller.Pin] = None, busy_state: bool = True, seconds_per_frame: float = 180, always_toggle_chip_select: bool = False) -> None: //| """Create a EPaperDisplay object on the given display bus (`displayio.FourWire` or `displayio.ParallelBus`). //| //| The ``start_sequence`` and ``stop_sequence`` are bitpacked to minimize the ram impact. Every //| command begins with a command byte followed by a byte to determine the parameter count and if //| a delay is need after. When the top bit of the second byte is 1, the next byte will be the //| delay time in milliseconds. The remaining 7 bits are the parameter count excluding any delay //| byte. The third through final bytes are the remaining command parameters. The next byte will //| begin a new command definition. //| //| :param display_bus: The bus that the display is connected to //| :type display_bus: displayio.FourWire or displayio.ParallelBus //| :param buffer start_sequence: Byte-packed initialization sequence. //| :param buffer stop_sequence: Byte-packed initialization sequence. 
//| :param int width: Width in pixels
//| :param int height: Height in pixels
//| :param int ram_width: RAM width in pixels
//| :param int ram_height: RAM height in pixels
//| :param int colstart: The index of the first visible column
//| :param int rowstart: The index of the first visible row
//| :param int rotation: The rotation of the display in degrees clockwise. Must be in 90 degree increments (0, 90, 180, 270)
//| :param int set_column_window_command: Command used to set the start and end columns to update
//| :param int set_row_window_command: Command used to set the start and end rows to update
//| :param int set_current_column_command: Command used to set the current column location
//| :param int set_current_row_command: Command used to set the current row location
//| :param int write_black_ram_command: Command used to write pixel values into the update region
//| :param bool black_bits_inverted: True if 0 bits are used to show black pixels. Otherwise, 1 means to show black.
//| :param int write_color_ram_command: Command used to write pixel values into the update region
//| :param bool color_bits_inverted: True if 0 bits are used to show the color. Otherwise, 1 means to show color.
//| :param int highlight_color: RGB888 of source color to highlight with third ePaper color.
//| :param int refresh_display_command: Command used to start a display refresh
//| :param float refresh_time: Time it takes to refresh the display before the stop_sequence should be sent. Ignored when busy_pin is provided.
//| :param microcontroller.Pin busy_pin: Pin used to signify the display is busy
//| :param bool busy_state: State of the busy pin when the display is busy
//| :param float seconds_per_frame: Minimum number of seconds between screen refreshes
//| :param bool always_toggle_chip_select: When True, chip select is toggled every byte"""
//| ...
//| STATIC mp_obj_t displayio_epaperdisplay_make_new(const mp_obj_type_t *type, size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { enum { ARG_display_bus, ARG_start_sequence, ARG_stop_sequence, ARG_width, ARG_height, ARG_ram_width, ARG_ram_height, ARG_colstart, ARG_rowstart, ARG_rotation, ARG_set_column_window_command, ARG_set_row_window_command, ARG_set_current_column_command, ARG_set_current_row_command, ARG_write_black_ram_command, ARG_black_bits_inverted, ARG_write_color_ram_command, ARG_color_bits_inverted, ARG_highlight_color, ARG_refresh_display_command, ARG_refresh_time, ARG_busy_pin, ARG_busy_state, ARG_seconds_per_frame, ARG_always_toggle_chip_select }; static const mp_arg_t allowed_args[] = { { MP_QSTR_display_bus, MP_ARG_REQUIRED | MP_ARG_OBJ }, { MP_QSTR_start_sequence, MP_ARG_REQUIRED | MP_ARG_OBJ }, { MP_QSTR_stop_sequence, MP_ARG_REQUIRED | MP_ARG_OBJ }, { MP_QSTR_width, MP_ARG_INT | MP_ARG_KW_ONLY | MP_ARG_REQUIRED, }, { MP_QSTR_height, MP_ARG_INT | MP_ARG_KW_ONLY | MP_ARG_REQUIRED, }, { MP_QSTR_ram_width, MP_ARG_INT | MP_ARG_KW_ONLY | MP_ARG_REQUIRED, }, { MP_QSTR_ram_height, MP_ARG_INT | MP_ARG_KW_ONLY | MP_ARG_REQUIRED, }, { MP_QSTR_colstart, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = 0} }, { MP_QSTR_rowstart, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = 0} }, { MP_QSTR_rotation, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = 0} }, { MP_QSTR_set_column_window_command, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = NO_COMMAND} }, { MP_QSTR_set_row_window_command, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = NO_COMMAND} }, { MP_QSTR_set_current_column_command, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = NO_COMMAND} }, { MP_QSTR_set_current_row_command, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = NO_COMMAND} }, { MP_QSTR_write_black_ram_command, MP_ARG_INT | MP_ARG_REQUIRED }, { MP_QSTR_black_bits_inverted, MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = false} }, { MP_QSTR_write_color_ram_command, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = mp_const_none} }, { MP_QSTR_color_bits_inverted, 
MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = false} }, { MP_QSTR_highlight_color, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = 0x000000} }, { MP_QSTR_refresh_display_command, MP_ARG_INT | MP_ARG_REQUIRED }, { MP_QSTR_refresh_time, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = MP_OBJ_NEW_SMALL_INT(40)} }, { MP_QSTR_busy_pin, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = mp_const_none} }, { MP_QSTR_busy_state, MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = true} }, { MP_QSTR_seconds_per_frame, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = MP_OBJ_NEW_SMALL_INT(180)} }, { MP_QSTR_always_toggle_chip_select, MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = false} }, }; mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); mp_obj_t display_bus = args[ARG_display_bus].u_obj; mp_buffer_info_t start_bufinfo; mp_get_buffer_raise(args[ARG_start_sequence].u_obj, &start_bufinfo, MP_BUFFER_READ); mp_buffer_info_t stop_bufinfo; mp_get_buffer_raise(args[ARG_stop_sequence].u_obj, &stop_bufinfo, MP_BUFFER_READ); const mcu_pin_obj_t* busy_pin = validate_obj_is_free_pin_or_none(args[ARG_busy_pin].u_obj); mp_int_t rotation = args[ARG_rotation].u_int; if (rotation % 90 != 0) { mp_raise_ValueError(translate("Display rotation must be in 90 degree increments")); } primary_display_t *disp = allocate_display_or_raise(); displayio_epaperdisplay_obj_t *self = &disp->epaper_display;; mp_float_t refresh_time = mp_obj_get_float(args[ARG_refresh_time].u_obj); mp_float_t seconds_per_frame = mp_obj_get_float(args[ARG_seconds_per_frame].u_obj); mp_int_t write_color_ram_command = NO_COMMAND; mp_int_t highlight_color = args[ARG_highlight_color].u_int; if (args[ARG_write_color_ram_command].u_obj != mp_const_none) { write_color_ram_command = mp_obj_get_int(args[ARG_write_color_ram_command].u_obj); } self->base.type = &displayio_epaperdisplay_type; common_hal_displayio_epaperdisplay_construct( self, display_bus, start_bufinfo.buf, start_bufinfo.len, 
stop_bufinfo.buf, stop_bufinfo.len, args[ARG_width].u_int, args[ARG_height].u_int, args[ARG_ram_width].u_int, args[ARG_ram_height].u_int, args[ARG_colstart].u_int, args[ARG_rowstart].u_int, rotation, args[ARG_set_column_window_command].u_int, args[ARG_set_row_window_command].u_int, args[ARG_set_current_column_command].u_int, args[ARG_set_current_row_command].u_int, args[ARG_write_black_ram_command].u_int, args[ARG_black_bits_inverted].u_bool, write_color_ram_command, args[ARG_color_bits_inverted].u_bool, highlight_color, args[ARG_refresh_display_command].u_int, refresh_time, busy_pin, args[ARG_busy_state].u_bool, seconds_per_frame, args[ARG_always_toggle_chip_select].u_bool ); return self; } // Helper to ensure we have the native super class instead of a subclass. static displayio_epaperdisplay_obj_t* native_display(mp_obj_t display_obj) { mp_obj_t native_display = mp_instance_cast_to_native_base(display_obj, &displayio_epaperdisplay_type); mp_obj_assert_native_inited(native_display); return MP_OBJ_TO_PTR(native_display); } //| def show(self, group: Group) -> None: //| """Switches to displaying the given group of layers. When group is None, the default //| CircuitPython terminal will be shown. //| //| :param Group group: The group to show.""" //| ... //| STATIC mp_obj_t displayio_epaperdisplay_obj_show(mp_obj_t self_in, mp_obj_t group_in) { displayio_epaperdisplay_obj_t *self = native_display(self_in); displayio_group_t* group = NULL; if (group_in != mp_const_none) { group = MP_OBJ_TO_PTR(native_group(group_in)); } bool ok = common_hal_displayio_epaperdisplay_show(self, group); if (!ok) { mp_raise_ValueError(translate("Group already used")); } return mp_const_none; } MP_DEFINE_CONST_FUN_OBJ_2(displayio_epaperdisplay_show_obj, displayio_epaperdisplay_obj_show); //| def refresh(self) -> None: //| """Refreshes the display immediately or raises an exception if too soon. Use //| ``time.sleep(display.time_to_refresh)`` to sleep until a refresh can occur.""" //| ... 
//|
// refresh(): trigger an immediate e-paper refresh. The common-hal layer
// reports FALSE when the panel's minimum refresh interval has not yet
// elapsed, which is surfaced to Python as a RuntimeError.
STATIC mp_obj_t displayio_epaperdisplay_obj_refresh(mp_obj_t self_in) {
    displayio_epaperdisplay_obj_t *self = native_display(self_in);
    bool ok = common_hal_displayio_epaperdisplay_refresh(self);
    if (!ok) {
        mp_raise_RuntimeError(translate("Refresh too soon"));
    }
    return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_1(displayio_epaperdisplay_refresh_obj, displayio_epaperdisplay_obj_refresh);

//| time_to_refresh: float
//| """Time, in fractional seconds, until the ePaper display can be refreshed."""
//|
// Getter for time_to_refresh. The common-hal value is in milliseconds;
// divide by 1000.0 to expose fractional seconds as documented above.
STATIC mp_obj_t displayio_epaperdisplay_obj_get_time_to_refresh(mp_obj_t self_in) {
    displayio_epaperdisplay_obj_t *self = native_display(self_in);
    return mp_obj_new_float(common_hal_displayio_epaperdisplay_get_time_to_refresh(self) / 1000.0);
}
MP_DEFINE_CONST_FUN_OBJ_1(displayio_epaperdisplay_get_time_to_refresh_obj, displayio_epaperdisplay_obj_get_time_to_refresh);

// Read-only property: setter and deleter slots are mp_const_none_obj.
const mp_obj_property_t displayio_epaperdisplay_time_to_refresh_obj = {
    .base.type = &mp_type_property,
    .proxy = {(mp_obj_t)&displayio_epaperdisplay_get_time_to_refresh_obj,
              (mp_obj_t)&mp_const_none_obj,
              (mp_obj_t)&mp_const_none_obj},
};

//| width: int
//| """Gets the width of the display in pixels"""
//|
STATIC mp_obj_t displayio_epaperdisplay_obj_get_width(mp_obj_t self_in) {
    displayio_epaperdisplay_obj_t *self = native_display(self_in);
    return MP_OBJ_NEW_SMALL_INT(common_hal_displayio_epaperdisplay_get_width(self));
}
MP_DEFINE_CONST_FUN_OBJ_1(displayio_epaperdisplay_get_width_obj, displayio_epaperdisplay_obj_get_width);

// Read-only property: setter and deleter slots are mp_const_none_obj.
const mp_obj_property_t displayio_epaperdisplay_width_obj = {
    .base.type = &mp_type_property,
    .proxy = {(mp_obj_t)&displayio_epaperdisplay_get_width_obj,
              (mp_obj_t)&mp_const_none_obj,
              (mp_obj_t)&mp_const_none_obj},
};

//| height: int
//| """Gets the height of the display in pixels"""
//|
STATIC mp_obj_t displayio_epaperdisplay_obj_get_height(mp_obj_t self_in) {
    displayio_epaperdisplay_obj_t *self = native_display(self_in);
    return MP_OBJ_NEW_SMALL_INT(common_hal_displayio_epaperdisplay_get_height(self));
}
MP_DEFINE_CONST_FUN_OBJ_1(displayio_epaperdisplay_get_height_obj, displayio_epaperdisplay_obj_get_height);

// Read-only property: setter and deleter slots are mp_const_none_obj.
const mp_obj_property_t displayio_epaperdisplay_height_obj = {
    .base.type = &mp_type_property,
    .proxy = {(mp_obj_t)&displayio_epaperdisplay_get_height_obj,
              (mp_obj_t)&mp_const_none_obj,
              (mp_obj_t)&mp_const_none_obj},
};

//| bus: _DisplayBus
//| """The bus being used by the display"""
//|
STATIC mp_obj_t displayio_epaperdisplay_obj_get_bus(mp_obj_t self_in) {
    displayio_epaperdisplay_obj_t *self = native_display(self_in);
    return common_hal_displayio_epaperdisplay_get_bus(self);
}
MP_DEFINE_CONST_FUN_OBJ_1(displayio_epaperdisplay_get_bus_obj, displayio_epaperdisplay_obj_get_bus);

// Read-only property: setter and deleter slots are mp_const_none_obj.
const mp_obj_property_t displayio_epaperdisplay_bus_obj = {
    .base.type = &mp_type_property,
    .proxy = {(mp_obj_t)&displayio_epaperdisplay_get_bus_obj,
              (mp_obj_t)&mp_const_none_obj,
              (mp_obj_t)&mp_const_none_obj},
};

// Attribute table for the EPaperDisplay Python type: methods first, then
// the read-only properties defined above.
STATIC const mp_rom_map_elem_t displayio_epaperdisplay_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_show), MP_ROM_PTR(&displayio_epaperdisplay_show_obj) },
    { MP_ROM_QSTR(MP_QSTR_refresh), MP_ROM_PTR(&displayio_epaperdisplay_refresh_obj) },

    { MP_ROM_QSTR(MP_QSTR_width), MP_ROM_PTR(&displayio_epaperdisplay_width_obj) },
    { MP_ROM_QSTR(MP_QSTR_height), MP_ROM_PTR(&displayio_epaperdisplay_height_obj) },
    { MP_ROM_QSTR(MP_QSTR_bus), MP_ROM_PTR(&displayio_epaperdisplay_bus_obj) },

    { MP_ROM_QSTR(MP_QSTR_time_to_refresh), MP_ROM_PTR(&displayio_epaperdisplay_time_to_refresh_obj) },
};
STATIC MP_DEFINE_CONST_DICT(displayio_epaperdisplay_locals_dict, displayio_epaperdisplay_locals_dict_table);

// The EPaperDisplay type object itself; make_new is defined earlier in this file.
const mp_obj_type_t displayio_epaperdisplay_type = {
    { &mp_type_type },
    .name = MP_QSTR_EPaperDisplay,
    .make_new = displayio_epaperdisplay_make_new,
    .locals_dict = (mp_obj_dict_t*)&displayio_epaperdisplay_locals_dict,
};
844563.c
/**
  ******************************************************************************
  * @file    TIM/TIM_PWMInput/Src/main.c
  * @author  MCD Application Team
  * @brief   This sample code shows how to use STM32L0xx TIM HAL API to measure
  *          the frequency and duty cycle of an external signal through the
  *          STM32L0xx HAL API.
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2>
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
  *   1. Redistributions of source code must retain the above copyright notice,
  *      this list of conditions and the following disclaimer.
  *   2. Redistributions in binary form must reproduce the above copyright notice,
  *      this list of conditions and the following disclaimer in the documentation
  *      and/or other materials provided with the distribution.
  *   3. Neither the name of STMicroelectronics nor the names of its contributors
  *      may be used to endorse or promote products derived from this software
  *      without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "main.h"

/** @addtogroup STM32L0xx_HAL_Examples
  * @{
  */

/** @addtogroup TIM_PWMInput
  * @{
  */

/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Timer handler declaration */
TIM_HandleTypeDef    TimHandle;

/* Timer Input Capture Configuration Structure declaration */
TIM_IC_InitTypeDef   sConfig;

/* Slave configuration structure */
TIM_SlaveConfigTypeDef sSlaveConfig;

/* Captured Value (full signal period, in timer ticks, captured on CH2) */
__IO uint32_t        uwIC2Value = 0;
/* Duty Cycle Value (percent) */
__IO uint32_t        uwDutyCycle = 0;
/* Frequency Value (Hz) */
__IO uint32_t        uwFrequency = 0;

/* Private function prototypes -----------------------------------------------*/
static void SystemClock_Config(void);
static void ErrorHandler(void);

/* Private functions ---------------------------------------------------------*/

/**
  * @brief  Main program
  *
  * Configures TIM2 in PWM-input mode: channel 2 captures the rising edge
  * (signal period) and channel 1 captures the falling edge (high time).
  * The timer is reset by TI2FP2 so each CH2 capture directly measures one
  * full period. Results are computed in HAL_TIM_IC_CaptureCallback().
  *
  * @param  None
  * @retval None
  */
int main(void)
{
  /* This sample code shows how to use STM32L0xx TIM HAL API to measure the
     frequency and duty cycle of an external signal through the
     STM32L0xx HAL API. */

  /* STM32L0xx HAL library initialization:
       - Configure the Flash prefetch, Flash preread and Buffer caches
       - Systick timer is configured by default as source of time base, but user
             can eventually implement his proper time base source (a general purpose
             timer for example or other time source), keeping in mind that Time base
             duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and
             handled in milliseconds basis.
       - Low Level Initialization
     */
  HAL_Init();

  /* Configure the system clock */
  SystemClock_Config();

  /*##-1- Configure the TIM peripheral #######################################*/
  /* Set TIM instance */
  TimHandle.Instance = TIM2;

  /* Initialize TIMx peripheral as follow:
       + Period = 0xFFFF
       + Prescaler = 0
       + ClockDivision = 0
       + Counter direction = Up
  */
  TimHandle.Init.Period        = 0xFFFF;
  TimHandle.Init.Prescaler     = 0;
  TimHandle.Init.ClockDivision = 0;
  TimHandle.Init.CounterMode   = TIM_COUNTERMODE_UP;
  if(HAL_TIM_IC_Init(&TimHandle) != HAL_OK)
  {
    /* Initialization Error */
    ErrorHandler();
  }

  /*##-2- Configure the Input Capture channels ###############################*/
  /* Common configuration */
  sConfig.ICPrescaler = TIM_ICPSC_DIV1;
  sConfig.ICFilter    = 0;

  /* Configure the Input Capture of channel 1: falling edge of TI2 (indirect
     input), i.e. the end of the high pulse -> captures the duty-cycle time */
  sConfig.ICPolarity  = TIM_ICPOLARITY_FALLING;
  sConfig.ICSelection = TIM_ICSELECTION_INDIRECTTI;
  if(HAL_TIM_IC_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_1) != HAL_OK)
  {
    /* Configuration Error */
    ErrorHandler();
  }

  /* Configure the Input Capture of channel 2: rising edge of TI2 (direct
     input) -> captures the full period */
  sConfig.ICPolarity  = TIM_ICPOLARITY_RISING;
  sConfig.ICSelection = TIM_ICSELECTION_DIRECTTI;
  if(HAL_TIM_IC_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_2) != HAL_OK)
  {
    /* Configuration Error */
    ErrorHandler();
  }

  /*##-3- Configure the slave mode ###########################################*/
  /* Select the slave Mode: Reset Mode - the counter restarts on each rising
     edge (TI2FP2), so the CH2 capture value equals the signal period */
  sSlaveConfig.SlaveMode    = TIM_SLAVEMODE_RESET;
  sSlaveConfig.InputTrigger = TIM_TS_TI2FP2;
  if(HAL_TIM_SlaveConfigSynchronization(&TimHandle, &sSlaveConfig) != HAL_OK)
  {
    /* Configuration Error */
    ErrorHandler();
  }

  /*##-4- Start the Input Capture in interrupt mode ##########################*/
  if(HAL_TIM_IC_Start_IT(&TimHandle, TIM_CHANNEL_2) != HAL_OK)
  {
    /* Starting Error */
    ErrorHandler();
  }

  /*##-5- Start the Input Capture in interrupt mode ##########################*/
  if(HAL_TIM_IC_Start_IT(&TimHandle, TIM_CHANNEL_1) != HAL_OK)
  {
    /* Starting Error */
    ErrorHandler();
  }

  while (1)
  {
  }
}

/**
  * @brief  Input Capture callback in non blocking mode
  *
  * Invoked on each channel-2 capture (one full period). Computes the duty
  * cycle from the CH1 (high-time) and CH2 (period) capture values and the
  * frequency from the timer clock (HCLK, since the prescaler is 0).
  *
  * @param  htim : TIM IC handle
  * @retval None
  */
void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim)
{
  if (htim->Channel == HAL_TIM_ACTIVE_CHANNEL_2)
  {
    /* Get the Input Capture value */
    uwIC2Value = HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_2);

    if (uwIC2Value != 0)
    {
      /* Duty cycle computation */
      uwDutyCycle = ((HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_1) + 1) * 100) / (uwIC2Value + 1);

      /* uwFrequency computation
      TIM2 counter clock = RCC_Clocks.HCLK_Frequency */
      uwFrequency = HAL_RCC_GetHCLKFreq()/ (uwIC2Value + 1);
    }
    else
    {
      uwDutyCycle = 0;
      uwFrequency = 0;
    }
  }
}

/**
  * @brief  System Clock Configuration
  *         The system Clock is configured as follow :
  *            System Clock source            = PLL (HSI)
  *            SYSCLK(Hz)                     = 32000000
  *            HCLK(Hz)                       = 32000000
  *            AHB Prescaler                  = 1
  *            APB1 Prescaler                 = 1
  *            APB2 Prescaler                 = 1
  *            HSI Frequency(Hz)              = 16000000
  *            PLL_MUL                        = 4
  *            PLL_DIV                        = 2
  *            Flash Latency(WS)              = 1
  *            Main regulator output voltage  = Scale1 mode
  * @param  None
  * @retval None
  */
static void SystemClock_Config(void)
{
  RCC_ClkInitTypeDef RCC_ClkInitStruct;
  RCC_OscInitTypeDef RCC_OscInitStruct;

  /* Enable Power Control clock */
  __HAL_RCC_PWR_CLK_ENABLE();

  /* The voltage scaling allows optimizing the power consumption when the device is
     clocked below the maximum system frequency, to update the voltage scaling value
     regarding system frequency refer to product datasheet. */
  __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1);

  /* Enable HSI Oscillator and activate PLL with HSI as source */
  RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI;
  RCC_OscInitStruct.HSEState = RCC_HSE_OFF;
  RCC_OscInitStruct.HSIState = RCC_HSI_ON;
  RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON;
  RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSI;
  RCC_OscInitStruct.PLL.PLLMUL = RCC_PLL_MUL4;
  RCC_OscInitStruct.PLL.PLLDIV = RCC_PLL_DIV2;
  RCC_OscInitStruct.HSICalibrationValue = 0x10;
  /* FIX: the return status was previously ignored; a silent oscillator/PLL
     failure would leave the system running on the wrong clock. Check it,
     consistent with every other HAL call in this file. */
  if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK)
  {
    /* Initialization Error */
    ErrorHandler();
  }

  /* Select PLL as system clock source and configure the HCLK, PCLK1 and PCLK2
     clocks dividers */
  RCC_ClkInitStruct.ClockType = (RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2);
  RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
  RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
  RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1;
  RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1;
  /* FIX: likewise check the clock-switch status (was ignored). */
  if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_1) != HAL_OK)
  {
    /* Initialization Error */
    ErrorHandler();
  }
}

/**
  * @brief  This function is executed in case of error occurrence.
  * @param  None
  * @retval None
  */
static void ErrorHandler(void)
{
  /* Infinite loop */
  while(1)
  {
  }
}

#ifdef  USE_FULL_ASSERT
/**
  * @brief  Reports the name of the source file and the source line number
  *         where the assert_param error has occurred.
  * @param  file: pointer to the source file name
  * @param  line: assert_param error line source number
  * @retval None
  */
void assert_failed(uint8_t* file, uint32_t line)
{
  /* User can add his own implementation to report the file name and line number,
     ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */

  /* Infinite loop */
  while (1)
  {
  }
}
#endif

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
770587.c
/* origin: FreeBSD /usr/src/lib/msun/src/e_asinf.c */
/*
 * Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
 */
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */
#include "libm.h"

/* pi/2 held in double precision: the final subtraction in asinf is done in
 * double and rounded to float only once, preserving accuracy. */
static const double pio2 = 1.570796326794896558e+00;

static const float
/* coefficients for R(x^2) */
pS0 =  1.6666586697e-01,
pS1 = -4.2743422091e-02,
pS2 = -8.6563630030e-03,
qS1 = -7.0662963390e-01;

/* Rational (3,1) minimax approximation R(z), z = x*x, used as the correction
 * term so that asin(x) ~ x + x*R(x*x) on the core interval. */
static float R(float z)
{
	float_t p, q;
	p = z*(pS0+z*(pS1+z*pS2));   /* numerator polynomial in z */
	q = 1.0f+z*qS1;              /* denominator polynomial in z */
	return p/q;
}

/* asinf(x): single-precision arcsine.
 * Argument ranges are classified from the raw IEEE-754 bits of x
 * (hx = bit pattern, ix = bits with the sign masked off). */
float asinf(float x)
{
	double s;
	float z;
	uint32_t hx,ix;

	GET_FLOAT_WORD(hx, x);
	ix = hx & 0x7fffffff;          /* 0x3f800000 is the bit pattern of 1.0f */
	if (ix >= 0x3f800000) {  /* |x| >= 1 */
		if (ix == 0x3f800000)  /* |x| == 1 */
			return x*pio2 + 0x1p-120f;  /* asin(+-1) = +-pi/2 with inexact */
		return 0/(x-x);  /* asin(|x|>1) is NaN (0/0 also raises invalid) */
	}
	if (ix < 0x3f000000) {  /* |x| < 0.5 (0x3f000000 == bits of 0.5f) */
		/* if 0x1p-126 <= |x| < 0x1p-12, avoid raising underflow */
		if (ix < 0x39800000 && ix >= 0x00800000)
			return x;      /* asin(x) ~ x to float precision here */
		return x + x*R(x*x);   /* core polynomial path */
	}
	/* 1 > |x| >= 0.5: use asin(x) = pi/2 - 2*asin(sqrt((1-|x|)/2)) */
	z = (1 - fabsf(x))*0.5f;
	s = sqrt(z);               /* double-precision sqrt for extra accuracy */
	x = pio2 - 2*(s+s*R(z));
	if (hx >> 31)              /* sign bit set: asin is odd, negate result */
		return -x;
	return x;
}
863225.c
/** @file
  PS2 Mouse Communication Interface.

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "Ps2MouseAbsolutePointer.h"
#include "CommPs2.h"

//
// Sample-rate command arguments, indexed by MOUSE_SR (10..200 samples/sec).
//
UINT8 SampleRateTbl[MaxSampleRate]  = { 0xa, 0x14, 0x28, 0x3c, 0x50, 0x64, 0xc8 };

//
// Resolution command arguments, indexed by MOUSE_RE.
//
UINT8 ResolutionTbl[MaxResolution]  = { 0, 1, 2, 3 };

/**
  Issue self test command via IsaIo interface.

  @param IsaIo Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return EFI_SUCCESS  Success to do keyboard self testing.
  @return others       Fail to do keyboard self testing.
**/
EFI_STATUS
KbcSelfTest (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  EFI_STATUS  Status;
  UINT8       Data;

  //
  // Keyboard controller self test
  //
  Status = Out8042Command (IsaIo, SELF_TEST);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Read return code: 0x55 indicates the 8042 passed its self test
  //
  Status = In8042Data (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  if (Data != 0x55) {
    return EFI_DEVICE_ERROR;
  }
  //
  // Set system flag in the controller command byte (read-modify-write)
  //
  Status = Out8042Command (IsaIo, READ_CMD_BYTE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = In8042Data (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = Out8042Command (IsaIo, WRITE_CMD_BYTE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Data |= CMD_SYS_FLAG;

  Status = Out8042Data (IsaIo, Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return EFI_SUCCESS;
}

/**
  Issue command to enable keyboard AUX functionality.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
KbcEnableAux (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  //
  // Send 8042 enable mouse command
  //
  return Out8042Command (IsaIo, ENABLE_AUX);
}

/**
  Issue command to disable keyboard AUX functionality.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
KbcDisableAux (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  //
  // Send 8042 disable mouse command
  //
  return Out8042Command (IsaIo, DISABLE_AUX);
}

/**
  Issue command to enable keyboard.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
KbcEnableKb (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  //
  // Send 8042 enable keyboard command
  //
  return Out8042Command (IsaIo, ENABLE_KB);
}

/**
  Issue command to disable keyboard.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
KbcDisableKb (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  //
  // Send 8042 disable keyboard command
  //
  return Out8042Command (IsaIo, DISABLE_KB);
}

/**
  Issue command to check keyboard status.

  @param IsaIo          Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param KeyboardEnable return whether keyboard is enable.

  @return Status of command issuing.
**/
EFI_STATUS
CheckKbStatus (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  OUT BOOLEAN                             *KeyboardEnable
  )
{
  EFI_STATUS  Status;
  UINT8       Data;

  //
  // Send command to read KBC command byte
  //
  Status = Out8042Command (IsaIo, READ_CMD_BYTE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = In8042Data (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Check keyboard enable or not (CMD_KB_STS bit set means disabled)
  //
  if ((Data & CMD_KB_STS) == CMD_KB_DIS) {
    *KeyboardEnable = FALSE;
  } else {
    *KeyboardEnable = TRUE;
  }

  return EFI_SUCCESS;
}

/**
  Issue command to reset the PS/2 mouse and verify its BAT
  (Basic Assurance Test) completion codes.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
PS2MouseReset (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  EFI_STATUS  Status;
  UINT8       Data;

  Status = Out8042AuxCommand (IsaIo, RESET_CMD, FALSE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = In8042AuxData (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Check BAT Complete Code
  //
  if (Data != PS2MOUSE_BAT1) {
    return EFI_DEVICE_ERROR;
  }

  Status = In8042AuxData (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Check BAT Complete Code
  //
  if (Data != PS2MOUSE_BAT2) {
    return EFI_DEVICE_ERROR;
  }

  return EFI_SUCCESS;
}

/**
  Issue command to set mouse's sample rate

  @param IsaIo      Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param SampleRate value of sample rate

  @return Status of command issuing.
**/
EFI_STATUS
PS2MouseSetSampleRate (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN MOUSE_SR                             SampleRate
  )
{
  EFI_STATUS  Status;

  //
  // Send auxiliary command to set mouse sample rate
  //
  Status = Out8042AuxCommand (IsaIo, SETSR_CMD, FALSE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = Out8042AuxData (IsaIo, SampleRateTbl[SampleRate]);

  return Status;
}

/**
  Issue command to set mouse's resolution.

  @param IsaIo      Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Resolution value of resolution

  @return Status of command issuing.
**/
EFI_STATUS
PS2MouseSetResolution (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN MOUSE_RE                             Resolution
  )
{
  EFI_STATUS  Status;

  //
  // Send auxiliary command to set mouse resolution
  //
  Status = Out8042AuxCommand (IsaIo, SETRE_CMD, FALSE);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = Out8042AuxData (IsaIo, ResolutionTbl[Resolution]);

  return Status;
}

/**
  Issue command to set mouse's scaling.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Scaling value of scaling

  @return Status of command issuing.
**/
EFI_STATUS
PS2MouseSetScaling (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN MOUSE_SF                             Scaling
  )
{
  UINT8 Command;

  Command = (UINT8) (Scaling == Scaling1 ? SETSF1_CMD : SETSF2_CMD);

  //
  // Send auxiliary command to set mouse scaling data
  //
  return Out8042AuxCommand (IsaIo, Command, FALSE);
}

/**
  Issue command to enable Ps2 mouse.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @return Status of command issuing.
**/
EFI_STATUS
PS2MouseEnable (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  //
  // Send auxiliary command to enable mouse
  //
  return Out8042AuxCommand (IsaIo, ENABLE_CMD, FALSE);
}

/**
  Get mouse packet . Only care first 3 bytes

  @param MouseAbsolutePointerDev  Pointer to PS2 Absolute Pointer Simulation Device Private Data Structure

  @retval EFI_NOT_READY  Mouse Device not ready to input data packet, or some error happened during getting the packet
  @retval EFI_SUCCESS    The data packet is gotten successfully.

**/
EFI_STATUS
PS2MouseGetPacket (
  PS2_MOUSE_ABSOLUTE_POINTER_DEV     *MouseAbsolutePointerDev
  )

{
  EFI_STATUS  Status;
  BOOLEAN     KeyboardEnable;
  UINT8       Packet[PS2_PACKET_LENGTH];
  UINT8       Data;
  UINTN       Count;
  UINTN       State;
  INT16       RelativeMovementX;
  INT16       RelativeMovementY;

  KeyboardEnable  = FALSE;
  Count           = 1;
  State           = PS2_READ_BYTE_ONE;

  //
  // State machine to get mouse packet
  //
  while (1) {

    switch (State) {
    case PS2_READ_BYTE_ONE:
      //
      // Read mouse first byte data, if failed, immediately return
      //
      KbcDisableAux (MouseAbsolutePointerDev->IsaIo);
      Status = PS2MouseRead (MouseAbsolutePointerDev->IsaIo, &Data, &Count, State);
      if (EFI_ERROR (Status)) {
        KbcEnableAux (MouseAbsolutePointerDev->IsaIo);
        return EFI_NOT_READY;
      }

      if (Count != 1) {
        KbcEnableAux (MouseAbsolutePointerDev->IsaIo);
        return EFI_NOT_READY;
      }

      if (IS_PS2_SYNC_BYTE (Data)) {
        Packet[0] = Data;
        State     = PS2_READ_DATA_BYTE;

        //
        // Temporarily disable the keyboard so its bytes do not interleave
        // with the remaining mouse packet bytes; remember its prior state.
        //
        CheckKbStatus (MouseAbsolutePointerDev->IsaIo, &KeyboardEnable);
        KbcDisableKb (MouseAbsolutePointerDev->IsaIo);
        KbcEnableAux (MouseAbsolutePointerDev->IsaIo);
      }
      break;

    case PS2_READ_DATA_BYTE:
      Count   = 2;
      Status  = PS2MouseRead (MouseAbsolutePointerDev->IsaIo, (Packet + 1), &Count, State);
      if (EFI_ERROR (Status)) {
        if (KeyboardEnable) {
          KbcEnableKb (MouseAbsolutePointerDev->IsaIo);
        }

        return EFI_NOT_READY;
      }

      if (Count != 2) {
        if (KeyboardEnable) {
          KbcEnableKb (MouseAbsolutePointerDev->IsaIo);
        }

        return EFI_NOT_READY;
      }

      State = PS2_PROCESS_PACKET;
      break;

    case PS2_PROCESS_PACKET:
      if (KeyboardEnable) {
        KbcEnableKb (MouseAbsolutePointerDev->IsaIo);
      }
      //
      // Decode the packet
      //
      RelativeMovementX = Packet[1];
      RelativeMovementY = Packet[2];
      //
      //               Bit 7   |    Bit 6   |    Bit 5   |   Bit 4    |   Bit 3  |   Bit 2    |   Bit 1   |   Bit 0
      //  Byte 0  | Y overflow | X overflow | Y sign bit | X sign bit | Always 1 | Middle Btn | Right Btn | Left Btn
      //  Byte 1  |                                           8 bit X Movement
      //  Byte 2  |                                           8 bit Y Movement
      //
      // X sign bit + 8 bit X Movement : 9-bit signed twos complement integer that presents the relative displacement of the device in the X direction since the last data transmission.
      // Y sign bit + 8 bit Y Movement : Same as X sign bit + 8 bit X Movement.
      //
      //
      // First, Clear X and Y high 8 bits
      //
      RelativeMovementX = (INT16) (RelativeMovementX & 0xFF);
      RelativeMovementY = (INT16) (RelativeMovementY & 0xFF);
      //
      // Second, if the 9-bit signed twos complement integer is negative, set the high 8 bit 0xff
      //
      if ((Packet[0] & 0x10) != 0) {
        RelativeMovementX = (INT16) (RelativeMovementX | 0xFF00);
      }
      if ((Packet[0] & 0x20) != 0) {
        RelativeMovementY = (INT16) (RelativeMovementY | 0xFF00);
      }

      //
      // Update mouse state
      //
      // BUGFIX: the previous code computed ActiveButtons from
      // (LButton || RButton), a logical OR that collapses both buttons into
      // a single 0/1 value and silently discards the right-button bit.
      // Byte 0 already carries Left in bit 0 and Right in bit 1 (see the
      // layout above), so mask those two bits directly.
      //
      MouseAbsolutePointerDev->State.CurrentX += RelativeMovementX;
      MouseAbsolutePointerDev->State.CurrentY -= RelativeMovementY;
      MouseAbsolutePointerDev->State.CurrentZ = 0;
      MouseAbsolutePointerDev->State.ActiveButtons = (UINT8) (Packet[0] & 0x3);
      MouseAbsolutePointerDev->StateChanged = TRUE;

      return EFI_SUCCESS;
    }
  }
}

/**
  Read data via IsaIo protocol with given number.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Buffer  Buffer receive data of mouse
  @param BufSize The size of buffer
  @param State   Check input or read data

  @return status of reading mouse data.
**/
EFI_STATUS
PS2MouseRead (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  OUT VOID                                *Buffer,
  IN OUT UINTN                            *BufSize,
  IN  UINTN                               State
  )
{
  EFI_STATUS  Status;
  UINTN       BytesRead;

  Status    = EFI_SUCCESS;
  BytesRead = 0;

  if (State == PS2_READ_BYTE_ONE) {
    //
    // Check input for mouse
    //
    Status = CheckForInput (IsaIo);

    if (EFI_ERROR (Status)) {
      return Status;
    }
  }

  while (BytesRead < *BufSize) {

    Status = WaitOutputFull (IsaIo, TIMEOUT);
    if (EFI_ERROR (Status)) {
      break;
    }

    IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, Buffer);

    BytesRead++;
    Buffer = (UINT8 *) Buffer + 1;
  }
  //
  // Verify the correct number of bytes read
  //
  if (BytesRead == 0 || BytesRead != *BufSize) {
    Status = EFI_NOT_FOUND;
  }

  *BufSize = BytesRead;
  return Status;
}

//
// 8042 I/O function
//
/**
  I/O work flow of outing 8042 command.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Command I/O command.

  @retval EFI_SUCCESS Success to execute I/O work flow
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
Out8042Command (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINT8                                Command
  )
{
  EFI_STATUS  Status;
  UINT8       Data;

  //
  // Wait keyboard controller input buffer empty
  //
  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Send command
  //
  Data = Command;
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Data);

  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return EFI_SUCCESS;
}

/**
  I/O work flow of outing 8042 data.

  @param IsaIo Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Data  Data value

  @retval EFI_SUCCESS Success to execute I/O work flow
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
Out8042Data (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINT8                                Data
  )
{
  EFI_STATUS  Status;
  UINT8       Temp;
  //
  // Wait keyboard controller input buffer empty
  //
  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Temp = Data;
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, &Temp);

  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return EFI_SUCCESS;
}

/**
  I/O work flow of in 8042 data.

  @param IsaIo Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Data  Data value

  @retval EFI_SUCCESS Success to execute I/O work flow
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
In8042Data (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN OUT UINT8                            *Data
  )
{
  UINTN Delay;
  UINT8 Temp;

  //
  // Poll the output-buffer-full status bit in 50 us steps until TIMEOUT.
  //
  Delay = TIMEOUT / 50;

  do {
    IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Temp);

    //
    // Check keyboard controller status bit 0(output buffer status)
    //
    if ((Temp & KBC_OUTB) == KBC_OUTB) {
      break;
    }

    gBS->Stall (50);
    Delay--;
  } while (Delay != 0);

  if (Delay == 0) {
    return EFI_TIMEOUT;
  }

  IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, Data);

  return EFI_SUCCESS;
}

/**
  I/O work flow of outing 8042 Aux command.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Command Aux I/O command
  @param Resend  Whether need resend the Aux command.

  @retval EFI_SUCCESS Success to execute I/O work flow
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
Out8042AuxCommand (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINT8                                Command,
  IN BOOLEAN                              Resend
  )
{
  EFI_STATUS  Status;
  UINT8       Data;

  //
  // Wait keyboard controller input buffer empty
  //
  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Send write to auxiliary device command
  //
  Data = WRITE_AUX_DEV;
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Data);

  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Send auxiliary device command
  //
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, &Command);

  //
  // Read return code
  //
  Status = In8042AuxData (IsaIo, &Data);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  if (Data == PS2_ACK) {
    //
    // Receive mouse acknowledge, command send success
    //
    return EFI_SUCCESS;

  } else if (Resend) {
    //
    // Resend fail (the command has already been retried once)
    //
    return EFI_DEVICE_ERROR;

  } else if (Data == PS2_RESEND) {
    //
    // Device asked for a resend; retry exactly once (Resend = TRUE)
    //
    Status = Out8042AuxCommand (IsaIo, Command, TRUE);
    if (EFI_ERROR (Status)) {
      return Status;
    }

  } else {
    //
    // Invalid return code
    //
    return EFI_DEVICE_ERROR;

  }

  return EFI_SUCCESS;
}

/**
  I/O work flow of outing 8042 Aux data.

  @param IsaIo Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Data  Data to send to the auxiliary device

  @retval EFI_SUCCESS Success to execute I/O work flow.
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
Out8042AuxData (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINT8                                Data
  )
{
  EFI_STATUS  Status;
  UINT8       Temp;
  //
  // Wait keyboard controller input buffer empty
  //
  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Send write to auxiliary device command
  //
  Temp = WRITE_AUX_DEV;
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Temp);

  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Temp = Data;
  IsaIo->Io.Write (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, &Temp);

  Status = WaitInputEmpty (IsaIo, TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return EFI_SUCCESS;
}

/**
  I/O work flow of in 8042 Aux data.

  @param IsaIo Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Data  Buffer holding return value.

  @retval EFI_SUCCESS Success to execute I/O work flow
  @retval EFI_TIMEOUT Keyboard controller time out.
**/
EFI_STATUS
In8042AuxData (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN OUT UINT8                            *Data
  )
{
  EFI_STATUS  Status;

  //
  // wait for output data (BAT_TIMEOUT covers the mouse's long reset/BAT delay)
  //
  Status = WaitOutputFull (IsaIo, BAT_TIMEOUT);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_DATA_PORT, 1, Data);

  return EFI_SUCCESS;
}

/**
  Check keyboard controller status, if it is output buffer full and for auxiliary device.

  @param IsaIo  Pointer to instance of EFI_ISA_IO_PROTOCOL

  @retval EFI_SUCCESS   Keyboard controller is ready
  @retval EFI_NOT_READY Keyboard controller is not ready
**/
EFI_STATUS
CheckForInput (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo
  )
{
  UINT8 Data;

  IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Data);

  //
  // Check keyboard controller status, if it is output buffer full and for auxiliary device
  //
  if ((Data & (KBC_OUTB | KBC_AUXB)) != (KBC_OUTB | KBC_AUXB)) {
    return EFI_NOT_READY;
  }

  return EFI_SUCCESS;
}

/**
  I/O work flow to wait input buffer empty in given time.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Timeout Waiting time.

  @retval EFI_TIMEOUT if input is still not empty in given time.
  @retval EFI_SUCCESS input is empty.
**/
EFI_STATUS
WaitInputEmpty (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINTN                                Timeout
  )
{
  UINTN Delay;
  UINT8 Data;

  //
  // Poll in 50 us steps until the input buffer empties or Timeout elapses.
  //
  Delay = Timeout / 50;

  do {
    IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Data);

    //
    // Check keyboard controller status bit 1(input buffer status)
    //
    if ((Data & KBC_INPB) == 0) {
      break;
    }

    gBS->Stall (50);
    Delay--;
  } while (Delay != 0);

  if (Delay == 0) {
    return EFI_TIMEOUT;
  }

  return EFI_SUCCESS;
}

/**
  I/O work flow to wait output buffer full in given time.

  @param IsaIo   Pointer to instance of EFI_ISA_IO_PROTOCOL
  @param Timeout given time

  @retval EFI_TIMEOUT  output is not full in given time
  @retval EFI_SUCCESS  output is full in given time.
**/
EFI_STATUS
WaitOutputFull (
  IN EFI_ISA_IO_PROTOCOL                  *IsaIo,
  IN UINTN                                Timeout
  )
{
  UINTN Delay;
  UINT8 Data;

  //
  // Poll in 50 us steps until aux output data is available or Timeout elapses.
  //
  Delay = Timeout / 50;

  do {
    IsaIo->Io.Read (IsaIo, EfiIsaIoWidthUint8, KBC_CMD_STS_PORT, 1, &Data);

    //
    // Check keyboard controller status bit 0(output buffer status)
    //  & bit5(output buffer for auxiliary device)
    //
    if ((Data & (KBC_OUTB | KBC_AUXB)) == (KBC_OUTB | KBC_AUXB)) {
      break;
    }

    gBS->Stall (50);
    Delay--;
  } while (Delay != 0);

  if (Delay == 0) {
    return EFI_TIMEOUT;
  }

  return EFI_SUCCESS;
}
256914.c
/* * Copyright (c) 2013, Swedish Institute of Computer Science * Copyright (c) 2010, Vrije Universiteit Brussel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 *
 * Authors: Simon Duquennoy <[email protected]>
 *          Joris Borms <[email protected]>
 */

#include "contiki.h"

#include <stddef.h>
#include <string.h>
#include <stdio.h>

#include "lib/memb.h"
#include "lib/list.h"
#include "net/nbr-table.h"

#define DEBUG 0
#if DEBUG
#include <stdio.h>
#include "sys/ctimer.h"
static void handle_periodic_timer(void *ptr);
static struct ctimer periodic_timer;
static uint8_t initialized = 0;
static void print_table();
#define PRINTF(...) printf(__VA_ARGS__)
#else
#define PRINTF(...)
#endif

/* This is the callback function that will be called when there is a
 * nbr-policy active
 **/
#ifdef NBR_TABLE_FIND_REMOVABLE
const linkaddr_t *NBR_TABLE_FIND_REMOVABLE(nbr_table_reason_t reason, void *data);
#endif /* NBR_TABLE_FIND_REMOVABLE */


/* List of link-layer addresses of the neighbors, used as key in the tables */
typedef struct nbr_table_key {
  struct nbr_table_key *next;
  linkaddr_t lladdr;
} nbr_table_key_t;

/* For each neighbor, a map of the tables that use the neighbor.
 * As we are using uint8_t, we have a maximum of 8 tables in the system */
static uint8_t used_map[NBR_TABLE_MAX_NEIGHBORS];
/* For each neighbor, a map of the tables that lock the neighbor */
static uint8_t locked_map[NBR_TABLE_MAX_NEIGHBORS];
/* The maximum number of tables */
#define MAX_NUM_TABLES 8
/* A list of pointers to tables in use */
static struct nbr_table *all_tables[MAX_NUM_TABLES];
/* The current number of tables */
static unsigned num_tables;

/* The neighbor address table */
MEMB(neighbor_addr_mem, nbr_table_key_t, NBR_TABLE_MAX_NEIGHBORS);
LIST(nbr_table_keys);

/*---------------------------------------------------------------------------*/
/* Get a key from a neighbor index */
/* NOTE: indexes directly into the MEMB backing array; valid only for indexes
 * previously handed out by the same MEMB. */
static nbr_table_key_t *
key_from_index(int index)
{
  return index != -1 ? &((nbr_table_key_t *)neighbor_addr_mem.mem)[index] : NULL;
}
/*---------------------------------------------------------------------------*/
/* Get an item from its neighbor index */
/* Each table stores its per-neighbor items contiguously, item_size bytes apart. */
static nbr_table_item_t *
item_from_index(nbr_table_t *table, int index)
{
  return table != NULL && index != -1 ? (char *)table->data + index * table->item_size : NULL;
}
/*---------------------------------------------------------------------------*/
/* Get the neighbor index of an item */
/* Pointer subtraction on the MEMB array yields the element index. */
static int
index_from_key(nbr_table_key_t *key)
{
  return key != NULL ? key - (nbr_table_key_t *)neighbor_addr_mem.mem : -1;
}
/*---------------------------------------------------------------------------*/
/* Get the neighbor index of an item */
static int
index_from_item(nbr_table_t *table, const nbr_table_item_t *item)
{
  return table != NULL && item != NULL ? ((int)((char *)item - (char *)table->data)) / table->item_size : -1;
}
/*---------------------------------------------------------------------------*/
/* Get an item from its key */
static nbr_table_item_t *
item_from_key(nbr_table_t *table, nbr_table_key_t *key)
{
  return item_from_index(table, index_from_key(key));
}
/*---------------------------------------------------------------------------*/
/* Get the key af an item */
static nbr_table_key_t *
key_from_item(nbr_table_t *table, const nbr_table_item_t *item)
{
  return key_from_index(index_from_item(table, item));
}
/*---------------------------------------------------------------------------*/
/* Get the index of a neighbor from its link-layer address */
static int
index_from_lladdr(const linkaddr_t *lladdr)
{
  nbr_table_key_t *key;
  /* Allow lladdr-free insertion, useful e.g. for IPv6 ND.
   * Only one such entry is possible at a time, indexed by linkaddr_null.
*/ if(lladdr == NULL) { lladdr = &linkaddr_null; } key = list_head(nbr_table_keys); while(key != NULL) { if(lladdr && linkaddr_cmp(lladdr, &key->lladdr)) { return index_from_key(key); } key = list_item_next(key); } return -1; } /*---------------------------------------------------------------------------*/ /* Get bit from "used" or "locked" bitmap */ static int nbr_get_bit(uint8_t *bitmap, nbr_table_t *table, nbr_table_item_t *item) { int item_index = index_from_item(table, item); if(table != NULL && item_index != -1) { return (bitmap[item_index] & (1 << table->index)) != 0; } else { return 0; } return 0; } /*---------------------------------------------------------------------------*/ /* Set bit in "used" or "locked" bitmap */ static int nbr_set_bit(uint8_t *bitmap, nbr_table_t *table, nbr_table_item_t *item, int value) { int item_index = index_from_item(table, item); if(table != NULL && item_index != -1) { if(value) { bitmap[item_index] |= 1 << table->index; } else { bitmap[item_index] &= ~(1 << table->index); } return 1; } else { return 0; } return 0; } /*---------------------------------------------------------------------------*/ static void remove_key(nbr_table_key_t *least_used_key) { int i; for(i = 0; i < MAX_NUM_TABLES; i++) { if(all_tables[i] != NULL && all_tables[i]->callback != NULL) { /* Call table callback for each table that uses this item */ nbr_table_item_t *removed_item = item_from_key(all_tables[i], least_used_key); if(nbr_get_bit(used_map, all_tables[i], removed_item) == 1) { all_tables[i]->callback(removed_item); } } } /* Empty used map */ used_map[index_from_key(least_used_key)] = 0; /* Remove neighbor from list */ list_remove(nbr_table_keys, least_used_key); } /*---------------------------------------------------------------------------*/ static nbr_table_key_t * nbr_table_allocate(nbr_table_reason_t reason, void *data) { nbr_table_key_t *key; int least_used_count = 0; nbr_table_key_t *least_used_key = NULL; key = 
  memb_alloc(&neighbor_addr_mem);
  if(key != NULL) {
    return key;
  } else {
#ifdef NBR_TABLE_FIND_REMOVABLE
    /* A removal policy is configured: ask it which neighbor may be evicted. */
    const linkaddr_t *lladdr;
    lladdr = NBR_TABLE_FIND_REMOVABLE(reason, data);
    if(lladdr == NULL) {
      /* Nothing found that can be deleted - return NULL to indicate failure */
      PRINTF("*** Not removing entry to allocate new\n");
      return NULL;
    } else {
      /* used least_used_key to indicate what is the least useful entry */
      int index;
      int locked = 0;
      if((index = index_from_lladdr(lladdr)) != -1) {
        least_used_key = key_from_index(index);
        locked = locked_map[index];
      }
      /* Allow delete of locked item? */
      /* The policy's choice overrides the lock: the lock map is force-cleared. */
      if(least_used_key != NULL && locked) {
        PRINTF("Deleting locked item!\n");
        locked_map[index] = 0;
      }
    }
#endif /* NBR_TABLE_FIND_REMOVABLE */

    if(least_used_key == NULL) {
      /* No more space, try to free a neighbor.
       * The replacement policy is the following: remove neighbor that is:
       * (1) not locked
       * (2) used by fewest tables
       * (3) oldest (the list is ordered by insertion time)
       * */
      /* Get item from first key */
      key = list_head(nbr_table_keys);
      while(key != NULL) {
        int item_index = index_from_key(key);
        int locked = locked_map[item_index];
        /* Never delete a locked item */
        if(!locked) {
          int used = used_map[item_index];
          int used_count = 0;
          /* Count how many tables are using this item */
          while(used != 0) {
            if((used & 1) == 1) {
              used_count++;
            }
            used >>= 1;
          }
          /* Find least used item */
          if(least_used_key == NULL || used_count < least_used_count) {
            least_used_key = key;
            least_used_count = used_count;
            if(used_count == 0) { /* We won't find any least used item */
              break;
            }
          }
        }
        key = list_item_next(key);
      }
    }

    if(least_used_key == NULL) {
      /* We haven't found any unlocked item, allocation fails */
      return NULL;
    } else {
      /* Reuse least used item */
      /* remove_key() clears the entry but keeps its MEMB slot allocated,
       * so the key can be handed back to the caller directly. */
      remove_key(least_used_key);
      return least_used_key;
    }
  }
}
/*---------------------------------------------------------------------------*/
/* Register a new neighbor table.
To be used at initialization by modules * using a neighbor table */ int nbr_table_register(nbr_table_t *table, nbr_table_callback *callback) { #if DEBUG if(!initialized) { initialized = 1; /* schedule a debug printout per minute */ ctimer_set(&periodic_timer, CLOCK_SECOND * 60, handle_periodic_timer, NULL); } #endif if(nbr_table_is_registered(table)) { /* Table already registered, just update callback */ table->callback = callback; return 1; } if(num_tables < MAX_NUM_TABLES) { table->index = num_tables++; table->callback = callback; all_tables[table->index] = table; return 1; } else { /* Maximum number of tables exceeded */ return 0; } } /*---------------------------------------------------------------------------*/ /* Test whether a specified table has been registered or not */ int nbr_table_is_registered(nbr_table_t *table) { if(table != NULL && table->index >= 0 && table->index < MAX_NUM_TABLES && all_tables[table->index] == table) { return 1; } return 0; } /*---------------------------------------------------------------------------*/ /* Returns the first item of the current table */ nbr_table_item_t * nbr_table_head(nbr_table_t *table) { /* Get item from first key */ printf("+++++++++++nbr_table_keys: %x++++++++++\n",(uint32_t)nbr_table_keys); printf("+++++++++++*nbr_table_keys: %x++++++++++\n",(uint32_t)*nbr_table_keys); nbr_table_item_t *item = item_from_key(table, list_head(nbr_table_keys)); /* Item is the first neighbor, now check is it is in the current table */ if(nbr_get_bit(used_map, table, item)) { return item; } else { return nbr_table_next(table, item); } } /*---------------------------------------------------------------------------*/ /* Iterates over the current table */ nbr_table_item_t * nbr_table_next(nbr_table_t *table, nbr_table_item_t *item) { do { void *key = key_from_item(table, item); key = list_item_next(key); /* Loop until the next item is in the current table */ item = item_from_key(table, key); } while(item && !nbr_get_bit(used_map, 
table, item)); return item; } /*---------------------------------------------------------------------------*/ /* Add a neighbor indexed with its link-layer address */ nbr_table_item_t * nbr_table_add_lladdr(nbr_table_t *table, const linkaddr_t *lladdr, nbr_table_reason_t reason, void *data) { int index; nbr_table_item_t *item; nbr_table_key_t *key; if(table == NULL) { return NULL; } /* Allow lladdr-free insertion, useful e.g. for IPv6 ND. * Only one such entry is possible at a time, indexed by linkaddr_null. */ if(lladdr == NULL) { lladdr = &linkaddr_null; } if((index = index_from_lladdr(lladdr)) == -1) { /* Neighbor not yet in table, let's try to allocate one */ key = nbr_table_allocate(reason, data); /* No space available for new entry */ if(key == NULL) { return NULL; } /* Add neighbor to list */ list_add(nbr_table_keys, key); /* Get index from newly allocated neighbor */ index = index_from_key(key); /* Set link-layer address */ linkaddr_copy(&key->lladdr, lladdr); } /* Get item in the current table */ item = item_from_index(table, index); /* Initialize item data and set "used" bit */ memset(item, 0, table->item_size); nbr_set_bit(used_map, table, item, 1); #if DEBUG print_table(); #endif return item; } /*---------------------------------------------------------------------------*/ /* Get an item from its link-layer address */ void * nbr_table_get_from_lladdr(nbr_table_t *table, const linkaddr_t *lladdr) { void *item = item_from_index(table, index_from_lladdr(lladdr)); return nbr_get_bit(used_map, table, item) ? 
  item : NULL;
}
/*---------------------------------------------------------------------------*/
/* Removes a neighbor from the current table (unset "used" bit) */
/* Also clears the table's lock bit so a removed entry cannot stay pinned. */
int
nbr_table_remove(nbr_table_t *table, void *item)
{
  int ret = nbr_set_bit(used_map, table, item, 0);
  nbr_set_bit(locked_map, table, item, 0);
  return ret;
}
/*---------------------------------------------------------------------------*/
/* Lock a neighbor for the current table (set "locked" bit) */
int
nbr_table_lock(nbr_table_t *table, void *item)
{
#if DEBUG
  int i = index_from_item(table, item);
  PRINTF("*** Lock %d\n", i);
#endif
  return nbr_set_bit(locked_map, table, item, 1);
}
/*---------------------------------------------------------------------------*/
/* Release the lock on a neighbor for the current table (unset "locked" bit) */
int
nbr_table_unlock(nbr_table_t *table, void *item)
{
#if DEBUG
  int i = index_from_item(table, item);
  PRINTF("*** Unlock %d\n", i);
#endif
  return nbr_set_bit(locked_map, table, item, 0);
}
/*---------------------------------------------------------------------------*/
/* Get link-layer address of an item */
linkaddr_t *
nbr_table_get_lladdr(nbr_table_t *table, const void *item)
{
  nbr_table_key_t *key = key_from_item(table, item);
  return key != NULL ? &key->lladdr : NULL;
}
/*---------------------------------------------------------------------------*/
#if DEBUG
static void
print_table()
{
  int i, j;
  /* Printout all neighbors and which tables they are used in */
  PRINTF("NBR TABLE:\n");
  for(i = 0; i < NBR_TABLE_MAX_NEIGHBORS; i++) {
    if(used_map[i] > 0) {
      PRINTF(" %02d %02d",i , key_from_index(i)->lladdr.u8[LINKADDR_SIZE - 1]);
      /* NOTE(review): j is int while num_tables is unsigned — a
       * signed/unsigned comparison; harmless for small counts but worth
       * confirming against project warning settings. */
      for(j = 0; j < num_tables; j++) {
        PRINTF(" [%d:%d]", (used_map[i] & (1 << j)) != 0,
               (locked_map[i] & (1 << j)) != 0);
      }
      PRINTF("\n");
    }
  }
}
/*---------------------------------------------------------------------------*/
static void
handle_periodic_timer(void *ptr)
{
  print_table();
  ctimer_reset(&periodic_timer);
}
#endif
218798.c
/*
Qn : Write a C program to show the common factors of two given numbers
and also show the summation of that common factors.

Sample Input    Sample Output
9 45            1 3 9
                Sum= 13
21 63           1 3 7 21
                Sum= 32
*/

/*
Author : Arnob Mahmud
mail   : [email protected]
*/

#include <stdio.h>

int main(int argc, char const *argv[])
{
    int n1, n2, sum = 0;

    /* scanf returns the number of items assigned; reading n1/n2 without
       this check was undefined behavior on malformed input. */
    if (scanf("%d %d", &n1, &n2) != 2)
    {
        fprintf(stderr, "Error: expected two integers.\n");
        return 1;
    }

    /* A common factor cannot exceed the smaller operand, so only iterate
       up to min(n1, n2) instead of always up to n1. */
    int limit = (n1 < n2) ? n1 : n2;
    for (int i = 1; i <= limit; i++)
    {
        if (n1 % i == 0 && n2 % i == 0)
        {
            printf("%d ", i);
            sum += i;
        }
    }
    /* "Sum= " (no space before '=') matches the sample output in the
       problem statement above; also terminate the final line. */
    printf("\nSum= %d\n", sum);
    return 0;
}
517093.c
// ------------------------------------------------------------------------------ // // Project: Fibonacci number sequence generator // File name: main.c // Description: Main C file to invoke the sequence generator based on index input // License: MIT license (see included file LICENSE) // Build: Use attached Makefile (see run.bash file for options) // // Commandline Parameter: <sequence index> // Usage: fibonacci <sequence index> // // Author: Selva Senthilvelan // Date: 02/04/2020 // Version: 1.0 // // ------------------------------------------------------------------------------ // Header Files # include <stdio.h> # include <stdlib.h> # include "fibonacci.h" // Main Function int main( int argc, char **argv ) { // Local variable definitions int pos; // integer value of commandline input unsigned int n; // Index value after input validation // Handle commandline parameter if(argc > 1) { pos = atoi(argv[1]); if((pos==0) && (argv[1][0]!='0')) { pos = -1; } } else { printf("Enter the Fibonacci index to compute: "); scanf("%d", &pos); printf("\n"); } if(( pos < 0 ) || ( pos > 100 )) { printf( "Error (%s:%d): Index has to be an integer between 0 to 100. Exiting...\n", __FILE__, __LINE__ ); exit( 1 ); } else { n = (unsigned)pos; } // Call the recursive fibonacci function and print the result printf("Recursive: %d fibonacci number is: %f\n\n", n, fib_recursive_d( n )); // Call the iterative fibonacci function and print the result printf("Iterative: %d fibonacci number is: %f\n\n", n, fib_iterative_d( n )); // Call the optimized recursive fibonacci function and print the result fib_recursive_optimized_init_d(); // Call the init function printf("Recursive optimized: %d fibonacci number is: %f\n\n", n, fib_recursive_optimized_d( n ) ); } // ------------------------------------EOF------------------------------------------
791349.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE390_Error_Without_Action__char_w32CreateMutex_05.c
Label Definition File: CWE390_Error_Without_Action.string.label.xml
Template File: point-flaw-05.tmpl.c
*/
/*
 * @description
 * CWE: 390 Detection of Error Condition Without Action
 * Sinks: w32CreateMutex
 *    GoodSink: Check the return value of CreateMutexA() and handle it properly
 *    BadSink : Check to see if CreateMutexA() failed, but do nothing about it
 * Flow Variant: 05 Control flow: if(static_t) and if(static_f)
 *
 * NOTE(review): this is a SARD/Juliet benchmark fixture — the "FLAW" blocks
 * below are intentional test content for static analyzers and must NOT be
 * repaired.
 */

#include "std_testcase.h"

#include <windows.h>

#define BUFSIZE 1024

/* The two variables below are not defined as "const", but are never assigned
   any other value, so a tool should be able to identify that reads of these
   will always return their initialized values. */
static int static_t = 1; /* true */
static int static_f = 0; /* false */

#ifndef OMITBAD

void CWE390_Error_Without_Action__char_w32CreateMutex_05_bad()
{
    if(static_t)
    {
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FLAW: check for an error, but do nothing if one occurred */
            if (hMutex == NULL)
            {
                /* do nothing */
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                /* do nothing */
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FIX: Check the return value of CreateMutex() for NULL AND
             * Check the return value of GetLastError() for ERROR_ALREADY_EXISTS */
            if (hMutex == NULL)
            {
                exit(1);
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                exit(1);
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* good1() uses if(static_f) instead of if(static_t) */
static void good1()
{
    if(static_f)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FLAW: check for an error, but do nothing if one occurred */
            if (hMutex == NULL)
            {
                /* do nothing */
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                /* do nothing */
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
    else
    {
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FIX: Check the return value of CreateMutex() for NULL AND
             * Check the return value of GetLastError() for ERROR_ALREADY_EXISTS */
            if (hMutex == NULL)
            {
                exit(1);
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                exit(1);
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
}

/* good2() reverses the bodies in the if statement */
static void good2()
{
    if(static_t)
    {
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FIX: Check the return value of CreateMutex() for NULL AND
             * Check the return value of GetLastError() for ERROR_ALREADY_EXISTS */
            if (hMutex == NULL)
            {
                exit(1);
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                exit(1);
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            HANDLE hMutex = NULL;
            hMutex = CreateMutexA(NULL, FALSE, NULL);
            /* FLAW: check for an error, but do nothing if one occurred */
            if (hMutex == NULL)
            {
                /* do nothing */
            }
            if (GetLastError() == ERROR_ALREADY_EXISTS)
            {
                /* do nothing */
            }
            /* We'll leave out most of the implementation since it has nothing to do with the CWE
             * and since the checkers are looking for certain function calls anyway */
            CloseHandle(hMutex);
        }
    }
}

void CWE390_Error_Without_Action__char_w32CreateMutex_05_good()
{
    good1();
    good2();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE390_Error_Without_Action__char_w32CreateMutex_05_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE390_Error_Without_Action__char_w32CreateMutex_05_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
150637.c
/* Compile-time dispatch: include the prebuilt libvpx configuration matching
   the target platform and toolchain. No code is generated here directly. */
#if defined(VPX_X86_ASM)

#if defined(_WIN64)
/* 64 bit Windows, MSVC. */
#include "vpx_config_x86_64-win64-vs8.c"
#elif defined(WIN32)
/* 32 bit Windows, MSVC. */
#include "vpx_config_x86-win32-vs8.c"
#elif defined(__APPLE__) && defined(__x86_64__)
/* 64 bit MacOS. */
#include "vpx_config_x86_64-darwin9-gcc.c"
#elif defined(__APPLE__) && defined(__i386__)
/* 32 bit MacOS. */
#include "vpx_config_x86-darwin9-gcc.c"
#elif defined(__ELF__) && (defined(__i386) || defined(__i386__))
/* 32 bit ELF platforms. */
#include "vpx_config_x86-linux-gcc.c"
#elif defined(__ELF__) && (defined(__x86_64) || defined(__x86_64__))
/* 64 bit ELF platforms. */
#include "vpx_config_x86_64-linux-gcc.c"
#else
#error VPX_X86_ASM is defined, but assembly not supported on this platform!
#endif

#elif defined(VPX_ARM_ASM)

#if defined(__linux__) && defined(__GNUC__)
#include "vpx_config_arm-linux-gcc.c"
#else
#error VPX_ARM_ASM is defined, but assembly not supported on this platform!
#endif

#else
/* Assume generic GNU/GCC configuration. */
#include "vpx_config_generic-gnu.c"
#endif
177171.c
#include <u.h>
#include <libc.h>
#include <draw.h>

/* Max characters processed per cachechars() batch. */
enum
{
	Max = 100
};

/* The eight public entry points below are thin wrappers around _string(),
 * differing only in string vs rune input, bounded vs unbounded length
 * (1<<24 acts as "unbounded"), and default SoverD vs explicit draw op. */
Point
string(Image *dst, Point pt, Image *src, Point sp, Font *f, char *s)
{
	return _string(dst, pt, src, sp, f, s, nil, 1<<24, dst->clipr, nil, ZP, SoverD);
}

Point
stringop(Image *dst, Point pt, Image *src, Point sp, Font *f, char *s, Drawop op)
{
	return _string(dst, pt, src, sp, f, s, nil, 1<<24, dst->clipr, nil, ZP, op);
}

Point
stringn(Image *dst, Point pt, Image *src, Point sp, Font *f, char *s, int len)
{
	return _string(dst, pt, src, sp, f, s, nil, len, dst->clipr, nil, ZP, SoverD);
}

Point
stringnop(Image *dst, Point pt, Image *src, Point sp, Font *f, char *s, int len, Drawop op)
{
	return _string(dst, pt, src, sp, f, s, nil, len, dst->clipr, nil, ZP, op);
}

Point
runestring(Image *dst, Point pt, Image *src, Point sp, Font *f, Rune *r)
{
	return _string(dst, pt, src, sp, f, nil, r, 1<<24, dst->clipr, nil, ZP, SoverD);
}

Point
runestringop(Image *dst, Point pt, Image *src, Point sp, Font *f, Rune *r, Drawop op)
{
	return _string(dst, pt, src, sp, f, nil, r, 1<<24, dst->clipr, nil, ZP, op);
}

Point
runestringn(Image *dst, Point pt, Image *src, Point sp, Font *f, Rune *r, int len)
{
	return _string(dst, pt, src, sp, f, nil, r, len, dst->clipr, nil, ZP, SoverD);
}

Point
runestringnop(Image *dst, Point pt, Image *src, Point sp, Font *f, Rune *r, int len, Drawop op)
{
	return _string(dst, pt, src, sp, f, nil, r, len, dst->clipr, nil, ZP, op);
}

/* Core implementation: encodes 's'/'x' draw-protocol string messages in
 * batches of up to Max cached characters, switching subfonts as needed.
 * Exactly one of s (UTF-8) or r (runes) is non-nil. */
Point
_string(Image *dst, Point pt, Image *src, Point sp, Font *f, char *s, Rune *r, int len, Rectangle clipr, Image *bg, Point bgp, Drawop op)
{
	int m, n, wid, max;
	ushort cbuf[Max], *c, *ec;
	uchar *b;
	char *subfontname;
	char **sptr;
	Rune **rptr;
	Font *def;
	Subfont *sf;

	if(len < 0)
		sysfatal("libdraw: _string len=%d", len);
	if(s == nil){
		s = "";
		sptr = nil;
	}else
		sptr = &s;
	if(r == nil){
		r = (Rune*) L"";
		rptr = nil;
	}else
		rptr = &r;
	sf = nil;
	/* NOTE(review): the AIX branch tests *rptr (pointer-to-pointer, i.e.
	 * "rune input present") while the default branch tests *r (first rune
	 * of the input) — these are not equivalent; confirm which is intended. */
#if defined(__AIX__)
	while((*s || *rptr) && len){
#else
	while((*s || *r) && len){
#endif
		max = Max;
		if(len < max)
			max = len;
		n = cachechars(f, sptr, rptr, cbuf, max, &wid, &subfontname);
		if(n > 0){
			_setdrawop(dst->display, op);

			/* 's' message is 47 bytes of header + 2 bytes per cache index;
			 * 'x' (string-on-background) adds bg image id + bg point. */
			m = 47+2*n;
			if(bg)
				m += 4+2*4;
			b = bufimage(dst->display, m);
			if(b == 0){
				fprint(2, "string: %r\n");
				break;
			}
			if(bg)
				b[0] = 'x';
			else
				b[0] = 's';
			BPLONG(b+1, dst->id);
			BPLONG(b+5, src->id);
			BPLONG(b+9, f->cacheimage->id);
			BPLONG(b+13, pt.x);
			BPLONG(b+17, pt.y+f->ascent);
			BPLONG(b+21, clipr.min.x);
			BPLONG(b+25, clipr.min.y);
			BPLONG(b+29, clipr.max.x);
			BPLONG(b+33, clipr.max.y);
			BPLONG(b+37, sp.x);
			BPLONG(b+41, sp.y);
			BPSHORT(b+45, n);
			b += 47;
			if(bg){
				BPLONG(b, bg->id);
				BPLONG(b+4, bgp.x);
				BPLONG(b+8, bgp.y);
				b += 12;
			}
			ec = &cbuf[n];
			for(c=cbuf; c<ec; c++, b+=2)
				BPSHORT(b, *c);
			pt.x += wid;
			bgp.x += wid;
			agefont(f);
			len -= n;
		}
		if(subfontname){
			freesubfont(sf);
			if((sf=_getsubfont(f->display, subfontname)) == 0){
				/* fall back to the display default font once; else give up */
				def = f->display ? f->display->defaultfont : nil;
				if(def && f!=def)
					f = def;
				else
					break;
			}
			/*
			 * must not free sf until cachechars has found it in the cache
			 * and picked up its own reference.
			 */
		}
	}
	return pt;
}
438428.c
/* * AltiVec optimizations for libjpeg-turbo * * Copyright (C) 2014-2015, D. R. Commander. * All rights reserved. * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ /* SLOW INTEGER INVERSE DCT */ #include "jsimd_altivec.h" #define F_0_298 2446 /* FIX(0.298631336) */ #define F_0_390 3196 /* FIX(0.390180644) */ #define F_0_541 4433 /* FIX(0.541196100) */ #define F_0_765 6270 /* FIX(0.765366865) */ #define F_0_899 7373 /* FIX(0.899976223) */ #define F_1_175 9633 /* FIX(1.175875602) */ #define F_1_501 12299 /* FIX(1.501321110) */ #define F_1_847 15137 /* FIX(1.847759065) */ #define F_1_961 16069 /* FIX(1.961570560) */ #define F_2_053 16819 /* FIX(2.053119869) */ #define F_2_562 20995 /* FIX(2.562915447) */ #define F_3_072 25172 /* FIX(3.072711026) */ #define CONST_BITS 13 #define PASS1_BITS 2 #define DESCALE_P1 (CONST_BITS - PASS1_BITS) #define DESCALE_P2 (CONST_BITS + PASS1_BITS + 3) #define DO_IDCT(in, PASS) \ { \ /* Even part \ * \ * (Original) \ * z1 = (z2 + z3) * 0.541196100; \ * tmp2 = z1 + z3 * -1.847759065; \ * tmp3 = z1 + z2 * 0.765366865; \ * \ * (This implementation) \ * tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065); \ * tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 
0.541196100; \ */ \ \ in##26l = vec_mergeh(in##2, in##6); \ in##26h = vec_mergel(in##2, in##6); \ \ tmp3l = vec_msums(in##26l, pw_f130_f054, pd_zero); \ tmp3h = vec_msums(in##26h, pw_f130_f054, pd_zero); \ tmp2l = vec_msums(in##26l, pw_f054_mf130, pd_zero); \ tmp2h = vec_msums(in##26h, pw_f054_mf130, pd_zero); \ \ tmp0 = vec_add(in##0, in##4); \ tmp1 = vec_sub(in##0, in##4); \ \ tmp0l = vec_unpackh(tmp0); \ tmp0h = vec_unpackl(tmp0); \ tmp0l = vec_sl(tmp0l, const_bits); \ tmp0h = vec_sl(tmp0h, const_bits); \ tmp0l = vec_add(tmp0l, pd_descale_p##PASS); \ tmp0h = vec_add(tmp0h, pd_descale_p##PASS); \ \ tmp10l = vec_add(tmp0l, tmp3l); \ tmp10h = vec_add(tmp0h, tmp3h); \ tmp13l = vec_sub(tmp0l, tmp3l); \ tmp13h = vec_sub(tmp0h, tmp3h); \ \ tmp1l = vec_unpackh(tmp1); \ tmp1h = vec_unpackl(tmp1); \ tmp1l = vec_sl(tmp1l, const_bits); \ tmp1h = vec_sl(tmp1h, const_bits); \ tmp1l = vec_add(tmp1l, pd_descale_p##PASS); \ tmp1h = vec_add(tmp1h, pd_descale_p##PASS); \ \ tmp11l = vec_add(tmp1l, tmp2l); \ tmp11h = vec_add(tmp1h, tmp2h); \ tmp12l = vec_sub(tmp1l, tmp2l); \ tmp12h = vec_sub(tmp1h, tmp2h); \ \ /* Odd part */ \ \ z3 = vec_add(in##3, in##7); \ z4 = vec_add(in##1, in##5); \ \ /* (Original) \ * z5 = (z3 + z4) * 1.175875602; \ * z3 = z3 * -1.961570560; z4 = z4 * -0.390180644; \ * z3 += z5; z4 += z5; \ * \ * (This implementation) \ * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \ * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \ */ \ \ z34l = vec_mergeh(z3, z4); \ z34h = vec_mergel(z3, z4); \ \ z3l = vec_msums(z34l, pw_mf078_f117, pd_zero); \ z3h = vec_msums(z34h, pw_mf078_f117, pd_zero); \ z4l = vec_msums(z34l, pw_f117_f078, pd_zero); \ z4h = vec_msums(z34h, pw_f117_f078, pd_zero); \ \ /* (Original) \ * z1 = tmp0 + tmp3; z2 = tmp1 + tmp2; \ * tmp0 = tmp0 * 0.298631336; tmp1 = tmp1 * 2.053119869; \ * tmp2 = tmp2 * 3.072711026; tmp3 = tmp3 * 1.501321110; \ * z1 = z1 * -0.899976223; z2 = z2 * -2.562915447; \ * tmp0 += z1 + z3; tmp1 += z2 + z4; \ * 
tmp2 += z2 + z3; tmp3 += z1 + z4; \ * \ * (This implementation) \ * tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223; \ * tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447; \ * tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447); \ * tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223); \ * tmp0 += z3; tmp1 += z4; \ * tmp2 += z3; tmp3 += z4; \ */ \ \ in##71l = vec_mergeh(in##7, in##1); \ in##71h = vec_mergel(in##7, in##1); \ \ tmp0l = vec_msums(in##71l, pw_mf060_mf089, z3l); \ tmp0h = vec_msums(in##71h, pw_mf060_mf089, z3h); \ tmp3l = vec_msums(in##71l, pw_mf089_f060, z4l); \ tmp3h = vec_msums(in##71h, pw_mf089_f060, z4h); \ \ in##53l = vec_mergeh(in##5, in##3); \ in##53h = vec_mergel(in##5, in##3); \ \ tmp1l = vec_msums(in##53l, pw_mf050_mf256, z4l); \ tmp1h = vec_msums(in##53h, pw_mf050_mf256, z4h); \ tmp2l = vec_msums(in##53l, pw_mf256_f050, z3l); \ tmp2h = vec_msums(in##53h, pw_mf256_f050, z3h); \ \ /* Final output stage */ \ \ out0l = vec_add(tmp10l, tmp3l); \ out0h = vec_add(tmp10h, tmp3h); \ out7l = vec_sub(tmp10l, tmp3l); \ out7h = vec_sub(tmp10h, tmp3h); \ \ out0l = vec_sra(out0l, descale_p##PASS); \ out0h = vec_sra(out0h, descale_p##PASS); \ out7l = vec_sra(out7l, descale_p##PASS); \ out7h = vec_sra(out7h, descale_p##PASS); \ \ out0 = vec_pack(out0l, out0h); \ out7 = vec_pack(out7l, out7h); \ \ out1l = vec_add(tmp11l, tmp2l); \ out1h = vec_add(tmp11h, tmp2h); \ out6l = vec_sub(tmp11l, tmp2l); \ out6h = vec_sub(tmp11h, tmp2h); \ \ out1l = vec_sra(out1l, descale_p##PASS); \ out1h = vec_sra(out1h, descale_p##PASS); \ out6l = vec_sra(out6l, descale_p##PASS); \ out6h = vec_sra(out6h, descale_p##PASS); \ \ out1 = vec_pack(out1l, out1h); \ out6 = vec_pack(out6l, out6h); \ \ out2l = vec_add(tmp12l, tmp1l); \ out2h = vec_add(tmp12h, tmp1h); \ out5l = vec_sub(tmp12l, tmp1l); \ out5h = vec_sub(tmp12h, tmp1h); \ \ out2l = vec_sra(out2l, descale_p##PASS); \ out2h = vec_sra(out2h, descale_p##PASS); \ out5l = 
vec_sra(out5l, descale_p##PASS); \
  out5h = vec_sra(out5h, descale_p##PASS); \
  \
  out2 = vec_pack(out2l, out2h); \
  out5 = vec_pack(out5l, out5h); \
  \
  out3l = vec_add(tmp13l, tmp0l); \
  out3h = vec_add(tmp13h, tmp0h); \
  out4l = vec_sub(tmp13l, tmp0l); \
  out4h = vec_sub(tmp13h, tmp0h); \
  \
  out3l = vec_sra(out3l, descale_p##PASS); \
  out3h = vec_sra(out3h, descale_p##PASS); \
  out4l = vec_sra(out4l, descale_p##PASS); \
  out4h = vec_sra(out4h, descale_p##PASS); \
  \
  out3 = vec_pack(out3l, out3h); \
  out4 = vec_pack(out4l, out4h); \
}


/* Slow-but-accurate integer inverse DCT, AltiVec implementation.
 *
 * dct_table_  - dequantization multipliers for this component (16-bit words)
 * coef_block  - 8x8 block of quantized DCT coefficients
 * output_buf / output_col - destination sample rows and starting column
 *
 * Pass 1 operates on the coefficient columns, pass 2 on the transposed
 * result (rows); both passes are performed by the DO_IDCT() macro above.
 * Each output row receives 8 sample bytes, written as two 4-byte stores.
 *
 * NOTE(review): JCOEFPTR/JSAMPARRAY/JDIMENSION, the F_* fixed-point
 * constants, the __4X/__4X2/__8X/__16X splat helpers and TRANSPOSE() are
 * presumably defined earlier in this file / in the libjpeg headers —
 * not visible in this chunk. */
void
jsimd_idct_islow_altivec (void *dct_table_, JCOEFPTR coef_block,
                          JSAMPARRAY output_buf, JDIMENSION output_col)
{
  short *dct_table = (short *)dct_table_;
  int *outptr;

  /* One vector per coefficient row/column, plus the many temporaries the
   * DO_IDCT macro expands into. */
  __vector short row0, row1, row2, row3, row4, row5, row6, row7,
    col0, col1, col2, col3, col4, col5, col6, col7,
    quant0, quant1, quant2, quant3, quant4, quant5, quant6, quant7,
    tmp0, tmp1, tmp2, tmp3, z3, z4,
    z34l, z34h, col71l, col71h, col26l, col26h, col53l, col53h,
    row71l, row71h, row26l, row26h, row53l, row53h,
    out0, out1, out2, out3, out4, out5, out6, out7;
  __vector int tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h,
    tmp10l, tmp10h, tmp11l, tmp11h, tmp12l, tmp12h, tmp13l, tmp13h,
    z3l, z3h, z4l, z4h,
    out0l, out0h, out1l, out1h, out2l, out2h, out3l, out3h, out4l, out4h,
    out5l, out5h, out6l, out6h, out7l, out7h;
  __vector signed char outb;

  /* Constants */
  __vector short pw_zero = { __8X(0) },
    pw_f130_f054 = { __4X2(F_0_541 + F_0_765, F_0_541) },
    pw_f054_mf130 = { __4X2(F_0_541, F_0_541 - F_1_847) },
    pw_mf078_f117 = { __4X2(F_1_175 - F_1_961, F_1_175) },
    pw_f117_f078 = { __4X2(F_1_175, F_1_175 - F_0_390) },
    pw_mf060_mf089 = { __4X2(F_0_298 - F_0_899, -F_0_899) },
    pw_mf089_f060 = { __4X2(-F_0_899, F_1_501 - F_0_899) },
    pw_mf050_mf256 = { __4X2(F_2_053 - F_2_562, -F_2_562) },
    pw_mf256_f050 = { __4X2(-F_2_562, F_3_072 - F_2_562) };
  __vector unsigned short pass1_bits = { __8X(PASS1_BITS) };
  __vector int pd_zero = { __4X(0) },
    pd_descale_p1 = { __4X(1 << (DESCALE_P1 - 1)) },
    pd_descale_p2 = { __4X(1 << (DESCALE_P2 - 1)) };
  __vector unsigned int descale_p1 = { __4X(DESCALE_P1) },
    descale_p2 = { __4X(DESCALE_P2) },
    const_bits = { __4X(CONST_BITS) };
  __vector signed char pb_centerjsamp = { __16X(CENTERJSAMPLE) };

  /* Pass 1: process columns */

  col0 = vec_ld(0, coef_block);
  col1 = vec_ld(16, coef_block);
  col2 = vec_ld(32, coef_block);
  col3 = vec_ld(48, coef_block);
  col4 = vec_ld(64, coef_block);
  col5 = vec_ld(80, coef_block);
  col6 = vec_ld(96, coef_block);
  col7 = vec_ld(112, coef_block);

  /* OR all AC coefficient vectors together so a single compare can detect
   * the common "AC terms all zero" case below. */
  tmp1 = vec_or(col1, col2);
  tmp2 = vec_or(col3, col4);
  tmp1 = vec_or(tmp1, tmp2);
  tmp3 = vec_or(col5, col6);
  tmp3 = vec_or(tmp3, col7);
  tmp1 = vec_or(tmp1, tmp3);

  quant0 = vec_ld(0, dct_table);
  col0 = vec_mladd(col0, quant0, pw_zero);

  if (vec_all_eq(tmp1, pw_zero)) {
    /* AC terms all zero: each output row is just the (scaled) DC value,
     * so splat it instead of running the full IDCT. */
    col0 = vec_sl(col0, pass1_bits);

    row0 = vec_splat(col0, 0);
    row1 = vec_splat(col0, 1);
    row2 = vec_splat(col0, 2);
    row3 = vec_splat(col0, 3);
    row4 = vec_splat(col0, 4);
    row5 = vec_splat(col0, 5);
    row6 = vec_splat(col0, 6);
    row7 = vec_splat(col0, 7);
  } else {
    /* Dequantize the remaining rows and run the full column pass. */
    quant1 = vec_ld(16, dct_table);
    quant2 = vec_ld(32, dct_table);
    quant3 = vec_ld(48, dct_table);
    quant4 = vec_ld(64, dct_table);
    quant5 = vec_ld(80, dct_table);
    quant6 = vec_ld(96, dct_table);
    quant7 = vec_ld(112, dct_table);

    col1 = vec_mladd(col1, quant1, pw_zero);
    col2 = vec_mladd(col2, quant2, pw_zero);
    col3 = vec_mladd(col3, quant3, pw_zero);
    col4 = vec_mladd(col4, quant4, pw_zero);
    col5 = vec_mladd(col5, quant5, pw_zero);
    col6 = vec_mladd(col6, quant6, pw_zero);
    col7 = vec_mladd(col7, quant7, pw_zero);

    DO_IDCT(col, 1);

    TRANSPOSE(out, row);
  }

  /* Pass 2: process rows */

  DO_IDCT(row, 2);

  TRANSPOSE(out, col);

  /* Clamp to signed bytes, re-center around CENTERJSAMPLE, and store
   * 8 samples per output row (two 4-byte element stores each). */
  outb = vec_packs(col0, col0);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[0] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col1, col1);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[1] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col2, col2);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[2] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col3, col3);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[3] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col4, col4);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[4] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col5, col5);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[5] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col6, col6);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[6] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);

  outb = vec_packs(col7, col7);
  outb = vec_add(outb, pb_centerjsamp);
  outptr = (int *)(output_buf[7] + output_col);
  vec_ste((__vector int)outb, 0, outptr);
  vec_ste((__vector int)outb, 4, outptr);
}
337323.c
/* Rax -- A radix tree implementation. * * Version 1.2 -- 7 February 2019 * * Copyright (c) 2017-2019, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <string.h> #include <assert.h> #include <stdio.h> #include <errno.h> #include <math.h> #include "rax.h" #ifndef RAX_MALLOC_INCLUDE #define RAX_MALLOC_INCLUDE "rax_malloc.h" #endif #include RAX_MALLOC_INCLUDE /* This is a special pointer that is guaranteed to never have the same value * of a radix tree node. 
It's used in order to report "not found" error without
 * requiring the function to have multiple return values. */
void *raxNotFound = (void*)"rax-not-found-pointer";

/* -------------------------------- Debugging ------------------------------ */

void raxDebugShowNode(const char *msg, raxNode *n);

/* Turn debugging messages on/off by compiling with RAX_DEBUG_MSG macro on.
 * When RAX_DEBUG_MSG is defined by default Rax operations will emit a lot
 * of debugging info to the standard output, however you can still turn
 * debugging on/off in order to enable it only when you suspect there is an
 * operation causing a bug using the function raxSetDebugMsg(). */
#ifdef RAX_DEBUG_MSG
#define debugf(...)                                                            \
    if (raxDebugMsg) {                                                         \
        printf("%s:%s:%d:\t", __FILE__, __func__, __LINE__);                   \
        printf(__VA_ARGS__);                                                   \
        fflush(stdout);                                                        \
    }

#define debugnode(msg,n) raxDebugShowNode(msg,n)
#else
#define debugf(...)
#define debugnode(msg,n)
#endif

/* By default log debug info if RAX_DEBUG_MSG is defined. */
static int raxDebugMsg = 1;

/* When debug messages are enabled, turn them on/off dynamically. By
 * default they are enabled. Set the state to 0 to disable, and 1 to
 * re-enable. */
void raxSetDebugMsg(int onoff) {
    raxDebugMsg = onoff;
}

/* ------------------------- raxStack functions --------------------------
 * The raxStack is a simple stack of pointers that is capable of switching
 * from using a stack-allocated array to dynamic heap once a given number of
 * items are reached. It is used in order to retain the list of parent nodes
 * while walking the radix tree in order to implement certain operations that
 * need to navigate the tree upward.
 * ------------------------------------------------------------------------- */

/* Initialize the stack: start out on the embedded static array; no heap
 * allocation happens until more than RAX_STACK_STATIC_ITEMS are pushed. */
static inline void raxStackInit(raxStack *ts) {
    ts->stack = ts->static_items;
    ts->items = 0;
    ts->maxitems = RAX_STACK_STATIC_ITEMS;
    ts->oom = 0;
}

/* Push an item into the stack, returns 1 on success, 0 on out of memory.
 * On the first overflow of the static array the items are copied to a heap
 * allocation; afterwards the heap array is doubled as needed. On OOM the
 * 'oom' flag is set and errno is set to ENOMEM, but the stack remains in a
 * consistent (usable) state. */
static inline int raxStackPush(raxStack *ts, void *ptr) {
    if (ts->items == ts->maxitems) {
        if (ts->stack == ts->static_items) {
            ts->stack = rax_malloc(sizeof(void*)*ts->maxitems*2);
            if (ts->stack == NULL) {
                /* Restore the static buffer so the stack stays valid. */
                ts->stack = ts->static_items;
                ts->oom = 1;
                errno = ENOMEM;
                return 0;
            }
            memcpy(ts->stack,ts->static_items,sizeof(void*)*ts->maxitems);
        } else {
            void **newalloc = rax_realloc(ts->stack,sizeof(void*)*ts->maxitems*2);
            if (newalloc == NULL) {
                ts->oom = 1;
                errno = ENOMEM;
                return 0;
            }
            ts->stack = newalloc;
        }
        ts->maxitems *= 2;
    }
    ts->stack[ts->items] = ptr;
    ts->items++;
    return 1;
}

/* Pop an item from the stack, the function returns NULL if there are no
 * items to pop. */
static inline void *raxStackPop(raxStack *ts) {
    if (ts->items == 0) return NULL;
    ts->items--;
    return ts->stack[ts->items];
}

/* Return the stack item at the top of the stack without actually consuming
 * it. */
static inline void *raxStackPeek(raxStack *ts) {
    if (ts->items == 0) return NULL;
    return ts->stack[ts->items-1];
}

/* Free the stack in case we used heap allocation. */
static inline void raxStackFree(raxStack *ts) {
    if (ts->stack != ts->static_items) rax_free(ts->stack);
}

/* ----------------------------------------------------------------------------
 * Radix tree implementation
 * --------------------------------------------------------------------------*/

/* Return the padding needed in the characters section of a node having size
 * 'nodesize'. The padding is needed to store the child pointers to aligned
 * addresses. Note that we add 4 to the node size because the node has a four
 * bytes header. */
#define raxPadding(nodesize) ((sizeof(void*)-((nodesize+4) % sizeof(void*))) & (sizeof(void*)-1))

/* Return the pointer to the last child pointer in a node. For the compressed
 * nodes this is the only child pointer. */
#define raxNodeLastChildPtr(n) ((raxNode**) ( \
    ((char*)(n)) + \
    raxNodeCurrentLength(n) - \
    sizeof(raxNode*) - \
    (((n)->iskey && !(n)->isnull) ? sizeof(void*) : 0) \
))

/* Return the pointer to the first child pointer. */
#define raxNodeFirstChildPtr(n) ((raxNode**) ( \
    (n)->data + \
    (n)->size + \
    raxPadding((n)->size)))

/* Return the current total size of the node. Note that the second line
 * computes the padding after the string of characters, needed in order to
 * save pointers to aligned addresses. */
#define raxNodeCurrentLength(n) ( \
    sizeof(raxNode)+(n)->size+ \
    raxPadding((n)->size)+ \
    ((n)->iscompr ? sizeof(raxNode*) : sizeof(raxNode*)*(n)->size)+ \
    (((n)->iskey && !(n)->isnull)*sizeof(void*)) \
)

/* Allocate a new non compressed node with the specified number of children.
 * If datafield is true, the allocation is made large enough to hold the
 * associated data pointer.
 * Returns the new node pointer. On out of memory NULL is returned. */
raxNode *raxNewNode(size_t children, int datafield) {
    size_t nodesize = sizeof(raxNode)+children+raxPadding(children)+
                      sizeof(raxNode*)*children;
    if (datafield) nodesize += sizeof(void*);
    raxNode *node = rax_malloc(nodesize);
    if (node == NULL) return NULL;
    node->iskey = 0;
    node->isnull = 0;
    node->iscompr = 0;
    node->size = children;
    return node;
}

/* Allocate a new rax and return its pointer. On out of memory the function
 * returns NULL. The new tree starts with a single empty (non-key) head
 * node, hence numnodes = 1. */
rax *raxNew(void) {
    rax *rax = rax_malloc(sizeof(*rax));
    if (rax == NULL) return NULL;
    rax->numele = 0;
    rax->numnodes = 1;
    rax->head = raxNewNode(0,0);
    if (rax->head == NULL) {
        rax_free(rax);
        return NULL;
    } else {
        return rax;
    }
}

/* realloc the node to make room for auxiliary data in order
 * to store an item in that node. On out of memory NULL is returned. */
raxNode *raxReallocForData(raxNode *n, void *data) {
    if (data == NULL) return n; /* No reallocation needed, setting isnull=1 */
    size_t curlen = raxNodeCurrentLength(n);
    return rax_realloc(n,curlen+sizeof(void*));
}

/* Set the node auxiliary data to the specified pointer.
Passing NULL marks the key as having a NULL associated value. */
void raxSetData(raxNode *n, void *data) {
    n->iskey = 1;
    if (data != NULL) {
        n->isnull = 0;
        /* The data pointer lives in the last sizeof(void*) bytes of the
         * node (see raxNodeCurrentLength). */
        void **ndata = (void**)
            ((char*)n+raxNodeCurrentLength(n)-sizeof(void*));
        memcpy(ndata,&data,sizeof(data));
    } else {
        n->isnull = 1;
    }
}

/* Get the node auxiliary data. */
void *raxGetData(raxNode *n) {
    if (n->isnull) return NULL;
    void **ndata =(void**)((char*)n+raxNodeCurrentLength(n)-sizeof(void*));
    void *data;
    memcpy(&data,ndata,sizeof(data));
    return data;
}

/* Add a new child to the node 'n' representing the character 'c' and return
 * its new pointer, as well as the child pointer by reference. Additionally
 * '***parentlink' is populated with the raxNode pointer-to-pointer of where
 * the new child was stored, which is useful for the caller to replace the
 * child pointer if it gets reallocated.
 *
 * On success the new parent node pointer is returned (it may change because
 * of the realloc, so the caller should discard 'n' and use the new value).
 * On out of memory NULL is returned, and the old node is still valid. */
raxNode *raxAddChild(raxNode *n, unsigned char c, raxNode **childptr, raxNode ***parentlink) {
    assert(n->iscompr == 0);

    /* Measure the node length before and after the addition by temporarily
     * bumping 'size' (raxNodeCurrentLength depends on it). */
    size_t curlen = raxNodeCurrentLength(n);
    n->size++;
    size_t newlen = raxNodeCurrentLength(n);
    n->size--; /* For now restore the original size. We'll update it only on
                  success at the end. */

    /* Alloc the new child we will link to 'n'. */
    raxNode *child = raxNewNode(0,0);
    if (child == NULL) return NULL;

    /* Make space in the original node. */
    raxNode *newn = rax_realloc(n,newlen);
    if (newn == NULL) {
        rax_free(child);
        return NULL;
    }
    n = newn;

    /* After the reallocation, we have up to 8/16 (depending on the system
     * pointer size, and the required node padding) bytes at the end, that is,
     * the additional char in the 'data' section, plus one pointer to the new
     * child, plus the padding needed in order to store addresses into aligned
     * locations.
     *
     * So if we start with the following node, having "abde" edges.
     *
     * Note:
     * - We assume 4 bytes pointer for simplicity.
     * - Each space below corresponds to one byte
     *
     * [HDR*][abde][Aptr][Bptr][Dptr][Eptr]|AUXP|
     *
     * After the reallocation we need: 1 byte for the new edge character
     * plus 4 bytes for a new child pointer (assuming 32 bit machine).
     * However after adding 1 byte to the edge char, the header + the edge
     * characters are no longer aligned, so we also need 3 bytes of padding.
     * In total the reallocation will add 1+4+3 bytes = 8 bytes:
     *
     * (Blank bytes are represented by ".")
     *
     * [HDR*][abde][Aptr][Bptr][Dptr][Eptr]|AUXP|[....][....]
     *
     * Let's find where to insert the new child in order to make sure
     * it is inserted in-place lexicographically. Assuming we are adding
     * a child "c" in our case pos will be = 2 after the end of the following
     * loop. */
    int pos;
    for (pos = 0; pos < n->size; pos++) {
        if (n->data[pos] > c) break;
    }

    /* Now, if present, move auxiliary data pointer at the end
     * so that we can mess with the other data without overwriting it.
     * We will obtain something like that:
     *
     * [HDR*][abde][Aptr][Bptr][Dptr][Eptr][....][....]|AUXP|
     */
    unsigned char *src, *dst;
    if (n->iskey && !n->isnull) {
        src = ((unsigned char*)n+curlen-sizeof(void*));
        dst = ((unsigned char*)n+newlen-sizeof(void*));
        memmove(dst,src,sizeof(void*));
    }

    /* Compute the "shift", that is, how many bytes we need to move the
     * pointers section forward because of the addition of the new child
     * byte in the string section. Note that if we had no padding, that
     * would be always "1", since we are adding a single byte in the string
     * section of the node (where now there is "abde" basically).
     *
     * However we have padding, so it could be zero, or up to 8.
     *
     * Another way to think at the shift is, how many bytes we need to
     * move child pointers forward *other than* the obvious sizeof(void*)
     * needed for the additional pointer itself. */
    size_t shift = newlen - curlen - sizeof(void*);

    /* We said we are adding a node with edge 'c'. The insertion
     * point is between 'b' and 'd', so the 'pos' variable value is
     * the index of the first child pointer that we need to move forward
     * to make space for our new pointer.
     *
     * To start, move all the child pointers after the insertion point
     * of shift+sizeof(pointer) bytes on the right, to obtain:
     *
     * [HDR*][abde][Aptr][Bptr][....][....][Dptr][Eptr]|AUXP|
     */
    src = n->data+n->size+
          raxPadding(n->size)+
          sizeof(raxNode*)*pos;
    memmove(src+shift+sizeof(raxNode*),src,sizeof(raxNode*)*(n->size-pos));

    /* Move the pointers to the left of the insertion position as well. Often
     * we don't need to do anything if there was already some padding to use. In
     * that case the final destination of the pointers will be the same, however
     * in our example there was no pre-existing padding, so we added one byte
     * plus three bytes of padding. After the next memmove() things will look
     * like that:
     *
     * [HDR*][abde][....][Aptr][Bptr][....][Dptr][Eptr]|AUXP|
     */
    if (shift) {
        src = (unsigned char*) raxNodeFirstChildPtr(n);
        memmove(src+shift,src,sizeof(raxNode*)*pos);
    }

    /* Now make the space for the additional char in the data section,
     * but also move the pointers before the insertion point to the right
     * by shift bytes, in order to obtain the following:
     *
     * [HDR*][ab.d][e...][Aptr][Bptr][....][Dptr][Eptr]|AUXP|
     */
    src = n->data+pos;
    memmove(src+1,src,n->size-pos);

    /* We can now set the character and its child node pointer to get:
     *
     * [HDR*][abcd][e...][Aptr][Bptr][....][Dptr][Eptr]|AUXP|
     * [HDR*][abcd][e...][Aptr][Bptr][Cptr][Dptr][Eptr]|AUXP|
     */
    n->data[pos] = c;
    n->size++;
    src = (unsigned char*) raxNodeFirstChildPtr(n);
    raxNode **childfield = (raxNode**)(src+sizeof(raxNode*)*pos);
    memcpy(childfield,&child,sizeof(child));
    *childptr = child;
    *parentlink = childfield;
    return n;
}

/* Turn the node 'n', that must be a node without any children, into a
 * compressed node representing a set of nodes linked one after the other
 * and having exactly one child each.
The node can be a key or not: this
 * property and the associated value if any will be preserved.
 *
 * The function also returns a child node, since the last node of the
 * compressed chain cannot be part of the chain: it has zero children while
 * we can only compress inner nodes with exactly one child each. */
raxNode *raxCompressNode(raxNode *n, unsigned char *s, size_t len, raxNode **child) {
    assert(n->size == 0 && n->iscompr == 0);
    void *data = NULL; /* Initialized only to avoid warnings. */
    size_t newsize;

    debugf("Compress node: %.*s\n", (int)len,s);

    /* Allocate the child to link to this node. */
    *child = raxNewNode(0,0);
    if (*child == NULL) return NULL;

    /* Make space in the parent node: header + 'len' edge chars + padding +
     * the single child pointer, plus the data pointer if 'n' is a key. */
    newsize = sizeof(raxNode)+len+raxPadding(len)+sizeof(raxNode*);
    if (n->iskey) {
        data = raxGetData(n); /* To restore it later. */
        if (!n->isnull) newsize += sizeof(void*);
    }
    raxNode *newn = rax_realloc(n,newsize);
    if (newn == NULL) {
        rax_free(*child);
        return NULL;
    }
    n = newn;

    n->iscompr = 1;
    n->size = len;
    memcpy(n->data,s,len);
    /* raxSetData() recomputes the data pointer position, which moved now
     * that the node layout changed. */
    if (n->iskey) raxSetData(n,data);
    raxNode **childfield = raxNodeLastChildPtr(n);
    memcpy(childfield,child,sizeof(*child));
    return n;
}

/* Low level function that walks the tree looking for the string
 * 's' of 'len' bytes. The function returns the number of characters
 * of the key that was possible to process: if the returned integer
 * is the same as 'len', then it means that the node corresponding to the
 * string was found (however it may not be a key in case the node->iskey is
 * zero or if simply we stopped in the middle of a compressed node, so that
 * 'splitpos' is non zero).
 *
 * Otherwise if the returned integer is not the same as 'len', there was an
 * early stop during the tree walk because of a character mismatch.
 *
 * The node where the search ended (because the full string was processed
 * or because there was an early stop) is returned by reference as
 * '*stopnode' if the passed pointer is not NULL.
This node link in the
 * parent's node is returned as '*plink' if not NULL. Finally, if the
 * search stopped in a compressed node, '*splitpos' returns the index
 * inside the compressed node where the search ended. This is useful to
 * know where to split the node for insertion.
 *
 * Note that when we stop in the middle of a compressed node with
 * a perfect match, this function will return a length equal to the
 * 'len' argument (all the key matched), and will return a *splitpos which is
 * always positive (that will represent the index of the character immediately
 * *after* the last match in the current compressed node).
 *
 * When instead we stop at a compressed node and *splitpos is zero, it
 * means that the current node represents the key (that is, none of the
 * compressed node characters are needed to represent the key, just all
 * its parents nodes). */
static inline size_t raxLowWalk(rax *rax, unsigned char *s, size_t len, raxNode **stopnode, raxNode ***plink, int *splitpos, raxStack *ts) {
    raxNode *h = rax->head;
    raxNode **parentlink = &rax->head;

    size_t i = 0; /* Position in the string. */
    size_t j = 0; /* Position in the node children (or bytes if compressed).*/
    while(h->size && i < len) {
        debugnode("Lookup current node",h);
        unsigned char *v = h->data;

        if (h->iscompr) {
            /* Compressed node: every stored byte must match in sequence. */
            for (j = 0; j < h->size && i < len; j++, i++) {
                if (v[j] != s[i]) break;
            }
            if (j != h->size) break;
        } else {
            /* Even when h->size is large, linear scan provides good
             * performances compared to other approaches that are in theory
             * more sound, like performing a binary search. */
            for (j = 0; j < h->size; j++) {
                if (v[j] == s[i]) break;
            }
            if (j == h->size) break;
            i++;
        }

        if (ts) raxStackPush(ts,h); /* Save stack of parent nodes. */
        raxNode **children = raxNodeFirstChildPtr(h);
        if (h->iscompr) j = 0; /* Compressed node only child is at index 0. */
        memcpy(&h,children+j,sizeof(h));
        parentlink = children+j;
        j = 0; /* If the new node is non compressed and we do not
                  iterate again (since i == len) set the split
                  position to 0 to signal this node represents
                  the searched key. */
    }
    debugnode("Lookup stop node is",h);
    if (stopnode) *stopnode = h;
    if (plink) *plink = parentlink;
    if (splitpos && h->iscompr) *splitpos = j;
    return i;
}

/* Insert the element 's' of size 'len', setting as auxiliary data
 * the pointer 'data'. If the element is already present, the associated
 * data is updated (only if 'overwrite' is set to 1), and 0 is returned,
 * otherwise the element is inserted and 1 is returned. On out of memory the
 * function returns 0 as well but sets errno to ENOMEM, otherwise errno will
 * be set to 0.
 */
int raxGenericInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old, int overwrite) {
    size_t i;
    int j = 0; /* Split position. If raxLowWalk() stops in a compressed
                  node, the index 'j' represents the char we stopped within the
                  compressed node, that is, the position where to split the
                  node for insertion. */
    raxNode *h, **parentlink;

    debugf("### Insert %.*s with value %p\n", (int)len, s, data);
    i = raxLowWalk(rax,s,len,&h,&parentlink,&j,NULL);

    /* If i == len we walked following the whole string. If we are not
     * in the middle of a compressed node, the string is either already
     * inserted or this middle node is currently not a key, but can represent
     * our key. We have just to reallocate the node and make space for the
     * data pointer. */
    if (i == len && (!h->iscompr || j == 0 /* not in the middle if j is 0 */)) {
        debugf("### Insert: node representing key exists\n");
        /* Make space for the value pointer if needed. */
        if (!h->iskey || (h->isnull && overwrite)) {
            h = raxReallocForData(h,data);
            if (h) memcpy(parentlink,&h,sizeof(h));
        }
        if (h == NULL) {
            errno = ENOMEM;
            return 0;
        }

        /* Update the existing key if there is already one.
*/ if (h->iskey) { if (old) *old = raxGetData(h); if (overwrite) raxSetData(h,data); errno = 0; return 0; /* Element already exists. */ } /* Otherwise set the node as a key. Note that raxSetData() * will set h->iskey. */ raxSetData(h,data); rax->numele++; return 1; /* Element inserted. */ } /* If the node we stopped at is a compressed node, we need to * split it before to continue. * * Splitting a compressed node have a few possible cases. * Imagine that the node 'h' we are currently at is a compressed * node containing the string "ANNIBALE" (it means that it represents * nodes A -> N -> N -> I -> B -> A -> L -> E with the only child * pointer of this node pointing at the 'E' node, because remember that * we have characters at the edges of the graph, not inside the nodes * themselves. * * In order to show a real case imagine our node to also point to * another compressed node, that finally points at the node without * children, representing 'O': * * "ANNIBALE" -> "SCO" -> [] * * When inserting we may face the following cases. Note that all the cases * require the insertion of a non compressed node with exactly two * children, except for the last case which just requires splitting a * compressed node. * * 1) Inserting "ANNIENTARE" * * |B| -> "ALE" -> "SCO" -> [] * "ANNI" -> |-| * |E| -> (... continue algo ...) "NTARE" -> [] * * 2) Inserting "ANNIBALI" * * |E| -> "SCO" -> [] * "ANNIBAL" -> |-| * |I| -> (... continue algo ...) [] * * 3) Inserting "AGO" (Like case 1, but set iscompr = 0 into original node) * * |N| -> "NIBALE" -> "SCO" -> [] * |A| -> |-| * |G| -> (... continue algo ...) |O| -> [] * * 4) Inserting "CIAO" * * |A| -> "NNIBALE" -> "SCO" -> [] * |-| * |C| -> (... continue algo ...) "IAO" -> [] * * 5) Inserting "ANNI" * * "ANNI" -> "BALE" -> "SCO" -> [] * * The final algorithm for insertion covering all the above cases is as * follows. 
* * ============================= ALGO 1 ============================= * * For the above cases 1 to 4, that is, all cases where we stopped in * the middle of a compressed node for a character mismatch, do: * * Let $SPLITPOS be the zero-based index at which, in the * compressed node array of characters, we found the mismatching * character. For example if the node contains "ANNIBALE" and we add * "ANNIENTARE" the $SPLITPOS is 4, that is, the index at which the * mismatching character is found. * * 1. Save the current compressed node $NEXT pointer (the pointer to the * child element, that is always present in compressed nodes). * * 2. Create "split node" having as child the non common letter * at the compressed node. The other non common letter (at the key) * will be added later as we continue the normal insertion algorithm * at step "6". * * 3a. IF $SPLITPOS == 0: * Replace the old node with the split node, by copying the auxiliary * data if any. Fix parent's reference. Free old node eventually * (we still need its data for the next steps of the algorithm). * * 3b. IF $SPLITPOS != 0: * Trim the compressed node (reallocating it as well) in order to * contain $splitpos characters. Change child pointer in order to link * to the split node. If new compressed node len is just 1, set * iscompr to 0 (layout is the same). Fix parent's reference. * * 4a. IF the postfix len (the length of the remaining string of the * original compressed node after the split character) is non zero, * create a "postfix node". If the postfix node has just one character * set iscompr to 0, otherwise iscompr to 1. Set the postfix node * child pointer to $NEXT. * * 4b. IF the postfix len is zero, just use $NEXT as postfix pointer. * * 5. Set child[0] of split node to postfix node. * * 6. Set the split node as the current node, set current index at child[1] * and continue insertion algorithm as usually. 
* * ============================= ALGO 2 ============================= * * For case 5, that is, if we stopped in the middle of a compressed * node but no mismatch was found, do: * * Let $SPLITPOS be the zero-based index at which, in the * compressed node array of characters, we stopped iterating because * there were no more keys character to match. So in the example of * the node "ANNIBALE", adding the string "ANNI", the $SPLITPOS is 4. * * 1. Save the current compressed node $NEXT pointer (the pointer to the * child element, that is always present in compressed nodes). * * 2. Create a "postfix node" containing all the characters from $SPLITPOS * to the end. Use $NEXT as the postfix node child pointer. * If the postfix node length is 1, set iscompr to 0. * Set the node as a key with the associated value of the new * inserted key. * * 3. Trim the current node to contain the first $SPLITPOS characters. * As usually if the new node length is just 1, set iscompr to 0. * Take the iskey / associated value as it was in the original node. * Fix the parent's reference. * * 4. Set the postfix node as the only child pointer of the trimmed * node created at step 1. */ /* ------------------------- ALGORITHM 1 --------------------------- */ if (h->iscompr && i != len) { debugf("ALGO 1: Stopped at compressed node %.*s (%p)\n", h->size, h->data, (void*)h); debugf("Still to insert: %.*s\n", (int)(len-i), s+i); debugf("Splitting at %d: '%c'\n", j, ((char*)h->data)[j]); debugf("Other (key) letter is '%c'\n", s[i]); /* 1: Save next pointer. */ raxNode **childfield = raxNodeLastChildPtr(h); raxNode *next; memcpy(&next,childfield,sizeof(next)); debugf("Next is %p\n", (void*)next); debugf("iskey %d\n", h->iskey); if (h->iskey) { debugf("key value is %p\n", raxGetData(h)); } /* Set the length of the additional nodes we will need. 
*/ size_t trimmedlen = j; size_t postfixlen = h->size - j - 1; int split_node_is_key = !trimmedlen && h->iskey && !h->isnull; size_t nodesize; /* 2: Create the split node. Also allocate the other nodes we'll need * ASAP, so that it will be simpler to handle OOM. */ raxNode *splitnode = raxNewNode(1, split_node_is_key); raxNode *trimmed = NULL; raxNode *postfix = NULL; if (trimmedlen) { nodesize = sizeof(raxNode)+trimmedlen+raxPadding(trimmedlen)+ sizeof(raxNode*); if (h->iskey && !h->isnull) nodesize += sizeof(void*); trimmed = rax_malloc(nodesize); } if (postfixlen) { nodesize = sizeof(raxNode)+postfixlen+raxPadding(postfixlen)+ sizeof(raxNode*); postfix = rax_malloc(nodesize); } /* OOM? Abort now that the tree is untouched. */ if (splitnode == NULL || (trimmedlen && trimmed == NULL) || (postfixlen && postfix == NULL)) { rax_free(splitnode); rax_free(trimmed); rax_free(postfix); errno = ENOMEM; return 0; } splitnode->data[0] = h->data[j]; if (j == 0) { /* 3a: Replace the old node with the split node. */ if (h->iskey) { void *ndata = raxGetData(h); raxSetData(splitnode,ndata); } memcpy(parentlink,&splitnode,sizeof(splitnode)); } else { /* 3b: Trim the compressed node. */ trimmed->size = j; memcpy(trimmed->data,h->data,j); trimmed->iscompr = j > 1 ? 1 : 0; trimmed->iskey = h->iskey; trimmed->isnull = h->isnull; if (h->iskey && !h->isnull) { void *ndata = raxGetData(h); raxSetData(trimmed,ndata); } raxNode **cp = raxNodeLastChildPtr(trimmed); memcpy(cp,&splitnode,sizeof(splitnode)); memcpy(parentlink,&trimmed,sizeof(trimmed)); parentlink = cp; /* Set parentlink to splitnode parent. */ rax->numnodes++; } /* 4: Create the postfix node: what remains of the original * compressed node after the split. */ if (postfixlen) { /* 4a: create a postfix node. 
*/ postfix->iskey = 0; postfix->isnull = 0; postfix->size = postfixlen; postfix->iscompr = postfixlen > 1; memcpy(postfix->data,h->data+j+1,postfixlen); raxNode **cp = raxNodeLastChildPtr(postfix); memcpy(cp,&next,sizeof(next)); rax->numnodes++; } else { /* 4b: just use next as postfix node. */ postfix = next; } /* 5: Set splitnode first child as the postfix node. */ raxNode **splitchild = raxNodeLastChildPtr(splitnode); memcpy(splitchild,&postfix,sizeof(postfix)); /* 6. Continue insertion: this will cause the splitnode to * get a new child (the non common character at the currently * inserted key). */ rax_free(h); h = splitnode; } else if (h->iscompr && i == len) { /* ------------------------- ALGORITHM 2 --------------------------- */ debugf("ALGO 2: Stopped at compressed node %.*s (%p) j = %d\n", h->size, h->data, (void*)h, j); /* Allocate postfix & trimmed nodes ASAP to fail for OOM gracefully. */ size_t postfixlen = h->size - j; size_t nodesize = sizeof(raxNode)+postfixlen+raxPadding(postfixlen)+ sizeof(raxNode*); if (data != NULL) nodesize += sizeof(void*); raxNode *postfix = rax_malloc(nodesize); nodesize = sizeof(raxNode)+j+raxPadding(j)+sizeof(raxNode*); if (h->iskey && !h->isnull) nodesize += sizeof(void*); raxNode *trimmed = rax_malloc(nodesize); if (postfix == NULL || trimmed == NULL) { rax_free(postfix); rax_free(trimmed); errno = ENOMEM; return 0; } /* 1: Save next pointer. */ raxNode **childfield = raxNodeLastChildPtr(h); raxNode *next; memcpy(&next,childfield,sizeof(next)); /* 2: Create the postfix node. */ postfix->size = postfixlen; postfix->iscompr = postfixlen > 1; postfix->iskey = 1; postfix->isnull = 0; memcpy(postfix->data,h->data+j,postfixlen); raxSetData(postfix,data); raxNode **cp = raxNodeLastChildPtr(postfix); memcpy(cp,&next,sizeof(next)); rax->numnodes++; /* 3: Trim the compressed node. 
*/ trimmed->size = j; trimmed->iscompr = j > 1; trimmed->iskey = 0; trimmed->isnull = 0; memcpy(trimmed->data,h->data,j); memcpy(parentlink,&trimmed,sizeof(trimmed)); if (h->iskey) { void *aux = raxGetData(h); raxSetData(trimmed,aux); } /* Fix the trimmed node child pointer to point to * the postfix node. */ cp = raxNodeLastChildPtr(trimmed); memcpy(cp,&postfix,sizeof(postfix)); /* Finish! We don't need to continue with the insertion * algorithm for ALGO 2. The key is already inserted. */ rax->numele++; rax_free(h); return 1; /* Key inserted. */ } /* We walked the radix tree as far as we could, but still there are left * chars in our string. We need to insert the missing nodes. */ while(i < len) { raxNode *child; /* If this node is going to have a single child, and there * are other characters, so that that would result in a chain * of single-childed nodes, turn it into a compressed node. */ if (h->size == 0 && len-i > 1) { debugf("Inserting compressed node\n"); size_t comprsize = len-i; if (comprsize > RAX_NODE_MAX_SIZE) comprsize = RAX_NODE_MAX_SIZE; raxNode *newh = raxCompressNode(h,s+i,comprsize,&child); if (newh == NULL) goto oom; h = newh; memcpy(parentlink,&h,sizeof(h)); parentlink = raxNodeLastChildPtr(h); i += comprsize; } else { debugf("Inserting normal node\n"); raxNode **new_parentlink; raxNode *newh = raxAddChild(h,s[i],&child,&new_parentlink); if (newh == NULL) goto oom; h = newh; memcpy(parentlink,&h,sizeof(h)); parentlink = new_parentlink; i++; } rax->numnodes++; h = child; } raxNode *newh = raxReallocForData(h,data); if (newh == NULL) goto oom; h = newh; if (!h->iskey) rax->numele++; raxSetData(h,data); memcpy(parentlink,&h,sizeof(h)); return 1; /* Element inserted. */ oom: /* This code path handles out of memory after part of the sub-tree was * already modified. Set the node as a key, and then remove it. 
However we * do that only if the node is a terminal node, otherwise if the OOM * happened reallocating a node in the middle, we don't need to free * anything. */ if (h->size == 0) { h->isnull = 1; h->iskey = 1; rax->numele++; /* Compensate the next remove. */ assert(raxRemove(rax,s,i,NULL) != 0); } errno = ENOMEM; return 0; } /* Overwriting insert. Just a wrapper for raxGenericInsert() that will * update the element if there is already one for the same key. */ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) { return raxGenericInsert(rax,s,len,data,old,1); } /* Non overwriting insert function: if an element with the same key * exists, the value is not updated and the function returns 0. * This is just a wrapper for raxGenericInsert(). */ int raxTryInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) { return raxGenericInsert(rax,s,len,data,old,0); } /* Find a key in the rax, returns raxNotFound special void pointer value * if the item was not found, otherwise the value associated with the * item is returned. */ void *raxFind(rax *rax, unsigned char *s, size_t len) { raxNode *h; debugf("### Lookup: %.*s\n", (int)len, s); int splitpos = 0; size_t i = raxLowWalk(rax,s,len,&h,NULL,&splitpos,NULL); if (i != len || (h->iscompr && splitpos != 0) || !h->iskey) return raxNotFound; return raxGetData(h); } /* Return the memory address where the 'parent' node stores the specified * 'child' pointer, so that the caller can update the pointer with another * one if needed. The function assumes it will find a match, otherwise the * operation is an undefined behavior (it will continue scanning the * memory without any bound checking). */ raxNode **raxFindParentLink(raxNode *parent, raxNode *child) { raxNode **cp = raxNodeFirstChildPtr(parent); raxNode *c; while(1) { memcpy(&c,cp,sizeof(c)); if (c == child) break; cp++; } return cp; } /* Low level child removal from node. The new node pointer (after the child * removal) is returned. 
Note that this function does not fix the pointer * of the parent node in its parent, so this task is up to the caller. * The function never fails for out of memory. */ raxNode *raxRemoveChild(raxNode *parent, raxNode *child) { debugnode("raxRemoveChild before", parent); /* If parent is a compressed node (having a single child, as for definition * of the data structure), the removal of the child consists into turning * it into a normal node without children. */ if (parent->iscompr) { void *data = NULL; if (parent->iskey) data = raxGetData(parent); parent->isnull = 0; parent->iscompr = 0; parent->size = 0; if (parent->iskey) raxSetData(parent,data); debugnode("raxRemoveChild after", parent); return parent; } /* Otherwise we need to scan for the child pointer and memmove() * accordingly. * * 1. To start we seek the first element in both the children * pointers and edge bytes in the node. */ raxNode **cp = raxNodeFirstChildPtr(parent); raxNode **c = cp; unsigned char *e = parent->data; /* 2. Search the child pointer to remove inside the array of children * pointers. */ while(1) { raxNode *aux; memcpy(&aux,c,sizeof(aux)); if (aux == child) break; c++; e++; } /* 3. Remove the edge and the pointer by memmoving the remaining children * pointer and edge bytes one position before. */ int taillen = parent->size - (e - parent->data) - 1; debugf("raxRemoveChild tail len: %d\n", taillen); memmove(e,e+1,taillen); /* Compute the shift, that is the amount of bytes we should move our * child pointers to the left, since the removal of one edge character * and the corresponding padding change, may change the layout. * We just check if in the old version of the node there was at the * end just a single byte and all padding: in that case removing one char * will remove a whole sizeof(void*) word. */ size_t shift = ((parent->size+4) % sizeof(void*)) == 1 ? sizeof(void*) : 0; /* Move the children pointers before the deletion point. 
*/ if (shift) memmove(((char*)cp)-shift,cp,(parent->size-taillen-1)*sizeof(raxNode**)); /* Move the remaining "tail" pointers at the right position as well. */ size_t valuelen = (parent->iskey && !parent->isnull) ? sizeof(void*) : 0; memmove(((char*)c)-shift,c+1,taillen*sizeof(raxNode**)+valuelen); /* 4. Update size. */ parent->size--; /* realloc the node according to the theoretical memory usage, to free * data if we are over-allocating right now. */ raxNode *newnode = rax_realloc(parent,raxNodeCurrentLength(parent)); if (newnode) { debugnode("raxRemoveChild after", newnode); } /* Note: if rax_realloc() fails we just return the old address, which * is valid. */ return newnode ? newnode : parent; } /* Remove the specified item. Returns 1 if the item was found and * deleted, 0 otherwise. */ int raxRemove(rax *rax, unsigned char *s, size_t len, void **old) { raxNode *h; raxStack ts; debugf("### Delete: %.*s\n", (int)len, s); raxStackInit(&ts); int splitpos = 0; size_t i = raxLowWalk(rax,s,len,&h,NULL,&splitpos,&ts); if (i != len || (h->iscompr && splitpos != 0) || !h->iskey) { raxStackFree(&ts); return 0; } if (old) *old = raxGetData(h); h->iskey = 0; rax->numele--; /* If this node has no children, the deletion needs to reclaim the * no longer used nodes. This is an iterative process that needs to * walk the three upward, deleting all the nodes with just one child * that are not keys, until the head of the rax is reached or the first * node with more than one child is found. */ int trycompress = 0; /* Will be set to 1 if we should try to optimize the tree resulting from the deletion. */ if (h->size == 0) { debugf("Key deleted in node without children. 
Cleanup needed.\n"); raxNode *child = NULL; while(h != rax->head) { child = h; debugf("Freeing child %p [%.*s] key:%d\n", (void*)child, (int)child->size, (char*)child->data, child->iskey); rax_free(child); rax->numnodes--; h = raxStackPop(&ts); /* If this node has more then one child, or actually holds * a key, stop here. */ if (h->iskey || (!h->iscompr && h->size != 1)) break; } if (child) { debugf("Unlinking child %p from parent %p\n", (void*)child, (void*)h); raxNode *new = raxRemoveChild(h,child); if (new != h) { raxNode *parent = raxStackPeek(&ts); raxNode **parentlink; if (parent == NULL) { parentlink = &rax->head; } else { parentlink = raxFindParentLink(parent,h); } memcpy(parentlink,&new,sizeof(new)); } /* If after the removal the node has just a single child * and is not a key, we need to try to compress it. */ if (new->size == 1 && new->iskey == 0) { trycompress = 1; h = new; } } } else if (h->size == 1) { /* If the node had just one child, after the removal of the key * further compression with adjacent nodes is potentially possible. */ trycompress = 1; } /* Don't try node compression if our nodes pointers stack is not * complete because of OOM while executing raxLowWalk() */ if (trycompress && ts.oom) trycompress = 0; /* Recompression: if trycompress is true, 'h' points to a radix tree node * that changed in a way that could allow to compress nodes in this * sub-branch. Compressed nodes represent chains of nodes that are not * keys and have a single child, so there are two deletion events that * may alter the tree so that further compression is needed: * * 1) A node with a single child was a key and now no longer is a key. * 2) A node with two children now has just one child. 
* * We try to navigate upward till there are other nodes that can be * compressed, when we reach the upper node which is not a key and has * a single child, we scan the chain of children to collect the * compressible part of the tree, and replace the current node with the * new one, fixing the child pointer to reference the first non * compressible node. * * Example of case "1". A tree stores the keys "FOO" = 1 and * "FOOBAR" = 2: * * * "FOO" -> "BAR" -> [] (2) * (1) * * After the removal of "FOO" the tree can be compressed as: * * "FOOBAR" -> [] (2) * * * Example of case "2". A tree stores the keys "FOOBAR" = 1 and * "FOOTER" = 2: * * |B| -> "AR" -> [] (1) * "FOO" -> |-| * |T| -> "ER" -> [] (2) * * After the removal of "FOOTER" the resulting tree is: * * "FOO" -> |B| -> "AR" -> [] (1) * * That can be compressed into: * * "FOOBAR" -> [] (1) */ if (trycompress) { debugf("After removing %.*s:\n", (int)len, s); debugnode("Compression may be needed",h); debugf("Seek start node\n"); /* Try to reach the upper node that is compressible. * At the end of the loop 'h' will point to the first node we * can try to compress and 'parent' to its parent. */ raxNode *parent; while(1) { parent = raxStackPop(&ts); if (!parent || parent->iskey || (!parent->iscompr && parent->size != 1)) break; h = parent; debugnode("Going up to",h); } raxNode *start = h; /* Compression starting node. */ /* Scan chain of nodes we can compress. */ size_t comprsize = h->size; int nodes = 1; while(h->size != 0) { raxNode **cp = raxNodeLastChildPtr(h); memcpy(&h,cp,sizeof(h)); if (h->iskey || (!h->iscompr && h->size != 1)) break; /* Stop here if going to the next node would result into * a compressed node larger than h->size can hold. */ if (comprsize + h->size > RAX_NODE_MAX_SIZE) break; nodes++; comprsize += h->size; } if (nodes > 1) { /* If we can compress, create the new node and populate it. 
*/ size_t nodesize = sizeof(raxNode)+comprsize+raxPadding(comprsize)+sizeof(raxNode*); raxNode *new = rax_malloc(nodesize); /* An out of memory here just means we cannot optimize this * node, but the tree is left in a consistent state. */ if (new == NULL) { raxStackFree(&ts); return 1; } new->iskey = 0; new->isnull = 0; new->iscompr = 1; new->size = comprsize; rax->numnodes++; /* Scan again, this time to populate the new node content and * to fix the new node child pointer. At the same time we free * all the nodes that we'll no longer use. */ comprsize = 0; h = start; while(h->size != 0) { memcpy(new->data+comprsize,h->data,h->size); comprsize += h->size; raxNode **cp = raxNodeLastChildPtr(h); raxNode *tofree = h; memcpy(&h,cp,sizeof(h)); rax_free(tofree); rax->numnodes--; if (h->iskey || (!h->iscompr && h->size != 1)) break; } debugnode("New node",new); /* Now 'h' points to the first node that we still need to use, * so our new node child pointer will point to it. */ raxNode **cp = raxNodeLastChildPtr(new); memcpy(cp,&h,sizeof(h)); /* Fix parent link. */ if (parent) { raxNode **parentlink = raxFindParentLink(parent,start); memcpy(parentlink,&new,sizeof(new)); } else { rax->head = new; } debugf("Compressed %d nodes, %d total bytes\n", nodes, (int)comprsize); } } raxStackFree(&ts); return 1; } /* This is the core of raxFree(): performs a depth-first scan of the * tree and releases all the nodes found. */ void raxRecursiveFree(rax *rax, raxNode *n, void (*free_callback)(void*)) { debugnode("free traversing",n); int numchildren = n->iscompr ? 1 : n->size; raxNode **cp = raxNodeLastChildPtr(n); while(numchildren--) { raxNode *child; memcpy(&child,cp,sizeof(child)); raxRecursiveFree(rax,child,free_callback); cp--; } debugnode("free depth-first",n); if (free_callback && n->iskey && !n->isnull) free_callback(raxGetData(n)); rax_free(n); rax->numnodes--; } /* Free a whole radix tree, calling the specified callback in order to * free the auxiliary data. 
 */
void raxFreeWithCallback(rax *rax, void (*free_callback)(void*)) {
    raxRecursiveFree(rax,rax->head,free_callback);
    /* Sanity check the node accounting: every allocation must have been
     * released by the recursive free (no-op when NDEBUG is defined). */
    assert(rax->numnodes == 0);
    rax_free(rax);
}

/* Free a whole radix tree. */
void raxFree(rax *rax) {
    raxFreeWithCallback(rax,NULL);
}

/* ------------------------------- Iterator --------------------------------- */

/* Initialize a Rax iterator. This call should be performed a single time
 * to initialize the iterator, and must be followed by a raxSeek() call,
 * otherwise the raxPrev()/raxNext() functions will just return EOF. */
void raxStart(raxIterator *it, rax *rt) {
    it->flags = RAX_ITER_EOF; /* No crash if the iterator is not seeked. */
    it->rt = rt;
    it->key_len = 0;
    /* Start with the embedded static buffer; raxIteratorAddChars() promotes
     * it to a heap allocation only when the key outgrows it. */
    it->key = it->key_static_string;
    it->key_max = RAX_ITER_STATIC_LEN;
    it->data = NULL;
    it->node_cb = NULL;
    raxStackInit(&it->stack);
}

/* Append characters at the current key string of the iterator 'it'. This
 * is a low level function used to implement the iterator, not callable by
 * the user. Returns 0 on out of memory, otherwise 1 is returned. */
int raxIteratorAddChars(raxIterator *it, unsigned char *s, size_t len) {
    if (len == 0) return 1;
    if (it->key_max < it->key_len+len) {
        /* 'old' is NULL while we are still on the static buffer, so the
         * first rax_realloc() below acts as a plain malloc(). */
        unsigned char *old = (it->key == it->key_static_string) ? NULL :
                              it->key;
        /* Grow with a doubling policy to amortize repeated appends. */
        size_t new_max = (it->key_len+len)*2;
        it->key = rax_realloc(old,new_max);
        if (it->key == NULL) {
            /* Allocation failed: restore the previous buffer (static or
             * heap), which rax_realloc() left untouched and still valid. */
            it->key = (!old) ? it->key_static_string : old;
            errno = ENOMEM;
            return 0;
        }
        /* First promotion from static to heap: copy the existing key. */
        if (old == NULL) memcpy(it->key,it->key_static_string,it->key_len);
        it->key_max = new_max;
    }
    /* Use memmove since there could be an overlap between 's' and
     * it->key when we use the current key in order to re-seek. */
    memmove(it->key+it->key_len,s,len);
    it->key_len += len;
    return 1;
}

/* Remove the specified number of chars from the right of the current
 * iterator key. */
void raxIteratorDelChars(raxIterator *it, size_t count) {
    it->key_len -= count;
}

/* Do an iteration step towards the next element. 
At the end of the step the * iterator key will represent the (new) current key. If it is not possible * to step in the specified direction since there are no longer elements, the * iterator is flagged with RAX_ITER_EOF. * * If 'noup' is true the function starts directly scanning for the next * lexicographically smaller children, and the current node is already assumed * to be the parent of the last key node, so the first operation to go back to * the parent will be skipped. This option is used by raxSeek() when * implementing seeking a non existing element with the ">" or "<" options: * the starting node is not a key in that particular case, so we start the scan * from a node that does not represent the key set. * * The function returns 1 on success or 0 on out of memory. */ int raxIteratorNextStep(raxIterator *it, int noup) { if (it->flags & RAX_ITER_EOF) { return 1; } else if (it->flags & RAX_ITER_JUST_SEEKED) { it->flags &= ~RAX_ITER_JUST_SEEKED; return 1; } /* Save key len, stack items and the node where we are currently * so that on iterator EOF we can restore the current key and state. */ size_t orig_key_len = it->key_len; size_t orig_stack_items = it->stack.items; raxNode *orig_node = it->node; while(1) { int children = it->node->iscompr ? 1 : it->node->size; if (!noup && children) { debugf("GO DEEPER\n"); /* Seek the lexicographically smaller key in this subtree, which * is the first one found always going towards the first child * of every successive node. */ if (!raxStackPush(&it->stack,it->node)) return 0; raxNode **cp = raxNodeFirstChildPtr(it->node); if (!raxIteratorAddChars(it,it->node->data, it->node->iscompr ? it->node->size : 1)) return 0; memcpy(&it->node,cp,sizeof(it->node)); /* Call the node callback if any, and replace the node pointer * if the callback returns true. 
*/ if (it->node_cb && it->node_cb(&it->node)) memcpy(cp,&it->node,sizeof(it->node)); /* For "next" step, stop every time we find a key along the * way, since the key is lexicographically smaller compared to * what follows in the sub-children. */ if (it->node->iskey) { it->data = raxGetData(it->node); return 1; } } else { /* If we finished exploring the previous sub-tree, switch to the * new one: go upper until a node is found where there are * children representing keys lexicographically greater than the * current key. */ while(1) { int old_noup = noup; /* Already on head? Can't go up, iteration finished. */ if (!noup && it->node == it->rt->head) { it->flags |= RAX_ITER_EOF; it->stack.items = orig_stack_items; it->key_len = orig_key_len; it->node = orig_node; return 1; } /* If there are no children at the current node, try parent's * next child. */ unsigned char prevchild = it->key[it->key_len-1]; if (!noup) { it->node = raxStackPop(&it->stack); } else { noup = 0; } /* Adjust the current key to represent the node we are * at. */ int todel = it->node->iscompr ? it->node->size : 1; raxIteratorDelChars(it,todel); /* Try visiting the next child if there was at least one * additional child. */ if (!it->node->iscompr && it->node->size > (old_noup ? 0 : 1)) { raxNode **cp = raxNodeFirstChildPtr(it->node); int i = 0; while (i < it->node->size) { debugf("SCAN NEXT %c\n", it->node->data[i]); if (it->node->data[i] > prevchild) break; i++; cp++; } if (i != it->node->size) { debugf("SCAN found a new node\n"); raxIteratorAddChars(it,it->node->data+i,1); if (!raxStackPush(&it->stack,it->node)) return 0; memcpy(&it->node,cp,sizeof(it->node)); /* Call the node callback if any, and replace the node * pointer if the callback returns true. */ if (it->node_cb && it->node_cb(&it->node)) memcpy(cp,&it->node,sizeof(it->node)); if (it->node->iskey) { it->data = raxGetData(it->node); return 1; } break; } } } } } } /* Seek the greatest key in the subtree at the current node. 
Return 0 on * out of memory, otherwise 1. This is a helper function for different * iteration functions below. */ int raxSeekGreatest(raxIterator *it) { while(it->node->size) { if (it->node->iscompr) { if (!raxIteratorAddChars(it,it->node->data, it->node->size)) return 0; } else { if (!raxIteratorAddChars(it,it->node->data+it->node->size-1,1)) return 0; } raxNode **cp = raxNodeLastChildPtr(it->node); if (!raxStackPush(&it->stack,it->node)) return 0; memcpy(&it->node,cp,sizeof(it->node)); } return 1; } /* Like raxIteratorNextStep() but implements an iteration step moving * to the lexicographically previous element. The 'noup' option has a similar * effect to the one of raxIteratorNextStep(). */ int raxIteratorPrevStep(raxIterator *it, int noup) { if (it->flags & RAX_ITER_EOF) { return 1; } else if (it->flags & RAX_ITER_JUST_SEEKED) { it->flags &= ~RAX_ITER_JUST_SEEKED; return 1; } /* Save key len, stack items and the node where we are currently * so that on iterator EOF we can restore the current key and state. */ size_t orig_key_len = it->key_len; size_t orig_stack_items = it->stack.items; raxNode *orig_node = it->node; while(1) { int old_noup = noup; /* Already on head? Can't go up, iteration finished. */ if (!noup && it->node == it->rt->head) { it->flags |= RAX_ITER_EOF; it->stack.items = orig_stack_items; it->key_len = orig_key_len; it->node = orig_node; return 1; } unsigned char prevchild = it->key[it->key_len-1]; if (!noup) { it->node = raxStackPop(&it->stack); } else { noup = 0; } /* Adjust the current key to represent the node we are * at. */ int todel = it->node->iscompr ? it->node->size : 1; raxIteratorDelChars(it,todel); /* Try visiting the prev child if there is at least one * child. */ if (!it->node->iscompr && it->node->size > (old_noup ? 
0 : 1)) { raxNode **cp = raxNodeLastChildPtr(it->node); int i = it->node->size-1; while (i >= 0) { debugf("SCAN PREV %c\n", it->node->data[i]); if (it->node->data[i] < prevchild) break; i--; cp--; } /* If we found a new subtree to explore in this node, * go deeper following all the last children in order to * find the key lexicographically greater. */ if (i != -1) { debugf("SCAN found a new node\n"); /* Enter the node we just found. */ if (!raxIteratorAddChars(it,it->node->data+i,1)) return 0; if (!raxStackPush(&it->stack,it->node)) return 0; memcpy(&it->node,cp,sizeof(it->node)); /* Seek sub-tree max. */ if (!raxSeekGreatest(it)) return 0; } } /* Return the key: this could be the key we found scanning a new * subtree, or if we did not find a new subtree to explore here, * before giving up with this node, check if it's a key itself. */ if (it->node->iskey) { it->data = raxGetData(it->node); return 1; } } } /* Seek an iterator at the specified element. * Return 0 if the seek failed for syntax error or out of memory. Otherwise * 1 is returned. When 0 is returned for out of memory, errno is set to * the ENOMEM value. */ int raxSeek(raxIterator *it, const char *op, unsigned char *ele, size_t len) { int eq = 0, lt = 0, gt = 0, first = 0, last = 0; it->stack.items = 0; /* Just resetting. Initialized by raxStart(). */ it->flags |= RAX_ITER_JUST_SEEKED; it->flags &= ~RAX_ITER_EOF; it->key_len = 0; it->node = NULL; /* Set flags according to the operator used to perform the seek. */ if (op[0] == '>') { gt = 1; if (op[1] == '=') eq = 1; } else if (op[0] == '<') { lt = 1; if (op[1] == '=') eq = 1; } else if (op[0] == '=') { eq = 1; } else if (op[0] == '^') { first = 1; } else if (op[0] == '$') { last = 1; } else { errno = 0; return 0; /* Error. */ } /* If there are no elements, set the EOF condition immediately and * return. 
*/ if (it->rt->numele == 0) { it->flags |= RAX_ITER_EOF; return 1; } if (first) { /* Seeking the first key greater or equal to the empty string * is equivalent to seeking the smaller key available. */ return raxSeek(it,">=",NULL,0); } if (last) { /* Find the greatest key taking always the last child till a * final node is found. */ it->node = it->rt->head; if (!raxSeekGreatest(it)) return 0; assert(it->node->iskey); it->data = raxGetData(it->node); return 1; } /* We need to seek the specified key. What we do here is to actually * perform a lookup, and later invoke the prev/next key code that * we already use for iteration. */ int splitpos = 0; size_t i = raxLowWalk(it->rt,ele,len,&it->node,NULL,&splitpos,&it->stack); /* Return OOM on incomplete stack info. */ if (it->stack.oom) return 0; if (eq && i == len && (!it->node->iscompr || splitpos == 0) && it->node->iskey) { /* We found our node, since the key matches and we have an * "equal" condition. */ if (!raxIteratorAddChars(it,ele,len)) return 0; /* OOM. */ it->data = raxGetData(it->node); } else if (lt || gt) { /* Exact key not found or eq flag not set. We have to set as current * key the one represented by the node we stopped at, and perform * a next/prev operation to seek. */ raxIteratorAddChars(it, ele, i-splitpos); /* We need to set the iterator in the correct state to call next/prev * step in order to seek the desired element. */ debugf("After initial seek: i=%d len=%d key=%.*s\n", (int)i, (int)len, (int)it->key_len, it->key); if (i != len && !it->node->iscompr) { /* If we stopped in the middle of a normal node because of a * mismatch, add the mismatching character to the current key * and call the iterator with the 'noup' flag so that it will try * to seek the next/prev child in the current node directly based * on the mismatching character. 
*/ if (!raxIteratorAddChars(it,ele+i,1)) return 0; debugf("Seek normal node on mismatch: %.*s\n", (int)it->key_len, (char*)it->key); it->flags &= ~RAX_ITER_JUST_SEEKED; if (lt && !raxIteratorPrevStep(it,1)) return 0; if (gt && !raxIteratorNextStep(it,1)) return 0; it->flags |= RAX_ITER_JUST_SEEKED; /* Ignore next call. */ } else if (i != len && it->node->iscompr) { debugf("Compressed mismatch: %.*s\n", (int)it->key_len, (char*)it->key); /* In case of a mismatch within a compressed node. */ int nodechar = it->node->data[splitpos]; int keychar = ele[i]; it->flags &= ~RAX_ITER_JUST_SEEKED; if (gt) { /* If the key the compressed node represents is greater * than our seek element, continue forward, otherwise set the * state in order to go back to the next sub-tree. */ if (nodechar > keychar) { if (!raxIteratorNextStep(it,0)) return 0; } else { if (!raxIteratorAddChars(it,it->node->data,it->node->size)) return 0; if (!raxIteratorNextStep(it,1)) return 0; } } if (lt) { /* If the key the compressed node represents is smaller * than our seek element, seek the greater key in this * subtree, otherwise set the state in order to go back to * the previous sub-tree. */ if (nodechar < keychar) { if (!raxSeekGreatest(it)) return 0; it->data = raxGetData(it->node); } else { if (!raxIteratorAddChars(it,it->node->data,it->node->size)) return 0; if (!raxIteratorPrevStep(it,1)) return 0; } } it->flags |= RAX_ITER_JUST_SEEKED; /* Ignore next call. */ } else { debugf("No mismatch: %.*s\n", (int)it->key_len, (char*)it->key); /* If there was no mismatch we are into a node representing the * key, (but which is not a key or the seek operator does not * include 'eq'), or we stopped in the middle of a compressed node * after processing all the key. Continue iterating as this was * a legitimate key we stopped at. 
 */
            /* Clear JUST_SEEKED so the step functions below actually move
             * instead of returning immediately. */
            it->flags &= ~RAX_ITER_JUST_SEEKED;
            if (it->node->iscompr && it->node->iskey && splitpos && lt) {
                /* If we stopped in the middle of a compressed node with
                 * perfect match, and the condition is to seek a key "<" than
                 * the specified one, then if this node is a key it already
                 * represents our match. For instance we may have nodes:
                 *
                 * "f" -> "oobar" = 1 -> "" = 2
                 *
                 * Representing keys "f" = 1, "foobar" = 2. A seek for
                 * the key < "foo" will stop in the middle of the "oobar"
                 * node, but will be our match, representing the key "f".
                 *
                 * So in that case, we don't seek backward. */
                it->data = raxGetData(it->node);
            } else {
                if (gt && !raxIteratorNextStep(it,0)) return 0;
                if (lt && !raxIteratorPrevStep(it,0)) return 0;
            }
            it->flags |= RAX_ITER_JUST_SEEKED; /* Ignore next call. */
        }
    } else {
        /* If we are here just eq was set but no match was found. */
        it->flags |= RAX_ITER_EOF;
        return 1;
    }
    return 1;
}

/* Go to the next element in the scope of the iterator 'it'.
 * If EOF (or out of memory) is reached, 0 is returned, otherwise 1 is
 * returned. In case 0 is returned because of OOM, errno is set to ENOMEM. */
int raxNext(raxIterator *it) {
    if (!raxIteratorNextStep(it,0)) {
        errno = ENOMEM;
        return 0;
    }
    if (it->flags & RAX_ITER_EOF) {
        /* errno distinguishes plain EOF (0) from OOM (ENOMEM) since both
         * conditions make this function return 0. */
        errno = 0;
        return 0;
    }
    return 1;
}

/* Go to the previous element in the scope of the iterator 'it'.
 * If EOF (or out of memory) is reached, 0 is returned, otherwise 1 is
 * returned. In case 0 is returned because of OOM, errno is set to ENOMEM. */
int raxPrev(raxIterator *it) {
    if (!raxIteratorPrevStep(it,0)) {
        errno = ENOMEM;
        return 0;
    }
    if (it->flags & RAX_ITER_EOF) {
        /* See raxNext(): errno = 0 marks a genuine EOF, not an error. */
        errno = 0;
        return 0;
    }
    return 1;
}

/* Perform a random walk starting in the current position of the iterator.
 * Return 0 if the tree is empty or on out of memory. Otherwise 1 is returned
 * and the iterator is set to the node reached after doing a random walk
 * of 'steps' steps. 
If the 'steps' argument is 0, the random walk is performed * using a random number of steps between 1 and two times the logarithm of * the number of elements. * * NOTE: if you use this function to generate random elements from the radix * tree, expect a disappointing distribution. A random walk produces good * random elements if the tree is not sparse, however in the case of a radix * tree certain keys will be reported much more often than others. At least * this function should be able to explore every possible element eventually. */ int raxRandomWalk(raxIterator *it, size_t steps) { if (it->rt->numele == 0) { it->flags |= RAX_ITER_EOF; return 0; } if (steps == 0) { size_t fle = 1+floor(log(it->rt->numele)); fle *= 2; steps = 1 + rand() % fle; } raxNode *n = it->node; while(steps > 0 || !n->iskey) { int numchildren = n->iscompr ? 1 : n->size; int r = rand() % (numchildren+(n != it->rt->head)); if (r == numchildren) { /* Go up to parent. */ n = raxStackPop(&it->stack); int todel = n->iscompr ? n->size : 1; raxIteratorDelChars(it,todel); } else { /* Select a random child. */ if (n->iscompr) { if (!raxIteratorAddChars(it,n->data,n->size)) return 0; } else { if (!raxIteratorAddChars(it,n->data+r,1)) return 0; } raxNode **cp = raxNodeFirstChildPtr(n)+r; if (!raxStackPush(&it->stack,n)) return 0; memcpy(&n,cp,sizeof(n)); } if (n->iskey) steps--; } it->node = n; it->data = raxGetData(it->node); return 1; } /* Compare the key currently pointed by the iterator to the specified * key according to the specified operator. Returns 1 if the comparison is * true, otherwise 0 is returned. */ int raxCompare(raxIterator *iter, const char *op, unsigned char *key, size_t key_len) { int eq = 0, lt = 0, gt = 0; if (op[0] == '=' || op[1] == '=') eq = 1; if (op[0] == '>') gt = 1; else if (op[0] == '<') lt = 1; else if (op[1] != '=') return 0; /* Syntax error. */ size_t minlen = key_len < iter->key_len ? 
key_len : iter->key_len; int cmp = memcmp(iter->key,key,minlen); /* Handle == */ if (lt == 0 && gt == 0) return cmp == 0 && key_len == iter->key_len; /* Handle >, >=, <, <= */ if (cmp == 0) { /* Same prefix: longer wins. */ if (eq && key_len == iter->key_len) return 1; else if (lt) return iter->key_len < key_len; else if (gt) return iter->key_len > key_len; else return 0; /* Avoid warning, just 'eq' is handled before. */ } else if (cmp > 0) { return gt ? 1 : 0; } else /* (cmp < 0) */ { return lt ? 1 : 0; } } /* Free the iterator. */ void raxStop(raxIterator *it) { if (it->key != it->key_static_string) rax_free(it->key); raxStackFree(&it->stack); } /* Return if the iterator is in an EOF state. This happens when raxSeek() * failed to seek an appropriate element, so that raxNext() or raxPrev() * will return zero, or when an EOF condition was reached while iterating * with raxNext() and raxPrev(). */ int raxEOF(raxIterator *it) { return it->flags & RAX_ITER_EOF; } /* Return the number of elements inside the radix tree. */ uint64_t raxSize(rax *rax) { return rax->numele; } /* ----------------------------- Introspection ------------------------------ */ /* This function is mostly used for debugging and learning purposes. * It shows an ASCII representation of a tree on standard output, outline * all the nodes and the contained keys. * * The representation is as follow: * * "foobar" (compressed node) * [abc] (normal node with three children) * [abc]=0x12345678 (node is a key, pointing to value 0x12345678) * [] (a normal empty node) * * Children are represented in new indented lines, each children prefixed by * the "`-(x)" string, where "x" is the edge byte. * * [abc] * `-(a) "ladin" * `-(b) [kj] * `-(c) [] * * However when a node has a single child the following representation * is used instead: * * [abc] -> "ladin" -> [] */ /* The actual implementation of raxShow(). */ void raxRecursiveShow(int level, int lpad, raxNode *n) { char s = n->iscompr ? 
'"' : '[';
    char e = n->iscompr ? '"' : ']';

    /* Print this node: quoted run for compressed nodes, bracketed child
     * bytes for normal nodes, plus "=ptr" when the node holds a key. */
    int numchars = printf("%c%.*s%c", s, n->size, n->data, e);
    if (n->iskey) {
        numchars += printf("=%p",raxGetData(n));
    }

    int numchildren = n->iscompr ? 1 : n->size;
    /* Note that 7 and 4 magic constants are the string length
     * of " `-(x) " and " -> " respectively. */
    if (level) {
        lpad += (numchildren > 1) ? 7 : 4;
        if (numchildren == 1) lpad += numchars;
    }
    raxNode **cp = raxNodeFirstChildPtr(n);
    for (int i = 0; i < numchildren; i++) {
        char *branch = " `-(%c) ";
        if (numchildren > 1) {
            /* Multiple children: one indented line per branch. */
            printf("\n");
            for (int j = 0; j < lpad; j++) putchar(' ');
            printf(branch,n->data[i]);
        } else {
            /* Single child: keep the chain on the same line. */
            printf(" -> ");
        }
        raxNode *child;
        memcpy(&child,cp,sizeof(child));
        raxRecursiveShow(level+1,lpad,child);
        cp++;
    }
}

/* Show a tree, as outlined in the comment above. */
void raxShow(rax *rax) {
    raxRecursiveShow(0,0,rax->head);
    putchar('\n');
}

/* Used by debugnode() macro to show info about a given node. No-op unless
 * raxDebugMsg is enabled. */
void raxDebugShowNode(const char *msg, raxNode *n) {
    if (raxDebugMsg == 0) return;
    printf("%s: %p [%.*s] key:%u size:%u children:",
        msg, (void*)n, (int)n->size, (char*)n->data, n->iskey, n->size);
    int numcld = n->iscompr ? 1 : n->size;
    /* Child pointers are stored at the tail of the node; step back from the
     * last one to reach the first. */
    raxNode **cldptr = raxNodeLastChildPtr(n) - (numcld-1);
    while(numcld--) {
        raxNode *child;
        memcpy(&child,cldptr,sizeof(child));
        cldptr++;
        printf("%p ", (void*)child);
    }
    printf("\n");
    fflush(stdout);
}

/* Touch all the nodes of a tree returning a check sum. This is useful
 * in order to make Valgrind detect if there is something wrong while
 * reading the data structure.
 *
 * This function was used in order to identify Rax bugs after a big refactoring
 * using this technique:
 *
 * 1. The rax-test is executed using Valgrind, adding a printf() so that for
 *    the fuzz tester we see what iteration in the loop we are in.
 * 2. After every modification of the radix tree made by the fuzz tester
 *    in rax-test.c, we add a call to raxTouch().
 * 3. Now as soon as an operation will corrupt the tree, raxTouch() will
 *    detect it (via Valgrind) immediately. We can add more calls to narrow
 *    the state.
 * 4. At this point a good idea is to enable Rax debugging messages immediately
 *    before the moment the tree is corrupted, to see what happens. */
unsigned long raxTouch(raxNode *n) {
    debugf("Touching %p\n", (void*)n);
    unsigned long sum = 0;
    if (n->iskey) {
        sum += (unsigned long)raxGetData(n);
    }

    int numchildren = n->iscompr ? 1 : n->size;
    raxNode **cp = raxNodeFirstChildPtr(n);
    int count = 0;
    for (int i = 0; i < numchildren; i++) {
        if (numchildren > 1) {
            /* Fold the edge bytes into the checksum too. */
            sum += (long)n->data[i];
        }
        raxNode *child;
        memcpy(&child,cp,sizeof(child));
        /* NOTE(review): the hard-coded address check below looks like a
         * leftover debugging aid (aborts the process after a specific
         * pointer value is seen twice). Confirm whether it can be removed. */
        if (child == (void*)0x65d1760) count++;
        if (count > 1) exit(1);
        sum += raxTouch(child);
        cp++;
    }
    return sum;
}
902812.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */ /* * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * University Research and Technology * Corporation. All rights reserved. * Copyright (c) 2004-2005 The University of Tennessee and The University * of Tennessee Research Foundation. All rights * reserved. * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * University of Stuttgart. All rights reserved. * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2014 Los Alamos National Security, LLC. All rights * reserved. * Copyright (c) 2015 Research Organization for Information Science * and Technology (RIST). All rights reserved. * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ #include "ompi_config.h" #include "ompi/mpi/fortran/mpif-h/bindings.h" #include "ompi/mpi/fortran/base/constants.h" #if OMPI_BUILD_MPI_PROFILING #if OPAL_HAVE_WEAK_SYMBOLS #pragma weak PMPI_FETCH_AND_OP = ompi_fetch_and_op_f #pragma weak pmpi_fetch_and_op = ompi_fetch_and_op_f #pragma weak pmpi_fetch_and_op_ = ompi_fetch_and_op_f #pragma weak pmpi_fetch_and_op__ = ompi_fetch_and_op_f #pragma weak PMPI_Fetch_and_op_f = ompi_fetch_and_op_f #pragma weak PMPI_Fetch_and_op_f08 = ompi_fetch_and_op_f #else OMPI_GENERATE_F77_BINDINGS (PMPI_FETCH_AND_OP, pmpi_fetch_and_op, pmpi_fetch_and_op_, pmpi_fetch_and_op__, pompi_fetch_and_op_f, (char *origin_addr, char *result_addr, MPI_Fint *datatype, MPI_Fint *target_rank, MPI_Aint *target_disp, MPI_Fint *op, MPI_Fint *win, MPI_Fint *ierr), (origin_addr, result_addr, datatype, target_rank, target_disp, op, win, ierr) ) #endif #endif #if OPAL_HAVE_WEAK_SYMBOLS #pragma weak MPI_FETCH_AND_OP = ompi_fetch_and_op_f #pragma weak mpi_fetch_and_op = ompi_fetch_and_op_f #pragma weak mpi_fetch_and_op_ = ompi_fetch_and_op_f #pragma weak mpi_fetch_and_op__ = ompi_fetch_and_op_f 
#pragma weak MPI_Fetch_and_op_f = ompi_fetch_and_op_f #pragma weak MPI_Fetch_and_op_f08 = ompi_fetch_and_op_f #else #if ! OMPI_BUILD_MPI_PROFILING OMPI_GENERATE_F77_BINDINGS (MPI_FETCH_AND_OP, mpi_fetch_and_op, mpi_fetch_and_op_, mpi_fetch_and_op__, ompi_fetch_and_op_f, (char *origin_addr, char *result_addr, MPI_Fint *datatype, MPI_Fint *target_rank, MPI_Aint *target_disp, MPI_Fint *op, MPI_Fint *win, MPI_Fint *ierr), (origin_addr, result_addr, datatype, target_rank, target_disp, op, win, ierr) ) #else #define ompi_fetch_and_op_f pompi_fetch_and_op_f #endif #endif void ompi_fetch_and_op_f(char *origin_addr, char *result_addr, MPI_Fint *datatype, MPI_Fint *target_rank, MPI_Aint *target_disp, MPI_Fint *op, MPI_Fint *win, MPI_Fint *ierr) { int c_ierr; MPI_Datatype c_datatype = PMPI_Type_f2c(*datatype); MPI_Win c_win = PMPI_Win_f2c(*win); MPI_Op c_op = PMPI_Op_f2c(*op); c_ierr = PMPI_Fetch_and_op(OMPI_F2C_BOTTOM(origin_addr), OMPI_F2C_BOTTOM(result_addr), c_datatype, OMPI_FINT_2_INT(*target_rank), *target_disp, c_op, c_win); if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr); }
310465.c
// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include "structs.h" #include <assert.h> #include <ctype.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <myst/file.h> #include <myst/json.h> #include "strings.h" const char* arg0; #define COUNTOF(ARR) (sizeof(ARR) / sizeof(ARR[0])) typedef struct json_callback_data { header_t* hdr; } json_callback_data_t; static int _strtou64(uint64_t* x, const char* str) { char* end; *x = strtoull(str, &end, 10); if (!end || *end != '\0') return -1; return 0; } static void _write(void* stream, const void* buf, size_t count) { fwrite(buf, 1, count, (FILE*)stream); } static json_result_t _json_read_callback( json_parser_t* parser, json_reason_t reason, json_type_t type, const json_union_t* un, void* callback_data) { json_result_t result = JSON_UNEXPECTED; json_callback_data_t* data = (json_callback_data_t*)callback_data; switch (reason) { case JSON_REASON_NONE: { /* Unreachable */ assert(false); break; } case JSON_REASON_NAME: { break; } case JSON_REASON_BEGIN_OBJECT: { break; } case JSON_REASON_END_OBJECT: { break; } case JSON_REASON_BEGIN_ARRAY: { break; } case JSON_REASON_END_ARRAY: { break; } case JSON_REASON_VALUE: { if (json_match(parser, "keyslots.#.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->type); if (strlcpy(ks->type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.key_size") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->key_size = un->integer; } else if (json_match(parser, "keyslots.#.kdf.type") == JSON_OK) { uint64_t i = 
parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->kdf.type); if (strlcpy(ks->kdf.type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } if (strcmp(ks->kdf.type, "pbkdf2") != 0 && strcmp(ks->kdf.type, "argon2i") != 0 && strcmp(ks->kdf.type, "argon2id") != 0) { result = JSON_UNSUPPORTED; goto done; } } else if (json_match(parser, "keyslots.#.kdf.time") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->kdf.time = un->integer; } else if (json_match(parser, "keyslots.#.kdf.memory") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->kdf.memory = un->integer; } else if (json_match(parser, "keyslots.#.kdf.hash") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->kdf.hash); if (strlcpy(ks->kdf.hash, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.kdf.iterations") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->kdf.iterations = un->integer; } else if (json_match(parser, "keyslots.#.kdf.cpus") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->kdf.cpus = un->integer; } else if (json_match(parser, 
"keyslots.#.kdf.salt") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; size_t n = sizeof(ks->kdf.salt); if (strlcpy(ks->kdf.salt, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.af.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->af.type); if (strlcpy(ks->af.type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.af.hash") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->af.hash); if (strlcpy(ks->af.hash, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.af.stripes") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->af.stripes = un->integer; } else if (json_match(parser, "keyslots.#.area.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; const size_t n = sizeof(ks->area.type); if (strlcpy(ks->area.type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if ( json_match(parser, "keyslots.#.area.encryption") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = 
&data->hdr->keyslots[i]; const size_t n = sizeof(ks->area.encryption); if (strlcpy(ks->area.encryption, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "keyslots.#.area.key_size") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; ks->area.key_size = un->integer; } else if (json_match(parser, "keyslots.#.area.offset") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; if (_strtou64(&ks->area.offset, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "keyslots.#.area.size") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_KEYSLOTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_keyslot_t* ks = &data->hdr->keyslots[i]; if (_strtou64(&ks->area.size, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "segments.#.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; const size_t n = sizeof(seg->type); if (strlcpy(seg->type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "segments.#.offset") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; if (_strtou64(&seg->offset, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "segments.#.iv_tweak") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { 
result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; if (_strtou64(&seg->iv_tweak, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "segments.#.size") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; if (strcmp(un->string, "dynamic") == 0) seg->size = (uint64_t)-1; else if (_strtou64(&seg->size, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "segments.#.encryption") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; const size_t n = sizeof(seg->encryption); if (strlcpy(seg->encryption, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "segments.#.sector_size") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; seg->sector_size = un->integer; } else if (json_match(parser, "segments.#.integrity.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; const size_t n = sizeof(seg->integrity.type); if (strlcpy(seg->integrity.type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if ( json_match(parser, "segments.#.integrity.journal_encryption") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; const size_t n = sizeof(seg->integrity.journal_encryption); char* p = 
seg->integrity.journal_encryption; if (strcmp(un->string, "none") != 0) { result = JSON_UNSUPPORTED; goto done; } if (strlcpy(p, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if ( json_match(parser, "segments.#.integrity.journal_integrity") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_SEGMENTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_segment_t* seg = &data->hdr->segments[i]; const size_t n = sizeof(seg->integrity.journal_integrity); char* p = seg->integrity.journal_integrity; if (strcmp(un->string, "none") != 0) { result = JSON_UNSUPPORTED; goto done; } if (strlcpy(p, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "digests.#.type") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; const size_t n = sizeof(digest->type); if (strcmp(un->string, "pbkdf2") != 0) { result = JSON_UNSUPPORTED; goto done; } if (strlcpy(digest->type, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "digests.#.keyslots") == JSON_OK) { uint64_t i = parser->path[1].number; uint64_t n; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; if (_strtou64(&n, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } if (n >= COUNTOF(digest->keyslots)) { result = JSON_OUT_OF_BOUNDS; goto done; } digest->keyslots[n] = 1; } else if (json_match(parser, "digests.#.segments") == JSON_OK) { uint64_t i = parser->path[1].number; uint64_t n; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; if (_strtou64(&n, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } if (n >= COUNTOF(digest->segments)) 
{ result = JSON_OUT_OF_BOUNDS; goto done; } digest->segments[n] = 1; } else if (json_match(parser, "digests.#.hash") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; const size_t n = sizeof(digest->hash); if (strlcpy(digest->hash, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "digests.#.iterations") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_INTEGER || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; digest->iterations = un->integer; } else if (json_match(parser, "digests.#.salt") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; size_t n = sizeof(digest->salt); if (strlcpy(digest->hash, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "digests.#.digest") == JSON_OK) { uint64_t i = parser->path[1].number; if (type != JSON_TYPE_STRING || i >= NUM_DIGESTS) { result = JSON_TYPE_MISMATCH; goto done; } luks2_digest_t* digest = &data->hdr->digests[i]; size_t n = sizeof(digest->digest); if (strlcpy(digest->hash, un->string, n) >= n) { result = JSON_BUFFER_OVERFLOW; goto done; } } else if (json_match(parser, "config.json_size") == JSON_OK) { luks2_config_t* config = &data->hdr->config; if (_strtou64(&config->json_size, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else if (json_match(parser, "config.keyslots_size") == JSON_OK) { luks2_config_t* config = &data->hdr->config; if (_strtou64(&config->keyslots_size, un->string) != 0) { result = JSON_TYPE_MISMATCH; goto done; } } else { json_dump_path(_write, stdout, parser); result = JSON_UNKNOWN_VALUE; goto done; } break; } } result = 
JSON_OK; done: return result; } static void _parse(const char* path) { json_parser_t parser; char* data; size_t size; json_result_t r; header_t header; json_callback_data_t callback_data = {&header}; static json_allocator_t allocator = { malloc, free, }; memset(&header, 0, sizeof(header)); if (myst_load_file(path, (void**)&data, &size) != 0) { fprintf(stderr, "%s: failed to access '%s'\n", arg0, path); exit(1); } const json_parser_options_t options = {1}; if ((r = json_parser_init( &parser, data, size, _json_read_callback, &callback_data, &allocator, &options)) != JSON_OK) { fprintf(stderr, "%s: json_parser_init() failed: %d\n", arg0, r); exit(1); } if ((r = json_parser_parse(&parser)) != JSON_OK) { fprintf(stderr, "%s: json_parser_init() failed: %d\n", arg0, r); exit(1); } if (parser.depth != 0) { fprintf(stderr, "%s: unterminated objects\n", arg0); exit(1); } /* check a couple of the fields */ assert(strcmp(header.keyslots[0].area.type, "raw") == 0); assert(strcmp(header.keyslots[0].kdf.type, "argon2i") == 0); printf("=== passed test (%s)\n", arg0); } int main(int argc, char** argv) { arg0 = argv[0]; if (argc != 2) { fprintf(stderr, "Usage: %s path\n", argv[0]); exit(1); } _parse(argv[1]); return 0; }
219410.c
/* * Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include "nx-match.h" #include <netinet/icmp6.h> #include "classifier.h" #include "colors.h" #include "openvswitch/hmap.h" #include "openflow/nicira-ext.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/meta-flow.h" #include "openvswitch/ofp-actions.h" #include "openvswitch/ofp-errors.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/vlog.h" #include "packets.h" #include "openvswitch/shash.h" #include "tun-metadata.h" #include "unaligned.h" #include "util.h" #include "vl-mff-map.h" VLOG_DEFINE_THIS_MODULE(nx_match); /* OXM headers. * * * Standard OXM/NXM * ================ * * The header is 32 bits long. It looks like this: * * |31 16 15 9| 8 7 0 * +----------------------------------+---------------+--+------------------+ * | oxm_class | oxm_field |hm| oxm_length | * +----------------------------------+---------------+--+------------------+ * * where hm stands for oxm_hasmask. It is followed by oxm_length bytes of * payload. When oxm_hasmask is 0, the payload is the value of the field * identified by the header; when oxm_hasmask is 1, the payload is a value for * the field followed by a mask of equal length. * * Internally, we represent a standard OXM header as a 64-bit integer with the * above information in the most-significant bits. 
* * * Experimenter OXM * ================ * * The header is 64 bits long. It looks like the diagram above except that a * 32-bit experimenter ID, which we call oxm_vendor and which identifies a * vendor, is inserted just before the payload. Experimenter OXMs are * identified by an all-1-bits oxm_class (OFPXMC12_EXPERIMENTER). The * oxm_length value *includes* the experimenter ID, so that the real payload is * only oxm_length - 4 bytes long. * * Internally, we represent an experimenter OXM header as a 64-bit integer with * the standard header in the upper 32 bits and the experimenter ID in the * lower 32 bits. (It would be more convenient to swap the positions of the * two 32-bit words, but this would be more error-prone because experimenter * OXMs are very rarely used, so accidentally passing one through a 32-bit type * somewhere in the OVS code would be hard to find.) */ /* * OXM Class IDs. * The high order bit differentiate reserved classes from member classes. * Classes 0x0000 to 0x7FFF are member classes, allocated by ONF. * Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation. */ enum ofp12_oxm_class { OFPXMC12_NXM_0 = 0x0000, /* Backward compatibility with NXM */ OFPXMC12_NXM_1 = 0x0001, /* Backward compatibility with NXM */ OFPXMC12_OPENFLOW_BASIC = 0x8000, /* Basic class for OpenFlow */ OFPXMC15_PACKET_REGS = 0x8001, /* Packet registers (pipeline fields). */ OFPXMC12_EXPERIMENTER = 0xffff, /* Experimenter class */ }; /* Functions for extracting raw field values from OXM/NXM headers. 
*/ static uint32_t nxm_vendor(uint64_t header) { return header; } static int nxm_class(uint64_t header) { return header >> 48; } static int nxm_field(uint64_t header) { return (header >> 41) & 0x7f; } static bool nxm_hasmask(uint64_t header) { return (header >> 40) & 1; } static int nxm_length(uint64_t header) { return (header >> 32) & 0xff; } static uint64_t nxm_no_len(uint64_t header) { return header & 0xffffff80ffffffffULL; } static bool is_experimenter_oxm(uint64_t header) { return nxm_class(header) == OFPXMC12_EXPERIMENTER; } /* The OXM header "length" field is somewhat tricky: * * - For a standard OXM header, the length is the number of bytes of the * payload, and the payload consists of just the value (and mask, if * present). * * - For an experimenter OXM header, the length is the number of bytes in * the payload plus 4 (the length of the experimenter ID). That is, the * experimenter ID is included in oxm_length. * * This function returns the length of the experimenter ID field in 'header'. * That is, for an experimenter OXM (when an experimenter ID is present), it * returns 4, and for a standard OXM (when no experimenter ID is present), it * returns 0. */ static int nxm_experimenter_len(uint64_t header) { return is_experimenter_oxm(header) ? 4 : 0; } /* Returns the number of bytes that follow the header for an NXM/OXM entry * with the given 'header'. */ static int nxm_payload_len(uint64_t header) { return nxm_length(header) - nxm_experimenter_len(header); } /* Returns the number of bytes in the header for an NXM/OXM entry with the * given 'header'. 
*/ static int nxm_header_len(uint64_t header) { return 4 + nxm_experimenter_len(header); } #define NXM_HEADER(VENDOR, CLASS, FIELD, HASMASK, LENGTH) \ (((uint64_t) (CLASS) << 48) | \ ((uint64_t) (FIELD) << 41) | \ ((uint64_t) (HASMASK) << 40) | \ ((uint64_t) (LENGTH) << 32) | \ (VENDOR)) #define NXM_HEADER_FMT "%#"PRIx32":%d:%d:%d:%d" #define NXM_HEADER_ARGS(HEADER) \ nxm_vendor(HEADER), nxm_class(HEADER), nxm_field(HEADER), \ nxm_hasmask(HEADER), nxm_length(HEADER) /* Functions for turning the "hasmask" bit on or off. (This also requires * adjusting the length.) */ static uint64_t nxm_make_exact_header(uint64_t header) { int new_len = nxm_payload_len(header) / 2 + nxm_experimenter_len(header); return NXM_HEADER(nxm_vendor(header), nxm_class(header), nxm_field(header), 0, new_len); } static uint64_t nxm_make_wild_header(uint64_t header) { int new_len = nxm_payload_len(header) * 2 + nxm_experimenter_len(header); return NXM_HEADER(nxm_vendor(header), nxm_class(header), nxm_field(header), 1, new_len); } /* Flow cookie. * * This may be used to gain the OpenFlow 1.1-like ability to restrict * certain NXM-based Flow Mod and Flow Stats Request messages to flows * with specific cookies. See the "nx_flow_mod" and "nx_flow_stats_request" * structure definitions for more details. This match is otherwise not * allowed. */ #define NXM_NX_COOKIE NXM_HEADER (0, 0x0001, 30, 0, 8) #define NXM_NX_COOKIE_W nxm_make_wild_header(NXM_NX_COOKIE) struct nxm_field { uint64_t header; enum ofp_version version; const char *name; /* e.g. "NXM_OF_IN_PORT". 
*/ enum mf_field_id id; }; static const struct nxm_field *nxm_field_by_header(uint64_t header); static const struct nxm_field *nxm_field_by_name(const char *name, size_t len); static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id, enum ofp_version); static void nx_put_header__(struct ofpbuf *, uint64_t header, bool masked); static void nx_put_header_len(struct ofpbuf *, enum mf_field_id field, enum ofp_version version, bool masked, size_t n_bytes); /* Rate limit for nx_match parse errors. These always indicate a bug in the * peer and so there's not much point in showing a lot of them. */ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); static const struct nxm_field * mf_parse_subfield_name(const char *name, int name_len, bool *wild); /* Returns the preferred OXM header to use for field 'id' in OpenFlow version * 'version'. Specify 0 for 'version' if an NXM legacy header should be * preferred over any standardized OXM header. Returns 0 if field 'id' cannot * be expressed in NXM or OXM. */ static uint64_t mf_oxm_header(enum mf_field_id id, enum ofp_version version) { const struct nxm_field *f = nxm_field_by_mf_id(id, version); return f ? f->header : 0; } /* Returns the 32-bit OXM or NXM header to use for field 'id', preferring an * NXM legacy header over any standardized OXM header. Returns 0 if field 'id' * cannot be expressed with a 32-bit NXM or OXM header. * * Whenever possible, use nx_pull_header() instead of this function, because * this function cannot support 64-bit experimenter OXM headers. */ uint32_t mf_nxm_header(enum mf_field_id id) { uint64_t oxm = mf_oxm_header(id, 0); return is_experimenter_oxm(oxm) ? 0 : oxm >> 32; } /* Returns the 32-bit OXM or NXM header to use for field 'mff'. If 'mff' is * a mapped variable length mf_field, update the header with the configured * length of 'mff'. 
Returns 0 if 'mff' cannot be expressed with a 32-bit NXM * or OXM header.*/ uint32_t nxm_header_from_mff(const struct mf_field *mff) { uint64_t oxm = mf_oxm_header(mff->id, 0); if (mff->mapped) { oxm = nxm_no_len(oxm) | ((uint64_t) mff->n_bytes << 32); } return is_experimenter_oxm(oxm) ? 0 : oxm >> 32; } static const struct mf_field * mf_from_oxm_header(uint64_t header, const struct vl_mff_map *vl_mff_map) { const struct nxm_field *f = nxm_field_by_header(header); if (f) { const struct mf_field *mff = mf_from_id(f->id); const struct mf_field *vl_mff = mf_get_vl_mff(mff, vl_mff_map); return vl_mff ? vl_mff : mff; } else { return NULL; } } /* Returns the "struct mf_field" that corresponds to NXM or OXM header * 'header', or NULL if 'header' doesn't correspond to any known field. */ const struct mf_field * mf_from_nxm_header(uint32_t header, const struct vl_mff_map *vl_mff_map) { return mf_from_oxm_header((uint64_t) header << 32, vl_mff_map); } /* Returns the width of the data for a field with the given 'header', in * bytes. */ static int nxm_field_bytes(uint64_t header) { unsigned int length = nxm_payload_len(header); return nxm_hasmask(header) ? length / 2 : length; } /* nx_pull_match() and helpers. */ /* Given NXM/OXM value 'value' and mask 'mask' associated with 'header', checks * for any 1-bit in the value where there is a 0-bit in the mask. Returns 0 if * none, otherwise an error code. 
 */
/* Returns true when every 1-bit in 'value' is also set in 'mask', i.e. the
 * entry does not try to match bits its own mask wildcards; logs (rate
 * limited) and returns false otherwise. */
static bool
is_mask_consistent(uint64_t header, const uint8_t *value, const uint8_t *mask)
{
    unsigned int width = nxm_field_bytes(header);
    unsigned int i;

    for (i = 0; i < width; i++) {
        if (value[i] & ~mask[i]) {
            /* NOTE(review): VLOG_DROP_WARN followed by VLOG_WARN_RL consults
             * the rate limiter twice -- confirm this is intentional. */
            if (!VLOG_DROP_WARN(&rl)) {
                VLOG_WARN_RL(&rl, "Rejecting NXM/OXM entry "NXM_HEADER_FMT " "
                             "with 1-bits in value for bits wildcarded by the "
                             "mask.", NXM_HEADER_ARGS(header));
            }
            return false;
        }
    }
    return true;
}

/* True for the NXM cookie pseudo-headers, which match flow cookies rather
 * than packet fields (see the NXM_NX_COOKIE comment above). */
static bool
is_cookie_pseudoheader(uint64_t header)
{
    return header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W;
}

/* Pulls an NXM/OXM header (4-byte standard or 8-byte experimenter form) off
 * the front of 'b' into '*header', and looks up the corresponding field into
 * '*field' (if 'field' is nonnull).  Unknown fields yield
 * OFPERR_OFPBMC_BAD_FIELD unless they are the cookie pseudo-header and
 * 'allow_cookie' is set. */
static enum ofperr
nx_pull_header__(struct ofpbuf *b, bool allow_cookie,
                 const struct vl_mff_map *vl_mff_map, uint64_t *header,
                 const struct mf_field **field)
{
    if (b->size < 4) {
        goto bad_len;
    }

    /* Read the 32-bit standard header first; only fetch the full 64 bits
     * when the class says an experimenter ID follows. */
    *header = ((uint64_t) ntohl(get_unaligned_be32(b->data))) << 32;
    if (is_experimenter_oxm(*header)) {
        if (b->size < 8) {
            goto bad_len;
        }
        *header = ntohll(get_unaligned_be64(b->data));
    }
    /* oxm_length must at least cover the experimenter ID (if any). */
    if (nxm_length(*header) < nxm_experimenter_len(*header)) {
        VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has invalid length %d "
                     "(minimum is %d)",
                     NXM_HEADER_ARGS(*header), nxm_length(*header),
                     nxm_header_len(*header));
        goto error;
    }
    ofpbuf_pull(b, nxm_header_len(*header));

    if (field) {
        *field = mf_from_oxm_header(*header, vl_mff_map);
        if (!*field && !(allow_cookie && is_cookie_pseudoheader(*header))) {
            VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" is unknown",
                        NXM_HEADER_ARGS(*header));
            return OFPERR_OFPBMC_BAD_FIELD;
        } else if (mf_vl_mff_invalid(*field, vl_mff_map)) {
            return OFPERR_NXFMFC_INVALID_TLV_FIELD;
        }
    }

    return 0;

bad_len:
    VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
                b->size);
error:
    *header = 0;
    if (field) {
        *field = NULL;
    }
    return OFPERR_OFPBMC_BAD_LEN;
}

/* Copies a 'width'-byte entry payload into '*value'.  For variable-length
 * fields the data is right-aligned within the field's full width (leading
 * bytes zeroed); otherwise it is copied to the front of '*value'. */
static void
copy_entry_value(const struct mf_field *field, union mf_value *value,
                 const uint8_t *payload, int width)
{
    int copy_len;
    void *copy_dst;

    copy_dst = value;
    copy_len = MIN(width, field ? field->n_bytes : sizeof *value);

    if (field && field->variable_len) {
        memset(value, 0, field->n_bytes);
        copy_dst = &value->u8 + field->n_bytes - copy_len;
    }

    memcpy(copy_dst, payload, copy_len);
}

/* Pulls one complete NXM/OXM entry (header, value, optional mask) off the
 * front of 'b'.  A missing mask yields an all-ones '*mask'; a present mask
 * with a NULL 'mask' argument is an error. */
static enum ofperr
nx_pull_entry__(struct ofpbuf *b, bool allow_cookie,
                const struct vl_mff_map *vl_mff_map, uint64_t *header,
                const struct mf_field **field_,
                union mf_value *value, union mf_value *mask)
{
    const struct mf_field *field;
    enum ofperr header_error;
    unsigned int payload_len;
    const uint8_t *payload;
    int width;

    /* BAD_FIELD is tolerated here so the payload can still be skipped;
     * the error is reported to the caller at the end. */
    header_error = nx_pull_header__(b, allow_cookie, vl_mff_map, header,
                                    &field);
    if (header_error && header_error != OFPERR_OFPBMC_BAD_FIELD) {
        return header_error;
    }

    payload_len = nxm_payload_len(*header);
    payload = ofpbuf_try_pull(b, payload_len);
    if (!payload) {
        VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" calls for %u-byte "
                    "payload but only %"PRIu32" bytes follow OXM header",
                    NXM_HEADER_ARGS(*header), payload_len, b->size);
        return OFPERR_OFPBMC_BAD_LEN;
    }

    width = nxm_field_bytes(*header);
    if (nxm_hasmask(*header)
        && !is_mask_consistent(*header, payload, payload + width)) {
        return OFPERR_OFPBMC_BAD_WILDCARDS;
    }

    copy_entry_value(field, value, payload, width);

    if (mask) {
        if (nxm_hasmask(*header)) {
            copy_entry_value(field, mask, payload + width, width);
        } else {
            /* No mask in the entry: treat as an exact match. */
            memset(mask, 0xff, sizeof *mask);
        }
    } else if (nxm_hasmask(*header)) {
        VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" includes mask but "
                    "masked OXMs are not allowed here",
                    NXM_HEADER_ARGS(*header));
        return OFPERR_OFPBMC_BAD_MASK;
    }

    if (field_) {
        *field_ = field;
        return header_error;
    }

    return 0;
}

/* Attempts to pull an NXM or OXM header, value, and mask (if present) from the
 * beginning of 'b'.  If successful, stores a pointer to the "struct mf_field"
 * corresponding to the pulled header in '*field', the value into '*value',
 * and the mask into '*mask', and returns 0.
On error, returns an OpenFlow
 * error; in this case, some bytes might have been pulled off 'b' anyhow, and
 * the output parameters might have been modified.
 *
 * If a NULL 'mask' is supplied, masked OXM or NXM entries are treated as
 * errors (with OFPERR_OFPBMC_BAD_MASK). */
enum ofperr
nx_pull_entry(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
              const struct mf_field **field, union mf_value *value,
              union mf_value *mask)
{
    uint64_t header;

    /* Public wrapper: cookie pseudo-headers are not allowed here. */
    return nx_pull_entry__(b, false, vl_mff_map, &header, field, value, mask);
}

/* Attempts to pull an NXM or OXM header from the beginning of 'b'.  If
 * successful, stores a pointer to the "struct mf_field" corresponding to the
 * pulled header in '*field', stores the header's hasmask bit in '*masked'
 * (true if hasmask=1, false if hasmask=0), and returns 0.  On error, returns
 * an OpenFlow error; in this case, some bytes might have been pulled off 'b'
 * anyhow, and the output parameters might have been modified.
 *
 * If NULL 'masked' is supplied, masked OXM or NXM headers are treated as
 * errors (with OFPERR_OFPBMC_BAD_MASK).
*/
enum ofperr
nx_pull_header(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
               const struct mf_field **field, bool *masked)
{
    enum ofperr error;
    uint64_t header;

    error = nx_pull_header__(b, false, vl_mff_map, &header, field);
    if (masked) {
        *masked = !error && nxm_hasmask(header);
    } else if (!error && nxm_hasmask(header)) {
        /* Caller did not ask about the mask bit, so a masked header is an
         * error. */
        error = OFPERR_OFPBMC_BAD_MASK;
    }
    return error;
}

/* Pulls one entry and additionally validates the value and mask against the
 * field's constraints.  'allow_cookie' permits the cookie pseudo-headers. */
static enum ofperr
nx_pull_match_entry(struct ofpbuf *b, bool allow_cookie,
                    const struct mf_field **field,
                    union mf_value *value, union mf_value *mask)
{
    enum ofperr error;
    uint64_t header;

    error = nx_pull_entry__(b, allow_cookie, NULL, &header, field, value,
                            mask);
    if (error) {
        return error;
    }
    if (field && *field) {
        if (!mf_is_mask_valid(*field, mask)) {
            VLOG_DBG_RL(&rl, "bad mask for field %s", (*field)->name);
            return OFPERR_OFPBMC_BAD_MASK;
        }
        if (!mf_is_value_valid(*field, value)) {
            VLOG_DBG_RL(&rl, "bad value for field %s", (*field)->name);
            return OFPERR_OFPBMC_BAD_VALUE;
        }
    }
    return 0;
}

/* Parses 'match_len' bytes of NXM/OXM entries at 'p' into 'match'.  With
 * 'strict' false, unknown fields are skipped instead of rejected.  The
 * cookie pseudo-fields are accepted only when 'cookie'/'cookie_mask' are
 * supplied (both or neither must be nonnull). */
static enum ofperr
nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
            struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask,
            const struct tun_table *tun_table)
{
    ovs_assert((cookie != NULL) == (cookie_mask != NULL));

    match_init_catchall(match);
    /* Install 'tun_table' so that tunnel metadata fields can be resolved
     * while parsing; it is cleared again on the success path below. */
    match->flow.tunnel.metadata.tab = tun_table;
    if (cookie) {
        *cookie = *cookie_mask = htonll(0);
    }

    struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
    while (b.size) {
        const uint8_t *pos = b.data;
        const struct mf_field *field;
        union mf_value value;
        union mf_value mask;
        enum ofperr error;

        error = nx_pull_match_entry(&b, cookie != NULL, &field, &value,
                                    &mask);
        if (error) {
            /* In loose mode an unknown field is simply skipped; the entry's
             * payload was already consumed by nx_pull_entry__(). */
            if (error == OFPERR_OFPBMC_BAD_FIELD && !strict) {
                continue;
            }
        } else if (!field) {
            /* No mf_field: this is a cookie pseudo-header. */
            if (!cookie) {
                error = OFPERR_OFPBMC_BAD_FIELD;
            } else if (*cookie_mask) {
                error = OFPERR_OFPBMC_DUP_FIELD;
            } else {
                *cookie = value.be64;
                *cookie_mask = mask.be64;
            }
        } else if (!mf_are_prereqs_ok(field, &match->flow, NULL)) {
            error = OFPERR_OFPBMC_BAD_PREREQ;
        } else if (!mf_is_all_wild(field, &match->wc)) {
            /* Field already set earlier in this match. */
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else {
            char *err_str;

            mf_set(field, &value, &mask, match, &err_str);
            if (err_str) {
                VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                            "within match (%s)", pos - p, err_str);
                free(err_str);
                return OFPERR_OFPBMC_BAD_VALUE;
            }
        }

        if (error) {
            VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                        "within match (%s)", pos - p,
                        ofperr_to_string(error));
            return error;
        }
    }

    match->flow.tunnel.metadata.tab = NULL;
    return 0;
}

/* Pulls the (8-byte padded) nx_match of length 'match_len' off 'b', then
 * parses it with nx_pull_raw(). */
static enum ofperr
nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
                struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask,
                const struct tun_table *tun_table)
{
    uint8_t *p = NULL;

    if (match_len) {
        p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
        if (!p) {
            VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                        "multiple of 8, is longer than space in message (max "
                        "length %"PRIu32")", match_len, b->size);
            return OFPERR_OFPBMC_BAD_LEN;
        }
    }

    return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask,
                       tun_table);
}

/* Parses the nx_match formatted match description in 'b' with length
 * 'match_len'.  Stores the results in 'match'.  If 'cookie' and 'cookie_mask'
 * are valid pointers, then stores the cookie and mask in them if 'b' contains
 * a "NXM_NX_COOKIE*" match.  Otherwise, stores 0 in both.
 *
 * Fails with an error upon encountering an unknown NXM header.
 *
 * Returns 0 if successful, otherwise an OpenFlow error code. */
enum ofperr
nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
              ovs_be64 *cookie, ovs_be64 *cookie_mask,
              const struct tun_table *tun_table)
{
    return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask,
                           tun_table);
}

/* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
 * instead of failing with an error.
*/
enum ofperr
nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
                    struct match *match, ovs_be64 *cookie,
                    ovs_be64 *cookie_mask, const struct tun_table *tun_table)
{
    return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask,
                           tun_table);
}

/* Validates the struct ofp11_match_header at the front of 'b', pulls the
 * whole (padded) OXM match off 'b', and parses the entries after the header
 * with nx_pull_raw().  Cookie pseudo-fields are not accepted in OXM. */
static enum ofperr
oxm_pull_match__(struct ofpbuf *b, bool strict,
                 const struct tun_table *tun_table, struct match *match)
{
    struct ofp11_match_header *omh = b->data;
    uint8_t *p;
    uint16_t match_len;

    if (b->size < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    match_len = ntohs(omh->length);
    if (match_len < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    if (omh->type != htons(OFPMT_OXM)) {
        return OFPERR_OFPBMC_BAD_TYPE;
    }

    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %"PRIu32")", match_len, b->size);
        return OFPERR_OFPBMC_BAD_LEN;
    }

    return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
                       strict, match, NULL, NULL, tun_table);
}

/* Parses the oxm formatted match description preceded by a struct
 * ofp11_match_header in 'b'.  Stores the result in 'match'.
 *
 * Fails with an error when encountering unknown OXM headers.
 *
 * Returns 0 if successful, otherwise an OpenFlow error code. */
enum ofperr
oxm_pull_match(struct ofpbuf *b, const struct tun_table *tun_table,
               struct match *match)
{
    return oxm_pull_match__(b, true, tun_table, match);
}

/* Behaves the same as oxm_pull_match() with one exception.  Skips over
 * unknown OXM headers instead of failing with an error when they are
 * encountered. */
enum ofperr
oxm_pull_match_loose(struct ofpbuf *b, const struct tun_table *tun_table,
                     struct match *match)
{
    return oxm_pull_match__(b, false, tun_table, match);
}

/* Parses the OXM match description in the 'oxm_len' bytes in 'oxm'.  Stores
 * the result in 'match'.
 *
 * Fails with an error when encountering unknown OXM headers.
 *
 * Returns 0 if successful, otherwise an OpenFlow error code. */
enum ofperr
oxm_decode_match(const void *oxm, size_t oxm_len,
                 const struct tun_table *tun_table, struct match *match)
{
    return nx_pull_raw(oxm, oxm_len, true, match, NULL, NULL, tun_table);
}

/* Verify an array of OXM TLVs treating value of each TLV as a mask,
 * disallowing masks in each TLV and ignoring pre-requisites. */
enum ofperr
oxm_pull_field_array(const void *fields_data, size_t fields_len,
                     struct field_array *fa)
{
    struct ofpbuf b = ofpbuf_const_initializer(fields_data, fields_len);
    while (b.size) {
        const uint8_t *pos = b.data;
        const struct mf_field *field;
        union mf_value value;
        enum ofperr error;
        uint64_t header;

        /* NULL mask argument: a masked TLV is rejected inside
         * nx_pull_entry__() with OFPERR_OFPBMC_BAD_MASK. */
        error = nx_pull_entry__(&b, false, NULL, &header, &field, &value,
                                NULL);
        if (error) {
            VLOG_DBG_RL(&rl, "error pulling field array field");
            return error;
        } else if (!field) {
            VLOG_DBG_RL(&rl, "unknown field array field");
            error = OFPERR_OFPBMC_BAD_FIELD;
        } else if (bitmap_is_set(fa->used.bm, field->id)) {
            VLOG_DBG_RL(&rl, "duplicate field array field '%s'",
                        field->name);
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else if (!mf_is_mask_valid(field, &value)) {
            VLOG_DBG_RL(&rl, "bad mask in field array field '%s'",
                        field->name);
            return OFPERR_OFPBMC_BAD_MASK;
        } else {
            field_array_set(field->id, &value, fa);
        }

        if (error) {
            const uint8_t *start = fields_data;

            VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                        "within field array (%s)", pos - start,
                        ofperr_to_string(error));
            return error;
        }
    }

    return 0;
}

/* nx_put_match() and helpers.
 *
 * 'put' functions whose names end in 'w' add a wildcarded field.
 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
 * Other 'put' functions add exact-match fields.
*/
void
nxm_put__(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
          const void *value, const void *mask, size_t n_bytes)
{
    /* Emit header, then value, then (for a masked entry) the mask. */
    nx_put_header_len(b, field, version, !!mask, n_bytes);
    ofpbuf_put(b, value, n_bytes);
    if (mask) {
        ofpbuf_put(b, mask, n_bytes);
    }
}

/* Appends 'field' to 'b' unless it is fully wildcarded; emits an exact-match
 * entry when the mask is all-ones, a masked entry otherwise. */
static void
nxm_put(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
        const void *value, const void *mask, size_t n_bytes)
{
    if (!is_all_zeros(mask, n_bytes)) {
        bool masked = !is_all_ones(mask, n_bytes);

        nxm_put__(b, field, version, value, masked ? mask : NULL, n_bytes);
    }
}

static void
nxm_put_8m(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
           uint8_t value, uint8_t mask)
{
    nxm_put(b, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_8(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
          uint8_t value)
{
    nxm_put__(b, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_16m(struct ofpbuf *b, enum mf_field_id field,
            enum ofp_version version, ovs_be16 value, ovs_be16 mask)
{
    nxm_put(b, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_16(struct ofpbuf *b, enum mf_field_id field,
           enum ofp_version version, ovs_be16 value)
{
    nxm_put__(b, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_32m(struct ofpbuf *b, enum mf_field_id field,
            enum ofp_version version, ovs_be32 value, ovs_be32 mask)
{
    nxm_put(b, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_32(struct ofpbuf *b, enum mf_field_id field,
           enum ofp_version version, ovs_be32 value)
{
    nxm_put__(b, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_64m(struct ofpbuf *b, enum mf_field_id field,
            enum ofp_version version, ovs_be64 value, ovs_be64 mask)
{
    nxm_put(b, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_128m(struct ofpbuf *b, enum mf_field_id field,
             enum ofp_version version, const ovs_be128 value,
             const ovs_be128 mask)
{
    nxm_put(b, field, version, &value, &mask, sizeof(value));
}

static void
nxm_put_eth_masked(struct ofpbuf *b, enum mf_field_id field,
                   enum ofp_version version, const struct eth_addr value,
                   const struct eth_addr mask)
{
    nxm_put(b, field, version, value.ea, mask.ea, ETH_ADDR_LEN);
}

static void
nxm_put_ipv6(struct ofpbuf *b, enum mf_field_id field,
             enum ofp_version version, const struct in6_addr *value,
             const struct in6_addr *mask)
{
    nxm_put(b, field, version, value->s6_addr, mask->s6_addr,
            sizeof value->s6_addr);
}

/* Appends the IP fragmentation bits of 'match' to 'b'. */
static void
nxm_put_frag(struct ofpbuf *b, const struct match *match,
             enum ofp_version version)
{
    uint8_t nw_frag = match->flow.nw_frag & FLOW_NW_FRAG_MASK;
    uint8_t nw_frag_mask = match->wc.masks.nw_frag & FLOW_NW_FRAG_MASK;

    /* When all frag bits are matched, emit an exact match (all-ones mask). */
    nxm_put_8m(b, MFF_IP_FRAG, version, nw_frag,
               nw_frag_mask == FLOW_NW_FRAG_MASK ? UINT8_MAX : nw_frag_mask);
}

/* Appends to 'b' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
 * 'match'. */
static void
nxm_put_ip(struct ofpbuf *b, const struct match *match, enum ofp_version oxm)
{
    const struct flow *flow = &match->flow;

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        nxm_put_32m(b, MFF_IPV4_SRC, oxm,
                    flow->nw_src, match->wc.masks.nw_src);
        nxm_put_32m(b, MFF_IPV4_DST, oxm,
                    flow->nw_dst, match->wc.masks.nw_dst);
    } else {
        nxm_put_ipv6(b, MFF_IPV6_SRC, oxm,
                     &flow->ipv6_src, &match->wc.masks.ipv6_src);
        nxm_put_ipv6(b, MFF_IPV6_DST, oxm,
                     &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
    }

    nxm_put_frag(b, match, oxm);

    if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
        /* OXM ('oxm' nonzero) uses the shifted DSCP encoding; NXM uses the
         * in-place encoding. */
        if (oxm) {
            nxm_put_8(b, MFF_IP_DSCP_SHIFTED, oxm, flow->nw_tos >> 2);
        } else {
            nxm_put_8(b, MFF_IP_DSCP, oxm, flow->nw_tos & IP_DSCP_MASK);
        }
    }

    if (match->wc.masks.nw_tos & IP_ECN_MASK) {
        nxm_put_8(b, MFF_IP_ECN, oxm, flow->nw_tos & IP_ECN_MASK);
    }

    if (match->wc.masks.nw_ttl) {
        nxm_put_8(b, MFF_IP_TTL, oxm, flow->nw_ttl);
    }

    nxm_put_32m(b, MFF_IPV6_LABEL, oxm,
                flow->ipv6_label, match->wc.masks.ipv6_label);

    if (match->wc.masks.nw_proto) {
        nxm_put_8(b, MFF_IP_PROTO, oxm, flow->nw_proto);

        if (flow->nw_proto == IPPROTO_TCP) {
            nxm_put_16m(b, MFF_TCP_SRC, oxm,
                        flow->tp_src, match->wc.masks.tp_src);
            nxm_put_16m(b, MFF_TCP_DST, oxm,
                        flow->tp_dst, match->wc.masks.tp_dst);
            nxm_put_16m(b, MFF_TCP_FLAGS, oxm,
                        flow->tcp_flags, match->wc.masks.tcp_flags);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            nxm_put_16m(b, MFF_UDP_SRC, oxm,
                        flow->tp_src, match->wc.masks.tp_src);
            nxm_put_16m(b, MFF_UDP_DST, oxm,
                        flow->tp_dst, match->wc.masks.tp_dst);
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            nxm_put_16m(b, MFF_SCTP_SRC, oxm, flow->tp_src,
                        match->wc.masks.tp_src);
            nxm_put_16m(b, MFF_SCTP_DST, oxm, flow->tp_dst,
                        match->wc.masks.tp_dst);
        } else if (is_icmpv4(flow, NULL)) {
            /* ICMP type/code travel in tp_src/tp_dst but are 8-bit
             * fields on the wire. */
            if (match->wc.masks.tp_src) {
                nxm_put_8(b, MFF_ICMPV4_TYPE, oxm,
                          ntohs(flow->tp_src));
            }
            if (match->wc.masks.tp_dst) {
                nxm_put_8(b, MFF_ICMPV4_CODE, oxm,
                          ntohs(flow->tp_dst));
            }
        } else if (is_icmpv6(flow, NULL)) {
            if (match->wc.masks.tp_src) {
                nxm_put_8(b, MFF_ICMPV6_TYPE, oxm,
                          ntohs(flow->tp_src));
            }
            if (match->wc.masks.tp_dst) {
                nxm_put_8(b, MFF_ICMPV6_CODE, oxm,
                          ntohs(flow->tp_dst));
            }
            if (is_nd(flow, NULL)) {
                nxm_put_ipv6(b, MFF_ND_TARGET, oxm,
                             &flow->nd_target, &match->wc.masks.nd_target);
                if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
                    nxm_put_eth_masked(b, MFF_ND_SLL, oxm,
                                       flow->arp_sha,
                                       match->wc.masks.arp_sha);
                }
                if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
                    nxm_put_eth_masked(b, MFF_ND_TLL, oxm,
                                       flow->arp_tha,
                                       match->wc.masks.arp_tha);
                }
            }
        }
    }
}

/* Appends to 'b' the nx_match format that expresses 'match'.  For Flow Mod
 * and Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be
 * supplied.  Otherwise, 'cookie_mask' should be zero.
 *
 * Specify 'oxm' as 0 to express the match in NXM format; otherwise, specify
 * 'oxm' as the OpenFlow version number for the OXM format to use.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.
*
 * If 'match' is a catch-all rule that matches every packet, then this
 * function appends nothing to 'b' and returns 0. */
static int
nx_put_raw(struct ofpbuf *b, enum ofp_version oxm, const struct match *match,
           ovs_be64 cookie, ovs_be64 cookie_mask)
{
    const struct flow *flow = &match->flow;
    const size_t start_len = b->size;
    int match_len;
    int i;

    /* Fails to compile when struct flow changes, forcing a review of the
     * serialization below. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);

    /* Metadata. */
    if (match->wc.masks.dp_hash) {
        nxm_put_32m(b, MFF_DP_HASH, oxm, htonl(flow->dp_hash),
                    htonl(match->wc.masks.dp_hash));
    }

    if (match->wc.masks.recirc_id) {
        nxm_put_32(b, MFF_RECIRC_ID, oxm, htonl(flow->recirc_id));
    }

    if (match->wc.masks.conj_id) {
        nxm_put_32(b, MFF_CONJ_ID, oxm, htonl(flow->conj_id));
    }

    if (match->wc.masks.in_port.ofp_port) {
        ofp_port_t in_port = flow->in_port.ofp_port;
        if (oxm) {
            /* OXM uses 32-bit OpenFlow 1.1+ port numbers. */
            nxm_put_32(b, MFF_IN_PORT_OXM, oxm,
                       ofputil_port_to_ofp11(in_port));
        } else {
            nxm_put_16(b, MFF_IN_PORT, oxm,
                       htons(ofp_to_u16(in_port)));
        }
    }
    if (match->wc.masks.actset_output) {
        nxm_put_32(b, MFF_ACTSET_OUTPUT, oxm,
                   ofputil_port_to_ofp11(flow->actset_output));
    }

    /* Ethernet. */
    nxm_put_eth_masked(b, MFF_ETH_SRC, oxm,
                       flow->dl_src, match->wc.masks.dl_src);
    nxm_put_eth_masked(b, MFF_ETH_DST, oxm,
                       flow->dl_dst, match->wc.masks.dl_dst);
    nxm_put_16m(b, MFF_ETH_TYPE, oxm,
                ofputil_dl_type_to_openflow(flow->dl_type),
                match->wc.masks.dl_type);

    /* 802.1Q. */
    if (oxm) {
        ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
        ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
        ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;

        if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
            nxm_put_16(b, MFF_VLAN_VID, oxm, vid);
        } else if (mask) {
            nxm_put_16m(b, MFF_VLAN_VID, oxm, vid, mask);
        }

        if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
            nxm_put_8(b, MFF_VLAN_PCP, oxm,
                      vlan_tci_to_pcp(flow->vlan_tci));
        }

    } else {
        /* NXM carries the whole TCI in one field. */
        nxm_put_16m(b, MFF_VLAN_TCI, oxm, flow->vlan_tci,
                    match->wc.masks.vlan_tci);
    }

    /* MPLS. */
    if (eth_type_mpls(flow->dl_type)) {
        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
            nxm_put_8(b, MFF_MPLS_TC, oxm,
                      mpls_lse_to_tc(flow->mpls_lse[0]));
        }

        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
            nxm_put_8(b, MFF_MPLS_BOS, oxm,
                      mpls_lse_to_bos(flow->mpls_lse[0]));
        }

        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
            nxm_put_32(b, MFF_MPLS_LABEL, oxm,
                       htonl(mpls_lse_to_label(flow->mpls_lse[0])));
        }
    }

    /* L3. */
    if (is_ip_any(flow)) {
        nxm_put_ip(b, match, oxm);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        /* ARP. */
        if (match->wc.masks.nw_proto) {
            nxm_put_16(b, MFF_ARP_OP, oxm,
                       htons(flow->nw_proto));
        }
        nxm_put_32m(b, MFF_ARP_SPA, oxm,
                    flow->nw_src, match->wc.masks.nw_src);
        nxm_put_32m(b, MFF_ARP_TPA, oxm,
                    flow->nw_dst, match->wc.masks.nw_dst);
        nxm_put_eth_masked(b, MFF_ARP_SHA, oxm,
                           flow->arp_sha, match->wc.masks.arp_sha);
        nxm_put_eth_masked(b, MFF_ARP_THA, oxm,
                           flow->arp_tha, match->wc.masks.arp_tha);
    }

    /* Tunnel ID. */
    nxm_put_64m(b, MFF_TUN_ID, oxm,
                flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);

    /* Other tunnel metadata. */
    nxm_put_16m(b, MFF_TUN_FLAGS, oxm,
                htons(flow->tunnel.flags),
                htons(match->wc.masks.tunnel.flags));
    nxm_put_32m(b, MFF_TUN_SRC, oxm,
                flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
    nxm_put_32m(b, MFF_TUN_DST, oxm,
                flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
    nxm_put_ipv6(b, MFF_TUN_IPV6_SRC, oxm,
                 &flow->tunnel.ipv6_src, &match->wc.masks.tunnel.ipv6_src);
    nxm_put_ipv6(b, MFF_TUN_IPV6_DST, oxm,
                 &flow->tunnel.ipv6_dst, &match->wc.masks.tunnel.ipv6_dst);
    nxm_put_16m(b, MFF_TUN_GBP_ID, oxm,
                flow->tunnel.gbp_id, match->wc.masks.tunnel.gbp_id);
    nxm_put_8m(b, MFF_TUN_GBP_FLAGS, oxm,
               flow->tunnel.gbp_flags, match->wc.masks.tunnel.gbp_flags);
    tun_metadata_to_nx_match(b, oxm, match);

    /* Registers. */
    if (oxm < OFP15_VERSION) {
        for (i = 0; i < FLOW_N_REGS; i++) {
            nxm_put_32m(b, MFF_REG0 + i, oxm,
                        htonl(flow->regs[i]),
                        htonl(match->wc.masks.regs[i]));
        }
    } else {
        /* OpenFlow 1.5+ expresses registers as 64-bit xregs. */
        for (i = 0; i < FLOW_N_XREGS; i++) {
            nxm_put_64m(b, MFF_XREG0 + i, oxm,
                        htonll(flow_get_xreg(flow, i)),
                        htonll(flow_get_xreg(&match->wc.masks, i)));
        }
    }

    /* Packet mark. */
    nxm_put_32m(b, MFF_PKT_MARK, oxm, htonl(flow->pkt_mark),
                htonl(match->wc.masks.pkt_mark));

    /* Connection tracking. */
    nxm_put_32m(b, MFF_CT_STATE, oxm, htonl(flow->ct_state),
                htonl(match->wc.masks.ct_state));
    nxm_put_16m(b, MFF_CT_ZONE, oxm, htons(flow->ct_zone),
                htons(match->wc.masks.ct_zone));
    nxm_put_32m(b, MFF_CT_MARK, oxm, htonl(flow->ct_mark),
                htonl(match->wc.masks.ct_mark));
    nxm_put_128m(b, MFF_CT_LABEL, oxm, hton128(flow->ct_label),
                 hton128(match->wc.masks.ct_label));

    /* OpenFlow 1.1+ Metadata. */
    nxm_put_64m(b, MFF_METADATA, oxm,
                flow->metadata, match->wc.masks.metadata);

    /* Cookie. */
    if (cookie_mask) {
        bool masked = cookie_mask != OVS_BE64_MAX;

        cookie &= cookie_mask;
        nx_put_header__(b, NXM_NX_COOKIE, masked);
        ofpbuf_put(b, &cookie, sizeof cookie);
        if (masked) {
            ofpbuf_put(b, &cookie_mask, sizeof cookie_mask);
        }
    }

    match_len = b->size - start_len;
    return match_len;
}

/* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
 * bytes to pad the nx_match out to a multiple of 8.  For Flow Mod and Flow
 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
 * Otherwise, 'cookie_mask' should be zero.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.  The return
 * value can be zero if it appended nothing at all to 'b' (which happens if
 * 'cr' is a catch-all rule that matches every packet).
*/
int
nx_put_match(struct ofpbuf *b, const struct match *match,
             ovs_be64 cookie, ovs_be64 cookie_mask)
{
    /* 'oxm' version 0 selects the NXM encoding. */
    int match_len = nx_put_raw(b, 0, match, cookie, cookie_mask);

    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
    return match_len;
}

/* Appends to 'b' an struct ofp11_match_header followed by the OXM format that
 * expresses 'match', plus enough zero bytes to pad the data appended out to a
 * multiple of 8.
 *
 * OXM differs slightly among versions of OpenFlow.  Specify the OpenFlow
 * version in use as 'version'.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding the padding.  Never
 * returns zero. */
int
oxm_put_match(struct ofpbuf *b, const struct match *match,
              enum ofp_version version)
{
    int match_len;
    struct ofp11_match_header *omh;
    size_t start_len = b->size;
    ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);

    /* Reserve room for the header; fill it in after the length is known.
     * (nx_put_raw() may reallocate 'b', so look the header up again.) */
    ofpbuf_put_uninit(b, sizeof *omh);
    match_len = (nx_put_raw(b, version, match, cookie, cookie_mask)
                 + sizeof *omh);
    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));

    omh = ofpbuf_at(b, start_len, sizeof *omh);
    omh->type = htons(OFPMT_OXM);
    omh->length = htons(match_len);

    return match_len;
}

/* Appends to 'b' the OXM formats that expresses 'match', without header or
 * padding.
 *
 * OXM differs slightly among versions of OpenFlow.  Specify the OpenFlow
 * version in use as 'version'.
 *
 * This function can cause 'b''s data to be reallocated. */
void
oxm_put_raw(struct ofpbuf *b, const struct match *match,
            enum ofp_version version)
{
    nx_put_raw(b, version, match, 0, 0);
}

/* Appends to 'b' the nx_match format that expresses the tlv corresponding
 * to 'id'. If mask is not all-ones then it is also formated as the value
 * of the tlv.
*/ static void nx_format_mask_tlv(struct ds *ds, enum mf_field_id id, const union mf_value *mask) { const struct mf_field *mf = mf_from_id(id); ds_put_format(ds, "%s", mf->name); if (!is_all_ones(mask, mf->n_bytes)) { ds_put_char(ds, '='); mf_format(mf, mask, NULL, ds); } ds_put_char(ds, ','); } /* Appends a string representation of 'fa_' to 'ds'. * The TLVS value of 'fa_' is treated as a mask and * only the name of fields is formated if it is all ones. */ void oxm_format_field_array(struct ds *ds, const struct field_array *fa) { size_t start_len = ds->length; size_t i, offset = 0; BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) { const struct mf_field *mf = mf_from_id(i); union mf_value value; memcpy(&value, fa->values + offset, mf->n_bytes); nx_format_mask_tlv(ds, i, &value); offset += mf->n_bytes; } if (ds->length > start_len) { ds_chomp(ds, ','); } } /* Appends to 'b' a series of OXM TLVs corresponding to the series * of enum mf_field_id and value tuples in 'fa_'. * * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow * version in use as 'version'. * * This function can cause 'b''s data to be reallocated. * * Returns the number of bytes appended to 'b'. May return zero. */ int oxm_put_field_array(struct ofpbuf *b, const struct field_array *fa, enum ofp_version version) { size_t start_len = b->size; /* Field arrays are only used with the group selection method * property and group properties are only available in OpenFlow 1.5+. * So the following assertion should never fail. * * If support for older OpenFlow versions is desired then some care * will need to be taken of different TLVs that handle the same * flow fields. 
In particular: * - VLAN_TCI, VLAN_VID and MFF_VLAN_PCP * - IP_DSCP_MASK and DSCP_SHIFTED * - REGS and XREGS */ ovs_assert(version >= OFP15_VERSION); size_t i, offset = 0; BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) { const struct mf_field *mf = mf_from_id(i); union mf_value value; memcpy(&value, fa->values + offset, mf->n_bytes); int len = mf_field_len(mf, &value, NULL, NULL); nxm_put__(b, i, version, &value + mf->n_bytes - len, NULL, len); offset += mf->n_bytes; } return b->size - start_len; } static void nx_put_header__(struct ofpbuf *b, uint64_t header, bool masked) { uint64_t masked_header = masked ? nxm_make_wild_header(header) : header; ovs_be64 network_header = htonll(masked_header); ofpbuf_put(b, &network_header, nxm_header_len(header)); } void nx_put_header(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version, bool masked) { nx_put_header__(b, mf_oxm_header(field, version), masked); } void nx_put_mff_header(struct ofpbuf *b, const struct mf_field *mff, enum ofp_version version, bool masked) { if (mff->mapped) { nx_put_header_len(b, mff->id, version, masked, mff->n_bytes); } else { nx_put_header(b, mff->id, version, masked); } } static void nx_put_header_len(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version, bool masked, size_t n_bytes) { uint64_t header = mf_oxm_header(field, version); header = NXM_HEADER(nxm_vendor(header), nxm_class(header), nxm_field(header), false, nxm_experimenter_len(header) + n_bytes); nx_put_header__(b, header, masked); } void nx_put_entry(struct ofpbuf *b, const struct mf_field *mff, enum ofp_version version, const union mf_value *value, const union mf_value *mask) { bool masked; int len, offset; len = mf_field_len(mff, value, mask, &masked); offset = mff->n_bytes - len; nx_put_header_len(b, mff->id, version, masked, len); ofpbuf_put(b, &value->u8 + offset, len); if (masked) { ofpbuf_put(b, &mask->u8 + offset, len); } } /* nx_match_to_string() and helpers. 
*/
static void format_nxm_field_name(struct ds *, uint64_t header);

/* Returns a malloc()'d human-readable rendering of 'match_len' bytes of
 * NXM/OXM entries at 'p', e.g. "NXM_OF_IN_PORT(0001)".  Caller frees. */
char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    if (!match_len) {
        return xstrdup("<any>");
    }

    struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
    struct ds s = DS_EMPTY_INITIALIZER;
    while (b.size) {
        union mf_value value;
        union mf_value mask;
        enum ofperr error;
        uint64_t header;
        int value_len;

        error = nx_pull_entry__(&b, true, NULL, &header, NULL, &value,
                                &mask);
        if (error) {
            break;
        }
        value_len = MIN(sizeof value, nxm_field_bytes(header));

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (int i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", ((const uint8_t *) &value)[i]);
        }
        if (nxm_hasmask(header)) {
            ds_put_char(&s, '/');
            for (int i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", ((const uint8_t *) &mask)[i]);
            }
        }
        ds_put_char(&s, ')');
    }

    /* Anything left over could not be parsed. */
    if (b.size) {
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }
        ds_put_format(&s, "<%u invalid bytes>", b.size);
    }

    return ds_steal_cstr(&s);
}

/* Like nx_match_to_string(), but for an OXM match preceded by a struct
 * ofp11_match_header in 'p'.  Returns a malloc()'d string; caller frees. */
char *
oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
{
    const struct ofp11_match_header *omh = p->data;
    uint16_t match_len_;
    struct ds s;

    ds_init(&s);

    if (match_len < sizeof *omh) {
        ds_put_format(&s, "<match too short: %u>", match_len);
        goto err;
    }

    if (omh->type != htons(OFPMT_OXM)) {
        ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
        goto err;
    }

    match_len_ = ntohs(omh->length);
    if (match_len_ < sizeof *omh) {
        ds_put_format(&s, "<match length field too short: %u>", match_len_);
        goto err;
    }

    if (match_len_ != match_len) {
        ds_put_format(&s, "<match length field incorrect: %u != %u>",
                      match_len_, match_len);
        goto err;
    }

    return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
                              match_len - sizeof *omh);

err:
    return ds_steal_cstr(&s);
}

void
nx_format_field_name(enum mf_field_id id, enum ofp_version version,
                     struct ds *s)
{
    format_nxm_field_name(s, mf_oxm_header(id, version));
}

/* Appends the symbolic name for 'header' to 's', falling back to a numeric
 * "class:field" form for unknown headers. */
static void
format_nxm_field_name(struct ds *s, uint64_t header)
{
    const struct nxm_field *f = nxm_field_by_header(header);
    if (f) {
        ds_put_cstr(s, f->name);
        if (nxm_hasmask(header)) {
            ds_put_cstr(s, "_W");
        }
    } else if (header == NXM_NX_COOKIE) {
        ds_put_cstr(s, "NXM_NX_COOKIE");
    } else if (header == NXM_NX_COOKIE_W) {
        ds_put_cstr(s, "NXM_NX_COOKIE_W");
    } else {
        ds_put_format(s, "%d:%d", nxm_class(header), nxm_field(header));
    }
}

/* Returns true if the 'a_len' bytes at 'a' equal NUL-terminated 'b'. */
static bool
streq_len(const char *a, size_t a_len, const char *b)
{
    return strlen(b) == a_len && !memcmp(a, b, a_len);
}

/* Parses the 'name_len'-byte field name at 'name' into an NXM/OXM header,
 * accepting symbolic names ("..._W" selects the masked form), the cookie
 * pseudo-headers, or a raw header in hex.  Returns 0 if unrecognized. */
static uint64_t
parse_nxm_field_name(const char *name, int name_len)
{
    const struct nxm_field *f;
    bool wild;

    f = mf_parse_subfield_name(name, name_len, &wild);
    if (f) {
        if (!wild) {
            return f->header;
        } else if (mf_from_id(f->id)->maskable != MFM_NONE) {
            return nxm_make_wild_header(f->header);
        }
    }

    if (streq_len(name, name_len, "NXM_NX_COOKIE")) {
        return NXM_NX_COOKIE;
    } else if (streq_len(name, name_len, "NXM_NX_COOKIE_W")) {
        return NXM_NX_COOKIE_W;
    }

    /* Check whether it's a field header value as hex.
     * (This isn't ordinarily useful except for testing error behavior.) */
    if (name_len == 8) {
        uint64_t header;
        bool ok;

        header = hexits_value(name, name_len, &ok) << 32;
        if (ok) {
            return header;
        }
    } else if (name_len == 16) {
        uint64_t header;
        bool ok;

        header = hexits_value(name, name_len, &ok);
        if (ok && is_experimenter_oxm(header)) {
            return header;
        }
    }

    return 0;
}

/* nx_match_from_string(). */

/* Parses textual nx_match 's' (e.g. "NXM_OF_IN_PORT(0001)") into raw bytes
 * appended to 'b'.  Exits fatally on any syntax error (intended for command
 * line tools).  Returns the number of bytes appended. */
static int
nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint64_t header;
        ovs_be64 nw_header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;
        /* Reserve the header bytes now; 'header' may be rewritten below for
         * variable-length fields, so it is copied in afterward. */
        b->header = ofpbuf_put_uninit(b, nxm_header_len(header));
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            const struct mf_field *field = mf_from_oxm_header(header, NULL);

            if (field && field->variable_len) {
                if (n <= field->n_bytes) {
                    /* Patch the header's length to the actual number of
                     * bytes supplied. */
                    int len = (nxm_hasmask(header) ? n * 2 : n) +
                              nxm_experimenter_len(header);

                    header = NXM_HEADER(nxm_vendor(header),
                                        nxm_class(header),
                                        nxm_field(header),
                                        nxm_hasmask(header) ? 1 : 0, len);
                } else {
                    ovs_fatal(0, "expected to read at most %d bytes but got "
                              "%"PRIuSIZE, field->n_bytes, n);
                }
            } else {
                ovs_fatal(0, "expected to read %d bytes but got %"PRIuSIZE,
                          nxm_field_bytes(header), n);
            }
        }
        nw_header = htonll(header);
        memcpy(b->header, &nw_header, nxm_header_len(header));

        if (nxm_hasmask(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s", full_s,
                      name_len, name);
        }
        s++;
    }

    return b->size - start_len;
}

/* Parses textual nx_match 's' into 'b', padded to a multiple of 8 bytes.
 * Returns the unpadded length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len = nx_match_from_string_raw(s, b);
    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
    return match_len;
}

/* Like nx_match_from_string() but prepends a struct ofp11_match_header. */
int
oxm_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len;
    struct ofp11_match_header *omh;
    size_t start_len = b->size;

    ofpbuf_put_uninit(b, sizeof *omh);
    match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));

    /* Fill in the header after the length is known ('b' may have been
     * reallocated meanwhile). */
    omh = ofpbuf_at(b, start_len, sizeof *omh);
    omh->type = htons(OFPMT_OXM);
    omh->length = htons(match_len);

    return match_len;
}

/* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
 * '*move'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
char * OVS_WARN_UNUSED_RESULT
nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
{
    const char *full_s = s;
    char *error;

    error = mf_parse_subfield__(&move->src, &s);
    if (error) {
        return error;
    }
    if (strncmp(s, "->", 2)) {
        return xasprintf("%s: missing `->' following source", full_s);
    }
    s += 2;
    error = mf_parse_subfield(&move->dst, s);
    if (error) {
        return error;
    }

    if (move->src.n_bits != move->dst.n_bits) {
        return xasprintf("%s: source field is %d bits wide but destination "
                         "is %d bits wide", full_s,
                         move->src.n_bits, move->dst.n_bits);
    }
    return NULL;
}

/* nxm_format_reg_move(). */

void
nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
{
    ds_put_format(s, "%smove:%s", colors.special, colors.end);
    mf_format_subfield(&move->src, s);
    ds_put_format(s, "%s->%s", colors.special, colors.end);
    mf_format_subfield(&move->dst, s);
}

/* Checks that both subfields of 'move' are valid for 'flow'. */
enum ofperr
nxm_reg_move_check(const struct ofpact_reg_move *move,
                   const struct flow *flow)
{
    enum ofperr error;

    error = mf_check_src(&move->src, flow);
    if (error) {
        return error;
    }

    return mf_check_dst(&move->dst, flow);
}

/* nxm_execute_reg_move().
*/ void nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data, struct flow *flow, struct flow_wildcards *wc) { union mf_subvalue src_subvalue; union mf_subvalue mask_value; ovs_be64 src_data_be = htonll(src_data); memset(&mask_value, 0xff, sizeof mask_value); mf_write_subfield_flow(dst, &mask_value, &wc->masks); bitwise_copy(&src_data_be, sizeof src_data_be, 0, &src_subvalue, sizeof src_subvalue, 0, sizeof src_data_be * 8); mf_write_subfield_flow(dst, &src_subvalue, flow); } /* nxm_parse_stack_action, works for both push() and pop(). */ /* Parses 's' as a "push" or "pop" action, in the form described in * ovs-ofctl(8), into '*stack_action'. * * Returns NULL if successful, otherwise a malloc()'d string describing the * error. The caller is responsible for freeing the returned string. */ char * OVS_WARN_UNUSED_RESULT nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s) { char *error; error = mf_parse_subfield__(&stack_action->subfield, &s); if (error) { return error; } if (*s != '\0') { return xasprintf("%s: trailing garbage following push or pop", s); } return NULL; } void nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s) { ds_put_format(s, "%spush:%s", colors.param, colors.end); mf_format_subfield(&push->subfield, s); } void nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s) { ds_put_format(s, "%spop:%s", colors.param, colors.end); mf_format_subfield(&pop->subfield, s); } enum ofperr nxm_stack_push_check(const struct ofpact_stack *push, const struct flow *flow) { return mf_check_src(&push->subfield, flow); } enum ofperr nxm_stack_pop_check(const struct ofpact_stack *pop, const struct flow *flow) { return mf_check_dst(&pop->subfield, flow); } /* nxm_execute_stack_push(), nxm_execute_stack_pop(). * * A stack is an ofpbuf with 'data' pointing to the bottom of the stack and * 'size' indexing the top of the stack. 
Each value of some byte length is
 * stored to the stack immediately followed by the length of the value as an
 * unsigned byte.  This way a POP operation can first read the length byte, and
 * then the appropriate number of bytes from the stack.  This also means that
 * it is only possible to traverse the stack from top to bottom.  It is
 * possible, however, to push values also to the bottom of the stack, which is
 * useful when a stack has been serialized to a wire format in reverse order
 * (topmost value first). */

/* Push value 'v' of length 'bytes' to the top of 'stack'. */
void
nx_stack_push(struct ofpbuf *stack, const void *v, uint8_t bytes)
{
    /* Value first, then its length, so that the length byte ends up on top
     * where a subsequent pop reads it first. */
    ofpbuf_put(stack, v, bytes);
    ofpbuf_put(stack, &bytes, sizeof bytes);
}

/* Push value 'v' of length 'bytes' to the bottom of 'stack'. */
void
nx_stack_push_bottom(struct ofpbuf *stack, const void *v, uint8_t bytes)
{
    /* Mirror image of nx_stack_push(): prepend the length, then the value,
     * so the bottom entry keeps the value-then-length layout. */
    ofpbuf_push(stack, &bytes, sizeof bytes);
    ofpbuf_push(stack, v, bytes);
}

/* Pop the topmost value from 'stack', returning a pointer to the value in the
 * stack and the length of the value in '*bytes'.  In case of underflow a NULL
 * is returned and length is returned as zero via '*bytes'.
*/
void *
nx_stack_pop(struct ofpbuf *stack, uint8_t *bytes)
{
    /* Empty stack: report zero length and no value. */
    if (!stack->size) {
        *bytes = 0;
        return NULL;
    }

    /* The topmost byte holds the length of the value below it. */
    stack->size -= sizeof *bytes;
    memcpy(bytes, ofpbuf_tail(stack), sizeof *bytes);

    /* The stack must actually contain '*bytes' bytes of value. */
    ovs_assert(stack->size >= *bytes);
    stack->size -= *bytes;
    return ofpbuf_tail(stack);
}

/* Executes a "push" action: reads the subfield named by 'push' out of 'flow',
 * marks it exact-matched in 'wc', and pushes its value onto 'stack'. */
void
nxm_execute_stack_push(const struct ofpact_stack *push,
                       const struct flow *flow, struct flow_wildcards *wc,
                       struct ofpbuf *stack)
{
    union mf_subvalue dst_value;

    /* Reading the field influences forwarding: unwildcard it. */
    mf_write_subfield_flow(&push->subfield,
                           (union mf_subvalue *)&exact_match_mask,
                           &wc->masks);
    mf_read_subfield(&push->subfield, flow, &dst_value);

    /* Only the trailing bytes of the subvalue carry the field's bits. */
    uint8_t bytes = DIV_ROUND_UP(push->subfield.n_bits, 8);
    nx_stack_push(stack, &dst_value.u8[sizeof dst_value - bytes], bytes);
}

/* Executes a "pop" action: pops a value from 'stack' and writes it into the
 * subfield named by 'pop' in 'flow', marking the destination exact-matched in
 * 'wc'.  Returns false, leaving 'flow' unchanged, if the stack was empty. */
bool
nxm_execute_stack_pop(const struct ofpact_stack *pop,
                      struct flow *flow, struct flow_wildcards *wc,
                      struct ofpbuf *stack)
{
    uint8_t src_bytes;
    const void *src = nx_stack_pop(stack, &src_bytes);

    if (src) {
        union mf_subvalue src_value;
        uint8_t dst_bytes = DIV_ROUND_UP(pop->subfield.n_bits, 8);

        /* The popped value may be narrower than the destination field:
         * zero-fill the missing high-order bytes. */
        if (src_bytes < dst_bytes) {
            memset(&src_value.u8[sizeof src_value - dst_bytes], 0,
                   dst_bytes - src_bytes);
        }
        memcpy(&src_value.u8[sizeof src_value - src_bytes], src, src_bytes);
        mf_write_subfield_flow(&pop->subfield,
                               (union mf_subvalue *)&exact_match_mask,
                               &wc->masks);
        mf_write_subfield_flow(&pop->subfield, &src_value, flow);
        return true;
    } else {
        /* Attempted to pop from an empty stack. */
        return false;
    }
}

/* Formats 'sf' into 's' in a format normally acceptable to
 * mf_parse_subfield().  (It won't be acceptable if sf->field is NULL or if
 * sf->field has no NXM name.) */
void
mf_format_subfield(const struct mf_subfield *sf, struct ds *s)
{
    if (!sf->field) {
        ds_put_cstr(s, "<unknown>");
    } else {
        /* Prefer the NXM name for the field when one exists. */
        const struct nxm_field *f = nxm_field_by_mf_id(sf->field->id, 0);
        ds_put_cstr(s, f ?
f->name : sf->field->name);
    }
    /* Append the bit range: "[]" for the whole field, "[N]" for a single
     * bit, "[start..end]" otherwise. */
    if (sf->field && sf->ofs == 0 && sf->n_bits == sf->field->n_bits) {
        ds_put_cstr(s, "[]");
    } else if (sf->n_bits == 1) {
        ds_put_format(s, "[%d]", sf->ofs);
    } else {
        ds_put_format(s, "[%d..%d]", sf->ofs, sf->ofs + sf->n_bits - 1);
    }
}

/* Looks up the NXM field named by the first 'name_len' bytes of 'name'.
 * A trailing "_W" suffix selects the masked variant: it is stripped before
 * the lookup and reported through '*wild'. */
static const struct nxm_field *
mf_parse_subfield_name(const char *name, int name_len, bool *wild)
{
    *wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
    if (*wild) {
        name_len -= 2;
    }
    return nxm_field_by_name(name, name_len);
}

/* Parses a subfield from the beginning of '*sp' into 'sf'.  If successful,
 * returns NULL and advances '*sp' to the first byte following the parsed
 * string.  On failure, returns a malloc()'d error message, does not modify
 * '*sp', and does not properly initialize 'sf'.
 *
 * The syntax parsed from '*sp' takes the form "header[start..end]" where
 * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
 * bit indexes.  "..end" may be omitted to indicate a single bit.  "start..end"
 * may both be omitted (the [] are still required) to indicate an entire
 * field. */
char * OVS_WARN_UNUSED_RESULT
mf_parse_subfield__(struct mf_subfield *sf, const char **sp)
{
    const struct mf_field *field = NULL;
    const struct nxm_field *f;
    const char *name;
    int start, end;
    const char *s;
    int name_len;
    bool wild;

    s = *sp;
    name = s;
    name_len = strcspn(s, "[-");    /* Name ends at '[' or '-'. */
    f = mf_parse_subfield_name(name, name_len, &wild);
    /* Fall back to the meta-flow field name when there is no NXM alias. */
    field = f ? mf_from_id(f->id) : mf_from_name_len(name, name_len);
    if (!field) {
        return xasprintf("%s: unknown field `%.*s'", *sp, name_len, s);
    }
    s += name_len;

    /* Assume full field. */
    start = 0;
    end = field->n_bits - 1;
    if (*s == '[') {
        if (!strncmp(s, "[]", 2)) {
            /* Nothing to do. */
        } else if (ovs_scan(s, "[%d..%d]", &start, &end)) {
            /* Nothing to do.
*/ } else if (ovs_scan(s, "[%d]", &start)) { end = start; } else { return xasprintf("%s: syntax error expecting [] or [<bit>] or " "[<start>..<end>]", *sp); } s = strchr(s, ']') + 1; } if (start > end) { return xasprintf("%s: starting bit %d is after ending bit %d", *sp, start, end); } else if (start >= field->n_bits) { return xasprintf("%s: starting bit %d is not valid because field is " "only %d bits wide", *sp, start, field->n_bits); } else if (end >= field->n_bits){ return xasprintf("%s: ending bit %d is not valid because field is " "only %d bits wide", *sp, end, field->n_bits); } sf->field = field; sf->ofs = start; sf->n_bits = end - start + 1; *sp = s; return NULL; } /* Parses a subfield from the entirety of 's' into 'sf'. Returns NULL if * successful, otherwise a malloc()'d string describing the error. The caller * is responsible for freeing the returned string. * * The syntax parsed from 's' takes the form "header[start..end]" where * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive) * bit indexes. "..end" may be omitted to indicate a single bit. "start..end" * may both be omitted (the [] are still required) to indicate an entire * field. */ char * OVS_WARN_UNUSED_RESULT mf_parse_subfield(struct mf_subfield *sf, const char *s) { char *error = mf_parse_subfield__(sf, &s); if (!error && s[0]) { error = xstrdup("unexpected input following field syntax"); } return error; } /* Returns an bitmap in which each bit corresponds to the like-numbered field * in the OFPXMC12_OPENFLOW_BASIC OXM class, in which the bit values are taken * from the 'fields' bitmap. Only fields defined in OpenFlow 'version' are * considered. * * This is useful for encoding OpenFlow 1.2 table stats messages. 
*/ ovs_be64 oxm_bitmap_from_mf_bitmap(const struct mf_bitmap *fields, enum ofp_version version) { uint64_t oxm_bitmap = 0; int i; BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->bm) { uint64_t oxm = mf_oxm_header(i, version); uint32_t class = nxm_class(oxm); int field = nxm_field(oxm); if (class == OFPXMC12_OPENFLOW_BASIC && field < 64) { oxm_bitmap |= UINT64_C(1) << field; } } return htonll(oxm_bitmap); } /* Opposite conversion from oxm_bitmap_from_mf_bitmap(). * * This is useful for decoding OpenFlow 1.2 table stats messages. */ struct mf_bitmap oxm_bitmap_to_mf_bitmap(ovs_be64 oxm_bitmap, enum ofp_version version) { struct mf_bitmap fields = MF_BITMAP_INITIALIZER; for (enum mf_field_id id = 0; id < MFF_N_IDS; id++) { uint64_t oxm = mf_oxm_header(id, version); if (oxm && version >= nxm_field_by_header(oxm)->version) { uint32_t class = nxm_class(oxm); int field = nxm_field(oxm); if (class == OFPXMC12_OPENFLOW_BASIC && field < 64 && oxm_bitmap & htonll(UINT64_C(1) << field)) { bitmap_set1(fields.bm, id); } } } return fields; } /* Returns a bitmap of fields that can be encoded in OXM and that can be * modified with a "set_field" action. */ struct mf_bitmap oxm_writable_fields(void) { struct mf_bitmap b = MF_BITMAP_INITIALIZER; int i; for (i = 0; i < MFF_N_IDS; i++) { if (mf_oxm_header(i, 0) && mf_from_id(i)->writable) { bitmap_set1(b.bm, i); } } return b; } /* Returns a bitmap of fields that can be encoded in OXM and that can be * matched in a flow table. */ struct mf_bitmap oxm_matchable_fields(void) { struct mf_bitmap b = MF_BITMAP_INITIALIZER; int i; for (i = 0; i < MFF_N_IDS; i++) { if (mf_oxm_header(i, 0)) { bitmap_set1(b.bm, i); } } return b; } /* Returns a bitmap of fields that can be encoded in OXM and that can be * matched in a flow table with an arbitrary bitmask. 
*/ struct mf_bitmap oxm_maskable_fields(void) { struct mf_bitmap b = MF_BITMAP_INITIALIZER; int i; for (i = 0; i < MFF_N_IDS; i++) { if (mf_oxm_header(i, 0) && mf_from_id(i)->maskable == MFM_FULLY) { bitmap_set1(b.bm, i); } } return b; } struct nxm_field_index { struct hmap_node header_node; /* In nxm_header_map. */ struct hmap_node name_node; /* In nxm_name_map. */ struct ovs_list mf_node; /* In mf_mf_map[nf.id]. */ const struct nxm_field nf; }; #include "nx-match.inc" static struct hmap nxm_header_map; static struct hmap nxm_name_map; static struct ovs_list nxm_mf_map[MFF_N_IDS]; static void nxm_init(void) { static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; if (ovsthread_once_start(&once)) { hmap_init(&nxm_header_map); hmap_init(&nxm_name_map); for (int i = 0; i < MFF_N_IDS; i++) { ovs_list_init(&nxm_mf_map[i]); } for (struct nxm_field_index *nfi = all_nxm_fields; nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) { hmap_insert(&nxm_header_map, &nfi->header_node, hash_uint64(nxm_no_len(nfi->nf.header))); hmap_insert(&nxm_name_map, &nfi->name_node, hash_string(nfi->nf.name, 0)); ovs_list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node); } ovsthread_once_done(&once); } } static const struct nxm_field * nxm_field_by_header(uint64_t header) { const struct nxm_field_index *nfi; uint64_t header_no_len; nxm_init(); if (nxm_hasmask(header)) { header = nxm_make_exact_header(header); } header_no_len = nxm_no_len(header); HMAP_FOR_EACH_IN_BUCKET (nfi, header_node, hash_uint64(header_no_len), &nxm_header_map) { if (header_no_len == nxm_no_len(nfi->nf.header)) { if (nxm_length(header) == nxm_length(nfi->nf.header) || mf_from_id(nfi->nf.id)->variable_len) { return &nfi->nf; } else { return NULL; } } } return NULL; } static const struct nxm_field * nxm_field_by_name(const char *name, size_t len) { const struct nxm_field_index *nfi; nxm_init(); HMAP_FOR_EACH_WITH_HASH (nfi, name_node, hash_bytes(name, len, 0), &nxm_name_map) { if (strlen(nfi->nf.name) == 
len && !memcmp(nfi->nf.name, name, len)) {
            return &nfi->nf;
        }
    }
    return NULL;
}

/* Returns the nxm_field for meta-flow field 'id': the last entry in the
 * per-field list whose minimum OpenFlow version does not exceed 'version'
 * (falling back to the list's first entry), or NULL if 'id' has no NXM/OXM
 * representation at all. */
static const struct nxm_field *
nxm_field_by_mf_id(enum mf_field_id id, enum ofp_version version)
{
    const struct nxm_field_index *nfi;
    const struct nxm_field *f;

    nxm_init();                 /* Ensure the lookup maps are built. */

    f = NULL;
    LIST_FOR_EACH (nfi, mf_node, &nxm_mf_map[id]) {
        /* Take the first entry unconditionally, then replace it with any
         * later variant that 'version' supports. */
        if (!f || version >= nfi->nf.version) {
            f = &nfi->nf;
        }
    }
    return f;
}
883763.c
// SPDX-License-Identifier: GPL-2.0-only /* Connection state tracking for netfilter. This is separated from, but required by, the NAT layer; it can also be used by an iptables extension. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <[email protected]> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * (C) 2005-2012 Patrick McHardy <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/siphash.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/moduleparam.h> #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/mm.h> #include <linux/nsproxy.h> #include <linux/rculist_nulls.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <net/netfilter/nf_conntrack_timeout.h> #include <net/netfilter/nf_conntrack_labels.h> #include <net/netfilter/nf_conntrack_synproxy.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netns/hash.h> #include <net/ip.h> #include "nf_internals.h" __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; EXPORT_SYMBOL_GPL(nf_conntrack_locks); __cacheline_aligned_in_smp 
DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); struct hlist_nulls_head *nf_conntrack_hash __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_hash); struct conntrack_gc_work { struct delayed_work dwork; u32 last_bucket; bool exiting; bool early_drop; long next_gc_run; }; static __read_mostly struct kmem_cache *nf_conntrack_cachep; static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); static __read_mostly bool nf_conntrack_locks_all; /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ #define GC_MAX_BUCKETS_DIV 128u /* upper bound of full table scan */ #define GC_MAX_SCAN_JIFFIES (16u * HZ) /* desired ratio of entries found to be expired */ #define GC_EVICT_RATIO 50u static struct conntrack_gc_work conntrack_gc_work; void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) { /* 1) Acquire the lock */ spin_lock(lock); /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics * It pairs with the smp_store_release() in nf_conntrack_all_unlock() */ if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false)) return; /* fast path failed, unlock */ spin_unlock(lock); /* Slow path 1) get global lock */ spin_lock(&nf_conntrack_locks_all_lock); /* Slow path 2) get the lock we want */ spin_lock(lock); /* Slow path 3) release the global lock */ spin_unlock(&nf_conntrack_locks_all_lock); } EXPORT_SYMBOL_GPL(nf_conntrack_lock); static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) { h1 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS; spin_unlock(&nf_conntrack_locks[h1]); if (h1 != h2) spin_unlock(&nf_conntrack_locks[h2]); } /* return true if we need to recompute hashes (in case hash table was resized) */ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, unsigned int h2, unsigned int sequence) { h1 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS; if (h1 <= h2) { nf_conntrack_lock(&nf_conntrack_locks[h1]); if (h1 != h2) spin_lock_nested(&nf_conntrack_locks[h2], SINGLE_DEPTH_NESTING); } else { 
nf_conntrack_lock(&nf_conntrack_locks[h2]); spin_lock_nested(&nf_conntrack_locks[h1], SINGLE_DEPTH_NESTING); } if (read_seqcount_retry(&nf_conntrack_generation, sequence)) { nf_conntrack_double_unlock(h1, h2); return true; } return false; } static void nf_conntrack_all_lock(void) { int i; spin_lock(&nf_conntrack_locks_all_lock); nf_conntrack_locks_all = true; for (i = 0; i < CONNTRACK_LOCKS; i++) { spin_lock(&nf_conntrack_locks[i]); /* This spin_unlock provides the "release" to ensure that * nf_conntrack_locks_all==true is visible to everyone that * acquired spin_lock(&nf_conntrack_locks[]). */ spin_unlock(&nf_conntrack_locks[i]); } } static void nf_conntrack_all_unlock(void) { /* All prior stores must be complete before we clear * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock() * might observe the false value but not the entire * critical section. * It pairs with the smp_load_acquire() in nf_conntrack_lock() */ smp_store_release(&nf_conntrack_locks_all, false); spin_unlock(&nf_conntrack_locks_all_lock); } unsigned int nf_conntrack_htable_size __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); seqcount_t nf_conntrack_generation __read_mostly; static unsigned int nf_conntrack_hash_rnd __read_mostly; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, const struct net *net) { unsigned int n; u32 seed; get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd)); /* The direction must be ignored, so we hash everything up to the * destination ports (which is a multiple of 4) and treat the last * three bytes manually. 
*/ seed = nf_conntrack_hash_rnd ^ net_hash_mix(net); n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); return jhash2((u32 *)tuple, n, seed ^ (((__force __u16)tuple->dst.u.all << 16) | tuple->dst.protonum)); } static u32 scale_hash(u32 hash) { return reciprocal_scale(hash, nf_conntrack_htable_size); } static u32 __hash_conntrack(const struct net *net, const struct nf_conntrack_tuple *tuple, unsigned int size) { return reciprocal_scale(hash_conntrack_raw(tuple, net), size); } static u32 hash_conntrack(const struct net *net, const struct nf_conntrack_tuple *tuple) { return scale_hash(hash_conntrack_raw(tuple, net)); } static bool nf_ct_get_tuple_ports(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { struct { __be16 sport; __be16 dport; } _inet_hdr, *inet_hdr; /* Actually only need first 4 bytes to get ports. */ inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr); if (!inet_hdr) return false; tuple->src.u.udp.port = inet_hdr->sport; tuple->dst.u.udp.port = inet_hdr->dport; return true; } static bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct net *net, struct nf_conntrack_tuple *tuple) { unsigned int size; const __be32 *ap; __be32 _addrs[8]; memset(tuple, 0, sizeof(*tuple)); tuple->src.l3num = l3num; switch (l3num) { case NFPROTO_IPV4: nhoff += offsetof(struct iphdr, saddr); size = 2 * sizeof(__be32); break; case NFPROTO_IPV6: nhoff += offsetof(struct ipv6hdr, saddr); size = sizeof(_addrs); break; default: return true; } ap = skb_header_pointer(skb, nhoff, size, _addrs); if (!ap) return false; switch (l3num) { case NFPROTO_IPV4: tuple->src.u3.ip = ap[0]; tuple->dst.u3.ip = ap[1]; break; case NFPROTO_IPV6: memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); break; } tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; switch 
(protonum) {
	/* ICMP/ICMPv6/GRE carry protocol-specific tuple data; the port-based
	 * protocols all share the generic first-4-bytes port extractor. */
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_ICMP:
		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_TCP:
	case IPPROTO_UDP: /* fallthrough */
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	case IPPROTO_SCTP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
	case IPPROTO_DCCP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
	default:
		break;
	}

	/* Unknown L4 protocol: accept the tuple (zeroed earlier) as-is. */
	return true;
}

/* Locate the transport header of an IPv4 packet whose IP header starts at
 * 'nhoff'.  Stores the IP protocol number in '*protonum' and returns the
 * offset of the L4 header, or -1 for a truncated header, a non-first
 * fragment, or a bogus header length. */
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u_int8_t *protonum)
{
	int dataoff = -1;
	const struct iphdr *iph;
	struct iphdr _iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (!iph)
		return -1;

	/* Conntrack defragments packets, we might still see fragments
	 * inside ICMP packets though.
*/ if (iph->frag_off & htons(IP_OFFSET)) return -1; dataoff = nhoff + (iph->ihl << 2); *protonum = iph->protocol; /* Check bogus IP headers */ if (dataoff > skb->len) { pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n", nhoff, iph->ihl << 2, skb->len); return -1; } return dataoff; } #if IS_ENABLED(CONFIG_IPV6) static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u8 *protonum) { int protoff = -1; unsigned int extoff = nhoff + sizeof(struct ipv6hdr); __be16 frag_off; u8 nexthdr; if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr), &nexthdr, sizeof(nexthdr)) != 0) { pr_debug("can't get nexthdr\n"); return -1; } protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off); /* * (protoff == skb->len) means the packet has not data, just * IPv6 and possibly extensions headers, but it is tracked anyway */ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { pr_debug("can't find proto in pkt\n"); return -1; } *protonum = nexthdr; return protoff; } #endif static int get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u8 pf, u8 *l4num) { switch (pf) { case NFPROTO_IPV4: return ipv4_get_l4proto(skb, nhoff, l4num); #if IS_ENABLED(CONFIG_IPV6) case NFPROTO_IPV6: return ipv6_get_l4proto(skb, nhoff, l4num); #endif default: *l4num = 0; break; } return -1; } bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, struct net *net, struct nf_conntrack_tuple *tuple) { u8 protonum; int protoff; protoff = get_l4proto(skb, nhoff, l3num, &protonum); if (protoff <= 0) return false; return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple); } EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig) { memset(inverse, 0, sizeof(*inverse)); inverse->src.l3num = orig->src.l3num; switch (orig->src.l3num) { case NFPROTO_IPV4: inverse->src.u3.ip = orig->dst.u3.ip; inverse->dst.u3.ip = orig->src.u3.ip; break; case 
NFPROTO_IPV6:
		inverse->src.u3.in6 = orig->dst.u3.in6;
		inverse->dst.u3.in6 = orig->src.u3.in6;
		break;
	default:
		break;
	}

	inverse->dst.dir = !orig->dst.dir;
	inverse->dst.protonum = orig->dst.protonum;

	/* ICMP/ICMPv6 need protocol-specific inversion helpers; everything
	 * else just swaps the generic port pair below. */
	switch (orig->dst.protonum) {
	case IPPROTO_ICMP:
		return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
	}

	inverse->src.u.all = orig->dst.u.all;
	inverse->dst.u.all = orig->src.u.all;
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * intentionally doesn't re-use any of the seeds used for hash
 * table location, we assume id gets exposed to userspace.
 *
 * Following nf_conn items do not change throughout lifetime
 * of the nf_conn:
 *
 * 1. nf_conn address
 * 2. nf_conn->master address (normally NULL)
 * 3. the associated net namespace
 * 4. the original direction tuple
 */
u32 nf_ct_get_id(const struct nf_conn *ct)
{
	static __read_mostly siphash_key_t ct_id_seed;
	unsigned long a, b, c, d;

	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));

	a = (unsigned long)ct;
	b = (unsigned long)ct->master;
	c = (unsigned long)nf_ct_net(ct);
	/* Hash the original-direction tuple down to one word first. */
	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
				   &ct_id_seed);
#ifdef CONFIG_64BIT
	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);

/* Unlink both directions of 'ct' from the conntrack hash table and destroy
 * any expectations it still has pending. */
static void clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) dying list
*/ ct->cpu = smp_processor_id(); pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &pcpu->dying); spin_unlock(&pcpu->lock); } /* must be called with local_bh_disable */ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; /* add this conntrack to the (per cpu) unconfirmed list */ ct->cpu = smp_processor_id(); pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &pcpu->unconfirmed); spin_unlock(&pcpu->lock); } /* must be called with local_bh_disable */ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; /* We overload first tuple to link into unconfirmed or dying list.*/ pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); spin_unlock(&pcpu->lock); } #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK) /* Released via destroy_conntrack() */ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, const struct nf_conntrack_zone *zone, gfp_t flags) { struct nf_conn *tmpl, *p; if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) { tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags); if (!tmpl) return NULL; p = tmpl; tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); if (tmpl != p) { tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p; } } else { tmpl = kzalloc(sizeof(*tmpl), flags); if (!tmpl) return NULL; } tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); nf_ct_zone_add(tmpl, zone); atomic_set(&tmpl->ct_general.use, 0); return tmpl; } EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); void nf_ct_tmpl_free(struct nf_conn *tmpl) { nf_ct_ext_destroy(tmpl); nf_ct_ext_free(tmpl); if 
(ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) kfree((char *)tmpl - tmpl->proto.tmpl_padto); else kfree(tmpl); } EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); static void destroy_gre_conntrack(struct nf_conn *ct) { #ifdef CONFIG_NF_CT_PROTO_GRE struct nf_conn *master = ct->master; if (master) nf_ct_gre_keymap_destroy(master); #endif } static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; pr_debug("destroy_conntrack(%p)\n", ct); WARN_ON(atomic_read(&nfct->use) != 0); if (unlikely(nf_ct_is_template(ct))) { nf_ct_tmpl_free(ct); return; } if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE)) destroy_gre_conntrack(ct); local_bh_disable(); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, * too. */ nf_ct_remove_expectations(ct); nf_ct_del_from_dying_or_unconfirmed_list(ct); local_bh_enable(); if (ct->master) nf_ct_put(ct->master); pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); nf_conntrack_free(ct); } static void nf_ct_delete_from_lists(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); unsigned int hash, reply_hash; unsigned int sequence; nf_ct_helper_destroy(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); clean_from_lists(ct); nf_conntrack_double_unlock(hash, reply_hash); nf_ct_add_to_dying_list(ct); local_bh_enable(); } bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) { struct nf_conn_tstamp *tstamp; if (test_and_set_bit(IPS_DYING_BIT, &ct->status)) return false; tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) tstamp->stop = ktime_get_real_ns(); if (nf_conntrack_event_report(IPCT_DESTROY, ct, portid, report) < 
0) { /* destroy event was not delivered. nf_ct_put will * be done by event cache worker on redelivery. */ nf_ct_delete_from_lists(ct); nf_conntrack_ecache_delayed_work(nf_ct_net(ct)); return false; } nf_conntrack_ecache_work(nf_ct_net(ct)); nf_ct_delete_from_lists(ct); nf_ct_put(ct); return true; } EXPORT_SYMBOL_GPL(nf_ct_delete); static inline bool nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone, const struct net *net) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); /* A conntrack can be recreated with the equal tuple, * so we need to check that the conntrack is confirmed */ return nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && nf_ct_is_confirmed(ct) && net_eq(net, nf_ct_net(ct)); } static inline bool nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2) { return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple, &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) && nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) && nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) && net_eq(nf_ct_net(ct1), nf_ct_net(ct2)); } /* caller must hold rcu readlock and none of the nf_conntrack_locks */ static void nf_ct_gc_expired(struct nf_conn *ct) { if (!atomic_inc_not_zero(&ct->ct_general.use)) return; if (nf_ct_should_gc(ct)) nf_ct_kill(ct); nf_ct_put(ct); } /* * Warning : * - Caller must take a reference on returned object * and recheck nf_ct_tuple_equal(tuple, &h->tuple) */ static struct nf_conntrack_tuple_hash * ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_head *ct_hash; struct hlist_nulls_node *n; unsigned int bucket, hsize; begin: nf_conntrack_get_ht(&ct_hash, &hsize); bucket = 
reciprocal_scale(hash, hsize); hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { struct nf_conn *ct; ct = nf_ct_tuplehash_to_ctrack(h); if (nf_ct_is_expired(ct)) { nf_ct_gc_expired(ct); continue; } if (nf_ct_key_equal(h, tuple, zone, net)) return h; } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(n) != bucket) { NF_CT_STAT_INC_ATOMIC(net, search_restart); goto begin; } return NULL; } /* Find a connection corresponding to a tuple. */ static struct nf_conntrack_tuple_hash * __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; rcu_read_lock(); h = ____nf_conntrack_find(net, zone, tuple, hash); if (h) { /* We have a candidate that matches the tuple we're interested * in, try to obtain a reference and re-check tuple */ ct = nf_ct_tuplehash_to_ctrack(h); if (likely(atomic_inc_not_zero(&ct->ct_general.use))) { if (likely(nf_ct_key_equal(h, tuple, zone, net))) goto found; /* TYPESAFE_BY_RCU recycled the candidate */ nf_ct_put(ct); } h = NULL; } found: rcu_read_unlock(); return h; } struct nf_conntrack_tuple_hash * nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple) { return __nf_conntrack_find_get(net, zone, tuple, hash_conntrack_raw(tuple, net)); } EXPORT_SYMBOL_GPL(nf_conntrack_find_get); static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int reply_hash) { hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &nf_conntrack_hash[hash]); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &nf_conntrack_hash[reply_hash]); } int nf_conntrack_hash_check_insert(struct nf_conn *ct) { const struct nf_conntrack_zone *zone; struct net *net = nf_ct_net(ct); unsigned int 
hash, reply_hash; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int sequence; zone = nf_ct_zone(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); /* See if there's one in the list already, including reverse */ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, zone, net)) goto out; hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, zone, net)) goto out; smp_wmb(); /* The caller holds a reference to this object */ atomic_set(&ct->ct_general.use, 2); __nf_conntrack_hash_insert(ct, hash, reply_hash); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert); local_bh_enable(); return 0; out: nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); return -EEXIST; } EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); static inline void nf_ct_acct_update(struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int len) { struct nf_conn_acct *acct; acct = nf_conn_acct_find(ct); if (acct) { struct nf_conn_counter *counter = acct->counter; atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes); } } static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct nf_conn *loser_ct) { struct nf_conn_acct *acct; acct = nf_conn_acct_find(loser_ct); if (acct) { struct nf_conn_counter *counter = acct->counter; unsigned int bytes; /* u32 should be fine since we must have seen one packet. 
*/ bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes); nf_ct_acct_update(ct, ctinfo, bytes); } } /* Resolve race on insertion if this protocol allows this. */ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, enum ip_conntrack_info ctinfo, struct nf_conntrack_tuple_hash *h) { /* This is the conntrack entry already in hashes that won race. */ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); const struct nf_conntrack_l4proto *l4proto; enum ip_conntrack_info oldinfo; struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->allow_clash && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { if (((ct->status & IPS_NAT_DONE_MASK) == 0) || nf_ct_match(ct, loser_ct)) { nf_ct_acct_merge(ct, ctinfo, loser_ct); nf_conntrack_put(&loser_ct->ct_general); nf_ct_set(skb, ct, oldinfo); return NF_ACCEPT; } nf_ct_put(ct); } NF_CT_STAT_INC(net, drop); return NF_DROP; } /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) { const struct nf_conntrack_zone *zone; unsigned int hash, reply_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conn_help *help; struct nf_conn_tstamp *tstamp; struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; unsigned int sequence; int ret = NF_DROP; ct = nf_ct_get(skb, &ctinfo); net = nf_ct_net(ct); /* ipt_REJECT uses nf_conntrack_attach to attach related ICMP/TCP RST packets in other direction. Actual packet which created connection will be IP_CT_NEW or for an expected connection, IP_CT_RELATED. 
*/ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; zone = nf_ct_zone(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&nf_conntrack_generation); /* reuse the hash saved before */ hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; hash = scale_hash(hash); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); /* We're not in hash table, and we refuse to set up related * connections for unconfirmed conns. But packet copies and * REJECT will give spurious warnings here. */ /* Another skb with the same unconfirmed conntrack may * win the race. This may happen for bridge(br_flood) * or broadcast/multicast packets do skb_clone with * unconfirmed conntrack. */ if (unlikely(nf_ct_is_confirmed(ct))) { WARN_ON_ONCE(1); nf_conntrack_double_unlock(hash, reply_hash); local_bh_enable(); return NF_DROP; } pr_debug("Confirming conntrack %p\n", ct); /* We have to check the DYING flag after unlink to prevent * a race against nf_ct_get_next_corpse() possibly called from * user context, else we insert an already 'dead' hash, blocking * further use of that particular connection -JM. */ nf_ct_del_from_dying_or_unconfirmed_list(ct); if (unlikely(nf_ct_is_dying(ct))) { nf_ct_add_to_dying_list(ct); goto dying; } /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. */ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, zone, net)) goto out; hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, zone, net)) goto out; /* Timer relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. 
*/ ct->timeout += nfct_time_stamp; atomic_inc(&ct->ct_general.use); ct->status |= IPS_CONFIRMED; /* set conntrack timestamp, if enabled. */ tstamp = nf_conn_tstamp_find(ct); if (tstamp) tstamp->start = ktime_get_real_ns(); /* Since the lookup is lockless, hash insertion must be done after * starting the timer and setting the CONFIRMED bit. The RCU barriers * guarantee that no other CPU can find the conntrack before the above * stores are visible. */ __nf_conntrack_hash_insert(ct, hash, reply_hash); nf_conntrack_double_unlock(hash, reply_hash); local_bh_enable(); help = nfct_help(ct); if (help && help->helper) nf_conntrack_event_cache(IPCT_HELPER, ct); nf_conntrack_event_cache(master_ct(ct) ? IPCT_RELATED : IPCT_NEW, ct); return NF_ACCEPT; out: nf_ct_add_to_dying_list(ct); ret = nf_ct_resolve_clash(net, skb, ctinfo, h); dying: nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); return ret; } EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); /* Returns true if a connection correspondings to the tuple (required for NAT). */ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack) { struct net *net = nf_ct_net(ignored_conntrack); const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_head *ct_hash; unsigned int hash, hsize; struct hlist_nulls_node *n; struct nf_conn *ct; zone = nf_ct_zone(ignored_conntrack); rcu_read_lock(); begin: nf_conntrack_get_ht(&ct_hash, &hsize); hash = __hash_conntrack(net, tuple, hsize); hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct == ignored_conntrack) continue; if (nf_ct_is_expired(ct)) { nf_ct_gc_expired(ct); continue; } if (nf_ct_key_equal(h, tuple, zone, net)) { /* Tuple is taken already, so caller will need to find * a new source port to use. * * Only exception: * If the *original tuples* are identical, then both * conntracks refer to the same flow. 
* This is a rare situation, it can occur e.g. when * more than one UDP packet is sent from same socket * in different threads. * * Let nf_ct_resolve_clash() deal with this later. */ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) continue; NF_CT_STAT_INC_ATOMIC(net, found); rcu_read_unlock(); return 1; } } if (get_nulls_value(n) != hash) { NF_CT_STAT_INC_ATOMIC(net, search_restart); goto begin; } rcu_read_unlock(); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); #define NF_CT_EVICTION_RANGE 8 /* There's a small race here where we may free a just-assured connection. Too bad: we're in trouble anyway. */ static unsigned int early_drop_list(struct net *net, struct hlist_nulls_head *head) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int drops = 0; struct nf_conn *tmp; hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) continue; if (nf_ct_is_expired(tmp)) { nf_ct_gc_expired(tmp); continue; } if (test_bit(IPS_ASSURED_BIT, &tmp->status) || !net_eq(nf_ct_net(tmp), net) || nf_ct_is_dying(tmp)) continue; if (!atomic_inc_not_zero(&tmp->ct_general.use)) continue; /* kill only if still in same netns -- might have moved due to * SLAB_TYPESAFE_BY_RCU rules. * * We steal the timer reference. If that fails timer has * already fired or someone else deleted it. Just drop ref * and move to next entry. 
		 */
		if (net_eq(nf_ct_net(tmp), net) &&
		    nf_ct_is_confirmed(tmp) &&
		    nf_ct_delete(tmp, 0, 0))
			drops++;

		nf_ct_put(tmp);
	}

	return drops;
}

/* Evict droppable entries near @hash when the table is full: scan up
 * to NF_CT_EVICTION_RANGE consecutive buckets starting at the scaled
 * hash position.  Returns true once at least one entry was dropped.
 */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	unsigned int i, bucket;

	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
		struct hlist_nulls_head *ct_hash;
		unsigned int hsize, drops;

		rcu_read_lock();
		/* re-fetch the table each round: it may be resized */
		nf_conntrack_get_ht(&ct_hash, &hsize);
		if (!i)
			bucket = reciprocal_scale(hash, hsize);
		else
			bucket = (bucket + 1) % hsize;

		drops = early_drop_list(net, &ct_hash[bucket]);
		rcu_read_unlock();

		if (drops) {
			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
			return true;
		}
	}

	return false;
}

/* Entries the gc worker must leave alone: not yet hashed, or already
 * being torn down.
 */
static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

/* Unassured entries can always be early-dropped; assured ones only if
 * their l4 tracker explicitly allows it via ->can_early_drop().
 */
static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
		return true;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
		return true;

	return false;
}

#define DAY	(86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire, this save
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
*/ static void nf_ct_offload_timeout(struct nf_conn *ct) { if (nf_ct_expires(ct) < DAY / 2) ct->timeout = nfct_time_stamp + DAY; } static void gc_worker(struct work_struct *work) { unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); unsigned int i, goal, buckets = 0, expired_count = 0; unsigned int nf_conntrack_max95 = 0; struct conntrack_gc_work *gc_work; unsigned int ratio, scanned = 0; unsigned long next_run; gc_work = container_of(work, struct conntrack_gc_work, dwork.work); goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; i = gc_work->last_bucket; if (gc_work->early_drop) nf_conntrack_max95 = nf_conntrack_max / 100u * 95u; do { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_head *ct_hash; struct hlist_nulls_node *n; unsigned int hashsz; struct nf_conn *tmp; i++; rcu_read_lock(); nf_conntrack_get_ht(&ct_hash, &hashsz); if (i >= hashsz) i = 0; hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { struct net *net; tmp = nf_ct_tuplehash_to_ctrack(h); scanned++; if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) { nf_ct_offload_timeout(tmp); continue; } if (nf_ct_is_expired(tmp)) { nf_ct_gc_expired(tmp); expired_count++; continue; } if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp)) continue; net = nf_ct_net(tmp); if (atomic_read(&net->ct.count) < nf_conntrack_max95) continue; /* need to take reference to avoid possible races */ if (!atomic_inc_not_zero(&tmp->ct_general.use)) continue; if (gc_worker_skip_ct(tmp)) { nf_ct_put(tmp); continue; } if (gc_worker_can_early_drop(tmp)) nf_ct_kill(tmp); nf_ct_put(tmp); } /* could check get_nulls_value() here and restart if ct * was moved to another chain. But given gc is best-effort * we will just continue with next hash slot. */ rcu_read_unlock(); cond_resched(); } while (++buckets < goal); if (gc_work->exiting) return; /* * Eviction will normally happen from the packet path, and not * from this gc worker. 
* * This worker is only here to reap expired entries when system went * idle after a busy period. * * The heuristics below are supposed to balance conflicting goals: * * 1. Minimize time until we notice a stale entry * 2. Maximize scan intervals to not waste cycles * * Normally, expire ratio will be close to 0. * * As soon as a sizeable fraction of the entries have expired * increase scan frequency. */ ratio = scanned ? expired_count * 100 / scanned : 0; if (ratio > GC_EVICT_RATIO) { gc_work->next_gc_run = min_interval; } else { unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0); gc_work->next_gc_run += min_interval; if (gc_work->next_gc_run > max) gc_work->next_gc_run = max; } next_run = gc_work->next_gc_run; gc_work->last_bucket = i; gc_work->early_drop = false; queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run); } static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) { INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker); gc_work->next_gc_run = HZ; gc_work->exiting = false; } static struct nf_conn * __nf_conntrack_alloc(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp, u32 hash) { struct nf_conn *ct; /* We don't want any race condition at early drop stage */ atomic_inc(&net->ct.count); if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash)) { if (!conntrack_gc_work.early_drop) conntrack_gc_work.early_drop = true; atomic_dec(&net->ct.count); net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); return ERR_PTR(-ENOMEM); } } /* * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_TYPESAFE_BY_RCU. 
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL)
		goto out;

	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	ct->status = 0;
	ct->timeout = 0;
	write_pnet(&ct->ct_net, net);
	/* zero everything between __nfct_init_offset and the proto union */
	memset(&ct->__nfct_init_offset, 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, __nfct_init_offset));

	nf_ct_zone_add(ct, zone);

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
	atomic_set(&ct->ct_general.use, 0);
	return ct;
out:
	atomic_dec(&net->ct.count);
	return ERR_PTR(-ENOMEM);
}

/* Public allocation wrapper; hash 0 means "no precomputed hash". */
struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

/* Return an entry to the slab and drop the per-netns entry count.
 * Must only be called with refcount already at zero (the
 * SLAB_TYPESAFE_BY_RCU contract).
 */
void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* A freed object has refcnt == 0, that's
	 * the golden rule for SLAB_TYPESAFE_BY_RCU
	 */
	WARN_ON(atomic_read(&ct->ct_general.use) != 0);
	nf_ct_ext_destroy(ct);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	smp_mb__before_atomic();
	atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
 * failed due to stress.  Otherwise it really is unclassifiable.
*/ static noinline struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct sk_buff *skb, unsigned int dataoff, u32 hash) { struct nf_conn *ct; struct nf_conn_help *help; struct nf_conntrack_tuple repl_tuple; struct nf_conntrack_ecache *ecache; struct nf_conntrack_expect *exp = NULL; const struct nf_conntrack_zone *zone; struct nf_conn_timeout *timeout_ext; struct nf_conntrack_zone tmp; if (!nf_ct_invert_tuple(&repl_tuple, tuple)) { pr_debug("Can't invert tuple.\n"); return NULL; } zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, hash); if (IS_ERR(ct)) return (struct nf_conntrack_tuple_hash *)ct; if (!nf_ct_add_synproxy(ct, tmpl)) { nf_conntrack_free(ct); return ERR_PTR(-ENOMEM); } timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; if (timeout_ext) nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout), GFP_ATOMIC); nf_ct_acct_ext_add(ct, GFP_ATOMIC); nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_labels_ext_add(ct); ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, ecache ? ecache->expmask : 0, GFP_ATOMIC); local_bh_disable(); if (net->ct.expect_count) { spin_lock(&nf_conntrack_expect_lock); exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { pr_debug("expectation arrives ct=%p exp=%p\n", ct, exp); /* Welcome, Mr. Bond. We've been expecting you... 
*/ __set_bit(IPS_EXPECTED_BIT, &ct->status); /* exp->master safe, refcnt bumped in nf_ct_find_expectation */ ct->master = exp->master; if (exp->helper) { help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help) rcu_assign_pointer(help->helper, exp->helper); } #ifdef CONFIG_NF_CONNTRACK_MARK ct->mark = exp->master->mark; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; #endif NF_CT_STAT_INC(net, expect_new); } spin_unlock(&nf_conntrack_expect_lock); } if (!exp) __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); /* Now it is inserted into the unconfirmed list, bump refcount */ nf_conntrack_get(&ct->ct_general); nf_ct_add_to_unconfirmed_list(ct); local_bh_enable(); if (exp) { if (exp->expectfn) exp->expectfn(ct, exp); nf_ct_expect_put(exp); } return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } /* On success, returns 0, sets skb->_nfct | ctinfo */ static int resolve_normal_ct(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u_int8_t protonum, const struct nf_hook_state *state) { const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; struct nf_conn *ct; u32 hash; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, state->pf, protonum, state->net, &tuple)) { pr_debug("Can't get tuple\n"); return 0; } /* look for tuple match */ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); hash = hash_conntrack_raw(&tuple, state->net); h = __nf_conntrack_find_get(state->net, zone, &tuple, hash); if (!h) { h = init_conntrack(state->net, tmpl, &tuple, skb, dataoff, hash); if (!h) return 0; if (IS_ERR(h)) return PTR_ERR(h); } ct = nf_ct_tuplehash_to_ctrack(h); /* It exists; we have (non-exclusive) reference. */ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { ctinfo = IP_CT_ESTABLISHED_REPLY; } else { /* Once we've had two way comms, always ESTABLISHED. 
		 */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("normal packet for %p\n", ct);
			ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("related packet for %p\n", ct);
			ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("new packet for %p\n", ct);
			ctinfo = IP_CT_NEW;
		}
	}
	nf_ct_set(skb, ct, ctinfo);
	return 0;
}

/*
 * icmp packets need special treatment to handle error messages that are
 * related to a connection.
 *
 * Callers need to check if skb has a conntrack assigned when this
 * helper returns; in such case skb belongs to an already known connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
			 struct sk_buff *skb,
			 unsigned int dataoff,
			 u8 protonum,
			 const struct nf_hook_state *state)
{
	int ret;

	if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
	else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
	else
		return NF_ACCEPT;

	/* non-positive return: count the error before handing the
	 * (possibly negated) verdict back to the caller.
	 */
	if (ret <= 0) {
		NF_CT_STAT_INC_ATOMIC(state->net, error);
		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
	}

	return ret;
}

/* Fallback "state machine" for protocols without a dedicated tracker:
 * accept the packet and refresh the generic timeout (per-conntrack
 * override first, else the per-netns default).
 */
static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
			  enum ip_conntrack_info ctinfo)
{
	const unsigned int *timeout = nf_ct_timeout_lookup(ct);

	if (!timeout)
		timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;

	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
	return NF_ACCEPT;
}

/* Returns verdict for packet, or -1 for invalid.
*/ static int nf_conntrack_handle_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { switch (nf_ct_protonum(ct)) { case IPPROTO_TCP: return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, state); case IPPROTO_UDP: return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, state); case IPPROTO_ICMP: return nf_conntrack_icmp_packet(ct, skb, ctinfo, state); #if IS_ENABLED(CONFIG_IPV6) case IPPROTO_ICMPV6: return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state); #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE case IPPROTO_UDPLITE: return nf_conntrack_udplite_packet(ct, skb, dataoff, ctinfo, state); #endif #ifdef CONFIG_NF_CT_PROTO_SCTP case IPPROTO_SCTP: return nf_conntrack_sctp_packet(ct, skb, dataoff, ctinfo, state); #endif #ifdef CONFIG_NF_CT_PROTO_DCCP case IPPROTO_DCCP: return nf_conntrack_dccp_packet(ct, skb, dataoff, ctinfo, state); #endif #ifdef CONFIG_NF_CT_PROTO_GRE case IPPROTO_GRE: return nf_conntrack_gre_packet(ct, skb, dataoff, ctinfo, state); #endif } return generic_packet(ct, skb, ctinfo); } unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state) { enum ip_conntrack_info ctinfo; struct nf_conn *ct, *tmpl; u_int8_t protonum; int dataoff, ret; tmpl = nf_ct_get(skb, &ctinfo); if (tmpl || ctinfo == IP_CT_UNTRACKED) { /* Previously seen (loopback or untracked)? Ignore. 
*/ if ((tmpl && !nf_ct_is_template(tmpl)) || ctinfo == IP_CT_UNTRACKED) { NF_CT_STAT_INC_ATOMIC(state->net, ignore); return NF_ACCEPT; } skb->_nfct = 0; } /* rcu_read_lock()ed by nf_hook_thresh */ dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum); if (dataoff <= 0) { pr_debug("not prepared to track yet or error occurred\n"); NF_CT_STAT_INC_ATOMIC(state->net, error); NF_CT_STAT_INC_ATOMIC(state->net, invalid); ret = NF_ACCEPT; goto out; } if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) { ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff, protonum, state); if (ret <= 0) { ret = -ret; goto out; } /* ICMP[v6] protocol trackers may assign one conntrack. */ if (skb->_nfct) goto out; } repeat: ret = resolve_normal_ct(tmpl, skb, dataoff, protonum, state); if (ret < 0) { /* Too stressed to deal. */ NF_CT_STAT_INC_ATOMIC(state->net, drop); ret = NF_DROP; goto out; } ct = nf_ct_get(skb, &ctinfo); if (!ct) { /* Not valid part of a connection */ NF_CT_STAT_INC_ATOMIC(state->net, invalid); ret = NF_ACCEPT; goto out; } ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); skb->_nfct = 0; NF_CT_STAT_INC_ATOMIC(state->net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(state->net, drop); /* Special case: TCP tracker reports an attempt to reopen a * closed/aborted connection. We have to go back and create a * fresh conntrack. */ if (ret == -NF_REPEAT) goto repeat; ret = -ret; goto out; } if (ctinfo == IP_CT_ESTABLISHED_REPLY && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: if (tmpl) nf_ct_put(tmpl); return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_in); /* Alter reply tuple (maybe alter helper). 
   This is for NAT, and is implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	WARN_ON(nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	/* keep the current helper if the entry is expected or already
	 * has expectations of its own; otherwise retry helper lookup
	 * against the rewritten tuple.
	 */
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  u32 extra_jiffies,
			  bool do_acct)
{
	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (nf_ct_is_confirmed(ct))
		extra_jiffies += nfct_time_stamp;

	/* READ_ONCE/WRITE_ONCE: timeout is read locklessly elsewhere */
	if (READ_ONCE(ct->timeout) != extra_jiffies)
		WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
	if (do_acct)
		nf_ct_acct_update(ct, ctinfo, skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

/* Account the final packet, then kill the conntrack. */
bool nf_ct_kill_acct(struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     const struct sk_buff *skb)
{
	nf_ct_acct_update(ct, ctinfo, skb->len);

	return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike.
 */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	/* serialize src/dst ports; the tcp union member aliases the
	 * port field of every port-based protocol.
	 */
	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

/* Inverse of nf_ct_port_tuple_to_nlattr; both port attributes are
 * mandatory.
 */
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

/* Worst-case netlink payload size for a port-based tuple; computed
 * once from the policy and cached.
 */
unsigned int nf_ct_port_nlattr_tuple_size(void)
{
	static unsigned int size __read_mostly;

	if (!size)
		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

	return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT.
*/ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; /* This ICMP is in reverse direction to the packet which caused it */ ct = nf_ct_get(skb, &ctinfo); if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ctinfo = IP_CT_RELATED_REPLY; else ctinfo = IP_CT_RELATED; /* Attach to new skbuff, and increment count */ nf_ct_set(nskb, ct, ctinfo); nf_conntrack_get(skb_nfct(nskb)); } static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; struct nf_nat_hook *nat_hook; unsigned int status; int dataoff; u16 l3num; u8 l4num; l3num = nf_ct_l3num(ct); dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num); if (dataoff <= 0) return -1; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, l4num, net, &tuple)) return -1; if (ct->status & IPS_SRC_NAT) { memcpy(tuple.src.u3.all, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all, sizeof(tuple.src.u3.all)); tuple.src.u.all = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all; } if (ct->status & IPS_DST_NAT) { memcpy(tuple.dst.u3.all, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all, sizeof(tuple.dst.u3.all)); tuple.dst.u.all = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all; } h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple); if (!h) return 0; /* Store status bits of the conntrack that is clashing to re-do NAT * mangling according to what it has been done already to this packet. 
*/ status = ct->status; nf_ct_put(ct); ct = nf_ct_tuplehash_to_ctrack(h); nf_ct_set(skb, ct, ctinfo); nat_hook = rcu_dereference(nf_nat_hook); if (!nat_hook) return 0; if (status & IPS_SRC_NAT && nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC, IP_CT_DIR_ORIGINAL) == NF_DROP) return -1; if (status & IPS_DST_NAT && nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST, IP_CT_DIR_ORIGINAL) == NF_DROP) return -1; return 0; } /* This packet is coming from userspace via nf_queue, complete the packet * processing after the helper invocation in nf_confirm(). */ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { const struct nf_conntrack_helper *helper; const struct nf_conn_help *help; int protoff; help = nfct_help(ct); if (!help) return 0; helper = rcu_dereference(help->helper); if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) return 0; switch (nf_ct_l3num(ct)) { case NFPROTO_IPV4: protoff = skb_network_offset(skb) + ip_hdrlen(skb); break; #if IS_ENABLED(CONFIG_IPV6) case NFPROTO_IPV6: { __be16 frag_off; u8 pnum; pnum = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off); if (protoff < 0 || (frag_off & htons(~0x7)) != 0) return 0; break; } #endif default: return 0; } if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && !nf_is_loopback_packet(skb)) { if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); return -1; } } /* We've seen it coming out the other side: confirm it */ return nf_conntrack_confirm(skb) == NF_DROP ? 
	       - 1 : 0;
}

/* Finish conntrack processing for a packet re-injected from userspace
 * via nf_queue: if the entry is still unconfirmed, redo the lookup
 * (and NAT mangling) first, then run the userspace-helper completion
 * step.
 */
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

	if (!nf_ct_is_confirmed(ct)) {
		err = __nf_conntrack_update(net, skb, ct, ctinfo);
		if (err < 0)
			return err;

		/* __nf_conntrack_update may have re-attached the skb */
		ct = nf_ct_get(skb, &ctinfo);
	}

	return nf_confirm_cthelper(skb, ct, ctinfo);
}

/* Fill @dst_tuple with the tuple matching @skb.  Prefers the
 * conntrack already attached to the skb; otherwise parses the headers
 * (NFPROTO_IPV4 here) and looks the tuple up in the default zone,
 * returning the opposite direction's tuple of the match.
 */
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{
	const struct nf_conntrack_tuple *src_tuple;
	const struct nf_conntrack_tuple_hash *hash;
	struct nf_conntrack_tuple srctuple;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
		memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
		return true;
	}

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
			       NFPROTO_IPV4, dev_net(skb->dev),
			       &srctuple))
		return false;

	hash = nf_conntrack_find_get(dev_net(skb->dev),
				     &nf_ct_zone_dflt,
				     &srctuple);
	if (!hash)
		return false;

	ct = nf_ct_tuplehash_to_ctrack(hash);
	src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
	memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
	nf_ct_put(ct);

	return true;
}

/* Bring out ya dead!
*/
/* Scan the global conntrack hash table starting at *bucket and return the
 * first entry for which iter() returns non-zero, with an extra reference
 * taken so the caller may use it after the bucket lock is dropped.
 * *bucket is left at the matching chain so the scan can be resumed.
 * Returns NULL once the whole table has been walked.
 */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;
	spinlock_t *lockp;

	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
		local_bh_disable();
		nf_conntrack_lock(lockp);
		/* Re-check the bound under the lock: nf_conntrack_htable_size
		 * can shrink concurrently (see nf_conntrack_hash_resize).
		 */
		if (*bucket < nf_conntrack_htable_size) {
			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
				/* Only consider ORIGINAL-direction tuple
				 * entries so each conntrack is visited once.
				 */
				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
					continue;
				ct = nf_ct_tuplehash_to_ctrack(h);
				if (iter(ct, data))
					goto found;
			}
		}
		spin_unlock(lockp);
		local_bh_enable();
		cond_resched();
	}

	return NULL;
found:
	/* Pin the entry before releasing the bucket lock. */
	atomic_inc(&ct->ct_general.use);
	spin_unlock(lockp);
	local_bh_enable();
	return ct;
}

/* Invoke iter() on every hashed conntrack entry; entries selected by iter()
 * are deleted by the caller loop below.  If the hash table is resized while
 * we walk it (detected via nf_conntrack_generation), restart from bucket 0
 * so no entry is missed.
 */
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
				  void *data, u32 portid, int report)
{
	unsigned int bucket = 0, sequence;
	struct nf_conn *ct;

	might_sleep();

	for (;;) {
		sequence = read_seqcount_begin(&nf_conntrack_generation);

		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
			/* Time to push up daises...
*/ nf_ct_delete(ct, portid, report); nf_ct_put(ct); cond_resched(); } if (!read_seqcount_retry(&nf_conntrack_generation, sequence)) break; bucket = 0; } } struct iter_data { int (*iter)(struct nf_conn *i, void *data); void *data; struct net *net; }; static int iter_net_only(struct nf_conn *i, void *data) { struct iter_data *d = data; if (!net_eq(d->net, nf_ct_net(i))) return 0; return d->iter(i, d->data); } static void __nf_ct_unconfirmed_destroy(struct net *net) { int cpu; for_each_possible_cpu(cpu) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct ct_pcpu *pcpu; pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); spin_lock_bh(&pcpu->lock); hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { struct nf_conn *ct; ct = nf_ct_tuplehash_to_ctrack(h); /* we cannot call iter() on unconfirmed list, the * owning cpu can reallocate ct->ext at any time. */ set_bit(IPS_DYING_BIT, &ct->status); } spin_unlock_bh(&pcpu->lock); cond_resched(); } } void nf_ct_unconfirmed_destroy(struct net *net) { might_sleep(); if (atomic_read(&net->ct.count) > 0) { __nf_ct_unconfirmed_destroy(net); nf_queue_nf_hook_drop(net); synchronize_net(); } } EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy); void nf_ct_iterate_cleanup_net(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, u32 portid, int report) { struct iter_data d; might_sleep(); if (atomic_read(&net->ct.count) == 0) return; d.iter = iter; d.data = data; d.net = net; nf_ct_iterate_cleanup(iter_net_only, &d, portid, report); } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net); /** * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table * @iter: callback to invoke for each conntrack * @data: data to pass to @iter * * Like nf_ct_iterate_cleanup, but first marks conntracks on the * unconfirmed list as dying (so they will not be inserted into * main table). * * Can only be called in module exit path. 
*/ void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data) { struct net *net; down_read(&net_rwsem); for_each_net(net) { if (atomic_read(&net->ct.count) == 0) continue; __nf_ct_unconfirmed_destroy(net); nf_queue_nf_hook_drop(net); } up_read(&net_rwsem); /* Need to wait for netns cleanup worker to finish, if its * running -- it might have deleted a net namespace from * the global list, so our __nf_ct_unconfirmed_destroy() might * not have affected all namespaces. */ net_ns_barrier(); /* a conntrack could have been unlinked from unconfirmed list * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy(). * This makes sure its inserted into conntrack table. */ synchronize_net(); nf_ct_iterate_cleanup(iter, data, 0, 0); } EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy); static int kill_all(struct nf_conn *i, void *data) { return net_eq(nf_ct_net(i), data); } void nf_conntrack_cleanup_start(void) { conntrack_gc_work.exiting = true; RCU_INIT_POINTER(ip_ct_attach, NULL); } void nf_conntrack_cleanup_end(void) { RCU_INIT_POINTER(nf_ct_hook, NULL); cancel_delayed_work_sync(&conntrack_gc_work.dwork); kvfree(nf_conntrack_hash); nf_conntrack_proto_fini(); nf_conntrack_seqadj_fini(); nf_conntrack_labels_fini(); nf_conntrack_helper_fini(); nf_conntrack_timeout_fini(); nf_conntrack_ecache_fini(); nf_conntrack_tstamp_fini(); nf_conntrack_acct_fini(); nf_conntrack_expect_fini(); kmem_cache_destroy(nf_conntrack_cachep); } /* * Mishearing the voices in his head, our hero wonders how he's * supposed to kill the mall. */ void nf_conntrack_cleanup_net(struct net *net) { LIST_HEAD(single); list_add(&net->exit_list, &single); nf_conntrack_cleanup_net_list(&single); } void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) { int busy; struct net *net; /* * This makes sure all current packets have passed through * netfilter framework. Roll on, two-stage module * delete... 
*/ synchronize_net(); i_see_dead_people: busy = 0; list_for_each_entry(net, net_exit_list, exit_list) { nf_ct_iterate_cleanup(kill_all, net, 0, 0); if (atomic_read(&net->ct.count) != 0) busy = 1; } if (busy) { schedule(); goto i_see_dead_people; } list_for_each_entry(net, net_exit_list, exit_list) { nf_conntrack_proto_pernet_fini(net); nf_conntrack_ecache_pernet_fini(net); nf_conntrack_expect_pernet_fini(net); free_percpu(net->ct.stat); free_percpu(net->ct.pcpu_lists); } } void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) { struct hlist_nulls_head *hash; unsigned int nr_slots, i; if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) return NULL; BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL | __GFP_ZERO); if (hash && nulls) for (i = 0; i < nr_slots; i++) INIT_HLIST_NULLS_HEAD(&hash[i], i); return hash; } EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); int nf_conntrack_hash_resize(unsigned int hashsize) { int i, bucket; unsigned int old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; if (!hashsize) return -EINVAL; hash = nf_ct_alloc_hashtable(&hashsize, 1); if (!hash) return -ENOMEM; old_size = nf_conntrack_htable_size; if (old_size == hashsize) { kvfree(hash); return 0; } local_bh_disable(); nf_conntrack_all_lock(); write_seqcount_begin(&nf_conntrack_generation); /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the locks. 
*/ for (i = 0; i < nf_conntrack_htable_size; i++) { while (!hlist_nulls_empty(&nf_conntrack_hash[i])) { h = hlist_nulls_entry(nf_conntrack_hash[i].first, struct nf_conntrack_tuple_hash, hnnode); ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); bucket = __hash_conntrack(nf_ct_net(ct), &h->tuple, hashsize); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } old_size = nf_conntrack_htable_size; old_hash = nf_conntrack_hash; nf_conntrack_hash = hash; nf_conntrack_htable_size = hashsize; write_seqcount_end(&nf_conntrack_generation); nf_conntrack_all_unlock(); local_bh_enable(); synchronize_net(); kvfree(old_hash); return 0; } int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp) { unsigned int hashsize; int rc; if (current->nsproxy->net_ns != &init_net) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. */ if (!nf_conntrack_hash) return param_set_uint(val, kp); rc = kstrtouint(val, 0, &hashsize); if (rc) return rc; return nf_conntrack_hash_resize(hashsize); } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); static __always_inline unsigned int total_extension_size(void) { /* remember to add new extensions below */ BUILD_BUG_ON(NF_CT_EXT_NUM > 9); return sizeof(struct nf_ct_ext) + sizeof(struct nf_conn_help) #if IS_ENABLED(CONFIG_NF_NAT) + sizeof(struct nf_conn_nat) #endif + sizeof(struct nf_conn_seqadj) + sizeof(struct nf_conn_acct) #ifdef CONFIG_NF_CONNTRACK_EVENTS + sizeof(struct nf_conntrack_ecache) #endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + sizeof(struct nf_conn_tstamp) #endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT + sizeof(struct nf_conn_timeout) #endif #ifdef CONFIG_NF_CONNTRACK_LABELS + sizeof(struct nf_conn_labels) #endif #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + sizeof(struct nf_conn_synproxy) #endif ; }; int nf_conntrack_init_start(void) { unsigned long nr_pages = totalram_pages(); int max_factor = 8; int ret = -ENOMEM; int i; /* struct nf_ct_ext uses u8 to store offsets/size */ 
BUILD_BUG_ON(total_extension_size() > 255u); seqcount_init(&nf_conntrack_generation); for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock_init(&nf_conntrack_locks[i]); if (!nf_conntrack_htable_size) { /* Idea from tcp.c: use 1/16384 of memory. * On i386: 32MB machine has 512 buckets. * >= 1GB machines have 16384 buckets. * >= 4GB machines have 65536 buckets. */ nf_conntrack_htable_size = (((nr_pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head)); if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE))) nf_conntrack_htable_size = 65536; else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) nf_conntrack_htable_size = 16384; if (nf_conntrack_htable_size < 32) nf_conntrack_htable_size = 32; /* Use a max. factor of four by default to get the same max as * with the old struct list_heads. When a table size is given * we use the old value of 8 to avoid reducing the max. * entries. */ max_factor = 4; } nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1); if (!nf_conntrack_hash) return -ENOMEM; nf_conntrack_max = max_factor * nf_conntrack_htable_size; nf_conntrack_cachep = kmem_cache_create("nf_conntrack", sizeof(struct nf_conn), NFCT_INFOMASK + 1, SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); if (!nf_conntrack_cachep) goto err_cachep; ret = nf_conntrack_expect_init(); if (ret < 0) goto err_expect; ret = nf_conntrack_acct_init(); if (ret < 0) goto err_acct; ret = nf_conntrack_tstamp_init(); if (ret < 0) goto err_tstamp; ret = nf_conntrack_ecache_init(); if (ret < 0) goto err_ecache; ret = nf_conntrack_timeout_init(); if (ret < 0) goto err_timeout; ret = nf_conntrack_helper_init(); if (ret < 0) goto err_helper; ret = nf_conntrack_labels_init(); if (ret < 0) goto err_labels; ret = nf_conntrack_seqadj_init(); if (ret < 0) goto err_seqadj; ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; conntrack_gc_work_init(&conntrack_gc_work); queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ); return 0; err_proto: 
nf_conntrack_seqadj_fini(); err_seqadj: nf_conntrack_labels_fini(); err_labels: nf_conntrack_helper_fini(); err_helper: nf_conntrack_timeout_fini(); err_timeout: nf_conntrack_ecache_fini(); err_ecache: nf_conntrack_tstamp_fini(); err_tstamp: nf_conntrack_acct_fini(); err_acct: nf_conntrack_expect_fini(); err_expect: kmem_cache_destroy(nf_conntrack_cachep); err_cachep: kvfree(nf_conntrack_hash); return ret; } static struct nf_ct_hook nf_conntrack_hook = { .update = nf_conntrack_update, .destroy = destroy_conntrack, .get_tuple_skb = nf_conntrack_get_tuple_skb, }; void nf_conntrack_init_end(void) { /* For use by REJECT target */ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook); } /* * We need to use special "null" values, not used in hash table */ #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) #define DYING_NULLS_VAL ((1<<30)+1) #define TEMPLATE_NULLS_VAL ((1<<30)+2) int nf_conntrack_init_net(struct net *net) { int ret = -ENOMEM; int cpu; BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER); BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS); atomic_set(&net->ct.count, 0); net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); if (!net->ct.pcpu_lists) goto err_stat; for_each_possible_cpu(cpu) { struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); spin_lock_init(&pcpu->lock); INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); } net->ct.stat = alloc_percpu(struct ip_conntrack_stat); if (!net->ct.stat) goto err_pcpu_lists; ret = nf_conntrack_expect_pernet_init(net); if (ret < 0) goto err_expect; nf_conntrack_acct_pernet_init(net); nf_conntrack_tstamp_pernet_init(net); nf_conntrack_ecache_pernet_init(net); nf_conntrack_helper_pernet_init(net); nf_conntrack_proto_pernet_init(net); return 0; err_expect: free_percpu(net->ct.stat); err_pcpu_lists: free_percpu(net->ct.pcpu_lists); err_stat: return ret; }
756149.c
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/io/retry_strategy.h>

#include <aws/testing/aws_test_harness.h>

#include <aws/common/clock.h>
#include <aws/common/condition_variable.h>
#include <aws/io/event_loop.h>

/* Shared state between the test thread and the retry callbacks.
 * mutex/cvar guard the counters; the test thread waits on cvar until
 * failure_error_code becomes non-zero.
 */
struct exponential_backoff_test_data {
    size_t retry_count;        /* total times the retry-ready callback ran */
    size_t client_error_count; /* retries to classify as CLIENT_ERROR first */
    int failure_error_code;    /* last error from schedule_retry, 0 = none yet */
    struct aws_mutex mutex;
    struct aws_condition_variable cvar;
};

/* Retry-ready callback: counts the invocation, then immediately schedules
 * another retry.  While client_error_count is non-zero the retry is reported
 * as a CLIENT_ERROR (which, per the strategy contract exercised by these
 * tests, should not consume retry budget).  When scheduling finally fails,
 * the error is recorded and the waiting test thread is woken.
 */
static void s_too_many_retries_test_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
    (void)error_code;
    struct exponential_backoff_test_data *test_data = user_data;

    enum aws_retry_error_type error_type = AWS_RETRY_ERROR_TYPE_SERVER_ERROR;

    aws_mutex_lock(&test_data->mutex);
    test_data->retry_count += 1;

    if (test_data->client_error_count) {
        error_type = AWS_RETRY_ERROR_TYPE_CLIENT_ERROR;
        test_data->client_error_count--;
    }
    aws_mutex_unlock(&test_data->mutex);

    if (aws_retry_strategy_schedule_retry(token, error_type, s_too_many_retries_test_on_retry_ready, user_data)) {
        /* Out of retries (or other failure): record why and notify the test. */
        aws_mutex_lock(&test_data->mutex);
        test_data->failure_error_code = aws_last_error();
        aws_mutex_unlock(&test_data->mutex);
        aws_retry_token_release(token);
        aws_condition_variable_notify_all(&test_data->cvar);
    }
}

/* Token-acquired callback: kicks off the first retry as a SERVER_ERROR. */
static void s_too_many_retries_test_token_acquired(
    struct aws_retry_strategy *retry_strategy,
    int error_code,
    struct aws_retry_token *token,
    void *user_data) {
    (void)retry_strategy;
    (void)error_code;

    aws_retry_strategy_schedule_retry(
        token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_too_many_retries_test_on_retry_ready, user_data);
}

/* Condition-variable predicate: true once a scheduling failure was recorded. */
static bool s_retry_has_failed(void *arg) {
    struct exponential_backoff_test_data *test_data = arg;

    return test_data->failure_error_code != AWS_OP_SUCCESS;
}

/* Common driver: with max_retries = 3 and the given jitter mode, keep
 * retrying until the strategy refuses, then verify exactly max_retries
 * attempts were made and that the failure is AWS_IO_MAX_RETRIES_EXCEEDED.
 */
static int s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(
    struct aws_allocator *allocator,
    enum aws_exponential_backoff_jitter_mode jitter_mode) {
    aws_io_library_init(allocator);

    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);

    struct aws_exponential_backoff_retry_options config = {
        .max_retries = 3,
        .jitter_mode = jitter_mode,
        .el_group = el_group,
    };

    struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config);
    ASSERT_NOT_NULL(retry_strategy);

    struct exponential_backoff_test_data test_data = {
        .retry_count = 0,
        .failure_error_code = 0,
        .mutex = AWS_MUTEX_INIT,
        .cvar = AWS_CONDITION_VARIABLE_INIT,
    };

    /* Hold the mutex across acquire + wait so no wakeup can be missed. */
    ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex));
    ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token(
        retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0));
    ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data));
    aws_mutex_unlock(&test_data.mutex);

    ASSERT_UINT_EQUALS(config.max_retries, test_data.retry_count);
    ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code);

    aws_retry_strategy_release(retry_strategy);
    aws_event_loop_group_release(el_group);
    ASSERT_SUCCESS(aws_global_thread_creator_shutdown_wait_for(10));
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}

/* Test that no jitter mode exponential back-off fails after max retries are exceeded. */
static int s_test_exponential_backoff_retry_too_many_retries_no_jitter_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(
        allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_NONE);
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_too_many_retries_no_jitter,
    s_test_exponential_backoff_retry_too_many_retries_no_jitter_fn)

/* Test that full jitter mode exponential back-off fails after max retries are exceeded.
*/
/* Thin wrapper: runs the shared too-many-retries driver with FULL jitter. */
static int s_test_exponential_backoff_retry_too_many_retries_full_jitter_fn(
    struct aws_allocator *allocator,
    void *ctx) {
    (void)ctx;

    return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(
        allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_FULL);
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_too_many_retries_full_jitter,
    s_test_exponential_backoff_retry_too_many_retries_full_jitter_fn)

/* Test that decorrelated jitter mode exponential back-off fails after max retries are exceeded. */
static int s_test_exponential_backoff_retry_too_many_retries_decorrelated_jitter_fn(
    struct aws_allocator *allocator,
    void *ctx) {
    (void)ctx;

    return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(
        allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED);
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_too_many_retries_decorrelated_jitter,
    s_test_exponential_backoff_retry_too_many_retries_decorrelated_jitter_fn)

/* Test that default jitter mode exponential back-off fails after max retries are exceeded. */
static int s_test_exponential_backoff_retry_too_many_retries_default_jitter_fn(
    struct aws_allocator *allocator,
    void *ctx) {
    (void)ctx;

    return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(
        allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT);
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_too_many_retries_default_jitter,
    s_test_exponential_backoff_retry_too_many_retries_default_jitter_fn)

/* Test that client failures do not count against the max retry budget.
*/
/* The retry-ready callback classifies the first client_error_count (= 2)
 * retries as CLIENT_ERROR.  If client errors do not consume retry budget,
 * the callback must run max_retries + 2 times before the strategy gives up
 * with AWS_IO_MAX_RETRIES_EXCEEDED.
 */
static int s_test_exponential_backoff_retry_client_errors_do_not_count_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    aws_io_library_init(allocator);

    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);

    struct aws_exponential_backoff_retry_options config = {
        .el_group = el_group,
        .max_retries = 3,
    };

    struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config);
    ASSERT_NOT_NULL(retry_strategy);

    struct exponential_backoff_test_data test_data = {
        .retry_count = 0,
        .failure_error_code = 0,
        .mutex = AWS_MUTEX_INIT,
        .cvar = AWS_CONDITION_VARIABLE_INIT,
        .client_error_count = 2, /* two retries will be reported as client errors */
    };

    /* Hold the mutex across acquire + wait so no wakeup can be missed. */
    ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex));
    ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token(
        retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0));
    ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data));
    aws_mutex_unlock(&test_data.mutex);

    /* max_retries server errors + 2 client errors = total callback runs. */
    ASSERT_UINT_EQUALS(config.max_retries + 2, test_data.retry_count);
    ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code);

    aws_retry_strategy_release(retry_strategy);
    aws_event_loop_group_release(el_group);
    ASSERT_SUCCESS(aws_global_thread_creator_shutdown_wait_for(10));
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_client_errors_do_not_count,
    s_test_exponential_backoff_retry_client_errors_do_not_count_fn)

/* Test that in no jitter mode, exponential backoff is actually applied as documented.
*/
/* With jitter disabled, the wall-clock time for 3 retries should be at
 * least scale * (1 + 2 + 4), i.e. the sum of the doubling back-off waits.
 *
 * NOTE(review): config.backoff_scale_factor_ms is never assigned here, so
 * unless the strategy constructor writes the applied default back into
 * `config`, backoff_scale_factor is 0 and the timing assertion below is
 * vacuously true — verify against aws_retry_strategy_new_exponential_backoff.
 */
static int s_test_exponential_backoff_retry_no_jitter_time_taken_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    aws_io_library_init(allocator);

    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);

    struct aws_exponential_backoff_retry_options config = {
        .max_retries = 3,
        .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE,
        .el_group = el_group,
    };

    struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config);
    ASSERT_NOT_NULL(retry_strategy);

    struct exponential_backoff_test_data test_data = {
        .retry_count = 0,
        .failure_error_code = 0,
        .mutex = AWS_MUTEX_INIT,
        .cvar = AWS_CONDITION_VARIABLE_INIT,
    };

    /* Timestamp before the retry sequence starts... */
    uint64_t before_time = 0;
    ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&before_time));
    ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex));
    ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token(
        retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0));
    ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data));
    aws_mutex_unlock(&test_data.mutex);
    /* ...and after it has exhausted all retries. */
    uint64_t after_time = 0;
    ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&after_time));
    uint64_t backoff_scale_factor =
        aws_timestamp_convert(config.backoff_scale_factor_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
    /* Without jitter the waits double: 1x, 2x, 4x the scale factor. */
    uint64_t expected_interval = (1 * backoff_scale_factor) + (2 * backoff_scale_factor) + (4 * backoff_scale_factor);
    ASSERT_TRUE(expected_interval <= after_time - before_time);

    ASSERT_UINT_EQUALS(config.max_retries, test_data.retry_count);
    ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code);

    aws_retry_strategy_release(retry_strategy);
    aws_event_loop_group_release(el_group);
    ASSERT_SUCCESS(aws_global_thread_creator_shutdown_wait_for(10));
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(
    test_exponential_backoff_retry_no_jitter_time_taken,
    s_test_exponential_backoff_retry_no_jitter_time_taken_fn)

/*
verify that invalid options cause a failure at creation time. */
/* max_retries = 64 is expected to be rejected with
 * AWS_ERROR_INVALID_ARGUMENT — presumably because 64 doublings would
 * overflow a 64-bit back-off computation; confirm the limit against the
 * strategy implementation.
 */
static int s_test_exponential_backoff_retry_invalid_options_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    aws_io_library_init(allocator);

    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);

    struct aws_exponential_backoff_retry_options config = {
        .max_retries = 64,
        .el_group = el_group,
    };

    /* Creation must fail and report the invalid argument. */
    struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config);
    ASSERT_NULL(retry_strategy);
    ASSERT_UINT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error());

    aws_event_loop_group_release(el_group);
    ASSERT_SUCCESS(aws_global_thread_creator_shutdown_wait_for(10));
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(test_exponential_backoff_retry_invalid_options, s_test_exponential_backoff_retry_invalid_options_fn)
273301.c
/*--------------------------------------------------------------------------- demo_server_connection_list_test.c - demo_server_connection_list component tester Generated from demo_server_connection_list.icl by icl_gen using GSL/4. Copyright (c) 1996-2009 iMatix Corporation All rights reserved. This file is licensed under the BSD license as follows: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of iMatix Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY IMATIX CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IMATIX CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*---------------------------------------------------------------------------*/

#include "icl.h"                        //  iCL base classes
#include "demo_server_connection_list.h"  //  Definitions for our class
#include "version.h"

/* Generated selftest driver for the demo_server_connection_list component:
 * parses -q/-v/-h, runs the class selftest, and reports the result.
 * Exit status is EXIT_SUCCESS unless the arguments are invalid (the
 * selftest itself aborts on failure inside *_selftest()).
 */
int main (int argc, char *argv[])
{
    int
        argn;                           //  Argument number
    Bool
        args_ok = TRUE,                 //  Were the arguments okay?
        quiet_mode = FALSE;             //  -q means suppress messages
    char
        **argparm;                      //  Argument parameter to pick-up

    argparm = NULL;                     //  Argument parameter to pick-up
    //  Note: no option in this generated tester actually takes a value, so
    //  argparm stays NULL; the collection logic below is template boilerplate.
    for (argn = 1; argn < argc; argn++) {
        //  If argparm is set, we have to collect an argument parameter
        if (argparm) {
            if (*argv [argn] != '-') {  //  Parameter can't start with '-'
                *argparm = argv [argn];
                argparm = NULL;
            }
            else {
                args_ok = FALSE;
                break;
            }
        }
        else
        if (*argv [argn] == '-') {
            switch (argv [argn][1]) {
                case 'q':
                    quiet_mode = TRUE;
                    break;
                case 'v':
                    printf (PRODUCT "\n");
                    printf (COPYRIGHT "\n");
                    printf (BUILDMODEL "\n");
                    printf ("Built on: " BUILDDATE "\n");
                    printf ("Compiler: " CCOPTS "\n");
                    exit (EXIT_SUCCESS);
                case 'h':
                    printf (PRODUCT "\n");
                    printf (COPYRIGHT "\n");
                    printf ("Options:\n");
                    printf (" -q - Quiet mode: no messages\n");
                    printf (" -v - Show version information\n");
                    printf (" -h - Show summary of command-line options\n");
                    exit (EXIT_SUCCESS);
                //  Anything else is an error
                default:
                    args_ok = FALSE;
            }
        }
        else {
            args_ok = FALSE;
            break;
        }
    }
    //  Set quiet console mode before initialise so we don't get memory
    //  allocator message in quiet mode.
    if (quiet_mode)
        icl_console_mode (ICL_CONSOLE_QUIET, TRUE);

    //  Initialise global class so we can use the console
    icl_system_initialise (argc, argv);

    //  If there was a missing parameter or an argument error, quit
    if (argparm) {
        icl_console_print ("E: argument missing - use '-h' option for help");
        exit (EXIT_FAILURE);
    }
    else
    if (!args_ok) {
        icl_console_print ("E: invalid arguments - use '-h' option for help");
        exit (EXIT_FAILURE);
    }
    //  Run the component's built-in selftest; it aborts on failure.
    demo_server_connection_list_selftest ();
    icl_console_print ("I: demo_server_connection_list OK");

    icl_system_terminate ();            //  Terminate all classes
    return (EXIT_SUCCESS);
}
846830.c
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * [email protected]. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * Small program to illustrate the "misalignment" of members within a compound * datatype, in a datatype fixed by H5Tget_native_type(). */ #include "hdf5.h" #include "H5private.h" #include "h5tools.h" const char *fname = "talign.h5"; const char *setname = "align"; /* * This program assumes that there is no extra space between the members 'Ok' * and 'Not Ok', (there shouldn't be because they are of the same atomic type * H5T_NATIVE_FLOAT, and they are placed within the compound next to one * another per construction) */ int main(void) { hid_t fil = H5I_INVALID_HID, spc = H5I_INVALID_HID, set = H5I_INVALID_HID; hid_t cs6 = H5I_INVALID_HID, cmp = H5I_INVALID_HID, fix = H5I_INVALID_HID; hid_t cmp1 = H5I_INVALID_HID, cmp2 = H5I_INVALID_HID, cmp3 = H5I_INVALID_HID; hid_t plist = H5I_INVALID_HID; hid_t array_dt = H5I_INVALID_HID; hsize_t dim[2]; hsize_t cdim[4]; char string5[5]; float fok[2] = {1234.0f, 2341.0f}; float fnok[2] = {5678.0f, 6785.0f}; float *fptr = NULL; char *data = NULL; int result = 0; herr_t error = 1; HDprintf("%-70s", "Testing alignment in compound datatypes"); HDstrcpy(string5, "Hi!"); HDunlink(fname); fil = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); if (fil < 0) { HDputs("*FAILED*"); return 1; } H5E_BEGIN_TRY { (void)H5Ldelete(fil, setname, H5P_DEFAULT); } H5E_END_TRY; cs6 = 
H5Tcopy(H5T_C_S1); H5Tset_size(cs6, sizeof(string5)); H5Tset_strpad(cs6, H5T_STR_NULLPAD); cmp = H5Tcreate(H5T_COMPOUND, sizeof(fok) + sizeof(string5) + sizeof(fnok)); H5Tinsert(cmp, "Awkward length", 0, cs6); cdim[0] = sizeof(fok) / sizeof(float); array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, cdim); H5Tinsert(cmp, "Ok", sizeof(string5), array_dt); H5Tclose(array_dt); cdim[0] = sizeof(fnok) / sizeof(float); array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, cdim); H5Tinsert(cmp, "Not Ok", sizeof(fok) + sizeof(string5), array_dt); H5Tclose(array_dt); fix = H5Tget_native_type(cmp, H5T_DIR_DEFAULT); cmp1 = H5Tcreate(H5T_COMPOUND, sizeof(fok)); cdim[0] = sizeof(fok) / sizeof(float); array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, cdim); H5Tinsert(cmp1, "Ok", 0, array_dt); H5Tclose(array_dt); cmp2 = H5Tcreate(H5T_COMPOUND, sizeof(string5)); H5Tinsert(cmp2, "Awkward length", 0, cs6); cmp3 = H5Tcreate(H5T_COMPOUND, sizeof(fnok)); cdim[0] = sizeof(fnok) / sizeof(float); array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, cdim); H5Tinsert(cmp3, "Not Ok", 0, array_dt); H5Tclose(array_dt); plist = H5Pcreate(H5P_DATASET_XFER); if ((error = H5Pset_preserve(plist, 1)) < 0) goto out; /* * Create a small dataset, and write data into it we write each field * in turn so that we are avoid alignment issues at this point */ dim[0] = 1; spc = H5Screate_simple(1, dim, NULL); set = H5Dcreate2(fil, setname, cmp, spc, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); H5Dwrite(set, cmp1, spc, H5S_ALL, plist, fok); H5Dwrite(set, cmp2, spc, H5S_ALL, plist, string5); H5Dwrite(set, cmp3, spc, H5S_ALL, plist, fnok); H5Dclose(set); /* Now open the set, and read it back in */ data = (char *)HDmalloc(H5Tget_size(fix)); if (!data) { HDperror("malloc() failed"); HDabort(); } set = H5Dopen2(fil, setname, H5P_DEFAULT); H5Dread(set, fix, spc, H5S_ALL, H5P_DEFAULT, data); fptr = (float *)((void *)(data + H5Tget_member_offset(fix, 1))); H5Dclose(set); out: if (error < 0) { result = 1; HDputs("*FAILED - HDF5 library 
error*"); } else if (!(H5_FLT_ABS_EQUAL(fok[0], fptr[0])) || !(H5_FLT_ABS_EQUAL(fok[1], fptr[1])) || !(H5_FLT_ABS_EQUAL(fnok[0], fptr[2])) || !(H5_FLT_ABS_EQUAL(fnok[1], fptr[3]))) { char *mname; result = 1; mname = H5Tget_member_name(fix, 0); HDprintf("%14s (%2d) %6s = %s\n", mname ? mname : "(null)", (int)H5Tget_member_offset(fix, 0), string5, (char *)(data + H5Tget_member_offset(fix, 0))); if (mname) H5free_memory(mname); fptr = (float *)((void *)(data + H5Tget_member_offset(fix, 1))); mname = H5Tget_member_name(fix, 1); HDprintf("Data comparison:\n" "%14s (%2d) %6f = %f\n" " %6f = %f\n", mname ? mname : "(null)", (int)H5Tget_member_offset(fix, 1), (double)fok[0], (double)fptr[0], (double)fok[1], (double)fptr[1]); if (mname) H5free_memory(mname); fptr = (float *)((void *)(data + H5Tget_member_offset(fix, 2))); mname = H5Tget_member_name(fix, 2); HDprintf("%14s (%2d) %6f = %f\n" " %6f = %6f\n", mname ? mname : "(null)", (int)H5Tget_member_offset(fix, 2), (double)fnok[0], (double)fptr[0], (double)fnok[1], (double)fptr[1]); if (mname) H5free_memory(mname); fptr = (float *)((void *)(data + H5Tget_member_offset(fix, 1))); HDprintf("\n" "Short circuit\n" " %6f = %f\n" " %6f = %f\n" " %6f = %f\n" " %6f = %f\n", (double)fok[0], (double)fptr[0], (double)fok[1], (double)fptr[1], (double)fnok[0], (double)fptr[2], (double)fnok[1], (double)fptr[3]); HDputs("*FAILED - compound type alignmnent problem*"); } else { HDputs(" PASSED"); } if (data) HDfree(data); H5Sclose(spc); H5Tclose(cs6); H5Tclose(cmp); H5Tclose(fix); H5Tclose(cmp1); H5Tclose(cmp2); H5Tclose(cmp3); H5Pclose(plist); H5Fclose(fil); HDunlink(fname); HDfflush(stdout); return result; }
254447.c
/***********************************************************
Copyright 1990, by Alfalfa Software Incorporated, Cambridge, Massachusetts.

                        All Rights Reserved

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that Alfalfa's name not be used in
advertising or publicity pertaining to distribution of the software
without specific, written prior permission.

ALPHALPHA DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL ALPHALPHA BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

If you make any modifications, bugfixes or other changes to this software
we'd appreciate it if you could send a copy to us so we can keep things
up-to-date.  Many thanks.
                                Kee Hinckley
                                Alfalfa Software, Inc.
                                267 Allston St., #3
                                Cambridge, MA 02139  USA
                                [email protected]
******************************************************************/

/*
 * genlib.c - message-catalog manipulation library for gencat(1).
 *
 * Provides an in-memory model of an XPG message catalog (a sorted,
 * doubly linked list of sets, each holding a sorted list of messages),
 * plus routines to parse catalog source text (MCParse), read/write the
 * binary catalog format in big-endian byte order (MCReadCat/MCWriteCat),
 * and emit symbolic-constant headers (MCWriteConst).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/usr.bin/gencat/genlib.c,v 1.13 2002/12/24 07:40:10 davidxu Exp $");

#include <ctype.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "msgcat.h"
#include "gencat.h"

#include <machine/endian.h>
/* libkern/OSByteOrder is needed for the 64 bit byte swap */
#include <libkern/OSByteOrder.h>
#ifndef htonll
#define htonll(x) OSSwapHostToBigInt64(x)
#define ntohll(x) OSSwapBigToHostInt64(x)
#endif

/*
 * Parser state shared by the diagnostics and the line reader:
 * curline is the text of the line most recently returned by
 * gencat_getline(); lineno is its 1-based number in the input.
 */
static char *curline = NULL;
static long lineno = 0;

/*
 * Print a diagnostic naming the current line.  If cptr points into
 * curline, also print a caret under the offending column.
 */
static void
warning(char *cptr, const char *msg)
{
    warnx("%s on line %ld\n%s", msg, lineno,
          (curline == NULL ? "" : curline));
    if (cptr) {
        char *tptr;
        /* Indent with spaces up to the offending position, then mark it. */
        for (tptr = curline; tptr < cptr; ++tptr)
            putc(' ', stderr);
        fprintf(stderr, "^\n");
    }
}

/* Fatal version of warning(): report and terminate with status 1. */
static void
error(char *cptr, const char *msg)
{
    warning(cptr, msg);
    exit(1);
}

/* Abort: binary catalog did not match the expected on-disk layout. */
static void
corrupt(void)
{
    error(NULL, "corrupt message catalog");
}

/* Abort: an allocation failed. */
static void
nomem(void)
{
    error(NULL, "out of memory");
}

/*
 * Read the next '\n'-terminated line from fd into the (growing)
 * static buffer curline and return it, or NULL at end of input.
 * Uses a private BUFSIZ read buffer whose bptr/bend cursors persist
 * across calls, so unconsumed bytes carry over to the next line.
 * The returned string excludes the newline and stays valid only
 * until the next call.
 */
static char *
gencat_getline(int fd)
{
    static size_t curlen = BUFSIZ;      /* current capacity of curline */
    static char buf[BUFSIZ], *bptr = buf, *bend = buf;
    char *cptr, *cend;
    long buflen;

    if (!curline) {
        curline = (char *) malloc(curlen);
        if (!curline)
            nomem();
    }
    ++lineno;

    cptr = curline;
    cend = curline + curlen;
    while (TRUE) {
        /* Copy buffered bytes until newline, buffer drain, or line full. */
        for (; bptr < bend && cptr < cend; ++cptr, ++bptr) {
            if (*bptr == '\n') {
                *cptr = '\0';
                ++bptr;
                return(curline);
            } else
                *cptr = *bptr;
        }
        if (bptr == bend) {
            /* Read buffer exhausted: refill from the descriptor. */
            buflen = read(fd, buf, BUFSIZ);
            if (buflen <= 0) {
                /* EOF (or read error): flush any partial final line. */
                if (cptr > curline) {
                    *cptr = '\0';
                    return(curline);
                }
                return(NULL);
            }
            bend = buf + buflen;
            bptr = buf;
        }
        if (cptr == cend) {
            /*
             * Line longer than the buffer: double it.
             * NOTE(review): the classic p = realloc(p, ...) pattern
             * would leak on failure, but nomem() exits, so no caller
             * ever sees the clobbered pointer.
             */
            cptr = curline = (char *) realloc(curline, curlen *= 2);
            if (!curline)
                nomem();
            cend = curline + curlen;
        }
    }
}

/*
 * Return the next whitespace-delimited word starting at cptr, copied
 * into a static buffer (valid until the next call).
 * NOTE(review): the copy loop is not bounded by MAXTOKEN — a word
 * longer than MAXTOKEN overruns the static tok[] buffer; verify
 * callers only feed trusted catalog sources.
 */
static char *
token(char *cptr)
{
    static char tok[MAXTOKEN+1];
    char *tptr = tok;

    while (*cptr && isspace((unsigned char)*cptr))
        ++cptr;
    while (*cptr && !isspace((unsigned char)*cptr))
        *tptr++ = *cptr++;
    *tptr = '\0';
    return(tok);
}

/*
 * Skip required whitespace; warn (but continue at the same position)
 * if the next character is not whitespace.
 */
static char *
wskip(char *cptr)
{
    if (!*cptr || !isspace((unsigned char)*cptr)) {
        warning(cptr, "expected a space");
        return(cptr);
    }
    while (*cptr && isspace((unsigned char)*cptr))
        ++cptr;
    return(cptr);
}

/*
 * Skip a required non-whitespace token; warn if positioned on
 * whitespace (or end of line) instead.
 */
static char *
cskip(char *cptr)
{
    if (!*cptr || isspace((unsigned char)*cptr)) {
        warning(cptr, "wasn't expecting a space");
        return(cptr);
    }
    while (*cptr && !isspace((unsigned char)*cptr))
        ++cptr;
    return(cptr);
}

/*
 * Decode the message text starting at cptr into a static buffer and
 * return it.  Handles the optional surrounding quote character,
 * backslash escapes (\n, \t, \v, \b, \r, \f, \", \', \\, up to three
 * octal digits), and backslash-newline continuation lines (fetched
 * via gencat_getline, growing the buffer as needed).
 * NOTE(review): needq is computed but never read afterwards —
 * presumably leftover from stricter quote matching; confirm upstream.
 */
static char *
getmsg(int fd, char *cptr, char quote)
{
    static char *msg = NULL;    /* reused result buffer */
    static size_t msglen = 0;   /* its capacity */
    size_t clen, i;
    char *tptr;
    int needq;

    if (quote && *cptr == quote) {
        needq = TRUE;
        ++cptr;
    } else
        needq = FALSE;

    /* Ensure capacity for the remainder of the current line. */
    clen = strlen(cptr) + 1;
    if (clen > msglen) {
        if (msglen)
            msg = (char *) realloc(msg, clen);
        else
            msg = (char *) malloc(clen);
        if (!msg)
            nomem();
        msglen = clen;
    }
    tptr = msg;

    while (*cptr) {
        if (quote && *cptr == quote) {
            char *tmp;
            tmp = cptr+1;
            /* A closing quote must be followed only by whitespace. */
            if (*tmp && (!isspace((unsigned char)*tmp) || *wskip(tmp))) {
                warning(cptr, "unexpected quote character, ignoring");
                *tptr++ = *cptr++;
            } else {
                *cptr = '\0';   /* terminate scan at the closing quote */
            }
        } else if (*cptr == '\\') {
            ++cptr;
            switch (*cptr) {
                case '\0':
                    /* Backslash at end of line: continue on next line. */
                    cptr = gencat_getline(fd);
                    if (!cptr)
                        error(NULL, "premature end of file");
                    msglen += strlen(cptr);
                    i = tptr - msg;
                    msg = (char *) realloc(msg, msglen);
                    if (!msg)
                        nomem();
                    tptr = msg + i;     /* rebase into the new buffer */
                    break;

                /* Single-character escape: emit CH, consume CS. */
#define CASEOF(CS, CH) \
                case CS: \
                    *tptr++ = CH; \
                    ++cptr; \
                    break;

                CASEOF('n', '\n')
                CASEOF('t', '\t')
                CASEOF('v', '\v')
                CASEOF('b', '\b')
                CASEOF('r', '\r')
                CASEOF('f', '\f')
                CASEOF('"', '"')
                CASEOF('\'', '\'')
                CASEOF('\\', '\\')

                default:
                    if (isdigit((unsigned char)*cptr)) {
                        /* Up to three octal digits build one byte. */
                        *tptr = 0;
                        for (i = 0; i < 3; ++i) {
                            if (!isdigit((unsigned char)*cptr))
                                break;
                            if (*cptr > '7')
                                warning(cptr, "octal number greater than 7?!");
                            *tptr *= 8;
                            *tptr += (*cptr - '0');
                            ++cptr;
                        }
                        ++tptr;
                    } else {
                        warning(cptr, "unrecognized escape sequence");
                    }
            }
        } else {
            *tptr++ = *cptr++;
        }
    }
    *tptr = '\0';
    return(msg);
}

/* strdup() that aborts instead of returning NULL on failure. */
static char *
dupstr(const char *ostr)
{
    char *nstr;

    nstr = strdup(ostr);
    if (!nstr)
        error(NULL, "unable to allocate storage");
    return(nstr);
}

/*
 * The Global Stuff
 *
 * In-memory catalog model: a catT owns a doubly linked list of setT
 * (sorted by setId), each of which owns a doubly linked list of msgT
 * (sorted by msgId).  hconst carries the optional symbolic-constant
 * name for MCWriteConst(); offset is scratch used by MCWriteCat().
 */

typedef struct _msgT {
    long msgId;
    char *str;
    char *hconst;
    long offset;
    struct _msgT *prev, *next;
} msgT;

typedef struct _setT {
    long setId;
    char *hconst;
    msgT *first, *last;
    struct _setT *prev, *next;
} setT;

typedef struct {
    setT *first, *last;
} catT;

static setT *curSet;    /* set most recently added/selected by MCAddSet */
static catT *cat;       /* the single catalog being built */

/*
 * Find the current byte order.  There are of course some others, but
 * this will do for now.  Note that all we care about is "long".
 */
long
MCGetByteOrder(void)
{
    long l = 0x00010203;
    char *cptr = (char *) &l;

    if (cptr[0] == 0 && cptr[1] == 1 && cptr[2] == 2 && cptr[3] == 3)
        return MC68KByteOrder;      /* big-endian */
    else
        return MCn86ByteOrder;      /* little-endian */
}

/*
 * Parse catalog source text from fd into the global catalog.
 * Recognizes the directives $set, $delset, $quote, "$ #CONST"
 * (symbolic-constant annotation for the next message), plain "$ "
 * comments, numbered message lines "ID text", and "#" auto-numbered
 * message lines.  A numbered line with no text deletes that message.
 */
void
MCParse(int fd)
{
    char *cptr, *str;
    int setid = 1, msgid = 0;   /* defaults when no $set seen yet */
    char hconst[MAXTOKEN+1];    /* pending symbolic constant name */
    char quote = 0;             /* current $quote char, 0 = none */

    if (!cat) {
        cat = (catT *) malloc(sizeof(catT));
        if (!cat)
            nomem();
        bzero(cat, sizeof(catT));
    }

    hconst[0] = '\0';
    while ((cptr = gencat_getline(fd)) != NULL) {
        if (*cptr == '$') {
            ++cptr;
            if (strncmp(cptr, "set", 3) == 0) {
                /* $set N [# CONST]: open (or create) set N. */
                cptr += 3;
                cptr = wskip(cptr);
                setid = atoi(cptr);
                cptr = cskip(cptr);
                if (*cptr)
                    cptr = wskip(cptr);
                if (*cptr == '#') {
                    ++cptr;
                    MCAddSet(setid, token(cptr));
                } else
                    MCAddSet(setid, NULL);
                msgid = 0;      /* "#" numbering restarts per set */
            } else if (strncmp(cptr, "delset", 6) == 0) {
                /* $delset N: remove set N entirely. */
                cptr += 6;
                cptr = wskip(cptr);
                setid = atoi(cptr);
                MCDelSet(setid);
            } else if (strncmp(cptr, "quote", 5) == 0) {
                /* $quote [C]: set or clear the message quote char. */
                cptr += 5;
                if (!*cptr)
                    quote = 0;
                else {
                    cptr = wskip(cptr);
                    if (!*cptr)
                        quote = 0;
                    else
                        quote = *cptr;
                }
            } else if (isspace((unsigned char)*cptr)) {
                /* "$ #CONST": remember constant for the next message. */
                cptr = wskip(cptr);
                if (*cptr == '#') {
                    ++cptr;
                    /*
                     * NOTE(review): unbounded strcpy — safe only
                     * because token() caps its result at MAXTOKEN
                     * (itself unbounded on overlong words, see above).
                     */
                    strcpy(hconst, token(cptr));
                }
            } else {
                if (*cptr) {
                    cptr = wskip(cptr);
                    if (*cptr)
                        warning(cptr, "unrecognized line");
                }
            }
        } else {
            /* Message line: ensure a set exists to hold it. */
            if (!curSet)
                MCAddSet(setid, NULL);
            if (isdigit((unsigned char)*cptr) || *cptr == '#') {
                if (*cptr == '#') {
                    /* Auto-numbered message. */
                    ++msgid;
                    ++cptr;
                    if (!*cptr) {
                        MCAddMsg(msgid, "", hconst);
                        hconst[0] = '\0';
                        continue;
                    }
                    if (!isspace((unsigned char)*cptr))
                        warning(cptr, "expected a space");
                    ++cptr;
                    if (!*cptr) {
                        MCAddMsg(msgid, "", hconst);
                        hconst[0] = '\0';
                        continue;
                    }
                } else {
                    /* Explicitly numbered message. */
                    msgid = atoi(cptr);
                    cptr = cskip(cptr);
                    if (isspace(*cptr))
                        cptr++;
                    /* if (*cptr) ++cptr; */
                }
                if (!*cptr) {
                    /*
                     * Bare "ID " (trailing space) sets an empty
                     * message; bare "ID" deletes the message.
                     */
                    if (isspace(cptr[-1])) {
                        MCAddMsg(msgid, "", hconst);
                        hconst[0] = '\0';
                    } else {
                        MCDelMsg(msgid);
                    }
                } else {
                    str = getmsg(fd, cptr, quote);
                    MCAddMsg(msgid, str, hconst);
                    hconst[0] = '\0';
                }
            }
        }
    }
}

/*
 * Load an existing binary catalog from fd into a fresh global catalog.
 * Validates magic, major version, and byte-order flag, then follows
 * the on-disk chain of set records, reading each set's string data
 * blob and its message records.  Any short read or failed seek calls
 * corrupt(), which exits.
 */
void
MCReadCat(int fd)
{
    MCHeaderT mcHead;
    MCMsgT mcMsg;
    MCSetT mcSet;
    msgT *msg;
    setT *set;
    int i;
    char *data;

    /* NOTE(review): replaces any catalog already in 'cat' (leaks it). */
    cat = (catT *) malloc(sizeof(catT));
    if (!cat)
        nomem();
    bzero(cat, sizeof(catT));

    /* While we deal with read/write this in network byte order we do
       NOT deal with struct member padding issues, or even sizeof(long)
       issues, those are left for a future genneration to curse either
       me, or the original author for */

    if (read(fd, &mcHead, sizeof(mcHead)) != sizeof(mcHead))
        corrupt();
    if (strncmp(mcHead.magic, MCMagic, MCMagicLen) != 0)
        corrupt();
    if (ntohl(mcHead.majorVer) != MCMajorVer)
        error(NULL, "unrecognized catalog version");
    if ((ntohl(mcHead.flags) & MC68KByteOrder) == 0)
        error(NULL, "wrong byte order");

    if (lseek(fd, ntohll(mcHead.firstSet), L_SET) == -1)
        corrupt();

    while (TRUE) {
        if (read(fd, &mcSet, sizeof(mcSet)) != sizeof(mcSet))
            corrupt();
        if (mcSet.invalid)
            continue;   /* deleted set: the next record follows directly */

        /* Append a new set node to the catalog list. */
        set = (setT *) malloc(sizeof(setT));
        if (!set)
            nomem();
        bzero(set, sizeof(*set));
        if (cat->first) {
            cat->last->next = set;
            set->prev = cat->last;
            cat->last = set;
        } else
            cat->first = cat->last = set;

        set->setId = ntohl(mcSet.setId);

        /* Get the data */
        if (mcSet.dataLen) {
            data = (char *) malloc((size_t)ntohl(mcSet.dataLen));
            if (!data)
                nomem();
            if (lseek(fd, ntohll(mcSet.data.off), L_SET) == -1)
                corrupt();
            if (read(fd, data, (size_t)ntohl(mcSet.dataLen)) != ntohl(mcSet.dataLen))
                corrupt();
            if (lseek(fd, ntohll(mcSet.u.firstMsg), L_SET) == -1)
                corrupt();

            for (i = 0; i < ntohl(mcSet.numMsgs); ++i) {
                if (read(fd, &mcMsg, sizeof(mcMsg)) != sizeof(mcMsg))
                    corrupt();
                if (mcMsg.invalid) {
                    --i;        /* deleted msgs don't count toward numMsgs */
                    continue;
                }

                /* Append a message node to this set's list. */
                msg = (msgT *) malloc(sizeof(msgT));
                if (!msg)
                    nomem();
                bzero(msg, sizeof(*msg));
                if (set->first) {
                    set->last->next = msg;
                    msg->prev = set->last;
                    set->last = msg;
                } else
                    set->first = set->last = msg;

                msg->msgId = ntohl(mcMsg.msgId);
                /* msg.off is an offset into this set's data blob. */
                msg->str = dupstr((char *) (data + ntohll(mcMsg.msg.off)));
            }
            free(data);
        }
        if (!mcSet.nextSet)
            break;      /* zero link terminates the set chain */
        if (lseek(fd, ntohll(mcSet.nextSet), L_SET) == -1)
            corrupt();
    }
}

/* Write a NUL-terminated string (without its NUL) to fd; NULL is a no-op. */
static void
printS(int fd, const char *str)
{
    if (str)
        write(fd, str, strlen(str));
}

/* Write the decimal representation of l to fd. */
static void
printL(int fd, long l)
{
    char buf[32];
    sprintf(buf, "%ld", l);
    write(fd, buf, strlen(buf));
}

/* Write the lowercase hexadecimal representation of l to fd. */
static void
printLX(int fd, long l)
{
    char buf[32];
    sprintf(buf, "%lx", l);
    write(fd, buf, strlen(buf));
}

/*
 * Emit one symbolic-constant definition for a set (msgConst == NULL,
 * name suffixed "Set") or a message (setConst + msgConst concatenated),
 * in the syntax selected by type: #define for classic C, const long
 * for C++/ANSI C.
 */
static void
genconst(int fd, int type, char *setConst, char *msgConst, long val)
{
    switch (type) {
        case MCLangC:
            if (!msgConst) {
                printS(fd, "\n#define ");
                printS(fd, setConst);
                printS(fd, "Set");
            } else {
                printS(fd, "#define ");
                printS(fd, setConst);
                printS(fd, msgConst);
            }
            printS(fd, "\t0x");
            printLX(fd, val);
            printS(fd, "\n");
            break;
        case MCLangCPlusPlus:
        case MCLangANSIC:
            if (!msgConst) {
                printS(fd, "\nconst long ");
                printS(fd, setConst);
                printS(fd, "Set");
            } else {
                printS(fd, "const long ");
                printS(fd, setConst);
                printS(fd, msgConst);
            }
            printS(fd, "\t= ");
            printL(fd, val);
            printS(fd, ";\n");
            break;
        default:
            error(NULL, "not a recognized (programming) language type");
    }
}

/*
 * Write a header of symbolic constants for every set/message that was
 * annotated with "# CONST" in the source.  With orConsts set, message
 * ids are composed with their set id via MCMakeId (and the helper
 * macros are emitted for C-family outputs).  Consumes (frees) the
 * hconst strings as it goes.
 */
void
MCWriteConst(int fd, int type, int orConsts)
{
    msgT *msg;
    setT *set;
    long id;

    if (orConsts && (type == MCLangC || type == MCLangCPlusPlus || type == MCLangANSIC)) {
        printS(fd, "/* Use these Macros to compose and decompose setId's and msgId's */\n");
        printS(fd, "#ifndef MCMakeId\n");
        printS(fd, "# define MCMakeId(s,m)\t(unsigned long)(((unsigned short)s<<(sizeof(short)*8))\\\n");
        printS(fd, "\t\t\t\t\t|(unsigned short)m)\n");
        printS(fd, "# define MCSetId(id)\t(unsigned int) (id >> (sizeof(short) * 8))\n");
        printS(fd, "# define MCMsgId(id)\t(unsigned int) ((id << (sizeof(short) * 8))\\\n");
        printS(fd, "\t\t\t\t\t>> (sizeof(short) * 8))\n");
        printS(fd, "#endif\n");
    }

    for (set = cat->first; set; set = set->next) {
        if (set->hconst)
            genconst(fd, type, set->hconst, NULL, set->setId);

        for (msg = set->first; msg; msg = msg->next) {
            if (msg->hconst) {
                if (orConsts)
                    id = MCMakeId(set->setId, msg->msgId);
                else
                    id = msg->msgId;
                genconst(fd, type, set->hconst, msg->hconst, id);
                free(msg->hconst);
                msg->hconst = NULL;
            }
        }
        if (set->hconst) {
            free(set->hconst);
            set->hconst = NULL;
        }
    }
}

/*
 * Write the in-memory catalog to fd in the binary on-disk format
 * (big-endian).  Writes placeholder header/set records first, streams
 * each set's string data and message records, then seeks back to
 * patch offsets and counts into the placeholders.
 */
void
MCWriteCat(int fd)
{
    MCHeaderT mcHead;
    int cnt;
    setT *set;
    msgT *msg;
    MCSetT mcSet;
    MCMsgT mcMsg;
    off_t pos;

    bcopy(MCMagic, mcHead.magic, MCMagicLen);
    mcHead.majorVer = htonl(MCMajorVer);
    mcHead.minorVer = htonl(MCMinorVer);
    mcHead.flags = htonl(MC68KByteOrder);
    mcHead.firstSet = 0;        /* We'll be back to set this in a minute */

    if (cat == NULL)
        error(NULL, "cannot write empty catalog set");

    for (cnt = 0, set = cat->first; set; set = set->next)
        ++cnt;
    mcHead.numSets = htonl(cnt);

    /* I'm not inclined to mess with it, but it looks odd that we write
       the header twice...and that we get the firstSet value from
       another lseek rather then just 'sizeof(mcHead)' */
    /* Also, this code doesn't seem to check returns from write! */
    /* NOTE(review): unchecked write(2) returns mean a full disk
       silently produces a truncated catalog — known upstream quirk. */

    lseek(fd, (off_t)0L, L_SET);
    write(fd, &mcHead, sizeof(mcHead));
    mcHead.firstSet = htonll(lseek(fd, (off_t)0L, L_INCR));
    lseek(fd, (off_t)0L, L_SET);
    write(fd, &mcHead, sizeof(mcHead));

    for (set = cat->first; set; set = set->next) {
        bzero(&mcSet, sizeof(mcSet));

        mcSet.setId = htonl(set->setId);
        mcSet.invalid = FALSE;

        /* The rest we'll have to come back and change in a moment */
        pos = lseek(fd, (off_t)0L, L_INCR);
        write(fd, &mcSet, sizeof(mcSet));

        /* Now write all the string data */
        mcSet.data.off = htonll(lseek(fd, (off_t)0L, L_INCR));
        cnt = 0;
        for (msg = set->first; msg; msg = msg->next) {
            /* Record each string's offset relative to the data blob. */
            msg->offset = lseek(fd, (off_t)0L, L_INCR) - ntohll(mcSet.data.off);
            mcSet.dataLen += write(fd, msg->str, strlen(msg->str) + 1);
            ++cnt;
        }
        mcSet.u.firstMsg = htonll(lseek(fd, (off_t)0L, L_INCR));
        mcSet.numMsgs = htonl(cnt);
        mcSet.dataLen = htonl(mcSet.dataLen);

        /* Now write the message headers */
        for (msg = set->first; msg; msg = msg->next) {
            mcMsg.msgId = htonl(msg->msgId);
            mcMsg.msg.off = htonll(msg->offset);
            mcMsg.invalid = FALSE;
            write(fd, &mcMsg, sizeof(mcMsg));
        }

        /* Go back and fix things up */
        if (set == cat->last) {
            mcSet.nextSet = 0;  /* zero link ends the chain */
            lseek(fd, pos, L_SET);
            write(fd, &mcSet, sizeof(mcSet));
        } else {
            mcSet.nextSet = htonll(lseek(fd, (off_t)0L, L_INCR));
            lseek(fd, pos, L_SET);
            write(fd, &mcSet, sizeof(mcSet));
            lseek(fd, ntohll(mcSet.nextSet), L_SET);
        }
    }
}

/*
 * Create (or re-select) set setId in the catalog, keeping the set list
 * sorted by id, and make it the current set for subsequent MCAddMsg
 * calls.  hconst, if non-empty, becomes the set's symbolic-constant
 * name (copied).
 */
void
MCAddSet(int setId, char *hconst)
{
    setT *set;

    if (setId <= 0) {
        error(NULL, "setId's must be greater than zero");
        return;         /* not reached: error() exits */
    }

    if (hconst && !*hconst)
        hconst = NULL;
    for (set = cat->first; set; set = set->next) {
        if (set->setId == setId) {
            /* Existing set: drop any old constant name, reuse node. */
            if (set->hconst && hconst)
                free(set->hconst);
            set->hconst = NULL;
            break;
        } else if (set->setId > setId) {
            /* Passed the insertion point: splice a node in before. */
            setT *newSet;

            newSet = (setT *) malloc(sizeof(setT));
            if (!newSet)
                nomem();
            bzero(newSet, sizeof(setT));
            newSet->prev = set->prev;
            newSet->next = set;
            if (set->prev)
                set->prev->next = newSet;
            else
                cat->first = newSet;
            set->prev = newSet;
            set = newSet;
            break;
        }
    }
    if (!set) {
        /* Larger than every existing id (or empty list): append. */
        set = (setT *) malloc(sizeof(setT));
        if (!set)
            nomem();
        bzero(set, sizeof(setT));

        if (cat->first) {
            set->prev = cat->last;
            set->next = NULL;
            cat->last->next = set;
            cat->last = set;
        } else {
            set->prev = set->next = NULL;
            cat->first = cat->last = set;
        }
    }

    set->setId = setId;
    if (hconst)
        set->hconst = dupstr(hconst);
    curSet = set;
}

/*
 * Add (or replace) message msgId in the current set, keeping the
 * message list sorted by id.  str and a non-empty hconst are copied.
 * A set must have been established via MCAddSet first.
 */
void
MCAddMsg(int msgId, const char *str, char *hconst)
{
    msgT *msg;

    if (!curSet)
        error(NULL, "can't specify a message when no set exists");

    if (msgId <= 0) {
        error(NULL, "msgId's must be greater than zero");
        return;         /* not reached: error() exits */
    }

    if (hconst && !*hconst)
        hconst = NULL;
    for (msg = curSet->first; msg; msg = msg->next) {
        if (msg->msgId == msgId) {
            /* Existing message: free old contents, reuse the node. */
            if (msg->hconst && hconst)
                free(msg->hconst);
            if (msg->str)
                free(msg->str);
            msg->hconst = msg->str = NULL;
            break;
        } else if (msg->msgId > msgId) {
            /* Passed the insertion point: splice a node in before. */
            msgT *newMsg;

            newMsg = (msgT *) malloc(sizeof(msgT));
            if (!newMsg)
                nomem();
            bzero(newMsg, sizeof(msgT));
            newMsg->prev = msg->prev;
            newMsg->next = msg;
            if (msg->prev)
                msg->prev->next = newMsg;
            else
                curSet->first = newMsg;
            msg->prev = newMsg;
            msg = newMsg;
            break;
        }
    }
    if (!msg) {
        /* Larger than every existing id (or empty list): append. */
        msg = (msgT *) malloc(sizeof(msgT));
        if (!msg)
            nomem();
        bzero(msg, sizeof(msgT));

        if (curSet->first) {
            msg->prev = curSet->last;
            msg->next = NULL;
            curSet->last->next = msg;
            curSet->last = msg;
        } else {
            msg->prev = msg->next = NULL;
            curSet->first = curSet->last = msg;
        }
    }

    msg->msgId = msgId;
    if (hconst)
        msg->hconst = dupstr(hconst);
    msg->str = dupstr(str);
}

/*
 * Remove set setId and all its messages from the catalog, freeing
 * everything.  Warns if no such set exists.
 * NOTE(review): curSet is not cleared if it points at the deleted
 * set — a subsequent MCAddMsg would use freed memory; presumably the
 * gencat driver always issues $set first.  Confirm against callers.
 */
void
MCDelSet(int setId)
{
    setT *set;
    msgT *msg;

    for (set = cat->first; set; set = set->next) {
        if (set->setId == setId) {
            /* Free every message in the set. */
            for (msg = set->first; msg; msg = msg->next) {
                if (msg->hconst)
                    free(msg->hconst);
                if (msg->str)
                    free(msg->str);
                free(msg);
            }
            if (set->hconst)
                free(set->hconst);

            /* Unlink the set node from the doubly linked list. */
            if (set->prev)
                set->prev->next = set->next;
            else
                cat->first = set->next;

            if (set->next)
                set->next->prev = set->prev;
            else
                cat->last = set->prev;

            free(set);
            return;
        } else if (set->setId > setId)
            break;      /* list is sorted: the set cannot appear later */
    }
    warning(NULL, "specified set doesn't exist");
}

/*
 * Remove message msgId from the current set, freeing its storage.
 * Warns if no such message exists; errors out if no set is current.
 */
void
MCDelMsg(int msgId)
{
    msgT *msg;

    if (!curSet)
        error(NULL, "you can't delete a message before defining the set");

    for (msg = curSet->first; msg; msg = msg->next) {
        if (msg->msgId == msgId) {
            if (msg->hconst)
                free(msg->hconst);
            if (msg->str)
                free(msg->str);

            /* Unlink the message node from the doubly linked list. */
            if (msg->prev)
                msg->prev->next = msg->next;
            else
                curSet->first = msg->next;

            if (msg->next)
                msg->next->prev = msg->prev;
            else
                curSet->last = msg->prev;

            free(msg);
            return;
        } else if (msg->msgId > msgId)
            break;      /* list is sorted: the msg cannot appear later */
    }
    warning(NULL, "specified msg doesn't exist");
}

#if 0 /* this function is unused and looks like a debug thing */
/* Dump the in-memory catalog back out as catalog source text. */
void
MCDumpcat(fp)
FILE *fp;
{
    msgT *msg;
    setT *set;

    if (!cat)
        errx(1, "no catalog open");

    for (set = cat->first; set; set = set->next) {
        fprintf(fp, "$set %ld", set->setId);
        if (set->hconst)
            fprintf(fp, " # %s", set->hconst);
        fprintf(fp, "\n\n");

        for (msg = set->first; msg; msg = msg->next) {
            if (msg->hconst)
                fprintf(fp, "# %s\n", msg->hconst);
            fprintf(fp, "%ld\t%s\n", msg->msgId, msg->str);
        }
        fprintf(fp, "\n");
    }
}
#endif /* 0 */
452339.c
//***************************************************************************** // // fontcmss32i.c - Font definition for the 32pt Cmss italic font. // // Copyright (c) 2011-2017 Texas Instruments Incorporated. All rights reserved. // Software License Agreement // // Texas Instruments (TI) is supplying this software for use solely and // exclusively on TI's microcontroller products. The software is owned by // TI and/or its suppliers, and is protected under applicable copyright // laws. You may not combine this software with "viral" open-source // software in order to form a larger program. // // THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS. // NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT // NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY // CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL // DAMAGES, FOR ANY REASON WHATSOEVER. // // This is part of revision 2.1.4.178 of the Tiva Graphics Library. // //***************************************************************************** //***************************************************************************** // // This file is generated by ftrasterize; DO NOT EDIT BY HAND! // //***************************************************************************** #include <stdint.h> #include <stdbool.h> #include "grlib/grlib.h" //***************************************************************************** // // Details of this font: // Characters: 32 to 126 inclusive // Style: cmss // Size: 32 point // Bold: no // Italic: yes // Memory usage: 3780 bytes // //***************************************************************************** //***************************************************************************** // // The compressed data for the 32 point Cmss italic font. // Contains characters 32 to 126 inclusive. 
// //***************************************************************************** static const uint8_t g_pui8Cmss32iData[3579] = { 5, 13, 0, 53, 80, 28, 10, 240, 147, 115, 114, 130, 130, 130, 115, 114, 130, 130, 130, 115, 114, 130, 130, 130, 130, 114, 240, 195, 115, 115, 0, 10, 112, 20, 13, 240, 195, 51, 67, 51, 67, 51, 66, 66, 82, 66, 81, 81, 82, 66, 0, 39, 80, 89, 26, 0, 8, 33, 97, 240, 34, 82, 240, 33, 97, 240, 34, 82, 240, 33, 97, 240, 49, 97, 240, 33, 97, 240, 49, 97, 240, 33, 97, 240, 49, 97, 240, 33, 97, 175, 7, 177, 97, 240, 49, 97, 240, 34, 82, 240, 33, 97, 240, 49, 97, 191, 7, 161, 97, 240, 33, 97, 240, 49, 97, 240, 33, 97, 240, 49, 97, 240, 34, 81, 240, 49, 97, 240, 34, 82, 240, 33, 97, 240, 34, 82, 240, 33, 97, 0, 8, 80, 52, 18, 162, 240, 18, 215, 169, 131, 18, 34, 114, 50, 49, 114, 50, 162, 50, 178, 50, 178, 50, 179, 34, 183, 199, 199, 214, 179, 19, 178, 50, 178, 50, 178, 50, 178, 50, 162, 50, 97, 66, 35, 99, 34, 19, 122, 166, 226, 240, 18, 0, 15, 77, 22, 240, 180, 161, 101, 145, 98, 50, 129, 98, 50, 113, 98, 66, 97, 114, 66, 82, 98, 82, 81, 114, 66, 81, 130, 66, 65, 146, 51, 50, 146, 50, 65, 181, 65, 196, 65, 240, 97, 83, 193, 84, 177, 82, 34, 146, 66, 50, 145, 82, 50, 129, 83, 50, 113, 98, 66, 98, 98, 66, 97, 114, 50, 97, 130, 50, 81, 146, 34, 97, 164, 97, 179, 0, 17, 48, 59, 23, 0, 7, 4, 240, 54, 240, 18, 50, 242, 66, 242, 66, 241, 82, 226, 66, 242, 50, 240, 18, 34, 240, 37, 240, 52, 130, 146, 162, 131, 147, 117, 130, 99, 34, 114, 98, 67, 83, 98, 68, 51, 98, 99, 35, 114, 103, 130, 117, 147, 86, 169, 24, 102, 86, 0, 23, 64, 12, 7, 243, 67, 67, 66, 82, 81, 82, 0, 21, 80, 36, 14, 146, 178, 178, 178, 178, 194, 178, 194, 178, 194, 178, 194, 194, 179, 178, 194, 194, 194, 178, 194, 194, 194, 194, 194, 194, 194, 194, 194, 210, 194, 194, 210, 194, 160, 36, 14, 114, 194, 210, 194, 209, 210, 194, 194, 194, 194, 194, 194, 194, 194, 194, 179, 178, 194, 194, 194, 178, 194, 194, 178, 194, 178, 194, 178, 194, 178, 178, 178, 178, 192, 29, 15, 113, 210, 210, 210, 
131, 33, 50, 68, 17, 20, 103, 148, 165, 133, 19, 83, 33, 35, 66, 34, 49, 146, 210, 210, 0, 34, 112, 46, 23, 0, 18, 82, 240, 97, 240, 113, 240, 113, 240, 113, 240, 98, 240, 97, 240, 113, 240, 113, 240, 113, 207, 5, 193, 240, 113, 240, 113, 240, 113, 240, 98, 240, 97, 240, 113, 240, 113, 240, 113, 240, 98, 0, 19, 14, 7, 0, 19, 51, 67, 66, 82, 82, 81, 82, 240, 240, 48, 9, 11, 0, 23, 56, 56, 0, 19, 80, 10, 6, 0, 16, 67, 51, 51, 0, 6, 48, 70, 21, 240, 33, 240, 66, 240, 65, 240, 66, 240, 65, 240, 65, 240, 81, 240, 65, 240, 81, 240, 65, 240, 81, 240, 65, 240, 81, 240, 65, 240, 66, 240, 65, 240, 66, 240, 65, 240, 66, 240, 65, 240, 66, 240, 65, 240, 65, 240, 81, 240, 65, 240, 81, 240, 65, 240, 81, 240, 65, 240, 66, 240, 65, 240, 66, 240, 65, 240, 80, 45, 16, 0, 8, 116, 167, 131, 50, 114, 98, 98, 98, 82, 114, 82, 114, 66, 130, 66, 130, 66, 130, 66, 114, 67, 114, 66, 130, 66, 115, 66, 114, 82, 114, 82, 98, 98, 83, 114, 51, 135, 165, 0, 17, 16, 29, 14, 0, 8, 18, 119, 115, 18, 194, 179, 179, 179, 178, 179, 179, 179, 178, 194, 179, 179, 179, 178, 194, 179, 123, 59, 0, 14, 48, 36, 18, 0, 10, 5, 184, 147, 52, 114, 114, 114, 114, 113, 130, 113, 130, 240, 18, 242, 240, 18, 242, 242, 242, 226, 242, 242, 242, 227, 226, 240, 28, 92, 0, 18, 96, 35, 17, 0, 9, 69, 168, 131, 66, 114, 98, 242, 242, 226, 227, 165, 197, 240, 18, 240, 18, 242, 242, 242, 242, 81, 130, 82, 115, 99, 67, 136, 165, 0, 18, 16, 38, 17, 0, 9, 115, 212, 196, 194, 18, 179, 18, 163, 19, 162, 34, 162, 50, 147, 50, 131, 51, 115, 67, 99, 82, 114, 98, 110, 62, 179, 226, 242, 227, 227, 227, 0, 17, 112, 41, 18, 0, 9, 90, 138, 130, 240, 18, 243, 242, 240, 18, 240, 18, 20, 169, 148, 51, 131, 82, 240, 18, 240, 18, 240, 18, 240, 18, 242, 113, 130, 98, 114, 131, 67, 136, 197, 0, 19, 32, 41, 16, 0, 9, 5, 151, 131, 194, 210, 211, 210, 37, 99, 22, 100, 67, 83, 98, 67, 114, 67, 114, 66, 130, 66, 130, 66, 114, 82, 114, 82, 114, 82, 98, 114, 66, 135, 165, 0, 17, 16, 28, 17, 0, 8, 93, 77, 226, 226, 226, 227, 211, 226, 226, 227, 211, 226, 
227, 226, 227, 226, 227, 227, 226, 227, 227, 0, 18, 96, 43, 18, 0, 9, 118, 169, 131, 82, 115, 99, 83, 130, 83, 114, 99, 114, 115, 67, 152, 167, 162, 67, 130, 114, 98, 130, 98, 130, 82, 146, 82, 146, 82, 130, 98, 114, 130, 83, 136, 197, 0, 19, 32, 41, 17, 0, 9, 53, 183, 146, 66, 130, 98, 98, 114, 98, 114, 82, 130, 82, 130, 82, 130, 82, 115, 82, 99, 99, 68, 119, 18, 116, 51, 226, 227, 211, 226, 130, 51, 136, 180, 0, 18, 48, 15, 8, 0, 11, 50, 83, 83, 0, 8, 51, 83, 82, 0, 8, 96, 19, 9, 0, 12, 99, 99, 99, 0, 9, 67, 98, 114, 114, 113, 129, 113, 240, 240, 224, 28, 10, 0, 10, 67, 115, 115, 240, 240, 114, 114, 130, 130, 130, 115, 114, 130, 130, 130, 115, 114, 130, 130, 130, 115, 115, 240, 192, 13, 24, 0, 39, 31, 5, 0, 15, 63, 5, 0, 39, 64, 31, 13, 0, 13, 115, 162, 178, 0, 6, 18, 178, 178, 162, 178, 178, 162, 162, 162, 163, 162, 162, 178, 178, 113, 50, 83, 72, 101, 240, 240, 32, 31, 13, 240, 229, 104, 67, 67, 49, 114, 178, 178, 163, 162, 162, 162, 162, 162, 178, 162, 178, 162, 178, 178, 240, 240, 98, 178, 178, 0, 14, 48, 60, 20, 0, 6, 21, 217, 163, 67, 131, 114, 130, 71, 98, 72, 82, 66, 52, 82, 50, 68, 66, 50, 99, 66, 50, 98, 82, 34, 114, 66, 50, 114, 66, 50, 99, 66, 50, 98, 82, 50, 83, 82, 50, 82, 98, 66, 51, 99, 54, 146, 68, 163, 240, 51, 99, 153, 198, 0, 21, 32, 51, 21, 0, 6, 99, 240, 51, 240, 36, 240, 18, 19, 242, 19, 226, 35, 226, 35, 210, 51, 210, 66, 194, 82, 194, 83, 162, 99, 162, 99, 146, 115, 141, 141, 115, 146, 114, 162, 99, 163, 82, 179, 67, 179, 66, 195, 51, 195, 0, 21, 48, 47, 20, 240, 240, 234, 172, 131, 100, 114, 131, 114, 146, 99, 146, 99, 146, 98, 147, 98, 131, 99, 100, 123, 155, 146, 116, 114, 147, 83, 162, 83, 162, 83, 162, 82, 178, 82, 162, 83, 147, 83, 116, 108, 138, 0, 21, 32, 49, 20, 0, 6, 23, 186, 147, 83, 115, 240, 34, 240, 34, 240, 34, 240, 50, 240, 34, 240, 50, 240, 50, 240, 34, 240, 50, 240, 50, 240, 50, 240, 50, 240, 50, 240, 51, 240, 50, 240, 51, 240, 51, 98, 170, 198, 0, 21, 16, 49, 22, 0, 6, 11, 188, 163, 115, 147, 131, 130, 162, 115, 163, 
99, 178, 99, 178, 98, 194, 98, 194, 83, 194, 83, 194, 83, 194, 82, 194, 98, 194, 83, 178, 99, 178, 99, 162, 114, 163, 114, 147, 115, 116, 140, 170, 0, 23, 64, 47, 21, 0, 5, 110, 126, 115, 240, 50, 240, 66, 240, 51, 240, 51, 240, 51, 240, 50, 240, 66, 240, 61, 141, 131, 240, 50, 240, 66, 240, 51, 240, 51, 240, 51, 240, 50, 240, 66, 240, 51, 240, 62, 126, 0, 21, 112, 48, 20, 240, 240, 237, 125, 115, 240, 34, 240, 50, 240, 35, 240, 35, 240, 35, 240, 35, 240, 34, 240, 35, 240, 44, 140, 131, 240, 34, 240, 35, 240, 35, 240, 35, 240, 34, 240, 50, 240, 35, 240, 35, 240, 35, 0, 22, 16, 49, 20, 0, 6, 22, 202, 147, 83, 115, 145, 114, 240, 34, 240, 34, 240, 50, 240, 34, 240, 50, 240, 50, 240, 34, 240, 50, 240, 50, 118, 82, 118, 82, 163, 82, 163, 83, 146, 114, 146, 115, 130, 131, 99, 154, 183, 0, 21, 16, 52, 23, 0, 6, 35, 163, 115, 163, 115, 163, 114, 178, 130, 163, 115, 163, 115, 163, 115, 163, 114, 178, 130, 163, 127, 1, 127, 1, 115, 163, 114, 178, 130, 163, 115, 163, 115, 163, 115, 163, 114, 178, 130, 163, 115, 163, 115, 163, 115, 162, 0, 24, 29, 10, 240, 147, 115, 115, 114, 130, 115, 115, 115, 114, 130, 115, 115, 115, 114, 130, 115, 115, 115, 114, 130, 115, 115, 115, 0, 10, 112, 35, 18, 0, 6, 18, 243, 243, 243, 242, 240, 18, 243, 243, 243, 242, 240, 18, 243, 243, 243, 242, 240, 18, 243, 243, 242, 129, 99, 130, 67, 138, 166, 0, 19, 32, 53, 23, 0, 6, 35, 163, 115, 147, 131, 131, 146, 131, 162, 115, 163, 99, 179, 83, 195, 67, 210, 67, 226, 36, 227, 22, 214, 19, 213, 35, 212, 67, 195, 83, 179, 115, 163, 115, 163, 115, 162, 147, 146, 147, 131, 163, 115, 163, 115, 163, 0, 23, 112, 30, 15, 240, 240, 67, 195, 195, 195, 194, 195, 195, 195, 194, 210, 195, 195, 195, 194, 210, 195, 195, 195, 194, 210, 195, 204, 60, 0, 15, 48, 85, 28, 0, 7, 68, 212, 117, 196, 117, 181, 114, 18, 165, 130, 18, 162, 18, 115, 18, 150, 115, 18, 146, 19, 115, 18, 130, 35, 114, 34, 130, 34, 130, 35, 98, 50, 115, 35, 98, 35, 115, 35, 82, 51, 115, 50, 82, 50, 130, 66, 66, 66, 130, 66, 51, 66, 115, 66, 50, 67, 
115, 67, 19, 67, 115, 67, 18, 82, 130, 86, 82, 130, 100, 98, 115, 99, 99, 115, 99, 99, 114, 240, 18, 0, 29, 69, 23, 0, 6, 37, 131, 117, 131, 117, 131, 114, 18, 130, 130, 19, 114, 115, 19, 99, 115, 19, 99, 114, 50, 99, 114, 51, 82, 130, 51, 82, 115, 51, 67, 115, 66, 67, 114, 83, 51, 114, 83, 50, 130, 83, 50, 115, 98, 35, 115, 98, 35, 114, 115, 19, 114, 115, 18, 130, 130, 18, 115, 133, 115, 133, 114, 148, 0, 24, 49, 22, 0, 6, 86, 233, 195, 83, 162, 131, 130, 162, 114, 194, 82, 210, 82, 210, 66, 226, 66, 226, 66, 226, 50, 242, 50, 226, 66, 226, 66, 226, 66, 210, 82, 195, 83, 178, 114, 162, 131, 130, 163, 83, 201, 230, 0, 23, 64, 48, 20, 240, 240, 234, 171, 146, 115, 115, 131, 99, 146, 99, 146, 98, 162, 98, 162, 83, 146, 99, 146, 99, 130, 115, 99, 139, 138, 163, 240, 35, 240, 35, 240, 34, 240, 35, 240, 35, 240, 35, 240, 34, 240, 50, 0, 22, 32, 60, 22, 0, 6, 86, 233, 195, 83, 162, 131, 130, 162, 114, 179, 83, 194, 82, 210, 66, 226, 66, 226, 66, 226, 50, 242, 50, 226, 66, 226, 66, 226, 66, 210, 82, 83, 82, 83, 67, 66, 114, 83, 34, 131, 71, 147, 69, 186, 217, 240, 68, 240, 67, 240, 67, 240, 83, 0, 11, 112, 49, 21, 0, 5, 122, 187, 147, 115, 131, 131, 115, 146, 114, 162, 114, 162, 99, 162, 99, 146, 115, 131, 114, 116, 139, 155, 163, 82, 179, 83, 162, 99, 147, 115, 131, 115, 131, 130, 131, 131, 114, 147, 99, 162, 99, 163, 0, 21, 80, 45, 19, 0, 5, 117, 186, 131, 83, 115, 240, 18, 240, 18, 240, 34, 240, 34, 240, 35, 240, 36, 247, 231, 244, 240, 35, 240, 34, 240, 34, 240, 34, 240, 34, 240, 18, 98, 130, 116, 83, 122, 182, 0, 20, 48, 52, 22, 240, 240, 255, 3, 63, 4, 179, 240, 67, 240, 66, 240, 67, 240, 67, 240, 67, 240, 67, 240, 66, 240, 67, 240, 67, 240, 67, 240, 67, 240, 66, 240, 67, 240, 67, 240, 67, 240, 67, 240, 66, 240, 67, 240, 67, 240, 67, 0, 23, 112, 51, 21, 240, 240, 242, 163, 83, 162, 99, 162, 99, 147, 98, 163, 98, 162, 114, 162, 99, 162, 99, 147, 98, 163, 98, 162, 114, 162, 114, 162, 98, 163, 98, 163, 98, 162, 114, 162, 114, 147, 114, 131, 146, 99, 163, 67, 200, 229, 
0, 22, 80, 54, 22, 240, 240, 227, 226, 51, 210, 82, 195, 82, 194, 99, 163, 99, 162, 115, 147, 115, 146, 131, 130, 162, 115, 162, 114, 179, 83, 179, 82, 195, 67, 195, 66, 211, 51, 226, 50, 242, 34, 240, 19, 18, 240, 21, 240, 37, 240, 36, 240, 52, 0, 23, 112, 85, 30, 0, 7, 67, 147, 162, 51, 147, 162, 51, 132, 146, 67, 133, 130, 67, 133, 115, 67, 114, 19, 114, 98, 114, 19, 99, 98, 98, 35, 98, 114, 98, 35, 83, 114, 82, 51, 82, 130, 82, 51, 67, 131, 51, 51, 66, 147, 50, 67, 51, 147, 35, 67, 50, 163, 34, 98, 50, 163, 34, 98, 34, 179, 18, 114, 34, 179, 18, 114, 18, 197, 130, 18, 197, 132, 212, 148, 212, 148, 211, 163, 0, 31, 80, 58, 25, 0, 6, 115, 179, 131, 163, 163, 131, 179, 115, 211, 83, 227, 67, 240, 19, 50, 240, 35, 34, 240, 70, 240, 69, 240, 99, 240, 100, 240, 100, 240, 82, 19, 240, 50, 50, 240, 35, 51, 243, 67, 227, 99, 195, 115, 179, 147, 162, 163, 147, 179, 115, 195, 0, 25, 112, 52, 22, 240, 240, 227, 211, 52, 194, 83, 179, 83, 163, 115, 131, 131, 115, 148, 83, 179, 82, 195, 67, 211, 35, 227, 19, 240, 21, 240, 36, 240, 51, 240, 67, 240, 67, 240, 67, 240, 66, 240, 82, 240, 67, 240, 67, 240, 67, 240, 66, 0, 24, 50, 22, 0, 6, 30, 127, 240, 51, 240, 51, 240, 67, 240, 51, 240, 51, 240, 51, 240, 51, 240, 67, 240, 51, 240, 51, 240, 51, 240, 51, 240, 67, 240, 51, 240, 51, 240, 51, 240, 51, 240, 67, 240, 51, 240, 63, 127, 0, 22, 112, 36, 15, 240, 117, 165, 162, 195, 194, 210, 210, 195, 195, 194, 210, 210, 195, 195, 194, 210, 210, 195, 194, 210, 210, 210, 195, 194, 210, 210, 195, 195, 194, 210, 213, 149, 160, 20, 13, 240, 210, 66, 81, 81, 82, 66, 82, 66, 82, 66, 67, 51, 67, 51, 0, 39, 64, 36, 15, 240, 117, 149, 210, 210, 210, 195, 194, 210, 210, 195, 195, 194, 210, 210, 195, 195, 194, 210, 210, 195, 194, 210, 210, 210, 195, 194, 210, 210, 195, 195, 149, 165, 160, 16, 13, 240, 240, 18, 164, 130, 18, 114, 50, 82, 66, 66, 98, 0, 41, 9, 6, 240, 147, 51, 51, 0, 19, 112, 13, 7, 240, 18, 81, 82, 82, 82, 67, 67, 0, 21, 64, 27, 15, 0, 21, 37, 136, 114, 67, 210, 210, 210, 135, 89, 
83, 82, 66, 114, 66, 99, 67, 68, 75, 85, 34, 0, 15, 80, 42, 16, 240, 240, 99, 211, 210, 226, 226, 211, 211, 210, 226, 226, 36, 122, 100, 67, 82, 114, 82, 114, 82, 114, 67, 114, 67, 114, 66, 114, 82, 114, 67, 98, 84, 67, 82, 23, 98, 36, 0, 17, 23, 15, 0, 21, 38, 136, 98, 82, 82, 194, 210, 194, 210, 210, 210, 211, 210, 82, 105, 133, 0, 15, 112, 45, 18, 0, 6, 18, 243, 243, 242, 240, 18, 240, 18, 243, 242, 240, 18, 164, 34, 135, 18, 115, 68, 114, 98, 114, 114, 114, 114, 98, 115, 98, 115, 98, 114, 114, 114, 114, 99, 115, 68, 138, 148, 34, 0, 19, 25, 15, 0, 21, 37, 151, 114, 67, 82, 98, 66, 114, 75, 60, 50, 210, 210, 211, 211, 81, 105, 133, 0, 15, 112, 30, 15, 240, 240, 132, 150, 130, 195, 194, 210, 195, 194, 210, 168, 120, 147, 195, 194, 210, 210, 195, 194, 210, 210, 210, 195, 194, 0, 16, 80, 41, 20, 0, 28, 68, 35, 155, 131, 51, 178, 82, 162, 98, 162, 98, 162, 82, 179, 51, 184, 178, 20, 195, 240, 35, 240, 42, 172, 114, 131, 98, 162, 98, 162, 99, 115, 139, 183, 0, 6, 32, 43, 15, 240, 240, 67, 194, 210, 210, 195, 195, 194, 210, 210, 195, 36, 106, 84, 51, 83, 82, 82, 98, 67, 98, 67, 98, 66, 98, 82, 98, 82, 98, 67, 98, 67, 83, 66, 99, 66, 98, 0, 15, 80, 25, 10, 240, 240, 227, 115, 115, 0, 5, 98, 130, 115, 115, 114, 130, 130, 115, 115, 114, 130, 130, 115, 114, 0, 11, 32, 16, 0, 9, 35, 211, 211, 0, 9, 66, 226, 226, 211, 210, 226, 226, 211, 211, 210, 226, 226, 211, 211, 210, 226, 211, 210, 151, 164, 240, 240, 208, 41, 16, 240, 240, 98, 226, 226, 211, 211, 210, 226, 226, 211, 211, 83, 82, 83, 98, 67, 114, 35, 131, 19, 150, 167, 151, 147, 35, 115, 51, 114, 83, 98, 83, 98, 98, 98, 99, 0, 16, 80, 28, 10, 240, 162, 115, 115, 114, 130, 130, 115, 114, 130, 130, 130, 115, 114, 130, 130, 130, 115, 114, 130, 130, 115, 115, 114, 0, 11, 49, 24, 0, 33, 50, 52, 68, 114, 23, 23, 100, 53, 51, 84, 83, 82, 83, 98, 98, 83, 83, 98, 82, 99, 98, 82, 98, 98, 83, 98, 98, 83, 98, 98, 82, 99, 98, 82, 99, 83, 82, 98, 99, 67, 98, 98, 0, 24, 80, 34, 15, 0, 20, 115, 36, 106, 84, 51, 83, 82, 82, 98, 67, 98, 
67, 98, 66, 98, 82, 98, 82, 98, 67, 98, 67, 83, 66, 99, 66, 98, 0, 15, 80, 31, 16, 0, 22, 100, 168, 114, 82, 98, 114, 66, 130, 66, 130, 50, 146, 50, 146, 50, 130, 66, 130, 67, 98, 99, 51, 120, 164, 0, 17, 16, 39, 17, 0, 23, 114, 37, 122, 116, 67, 99, 83, 98, 114, 98, 114, 83, 114, 83, 114, 82, 114, 98, 114, 98, 98, 100, 52, 106, 114, 36, 146, 227, 227, 226, 242, 242, 0, 6, 16, 40, 16, 0, 22, 84, 34, 103, 18, 84, 52, 82, 98, 82, 114, 82, 114, 66, 115, 66, 115, 66, 114, 82, 114, 83, 83, 84, 52, 106, 116, 34, 226, 211, 211, 210, 226, 226, 240, 240, 144, 23, 13, 0, 18, 19, 35, 82, 20, 101, 131, 147, 163, 162, 178, 178, 163, 163, 162, 178, 178, 0, 14, 48, 23, 14, 0, 19, 117, 120, 98, 66, 82, 194, 196, 182, 150, 179, 194, 194, 66, 82, 89, 117, 0, 14, 112, 26, 11, 0, 10, 2, 131, 130, 146, 120, 56, 67, 130, 146, 146, 146, 146, 130, 146, 146, 146, 49, 86, 84, 0, 11, 112, 34, 15, 0, 20, 114, 98, 82, 98, 67, 83, 66, 99, 66, 99, 66, 98, 82, 98, 82, 83, 66, 99, 66, 98, 82, 83, 82, 68, 90, 100, 35, 0, 15, 80, 33, 16, 0, 22, 3, 130, 66, 114, 82, 99, 82, 98, 99, 67, 99, 66, 115, 51, 115, 50, 146, 34, 162, 34, 162, 18, 178, 18, 180, 196, 0, 17, 32, 55, 23, 0, 31, 83, 98, 99, 51, 83, 98, 82, 81, 17, 83, 82, 66, 18, 66, 98, 69, 51, 98, 50, 34, 50, 114, 50, 34, 35, 114, 34, 50, 34, 130, 34, 50, 19, 130, 18, 66, 18, 146, 18, 66, 18, 146, 17, 84, 164, 84, 178, 99, 0, 24, 32, 30, 19, 0, 26, 67, 115, 115, 83, 146, 67, 163, 35, 198, 213, 243, 244, 230, 195, 34, 179, 51, 162, 82, 146, 99, 99, 131, 0, 19, 80, 43, 19, 0, 26, 67, 130, 114, 114, 130, 99, 131, 82, 147, 67, 147, 66, 178, 50, 194, 50, 194, 34, 211, 18, 213, 243, 240, 19, 240, 18, 240, 34, 240, 18, 240, 19, 240, 18, 213, 228, 0, 6, 80, 21, 17, 0, 23, 107, 106, 226, 227, 211, 211, 211, 211, 211, 211, 211, 211, 219, 107, 0, 17, 96, 9, 18, 0, 33, 111, 63, 0, 36, 48, 11, 33, 0, 61, 127, 15, 63, 15, 0, 66, 48, 19, 14, 240, 240, 18, 51, 83, 50, 98, 50, 99, 50, 98, 50, 98, 51, 0, 44, 64, 15, 14, 0, 7, 35, 66, 69, 34, 82, 22, 66, 67, 0, 
    44,  48,
};  /* end of g_pui8Cmss32iData[] — RLE-compressed glyph data begun above */

//*****************************************************************************
//
// The font definition for the 32 point Cmss italic font.
//
// NOTE(review): field order follows the tFont layout (format, max width,
// height, baseline, per-character offset table, data pointer) — confirm
// against the grlib tFont declaration.
//
//*****************************************************************************
const tFont g_sFontCmss32i =
{
    //
    // The format of the font.
    //
    FONT_FMT_PIXEL_RLE,

    //
    // The maximum width of the font.
    //
    29,

    //
    // The height of the font.
    //
    33,

    //
    // The baseline of the font.
    //
    25,

    //
    // The offset to each character in the font.
    //
    {
        0,    5,    33,   53,   142,  194,  271,  330,
        342,  378,  414,  443,  489,  503,  512,  522,
        592,  637,  666,  702,  737,  775,  816,  857,
        885,  928,  969,  984,  1003, 1031, 1044, 1075,
        1106, 1166, 1217, 1264, 1313, 1362, 1409, 1457,
        1506, 1558, 1587, 1622, 1675, 1705, 1790, 1859,
        1908, 1956, 2016, 2065, 2110, 2162, 2213, 2267,
        2352, 2410, 2462, 2512, 2548, 2568, 2604, 2620,
        2629, 2642, 2669, 2711, 2734, 2779, 2804, 2834,
        2875, 2918, 2943, 2975, 3016, 3044, 3093, 3127,
        3158, 3197, 3237, 3260, 3283, 3309, 3343, 3376,
        3431, 3461, 3504, 3525, 3534, 3545, 3564,
    },

    //
    // A pointer to the actual font data
    //
    g_pui8Cmss32iData
};
195964.c
/* APPLE LOCAL file Radar 3830232 */ /* { dg-do compile { target *-*-darwin* } } */ /* { dg-options "-O2" } */ /* { dg-final { scan-assembler-not "\(jmp|b\)\[ \\t\]+_*init_iconv_desc" } } */ /* Contributed by Andrew Pinski 26 Oct 2004 <[email protected]> */ struct cset_converter { int func; int cd; }; void abort(void); int puts(const char*); int f(int i){return i;} void g(void){puts("hi");} struct conversion { int pair; int func; int fake_cd; }; static const struct conversion conversion_tab[] = { { 2, 2, 2 }, { 3, 3, 3 } }; static struct cset_converter init_iconv_desc (int i) { struct cset_converter ret; unsigned i1; char *pair; if (!i) { ret.func = 0; ret.cd = 0; return ret; } pair = __builtin_alloca(i*3); for (i1 = 0; i1 < (sizeof (conversion_tab) / sizeof ((conversion_tab)[0])); i1++) if (i == conversion_tab[i1].pair) { ret.func = conversion_tab[i1].func; ret.cd = conversion_tab[i1].fake_cd; return ret; } ret.func = 2; ret.cd = f(i); if (ret.cd == 4) { g(); ret.func = 4; } return ret; } struct f { struct cset_converter a; struct cset_converter b; }; void ff(struct f *a) { a->a = init_iconv_desc(0); /* Compiler was illegally optimizing this call into a tailcall (jmp). Radar 3830232 */ a->b = init_iconv_desc(1); } int main(void) { struct f a; ff (&a); if (a.a.func!=0 || a.a.cd !=0) abort(); if (a.b.func!=2 || a.b.cd !=1) abort(); return 0; }
272089.c
/***************************************************************************//**
 * # License
 *
 * The licensor of this software is Silicon Laboratories Inc. Your use of this
 * software is governed by the terms of Silicon Labs Master Software License
 * Agreement (MSLA) available at
 * www.silabs.com/about-us/legal/master-software-license-agreement. This
 * software is Third Party Software licensed by Silicon Labs from a third party
 * and is governed by the sections of the MSLA applicable to Third Party
 * Software and the additional terms set forth below.
 *
 ******************************************************************************/
/*
 *  FIPS-180-2 compliant SHA-384/512 implementation
 *
 *  Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  This file is part of mbed TLS (https://tls.mbed.org)
 */
/*
 *  The SHA-512 Secure Hash Standard was published by NIST in 2002.
 *
 *  http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
 */

#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif

#if defined(MBEDTLS_SHA512_C)

#include "mbedtls/sha512.h"

/* MSVC/Watcom lack the ULL suffix spelling; all other compilers use ULL. */
#if defined(_MSC_VER) || defined(__WATCOMC__)
  #define UL64(x) x##ui64
#else
  #define UL64(x) x##ULL
#endif

#include <string.h>

#if defined(MBEDTLS_SELF_TEST)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdio.h>
#include <stdlib.h>
#define mbedtls_printf printf
#define mbedtls_calloc calloc
#define mbedtls_free   free
#endif /* MBEDTLS_PLATFORM_C */
#endif /* MBEDTLS_SELF_TEST */

#if !defined(MBEDTLS_SHA512_ALT)

/* Implementation that should never be optimized out by the compiler
   (the volatile-qualified pointer prevents dead-store elimination). */
static void mbedtls_zeroize( void *v, size_t n ) {
    volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}

/*
 * 64-bit integer manipulation macros (big endian)
 */
#ifndef GET_UINT64_BE
#define GET_UINT64_BE(n,b,i)                            \
{                                                       \
    (n) = ( (uint64_t) (b)[(i)    ] << 56 )             \
        | ( (uint64_t) (b)[(i) + 1] << 48 )             \
        | ( (uint64_t) (b)[(i) + 2] << 40 )             \
        | ( (uint64_t) (b)[(i) + 3] << 32 )             \
        | ( (uint64_t) (b)[(i) + 4] << 24 )             \
        | ( (uint64_t) (b)[(i) + 5] << 16 )             \
        | ( (uint64_t) (b)[(i) + 6] <<  8 )             \
        | ( (uint64_t) (b)[(i) + 7]       );            \
}
#endif /* GET_UINT64_BE */

#ifndef PUT_UINT64_BE
#define PUT_UINT64_BE(n,b,i)                            \
{                                                       \
    (b)[(i)    ] = (unsigned char) ( (n) >> 56 );       \
    (b)[(i) + 1] = (unsigned char) ( (n) >> 48 );       \
    (b)[(i) + 2] = (unsigned char) ( (n) >> 40 );       \
    (b)[(i) + 3] = (unsigned char) ( (n) >> 32 );       \
    (b)[(i) + 4] = (unsigned char) ( (n) >> 24 );       \
    (b)[(i) + 5] = (unsigned char) ( (n) >> 16 );       \
    (b)[(i) + 6] = (unsigned char) ( (n) >>  8 );       \
    (b)[(i) + 7] = (unsigned char) ( (n)       );       \
}
#endif /* PUT_UINT64_BE */

void mbedtls_sha512_init( mbedtls_sha512_context *ctx )
{
    memset( ctx, 0, sizeof( mbedtls_sha512_context ) );
}

void mbedtls_sha512_free( mbedtls_sha512_context *ctx )
{
    if( ctx == NULL )
        return;

    /* Wipe intermediate state, which may contain secret material. */
    mbedtls_zeroize( ctx, sizeof( mbedtls_sha512_context ) );
}

void mbedtls_sha512_clone( mbedtls_sha512_context *dst,
                           const mbedtls_sha512_context *src )
{
    *dst = *src;
}

/*
 * SHA-512 context setup: load the FIPS-180-2 initial hash values.
 * is384 != 0 selects the SHA-384 truncated variant.
 */
int mbedtls_sha512_starts_ret( mbedtls_sha512_context *ctx, int is384 )
{
    ctx->total[0] = 0;
    ctx->total[1] = 0;

    if( is384 == 0 )
    {
        /* SHA-512 */
        ctx->state[0] = UL64(0x6A09E667F3BCC908);
        ctx->state[1] = UL64(0xBB67AE8584CAA73B);
        ctx->state[2] = UL64(0x3C6EF372FE94F82B);
        ctx->state[3] = UL64(0xA54FF53A5F1D36F1);
        ctx->state[4] = UL64(0x510E527FADE682D1);
        ctx->state[5] = UL64(0x9B05688C2B3E6C1F);
        ctx->state[6] = UL64(0x1F83D9ABFB41BD6B);
        ctx->state[7] = UL64(0x5BE0CD19137E2179);
    }
    else
    {
        /* SHA-384 */
        ctx->state[0] = UL64(0xCBBB9D5DC1059ED8);
        ctx->state[1] = UL64(0x629A292A367CD507);
        ctx->state[2] = UL64(0x9159015A3070DD17);
        ctx->state[3] = UL64(0x152FECD8F70E5939);
        ctx->state[4] = UL64(0x67332667FFC00B31);
        ctx->state[5] = UL64(0x8EB44A8768581511);
        ctx->state[6] = UL64(0xDB0C2E0D64F98FA7);
        ctx->state[7] = UL64(0x47B5481DBEFA4FA4);
    }

    ctx->is384 = is384;

    return( 0 );
}

#if !defined(MBEDTLS_DEPRECATED_REMOVED)
/* Deprecated void-returning wrapper kept for API compatibility. */
void mbedtls_sha512_starts( mbedtls_sha512_context *ctx,
                            int is384 )
{
    mbedtls_sha512_starts_ret( ctx, is384 );
}
#endif

#if !defined(MBEDTLS_SHA512_PROCESS_ALT)

/*
 * Round constants
 */
static const uint64_t K[80] =
{
    UL64(0x428A2F98D728AE22),  UL64(0x7137449123EF65CD),
    UL64(0xB5C0FBCFEC4D3B2F),  UL64(0xE9B5DBA58189DBBC),
    UL64(0x3956C25BF348B538),  UL64(0x59F111F1B605D019),
    UL64(0x923F82A4AF194F9B),  UL64(0xAB1C5ED5DA6D8118),
    UL64(0xD807AA98A3030242),  UL64(0x12835B0145706FBE),
    UL64(0x243185BE4EE4B28C),  UL64(0x550C7DC3D5FFB4E2),
    UL64(0x72BE5D74F27B896F),  UL64(0x80DEB1FE3B1696B1),
    UL64(0x9BDC06A725C71235),  UL64(0xC19BF174CF692694),
    UL64(0xE49B69C19EF14AD2),  UL64(0xEFBE4786384F25E3),
    UL64(0x0FC19DC68B8CD5B5),  UL64(0x240CA1CC77AC9C65),
    UL64(0x2DE92C6F592B0275),  UL64(0x4A7484AA6EA6E483),
    UL64(0x5CB0A9DCBD41FBD4),  UL64(0x76F988DA831153B5),
    UL64(0x983E5152EE66DFAB),  UL64(0xA831C66D2DB43210),
    UL64(0xB00327C898FB213F),  UL64(0xBF597FC7BEEF0EE4),
    UL64(0xC6E00BF33DA88FC2),  UL64(0xD5A79147930AA725),
    UL64(0x06CA6351E003826F),  UL64(0x142929670A0E6E70),
    UL64(0x27B70A8546D22FFC),  UL64(0x2E1B21385C26C926),
    UL64(0x4D2C6DFC5AC42AED),  UL64(0x53380D139D95B3DF),
    UL64(0x650A73548BAF63DE),  UL64(0x766A0ABB3C77B2A8),
    UL64(0x81C2C92E47EDAEE6),  UL64(0x92722C851482353B),
    UL64(0xA2BFE8A14CF10364),  UL64(0xA81A664BBC423001),
    UL64(0xC24B8B70D0F89791),  UL64(0xC76C51A30654BE30),
    UL64(0xD192E819D6EF5218),  UL64(0xD69906245565A910),
    UL64(0xF40E35855771202A),  UL64(0x106AA07032BBD1B8),
    UL64(0x19A4C116B8D2D0C8),  UL64(0x1E376C085141AB53),
    UL64(0x2748774CDF8EEB99),  UL64(0x34B0BCB5E19B48A8),
    UL64(0x391C0CB3C5C95A63),  UL64(0x4ED8AA4AE3418ACB),
    UL64(0x5B9CCA4F7763E373),  UL64(0x682E6FF3D6B2B8A3),
    UL64(0x748F82EE5DEFB2FC),  UL64(0x78A5636F43172F60),
    UL64(0x84C87814A1F0AB72),  UL64(0x8CC702081A6439EC),
    UL64(0x90BEFFFA23631E28),  UL64(0xA4506CEBDE82BDE9),
    UL64(0xBEF9A3F7B2C67915),  UL64(0xC67178F2E372532B),
    UL64(0xCA273ECEEA26619C),  UL64(0xD186B8C721C0C207),
    UL64(0xEADA7DD6CDE0EB1E),  UL64(0xF57D4F7FEE6ED178),
    UL64(0x06F067AA72176FBA),  UL64(0x0A637DC5A2C898A6),
    UL64(0x113F9804BEF90DAE),  UL64(0x1B710B35131C471B),
    UL64(0x28DB77F523047D84),  UL64(0x32CAAB7B40C72493),
    UL64(0x3C9EBE0A15C9BEBC),  UL64(0x431D67C49C100D4C),
    UL64(0x4CC5D4BECB3E42B6),  UL64(0x597F299CFC657E2A),
    UL64(0x5FCB6FAB3AD6FAEC),  UL64(0x6C44198C4A475817)
};

/* Compress one 128-byte block into ctx->state (FIPS 180-2 SHA-512 core). */
int mbedtls_internal_sha512_process( mbedtls_sha512_context *ctx,
                                     const unsigned char data[128] )
{
    int i;
    uint64_t temp1, temp2, W[80];
    uint64_t A, B, C, D, E, F, G, H;

#define  SHR(x,n) (x >> n)
#define ROTR(x,n) (SHR(x,n) | (x << (64 - n)))

#define S0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^  SHR(x, 7))
#define S1(x) (ROTR(x,19) ^ ROTR(x,61) ^  SHR(x, 6))

#define S2(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39))
#define S3(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41))

#define F0(x,y,z) ((x & y) | (z & (x | y)))
#define F1(x,y,z) (z ^ (x & (y ^ z)))

#define P(a,b,c,d,e,f,g,h,x,K)                  \
{                                               \
    temp1 = h + S3(e) + F1(e,f,g) + K + x;      \
    temp2 = S2(a) + F0(a,b,c);                  \
    d += temp1; h = temp1 + temp2;              \
}

    /* Message schedule: 16 words from the block, 64 derived words. */
    for( i = 0; i < 16; i++ )
    {
        GET_UINT64_BE( W[i], data, i << 3 );
    }

    for( ; i < 80; i++ )
    {
        W[i] = S1(W[i -  2]) + W[i -  7] +
               S0(W[i - 15]) + W[i - 16];
    }

    A = ctx->state[0];
    B = ctx->state[1];
    C = ctx->state[2];
    D = ctx->state[3];
    E = ctx->state[4];
    F = ctx->state[5];
    G = ctx->state[6];
    H = ctx->state[7];
    i = 0;

    /* 80 rounds, unrolled 8 at a time by rotating the working variables. */
    do
    {
        P( A, B, C, D, E, F, G, H, W[i], K[i] ); i++;
        P( H, A, B, C, D, E, F, G, W[i], K[i] ); i++;
        P( G, H, A, B, C, D, E, F, W[i], K[i] ); i++;
        P( F, G, H, A, B, C, D, E, W[i], K[i] ); i++;
        P( E, F, G, H, A, B, C, D, W[i], K[i] ); i++;
        P( D, E, F, G, H, A, B, C, W[i], K[i] ); i++;
        P( C, D, E, F, G, H, A, B, W[i], K[i] ); i++;
        P( B, C, D, E, F, G, H, A, W[i], K[i] ); i++;
    }
    while( i < 80 );

    ctx->state[0] += A;
    ctx->state[1] += B;
    ctx->state[2] += C;
    ctx->state[3] += D;
    ctx->state[4] += E;
    ctx->state[5] += F;
    ctx->state[6] += G;
    ctx->state[7] += H;

    return( 0 );
}

#if !defined(MBEDTLS_DEPRECATED_REMOVED)
/* Deprecated void-returning wrapper kept for API compatibility. */
void mbedtls_sha512_process( mbedtls_sha512_context *ctx,
                             const unsigned char data[128] )
{
    mbedtls_internal_sha512_process( ctx, data );
}
#endif
#endif /* !MBEDTLS_SHA512_PROCESS_ALT */

/*
 * SHA-512 process buffer: absorb ilen bytes, buffering partial blocks
 * in ctx->buffer (block size is 128 bytes; total[] is a 128-bit counter).
 */
int mbedtls_sha512_update_ret( mbedtls_sha512_context *ctx,
                               const unsigned char *input,
                               size_t ilen )
{
    int ret;
    size_t fill;
    unsigned int left;

    if( ilen == 0 )
        return( 0 );

    left = (unsigned int) (ctx->total[0] & 0x7F);
    fill = 128 - left;

    ctx->total[0] += (uint64_t) ilen;

    /* Carry into the high word of the 128-bit byte counter. */
    if( ctx->total[0] < (uint64_t) ilen )
        ctx->total[1]++;

    if( left && ilen >= fill )
    {
        memcpy( (void *) (ctx->buffer + left), input, fill );

        if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
            return( ret );

        input += fill;
        ilen  -= fill;
        left = 0;
    }

    while( ilen >= 128 )
    {
        if( ( ret = mbedtls_internal_sha512_process( ctx, input ) ) != 0 )
            return( ret );

        input += 128;
        ilen  -= 128;
    }

    if( ilen > 0 )
        memcpy( (void *) (ctx->buffer + left), input, ilen );

    return( 0 );
}

#if !defined(MBEDTLS_DEPRECATED_REMOVED)
/* Deprecated void-returning wrapper kept for API compatibility. */
void mbedtls_sha512_update( mbedtls_sha512_context *ctx,
                            const unsigned char *input,
                            size_t ilen )
{
    mbedtls_sha512_update_ret( ctx, input, ilen );
}
#endif

/*
 * SHA-512 final digest
 */
int mbedtls_sha512_finish_ret( mbedtls_sha512_context *ctx,
                               unsigned char output[64] )
{
    int ret;
    unsigned used;
    uint64_t high, low;

    /*
     * Add padding: 0x80 then 0x00 until 16 bytes remain for the length
     */
    used = ctx->total[0] & 0x7F;

    ctx->buffer[used++] = 0x80;

    if( used <= 112 )
    {
        /* Enough room for padding + length in current block */
        memset( ctx->buffer + used, 0, 112 - used );
    }
    else
    {
        /* We'll need an extra block */
        memset( ctx->buffer + used, 0, 128 - used );

        if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
            return( ret );

        memset( ctx->buffer, 0, 112 );
    }

    /*
     * Add message length (in bits, big endian, 128-bit)
     */
    high = ( ctx->total[0] >> 61 )
         | ( ctx->total[1] <<  3 );
    low  = ( ctx->total[0] <<  3 );

    PUT_UINT64_BE( high, ctx->buffer, 112 );
    PUT_UINT64_BE( low,  ctx->buffer, 120 );

    if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
        return( ret );

    /*
     * Output final state (SHA-384 truncates to the first 6 words)
     */
    PUT_UINT64_BE( ctx->state[0], output,  0 );
    PUT_UINT64_BE( ctx->state[1], output,  8 );
    PUT_UINT64_BE( ctx->state[2], output, 16 );
    PUT_UINT64_BE( ctx->state[3], output, 24 );
    PUT_UINT64_BE( ctx->state[4], output, 32 );
    PUT_UINT64_BE( ctx->state[5], output, 40 );

    if( ctx->is384 == 0 )
    {
        PUT_UINT64_BE( ctx->state[6], output, 48 );
        PUT_UINT64_BE( ctx->state[7], output, 56 );
    }

    return( 0 );
}

#if !defined(MBEDTLS_DEPRECATED_REMOVED)
/* Deprecated void-returning wrapper kept for API compatibility. */
void mbedtls_sha512_finish( mbedtls_sha512_context *ctx,
                            unsigned char output[64] )
{
    mbedtls_sha512_finish_ret( ctx, output );
}
#endif

#endif /* !MBEDTLS_SHA512_ALT */

/*
 * output = SHA-512( input buffer ) — one-shot convenience wrapper.
 */
int mbedtls_sha512_ret( const unsigned char *input,
                        size_t ilen,
                        unsigned char output[64],
                        int is384 )
{
    int ret;
    mbedtls_sha512_context ctx;

    mbedtls_sha512_init( &ctx );

    if( ( ret = mbedtls_sha512_starts_ret( &ctx, is384 ) ) != 0 )
        goto exit;

    if( ( ret = mbedtls_sha512_update_ret( &ctx, input, ilen ) ) != 0 )
        goto exit;

    if( ( ret = mbedtls_sha512_finish_ret( &ctx, output ) ) != 0 )
        goto exit;

exit:
    mbedtls_sha512_free( &ctx );

    return( ret );
}

#if !defined(MBEDTLS_DEPRECATED_REMOVED)
/* Deprecated void-returning wrapper kept for API compatibility. */
void mbedtls_sha512( const unsigned char *input,
                     size_t ilen,
                     unsigned char output[64],
                     int is384 )
{
    mbedtls_sha512_ret( input, ilen, output, is384 );
}
#endif

#if defined(MBEDTLS_SELF_TEST)

/*
 * FIPS-180-2 test vectors
 */
static const unsigned char sha512_test_buf[3][113] =
{
    { "abc" },
    { "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
      "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" },
    { "" }
};

static const size_t sha512_test_buflen[3] =
{
    3, 112, 1000
};

static const unsigned char sha512_test_sum[6][64] =
{
    /*
     * SHA-384 test vectors
     */
    { 0xCB, 0x00, 0x75, 0x3F, 0x45, 0xA3, 0x5E, 0x8B,
      0xB5, 0xA0, 0x3D, 0x69, 0x9A, 0xC6, 0x50, 0x07,
      0x27, 0x2C, 0x32, 0xAB, 0x0E, 0xDE, 0xD1, 0x63,
      0x1A, 0x8B, 0x60, 0x5A, 0x43, 0xFF, 0x5B, 0xED,
      0x80, 0x86, 0x07, 0x2B, 0xA1, 0xE7, 0xCC, 0x23,
      0x58, 0xBA, 0xEC, 0xA1, 0x34, 0xC8, 0x25, 0xA7 },
    { 0x09, 0x33, 0x0C, 0x33, 0xF7, 0x11, 0x47, 0xE8,
      0x3D, 0x19, 0x2F, 0xC7, 0x82, 0xCD, 0x1B, 0x47,
      0x53, 0x11, 0x1B, 0x17, 0x3B, 0x3B, 0x05, 0xD2,
      0x2F, 0xA0, 0x80, 0x86, 0xE3, 0xB0, 0xF7, 0x12,
      0xFC, 0xC7, 0xC7, 0x1A, 0x55, 0x7E, 0x2D, 0xB9,
      0x66, 0xC3, 0xE9, 0xFA, 0x91, 0x74, 0x60, 0x39 },
    { 0x9D, 0x0E, 0x18, 0x09, 0x71, 0x64, 0x74, 0xCB,
      0x08, 0x6E, 0x83, 0x4E, 0x31, 0x0A, 0x4A, 0x1C,
      0xED, 0x14, 0x9E, 0x9C, 0x00, 0xF2, 0x48, 0x52,
      0x79, 0x72, 0xCE, 0xC5, 0x70, 0x4C, 0x2A, 0x5B,
      0x07, 0xB8, 0xB3, 0xDC, 0x38, 0xEC, 0xC4, 0xEB,
      0xAE, 0x97, 0xDD, 0xD8, 0x7F, 0x3D, 0x89, 0x85 },

    /*
     * SHA-512 test vectors
     */
    { 0xDD, 0xAF, 0x35, 0xA1, 0x93, 0x61, 0x7A, 0xBA,
      0xCC, 0x41, 0x73, 0x49, 0xAE, 0x20, 0x41, 0x31,
      0x12, 0xE6, 0xFA, 0x4E, 0x89, 0xA9, 0x7E, 0xA2,
      0x0A, 0x9E, 0xEE, 0xE6, 0x4B, 0x55, 0xD3, 0x9A,
      0x21, 0x92, 0x99, 0x2A, 0x27, 0x4F, 0xC1, 0xA8,
      0x36, 0xBA, 0x3C, 0x23, 0xA3, 0xFE, 0xEB, 0xBD,
      0x45, 0x4D, 0x44, 0x23, 0x64, 0x3C, 0xE8, 0x0E,
      0x2A, 0x9A, 0xC9, 0x4F, 0xA5, 0x4C, 0xA4, 0x9F },
    { 0x8E, 0x95, 0x9B, 0x75, 0xDA, 0xE3, 0x13, 0xDA,
      0x8C, 0xF4, 0xF7, 0x28, 0x14, 0xFC, 0x14, 0x3F,
      0x8F, 0x77, 0x79, 0xC6, 0xEB, 0x9F, 0x7F, 0xA1,
      0x72, 0x99, 0xAE, 0xAD, 0xB6, 0x88, 0x90, 0x18,
      0x50, 0x1D, 0x28, 0x9E, 0x49, 0x00, 0xF7, 0xE4,
      0x33, 0x1B, 0x99, 0xDE, 0xC4, 0xB5, 0x43, 0x3A,
      0xC7, 0xD3, 0x29, 0xEE, 0xB6, 0xDD, 0x26, 0x54,
      0x5E, 0x96, 0xE5, 0x5B, 0x87, 0x4B, 0xE9, 0x09 },
    { 0xE7, 0x18, 0x48, 0x3D, 0x0C, 0xE7, 0x69, 0x64,
      0x4E, 0x2E, 0x42, 0xC7, 0xBC, 0x15, 0xB4, 0x63,
      0x8E, 0x1F, 0x98, 0xB1, 0x3B, 0x20, 0x44, 0x28,
      0x56, 0x32, 0xA8, 0x03, 0xAF, 0xA9, 0x73, 0xEB,
      0xDE, 0x0F, 0xF2, 0x44, 0x87, 0x7E, 0xA6, 0x0A,
      0x4C, 0xB0, 0x43, 0x2C, 0xE5, 0x77, 0xC3, 0x1B,
      0xEB, 0x00, 0x9C, 0x5C, 0x2C, 0x49, 0xAA, 0x2E,
      0x4E, 0xAD, 0xB2, 0x17, 0xAD, 0x8C, 0xC0, 0x9B }
};

/*
 * Checkup routine: i = 0..2 exercise SHA-384 (k == 1), i = 3..5 SHA-512.
 * The third vector of each set hashes one million 'a' characters fed in
 * 1000-byte chunks.
 */
int mbedtls_sha512_self_test( int verbose )
{
    int i, j, k, buflen, ret = 0;
    unsigned char *buf;
    unsigned char sha512sum[64];
    mbedtls_sha512_context ctx;

    buf = mbedtls_calloc( 1024, sizeof(unsigned char) );
    if( NULL == buf )
    {
        if( verbose != 0 )
            mbedtls_printf( "Buffer allocation failed\n" );

        return( 1 );
    }

    mbedtls_sha512_init( &ctx );

    for( i = 0; i < 6; i++ )
    {
        j = i % 3;
        k = i < 3;

        if( verbose != 0 )
            mbedtls_printf( "  SHA-%d test #%d: ", 512 - k * 128, j + 1 );

        if( ( ret = mbedtls_sha512_starts_ret( &ctx, k ) ) != 0 )
            goto fail;

        if( j == 2 )
        {
            memset( buf, 'a', buflen = 1000 );

            for( j = 0; j < 1000; j++ )
            {
                ret = mbedtls_sha512_update_ret( &ctx, buf, buflen );
                if( ret != 0 )
                    goto fail;
            }
        }
        else
        {
            ret = mbedtls_sha512_update_ret( &ctx, sha512_test_buf[j],
                                             sha512_test_buflen[j] );
            if( ret != 0 )
                goto fail;
        }

        if( ( ret = mbedtls_sha512_finish_ret( &ctx, sha512sum ) ) != 0 )
            goto fail;

        if( memcmp( sha512sum, sha512_test_sum[i], 64 - k * 16 ) != 0 )
        {
            ret = 1;
            goto fail;
        }

        if( verbose != 0 )
            mbedtls_printf( "passed\n" );
    }

    if( verbose != 0 )
        mbedtls_printf( "\n" );

    goto exit;

fail:
    if( verbose != 0 )
        mbedtls_printf( "failed\n" );

exit:
    mbedtls_sha512_free( &ctx );
    mbedtls_free( buf );

    return( ret );
}

#endif /* MBEDTLS_SELF_TEST */

#endif /* MBEDTLS_SHA512_C */
293647.c
#include <stdbool.h>

/*
 * Checklist of documents present in a personnel file ("dosar" is Romanian
 * for file/dossier).  Each member is a 1-bit presence flag, so the whole
 * record packs into a single byte-sized bitfield group.
 *
 * NOTE(review): the per-field meanings below are translated from the
 * Romanian names — confirm with the owner.
 */
typedef struct {
    bool buletin: 1;         /* identity card */
    bool cert_casatorie: 1;  /* marriage certificate */
    bool dipl_licenta: 1;    /* bachelor's diploma */
    bool dipl_master: 1;     /* master's diploma */
    bool dipl_doctor: 1;     /* doctoral diploma */
    bool fisa_lichidare: 1;  /* clearance (settlement) form */
    bool cert_copii: 1;      /* children's certificates */
} dosar;
809534.c
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. struct Pair { int X, Y; }; struct Pair get() { struct Pair P = {4, 5}; return P; } void printfI(int); struct Pair P; int Main() { P = get(); printfI(P.X); printfI(P.Y); return 0; }
495139.c
/** @file
  CPU Register Table Library functions.

  Copyright (c) 2017 - 2019, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "RegisterCpuFeatures.h"

/**
  Checks if two CPU feature bit masks are equal.

  @param[in]  FirstFeatureMask  The first input CPU feature bit mask
  @param[in]  SecondFeatureMask The second input CPU feature bit mask

  @retval TRUE  Two CPU feature bit masks are equal.
  @retval FALSE Two CPU feature bit masks are not equal.
**/
BOOLEAN
IsCpuFeatureMatch (
  IN UINT8               *FirstFeatureMask,
  IN UINT8               *SecondFeatureMask
  )
{
  UINTN                 BitMaskSize;

  //
  // All masks in this library share the size of the platform-wide
  // PcdCpuFeaturesSetting bit mask.
  //
  BitMaskSize = PcdGetSize (PcdCpuFeaturesSetting);
  if (CompareMem (FirstFeatureMask, SecondFeatureMask, BitMaskSize) == 0) {
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Function that uses DEBUG() macros to display the contents of a a CPU
  feature bit mask.

  @param[in]  FeatureMask  A pointer to the CPU feature bit mask.
**/
VOID
DumpCpuFeatureMask (
  IN UINT8               *FeatureMask
  )
{
  UINTN                  Index;
  UINT8                  *Data8;
  UINTN                  BitMaskSize;

  BitMaskSize = PcdGetSize (PcdCpuFeaturesSetting);
  Data8       = (UINT8 *) FeatureMask;
  for (Index = 0; Index < BitMaskSize; Index++) {
    DEBUG ((DEBUG_INFO, " %02x ", *Data8++));
  }
  DEBUG ((DEBUG_INFO, "\n"));
}

/**
  Dump CPU feature name or CPU feature bit mask.

  @param[in]  CpuFeature   Pointer to CPU_FEATURES_ENTRY
**/
VOID
DumpCpuFeature (
  IN CPU_FEATURES_ENTRY  *CpuFeature
  )
{
  // Prefer the human-readable name when one was registered.
  if (CpuFeature->FeatureName != NULL) {
    DEBUG ((DEBUG_INFO, "FeatureName: %a\n", CpuFeature->FeatureName));
  } else {
    DEBUG ((DEBUG_INFO, "FeatureMask = "));
    DumpCpuFeatureMask (CpuFeature->FeatureMask);
  }
}

/**
  Determines if the feature bit mask is in dependent CPU feature bit mask buffer.

  @param[in]  FeatureMask        Pointer to CPU feature bit mask
  @param[in]  DependentBitMask   Pointer to dependent CPU feature bit mask buffer

  @retval TRUE  The feature bit mask is in dependent CPU feature bit mask buffer.
  @retval FALSE The feature bit mask is not in dependent CPU feature bit mask buffer.
**/
BOOLEAN
IsBitMaskMatchCheck (
  IN UINT8        *FeatureMask,
  IN UINT8        *DependentBitMask
  )
{
  UINTN      Index;
  UINTN      BitMaskSize;
  UINT8      *Data1;
  UINT8      *Data2;

  BitMaskSize = PcdGetSize (PcdCpuFeaturesSetting);

  Data1 = FeatureMask;
  Data2 = DependentBitMask;
  //
  // Any overlapping bit counts as a match; this is an intersection test,
  // not an equality test.
  //
  for (Index = 0; Index < BitMaskSize; Index++) {
    if (((*(Data1++)) & (*(Data2++))) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Try to find the specified CPU feature in the former/later feature list.

  @param[in]  FeatureList        Pointer to dependent CPU feature list
  @param[in]  CurrentEntry       Pointer to current CPU feature entry.
  @param[in]  SearchFormer       Find in former features or later features.
  @param[in]  FeatureMask        Pointer to CPU feature bit mask

  @retval TRUE  The feature bit mask is in dependent CPU feature bit mask buffer.
  @retval FALSE The feature bit mask is not in dependent CPU feature bit mask buffer.
**/
BOOLEAN
FindSpecifyFeature (
  IN LIST_ENTRY          *FeatureList,
  IN LIST_ENTRY          *CurrentEntry,
  IN BOOLEAN             SearchFormer,
  IN UINT8               *FeatureMask
  )
{
  CPU_FEATURES_ENTRY     *CpuFeature;
  LIST_ENTRY             *NextEntry;

  //
  // Check whether exist the not neighborhood entry first.
  // If not exist, return FALSE means not found status.
  // (Only entries at least two links away are searched; immediate
  // neighbors are handled by the caller's neighborhood logic.)
  //
  if (SearchFormer) {
    NextEntry = CurrentEntry->BackLink;
    if (IsNull (FeatureList, NextEntry)) {
      return FALSE;
    }

    NextEntry = NextEntry->BackLink;
    if (IsNull (FeatureList, NextEntry)) {
      return FALSE;
    }

    NextEntry = CurrentEntry->BackLink->BackLink;
  } else {
    NextEntry = CurrentEntry->ForwardLink;
    if (IsNull (FeatureList, NextEntry)) {
      return FALSE;
    }

    NextEntry = NextEntry->ForwardLink;
    if (IsNull (FeatureList, NextEntry)) {
      return FALSE;
    }

    NextEntry = CurrentEntry->ForwardLink->ForwardLink;
  }

  while (!IsNull (FeatureList, NextEntry)) {
    CpuFeature = CPU_FEATURE_ENTRY_FROM_LINK (NextEntry);

    if (IsBitMaskMatchCheck (FeatureMask, CpuFeature->FeatureMask)) {
      return TRUE;
    }

    if (SearchFormer) {
      NextEntry = NextEntry->BackLink;
    } else {
      NextEntry = NextEntry->ForwardLink;
    }
  }

  return FALSE;
}

/**
  Return feature dependence result.

  @param[in]  CpuFeature            Pointer to CPU feature.
  @param[in]  Before                Check before dependence or after.
  @param[in]  NextCpuFeatureMask    Pointer to next CPU feature Mask.

  @retval     return the dependence result.
**/
CPU_FEATURE_DEPENDENCE_TYPE
DetectFeatureScope (
  IN CPU_FEATURES_ENTRY         *CpuFeature,
  IN BOOLEAN                    Before,
  IN UINT8                      *NextCpuFeatureMask
  )
{
  //
  // if need to check before type dependence but the feature after current feature is not
  // exist, means this before type dependence not valid, just return NoneDepType.
  // Just like Feature A has a dependence of feature B, but Feature B not installed, so
  // Feature A maybe insert to the last entry of the list. In this case, for below code,
  // Featrure A has depend of feature B, but it is the last entry of the list, so the
  // NextCpuFeatureMask is NULL, so the dependence for feature A here is useless and code
  // just return NoneDepType.
  //
  if (NextCpuFeatureMask == NULL) {
    return NoneDepType;
  }

  //
  // Checks are ordered widest scope first (package > core > thread) so the
  // strongest dependence wins.
  //
  if (Before) {
    if ((CpuFeature->PackageBeforeFeatureBitMask != NULL) &&
        IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->PackageBeforeFeatureBitMask)) {
      return PackageDepType;
    }

    if ((CpuFeature->CoreBeforeFeatureBitMask != NULL) &&
        IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->CoreBeforeFeatureBitMask)) {
      return CoreDepType;
    }

    if ((CpuFeature->BeforeFeatureBitMask != NULL) &&
        IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->BeforeFeatureBitMask)) {
      return ThreadDepType;
    }

    return NoneDepType;
  }

  if ((CpuFeature->PackageAfterFeatureBitMask != NULL) &&
      IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->PackageAfterFeatureBitMask)) {
    return PackageDepType;
  }

  if ((CpuFeature->CoreAfterFeatureBitMask != NULL) &&
      IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->CoreAfterFeatureBitMask)) {
    return CoreDepType;
  }

  if ((CpuFeature->AfterFeatureBitMask != NULL) &&
      IsBitMaskMatchCheck (NextCpuFeatureMask, CpuFeature->AfterFeatureBitMask)) {
    return ThreadDepType;
  }

  return NoneDepType;
}

/**
  Return feature dependence result.

  @param[in]  CpuFeature            Pointer to CPU feature.
  @param[in]  Before                Check before dependence or after.
  @param[in]  FeatureList           Pointer to CPU feature list.

  @retval     return the dependence result.
**/
CPU_FEATURE_DEPENDENCE_TYPE
DetectNoneNeighborhoodFeatureScope (
  IN CPU_FEATURES_ENTRY         *CpuFeature,
  IN BOOLEAN                    Before,
  IN LIST_ENTRY                 *FeatureList
  )
{
  //
  // Same scope ordering as DetectFeatureScope, but the dependent feature is
  // searched for anywhere beyond the immediate neighbor via FindSpecifyFeature.
  //
  if (Before) {
    if ((CpuFeature->PackageBeforeFeatureBitMask != NULL) &&
        FindSpecifyFeature(FeatureList, &CpuFeature->Link, FALSE, CpuFeature->PackageBeforeFeatureBitMask)) {
      return PackageDepType;
    }

    if ((CpuFeature->CoreBeforeFeatureBitMask != NULL) &&
        FindSpecifyFeature(FeatureList, &CpuFeature->Link, FALSE, CpuFeature->CoreBeforeFeatureBitMask)) {
      return CoreDepType;
    }

    if ((CpuFeature->BeforeFeatureBitMask != NULL) &&
        FindSpecifyFeature(FeatureList, &CpuFeature->Link, FALSE, CpuFeature->BeforeFeatureBitMask)) {
      return ThreadDepType;
    }

    return NoneDepType;
  }

  if ((CpuFeature->PackageAfterFeatureBitMask != NULL) &&
      FindSpecifyFeature(FeatureList, &CpuFeature->Link, TRUE, CpuFeature->PackageAfterFeatureBitMask)) {
    return PackageDepType;
  }

  if ((CpuFeature->CoreAfterFeatureBitMask != NULL) &&
      FindSpecifyFeature(FeatureList, &CpuFeature->Link, TRUE, CpuFeature->CoreAfterFeatureBitMask)) {
    return CoreDepType;
  }

  if ((CpuFeature->AfterFeatureBitMask != NULL) &&
      FindSpecifyFeature(FeatureList, &CpuFeature->Link, TRUE, CpuFeature->AfterFeatureBitMask)) {
    return ThreadDepType;
  }

  return NoneDepType;
}

/**
  Base on dependence relationship to adjust feature dependence.

  ONLY when the feature before (or after) the found feature also has a
  dependence on the found feature. In this case, driver needs to base on
  the dependence relationship to decide how to insert the current feature
  and adjust the feature dependence.

  @param[in, out]  PreviousFeature    CPU feature currently before the found one.
  @param[in, out]  CurrentFeature     CPU feature needing adjustment.
  @param[in]       FindFeature        CPU feature which current feature depends on.
  @param[in]       Before             Before or after dependence relationship.

  @retval   TRUE   means the current feature dependence has been adjusted.
  @retval   FALSE  means the previous feature dependence has been adjusted,
                   or the previous feature has no dependence with the found one.
**/
BOOLEAN
AdjustFeaturesDependence (
  IN OUT CPU_FEATURES_ENTRY         *PreviousFeature,
  IN OUT CPU_FEATURES_ENTRY         *CurrentFeature,
  IN     CPU_FEATURES_ENTRY         *FindFeature,
  IN     BOOLEAN                    Before
  )
{
  CPU_FEATURE_DEPENDENCE_TYPE            PreDependType;
  CPU_FEATURE_DEPENDENCE_TYPE            CurrentDependType;

  PreDependType     = DetectFeatureScope(PreviousFeature, Before, FindFeature->FeatureMask);
  CurrentDependType = DetectFeatureScope(CurrentFeature, Before, FindFeature->FeatureMask);

  //
  // If previous feature has no dependence with the find featue.
  // return FALSE.
  //
  if (PreDependType == NoneDepType) {
    return FALSE;
  }

  //
  // If both feature have dependence, keep the one which needs use more
  // processors and clear the dependence for the other one.
  // (Relies on the enum ordering NoneDepType < ThreadDepType < CoreDepType
  // < PackageDepType.)
  //
  if (PreDependType >= CurrentDependType) {
    return TRUE;
  } else {
    return FALSE;
  }
}

/**
  Base on dependence relationship to adjust feature order.

  @param[in]       FeatureList        Pointer to CPU feature list
  @param[in, out]  FindEntry          The entry this feature depends on.
  @param[in, out]  CurrentEntry       The entry for this feature.
  @param[in]       Before             Before or after dependence relationship.
**/
VOID
AdjustEntry (
  IN      LIST_ENTRY                *FeatureList,
  IN OUT  LIST_ENTRY                *FindEntry,
  IN OUT  LIST_ENTRY                *CurrentEntry,
  IN      BOOLEAN                   Before
  )
{
  LIST_ENTRY                *PreviousEntry;
  CPU_FEATURES_ENTRY        *PreviousFeature;
  CPU_FEATURES_ENTRY        *CurrentFeature;
  CPU_FEATURES_ENTRY        *FindFeature;

  //
  // For CPU feature which has core or package type dependence, later code need to insert
  // AcquireSpinLock/ReleaseSpinLock logic to sequency the execute order.
  // So if driver finds both feature A and B need to execute before feature C, driver will
  // base on dependence type of feature A and B to update the logic here.
  // For example, feature A has package type dependence and feature B has core type dependence,
  // because package type dependence need to wait for more processors which has strong dependence
  // than core type dependence. So driver will adjust the feature order to B -> A -> C. and driver
  // will remove the feature dependence in feature B.
  // Driver just needs to make sure before feature C been executed, feature A has finished its task
  // in all all thread. Feature A finished in all threads also means feature B have finshed in all
  // threads.
  //
  if (Before) {
    PreviousEntry = GetPreviousNode (FeatureList, FindEntry);
  } else {
    PreviousEntry = GetNextNode (FeatureList, FindEntry);
  }

  CurrentFeature  = CPU_FEATURE_ENTRY_FROM_LINK (CurrentEntry);
  RemoveEntryList (CurrentEntry);

  if (IsNull (FeatureList, PreviousEntry)) {
    //
    // If not exist the previous or next entry, just insert the current entry.
    //
    if (Before) {
      InsertTailList (FindEntry, CurrentEntry);
    } else {
      InsertHeadList (FindEntry, CurrentEntry);
    }
  } else {
    //
    // If exist the previous or next entry, need to check it before insert curent entry.
    //
    PreviousFeature = CPU_FEATURE_ENTRY_FROM_LINK (PreviousEntry);
    FindFeature     = CPU_FEATURE_ENTRY_FROM_LINK (FindEntry);

    if (AdjustFeaturesDependence (PreviousFeature, CurrentFeature, FindFeature, Before)) {
      //
      // Return TRUE means current feature dependence has been cleared and the previous
      // feature dependence has been kept and used. So insert current feature before (or after)
      // the previous feature.
      //
      if (Before) {
        InsertTailList (PreviousEntry, CurrentEntry);
      } else {
        InsertHeadList (PreviousEntry, CurrentEntry);
      }
    } else {
      if (Before) {
        InsertTailList (FindEntry, CurrentEntry);
      } else {
        InsertHeadList (FindEntry, CurrentEntry);
      }
    }
  }
}

/**
  Checks and adjusts current CPU features per dependency relationship.

  @param[in]  FeatureList        Pointer to CPU feature list
  @param[in]  CurrentEntry       Pointer to current checked CPU feature
  @param[in]  FeatureMask        The feature bit mask.

  @retval     return Swapped info.
**/
BOOLEAN
InsertToBeforeEntry (
  IN LIST_ENTRY              *FeatureList,
  IN LIST_ENTRY              *CurrentEntry,
  IN UINT8                   *FeatureMask
  )
{
  LIST_ENTRY                 *CheckEntry;
  CPU_FEATURES_ENTRY         *CheckFeature;
  BOOLEAN                    Swapped;

  Swapped = FALSE;

  //
  // Check all features dispatched before this entry
  //
  CheckEntry = GetFirstNode (FeatureList);
  while (CheckEntry != CurrentEntry) {
    CheckFeature = CPU_FEATURE_ENTRY_FROM_LINK (CheckEntry);
    if (IsBitMaskMatchCheck (CheckFeature->FeatureMask, FeatureMask)) {
      AdjustEntry (FeatureList, CheckEntry, CurrentEntry, TRUE);
      Swapped = TRUE;
      break;
    }

    CheckEntry = CheckEntry->ForwardLink;
  }

  return Swapped;
}

/**
  Checks and adjusts current CPU features per dependency relationship.

  @param[in]  FeatureList        Pointer to CPU feature list
  @param[in]  CurrentEntry       Pointer to current checked CPU feature
  @param[in]  FeatureMask        The feature bit mask.

  @retval     return Swapped info.
**/
BOOLEAN
InsertToAfterEntry (
  IN LIST_ENTRY              *FeatureList,
  IN LIST_ENTRY              *CurrentEntry,
  IN UINT8                   *FeatureMask
  )
{
  LIST_ENTRY                 *CheckEntry;
  CPU_FEATURES_ENTRY         *CheckFeature;
  BOOLEAN                    Swapped;

  Swapped = FALSE;

  //
  // Check all features dispatched after this entry
  //
  CheckEntry = GetNextNode (FeatureList, CurrentEntry);
  while (!IsNull (FeatureList, CheckEntry)) {
    CheckFeature = CPU_FEATURE_ENTRY_FROM_LINK (CheckEntry);
    if (IsBitMaskMatchCheck (CheckFeature->FeatureMask, FeatureMask)) {
      AdjustEntry (FeatureList, CheckEntry, CurrentEntry, FALSE);
      Swapped = TRUE;
      break;
    }

    CheckEntry = CheckEntry->ForwardLink;
  }

  return Swapped;
}

/**
  Checks and adjusts CPU features order per dependency relationship.
  @param[in]  FeatureList        Pointer to CPU feature list
**/
VOID
CheckCpuFeaturesDependency (
  IN LIST_ENTRY              *FeatureList
  )
{
  LIST_ENTRY                 *CurrentEntry;
  CPU_FEATURES_ENTRY         *CpuFeature;
  LIST_ENTRY                 *CheckEntry;
  CPU_FEATURES_ENTRY         *CheckFeature;
  BOOLEAN                    Swapped;
  LIST_ENTRY                 *TempEntry;
  LIST_ENTRY                 *NextEntry;

  //
  // Single forward pass over the feature list. Whenever the current feature
  // is moved, iteration resumes at the node that originally followed it
  // (NextEntry), since CurrentEntry itself has been relocated.
  //
  CurrentEntry = GetFirstNode (FeatureList);
  while (!IsNull (FeatureList, CurrentEntry)) {
    Swapped = FALSE;
    CpuFeature = CPU_FEATURE_ENTRY_FROM_LINK (CurrentEntry);
    NextEntry = CurrentEntry->ForwardLink;
    if (CpuFeature->BeforeAll) {
      //
      // Check all features dispatched before this entry
      //
      CheckEntry = GetFirstNode (FeatureList);
      while (CheckEntry != CurrentEntry) {
        CheckFeature = CPU_FEATURE_ENTRY_FROM_LINK (CheckEntry);
        if (!CheckFeature->BeforeAll) {
          //
          // If this feature has no BeforeAll flag and is dispatched before CpuFeature,
          // insert currentEntry before Checked feature
          //
          RemoveEntryList (CurrentEntry);
          InsertTailList (CheckEntry, CurrentEntry);
          Swapped = TRUE;
          break;
        }
        CheckEntry = CheckEntry->ForwardLink;
      }
      if (Swapped) {
        CurrentEntry = NextEntry;
        continue;
      }
    }

    if (CpuFeature->AfterAll) {
      //
      // Check all features dispatched after this entry
      //
      CheckEntry = GetNextNode (FeatureList, CurrentEntry);
      while (!IsNull (FeatureList, CheckEntry)) {
        CheckFeature = CPU_FEATURE_ENTRY_FROM_LINK (CheckEntry);
        if (!CheckFeature->AfterAll) {
          //
          // If this feature has no AfterAll flag and is dispatched after CpuFeature,
          // insert currentEntry after Checked feature
          //
          TempEntry = GetNextNode (FeatureList, CurrentEntry);
          RemoveEntryList (CurrentEntry);
          InsertHeadList (CheckEntry, CurrentEntry);
          // NOTE(review): CurrentEntry is overwritten by NextEntry right after
          // the break below, so this assignment has no lasting effect -- confirm.
          CurrentEntry = TempEntry;
          Swapped = TRUE;
          break;
        }
        CheckEntry = CheckEntry->ForwardLink;
      }
      if (Swapped) {
        CurrentEntry = NextEntry;
        continue;
      }
    }

    //
    // Pairwise ordering constraints: thread, core, and package scope.
    // The first constraint that causes a move restarts processing at the
    // current node's successor (the InsertTo* helpers do not advance us).
    //
    if (CpuFeature->BeforeFeatureBitMask != NULL) {
      Swapped = InsertToBeforeEntry (FeatureList, CurrentEntry, CpuFeature->BeforeFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    if (CpuFeature->AfterFeatureBitMask != NULL) {
      Swapped = InsertToAfterEntry (FeatureList, CurrentEntry, CpuFeature->AfterFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    if (CpuFeature->CoreBeforeFeatureBitMask != NULL) {
      Swapped = InsertToBeforeEntry (FeatureList, CurrentEntry, CpuFeature->CoreBeforeFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    if (CpuFeature->CoreAfterFeatureBitMask != NULL) {
      Swapped = InsertToAfterEntry (FeatureList, CurrentEntry, CpuFeature->CoreAfterFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    if (CpuFeature->PackageBeforeFeatureBitMask != NULL) {
      Swapped = InsertToBeforeEntry (FeatureList, CurrentEntry, CpuFeature->PackageBeforeFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    if (CpuFeature->PackageAfterFeatureBitMask != NULL) {
      Swapped = InsertToAfterEntry (FeatureList, CurrentEntry, CpuFeature->PackageAfterFeatureBitMask);
      if (Swapped) {
        continue;
      }
    }

    CurrentEntry = CurrentEntry->ForwardLink;
  }
}

/**
  Worker function to register CPU Feature.

  @param[in]  CpuFeature         Pointer to CPU feature entry

  @retval  RETURN_SUCCESS            The CPU feature was successfully registered.
  @retval  RETURN_OUT_OF_RESOURCES   There are not enough resources to register
                                     the CPU feature.
  @retval  RETURN_UNSUPPORTED        Registration of the CPU feature is not
                                     supported due to a circular dependency between
                                     BEFORE and AFTER features.
**/
RETURN_STATUS
RegisterCpuFeatureWorker (
  IN CPU_FEATURES_ENTRY      *CpuFeature
  )
{
  EFI_STATUS                 Status;
  CPU_FEATURES_DATA          *CpuFeaturesData;
  CPU_FEATURES_ENTRY         *CpuFeatureEntry;
  LIST_ENTRY                 *Entry;
  UINTN                      BitMaskSize;
  BOOLEAN                    FeatureExist;

  BitMaskSize     = PcdGetSize (PcdCpuFeaturesSetting);
  CpuFeaturesData = GetCpuFeaturesData ();
  //
  // Lazily initialize the global feature list and locks on first registration.
  //
  if (CpuFeaturesData->FeaturesCount == 0) {
    InitializeListHead (&CpuFeaturesData->FeatureList);
    InitializeSpinLock (&CpuFeaturesData->CpuFlags.MemoryMappedLock);
    InitializeSpinLock (&CpuFeaturesData->CpuFlags.ConsoleLogLock);
    CpuFeaturesData->BitMaskSize = (UINT32) BitMaskSize;
  }
  ASSERT (CpuFeaturesData->BitMaskSize == BitMaskSize);

  //
  // Scan the list for a previously registered entry with the same feature mask.
  //
  FeatureExist = FALSE;
  CpuFeatureEntry = NULL;
  Entry = GetFirstNode (&CpuFeaturesData->FeatureList);
  while (!IsNull (&CpuFeaturesData->FeatureList, Entry)) {
    CpuFeatureEntry = CPU_FEATURE_ENTRY_FROM_LINK (Entry);
    if (IsCpuFeatureMatch (CpuFeature->FeatureMask, CpuFeatureEntry->FeatureMask)) {
      //
      // If this feature already registered
      //
      FeatureExist = TRUE;
      break;
    }
    Entry = Entry->ForwardLink;
  }

  if (!FeatureExist) {
    //
    // New feature: take ownership of CpuFeature and append it to the list.
    //
    DEBUG ((DEBUG_INFO, "[NEW] "));
    DumpCpuFeature (CpuFeature);
    InsertTailList (&CpuFeaturesData->FeatureList, &CpuFeature->Link);
    CpuFeaturesData->FeaturesCount++;
  } else {
    //
    // Re-registration: merge non-NULL fields of CpuFeature into the existing
    // entry, transfer ownership of its bitmask buffers, then free the shell.
    //
    DEBUG ((DEBUG_INFO, "[OVERRIDE] "));
    DumpCpuFeature (CpuFeature);
    ASSERT (CpuFeatureEntry != NULL);
    //
    // Overwrite original parameters of CPU feature
    //
    if (CpuFeature->GetConfigDataFunc != NULL) {
      CpuFeatureEntry->GetConfigDataFunc = CpuFeature->GetConfigDataFunc;
    }
    if (CpuFeature->SupportFunc != NULL) {
      CpuFeatureEntry->SupportFunc = CpuFeature->SupportFunc;
    }
    if (CpuFeature->InitializeFunc != NULL) {
      CpuFeatureEntry->InitializeFunc = CpuFeature->InitializeFunc;
    }
    if (CpuFeature->FeatureName != NULL) {
      if (CpuFeatureEntry->FeatureName == NULL) {
        CpuFeatureEntry->FeatureName = AllocatePool (CPU_FEATURE_NAME_SIZE);
        ASSERT (CpuFeatureEntry->FeatureName != NULL);
      }
      Status = AsciiStrCpyS (CpuFeatureEntry->FeatureName, CPU_FEATURE_NAME_SIZE, CpuFeature->FeatureName);
      ASSERT_EFI_ERROR (Status);
      FreePool (CpuFeature->FeatureName);
    }
    if (CpuFeature->BeforeFeatureBitMask != NULL) {
      if (CpuFeatureEntry->BeforeFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->BeforeFeatureBitMask);
      }
      CpuFeatureEntry->BeforeFeatureBitMask = CpuFeature->BeforeFeatureBitMask;
    }
    if (CpuFeature->AfterFeatureBitMask != NULL) {
      if (CpuFeatureEntry->AfterFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->AfterFeatureBitMask);
      }
      CpuFeatureEntry->AfterFeatureBitMask = CpuFeature->AfterFeatureBitMask;
    }
    if (CpuFeature->CoreBeforeFeatureBitMask != NULL) {
      if (CpuFeatureEntry->CoreBeforeFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->CoreBeforeFeatureBitMask);
      }
      CpuFeatureEntry->CoreBeforeFeatureBitMask = CpuFeature->CoreBeforeFeatureBitMask;
    }
    if (CpuFeature->CoreAfterFeatureBitMask != NULL) {
      if (CpuFeatureEntry->CoreAfterFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->CoreAfterFeatureBitMask);
      }
      CpuFeatureEntry->CoreAfterFeatureBitMask = CpuFeature->CoreAfterFeatureBitMask;
    }
    if (CpuFeature->PackageBeforeFeatureBitMask != NULL) {
      if (CpuFeatureEntry->PackageBeforeFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->PackageBeforeFeatureBitMask);
      }
      CpuFeatureEntry->PackageBeforeFeatureBitMask = CpuFeature->PackageBeforeFeatureBitMask;
    }
    if (CpuFeature->PackageAfterFeatureBitMask != NULL) {
      if (CpuFeatureEntry->PackageAfterFeatureBitMask != NULL) {
        FreePool (CpuFeatureEntry->PackageAfterFeatureBitMask);
      }
      CpuFeatureEntry->PackageAfterFeatureBitMask = CpuFeature->PackageAfterFeatureBitMask;
    }

    CpuFeatureEntry->BeforeAll = CpuFeature->BeforeAll;
    CpuFeatureEntry->AfterAll  = CpuFeature->AfterAll;

    FreePool (CpuFeature->FeatureMask);
    FreePool (CpuFeature);
  }

  //
  // Verify CPU features dependency can change CPU feature order
  //
  CheckCpuFeaturesDependency (&CpuFeaturesData->FeatureList);

  return RETURN_SUCCESS;
}

/**
  Sets CPU feature bit mask in CPU feature bit mask buffer.
@param[in] FeaturesBitMask Pointer to CPU feature bit mask buffer @param[in] Feature The bit number of the CPU feature @param[in] BitMaskSize CPU feature bit mask buffer size **/ VOID SetCpuFeaturesBitMask ( IN UINT8 **FeaturesBitMask, IN UINT32 Feature, IN UINTN BitMaskSize ) { UINT8 *CpuFeaturesBitMask; ASSERT (FeaturesBitMask != NULL); CpuFeaturesBitMask = *FeaturesBitMask; if (CpuFeaturesBitMask == NULL) { CpuFeaturesBitMask = AllocateZeroPool (BitMaskSize); ASSERT (CpuFeaturesBitMask != NULL); *FeaturesBitMask = CpuFeaturesBitMask; } CpuFeaturesBitMask += (Feature / 8); *CpuFeaturesBitMask |= (UINT8) (1 << (Feature % 8)); } /** Registers a CPU Feature. @param[in] FeatureName A Null-terminated Ascii string indicates CPU feature name. @param[in] GetConfigDataFunc CPU feature get configuration data function. This is an optional parameter that may be NULL. If NULL, then the most recently registered function for the CPU feature is used. If no functions are registered for a CPU feature, then the CPU configuration data for the registered feature is NULL. @param[in] SupportFunc CPU feature support function. This is an optional parameter that may be NULL. If NULL, then the most recently registered function for the CPU feature is used. If no functions are registered for a CPU feature, then the CPU feature is assumed to be supported by all CPUs. @param[in] InitializeFunc CPU feature initialize function. This is an optional parameter that may be NULL. If NULL, then the most recently registered function for the CPU feature is used. If no functions are registered for a CPU feature, then the CPU feature initialization is skipped. @param[in] ... Variable argument list of UINT32 CPU feature value. Values with no modifiers are the features provided by the registered functions. Values with CPU_FEATURE_BEFORE modifier are features that must be initialized after the features provided by the registered functions are used. 
                                 Values with CPU_FEATURE_AFTER modifier are features
                                 that must be initialized before the features provided
                                 by the registered functions are used.
                                 The last argument in this variable argument list must
                                 always be CPU_FEATURE_END.

  @retval  RETURN_SUCCESS           The CPU feature was successfully registered.
  @retval  RETURN_OUT_OF_RESOURCES  There are not enough resources to register
                                    the CPU feature.
  @retval  RETURN_UNSUPPORTED       Registration of the CPU feature is not
                                    supported due to a circular dependency between
                                    BEFORE and AFTER features.
  @retval  RETURN_NOT_READY         CPU feature PCD PcdCpuFeaturesUserConfiguration
                                    not updated by Platform driver yet.

  @note This service could be called by BSP only.
**/
RETURN_STATUS
EFIAPI
RegisterCpuFeature (
  IN CHAR8                             *FeatureName,       OPTIONAL
  IN CPU_FEATURE_GET_CONFIG_DATA       GetConfigDataFunc,  OPTIONAL
  IN CPU_FEATURE_SUPPORT               SupportFunc,        OPTIONAL
  IN CPU_FEATURE_INITIALIZE            InitializeFunc,     OPTIONAL
  ...
  )
{
  EFI_STATUS                 Status;
  VA_LIST                    Marker;
  UINT32                     Feature;
  UINTN                      BitMaskSize;
  CPU_FEATURES_ENTRY         *CpuFeature;
  UINT8                      *FeatureMask;
  UINT8                      *BeforeFeatureBitMask;
  UINT8                      *AfterFeatureBitMask;
  UINT8                      *CoreBeforeFeatureBitMask;
  UINT8                      *CoreAfterFeatureBitMask;
  UINT8                      *PackageBeforeFeatureBitMask;
  UINT8                      *PackageAfterFeatureBitMask;
  BOOLEAN                    BeforeAll;
  BOOLEAN                    AfterAll;

  FeatureMask                 = NULL;
  BeforeFeatureBitMask        = NULL;
  AfterFeatureBitMask         = NULL;
  CoreBeforeFeatureBitMask    = NULL;
  CoreAfterFeatureBitMask     = NULL;
  PackageBeforeFeatureBitMask = NULL;
  PackageAfterFeatureBitMask  = NULL;
  BeforeAll                   = FALSE;
  AfterAll                    = FALSE;

  BitMaskSize = PcdGetSize (PcdCpuFeaturesSetting);

  //
  // Walk the variable argument list until CPU_FEATURE_END, routing each value
  // into the proper bit mask according to its scope modifier. The modifier
  // pairs are mutually exclusive, which the ASSERTs below enforce.
  //
  VA_START (Marker, InitializeFunc);
  Feature = VA_ARG (Marker, UINT32);
  while (Feature != CPU_FEATURE_END) {
    ASSERT ((Feature & (CPU_FEATURE_BEFORE | CPU_FEATURE_AFTER))
                    != (CPU_FEATURE_BEFORE | CPU_FEATURE_AFTER));
    ASSERT ((Feature & (CPU_FEATURE_BEFORE_ALL | CPU_FEATURE_AFTER_ALL))
                    != (CPU_FEATURE_BEFORE_ALL | CPU_FEATURE_AFTER_ALL));
    ASSERT ((Feature & (CPU_FEATURE_CORE_BEFORE | CPU_FEATURE_CORE_AFTER))
                    != (CPU_FEATURE_CORE_BEFORE | CPU_FEATURE_CORE_AFTER));
    ASSERT ((Feature & (CPU_FEATURE_PACKAGE_BEFORE | CPU_FEATURE_PACKAGE_AFTER))
                    != (CPU_FEATURE_PACKAGE_BEFORE | CPU_FEATURE_PACKAGE_AFTER));
    if (Feature < CPU_FEATURE_BEFORE) {
      //
      // Unmodified value: this is the feature being registered itself.
      // Only one such value is allowed (FeatureMask must still be NULL).
      //
      BeforeAll = ((Feature & CPU_FEATURE_BEFORE_ALL) != 0) ? TRUE : FALSE;
      AfterAll  = ((Feature & CPU_FEATURE_AFTER_ALL) != 0) ? TRUE : FALSE;
      Feature  &= ~(CPU_FEATURE_BEFORE_ALL | CPU_FEATURE_AFTER_ALL);
      ASSERT (FeatureMask == NULL);
      SetCpuFeaturesBitMask (&FeatureMask, Feature, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_BEFORE) != 0) {
      SetCpuFeaturesBitMask (&BeforeFeatureBitMask, Feature & ~CPU_FEATURE_BEFORE, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_AFTER) != 0) {
      SetCpuFeaturesBitMask (&AfterFeatureBitMask, Feature & ~CPU_FEATURE_AFTER, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_CORE_BEFORE) != 0) {
      SetCpuFeaturesBitMask (&CoreBeforeFeatureBitMask, Feature & ~CPU_FEATURE_CORE_BEFORE, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_CORE_AFTER) != 0) {
      SetCpuFeaturesBitMask (&CoreAfterFeatureBitMask, Feature & ~CPU_FEATURE_CORE_AFTER, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_PACKAGE_BEFORE) != 0) {
      SetCpuFeaturesBitMask (&PackageBeforeFeatureBitMask, Feature & ~CPU_FEATURE_PACKAGE_BEFORE, BitMaskSize);
    } else if ((Feature & CPU_FEATURE_PACKAGE_AFTER) != 0) {
      SetCpuFeaturesBitMask (&PackageAfterFeatureBitMask, Feature & ~CPU_FEATURE_PACKAGE_AFTER, BitMaskSize);
    }
    Feature = VA_ARG (Marker, UINT32);
  }
  VA_END (Marker);

  //
  // Build the feature entry; ownership of all bit mask buffers and of the
  // entry itself transfers to RegisterCpuFeatureWorker().
  //
  CpuFeature = AllocateZeroPool (sizeof (CPU_FEATURES_ENTRY));
  ASSERT (CpuFeature != NULL);
  CpuFeature->Signature                   = CPU_FEATURE_ENTRY_SIGNATURE;
  CpuFeature->FeatureMask                 = FeatureMask;
  CpuFeature->BeforeFeatureBitMask        = BeforeFeatureBitMask;
  CpuFeature->AfterFeatureBitMask         = AfterFeatureBitMask;
  CpuFeature->CoreBeforeFeatureBitMask    = CoreBeforeFeatureBitMask;
  CpuFeature->CoreAfterFeatureBitMask     = CoreAfterFeatureBitMask;
  CpuFeature->PackageBeforeFeatureBitMask = PackageBeforeFeatureBitMask;
  CpuFeature->PackageAfterFeatureBitMask  = PackageAfterFeatureBitMask;
  CpuFeature->BeforeAll                   = BeforeAll;
  CpuFeature->AfterAll                    = AfterAll;
  CpuFeature->GetConfigDataFunc           = GetConfigDataFunc;
  CpuFeature->SupportFunc                 = SupportFunc;
  CpuFeature->InitializeFunc              = InitializeFunc;
  if (FeatureName != NULL) {
    CpuFeature->FeatureName = AllocatePool (CPU_FEATURE_NAME_SIZE);
    ASSERT (CpuFeature->FeatureName != NULL);
    Status = AsciiStrCpyS (CpuFeature->FeatureName, CPU_FEATURE_NAME_SIZE, FeatureName);
    ASSERT_EFI_ERROR (Status);
  }

  Status = RegisterCpuFeatureWorker (CpuFeature);
  ASSERT_EFI_ERROR (Status);

  return RETURN_SUCCESS;
}

/**
  Return ACPI_CPU_DATA data.

  @return  Pointer to ACPI_CPU_DATA data.
**/
ACPI_CPU_DATA *
GetAcpiCpuData (
  VOID
  )
{
  EFI_STATUS                           Status;
  UINTN                                NumberOfCpus;
  UINTN                                NumberOfEnabledProcessors;
  ACPI_CPU_DATA                        *AcpiCpuData;
  UINTN                                TableSize;
  CPU_REGISTER_TABLE                   *RegisterTable;
  UINTN                                Index;
  EFI_PROCESSOR_INFORMATION            ProcessorInfoBuffer;

  //
  // Reuse the structure if a previous caller already created it and
  // published its address through PcdCpuS3DataAddress.
  //
  AcpiCpuData = (ACPI_CPU_DATA *) (UINTN) PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData != NULL) {
    return AcpiCpuData;
  }

  AcpiCpuData = AllocatePages (EFI_SIZE_TO_PAGES (sizeof (ACPI_CPU_DATA)));
  ASSERT (AcpiCpuData != NULL);

  //
  // Set PcdCpuS3DataAddress to the base address of the ACPI_CPU_DATA structure
  //
  Status = PcdSet64S (PcdCpuS3DataAddress, (UINT64)(UINTN)AcpiCpuData);
  ASSERT_EFI_ERROR (Status);

  GetNumberOfProcessor (&NumberOfCpus, &NumberOfEnabledProcessors);
  AcpiCpuData->NumberOfCpus = (UINT32)NumberOfCpus;

  //
  // Allocate buffer for empty RegisterTable and PreSmmInitRegisterTable for all CPUs
  //
  TableSize = 2 * NumberOfCpus * sizeof (CPU_REGISTER_TABLE);
  RegisterTable = AllocatePages (EFI_SIZE_TO_PAGES (TableSize));
  ASSERT (RegisterTable != NULL);

  //
  // First NumberOfCpus entries form the normal register table; the second
  // NumberOfCpus entries form the Pre-SMM init register table.
  //
  for (Index = 0; Index < NumberOfCpus; Index++) {
    Status = GetProcessorInformation (Index, &ProcessorInfoBuffer);
    ASSERT_EFI_ERROR (Status);

    RegisterTable[Index].InitialApicId      = (UINT32)ProcessorInfoBuffer.ProcessorId;
    RegisterTable[Index].TableLength        = 0;
    RegisterTable[Index].AllocatedSize      = 0;
    RegisterTable[Index].RegisterTableEntry = 0;

    RegisterTable[NumberOfCpus + Index].InitialApicId      = (UINT32)ProcessorInfoBuffer.ProcessorId;
    RegisterTable[NumberOfCpus + Index].TableLength        = 0;
    RegisterTable[NumberOfCpus + Index].AllocatedSize      = 0;
    RegisterTable[NumberOfCpus + Index].RegisterTableEntry = 0;
  }
  AcpiCpuData->RegisterTable           = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTable;
  AcpiCpuData->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)(RegisterTable + NumberOfCpus);

  return AcpiCpuData;
}

/**
  Enlarges CPU register table for each processor.

  @param[in, out]  RegisterTable   Pointer processor's CPU register table
**/
STATIC
VOID
EnlargeRegisterTable (
  IN OUT CPU_REGISTER_TABLE            *RegisterTable
  )
{
  EFI_PHYSICAL_ADDRESS  Address;
  UINTN                 UsedPages;

  //
  // Grow by exactly one page: allocate a buffer one page larger than the
  // current allocation, migrate existing entries, and free the old buffer.
  //
  UsedPages = RegisterTable->AllocatedSize / EFI_PAGE_SIZE;
  Address = (UINTN)AllocatePages (UsedPages + 1);
  ASSERT (Address != 0);

  //
  // If there are records existing in the register table, then copy its contents
  // to new region and free the old one.
  //
  if (RegisterTable->AllocatedSize > 0) {
    CopyMem (
      (VOID *) (UINTN) Address,
      (VOID *) (UINTN) RegisterTable->RegisterTableEntry,
      RegisterTable->AllocatedSize
      );
    FreePages ((VOID *)(UINTN)RegisterTable->RegisterTableEntry, UsedPages);
  }

  //
  // Adjust the allocated size and register table base address.
  //
  RegisterTable->AllocatedSize += EFI_PAGE_SIZE;
  RegisterTable->RegisterTableEntry = Address;
}

/**
  Add an entry in specified register table.

  This function adds an entry in specified register table, with given register type,
  register index, bit section and value.
  @param[in]  PreSmmFlag       If TRUE, entry will be added into PreSmm register table
                               If FALSE, entry will be added into register table
  @param[in]  ProcessorNumber  The index of the CPU to add a register table entry
  @param[in]  RegisterType     Type of the register to program
  @param[in]  Index            Index of the register to program
  @param[in]  ValidBitStart    Start of the bit section
  @param[in]  ValidBitLength   Length of the bit section
  @param[in]  Value            Value to write
**/
VOID
CpuRegisterTableWriteWorker (
  IN BOOLEAN                 PreSmmFlag,
  IN UINTN                   ProcessorNumber,
  IN REGISTER_TYPE           RegisterType,
  IN UINT64                  Index,
  IN UINT8                   ValidBitStart,
  IN UINT8                   ValidBitLength,
  IN UINT64                  Value
  )
{
  CPU_FEATURES_DATA          *CpuFeaturesData;
  ACPI_CPU_DATA              *AcpiCpuData;
  CPU_REGISTER_TABLE         *RegisterTable;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  //
  // Lazily resolve the per-CPU register tables from ACPI_CPU_DATA on first use.
  //
  CpuFeaturesData = GetCpuFeaturesData ();
  if (CpuFeaturesData->RegisterTable == NULL) {
    AcpiCpuData = GetAcpiCpuData ();
    ASSERT ((AcpiCpuData != NULL) && (AcpiCpuData->RegisterTable != 0));
    CpuFeaturesData->RegisterTable = (CPU_REGISTER_TABLE *) (UINTN) AcpiCpuData->RegisterTable;
    CpuFeaturesData->PreSmmRegisterTable = (CPU_REGISTER_TABLE *) (UINTN) AcpiCpuData->PreSmmInitRegisterTable;
  }

  if (PreSmmFlag) {
    RegisterTable = &CpuFeaturesData->PreSmmRegisterTable[ProcessorNumber];
  } else {
    RegisterTable = &CpuFeaturesData->RegisterTable[ProcessorNumber];
  }

  // Grow the table by a page when it is full.
  if (RegisterTable->TableLength == RegisterTable->AllocatedSize / sizeof (CPU_REGISTER_TABLE_ENTRY)) {
    EnlargeRegisterTable (RegisterTable);
  }

  //
  // Append entry in the register table.
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  RegisterTableEntry[RegisterTable->TableLength].RegisterType   = RegisterType;
  RegisterTableEntry[RegisterTable->TableLength].Index          = (UINT32) Index;
  RegisterTableEntry[RegisterTable->TableLength].HighIndex      = (UINT32) RShiftU64 (Index, 32);
  RegisterTableEntry[RegisterTable->TableLength].ValidBitStart  = ValidBitStart;
  RegisterTableEntry[RegisterTable->TableLength].ValidBitLength = ValidBitLength;
  RegisterTableEntry[RegisterTable->TableLength].Value          = Value;

  RegisterTable->TableLength++;
}

/**
  Adds an entry in specified register table.

  This function adds an entry in specified register table, with given register type,
  register index, bit section and value.

  @param[in]  ProcessorNumber  The index of the CPU to add a register table entry
  @param[in]  RegisterType     Type of the register to program
  @param[in]  Index            Index of the register to program
  @param[in]  ValueMask        Mask of bits in register to write
  @param[in]  Value            Value to write

  @note This service could be called by BSP only.
**/
VOID
EFIAPI
CpuRegisterTableWrite (
  IN UINTN               ProcessorNumber,
  IN REGISTER_TYPE       RegisterType,
  IN UINT64              Index,
  IN UINT64              ValueMask,
  IN UINT64              Value
  )
{
  UINT8   Start;
  UINT8   End;
  UINT8   Length;

  //
  // Convert the contiguous ValueMask into a (start, length) bit section.
  // NOTE(review): a ValueMask of 0 makes LowBitSet64/HighBitSet64 return -1,
  // yielding a nonsense section -- presumably callers never pass 0; confirm.
  //
  Start  = (UINT8)LowBitSet64  (ValueMask);
  End    = (UINT8)HighBitSet64 (ValueMask);
  Length = End - Start + 1;
  CpuRegisterTableWriteWorker (FALSE, ProcessorNumber, RegisterType, Index, Start, Length, Value);
}

/**
  Adds an entry in specified Pre-SMM register table.

  This function adds an entry in specified register table, with given register type,
  register index, bit section and value.

  @param[in]  ProcessorNumber  The index of the CPU to add a register table entry.
  @param[in]  RegisterType     Type of the register to program
  @param[in]  Index            Index of the register to program
  @param[in]  ValueMask        Mask of bits in register to write
  @param[in]  Value            Value to write

  @note This service could be called by BSP only.
**/
VOID
EFIAPI
PreSmmCpuRegisterTableWrite (
  IN UINTN               ProcessorNumber,
  IN REGISTER_TYPE       RegisterType,
  IN UINT64              Index,
  IN UINT64              ValueMask,
  IN UINT64              Value
  )
{
  UINT8   Start;
  UINT8   End;
  UINT8   Length;

  //
  // Convert the contiguous ValueMask into a (start, length) bit section,
  // then record the write in the Pre-SMM table (PreSmmFlag = TRUE).
  //
  Start  = (UINT8)LowBitSet64  (ValueMask);
  End    = (UINT8)HighBitSet64 (ValueMask);
  Length = End - Start + 1;
  CpuRegisterTableWriteWorker (TRUE, ProcessorNumber, RegisterType, Index, Start, Length, Value);
}

/**
  Worker function to determine if a CPU feature is set in input CPU feature bit mask buffer.

  @param[in]  CpuBitMask      CPU feature bit mask buffer
  @param[in]  CpuBitMaskSize  The size of CPU feature bit mask buffer
  @param[in]  Feature         The bit number of the CPU feature

  @retval  TRUE   The CPU feature is set in CpuBitMask.
  @retval  FALSE  The CPU feature is not set in CpuBitMask.

**/
BOOLEAN
IsCpuFeatureSetInCpuPcd (
  IN UINT8               *CpuBitMask,
  IN UINTN               CpuBitMaskSize,
  IN UINT32              Feature
  )
{
  // A bit number beyond the buffer is treated as "not set" rather than read OOB.
  if ((Feature >> 3) >= CpuBitMaskSize) {
    return FALSE;
  }
  return ((*(CpuBitMask + (Feature >> 3)) & (1 << (Feature & 0x07))) != 0);
}

/**
  Determines if a CPU feature is enabled in PcdCpuFeaturesSupport bit mask.
  If a CPU feature is disabled in PcdCpuFeaturesSupport then all the code/data
  associated with that feature should be optimized away if compiler
  optimizations are enabled.

  @param[in]  Feature  The bit number of the CPU feature to check in the PCD
                       PcdCpuFeaturesSupport

  @retval  TRUE   The CPU feature is set in PcdCpuFeaturesSupport.
  @retval  FALSE  The CPU feature is not set in PcdCpuFeaturesSupport.

  @note This service could be called by BSP only.
**/
BOOLEAN
EFIAPI
IsCpuFeatureSupported (
  IN UINT32              Feature
  )
{
  return IsCpuFeatureSetInCpuPcd (
           (UINT8 *)PcdGetPtr (PcdCpuFeaturesSupport),
           PcdGetSize (PcdCpuFeaturesSupport),
           Feature
           );
}

/**
  Determines if a CPU feature is set in PcdCpuFeaturesSetting bit mask.

  @param[in]  Feature  The bit number of the CPU feature to check in the PCD
                       PcdCpuFeaturesSetting

  @retval  TRUE   The CPU feature is set in PcdCpuFeaturesSetting.
  @retval  FALSE  The CPU feature is not set in PcdCpuFeaturesSetting.

  @note This service could be called by BSP only.
**/
BOOLEAN
EFIAPI
IsCpuFeatureInSetting (
  IN UINT32              Feature
  )
{
  return IsCpuFeatureSetInCpuPcd (
           (UINT8 *)PcdGetPtr (PcdCpuFeaturesSetting),
           PcdGetSize (PcdCpuFeaturesSetting),
           Feature
           );
}

/**
  Switches to assigned BSP after CPU features initialization.

  @param[in]  ProcessorNumber  The index of the CPU executing this function.

  @note This service could be called by BSP only.
**/
VOID
EFIAPI
SwitchBspAfterFeaturesInitialize (
  IN UINTN               ProcessorNumber
  )
{
  CPU_FEATURES_DATA      *CpuFeaturesData;

  // Record which processor should become the BSP; the actual switch is
  // performed elsewhere using this stored number.
  CpuFeaturesData = GetCpuFeaturesData ();
  CpuFeaturesData->BspNumber = ProcessorNumber;
}
900541.c
#include <std.h>

inherit WEAPONLESS;

// A scrawny rat: small, aggressive level-1 vermin that fights with its
// natural limbs (forepaws and teeth) rather than wielded weapons.
void create() {
    ::create();
    set_name("rat");
    set_id(({"rat","scrawny rat"}));
    set_short("A scrawny rat");
    set_long("%^ORANGE%^The scrawny brown rat has a scruffy coat and looks hungry.");
    set_race("rat");
    set_body_type("quadruped");
    set_gender("female");
    set_size(1);                    // smallest size category
    set("aggressive",25);           // will attack players on its own fairly often
    set_hd(1,1);
    set_level(1);
    set_class("fighter");
    set_mlevel("fighter",1);
    set_max_hp(15);
    set_hp(random(5)+10);           // spawns with 10-14 of its 15 hp
    set_exp(20);
    set_damage(1,3);                // 1d3 natural attack
    set_attacks_num(1);
    set_nat_weapon_type("piercing");
    add_limb("teeth", "head", 0, 0, 0);
    set_attack_limbs(({"right forepaw","left forepaw","teeth"}));
    set_ac(10);
    set_alignment(9);
    add_money("gold",random(10)+2); // 2-11 gold
    add_money("silver",random(10)); // 0-9 silver
    set_stats("intelligence",2);
    set_stats("wisdom",2);
    set_stats("strength",5);
    set_stats("charisma",2);
    set_stats("dexterity",8);
    set_stats("constitution",12);
}
927207.c
/* * Common [OS-independent] portion of * Broadcom Home Networking Division 10/100 Mbit/s Ethernet * Device Driver. * * Copyright (C) 2009, Broadcom Corporation * All Rights Reserved. * * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; * the contents of this file may not be disclosed to third parties, copied * or duplicated in any form, in whole or in part, without the prior * written permission of Broadcom Corporation. * $Id: etc.c,v 1.105.2.5 2009/07/17 23:40:42 Exp $ */ #include <typedefs.h> #include <osl.h> #include <bcmendian.h> #include <proto/ethernet.h> #include <proto/vlan.h> #include <proto/bcmip.h> #include <proto/802.1d.h> #include <bcmenetmib.h> #include <bcmenetrxh.h> #include <bcmenetphy.h> #include <et_dbg.h> #include <etc.h> #include <et_export.h> #include <bcmutils.h> #ifdef ETROBO #ifndef _siutils_h_ typedef const struct si_pub si_t; #endif #include <bcmrobo.h> #endif /* ETROBO */ uint32 et_msg_level = 0; /* local prototypes */ static void etc_loopback(etc_info_t *etc, int on); /* 802.1d priority to traffic class mapping. queues correspond one-to-one * with traffic classes. 
 */
uint32 up2tc[NUMPRIO] = {
	TC_BE,  	/* 0    BE    TC_BE    Best Effort */
	TC_BK,  	/* 1    BK    TC_BK    Background */
	TC_BK,  	/* 2    --    TC_BK    Background */
	TC_BE,  	/* 3    EE    TC_BE    Best Effort */
	TC_CL,  	/* 4    CL    TC_CL    Controlled Load */
	TC_CL,  	/* 5    VI    TC_CL    Controlled Load */
	TC_VO,  	/* 6    VO    TC_VO    Voice */
	TC_VO   	/* 7    NC    TC_VO    Voice */
};

/* 4-bit priority request bitmap to highest traffic class */
uint32 priq_selector[] = {
	[0x0] = TC_NONE, [0x1] = TC_BK, [0x2] = TC_BE, [0x3] = TC_BE,
	[0x4] = TC_CL,   [0x5] = TC_CL, [0x6] = TC_CL, [0x7] = TC_CL,
	[0x8] = TC_VO,   [0x9] = TC_VO, [0xa] = TC_VO, [0xb] = TC_VO,
	[0xc] = TC_VO,   [0xd] = TC_VO, [0xe] = TC_VO, [0xf] = TC_VO
};

/* find the chip opsvec for this chip */
struct chops*
etc_chipmatch(uint vendor, uint device)
{
	/* probe each known chip family's id() hook until one claims the device */
	{
		extern struct chops bcm47xx_et_chops;
		if (bcm47xx_et_chops.id(vendor, device))
			return (&bcm47xx_et_chops);
	}

#ifdef CFG_GMAC
	{
		extern struct chops bcmgmac_et_chops;
		if (bcmgmac_et_chops.id(vendor, device))
			return (&bcmgmac_et_chops);
	}
#endif /* CFG_GMAC */
	return (NULL);
}

/*
 * Allocate and initialize the common (OS-independent) driver state and
 * attach the chip-specific layer.  Returns the etc_info_t handle, or
 * NULL on failure (everything allocated so far is released).
 */
void*
etc_attach(void *et, uint vendor, uint device, uint unit, void *osh, void *regsva)
{
	etc_info_t *etc;

	ET_TRACE(("et%d: etc_attach: vendor 0x%x device 0x%x\n", unit, vendor, device));

	/* some code depends on packed structures */
	ASSERT(sizeof(struct ether_addr) == ETHER_ADDR_LEN);
	ASSERT(sizeof(struct ether_header) == ETHER_HDR_LEN);

	/* allocate etc_info_t state structure */
	if ((etc = (etc_info_t*) MALLOC(osh, sizeof(etc_info_t))) == NULL) {
		ET_ERROR(("et%d: etc_attach: out of memory, malloced %d bytes\n", unit,
		          MALLOCED(osh)));
		return (NULL);
	}
	bzero((char*)etc, sizeof(etc_info_t));

	etc->et = et;
	etc->unit = unit;
	etc->osh = osh;
	etc->vendorid = (uint16) vendor;
	etc->deviceid = (uint16) device;
	etc->forcespeed = ET_AUTO;	/* default: autonegotiate */
	etc->linkstate = FALSE;

	/* set chip opsvec */
	etc->chops = etc_chipmatch(vendor, device);
	ASSERT(etc->chops);

	/* chip attach */
	if ((etc->ch = (*etc->chops->attach)(etc, osh, regsva)) == NULL) {
		ET_ERROR(("et%d: chipattach error\n", unit));
		goto fail;
	}

	return ((void*)etc);

fail:
	etc_detach(etc);
	return (NULL);
}

/* release the chip-specific layer and the common state; NULL-safe */
void
etc_detach(etc_info_t *etc)
{
	if (etc == NULL)
		return;

	/* free chip private state */
	if (etc->ch) {
		(*etc->chops->detach)(etc->ch);
		etc->chops = etc->ch = NULL;
	}

	MFREE(etc->osh, etc, sizeof(etc_info_t));
}

/* reset the chip and reclaim any packets still posted to the DMA rings */
void
etc_reset(etc_info_t *etc)
{
	ET_TRACE(("et%d: etc_reset\n", etc->unit));

	etc->reset++;

	/* reset the chip */
	(*etc->chops->reset)(etc->ch);

	/* free any posted tx packets */
	(*etc->chops->txreclaim)(etc->ch, TRUE);

#ifdef DMA
	/* free any posted rx packets */
	(*etc->chops->rxreclaim)(etc->ch);
#endif /* DMA */
}

/* (re)initialize the chip; requires a valid unicast MAC address */
void
etc_init(etc_info_t *etc, uint options)
{
	ET_TRACE(("et%d: etc_init\n", etc->unit));

	ASSERT(etc->pioactive == NULL);
	ASSERT(!ETHER_ISNULLADDR(&etc->cur_etheraddr));
	ASSERT(!ETHER_ISMULTI(&etc->cur_etheraddr));

	/* init the chip */
	(*etc->chops->init)(etc->ch, options);
}

/* mark interface up */
void
etc_up(etc_info_t *etc)
{
	etc->up = TRUE;

	et_init(etc->et, ET_INIT_DEF_OPTIONS);
}

/* mark interface down */
uint
etc_down(etc_info_t *etc, int reset)
{
	uint callback;

	callback = 0;

	ET_FLAG_DOWN(etc);

	if (reset)
		et_reset(etc->et);

	/* suppress link state changes during power management mode changes */
	if (etc->linkstate) {
		etc->linkstate = FALSE;
		if (!etc->pm_modechange)
			et_link_down(etc->et);
	}

	return (callback);
}

/* common iovar handler.
return 0=ok, -1=error */ int etc_iovar(etc_info_t *etc, uint cmd, uint set, void *arg) { int error; #ifdef ETROBO int i; uint *vecarg; robo_info_t *robo = etc->robo; #endif error = 0; ET_TRACE(("et%d: etc_iovar: cmd 0x%x\n", etc->unit, cmd)); switch (cmd) { #ifdef ETROBO case IOV_ET_POWER_SAVE_MODE: vecarg = (uint *)arg; if (set) error = robo_power_save_mode_set(robo, vecarg[1], vecarg[0]); else { /* get power save mode of all the phys */ if (vecarg[0] == MAX_NO_PHYS) { for (i = 0; i < MAX_NO_PHYS; i++) vecarg[i] = robo_power_save_mode_get(robo, i); break; } /* get power save mode of the phy */ error = robo_power_save_mode_get(robo, vecarg[0]); if (error != -1) { vecarg[1] = error; error = 0; } } break; #endif /* ETROBO */ default: error = -1; } return (error); } /* common ioctl handler. return: 0=ok, -1=error */ int etc_ioctl(etc_info_t *etc, int cmd, void *arg) { int error; int val; int *vec = (int*)arg; error = 0; val = arg ? *(int*)arg : 0; ET_TRACE(("et%d: etc_ioctl: cmd 0x%x\n", etc->unit, cmd)); switch (cmd) { case ETCUP: et_up(etc->et); break; case ETCDOWN: et_down(etc->et, TRUE); break; case ETCLOOP: etc_loopback(etc, val); break; case ETCDUMP: if (et_msg_level & 0x10000) bcmdumplog((char *)arg, 4096); break; case ETCSETMSGLEVEL: et_msg_level = val; break; case ETCPROMISC: etc_promisc(etc, val); break; case ETCQOS: etc_qos(etc, val); break; case ETCSPEED: if (val == ET_1000FULL) { etc->speed = 1000; etc->duplex = 1; } else if (val == ET_1000HALF) { etc->speed = 1000; etc->duplex = 0; } else if (val == ET_100FULL) { etc->speed = 100; etc->duplex = 1; } else if (val == ET_100HALF) { etc->speed = 100; etc->duplex = 0; } else if (val == ET_10FULL) { etc->speed = 10; etc->duplex = 1; } else if (val == ET_10HALF) { etc->speed = 10; etc->duplex = 0; } else if (val == ET_AUTO) ; else goto err; etc->forcespeed = val; /* explicitly reset the phy */ (*etc->chops->phyreset)(etc->ch, etc->phyaddr); /* request restart autonegotiation if we're reverting to adv mode */ if 
(etc->forcespeed == ET_AUTO) { etc->advertise = (ADV_100FULL | ADV_100HALF | ADV_10FULL | ADV_10HALF); etc->advertise2 = ADV_1000FULL; etc->needautoneg = TRUE; } else { etc->advertise = etc->advertise2 = 0; etc->needautoneg = FALSE; } et_init(etc->et, ET_INIT_DEF_OPTIONS); break; case ETCPHYRD: if (vec) { vec[1] = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, vec[0]); ET_TRACE(("etc_ioctl: ETCPHYRD of reg 0x%x => 0x%x\n", vec[0], vec[1])); } break; case ETCPHYRD2: if (vec) { uint phyaddr, reg; phyaddr = vec[0] >> 16; if (phyaddr < MAXEPHY) { reg = vec[0] & 0xffff; vec[1] = (*etc->chops->phyrd)(etc->ch, phyaddr, reg); ET_TRACE(("etc_ioctl: ETCPHYRD2 of phy 0x%x, reg 0x%x => 0x%x\n", phyaddr, reg, vec[1])); } } break; case ETCPHYWR: if (vec) { ET_TRACE(("etc_ioctl: ETCPHYWR to reg 0x%x <= 0x%x\n", vec[0], vec[1])); (*etc->chops->phywr)(etc->ch, etc->phyaddr, vec[0], (uint16)vec[1]); } break; case ETCPHYWR2: if (vec) { uint phyaddr, reg; phyaddr = vec[0] >> 16; if (phyaddr < MAXEPHY) { reg = vec[0] & 0xffff; (*etc->chops->phywr)(etc->ch, phyaddr, reg, (uint16)vec[1]); ET_TRACE(("etc_ioctl: ETCPHYWR2 to phy 0x%x, reg 0x%x <= 0x%x\n", phyaddr, reg, vec[1])); } } break; #ifdef ETROBO case ETCROBORD: if (etc->robo && vec) { uint page, reg; uint16 val; robo_info_t *robo = (robo_info_t *)etc->robo; page = vec[0] >> 16; reg = vec[0] & 0xffff; val = -1; robo->ops->read_reg(etc->robo, page, reg, &val, 2); vec[1] = val; ET_TRACE(("etc_ioctl: ETCROBORD of page 0x%x, reg 0x%x => 0x%x\n", page, reg, val)); } break; case ETCROBOWR: if (etc->robo && vec) { uint page, reg; uint16 val; robo_info_t *robo = (robo_info_t *)etc->robo; page = vec[0] >> 16; reg = vec[0] & 0xffff; val = vec[1]; robo->ops->write_reg(etc->robo, page, vec[0], &val, 2); ET_TRACE(("etc_ioctl: ETCROBOWR to page 0x%x, reg 0x%x <= 0x%x\n", page, reg, val)); } break; #endif /* ETROBO */ default: err: error = -1; } return (error); } /* called once per second */ void etc_watchdog(etc_info_t *etc) { uint16 status; uint16 
lpa; #ifdef ETROBO robo_info_t *robo = (robo_info_t *)etc->robo; static uint32 sleep_timer = PWRSAVE_SLEEP_TIME, wake_timer; #endif etc->now++; #ifdef ETROBO /* Every PWRSAVE_WAKE_TIME sec the phys are put into the normal * mode and link status is checked after PWRSAVE_SLEEP_TIME sec * to see if any of the links is up. If any of the links is up * then that port is taken out of the manual power save mode */ if (robo && (robo->pwrsave_mode_manual | robo->pwrsave_mode_auto)) { if (etc->now == sleep_timer) { robo_power_save_toggle(robo, 0); wake_timer = sleep_timer + PWRSAVE_WAKE_TIME; } else if (etc->now == wake_timer) { robo_power_save_mode_update(robo, FALSE); robo_power_save_toggle(robo, 1); sleep_timer = wake_timer + PWRSAVE_SLEEP_TIME; } /* Check the link status. if the link goes down put the * corresponding phy in power save mode (auto, manual or * both). if link comes up put the phy in normal mode. */ if (etc->now == PWRSAVE_WAKE_TIME) robo_power_save_mode_update(robo, TRUE); } #endif /* ETROBO */ /* no local phy registers */ if (etc->phyaddr == EPHY_NOREG) { etc->linkstate = TRUE; etc->duplex = 1; /* keep emac txcontrol duplex bit consistent with current phy duplex */ (*etc->chops->duplexupd)(etc->ch); return; } status = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 1); /* check for bad mdio read */ if (status == 0xffff) { ET_ERROR(("et%d: etc_watchdog: bad mdio read: phyaddr %d mdcport %d\n", etc->unit, etc->phyaddr, etc->mdcport)); return; } if (etc->forcespeed == ET_AUTO) { uint16 adv, adv2 = 0, status2 = 0, estatus; adv = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 4); lpa = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 5); /* read extended status register. if we are 1000BASE-T * capable then get our advertised capabilities and the * link partner capabilities from 1000BASE-T control and * status registers. 
*/ estatus = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 15); if ((estatus != 0xffff) && (estatus & EST_1000TFULL)) { /* read 1000BASE-T control and status registers */ adv2 = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 9); status2 = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 10); } /* update current speed and duplex */ if ((adv2 & ADV_1000FULL) && (status2 & LPA_1000FULL)) { etc->speed = 1000; etc->duplex = 1; } else if ((adv2 & ADV_1000HALF) && (status2 & LPA_1000HALF)) { etc->speed = 1000; etc->duplex = 0; } else if ((adv & ADV_100FULL) && (lpa & LPA_100FULL)) { etc->speed = 100; etc->duplex = 1; } else if ((adv & ADV_100HALF) && (lpa & LPA_100HALF)) { etc->speed = 100; etc->duplex = 0; } else if ((adv & ADV_10FULL) && (lpa & LPA_10FULL)) { etc->speed = 10; etc->duplex = 1; } else { etc->speed = 10; etc->duplex = 0; } } /* monitor link state */ if (!etc->linkstate && (status & STAT_LINK)) { etc->linkstate = TRUE; if (etc->pm_modechange) etc->pm_modechange = FALSE; else et_link_up(etc->et); } else if (etc->linkstate && !(status & STAT_LINK)) { etc->linkstate = FALSE; if (!etc->pm_modechange) et_link_down(etc->et); } /* keep emac txcontrol duplex bit consistent with current phy duplex */ (*etc->chops->duplexupd)(etc->ch); /* check for remote fault error */ if (status & STAT_REMFAULT) { ET_ERROR(("et%d: remote fault\n", etc->unit)); } /* check for jabber error */ if (status & STAT_JAB) { ET_ERROR(("et%d: jabber\n", etc->unit)); } /* * Read chip mib counters occationally before the 16bit ones can wrap. * We don't use the high-rate mib counters. 
 */
	if ((etc->now % 30) == 0)
		(*etc->chops->statsupd)(etc->ch);
}

/* Set or clear MAC loopback mode, then re-init the chip so the new
 * setting takes effect. `on' is treated as a boolean. */
static void etc_loopback(etc_info_t *etc, int on)
{
	ET_TRACE(("et%d: etc_loopback: %d\n", etc->unit, on));

	etc->loopbk = (bool) on;
	/* loopback is applied by the chip init path, so re-init */
	et_init(etc->et, ET_INIT_DEF_OPTIONS);
}

/* Enable/disable promiscuous receive mode and re-init the chip to
 * program the new receive filter. */
void etc_promisc(etc_info_t *etc, uint on)
{
	ET_TRACE(("et%d: etc_promisc: %d\n", etc->unit, on));

	etc->promisc = (bool) on;
	et_init(etc->et, ET_INIT_DEF_OPTIONS);
}

/* Enable/disable QoS handling and re-init the chip to apply it. */
void etc_qos(etc_info_t *etc, uint on)
{
	ET_TRACE(("et%d: etc_qos: %d\n", etc->unit, on));

	etc->qos = (bool) on;
	et_init(etc->et, ET_INIT_DEF_OPTIONS);
}

/* Return the total byte length of packet `p' by summing the length of
 * every buffer in its chain (PKTNEXT walks the chain). */
uint etc_totlen(etc_info_t *etc, void *p)
{
	uint total;

	total = 0;
	for (; p; p = PKTNEXT(etc->osh, p))
		total += PKTLEN(etc->osh, p);
	return (total);
}
312936.c
/* This file has been autogenerated by Ivory
 * Compiler version  0.1.0.4
 *
 * Accessors for (de)serializing a sequence_numbered_position_sample:
 * a 4-byte sequence number followed immediately by a position sample.
 * `_le'/`_be' variants handle little-/big-endian wire layouts.
 */
#include "sequence_numbered_position_sample_types.h"

/* Unpack (little-endian) from `src' at byte offset `base' into `out'. */
void sequence_numbered_position_sample_get_le(const uint8_t *src,
    uint32_t base, struct sequence_numbered_position_sample *out)
{
    /* position sample sits right after the 4-byte sequence number */
    const uint32_t val_off = (uint32_t) (base + (uint32_t) 4U);

    sequence_num_t_get_le(src, base, &out->seqnum);
    position_sample_get_le(src, val_off, &out->val);
}

/* Unpack (big-endian) from `src' at byte offset `base' into `out'. */
void sequence_numbered_position_sample_get_be(const uint8_t *src,
    uint32_t base, struct sequence_numbered_position_sample *out)
{
    const uint32_t val_off = (uint32_t) (base + (uint32_t) 4U);

    sequence_num_t_get_be(src, base, &out->seqnum);
    position_sample_get_be(src, val_off, &out->val);
}

/* Pack `in' (little-endian) into `dst' at byte offset `base'. */
void sequence_numbered_position_sample_set_le(uint8_t *dst,
    uint32_t base, const struct sequence_numbered_position_sample *in)
{
    const uint32_t val_off = (uint32_t) (base + (uint32_t) 4U);

    sequence_num_t_set_le(dst, base, &in->seqnum);
    position_sample_set_le(dst, val_off, &in->val);
}

/* Pack `in' (big-endian) into `dst' at byte offset `base'. */
void sequence_numbered_position_sample_set_be(uint8_t *dst,
    uint32_t base, const struct sequence_numbered_position_sample *in)
{
    const uint32_t val_off = (uint32_t) (base + (uint32_t) 4U);

    sequence_num_t_set_be(dst, base, &in->seqnum);
    position_sample_set_be(dst, val_off, &in->val);
}
704052.c
/*

-Procedure dafdc_c ( DAF delete comments )

-Abstract

   Delete the entire comment area of a specified DAF file.

-Disclaimer

   THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
   CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
   GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
   ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
   PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
   TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
   WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
   PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
   SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
   SOFTWARE AND RELATED MATERIALS, HOWEVER USED.

   IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
   BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
   LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
   INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
   REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
   REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.

   RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
   THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
   CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
   ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.

-Required_Reading

   DAF

-Keywords

   None.

*/

#include "SpiceUsr.h"
#include "SpiceZfc.h"
#include "SpiceZst.h"

void dafdc_c ( SpiceInt handle )

/*

-Brief_I/O

   VARIABLE  I/O  DESCRIPTION
   --------  ---  --------------------------------------------------
   handle     I   The handle of a binary DAF opened for writing.

-Detailed_Input

   handle      is the handle of a binary DAF that is to have its
               entire comment area deleted. The DAF must have been
               opened with write access.

-Detailed_Output

   None.

-Parameters

   None.

-Exceptions

   1)  If the binary DAF attached to `handle' is not open with write
       access, an error is signaled by a routine in the call tree of
       this routine.

-Files

   See argument `handle' in -Detailed_Input.

-Particulars

   A binary DAF contains an area which is reserved for storing
   annotations or descriptive textual information about the data
   contained in a file. This area is referred to as the "comment
   area" of the file.

   The comment area of a DAF is a line oriented medium for storing
   textual information. The comment area preserves any leading or
   embedded white space in the line(s) of text which are stored, so
   that the appearance of the of information will be unchanged when
   it is retrieved (extracted) at some other time. Trailing blanks,
   however, are NOT preserved, due to the way that character strings
   are represented in standard Fortran 77.

   This routine will delete the entire comment area from the binary
   DAF attached to `handle'. The size of the binary DAF will remain
   unchanged. The space that was used by the comment records is
   reclaimed: the data area of the DAF is shifted toward the
   beginning

-Examples

   The numerical results shown for this example may differ across
   platforms. The results depend on the SPICE kernels used as input,
   the compiler and supporting libraries, and the machine specific
   arithmetic implementation.

   1) Delete the entire comment area of a DAF file. Note that this
      action should only be performed if fresh new comments are to
      be placed within the DAF file.

      Use the SPK kernel below as input DAF file for the program.

         earthstns_itrf93_201023.bsp

      Example code begins here.

         /.
            Program dafdc_ex1
         ./
         #include <stdio.h>
         #include "SpiceUsr.h"

         int main( )
         {
            /.
            Local parameters
            ./
            #define KERNEL       "earthstns_itrf93_201023.bsp"
            #define BUFFSZ       10
            #define LINLEN       1000

            /.
            Local variables.
            ./
            SpiceChar            buffer [BUFFSZ][LINLEN];

            SpiceInt             handle;
            SpiceInt             i;
            SpiceInt             n;

            SpiceBoolean         done;

            /.
            Open a DAF for write. Return a `handle' referring to
            the file.
            ./
            dafopw_c ( KERNEL, &handle );

            /.
            Print the first 10 lines of comments from the DAF file.
            ./
            printf( "Comment area of input DAF file (max. 10 lines): \n" );
            printf( "--------------------------------"
                    "------------------------------\n" );

            dafec_c ( handle, BUFFSZ, LINLEN, &n, buffer, &done );

            for ( i = 0; i < n; i++ )
            {
               printf( "%s\n", buffer[i] );
            }

            printf( "--------------------------------"
                    "------------------------------\n" );
            printf( " \n" );
            printf( "Deleting entire comment area...\n" );

            /.
            Delete all the comments from the DAF file.
            ./
            dafdc_c ( handle );

            /.
            Close the DAF file and re-open it for read access to
            work around the dafec_c restriction on comments not to
            be modified while they are being extracted.
            ./
            dafcls_c ( handle );

            dafopr_c ( KERNEL, &handle );

            /.
            Check if the comments have indeed been deleted.
            ./
            dafec_c ( handle, BUFFSZ, LINLEN, &n, buffer, &done );

            if ( done && ( n == 0 ) )
            {
               printf( " \n" );
               printf( " Successful operation.\n" );
            }
            else
            {
               printf( " \n" );
               printf( " Operation failed.\n" );
            }

            /.
            Safely close the DAF.
            ./
            dafcls_c ( handle );

            return ( 0 );
         }

      When this program was executed on a Mac/Intel/cc/64-bit
      platform, the output was:

         Comment area of input DAF file (max. 10 lines):
         --------------------------------------------------------------

            SPK for DSN Station Locations
            =====================================================================

            Original file name:         earthstns_itrf93_201023.bsp
            Creation date:              2020 October 28 12:30
            Created by:                 Nat Bachman  (NAIF/JPL)


            Introduction
         --------------------------------------------------------------

         Deleting entire comment area...

          Successful operation.

-Restrictions

   None.

-Literature_References

   None.

-Author_and_Institution

   N.J. Bachman        (JPL)
   J. Diaz del Rio     (ODC Space)
   K.R. Gehringer      (JPL)

-Version

   -CSPICE Version 1.0.1, 25-NOV-2021 (JDR)

       Edited the header to comply with NAIF standard.
       Added complete code example.

   -CSPICE Version 1.0.0, 16-NOV-2006 (NJB) (KRG)

-Index_Entries

   delete DAF comment area

-&
*/

{ /* Begin dafdc_c */

   /*
   Participate in error tracing.
   */
   chkin_c ( "dafdc_c" );

   /*
   Hand off the task to the f2c'd routine. The cast relies on
   SpiceInt and the f2c `integer' type having identical layout.
   */
   dafdc_ ( (integer *) &handle );

   chkout_c ( "dafdc_c" );

} /* End dafdc_c */
723714.c
/**
 * @file jaln_tls.c This file contains function definitions for code related to tls.
 *
 * @section LICENSE
 *
 * Source code in 3rd-party is licensed and owned by their respective
 * copyright holders.
 *
 * All other source code is copyright Tresys Technology and licensed as below.
 *
 * Copyright (c) 2011 Tresys Technology LLC, Columbia, Maryland, USA
 *
 * This software was developed by Tresys Technology LLC
 * with U.S. Government sponsorship.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <axl.h>
#include <vortex.h>
#include <vortex_tls.h>
#include <openssl/ssl.h>

#include <jalop/jaln_network.h>
#include <jalop/jaln_network_types.h>

#include "jaln_context.h"
#include "jaln_tls.h"
#include "jal_alloc.h"

/*
 * Vortex profile mask: filter out every profile except TLS until the
 * connection has completed TLS negotiation.
 *
 * Returns axl_false to allow a profile and axl_true to reject it. The TLS
 * profile URI is always allowed so the peer can secure the connection; any
 * other profile on a real channel (channel_num > 0) is rejected while the
 * connection is still insecure, with an explanation in *error_msg.
 */
axl_bool jaln_profile_mask (VortexConnection *connection,
		int channel_num,
		const char *uri,
		__attribute__((unused)) const char *profile_content,
		__attribute__((unused)) VortexEncoding encoding,
		__attribute__((unused)) const char *server_name,
		__attribute__((unused)) VortexFrame *frame,
		char **error_msg,
		__attribute__((unused)) axlPointer user_data)
{
	if (0 == strcmp(uri, VORTEX_TLS_PROFILE_URI)) {
		/* Always allow the TLS profile itself so negotiation can start. */
		return axl_false;
	} else if (channel_num > 0 && !vortex_connection_is_tlsficated(connection)) {
		*error_msg = axl_strdup("Profile not accepted due to an insecure connection");
		return axl_true;
	}
	return axl_false;
}

/*
 * Connection-accepted hook: install jaln_profile_mask() on each new
 * connection so non-TLS profiles are refused until TLS is in place.
 * Always accepts the connection (returns axl_true).
 */
axl_bool jaln_tls_on_connection_accepted(VortexConnection *connection, axlPointer user_data)
{
	vortex_connection_set_profile_mask(connection, jaln_profile_mask, user_data);
	return axl_true;
}

/*
 * Build the SSL_CTX used for TLS negotiation from the credentials stored
 * in the jaln_context passed as user_data (peer_certs directory, public
 * certificate chain file, PEM private key file).
 *
 * Peer verification is mandatory: the handshake fails if the remote side
 * presents no certificate or an unverifiable one.
 *
 * Returns a new SSL_CTX on success; returns NULL on any failure, freeing
 * the partially configured context.
 */
axlPointer jaln_ssl_ctx_creation(__attribute__((unused))VortexConnection *connection, axlPointer user_data)
{
	SSL_CTX *ssl_ctx;
	jaln_context *jaln_ctx = (jaln_context *)user_data;

	/* NOTE(review): TLSv1_method() pins the connection to TLS 1.0, which
	 * is deprecated; consider TLS_method() with a version floor once peer
	 * requirements allow. Left unchanged to preserve wire behavior. */
	ssl_ctx = SSL_CTX_new(TLSv1_method());
	if (!ssl_ctx) {
		/* Bug fix: SSL_CTX_new() can return NULL; previously the NULL
		 * context was passed straight to SSL_CTX_load_verify_locations(),
		 * which dereferences it. */
		return NULL;
	}
	if (!SSL_CTX_load_verify_locations(ssl_ctx, NULL, jaln_ctx->peer_certs)) {
		goto out;
	}
	if (!SSL_CTX_use_certificate_chain_file(ssl_ctx, jaln_ctx->public_cert)) {
		goto out;
	}
	if (!SSL_CTX_use_PrivateKey_file(ssl_ctx, jaln_ctx->private_key, SSL_FILETYPE_PEM)) {
		goto out;
	}
	SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
	return ssl_ctx;
out:
	SSL_CTX_free(ssl_ctx);
	return NULL;
}

/*
 * Record TLS credential paths in the context. All three paths are
 * required and may be set only once; the context takes ownership of
 * copies made with jal_strdup() (which, presumably, aborts rather than
 * returns NULL on OOM -- TODO confirm against jal_alloc.h).
 *
 * Returns JAL_OK on success, JAL_E_INVAL on missing arguments or if any
 * credential is already set.
 */
enum jal_status jaln_register_tls(jaln_context *ctx,
		const char *private_key,
		const char *public_cert,
		const char *peer_certs)
{
	if (!ctx || !private_key || !public_cert || !peer_certs) {
		return JAL_E_INVAL;
	}
	if (ctx->private_key || ctx->public_cert || ctx->peer_certs) {
		return JAL_E_INVAL;
	}
	ctx->private_key = jal_strdup(private_key);
	ctx->public_cert = jal_strdup(public_cert);
	ctx->peer_certs = jal_strdup(peer_certs);
	return JAL_OK;
}
582388.c
/* Copyright (C) 1991-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <stddef.h> #include <dirent.h> /* Read a directory entry from DIRP. */ struct dirent * __readdir (DIR *dirp) { __set_errno (ENOSYS); return NULL; } weak_alias (__readdir, readdir) stub_warning (readdir)
563546.c
/* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2014 Intel Corporation */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <inttypes.h> #include <sys/types.h> #include <sys/param.h> #include <string.h> #include <sys/queue.h> #include <stdarg.h> #include <errno.h> #include <getopt.h> #include <rte_common.h> #include <rte_byteorder.h> #include <rte_log.h> #include <rte_memory.h> #include <rte_memcpy.h> #include <rte_eal.h> #include <rte_launch.h> #include <rte_atomic.h> #include <rte_cycles.h> #include <rte_prefetch.h> #include <rte_lcore.h> #include <rte_per_lcore.h> #include <rte_branch_prediction.h> #include <rte_interrupts.h> #include <rte_random.h> #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_lpm.h> #include <rte_lpm6.h> #include <rte_ip.h> #include <rte_string_fns.h> #include <rte_ip_frag.h> #define RTE_LOGTYPE_IP_FRAG RTE_LOGTYPE_USER1 /* allow max jumbo frame 9.5 KB */ #define JUMBO_FRAME_MAX_SIZE 0x2600 #define ROUNDUP_DIV(a, b) (((a) + (b) - 1) / (b)) /* * Default byte size for the IPv6 Maximum Transfer Unit (MTU). * This value includes the size of IPv6 header. */ #define IPV4_MTU_DEFAULT ETHER_MTU #define IPV6_MTU_DEFAULT ETHER_MTU /* * Default payload in bytes for the IPv6 packet. */ #define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr)) #define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct ipv6_hdr)) /* * Max number of fragments per packet expected - defined by config file. 
*/ #define MAX_PACKET_FRAG RTE_LIBRTE_IP_FRAG_MAX_FRAG #define NB_MBUF 8192 #define MAX_PKT_BURST 32 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET 3 /* * Configurable number of RX/TX ring descriptors */ #define RTE_TEST_RX_DESC_DEFAULT 1024 #define RTE_TEST_TX_DESC_DEFAULT 1024 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /* ethernet addresses of ports */ static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; #ifndef IPv4_BYTES #define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8 #define IPv4_BYTES(addr) \ (uint8_t) (((addr) >> 24) & 0xFF),\ (uint8_t) (((addr) >> 16) & 0xFF),\ (uint8_t) (((addr) >> 8) & 0xFF),\ (uint8_t) ((addr) & 0xFF) #endif #ifndef IPv6_BYTES #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\ "%02x%02x:%02x%02x:%02x%02x:%02x%02x" #define IPv6_BYTES(addr) \ addr[0], addr[1], addr[2], addr[3], \ addr[4], addr[5], addr[6], addr[7], \ addr[8], addr[9], addr[10], addr[11],\ addr[12], addr[13],addr[14], addr[15] #endif #define IPV6_ADDR_LEN 16 /* mask of enabled ports */ static int enabled_port_mask = 0; static int rx_queue_per_lcore = 1; #define MBUF_TABLE_SIZE (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG)) struct mbuf_table { uint16_t len; struct rte_mbuf *m_table[MBUF_TABLE_SIZE]; }; struct rx_queue { struct rte_mempool *direct_pool; struct rte_mempool *indirect_pool; struct rte_lpm *lpm; struct rte_lpm6 *lpm6; uint16_t portid; }; #define MAX_RX_QUEUE_PER_LCORE 16 #define MAX_TX_QUEUE_PER_PORT 16 struct lcore_queue_conf { uint16_t n_rx_queue; uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; } __rte_cache_aligned; struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; static struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, 
.split_hdr_size = 0, .ignore_offload_bitfield = 1, .offloads = (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_CRC_STRIP), }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_MULTI_SEGS), }, }; /* * IPv4 forwarding table */ struct l3fwd_ipv4_route { uint32_t ip; uint8_t depth; uint8_t if_out; }; struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = { {IPv4(100,10,0,0), 16, 0}, {IPv4(100,20,0,0), 16, 1}, {IPv4(100,30,0,0), 16, 2}, {IPv4(100,40,0,0), 16, 3}, {IPv4(100,50,0,0), 16, 4}, {IPv4(100,60,0,0), 16, 5}, {IPv4(100,70,0,0), 16, 6}, {IPv4(100,80,0,0), 16, 7}, }; /* * IPv6 forwarding table */ struct l3fwd_ipv6_route { uint8_t ip[IPV6_ADDR_LEN]; uint8_t depth; uint8_t if_out; }; static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = { {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0}, {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1}, {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2}, {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3}, {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4}, {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5}, {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6}, {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7}, }; #define LPM_MAX_RULES 1024 #define LPM6_MAX_RULES 1024 #define LPM6_NUMBER_TBL8S (1 << 16) struct rte_lpm6_config lpm6_config = { .max_rules = LPM6_MAX_RULES, .number_tbl8s = LPM6_NUMBER_TBL8S, .flags = 0 }; static struct rte_mempool *socket_direct_pool[RTE_MAX_NUMA_NODES]; static struct rte_mempool *socket_indirect_pool[RTE_MAX_NUMA_NODES]; static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES]; static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES]; /* Send burst of packets on an output interface */ static inline int send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port) { struct rte_mbuf **m_table; int ret; uint16_t queueid; queueid = qconf->tx_queue_id[port]; m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; ret = rte_eth_tx_burst(port, queueid, m_table, n); if (unlikely(ret < n)) { do { 
rte_pktmbuf_free(m_table[ret]); } while (++ret < n); } return 0; } static inline void l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf, uint8_t queueid, uint16_t port_in) { struct rx_queue *rxq; uint32_t i, len, next_hop; uint8_t ipv6; uint16_t port_out; int32_t len2; ipv6 = 0; rxq = &qconf->rx_queue_list[queueid]; /* by default, send everything back to the source port */ port_out = port_in; /* Remove the Ethernet header and trailer from the input packet */ rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr)); /* Build transmission burst */ len = qconf->tx_mbufs[port_out].len; /* if this is an IPv4 packet */ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { struct ipv4_hdr *ip_hdr; uint32_t ip_dst; /* Read the lookup key (i.e. ip_dst) from the input packet */ ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *); ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr); /* Find destination port */ if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 && (enabled_port_mask & 1 << next_hop) != 0) { port_out = next_hop; /* Build transmission burst for new port */ len = qconf->tx_mbufs[port_out].len; } /* if we don't need to do any fragmentation */ if (likely (IPV4_MTU_DEFAULT >= m->pkt_len)) { qconf->tx_mbufs[port_out].m_table[len] = m; len2 = 1; } else { len2 = rte_ipv4_fragment_packet(m, &qconf->tx_mbufs[port_out].m_table[len], (uint16_t)(MBUF_TABLE_SIZE - len), IPV4_MTU_DEFAULT, rxq->direct_pool, rxq->indirect_pool); /* Free input packet */ rte_pktmbuf_free(m); /* If we fail to fragment the packet */ if (unlikely (len2 < 0)) return; } } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { /* if this is an IPv6 packet */ struct ipv6_hdr *ip_hdr; ipv6 = 1; /* Read the lookup key (i.e. 
ip_dst) from the input packet */ ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *); /* Find destination port */ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop) == 0 && (enabled_port_mask & 1 << next_hop) != 0) { port_out = next_hop; /* Build transmission burst for new port */ len = qconf->tx_mbufs[port_out].len; } /* if we don't need to do any fragmentation */ if (likely (IPV6_MTU_DEFAULT >= m->pkt_len)) { qconf->tx_mbufs[port_out].m_table[len] = m; len2 = 1; } else { len2 = rte_ipv6_fragment_packet(m, &qconf->tx_mbufs[port_out].m_table[len], (uint16_t)(MBUF_TABLE_SIZE - len), IPV6_MTU_DEFAULT, rxq->direct_pool, rxq->indirect_pool); /* Free input packet */ rte_pktmbuf_free(m); /* If we fail to fragment the packet */ if (unlikely (len2 < 0)) return; } } /* else, just forward the packet */ else { qconf->tx_mbufs[port_out].m_table[len] = m; len2 = 1; } for (i = len; i < len + len2; i ++) { void *d_addr_bytes; m = qconf->tx_mbufs[port_out].m_table[i]; struct ether_hdr *eth_hdr = (struct ether_hdr *) rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr)); if (eth_hdr == NULL) { rte_panic("No headroom in mbuf.\n"); } m->l2_len = sizeof(struct ether_hdr); /* 02:00:00:00:00:xx */ d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0]; *((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)port_out << 40); /* src addr */ ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr); if (ipv6) eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6); else eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4); } len += len2; if (likely(len < MAX_PKT_BURST)) { qconf->tx_mbufs[port_out].len = (uint16_t)len; return; } /* Transmit packets */ send_burst(qconf, (uint16_t)len, port_out); qconf->tx_mbufs[port_out].len = 0; } /* main processing loop */ static int main_loop(__attribute__((unused)) void *dummy) { struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; unsigned lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; int i, j, nb_rx; uint16_t portid; struct lcore_queue_conf *qconf; 
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_FRAG, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_FRAG, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
				portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_queue_conf[lcore_id],
					   qconf->tx_mbufs[portid].len,
					   portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i].portid;
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], qconf, i,
						portid);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], qconf, i,
						portid);
			}
		}
	}
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	       " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       " -q NQ: number of queue (=ports) per lcore (default is 1)\n",
	       prgname);
}

/* Parse the -p port mask; returns the mask, or -1 on bad/empty/zero input */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

/* Parse the -q queue count; returns the count, or -1 when the value is not
 * a positive decimal below MAX_RX_QUEUE_PER_LCORE */
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string (base 10) */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask < 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			print_usage(prgname);
			return -1;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* Print "<name><formatted MAC>" without a trailing newline */
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
printf("\nChecking link status"); fflush(stdout); for (count = 0; count <= MAX_CHECK_TIME; count++) { all_ports_up = 1; RTE_ETH_FOREACH_DEV(portid) { if ((port_mask & (1 << portid)) == 0) continue; memset(&link, 0, sizeof(link)); rte_eth_link_get_nowait(portid, &link); /* print link status if flag set */ if (print_flag == 1) { if (link.link_status) printf( "Port%d Link Up .Speed %u Mbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex\n")); else printf("Port %d Link Down\n", portid); continue; } /* clear all_ports_up flag if any link down */ if (link.link_status == ETH_LINK_DOWN) { all_ports_up = 0; break; } } /* after finally printing all link status, get out */ if (print_flag == 1) break; if (all_ports_up == 0) { printf("."); fflush(stdout); rte_delay_ms(CHECK_INTERVAL); } /* set the print_flag if all ports up or timeout */ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { print_flag = 1; printf("\ndone\n"); } } } /* Check L3 packet type detection capablity of the NIC port */ static int check_ptype(int portid) { int i, ret; int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0; uint32_t ptype_mask = RTE_PTYPE_L3_MASK; ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0); if (ret <= 0) return 0; uint32_t ptypes[ret]; ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret); for (i = 0; i < ret; ++i) { if (ptypes[i] & RTE_PTYPE_L3_IPV4) ptype_l3_ipv4 = 1; if (ptypes[i] & RTE_PTYPE_L3_IPV6) ptype_l3_ipv6 = 1; } if (ptype_l3_ipv4 == 0) printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid); if (ptype_l3_ipv6 == 0) printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid); if (ptype_l3_ipv4 && ptype_l3_ipv6) return 1; return 0; } /* Parse packet type of a packet by SW */ static inline void parse_ptype(struct rte_mbuf *m) { struct ether_hdr *eth_hdr; uint32_t packet_type = RTE_PTYPE_UNKNOWN; uint16_t ether_type; eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); ether_type = 
eth_hdr->ether_type; if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; m->packet_type = packet_type; } /* callback function to detect packet type for a queue of a port */ static uint16_t cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts __rte_unused, void *user_param __rte_unused) { uint16_t i; for (i = 0; i < nb_pkts; ++i) parse_ptype(pkts[i]); return nb_pkts; } static int init_routing_table(void) { struct rte_lpm *lpm; struct rte_lpm6 *lpm6; int socket, ret; unsigned i; for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) { if (socket_lpm[socket]) { lpm = socket_lpm[socket]; /* populate the LPM table */ for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) { ret = rte_lpm_add(lpm, l3fwd_ipv4_route_array[i].ip, l3fwd_ipv4_route_array[i].depth, l3fwd_ipv4_route_array[i].if_out); if (ret < 0) { RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd " "LPM table\n", i); return -1; } RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv4_BYTES_FMT "/%d (port %d)\n", socket, IPv4_BYTES(l3fwd_ipv4_route_array[i].ip), l3fwd_ipv4_route_array[i].depth, l3fwd_ipv4_route_array[i].if_out); } } if (socket_lpm6[socket]) { lpm6 = socket_lpm6[socket]; /* populate the LPM6 table */ for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) { ret = rte_lpm6_add(lpm6, l3fwd_ipv6_route_array[i].ip, l3fwd_ipv6_route_array[i].depth, l3fwd_ipv6_route_array[i].if_out); if (ret < 0) { RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd " "LPM6 table\n", i); return -1; } RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT "/%d (port %d)\n", socket, IPv6_BYTES(l3fwd_ipv6_route_array[i].ip), l3fwd_ipv6_route_array[i].depth, l3fwd_ipv6_route_array[i].if_out); } } } return 0; } static int init_mem(void) { char buf[PATH_MAX]; struct 
rte_mempool *mp;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	struct rte_lpm_config lpm_config;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;

		/* direct pool holds the full packets/fragments' own data */
		if (socket_direct_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating direct mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_direct_%i", socket);

			mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32,
				0, RTE_MBUF_DEFAULT_BUF_SIZE, socket);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create direct mempool\n");
				return -1;
			}
			socket_direct_pool[socket] = mp;
		}

		/* indirect pool: zero-size buffers, mbufs only reference the
		 * original packet's data during fragmentation */
		if (socket_indirect_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating indirect mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_indirect_%i", socket);

			mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32, 0, 0,
				socket);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create indirect mempool\n");
				return -1;
			}
			socket_indirect_pool[socket] = mp;
		}

		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

			lpm_config.max_rules = LPM_MAX_RULES;
			lpm_config.number_tbl8s = 256;
			lpm_config.flags = 0;

			lpm = rte_lpm_create(buf, socket, &lpm_config);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}

		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
			/* NOTE(review): reuses the same name as the IPv4 LPM
			 * table above; rte_lpm and rte_lpm6 live in separate
			 * tailq lists so this does not clash, but a distinct
			 * name (e.g. "IP_FRAG_LPM6_%i") would aid debugging —
			 * confirm against upstream. */
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}

/* Application entry point: EAL init, argument parsing, per-port and
 * per-queue setup, then launch main_loop on every lcore. */
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct
rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int socket, ret;
	unsigned nb_ports;
	uint16_t queueid = 0;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init failed");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid arguments");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");

	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;
		struct rte_eth_rxconf rxq_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		rte_eth_dev_info_get(portid, &dev_info);
		local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
		    dev_info.max_rx_pktlen,
		    local_port_conf.rxmode.max_rx_pkt_len);

		/* get the lcore_id for this port: advance to the next enabled
		 * lcore that still has spare RX-queue slots */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id ++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = (int) rte_lcore_to_socket_id(rx_lcore_id);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		/* wire this RX queue to the per-socket pools and LPM tables */
		rxq = &qconf->rx_queue_list[qconf->n_rx_queue];
		rxq->portid = portid;
		rxq->direct_pool = socket_direct_pool[socket];
		rxq->indirect_pool = socket_indirect_pool[socket];
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];
		qconf->n_rx_queue++;

		/* init port */
		printf("Initializing port %d on lcore %u...", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		/* one RX queue, one TX queue per lcore */
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &local_port_conf);
		if (ret < 0) {
			printf("\n");
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n",
				ret, portid);
		}

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
					    &nb_txd);
		if (ret < 0) {
			printf("\n");
			rte_exit(EXIT_FAILURE, "Cannot adjust number of "
				"descriptors: err=%d, port=%d\n", ret, portid);
		}

		/* init one RX queue */
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     socket, &rxq_conf,
					     socket_direct_pool[socket]);
		if (ret < 0) {
			printf("\n");
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%d\n",
				ret, portid);
		}

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int) rte_lcore_to_socket_id(lcore_id);
			printf("txq=%u,%d ", lcore_id, queueid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socket, txconf);
			if (ret < 0) {
				printf("\n");
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);
			}

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}

		printf("\n");
	}

	printf("\n");

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		rte_eth_promiscuous_enable(portid);

		/* fall back to SW ptype parsing when the NIC cannot
		 * classify IPv4/IPv6 in hardware */
		if (check_ptype(portid) == 0) {
			rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
			printf("Add Rx callback function to detect L3 packet type by SW :"
				" port = %d\n", portid);
		}
	}

	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status(enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}
14536.c
/* ************************************************************************** */
/*                                                                            */
/*                                                        :::      ::::::::   */
/*   ft_substr.c                                        :+:      :+:    :+:   */
/*                                                    +:+ +:+         +:+     */
/*   By: aherlind <[email protected]>          +#+  +:+       +#+        */
/*                                                +#+#+#+#+#+   +#+           */
/*   Created: 2020/11/04 13:17:27 by aherlind          #+#    #+#             */
/*   Updated: 2020/11/04 13:17:28 by aherlind         ###   ########.fr       */
/*                                                                            */
/* ************************************************************************** */

#include "libft.h"

/*
** Returns a newly allocated substring of s starting at index start and at
** most len characters long. Returns an empty string when start is past the
** end of s, NULL when s is NULL or allocation fails.
**
** Improvements over the previous version: the requested len is clamped to
** the characters actually available, so we no longer scan the whole string
** per character, never over-allocate len + 1 bytes for a short tail, and
** avoid the len + 1 size_t overflow when len == SIZE_MAX.
*/
char	*ft_substr(char const *s, unsigned int start, size_t len)
{
	char	*result;
	size_t	s_len;
	size_t	i;

	if (!s)
		return (NULL);
	s_len = 0;
	while (s[s_len])
		s_len++;
	if (start >= s_len)
		len = 0;
	else if (len > s_len - start)
		len = s_len - start;
	result = (char *)malloc(sizeof(char) * (len + 1));
	if (!result)
		return (NULL);
	i = 0;
	while (i < len)
	{
		result[i] = s[start + i];
		i++;
	}
	result[i] = '\0';
	return (result);
}
33270.c
/**
 * xrdp: A Remote Desktop Protocol server.
 *
 * Copyright (C) Jay Sorg 2004-2014
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * mcs layer
 */

#if defined(HAVE_CONFIG_H)
#include <config_ac.h>
#endif

#include "libxrdp.h"
#include "ms-rdpbcgr.h"
#include "log.h"

/*****************************************************************************/
/* Allocate and wire up an MCS layer instance on top of a fresh ISO layer.
   Ownership of client_mcs_data / server_mcs_data stays with the caller;
   the iso_layer and channel_list created here are freed by
   xrdp_mcs_delete. */
struct xrdp_mcs *
xrdp_mcs_create(struct xrdp_sec *owner, struct trans *trans,
                struct stream *client_mcs_data,
                struct stream *server_mcs_data)
{
    struct xrdp_mcs *self;

    DEBUG((" in xrdp_mcs_create"));
    self = (struct xrdp_mcs *)g_malloc(sizeof(struct xrdp_mcs), 1);
    self->sec_layer = owner;
    self->userid = 1;
    self->chanid = 1001;
    self->client_mcs_data = client_mcs_data;
    self->server_mcs_data = server_mcs_data;
    self->iso_layer = xrdp_iso_create(self, trans);
    self->channel_list = list_create();
    DEBUG((" out xrdp_mcs_create"));
    return self;
}

/*****************************************************************************/
/* Free the MCS layer, its channel items, list and ISO layer; safe on NULL */
void
xrdp_mcs_delete(struct xrdp_mcs *self)
{
    struct mcs_channel_item *channel_item;
    int index;
    int count;

    if (self == 0)
    {
        return;
    }

    /* here we have to free the channel items and anything in them */
    count = self->channel_list->count;

    for (index = count - 1; index >= 0; index--)
    {
        channel_item = (struct mcs_channel_item *)
                       list_get_item(self->channel_list, index);
        g_free(channel_item);
    }

    list_delete(self->channel_list);

    xrdp_iso_delete(self->iso_layer);
    /* make sure we get null pointer exception if struct is used again.
 */
    DEBUG(("xrdp_mcs_delete processed"))
    /* NOTE(review): no ';' after DEBUG((...)) above — this compiles only
       when DEBUG expands to nothing in non-debug builds; confirm. */
    g_memset(self, 0, sizeof(struct xrdp_mcs)) ;
    g_free(self);
}

/*****************************************************************************/
/* This function sends channel join confirm */
/* returns error = 1 ok = 0 */
static int
xrdp_mcs_send_cjcf(struct xrdp_mcs *self, int userid, int chanid)
{
    struct stream *s;

    DEBUG((" in xrdp_mcs_send_cjcf"));
    make_stream(s);
    init_stream(s, 8192);

    if (xrdp_iso_init(self->iso_layer, s) != 0)
    {
        free_stream(s);
        DEBUG((" out xrdp_mcs_send_cjcf error"));
        return 1;
    }

    out_uint8(s, (MCS_CJCF << 2) | 2);
    out_uint8(s, 0);
    out_uint16_be(s, userid);
    out_uint16_be(s, chanid);
    /* TODO Explain why we send this two times */
    out_uint16_be(s, chanid);
    s_mark_end(s);

    if (xrdp_iso_send(self->iso_layer, s) != 0)
    {
        free_stream(s);
        DEBUG((" out xrdp_mcs_send_cjcf error"));
        return 1;
    }

    free_stream(s);
    DEBUG((" out xrdp_mcs_send_cjcf"));
    return 0;
}

/*****************************************************************************/
/* Receive one MCS send-data PDU. Handles channel-join requests inline
   (confirming them and reading the next PDU) and treats a Disconnect
   Provider Ultimatum as an error. On success *chan holds the channel id
   and s is positioned at the payload. returns error */
int
xrdp_mcs_recv(struct xrdp_mcs *self, struct stream *s, int *chan)
{
    int appid;
    int opcode;
    int len;
    int userid;
    int chanid;

    DEBUG((" in xrdp_mcs_recv"));

    while (1)
    {
        if (xrdp_iso_recv(self->iso_layer, s) != 0)
        {
            DEBUG((" out xrdp_mcs_recv, xrdp_iso_recv return non zero"));
            g_writeln("xrdp_mcs_recv: xrdp_iso_recv failed");
            return 1;
        }

        if (!s_check_rem(s, 1))
        {
            return 1;
        }

        in_uint8(s, opcode);
        appid = opcode >> 2;

        if (appid == MCS_DPUM) /* Disconnect Provider Ultimatum */
        {
            g_writeln("received Disconnect Provider Ultimatum");
            DEBUG((" out xrdp_mcs_recv appid != MCS_DPUM"));
            return 1;
        }

        /* this is channels getting added from the client */
        if (appid == MCS_CJRQ)
        {
            if (!s_check_rem(s, 4))
            {
                return 1;
            }

            in_uint16_be(s, userid);
            in_uint16_be(s, chanid);
            log_message(LOG_LEVEL_DEBUG,"MCS_CJRQ - channel join request received");
            DEBUG(("xrdp_mcs_recv adding channel %4.4x", chanid));

            if (xrdp_mcs_send_cjcf(self, userid, chanid) != 0)
            {
                log_message(LOG_LEVEL_ERROR,"Non handled error from xrdp_mcs_send_cjcf") ;
            }

            /* the CJRQ consumed this PDU; read the next one */
            s = libxrdp_force_read(self->iso_layer->trans);
            if (s == 0)
            {
                g_writeln("xrdp_mcs_recv: libxrdp_force_read failed");
                return 1;
            }

            continue;
        }

        if (appid == MCS_SDRQ || appid == MCS_SDIN)
        {
            break;
        }
        else
        {
            log_message(LOG_LEVEL_DEBUG,"Received an unhandled appid:%d",appid);
        }

        break;
    }

    if (appid != MCS_SDRQ)
    {
        DEBUG((" out xrdp_mcs_recv err got 0x%x need MCS_SDRQ", appid));
        return 1;
    }

    if (!s_check_rem(s, 6))
    {
        return 1;
    }

    /* skip userid (2), read channel (2), skip flags (1), read length */
    in_uint8s(s, 2);
    in_uint16_be(s, *chan);
    in_uint8s(s, 1);
    in_uint8(s, len);

    /* high bit set: two-byte length, skip the second length byte */
    if (len & 0x80)
    {
        if (!s_check_rem(s, 1))
        {
            return 1;
        }
        in_uint8s(s, 1);
    }

    DEBUG((" out xrdp_mcs_recv"));
    return 0;
}

/*****************************************************************************/
/* Parse a BER tag + definite length; fails when the tag does not match or
   the stream runs short. returns error */
static int
xrdp_mcs_ber_parse_header(struct xrdp_mcs *self, struct stream *s,
                          int tag_val, int *len)
{
    int tag;
    int l;
    int i;

    if (tag_val > 0xff)
    {
        if (!s_check_rem(s, 2))
        {
            return 1;
        }
        in_uint16_be(s, tag);
    }
    else
    {
        if (!s_check_rem(s, 1))
        {
            return 1;
        }
        in_uint8(s, tag);
    }

    if (tag != tag_val)
    {
        return 1;
    }

    if (!s_check_rem(s, 1))
    {
        return 1;
    }

    in_uint8(s, l);

    /* long form: low 7 bits give the number of subsequent length octets */
    if (l & 0x80)
    {
        l = l & ~0x80;
        *len = 0;

        while (l > 0)
        {
            if (!s_check_rem(s, 1))
            {
                return 1;
            }
            in_uint8(s, i);
            *len = (*len << 8) | i;
            l--;
        }
    }
    else
    {
        *len = l;
    }

    if (s_check(s))
    {
        return 0;
    }
    else
    {
        return 1;
    }
}

/*****************************************************************************/
/* Skip over a BER-encoded DomainParameters block. returns error */
static int
xrdp_mcs_parse_domain_params(struct xrdp_mcs *self, struct stream *s)
{
    int len;

    if (xrdp_mcs_ber_parse_header(self, s, MCS_TAG_DOMAIN_PARAMS, &len) != 0)
    {
        return 1;
    }

    if ((len < 0) || !s_check_rem(s, len))
    {
        return 1;
    }

    in_uint8s(s, len);

    if (s_check(s))
    {
        return 0;
    }
    else
    {
        return 1;
    }
}

/*****************************************************************************/
/* Read the MCS Connect Initial PDU and copy the client GCC/MCS data into
   self->client_mcs_data. returns error */
static int
xrdp_mcs_recv_connect_initial(struct xrdp_mcs *self)
{
    int len;
    struct stream *s;

    s = libxrdp_force_read(self->iso_layer->trans);
    if (s == 0)
    {
        return 1;
    }

    if (xrdp_iso_recv(self->iso_layer, s) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_ber_parse_header(self, s, MCS_CONNECT_INITIAL, &len) != 0)
    {
        return 1;
    }

    /* callingDomainSelector */
    if (xrdp_mcs_ber_parse_header(self, s, BER_TAG_OCTET_STRING, &len) != 0)
    {
        return 1;
    }

    if ((len < 0) || !s_check_rem(s, len))
    {
        return 1;
    }

    in_uint8s(s, len);

    /* calledDomainSelector */
    if (xrdp_mcs_ber_parse_header(self, s, BER_TAG_OCTET_STRING, &len) != 0)
    {
        return 1;
    }

    if ((len < 0) || !s_check_rem(s, len))
    {
        return 1;
    }

    in_uint8s(s, len);

    /* upwardFlag */
    if (xrdp_mcs_ber_parse_header(self, s, BER_TAG_BOOLEAN, &len) != 0)
    {
        return 1;
    }

    if ((len < 0) || !s_check_rem(s, len))
    {
        return 1;
    }

    in_uint8s(s, len);

    /* target, min and max domain parameters */
    if (xrdp_mcs_parse_domain_params(self, s) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_parse_domain_params(self, s) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_parse_domain_params(self, s) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_ber_parse_header(self, s, BER_TAG_OCTET_STRING, &len) != 0)
    {
        return 1;
    }

    /* mcs data can not be zero length */
    if ((len <= 0) || (len > 16 * 1024))
    {
        return 1;
    }

    if (!s_check_rem(s, len))
    {
        return 1;
    }

    /* make a copy of client mcs data */
    init_stream(self->client_mcs_data, len);
    out_uint8a(self->client_mcs_data, s->p, len);
    in_uint8s(s, len);
    s_mark_end(self->client_mcs_data);

    if (s_check_end(s))
    {
        return 0;
    }
    else
    {
        return 1;
    }
}

/*****************************************************************************/
/* Receive the Erect Domain Request; may update self->userid. returns error */
static int
xrdp_mcs_recv_edrq(struct xrdp_mcs *self)
{
    int opcode;
    struct stream *s;

    DEBUG((" in xrdp_mcs_recv_edrq"));

    s = libxrdp_force_read(self->iso_layer->trans);
    if (s == 0)
    {
        return 1;
    }

    if (xrdp_iso_recv(self->iso_layer, s) != 0)
    {
        return 1;
    }

    if (!s_check_rem(s, 1))
    {
        return 1;
    }

    in_uint8(s, opcode);

    if ((opcode >> 2) != MCS_EDRQ)
    {
        return 1;
    }

    if (!s_check_rem(s, 4))
    {
        return 1;
    }

    in_uint8s(s, 2);
    in_uint8s(s, 2);

    if (opcode & 2)
    {
        if (!s_check_rem(s, 2))
        {
            return 1;
        }
        in_uint16_be(s, self->userid);
    }

    if (!(s_check_end(s)))
    {
        return 1;
    }

    DEBUG((" out xrdp_mcs_recv_edrq"));
    return 0;
}
/*****************************************************************************/
/* Receive the Attach User Request; may update self->userid. returns error */
static int
xrdp_mcs_recv_aurq(struct xrdp_mcs *self)
{
    int opcode;
    struct stream *s;

    DEBUG((" in xrdp_mcs_recv_aurq"));

    s = libxrdp_force_read(self->iso_layer->trans);
    if (s == 0)
    {
        return 1;
    }

    if (xrdp_iso_recv(self->iso_layer, s) != 0)
    {
        return 1;
    }

    if (!s_check_rem(s, 1))
    {
        return 1;
    }

    in_uint8(s, opcode);

    if ((opcode >> 2) != MCS_AURQ)
    {
        return 1;
    }

    if (opcode & 2)
    {
        if (!s_check_rem(s, 2))
        {
            return 1;
        }
        in_uint16_be(s, self->userid);
    }

    if (!(s_check_end(s)))
    {
        return 1;
    }

    DEBUG((" out xrdp_mcs_recv_aurq"));
    return 0;
}

/*****************************************************************************/
/* Send the Attach User Confirm with self->userid. returns error */
static int
xrdp_mcs_send_aucf(struct xrdp_mcs *self)
{
    struct stream *s;

    DEBUG((" in xrdp_mcs_send_aucf"));
    make_stream(s);
    init_stream(s, 8192);

    if (xrdp_iso_init(self->iso_layer, s) != 0)
    {
        free_stream(s);
        DEBUG((" out xrdp_mcs_send_aucf error"));
        return 1;
    }

    out_uint8(s, ((MCS_AUCF << 2) | 2));
    out_uint8s(s, 1);
    out_uint16_be(s, self->userid);
    s_mark_end(s);

    if (xrdp_iso_send(self->iso_layer, s) != 0)
    {
        free_stream(s);
        DEBUG((" out xrdp_mcs_send_aucf error"));
        return 1;
    }

    free_stream(s);
    DEBUG((" out xrdp_mcs_send_aucf"));
    return 0;
}

/*****************************************************************************/
/* Receive and discard a Channel Join Request (the ids are skipped, the
   caller sends the confirm separately). returns error */
static int
xrdp_mcs_recv_cjrq(struct xrdp_mcs *self)
{
    int opcode;
    struct stream *s;

    s = libxrdp_force_read(self->iso_layer->trans);
    if (s == 0)
    {
        return 1;
    }

    if (xrdp_iso_recv(self->iso_layer, s) != 0)
    {
        return 1;
    }

    if (!s_check_rem(s, 1))
    {
        return 1;
    }

    in_uint8(s, opcode);

    if ((opcode >> 2) != MCS_CJRQ)
    {
        return 1;
    }

    if (!s_check_rem(s, 4))
    {
        return 1;
    }

    in_uint8s(s, 4);

    if (opcode & 2)
    {
        if (!s_check_rem(s, 2))
        {
            return 1;
        }
        in_uint8s(s, 2);
    }

    if (!(s_check_end(s)))
    {
        return 1;
    }

    return 0;
}

/*****************************************************************************/
/* Write a BER tag and definite length (long form 0x82 when len >= 0x80).
   returns error */
static int
xrdp_mcs_ber_out_header(struct xrdp_mcs *self, struct stream *s,
                        int tag_val, int len)
{
    if (tag_val > 0xff)
    {
        out_uint16_be(s, tag_val);
    }
    else
    {
        out_uint8(s, tag_val);
    }

    if (len >= 0x80)
    {
        out_uint8(s, 0x82);
        out_uint16_be(s, len);
    }
    else
    {
        out_uint8(s, len);
    }

    return 0;
}

/*****************************************************************************/
/* Write a one-byte BER INTEGER. returns error */
static int
xrdp_mcs_ber_out_int8(struct xrdp_mcs *self, struct stream *s, int value)
{
    xrdp_mcs_ber_out_header(self, s, BER_TAG_INTEGER, 1);
    out_uint8(s, value);
    return 0;
}

#if 0 /* not used */
/*****************************************************************************/
/* Write a two-byte BER INTEGER. returns error */
static int
xrdp_mcs_ber_out_int16(struct xrdp_mcs *self, struct stream *s, int value)
{
    xrdp_mcs_ber_out_header(self, s, BER_TAG_INTEGER, 2);
    out_uint8(s, (value >> 8));
    out_uint8(s, value);
    return 0;
}
#endif

/*****************************************************************************/
/* Write a three-byte BER INTEGER. returns error */
static int
xrdp_mcs_ber_out_int24(struct xrdp_mcs *self, struct stream *s, int value)
{
    xrdp_mcs_ber_out_header(self, s, BER_TAG_INTEGER, 3);
    out_uint8(s, (value >> 16));
    out_uint8(s, (value >> 8));
    out_uint8(s, value);
    return 0;
}

/*****************************************************************************/
/* Write a BER DomainParameters block (fixed 26-byte encoding). returns
   error */
static int
xrdp_mcs_out_domain_params(struct xrdp_mcs *self, struct stream *s,
                           int max_channels, int max_users, int max_tokens,
                           int max_pdu_size)
{
    xrdp_mcs_ber_out_header(self, s, MCS_TAG_DOMAIN_PARAMS, 26);
    xrdp_mcs_ber_out_int8(self, s, max_channels);
    xrdp_mcs_ber_out_int8(self, s, max_users);
    xrdp_mcs_ber_out_int8(self, s, max_tokens);
    xrdp_mcs_ber_out_int8(self, s, 1);
    xrdp_mcs_ber_out_int8(self, s, 0);
    xrdp_mcs_ber_out_int8(self, s, 1);
    xrdp_mcs_ber_out_int24(self, s, max_pdu_size);
    xrdp_mcs_ber_out_int8(self, s, 2);
    return 0;
}

/*****************************************************************************/
/* prepare server gcc data to send in mcs response msg */
int
xrdp_mcs_out_gcc_data(struct xrdp_sec *self)
{
    struct stream *s;
    int num_channels_even;
    int num_channels;
    int index;
    int channel;
    int gcc_size;
    char* gcc_size_ptr;
    char* ud_ptr;

    num_channels = self->mcs_layer->channel_list->count;
    /* channel list in SEC_TAG_SRV_CHANNELS must be padded to even count */
    num_channels_even = num_channels + (num_channels & 1);
    s = &(self->server_mcs_data);
    init_stream(s, 8192);
    out_uint16_be(s, 5); /* AsnBerObjectIdentifier */
    out_uint16_be(s, 0x14);
    out_uint8(s, 0x7c);
    out_uint16_be(s, 1); /* -- */
    out_uint8(s, 0x2a); /* ConnectPDULen */
    out_uint8(s, 0x14);
    out_uint8(s, 0x76);
    out_uint8(s, 0x0a);
    out_uint8(s, 1);
    out_uint8(s, 1);
    out_uint8(s, 0);
    out_uint16_le(s, 0xc001);
    out_uint8(s, 0);
    out_uint8(s, 0x4d); /* M */
    out_uint8(s, 0x63); /* c */
    out_uint8(s, 0x44); /* D */
    out_uint8(s, 0x6e); /* n */
    /* GCC Response Total Length - 2 bytes , set later */
    gcc_size_ptr = s->p; /* RDPGCCUserDataResponseLength */
    out_uint8s(s, 2);
    ud_ptr = s->p; /* User Data */

    out_uint16_le(s, SEC_TAG_SRV_INFO);
    if (self->mcs_layer->iso_layer->rdpNegData)
    {
        out_uint16_le(s, 12); /* len */
    }
    else
    {
        out_uint16_le(s, 8); /* len */
    }
    out_uint8(s, 4); /* 4 = rdp5 1 = rdp4 */
    out_uint8(s, 0);
    out_uint8(s, 8);
    out_uint8(s, 0);
    if (self->mcs_layer->iso_layer->rdpNegData)
    {
        /* RequestedProtocol */
        out_uint32_le(s, self->mcs_layer->iso_layer->requestedProtocol);
    }
    out_uint16_le(s, SEC_TAG_SRV_CHANNELS);
    out_uint16_le(s, 8 + (num_channels_even * 2)); /* len */
    out_uint16_le(s, MCS_GLOBAL_CHANNEL); /* 1003, 0x03eb main channel */
    out_uint16_le(s, num_channels); /* number of other channels */

    for (index = 0; index < num_channels_even; index++)
    {
        if (index < num_channels)
        {
            channel = MCS_GLOBAL_CHANNEL + (index + 1);
            out_uint16_le(s, channel);
        }
        else
        {
            /* even-count padding entry */
            out_uint16_le(s, 0);
        }
    }

    if (self->rsa_key_bytes == 64)
    {
        g_writeln("xrdp_sec_out_mcs_data: using 512 bit RSA key");
        out_uint16_le(s, SEC_TAG_SRV_CRYPT);
        out_uint16_le(s, 0x00ec); /* len is 236 */
        out_uint32_le(s, self->crypt_method);
        out_uint32_le(s, self->crypt_level);
        out_uint32_le(s, 32); /* 32 bytes random len */
        out_uint32_le(s, 0xb8); /* 184 bytes rsa info(certificate) len */
        out_uint8a(s, self->server_random, 32);
        /* here to end is certificate */
        /* HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\ */
        /* TermService\Parameters\Certificate */
        out_uint32_le(s, 1);
        out_uint32_le(s, 1);
        out_uint32_le(s, 1);
        out_uint16_le(s, SEC_TAG_PUBKEY); /* 0x0006 */
        out_uint16_le(s, 0x005c); /* 92 bytes length of SEC_TAG_PUBKEY */
        out_uint32_le(s, SEC_RSA_MAGIC); /* 0x31415352 'RSA1' */
        out_uint32_le(s, 0x0048); /* 72 bytes modulus len */
        out_uint32_be(s, 0x00020000); /* bit len */
        out_uint32_be(s, 0x3f000000); /* data len */
        out_uint8a(s, self->pub_exp, 4); /* pub exp */
        out_uint8a(s, self->pub_mod, 64); /* pub mod */
        out_uint8s(s, 8); /* pad */
        out_uint16_le(s, SEC_TAG_KEYSIG); /* 0x0008 */
        out_uint16_le(s, 72); /* len */
        out_uint8a(s, self->pub_sig, 64); /* pub sig */
        out_uint8s(s, 8); /* pad */
    }
    else if (self->rsa_key_bytes == 256)
    {
        g_writeln("xrdp_sec_out_mcs_data: using 2048 bit RSA key");
        out_uint16_le(s, SEC_TAG_SRV_CRYPT);
        out_uint16_le(s, 0x01ac); /* len is 428 */
        out_uint32_le(s, self->crypt_method);
        out_uint32_le(s, self->crypt_level);
        out_uint32_le(s, 32); /* 32 bytes random len */
        out_uint32_le(s, 0x178); /* 376 bytes rsa info(certificate) len */
        out_uint8a(s, self->server_random, 32);
        /* here to end is certificate */
        /* HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\ */
        /* TermService\Parameters\Certificate */
        out_uint32_le(s, 1);
        out_uint32_le(s, 1);
        out_uint32_le(s, 1);
        out_uint16_le(s, SEC_TAG_PUBKEY); /* 0x0006 */
        out_uint16_le(s, 0x011c); /* 284 bytes length of SEC_TAG_PUBKEY */
        out_uint32_le(s, SEC_RSA_MAGIC); /* 0x31415352 'RSA1' */
        out_uint32_le(s, 0x0108); /* 264 bytes modulus len */
        out_uint32_be(s, 0x00080000); /* bit len */
        out_uint32_be(s, 0xff000000); /* data len */
        out_uint8a(s, self->pub_exp, 4); /* pub exp */
        out_uint8a(s, self->pub_mod, 256); /* pub mod */
        out_uint8s(s, 8); /* pad */
        out_uint16_le(s, SEC_TAG_KEYSIG); /* 0x0008 */
        out_uint16_le(s, 72); /* len */
        out_uint8a(s, self->pub_sig, 64); /* pub sig */
        out_uint8s(s, 8); /* pad */
    }
    else if (self->rsa_key_bytes == 0) /* no security */
    {
        g_writeln("xrdp_sec_out_mcs_data: using no security");
        out_uint16_le(s, SEC_TAG_SRV_CRYPT);
        out_uint16_le(s, 12); /* len is 12 */
        out_uint32_le(s, self->crypt_method);
        out_uint32_le(s, self->crypt_level);
    }
    else
    {
        g_writeln("xrdp_sec_out_mcs_data: error");
    }

    /* end certificate */
    s_mark_end(s);

    /* patch the user-data length back in, high bit marks long form */
    gcc_size = (int)(s->end - ud_ptr) | 0x8000;
    gcc_size_ptr[0] = gcc_size >> 8;
    gcc_size_ptr[1] = gcc_size;

    return 0;
}

/*****************************************************************************/
/* Send the MCS Connect Response wrapping server_mcs_data. returns error */
static int
xrdp_mcs_send_connect_response(struct xrdp_mcs *self)
{
    int data_len;
    struct stream *s;

    DEBUG((" in xrdp_mcs_send_connect_response"));
    make_stream(s);
    init_stream(s, 8192);
    data_len = (int) (self->server_mcs_data->end - self->server_mcs_data->data);
    xrdp_iso_init(self->iso_layer, s);
    //TODO: we should calculate the whole length include MCS_CONNECT_RESPONSE
    xrdp_mcs_ber_out_header(self, s, MCS_CONNECT_RESPONSE,
                            data_len > 0x80 ? data_len + 38 : data_len + 36);
    xrdp_mcs_ber_out_header(self, s, BER_TAG_RESULT, 1);
    out_uint8(s, 0);
    xrdp_mcs_ber_out_header(self, s, BER_TAG_INTEGER, 1);
    out_uint8(s, 0);
    xrdp_mcs_out_domain_params(self, s, 22, 3, 0, 0xfff8);
    xrdp_mcs_ber_out_header(self, s, BER_TAG_OCTET_STRING, data_len);
    /* mcs data */
    out_uint8a(s, self->server_mcs_data->data, data_len);
    s_mark_end(s);

    if (xrdp_iso_send(self->iso_layer, s) != 0)
    {
        free_stream(s);
        DEBUG((" out xrdp_mcs_send_connect_response error"));
        return 1;
    }

    free_stream(s);
    DEBUG((" out xrdp_mcs_send_connect_response"));
    return 0;
}

/*****************************************************************************/
/* Drive the whole MCS connect sequence: Connect Initial, GCC exchange,
   Connect Response, EDRQ/AURQ/AUCF and the channel joins. returns error */
int
xrdp_mcs_incoming(struct xrdp_mcs *self)
{
    int index;

    DEBUG((" in xrdp_mcs_incoming"));

    if (xrdp_mcs_recv_connect_initial(self) != 0)
    {
        return 1;
    }

    /* in xrdp_sec.c */
    if (xrdp_sec_process_mcs_data(self->sec_layer) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_out_gcc_data(self->sec_layer) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_send_connect_response(self) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_recv_edrq(self) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_recv_aurq(self) != 0)
    {
        return 1;
    }

    if (xrdp_mcs_send_aucf(self) != 0)
    {
        return 1;
    }

    /* user channel + global channel + one per static virtual channel */
    for (index = 0; index < self->channel_list->count + 2; index++)
    {
        if (xrdp_mcs_recv_cjrq(self) != 0)
        {
            return 1;
        }

        if (xrdp_mcs_send_cjcf(self, self->userid,
                               self->userid + MCS_USERCHANNEL_BASE + index) != 0)
        {
            return 1;
        }
    }

    DEBUG((" out xrdp_mcs_incoming"));
    return 0;
}

/*****************************************************************************/
/* Reserve space for the MCS header on an outgoing stream. returns error */
int
xrdp_mcs_init(struct xrdp_mcs *self, struct stream *s)
{
    xrdp_iso_init(self->iso_layer, s);
    s_push_layer(s, mcs_hdr, 8);
    return 0;
}

/*****************************************************************************/
/* returns error */
/* Inform the callback that an mcs packet has been sent.  This is needed so
   the module can send any high priority mcs packets like audio.
*/
static int
xrdp_mcs_call_callback(struct xrdp_mcs *self)
{
    int rv;
    struct xrdp_session *session;

    rv = 0;
    /* if there is a callback, call it here */
    session = self->sec_layer->rdp_layer->session;

    if (session != 0)
    {
        if (session->callback != 0)
        {
            if (session->check_for_app_input)
            {
                /* in xrdp_wm.c */
                rv = session->callback(session->id, 0x5556, 0, 0, 0, 0);
            }
        }
        else
        {
            g_writeln("in xrdp_mcs_send, session->callback is nil");
        }
    }
    else
    {
        g_writeln("in xrdp_mcs_send, session is nil");
    }

    return rv;
}

/*****************************************************************************/
/* returns error */
/* Fill in the MCS send-data-indication header reserved by
   xrdp_mcs_init and send the stream via the ISO layer on channel
   chan. */
int
xrdp_mcs_send(struct xrdp_mcs *self, struct stream *s, int chan)
{
    int len;
    char *lp;
    //static int max_len = 0;

    DEBUG((" in xrdp_mcs_send"));
    /* rewind to the 8 header bytes reserved in xrdp_mcs_init */
    s_pop_layer(s, mcs_hdr);
    len = (s->end - s->p) - 8;

    if (len > 8192 * 2)
    {
        g_writeln("error in xrdp_mcs_send, size too big: %d bytes", len);
    }

    //if (len > max_len)
    //{
    //    max_len = len;
    //    g_printf("mcs max length is %d\r\n", max_len);
    //}
    //g_printf("mcs length %d max length is %d\r\n", len, max_len);
    //g_printf("mcs length %d\r\n", len);
    out_uint8(s, MCS_SDIN << 2);
    out_uint16_be(s, self->userid);
    out_uint16_be(s, chan);
    out_uint8(s, 0x70);

    if (len >= 128)
    {
        /* two-byte length: high bit set, big endian */
        len = len | 0x8000;
        out_uint16_be(s, len);
    }
    else
    {
        /* one-byte length: the reserved 8-byte header is now one byte
           too long, so shift the payload up a byte and shrink the
           stream to compensate */
        out_uint8(s, len);
        /* move everything up one byte */
        lp = s->p;

        while (lp < s->end)
        {
            lp[0] = lp[1];
            lp++;
        }

        s->end--;
    }

    if (xrdp_iso_send(self->iso_layer, s) != 0)
    {
        DEBUG((" out xrdp_mcs_send error"));
        return 1;
    }

    /* todo, do we need to call this for every mcs packet,
       maybe every 5 or so */
    if (chan == MCS_GLOBAL_CHANNEL)
    {
        xrdp_mcs_call_callback(self);
    }

    DEBUG((" out xrdp_mcs_send"));
    return 0;
}

/**
 * Internal help function to close the socket: shuts down TLS (if any)
 * on the transport, closes the TCP socket and zeroes the descriptor.
 * Logs a message if there is no transport to close.
 * @param self mcs layer whose underlying transport should be closed
 */
void
close_rdp_socket(struct xrdp_mcs *self)
{
    if (self->iso_layer != 0)
    {
        if (self->iso_layer->trans != 0)
        {
            trans_shutdown_tls_mode(self->iso_layer->trans);
            g_tcp_close(self->iso_layer->trans->sck);
            self->iso_layer->trans->sck = 0;
            g_writeln("xrdp_mcs_disconnect - socket closed");
            return;
        }
    }

    g_writeln("Failed to close socket");
}

/*****************************************************************************/
/* returns error */
/* Send an MCS Disconnect-Provider-Ultimatum and close the underlying
   socket. The socket is closed on every path, success or failure. */
int
xrdp_mcs_disconnect(struct xrdp_mcs *self)
{
    struct stream *s;

    DEBUG((" in xrdp_mcs_disconnect"));
    make_stream(s);
    init_stream(s, 8192);

    if (xrdp_iso_init(self->iso_layer, s) != 0)
    {
        free_stream(s);
        close_rdp_socket(self);
        DEBUG((" out xrdp_mcs_disconnect error - 1"));
        return 1;
    }

    out_uint8(s, (MCS_DPUM << 2) | 1);
    out_uint8(s, 0x80);
    s_mark_end(s);

    if (xrdp_iso_send(self->iso_layer, s) != 0)
    {
        free_stream(s);
        close_rdp_socket(self);
        DEBUG((" out xrdp_mcs_disconnect error - 2"));
        return 1;
    }

    free_stream(s);
    close_rdp_socket(self);
    DEBUG(("xrdp_mcs_disconnect - close sent"));
    return 0;
}
544200.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__c_CWE805_wchar_t_loop_14.c
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_CWE805.string.label.xml
Template File: sources-sink-14.tmpl.c */
/*
 * @description
 * CWE: 122 Heap Based Buffer Overflow
 * BadSource:  Allocate using malloc() and set data pointer to a small buffer
 * GoodSource: Allocate using malloc() and set data pointer to a large buffer
 * Sink: loop
 *    BadSink : Copy string to data using a loop
 * Flow Variant: 14 Control flow: if(globalFive==5) and if(globalFive!=5)
 *
 * NOTE(review): this is a SAST benchmark file — the overflow in the
 * "bad" function is the deliberate flaw under test and must NOT be
 * fixed; tools are expected to detect it.
 */

#include "std_testcase.h"

#include <wchar.h>

#ifndef OMITBAD

/* Deliberate CWE-122: allocates 50 wchar_t but the sink below writes
   100 elements — heap buffer overflow by design. */
void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_wchar_t_loop_14_bad()
{
    wchar_t * data;
    data = NULL;
    if(globalFive==5)
    {
        /* FLAW: Allocate and point data to a small buffer that is smaller than the large buffer used in the sinks */
        data = (wchar_t *)malloc(50*sizeof(wchar_t));
        data[0] = L'\0'; /* null terminate */
    }
    {
        size_t i;
        wchar_t source[100];
        wmemset(source, L'C', 100-1); /* fill with L'C's */
        source[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possible buffer overflow if source is larger than data */
        for (i = 0; i < 100; i++)
        {
            data[i] = source[i];
        }
        data[100-1] = L'\0'; /* Ensure the destination buffer is null terminated */
        printWLine(data);
        free(data);
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B1() - use goodsource and badsink by changing the globalFive==5 to globalFive!=5 */
static void goodG2B1()
{
    wchar_t * data;
    data = NULL;
    if(globalFive!=5)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
    else
    {
        /* FIX: Allocate and point data to a large buffer that is at least as large as the large buffer used in the sink */
        data = (wchar_t *)malloc(100*sizeof(wchar_t));
        data[0] = L'\0'; /* null terminate */
    }
    {
        size_t i;
        wchar_t source[100];
        wmemset(source, L'C', 100-1); /* fill with L'C's */
        source[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possible buffer overflow if source is larger than data */
        for (i = 0; i < 100; i++)
        {
            data[i] = source[i];
        }
        data[100-1] = L'\0'; /* Ensure the destination buffer is null terminated */
        printWLine(data);
        free(data);
    }
}

/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */
static void goodG2B2()
{
    wchar_t * data;
    data = NULL;
    if(globalFive==5)
    {
        /* FIX: Allocate and point data to a large buffer that is at least as large as the large buffer used in the sink */
        data = (wchar_t *)malloc(100*sizeof(wchar_t));
        data[0] = L'\0'; /* null terminate */
    }
    {
        size_t i;
        wchar_t source[100];
        wmemset(source, L'C', 100-1); /* fill with L'C's */
        source[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possible buffer overflow if source is larger than data */
        for (i = 0; i < 100; i++)
        {
            data[i] = source[i];
        }
        data[100-1] = L'\0'; /* Ensure the destination buffer is null terminated */
        printWLine(data);
        free(data);
    }
}

/* Entry point exercising both "good" variants. */
void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_wchar_t_loop_14_good()
{
    goodG2B1();
    goodG2B2();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
 * its own for testing or for building a binary to use in testing binary
 * analysis tools. It is not used when compiling all the testcases as one
 * application, which is how source code analysis tools are tested.
 */

#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE122_Heap_Based_Buffer_Overflow__c_CWE805_wchar_t_loop_14_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE122_Heap_Based_Buffer_Overflow__c_CWE805_wchar_t_loop_14_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
950909.c
FALLBACK fallback Contract 1 1 INNCALL NULL FUN1 uint16 NULL 1 0 NULL NULL VAR0 FUN1 3 INNFUN ASSIGN VAR1 FALLBACK 1 INNFUN NULL