filename
stringlengths
3
9
code
stringlengths
4
1.87M
638336.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/*
 * Count the words of the file named by argv[1] by forking a child that
 * execs "wc -w <file>", then waiting for the child to finish.
 *
 * Returns 0 on success; exits/returns non-zero on usage error, fork
 * failure, or exec failure.
 */
int main(int argc, char *argv[]) {
    if (argc != 2) {
        /* BUG FIX: original silently returned success on wrong usage. */
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    char *cmd_vec[] = {"wc", "-w", argv[1], NULL};
    pid_t pid = fork();

    if (pid < 0) {
        perror("fork");
        exit(1);
    } else if (pid == 0) {
        /* Child: replace this image with wc; execvp returns only on error. */
        execvp(cmd_vec[0], cmd_vec);
        /* BUG FIX: original ignored exec failure and fell through,
         * returning 0 and flushing duplicated stdio buffers. */
        perror("execvp");
        _exit(127);  /* _exit: skip atexit handlers / stdio flush in child */
    }

    /* Parent: reap the child so it does not linger as a zombie. */
    wait(NULL);
    return 0;
}
230896.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "la_system.h" #include "la_thread.h" int thread(void *arg) { int *i = (int *)arg; system_sleep(rand() % 1000); printf ( "THREAD: %d\n", *i ); return *i; } int main(void) { srand(time(NULL)); int i1 = 1; THREAD *t1 = thread_new(); thread_setFunction(t1, thread, &i1); thread_run(t1); int i2 = 2; THREAD *t2 = thread_new(); thread_setFunction(t2, thread, &i2); thread_run(t2); int i3 = 3; THREAD *t3 = thread_new(); thread_setFunction(t3, thread, &i3); thread_run(t3); thread_wait(t1); thread_free(t1); thread_wait(t2); thread_free(t2); thread_wait(t3); thread_free(t3); return EXIT_SUCCESS; }
731343.c
/* * Copyright 2020-2021 Telecom Paris Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* * Copyright (c) 2003-2017 Lev Walkin <[email protected]>. * All rights reserved. * Redistribution and modifications are permitted subject to BSD license. */ #include "asn_internal.h" #include "constr_SEQUENCE.h" #include "OPEN_TYPE.h" #include "per_opentype.h" /* * Number of bytes left for this structure. * (ctx->left) indicates the number of bytes _transferred_ for the structure. * (size) contains the number of bytes in the buffer passed. */ #define LEFT ((size<(size_t)ctx->left)?size:(size_t)ctx->left) /* * If the subprocessor function returns with an indication that it wants * more data, it may well be a fatal decoding problem, because the * size is constrained by the <TLV>'s L, even if the buffer size allows * reading more data. * For example, consider the buffer containing the following TLVs: * <T:5><L:1><V> <T:6>... * The TLV length clearly indicates that one byte is expected in V, but * if the V processor returns with "want more data" even if the buffer * contains way more data than the V processor have seen. */ #define SIZE_VIOLATION (ctx->left >= 0 && (size_t)ctx->left <= size) /* * This macro "eats" the part of the buffer which is definitely "consumed", * i.e. was correctly converted into local representation or rightfully skipped. 
*/ #undef ADVANCE #define ADVANCE(num_bytes) do { \ size_t num = num_bytes; \ ptr = ((const char *)ptr) + num; \ size -= num; \ if(ctx->left >= 0) \ ctx->left -= num; \ consumed_myself += num; \ } while(0) /* * Switch to the next phase of parsing. */ #undef NEXT_PHASE #undef PHASE_OUT #define NEXT_PHASE(ctx) do { \ ctx->phase++; \ ctx->step = 0; \ } while(0) #define PHASE_OUT(ctx) do { ctx->phase = 10; } while(0) /* * Return a standardized complex structure. */ #undef RETURN #define RETURN(_code) do { \ rval.code = _code; \ rval.consumed = consumed_myself;\ return rval; \ } while(0) /* * Check whether we are inside the extensions group. */ #define IN_EXTENSION_GROUP(specs, memb_idx) \ ((specs)->first_extension >= 0 \ && (unsigned)(specs)->first_extension <= (memb_idx)) /* * Tags are canonically sorted in the tag2element map. */ static int _t2e_cmp(const void *ap, const void *bp) { const asn_TYPE_tag2member_t *a = (const asn_TYPE_tag2member_t *)ap; const asn_TYPE_tag2member_t *b = (const asn_TYPE_tag2member_t *)bp; int a_class = BER_TAG_CLASS(a->el_tag); int b_class = BER_TAG_CLASS(b->el_tag); if(a_class == b_class) { ber_tlv_tag_t a_value = BER_TAG_VALUE(a->el_tag); ber_tlv_tag_t b_value = BER_TAG_VALUE(b->el_tag); if(a_value == b_value) { if(a->el_no > b->el_no) return 1; /* * Important: we do not check * for a->el_no <= b->el_no! */ return 0; } else if(a_value < b_value) return -1; else return 1; } else if(a_class < b_class) { return -1; } else { return 1; } } /* * The decoder of the SEQUENCE type. */ asn_dec_rval_t SEQUENCE_decode_ber(const asn_codec_ctx_t *opt_codec_ctx, const asn_TYPE_descriptor_t *td, void **struct_ptr, const void *ptr, size_t size, int tag_mode) { /* * Bring closer parts of structure description. */ const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; const asn_TYPE_member_t *elements = td->elements; /* * Parts of the structure being constructed. */ void *st = *struct_ptr; /* Target structure. 
*/ asn_struct_ctx_t *ctx; /* Decoder context */ ber_tlv_tag_t tlv_tag; /* T from TLV */ asn_dec_rval_t rval; /* Return code from subparsers */ ssize_t consumed_myself = 0; /* Consumed bytes from ptr */ size_t edx; /* SEQUENCE element's index */ ASN_DEBUG("Decoding %s as SEQUENCE", td->name); /* * Create the target structure if it is not present already. */ if(st == 0) { st = *struct_ptr = CALLOC(1, specs->struct_size); if(st == 0) { RETURN(RC_FAIL); } } /* * Restore parsing context. */ ctx = (asn_struct_ctx_t *)((char *)st + specs->ctx_offset); /* * Start to parse where left previously */ switch(ctx->phase) { case 0: /* * PHASE 0. * Check that the set of tags associated with given structure * perfectly fits our expectations. */ rval = ber_check_tags(opt_codec_ctx, td, ctx, ptr, size, tag_mode, 1, &ctx->left, 0); if(rval.code != RC_OK) { ASN_DEBUG("%s tagging check failed: %d", td->name, rval.code); return rval; } if(ctx->left >= 0) ctx->left += rval.consumed; /* ?Substracted below! */ ADVANCE(rval.consumed); NEXT_PHASE(ctx); ASN_DEBUG("Structure consumes %ld bytes, buffer %ld", (long)ctx->left, (long)size); /* Fall through */ case 1: /* * PHASE 1. * From the place where we've left it previously, * try to decode the next member from the list of * this structure's elements. * (ctx->step) stores the member being processed * between invocations and the microphase {0,1} of parsing * that member: * step = (<member_number> * 2 + <microphase>). */ for(edx = ((size_t)ctx->step >> 1); edx < td->elements_count; edx++, ctx->step = (ctx->step & ~1) + 2) { void *memb_ptr; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ ssize_t tag_len; /* Length of TLV's T */ size_t opt_edx_end; /* Next non-optional element */ size_t n; int use_bsearch; if(ctx->step & 1) goto microphase2; /* * MICROPHASE 1: Synchronize decoding. 
*/ ASN_DEBUG("In %s SEQUENCE left %d, edx=%" ASN_PRI_SIZE " flags=%d" " opt=%d ec=%d", td->name, (int)ctx->left, edx, elements[edx].flags, elements[edx].optional, td->elements_count); if(ctx->left == 0 /* No more stuff is expected */ && ( /* Explicit OPTIONAL specification reaches the end */ (edx + elements[edx].optional == td->elements_count) || /* All extensions are optional */ IN_EXTENSION_GROUP(specs, edx))) { ASN_DEBUG("End of SEQUENCE %s", td->name); /* * Found the legitimate end of the structure. */ PHASE_OUT(ctx); RETURN(RC_OK); } /* * Fetch the T from TLV. */ tag_len = ber_fetch_tag(ptr, LEFT, &tlv_tag); ASN_DEBUG("Current tag in %s SEQUENCE for element %" ASN_PRI_SIZE " " "(%s) is %s encoded in %d bytes, of frame %ld", td->name, edx, elements[edx].name, ber_tlv_tag_string(tlv_tag), (int)tag_len, (long)LEFT); switch(tag_len) { case 0: if(!SIZE_VIOLATION) RETURN(RC_WMORE); /* Fall through */ case -1: RETURN(RC_FAIL); } if(ctx->left < 0 && ((const uint8_t *)ptr)[0] == 0) { if(LEFT < 2) { if(SIZE_VIOLATION) { RETURN(RC_FAIL); } else { RETURN(RC_WMORE); } } else if(((const uint8_t *)ptr)[1] == 0) { ASN_DEBUG("edx = %" ASN_PRI_SIZE ", opt = %d, ec=%d", edx, elements[edx].optional, td->elements_count); if((edx + elements[edx].optional == td->elements_count) || IN_EXTENSION_GROUP(specs, edx)) { /* * Yeah, baby! Found the terminator * of the indefinite length structure. */ /* * Proceed to the canonical * finalization function. * No advancing is necessary. */ goto phase3; } } } /* * Find the next available type with this tag. */ use_bsearch = 0; opt_edx_end = edx + elements[edx].optional + 1; if(opt_edx_end > td->elements_count) opt_edx_end = td->elements_count; /* Cap */ else if(opt_edx_end - edx > 8) { /* Limit the scope of linear search... */ opt_edx_end = edx + 8; use_bsearch = 1; /* ... and resort to bsearch() */ } for(n = edx; n < opt_edx_end; n++) { if(BER_TAGS_EQUAL(tlv_tag, elements[n].tag)) { /* * Found element corresponding to the tag * being looked at. 
* Reposition over the right element. */ edx = n; ctx->step = 1 + 2 * edx; /* Remember! */ goto microphase2; } else if(elements[n].flags & ATF_ANY_TYPE) { /* * This is the ANY type, which may bear * any flag whatsoever. */ edx = n; ctx->step = 1 + 2 * edx; /* Remember! */ goto microphase2; } else if(elements[n].tag == (ber_tlv_tag_t)-1) { use_bsearch = 1; break; } } if(use_bsearch) { /* * Resort to a binary search over * sorted array of tags. */ const asn_TYPE_tag2member_t *t2m; asn_TYPE_tag2member_t key = {0, 0, 0, 0}; key.el_tag = tlv_tag; key.el_no = edx; t2m = (const asn_TYPE_tag2member_t *)bsearch(&key, specs->tag2el, specs->tag2el_count, sizeof(specs->tag2el[0]), _t2e_cmp); if(t2m) { const asn_TYPE_tag2member_t *best = 0; const asn_TYPE_tag2member_t *t2m_f, *t2m_l; size_t edx_max = edx + elements[edx].optional; /* * Rewind to the first element with that tag, * `cause bsearch() does not guarantee order. */ t2m_f = t2m + t2m->toff_first; t2m_l = t2m + t2m->toff_last; for(t2m = t2m_f; t2m <= t2m_l; t2m++) { if(t2m->el_no > edx_max) break; if(t2m->el_no < edx) continue; best = t2m; } if(best) { edx = best->el_no; ctx->step = 1 + 2 * edx; goto microphase2; } } n = opt_edx_end; } if(n == opt_edx_end) { /* * If tag is unknown, it may be either * an unknown (thus, incorrect) tag, * or an extension (...), * or an end of the indefinite-length structure. */ if(!IN_EXTENSION_GROUP(specs, edx + elements[edx].optional)) { ASN_DEBUG("Unexpected tag %s (at %" ASN_PRI_SIZE ")", ber_tlv_tag_string(tlv_tag), edx); ASN_DEBUG("Expected tag %s (%s)%s", ber_tlv_tag_string(elements[edx].tag), elements[edx].name, elements[edx].optional ?" 
or alternatives":""); RETURN(RC_FAIL); } else { /* Skip this tag */ ssize_t skip; edx += elements[edx].optional; ASN_DEBUG("Skipping unexpected %s (at %" ASN_PRI_SIZE ")", ber_tlv_tag_string(tlv_tag), edx); skip = ber_skip_length(opt_codec_ctx, BER_TLV_CONSTRUCTED(ptr), (const char *)ptr + tag_len, LEFT - tag_len); ASN_DEBUG("Skip length %d in %s", (int)skip, td->name); switch(skip) { case 0: if(!SIZE_VIOLATION) RETURN(RC_WMORE); /* Fall through */ case -1: RETURN(RC_FAIL); } ADVANCE(skip + tag_len); ctx->step -= 2; edx--; continue; /* Try again with the next tag */ } } /* * MICROPHASE 2: Invoke the member-specific decoder. */ ctx->step |= 1; /* Confirm entering next microphase */ microphase2: ASN_DEBUG("Inside SEQUENCE %s MF2", td->name); /* * Compute the position of the member inside a structure, * and also a type of containment (it may be contained * as pointer or using inline inclusion). */ if(elements[edx].flags & ATF_POINTER) { /* Member is a pointer to another structure */ memb_ptr2 = (void **)((char *)st + elements[edx].memb_offset); } else { /* * A pointer to a pointer * holding the start of the structure */ memb_ptr = (char *)st + elements[edx].memb_offset; memb_ptr2 = &memb_ptr; } /* * Invoke the member fetch routine according to member's type */ if(elements[edx].flags & ATF_OPEN_TYPE) { rval = OPEN_TYPE_ber_get(opt_codec_ctx, td, st, &elements[edx], ptr, LEFT); } else { rval = elements[edx].type->op->ber_decoder(opt_codec_ctx, elements[edx].type, memb_ptr2, ptr, LEFT, elements[edx].tag_mode); } ASN_DEBUG("In %s SEQUENCE decoded %" ASN_PRI_SIZE " %s of %d " "in %d bytes rval.code %d, size=%d", td->name, edx, elements[edx].type->name, (int)LEFT, (int)rval.consumed, rval.code, (int)size); switch(rval.code) { case RC_OK: break; case RC_WMORE: /* More data expected */ if(!SIZE_VIOLATION) { ADVANCE(rval.consumed); RETURN(RC_WMORE); } ASN_DEBUG("Size violation (c->l=%ld <= s=%ld)", (long)ctx->left, (long)size); /* Fall through */ case RC_FAIL: /* Fatal error 
*/ RETURN(RC_FAIL); } /* switch(rval) */ ADVANCE(rval.consumed); } /* for(all structure members) */ phase3: ctx->phase = 3; /* Fall through */ case 3: /* 00 and other tags expected */ case 4: /* only 00's expected */ ASN_DEBUG("SEQUENCE %s Leftover: %ld, size = %ld", td->name, (long)ctx->left, (long)size); /* * Skip everything until the end of the SEQUENCE. */ while(ctx->left) { ssize_t tl, ll; tl = ber_fetch_tag(ptr, LEFT, &tlv_tag); switch(tl) { case 0: if(!SIZE_VIOLATION) RETURN(RC_WMORE); /* Fall through */ case -1: RETURN(RC_FAIL); } /* * If expected <0><0>... */ if(ctx->left < 0 && ((const uint8_t *)ptr)[0] == 0) { if(LEFT < 2) { if(SIZE_VIOLATION) RETURN(RC_FAIL); else RETURN(RC_WMORE); } else if(((const uint8_t *)ptr)[1] == 0) { /* * Correctly finished with <0><0>. */ ADVANCE(2); ctx->left++; ctx->phase = 4; continue; } } if(!IN_EXTENSION_GROUP(specs, td->elements_count) || ctx->phase == 4) { ASN_DEBUG("Unexpected continuation " "of a non-extensible type " "%s (SEQUENCE): %s", td->name, ber_tlv_tag_string(tlv_tag)); RETURN(RC_FAIL); } ll = ber_skip_length(opt_codec_ctx, BER_TLV_CONSTRUCTED(ptr), (const char *)ptr + tl, LEFT - tl); switch(ll) { case 0: if(!SIZE_VIOLATION) RETURN(RC_WMORE); /* Fall through */ case -1: RETURN(RC_FAIL); } ADVANCE(tl + ll); } PHASE_OUT(ctx); } RETURN(RC_OK); } /* * The DER encoder of the SEQUENCE type. */ asn_enc_rval_t SEQUENCE_encode_der(const asn_TYPE_descriptor_t *td, const void *sptr, int tag_mode, ber_tlv_tag_t tag, asn_app_consume_bytes_f *cb, void *app_key) { size_t computed_size = 0; asn_enc_rval_t erval; ssize_t ret; size_t edx; ASN_DEBUG("%s %s as SEQUENCE", cb?"Encoding":"Estimating", td->name); /* * Gather the length of the underlying members sequence. 
*/ for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; /* Pointer to the member */ const void *const *memb_ptr2; /* Pointer to that pointer */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void *const *)((const char *)sptr + elm->memb_offset); if(!*memb_ptr2) { ASN_DEBUG("Element %s %" ASN_PRI_SIZE " not present", elm->name, edx); if(elm->optional) continue; /* Mandatory element is missing */ ASN__ENCODE_FAILED; } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; } /* Eliminate default values */ if(elm->default_value_cmp && elm->default_value_cmp(*memb_ptr2) == 0) continue; erval = elm->type->op->der_encoder(elm->type, *memb_ptr2, elm->tag_mode, elm->tag, 0, 0); if(erval.encoded == -1) return erval; computed_size += erval.encoded; ASN_DEBUG("Member %" ASN_PRI_SIZE " %s estimated %ld bytes", edx, elm->name, (long)erval.encoded); } /* * Encode the TLV for the sequence itself. */ ret = der_write_tags(td, computed_size, tag_mode, 1, tag, cb, app_key); ASN_DEBUG("Wrote tags: %ld (+%ld)", (long)ret, (long)computed_size); if(ret == -1) ASN__ENCODE_FAILED; erval.encoded = computed_size + ret; if(!cb) ASN__ENCODED_OK(erval); /* * Encode all members. 
*/ for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; asn_enc_rval_t tmperval; const void *memb_ptr; /* Pointer to the member */ const void *const *memb_ptr2; /* Pointer to that pointer */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void *const *)((const char *)sptr + elm->memb_offset); if(!*memb_ptr2) continue; } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; } /* Eliminate default values */ if(elm->default_value_cmp && elm->default_value_cmp(*memb_ptr2) == 0) continue; tmperval = elm->type->op->der_encoder(elm->type, *memb_ptr2, elm->tag_mode, elm->tag, cb, app_key); if(tmperval.encoded == -1) return tmperval; computed_size -= tmperval.encoded; ASN_DEBUG("Member %" ASN_PRI_SIZE " %s of SEQUENCE %s encoded in %ld bytes", edx, elm->name, td->name, (long)tmperval.encoded); } if(computed_size != 0) /* * Encoded size is not equal to the computed size. */ ASN__ENCODE_FAILED; ASN__ENCODED_OK(erval); } #undef XER_ADVANCE #define XER_ADVANCE(num_bytes) \ do { \ size_t num = (num_bytes); \ ptr = ((const char *)ptr) + num; \ size -= num; \ consumed_myself += num; \ } while(0) /* * Decode the XER (XML) data. */ asn_dec_rval_t SEQUENCE_decode_xer(const asn_codec_ctx_t *opt_codec_ctx, const asn_TYPE_descriptor_t *td, void **struct_ptr, const char *opt_mname, const void *ptr, size_t size) { /* * Bring closer parts of structure description. */ const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; asn_TYPE_member_t *elements = td->elements; const char *xml_tag = opt_mname ? opt_mname : td->xml_tag; /* * ... and parts of the structure being constructed. */ void *st = *struct_ptr; /* Target structure. 
*/ asn_struct_ctx_t *ctx; /* Decoder context */ asn_dec_rval_t rval; /* Return value from a decoder */ ssize_t consumed_myself = 0; /* Consumed bytes from ptr */ size_t edx; /* Element index */ /* * Create the target structure if it is not present already. */ if(st == 0) { st = *struct_ptr = CALLOC(1, specs->struct_size); if(st == 0) RETURN(RC_FAIL); } /* * Restore parsing context. */ ctx = (asn_struct_ctx_t *)((char *)st + specs->ctx_offset); /* * Phases of XER/XML processing: * Phase 0: Check that the opening tag matches our expectations. * Phase 1: Processing body and reacting on closing tag. * Phase 2: Processing inner type. * Phase 3: Skipping unknown extensions. * Phase 4: PHASED OUT */ for(edx = ctx->step; ctx->phase <= 3;) { pxer_chunk_type_e ch_type; /* XER chunk type */ ssize_t ch_size; /* Chunk size */ xer_check_tag_e tcv; /* Tag check value */ asn_TYPE_member_t *elm; /* * Go inside the inner member of a sequence. */ if(ctx->phase == 2) { asn_dec_rval_t tmprval; void *memb_ptr_dontuse; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ elm = &td->elements[edx]; if(elm->flags & ATF_POINTER) { /* Member is a pointer to another structure */ memb_ptr2 = (void **)((char *)st + elm->memb_offset); } else { memb_ptr_dontuse = (char *)st + elm->memb_offset; memb_ptr2 = &memb_ptr_dontuse; /* Only use of memb_ptr_dontuse */ } if(elm->flags & ATF_OPEN_TYPE) { tmprval = OPEN_TYPE_xer_get(opt_codec_ctx, td, st, elm, ptr, size); } else { /* Invoke the inner type decoder, m.b. multiple times */ tmprval = elm->type->op->xer_decoder(opt_codec_ctx, elm->type, memb_ptr2, elm->name, ptr, size); } XER_ADVANCE(tmprval.consumed); if(tmprval.code != RC_OK) RETURN(tmprval.code); ctx->phase = 1; /* Back to body processing */ ctx->step = ++edx; ASN_DEBUG("XER/SEQUENCE phase => %d, step => %d", ctx->phase, ctx->step); /* Fall through */ } /* * Get the next part of the XML stream. 
*/ ch_size = xer_next_token(&ctx->context, ptr, size, &ch_type); if(ch_size == -1) { RETURN(RC_FAIL); } else { switch(ch_type) { case PXER_WMORE: RETURN(RC_WMORE); case PXER_COMMENT: /* Got XML comment */ case PXER_TEXT: /* Ignore free-standing text */ XER_ADVANCE(ch_size); /* Skip silently */ continue; case PXER_TAG: break; /* Check the rest down there */ } } tcv = xer_check_tag(ptr, ch_size, xml_tag); ASN_DEBUG("XER/SEQUENCE: tcv = %d, ph=%d [%s]", tcv, ctx->phase, xml_tag); /* Skip the extensions section */ if(ctx->phase == 3) { switch(xer_skip_unknown(tcv, &ctx->left)) { case -1: ctx->phase = 4; RETURN(RC_FAIL); case 0: XER_ADVANCE(ch_size); continue; case 1: XER_ADVANCE(ch_size); ctx->phase = 1; continue; case 2: ctx->phase = 1; break; } } switch(tcv) { case XCT_CLOSING: if(ctx->phase == 0) break; ctx->phase = 0; /* Fall through */ case XCT_BOTH: if(ctx->phase == 0) { if(edx >= td->elements_count || /* Explicit OPTIONAL specs reaches the end */ (edx + elements[edx].optional == td->elements_count) || /* All extensions are optional */ IN_EXTENSION_GROUP(specs, edx)) { XER_ADVANCE(ch_size); ctx->phase = 4; /* Phase out */ RETURN(RC_OK); } else { ASN_DEBUG("Premature end of XER SEQUENCE"); RETURN(RC_FAIL); } } /* Fall through */ case XCT_OPENING: if(ctx->phase == 0) { XER_ADVANCE(ch_size); ctx->phase = 1; /* Processing body phase */ continue; } /* Fall through */ case XCT_UNKNOWN_OP: case XCT_UNKNOWN_BO: ASN_DEBUG("XER/SEQUENCE: tcv=%d, ph=%d, edx=%" ASN_PRI_SIZE "", tcv, ctx->phase, edx); if(ctx->phase != 1) { break; /* Really unexpected */ } if(edx < td->elements_count) { /* * Search which member corresponds to this tag. */ size_t n; size_t edx_end = edx + elements[edx].optional + 1; if(edx_end > td->elements_count) edx_end = td->elements_count; for(n = edx; n < edx_end; n++) { elm = &td->elements[n]; tcv = xer_check_tag(ptr, ch_size, elm->name); switch(tcv) { case XCT_BOTH: case XCT_OPENING: /* * Process this member. 
*/ ctx->step = edx = n; ctx->phase = 2; break; case XCT_UNKNOWN_OP: case XCT_UNKNOWN_BO: continue; default: n = edx_end; break; /* Phase out */ } break; } if(n != edx_end) continue; } else { ASN_DEBUG("Out of defined members: %" ASN_PRI_SIZE "/%u", edx, td->elements_count); } /* It is expected extension */ if(IN_EXTENSION_GROUP(specs, edx + (edx < td->elements_count ? elements[edx].optional : 0))) { ASN_DEBUG("Got anticipated extension at %" ASN_PRI_SIZE "", edx); /* * Check for (XCT_BOTH or XCT_UNKNOWN_BO) * By using a mask. Only record a pure * <opening> tags. */ if(tcv & XCT_CLOSING) { /* Found </extension> without body */ } else { ctx->left = 1; ctx->phase = 3; /* Skip ...'s */ } XER_ADVANCE(ch_size); continue; } /* Fall through */ default: break; } ASN_DEBUG("Unexpected XML tag in SEQUENCE [%c%c%c%c%c%c]", size>0?((const char *)ptr)[0]:'.', size>1?((const char *)ptr)[1]:'.', size>2?((const char *)ptr)[2]:'.', size>3?((const char *)ptr)[3]:'.', size>4?((const char *)ptr)[4]:'.', size>5?((const char *)ptr)[5]:'.'); break; } ctx->phase = 4; /* "Phase out" on hard failure */ RETURN(RC_FAIL); } asn_enc_rval_t SEQUENCE_encode_xer(const asn_TYPE_descriptor_t *td, const void *sptr, int ilevel, enum xer_encoder_flags_e flags, asn_app_consume_bytes_f *cb, void *app_key) { asn_enc_rval_t er; int xcan = (flags & XER_F_CANONICAL); asn_TYPE_descriptor_t *tmp_def_val_td = 0; void *tmp_def_val = 0; size_t edx; if(!sptr) ASN__ENCODE_FAILED; er.encoded = 0; for(edx = 0; edx < td->elements_count; edx++) { asn_enc_rval_t tmper; asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; const char *mname = elm->name; unsigned int mlen = strlen(mname); if(elm->flags & ATF_POINTER) { memb_ptr = *(const void *const *)((const char *)sptr + elm->memb_offset); if(!memb_ptr) { assert(tmp_def_val == 0); if(elm->default_value_set) { if(elm->default_value_set(&tmp_def_val)) { ASN__ENCODE_FAILED; } else { memb_ptr = tmp_def_val; tmp_def_val_td = elm->type; } } else if(elm->optional) 
{ continue; } else { /* Mandatory element is missing */ ASN__ENCODE_FAILED; } } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); } if(!xcan) ASN__TEXT_INDENT(1, ilevel); ASN__CALLBACK3("<", 1, mname, mlen, ">", 1); /* Print the member itself */ tmper = elm->type->op->xer_encoder(elm->type, memb_ptr, ilevel + 1, flags, cb, app_key); if(tmp_def_val) { ASN_STRUCT_FREE(*tmp_def_val_td, tmp_def_val); tmp_def_val = 0; } if(tmper.encoded == -1) return tmper; er.encoded += tmper.encoded; ASN__CALLBACK3("</", 2, mname, mlen, ">", 1); } if(!xcan) ASN__TEXT_INDENT(1, ilevel - 1); ASN__ENCODED_OK(er); cb_failed: if(tmp_def_val) ASN_STRUCT_FREE(*tmp_def_val_td, tmp_def_val); ASN__ENCODE_FAILED; } int SEQUENCE_print(const asn_TYPE_descriptor_t *td, const void *sptr, int ilevel, asn_app_consume_bytes_f *cb, void *app_key) { size_t edx; int ret; if(!sptr) return (cb("<absent>", 8, app_key) < 0) ? -1 : 0; /* Dump preamble */ if(cb(td->name, strlen(td->name), app_key) < 0 || cb(" ::= {", 6, app_key) < 0) return -1; for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; if(elm->flags & ATF_POINTER) { memb_ptr = *(const void * const *)((const char *)sptr + elm->memb_offset); if(!memb_ptr) { if(elm->optional) continue; /* Print <absent> line */ /* Fall through */ } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); } /* Indentation */ _i_INDENT(1); /* Print the member's name and stuff */ if(cb(elm->name, strlen(elm->name), app_key) < 0 || cb(": ", 2, app_key) < 0) return -1; /* Print the member itself */ ret = elm->type->op->print_struct(elm->type, memb_ptr, ilevel + 1, cb, app_key); if(ret) return ret; } ilevel--; _i_INDENT(1); return (cb("}", 1, app_key) < 0) ? 
-1 : 0; } void SEQUENCE_free(const asn_TYPE_descriptor_t *td, void *sptr, enum asn_struct_free_method method) { size_t edx; const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; asn_struct_ctx_t *ctx; /* Decoder context */ if(!td || !sptr) return; ASN_DEBUG("Freeing %s as SEQUENCE", td->name); for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; void *memb_ptr; if(elm->flags & ATF_POINTER) { memb_ptr = *(void **)((char *)sptr + elm->memb_offset); if(memb_ptr) ASN_STRUCT_FREE(*elm->type, memb_ptr); } else { memb_ptr = (void *)((char *)sptr + elm->memb_offset); ASN_STRUCT_FREE_CONTENTS_ONLY(*elm->type, memb_ptr); } } /* Clean parsing context */ ctx = (asn_struct_ctx_t *)((char *)sptr + specs->ctx_offset); FREEMEM(ctx->ptr); switch(method) { case ASFM_FREE_EVERYTHING: FREEMEM(sptr); break; case ASFM_FREE_UNDERLYING: break; case ASFM_FREE_UNDERLYING_AND_RESET: memset( sptr, 0, ((const asn_SEQUENCE_specifics_t *)(td->specifics))->struct_size); break; } } int SEQUENCE_constraint(const asn_TYPE_descriptor_t *td, const void *sptr, asn_app_constraint_failed_f *ctfailcb, void *app_key) { size_t edx; if(!sptr) { ASN__CTFAIL(app_key, td, sptr, "%s: value not given (%s:%d)", td->name, __FILE__, __LINE__); return -1; } /* * Iterate over structure members and check their validity. 
*/ for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; if(elm->flags & ATF_POINTER) { memb_ptr = *(const void * const *)((const char *)sptr + elm->memb_offset); if(!memb_ptr) { if(elm->optional) continue; ASN__CTFAIL(app_key, td, sptr, "%s: mandatory element %s absent (%s:%d)", td->name, elm->name, __FILE__, __LINE__); return -1; } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); } if(elm->encoding_constraints.general_constraints) { int ret = elm->encoding_constraints.general_constraints(elm->type, memb_ptr, ctfailcb, app_key); if(ret) return ret; } else { return elm->type->encoding_constraints.general_constraints(elm->type, memb_ptr, ctfailcb, app_key); } } return 0; } #ifndef ASN_DISABLE_PER_SUPPORT asn_dec_rval_t SEQUENCE_decode_uper(const asn_codec_ctx_t *opt_codec_ctx, const asn_TYPE_descriptor_t *td, const asn_per_constraints_t *constraints, void **sptr, asn_per_data_t *pd) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; void *st = *sptr; /* Target structure. 
*/ int extpresent; /* Extension additions are present */ uint8_t *opres; /* Presence of optional root members */ asn_per_data_t opmd; asn_dec_rval_t rv; size_t edx; (void)constraints; if(ASN__STACK_OVERFLOW_CHECK(opt_codec_ctx)) ASN__DECODE_FAILED; if(!st) { st = *sptr = CALLOC(1, specs->struct_size); if(!st) ASN__DECODE_FAILED; } ASN_DEBUG("Decoding %s as SEQUENCE (UPER)", td->name); /* Handle extensions */ if(specs->first_extension < 0) { extpresent = 0; } else { extpresent = per_get_few_bits(pd, 1); if(extpresent < 0) ASN__DECODE_STARVED; } /* Prepare a place and read-in the presence bitmap */ memset(&opmd, 0, sizeof(opmd)); if(specs->roms_count) { opres = (uint8_t *)MALLOC(((specs->roms_count + 7) >> 3) + 1); if(!opres) ASN__DECODE_FAILED; /* Get the presence map */ if(per_get_many_bits(pd, opres, 0, specs->roms_count)) { FREEMEM(opres); ASN__DECODE_STARVED; } opmd.buffer = opres; opmd.nbits = specs->roms_count; ASN_DEBUG("Read in presence bitmap for %s of %d bits (%x..)", td->name, specs->roms_count, *opres); } else { opres = 0; } /* * Get the sequence ROOT elements. */ for(edx = 0; edx < (specs->first_extension < 0 ? 
td->elements_count : (size_t)specs->first_extension); edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; void *memb_ptr; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ assert(!IN_EXTENSION_GROUP(specs, edx)); /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (void **)((char *)st + elm->memb_offset); } else { memb_ptr = (char *)st + elm->memb_offset; memb_ptr2 = &memb_ptr; } /* Deal with optionality */ if(elm->optional) { int present = per_get_few_bits(&opmd, 1); ASN_DEBUG("Member %s->%s is optional, p=%d (%d->%d)", td->name, elm->name, present, (int)opmd.nboff, (int)opmd.nbits); if(present == 0) { /* This element is not present */ if(elm->default_value_set) { /* Fill-in DEFAULT */ if(elm->default_value_set(memb_ptr2)) { FREEMEM(opres); ASN__DECODE_FAILED; } ASN_DEBUG("Filled-in default"); } /* The member is just not present */ continue; } /* Fall through */ } /* Fetch the member from the stream */ ASN_DEBUG("Decoding member \"%s\" in %s", elm->name, td->name); if(elm->flags & ATF_OPEN_TYPE) { rv = OPEN_TYPE_uper_get(opt_codec_ctx, td, st, elm, pd); } else { rv = elm->type->op->uper_decoder(opt_codec_ctx, elm->type, elm->encoding_constraints.per_constraints, memb_ptr2, pd); } if(rv.code != RC_OK) { ASN_DEBUG("Failed decode %s in %s", elm->name, td->name); FREEMEM(opres); return rv; } } /* Optionality map is not needed anymore */ FREEMEM(opres); /* * Deal with extensions. 
*/ if(extpresent) { ssize_t bmlength; uint8_t *epres; /* Presence of extension members */ asn_per_data_t epmd; bmlength = uper_get_nslength(pd); if(bmlength < 0) ASN__DECODE_STARVED; ASN_DEBUG("Extensions %" ASN_PRI_SSIZE " present in %s", bmlength, td->name); epres = (uint8_t *)MALLOC((bmlength + 15) >> 3); if(!epres) ASN__DECODE_STARVED; /* Get the extensions map */ if(per_get_many_bits(pd, epres, 0, bmlength)) { FREEMEM(epres); ASN__DECODE_STARVED; } memset(&epmd, 0, sizeof(epmd)); epmd.buffer = epres; epmd.nbits = bmlength; ASN_DEBUG("Read in extensions bitmap for %s of %ld bits (%x..)", td->name, (long)bmlength, *epres); /* Go over extensions and read them in */ for(edx = specs->first_extension; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; void *memb_ptr; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ int present; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (void **)((char *)st + elm->memb_offset); } else { memb_ptr = (void *)((char *)st + elm->memb_offset); memb_ptr2 = &memb_ptr; } present = per_get_few_bits(&epmd, 1); if(present <= 0) { if(present < 0) break; /* No more extensions */ continue; } ASN_DEBUG("Decoding member %s in %s %p", elm->name, td->name, *memb_ptr2); rv = uper_open_type_get(opt_codec_ctx, elm->type, elm->encoding_constraints.per_constraints, memb_ptr2, pd); if(rv.code != RC_OK) { FREEMEM(epres); return rv; } } /* Skip over overflow extensions which aren't present * in this system's version of the protocol */ for(;;) { ASN_DEBUG("Getting overflow extensions"); switch(per_get_few_bits(&epmd, 1)) { case -1: break; case 0: continue; default: if(uper_open_type_skip(opt_codec_ctx, pd)) { FREEMEM(epres); ASN__DECODE_STARVED; } ASN_DEBUG("Skipped overflow extension"); continue; } break; } FREEMEM(epres); } if(specs->first_extension >= 0) { unsigned i; /* Fill DEFAULT members in extensions */ for(i = specs->roms_count; i < specs->roms_count + 
specs->aoms_count; i++) { asn_TYPE_member_t *elm; void **memb_ptr2; /* Pointer to member pointer */ edx = specs->oms[i]; elm = &td->elements[edx]; if(!elm->default_value_set) continue; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (void **)((char *)st + elm->memb_offset); if(*memb_ptr2) continue; } else { continue; /* Extensions are all optionals */ } /* Set default value */ if(elm->default_value_set(memb_ptr2)) { ASN__DECODE_FAILED; } } } rv.consumed = 0; rv.code = RC_OK; return rv; } static int SEQUENCE__handle_extensions(const asn_TYPE_descriptor_t *td, const void *sptr, asn_per_outp_t *po1, asn_per_outp_t *po2) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; int exts_present = 0; int exts_count = 0; size_t edx; if(specs->first_extension < 0) { return 0; } /* Find out which extensions are present */ for(edx = specs->first_extension; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; /* Pointer to the member */ const void *const *memb_ptr2; /* Pointer to that pointer */ int present; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void *const *)((const char *)sptr + elm->memb_offset); present = (*memb_ptr2 != 0); } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; present = 1; } ASN_DEBUG("checking %s:%s (@%" ASN_PRI_SIZE ") present => %d", elm->name, elm->type->name, edx, present); exts_count++; exts_present += present; /* Encode as presence marker */ if(po1 && per_put_few_bits(po1, present, 1)) { return -1; } /* Encode as open type field */ if(po2 && present && uper_open_type_put(elm->type, elm->encoding_constraints.per_constraints, *memb_ptr2, po2)) return -1; } return exts_present ? 
exts_count : 0; } asn_enc_rval_t SEQUENCE_encode_uper(const asn_TYPE_descriptor_t *td, const asn_per_constraints_t *constraints, const void *sptr, asn_per_outp_t *po) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; asn_enc_rval_t er; int n_extensions; size_t edx; size_t i; (void)constraints; if(!sptr) ASN__ENCODE_FAILED; er.encoded = 0; ASN_DEBUG("Encoding %s as SEQUENCE (UPER)", td->name); /* * X.691#18.1 Whether structure is extensible * and whether to encode extensions */ if(specs->first_extension < 0) { n_extensions = 0; /* There are no extensions to encode */ } else { n_extensions = SEQUENCE__handle_extensions(td, sptr, 0, 0); if(n_extensions < 0) ASN__ENCODE_FAILED; if(per_put_few_bits(po, n_extensions ? 1 : 0, 1)) { ASN__ENCODE_FAILED; } } /* Encode a presence bitmap */ for(i = 0; i < specs->roms_count; i++) { asn_TYPE_member_t *elm; const void *memb_ptr; /* Pointer to the member */ const void *const *memb_ptr2; /* Pointer to that pointer */ int present; edx = specs->oms[i]; elm = &td->elements[edx]; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void *const *)((const char *)sptr + elm->memb_offset); present = (*memb_ptr2 != 0); } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; present = 1; } /* Eliminate default values */ if(present && elm->default_value_cmp && elm->default_value_cmp(*memb_ptr2) == 0) present = 0; ASN_DEBUG("Element %s %s %s->%s is %s", elm->flags & ATF_POINTER ? "ptr" : "inline", elm->default_value_cmp ? "def" : "wtv", td->name, elm->name, present ? "present" : "absent"); if(per_put_few_bits(po, present, 1)) ASN__ENCODE_FAILED; } /* * Encode the sequence ROOT elements. */ ASN_DEBUG("first_extension = %d, elements = %d", specs->first_extension, td->elements_count); for(edx = 0; edx < ((specs->first_extension < 0) ? 
td->elements_count : (size_t)specs->first_extension); edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; /* Pointer to the member */ const void *const *memb_ptr2; /* Pointer to that pointer */ ASN_DEBUG("About to encode %s", elm->type->name); /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void *const *)((const char *)sptr + elm->memb_offset); if(!*memb_ptr2) { ASN_DEBUG("Element %s %" ASN_PRI_SIZE " not present", elm->name, edx); if(elm->optional) continue; /* Mandatory element is missing */ ASN__ENCODE_FAILED; } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; } /* Eliminate default values */ if(elm->default_value_cmp && elm->default_value_cmp(*memb_ptr2) == 0) continue; ASN_DEBUG("Encoding %s->%s:%s", td->name, elm->name, elm->type->name); er = elm->type->op->uper_encoder( elm->type, elm->encoding_constraints.per_constraints, *memb_ptr2, po); if(er.encoded == -1) return er; } /* No extensions to encode */ if(!n_extensions) ASN__ENCODED_OK(er); ASN_DEBUG("Length of extensions %d bit-map", n_extensions); /* #18.8. Write down the presence bit-map length. */ if(uper_put_nslength(po, n_extensions)) ASN__ENCODE_FAILED; ASN_DEBUG("Bit-map of %d elements", n_extensions); /* #18.7. Encoding the extensions presence bit-map. */ /* TODO: act upon NOTE in #18.7 for canonical PER */ if(SEQUENCE__handle_extensions(td, sptr, po, 0) != n_extensions) ASN__ENCODE_FAILED; ASN_DEBUG("Writing %d extensions", n_extensions); /* #18.9. Encode extensions as open type fields. 
*/ if(SEQUENCE__handle_extensions(td, sptr, 0, po) != n_extensions) ASN__ENCODE_FAILED; ASN__ENCODED_OK(er); } asn_dec_rval_t SEQUENCE_decode_aper(const asn_codec_ctx_t *opt_codec_ctx, const asn_TYPE_descriptor_t *td, const asn_per_constraints_t *constraints, void **sptr, asn_per_data_t *pd) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; void *st = *sptr; /* Target structure. */ int extpresent; /* Extension additions are present */ uint8_t *opres; /* Presence of optional root members */ asn_per_data_t opmd; asn_dec_rval_t rv; size_t edx; (void)constraints; if(ASN__STACK_OVERFLOW_CHECK(opt_codec_ctx)) ASN__DECODE_FAILED; if(!st) { st = *sptr = CALLOC(1, specs->struct_size); if(!st) ASN__DECODE_FAILED; } ASN_DEBUG("Decoding %s as SEQUENCE (APER)", td->name); /* Handle extensions */ if(specs->first_extension < 0) { extpresent = 0; } else { extpresent = per_get_few_bits(pd, 1); if(extpresent < 0) ASN__DECODE_STARVED; } /* Prepare a place and read-in the presence bitmap */ memset(&opmd, 0, sizeof(opmd)); if(specs->roms_count) { opres = (uint8_t *)MALLOC(((specs->roms_count + 7) >> 3) + 1); if(!opres) ASN__DECODE_FAILED; /* Get the presence map */ if(per_get_many_bits(pd, opres, 0, specs->roms_count)) { FREEMEM(opres); ASN__DECODE_STARVED; } opmd.buffer = opres; opmd.nbits = specs->roms_count; ASN_DEBUG("Read in presence bitmap for %s of %d bits (%x..)", td->name, specs->roms_count, *opres); } else { opres = 0; } /* * Get the sequence ROOT elements. 
*/ for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; void *memb_ptr; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ #if 0 int padding; #endif if(IN_EXTENSION_GROUP(specs, edx)) continue; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (void **)((char *)st + elm->memb_offset); } else { memb_ptr = (char *)st + elm->memb_offset; memb_ptr2 = &memb_ptr; } #if 0 /* Get Padding */ padding = (8 - (pd->moved % 8)) % 8; if(padding > 0) ASN_DEBUG("For element %s,offset= %ld Padding bits = %d", td->name, pd->moved, padding); #if 0 /* old way of removing padding */ per_get_few_bits(pd, padding); #else /* Experimental fix proposed by @mhanna123 */ if(edx != (td->elements_count-1)) per_get_few_bits(pd, padding); else { if(specs->roms_count && (padding > 0)) ASN_DEBUG(">>>>> not skipping padding of %d bits for element:%ld out of %d", padding, edx, td->elements_count); else per_get_few_bits(pd, padding); } #endif /* dealing with padding */ #endif /* Deal with optionality */ if(elm->optional) { int present = per_get_few_bits(&opmd, 1); ASN_DEBUG("Member %s->%s is optional, p=%d (%d->%d)", td->name, elm->name, present, (int)opmd.nboff, (int)opmd.nbits); if(present == 0) { /* This element is not present */ if(elm->default_value_set) { /* Fill-in DEFAULT */ if(elm->default_value_set(memb_ptr2)) { FREEMEM(opres); ASN__DECODE_FAILED; } ASN_DEBUG("Filled-in default"); } /* The member is just not present */ continue; } /* Fall through */ } /* Fetch the member from the stream */ ASN_DEBUG("Decoding member \"%s\" in %s", elm->name, td->name); if(elm->flags & ATF_OPEN_TYPE) { rv = OPEN_TYPE_aper_get(opt_codec_ctx, td, st, elm, pd); } else { rv = elm->type->op->aper_decoder(opt_codec_ctx, elm->type, elm->encoding_constraints.per_constraints, memb_ptr2, pd); } if(rv.code != RC_OK) { ASN_DEBUG("Failed decode %s in %s", elm->name, td->name); FREEMEM(opres); return rv; } } /* Optionality map 
is not needed anymore */
    FREEMEM(opres);

    /*
     * Deal with extensions.
     */
    if(extpresent) {
        ssize_t bmlength;
        uint8_t *epres; /* Presence of extension members */
        asn_per_data_t epmd;

        bmlength = aper_get_nslength(pd);
        if(bmlength < 0) ASN__DECODE_STARVED;

        ASN_DEBUG("Extensions %" ASN_PRI_SSIZE " present in %s", bmlength, td->name);

        epres = (uint8_t *)MALLOC((bmlength + 15) >> 3);
        if(!epres) ASN__DECODE_STARVED;

        /* Get the extensions map */
        if(per_get_many_bits(pd, epres, 0, bmlength)) {
            /* Fix: release the bitmap on starvation (the UPER decoder above
             * does this; the APER path previously leaked epres here). */
            FREEMEM(epres);
            ASN__DECODE_STARVED;
        }

        memset(&epmd, 0, sizeof(epmd));
        epmd.buffer = epres;
        epmd.nbits = bmlength;
        /* Fix: cast to long to match the %ld conversion specifier. */
        ASN_DEBUG("Read in extensions bitmap for %s of %ld bits (%x..)",
                  td->name, (long)bmlength, *epres);

        /* Go over extensions and read them in */
        for(edx = specs->first_extension; edx < td->elements_count; edx++) {
            asn_TYPE_member_t *elm = &td->elements[edx];
            void *memb_ptr;   /* Pointer to the member */
            void **memb_ptr2; /* Pointer to that pointer */
            int present;

            if(!IN_EXTENSION_GROUP(specs, edx)) {
                /* Fix: cast to long to match %ld (edx is size_t). */
                ASN_DEBUG("%ld is not extension", (long)edx);
                continue;
            }

            /* Fetch the pointer to this member */
            if(elm->flags & ATF_POINTER) {
                memb_ptr2 = (void **)((char *)st + elm->memb_offset);
            } else {
                memb_ptr = (void *)((char *)st + elm->memb_offset);
                memb_ptr2 = &memb_ptr;
            }

            present = per_get_few_bits(&epmd, 1);
            if(present <= 0) {
                if(present < 0) break; /* No more extensions */
                continue;
            }

            ASN_DEBUG("Decoding member %s in %s %p", elm->name, td->name, *memb_ptr2);
            rv = aper_open_type_get(opt_codec_ctx, elm->type,
                                    elm->encoding_constraints.per_constraints,
                                    memb_ptr2, pd);
            if(rv.code != RC_OK) {
                FREEMEM(epres);
                return rv;
            }
        }

        /* Skip over overflow extensions which aren't present
         * in this system's version of the protocol */
        for(;;) {
            ASN_DEBUG("Getting overflow extensions");
            switch(per_get_few_bits(&epmd, 1)) {
            case -1: break;     /* Bitmap exhausted: leave the loop */
            case 0: continue;   /* Extension absent: nothing to skip */
            default:
                if(aper_open_type_skip(opt_codec_ctx, pd)) {
                    FREEMEM(epres);
                    ASN__DECODE_STARVED;
                }
                /* Fix: keep consuming further overflow extensions, as the
                 * UPER decoder does; previously only the first one was
                 * skipped before falling out of the loop. */
                continue;
            }
            break;
        }

        FREEMEM(epres);
    }

    /* Fill DEFAULT members in extensions */
    for(edx =
specs->roms_count; edx < specs->roms_count + specs->aoms_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; void **memb_ptr2; /* Pointer to member pointer */ if(!elm->default_value_set) continue; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (void **)((char *)st + elm->memb_offset); if(*memb_ptr2) continue; } else { continue; /* Extensions are all optionals */ } /* Set default value */ if(elm->default_value_set(memb_ptr2)) { ASN__DECODE_FAILED; } } rv.consumed = 0; rv.code = RC_OK; return rv; } static int SEQUENCE_handle_extensions_aper(const asn_TYPE_descriptor_t *td, const void *sptr, asn_per_outp_t *po1, asn_per_outp_t *po2) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; int exts_present = 0; int exts_count = 0; size_t edx; if(specs->first_extension < 0) { return 0; } /* Find out which extensions are present */ for(edx = specs->first_extension; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; /* Pointer to the member */ const void * const *memb_ptr2; /* Pointer to that pointer */ int present; if(!IN_EXTENSION_GROUP(specs, edx)) { ASN_DEBUG("%s (@%ld) is not extension", elm->type->name, edx); continue; } /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void * const *)((const char *)sptr + elm->memb_offset); present = (*memb_ptr2 != 0); } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; present = 1; } ASN_DEBUG("checking %s (@%ld) present => %d", elm->type->name, edx, present); exts_count++; exts_present += present; /* Encode as presence marker */ if(po1 && per_put_few_bits(po1, present, 1)) return -1; /* Encode as open type field */ if(po2 && present && aper_open_type_put(elm->type, elm->encoding_constraints.per_constraints, *memb_ptr2, po2)) return -1; } return exts_present ? 
exts_count : 0; } asn_enc_rval_t SEQUENCE_encode_aper(const asn_TYPE_descriptor_t *td, const asn_per_constraints_t *constraints, const void *sptr, asn_per_outp_t *po) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; asn_enc_rval_t er; int n_extensions; size_t edx; size_t i; (void)constraints; if(!sptr) ASN__ENCODE_FAILED; er.encoded = 0; ASN_DEBUG("Encoding %s as SEQUENCE (APER)", td->name); /* * X.691#18.1 Whether structure is extensible * and whether to encode extensions */ if(specs->first_extension < 0) { n_extensions = 0; /* There are no extensions to encode */ } else { n_extensions = SEQUENCE_handle_extensions_aper(td, sptr, 0, 0); if(n_extensions < 0) ASN__ENCODE_FAILED; if(per_put_few_bits(po, n_extensions ? 1 : 0, 1)) { ASN__ENCODE_FAILED; } } /* Encode a presence bitmap */ for(i = 0; i < specs->roms_count; i++) { asn_TYPE_member_t *elm; const void *memb_ptr; /* Pointer to the member */ const void * const *memb_ptr2; /* Pointer to that pointer */ int present; edx = specs->oms[i]; elm = &td->elements[edx]; /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void * const *)((const char *)sptr + elm->memb_offset); present = (*memb_ptr2 != 0); } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; present = 1; } /* Eliminate default values */ if(present && elm->default_value_cmp && elm->default_value_cmp(memb_ptr2) == 0) present = 0; ASN_DEBUG("Element %s %s %s->%s is %s", elm->flags & ATF_POINTER ? "ptr" : "inline", elm->default_value_cmp ? "def" : "wtv", td->name, elm->name, present ? "present" : "absent"); if(per_put_few_bits(po, present, 1)) ASN__ENCODE_FAILED; } /* * Encode the sequence ROOT elements. */ ASN_DEBUG("first_extension = %d, elements = %d", specs->first_extension, td->elements_count); for(edx = 0; edx < ((specs->first_extension < 0) ? 
td->elements_count : (size_t)specs->first_extension); edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *memb_ptr; /* Pointer to the member */ const void * const *memb_ptr2; /* Pointer to that pointer */ if(IN_EXTENSION_GROUP(specs, edx)) continue; ASN_DEBUG("About to encode %s", elm->type->name); /* Fetch the pointer to this member */ if(elm->flags & ATF_POINTER) { memb_ptr2 = (const void * const *)((const char *)sptr + elm->memb_offset); if(!*memb_ptr2) { ASN_DEBUG("Element %s %ld not present", elm->name, edx); if(elm->optional) continue; /* Mandatory element is missing */ ASN__ENCODE_FAILED; } } else { memb_ptr = (const void *)((const char *)sptr + elm->memb_offset); memb_ptr2 = &memb_ptr; } /* Eliminate default values */ if(elm->default_value_cmp && elm->default_value_cmp(memb_ptr2) == 0) continue; ASN_DEBUG("Encoding %s->%s", td->name, elm->name); er = elm->type->op->aper_encoder(elm->type, elm->encoding_constraints.per_constraints, *memb_ptr2, po); if(er.encoded == -1) return er; } /* No extensions to encode */ if(!n_extensions) ASN__ENCODED_OK(er); ASN_DEBUG("Length of %d bit-map", n_extensions); /* #18.8. Write down the presence bit-map length. */ if(aper_put_nslength(po, n_extensions)) ASN__ENCODE_FAILED; ASN_DEBUG("Bit-map of %d elements", n_extensions); /* #18.7. Encoding the extensions presence bit-map. */ /* TODO: act upon NOTE in #18.7 for canonical PER */ if(SEQUENCE_handle_extensions_aper(td, sptr, po, 0) != n_extensions) ASN__ENCODE_FAILED; ASN_DEBUG("Writing %d extensions", n_extensions); /* #18.9. Encode extensions as open type fields. 
*/ if(SEQUENCE_handle_extensions_aper(td, sptr, 0, po) != n_extensions) ASN__ENCODE_FAILED; ASN__ENCODED_OK(er); } #endif /* ASN_DISABLE_PER_SUPPORT */ int SEQUENCE_compare(const asn_TYPE_descriptor_t *td, const void *aptr, const void *bptr) { size_t edx; for(edx = 0; edx < td->elements_count; edx++) { asn_TYPE_member_t *elm = &td->elements[edx]; const void *amemb; const void *bmemb; int ret; if(elm->flags & ATF_POINTER) { amemb = *(const void *const *)((const char *)aptr + elm->memb_offset); bmemb = *(const void *const *)((const char *)bptr + elm->memb_offset); if(!amemb) { if(!bmemb) continue; if(elm->default_value_cmp && elm->default_value_cmp(bmemb) == 0) { /* A is absent, but B is present and equal to DEFAULT */ continue; } return -1; } else if(!bmemb) { if(elm->default_value_cmp && elm->default_value_cmp(amemb) == 0) { /* B is absent, but A is present and equal to DEFAULT */ continue; } return 1; } } else { amemb = (const void *)((const char *)aptr + elm->memb_offset); bmemb = (const void *)((const char *)bptr + elm->memb_offset); } ret = elm->type->op->compare_struct(elm->type, amemb, bmemb); if(ret != 0) return ret; } return 0; } asn_TYPE_operation_t asn_OP_SEQUENCE = { SEQUENCE_free, SEQUENCE_print, SEQUENCE_compare, SEQUENCE_decode_ber, SEQUENCE_encode_der, SEQUENCE_decode_xer, SEQUENCE_encode_xer, #ifdef ASN_DISABLE_OER_SUPPORT 0, 0, #else SEQUENCE_decode_oer, SEQUENCE_encode_oer, #endif /* ASN_DISABLE_OER_SUPPORT */ #ifdef ASN_DISABLE_PER_SUPPORT 0, 0, 0, 0, #else SEQUENCE_decode_uper, SEQUENCE_encode_uper, SEQUENCE_decode_aper, SEQUENCE_encode_aper, #endif /* ASN_DISABLE_PER_SUPPORT */ SEQUENCE_random_fill, 0 /* Use generic outmost tag fetcher */ }; asn_random_fill_result_t SEQUENCE_random_fill(const asn_TYPE_descriptor_t *td, void **sptr, const asn_encoding_constraints_t *constr, size_t max_length) { const asn_SEQUENCE_specifics_t *specs = (const asn_SEQUENCE_specifics_t *)td->specifics; asn_random_fill_result_t result_ok = {ARFILL_OK, 0}; 
asn_random_fill_result_t result_failed = {ARFILL_FAILED, 0}; asn_random_fill_result_t result_skipped = {ARFILL_SKIPPED, 0}; void *st = *sptr; size_t edx; if(max_length == 0) return result_skipped; (void)constr; if(st == NULL) { st = CALLOC(1, specs->struct_size); if(st == NULL) { return result_failed; } } for(edx = 0; edx < td->elements_count; edx++) { const asn_TYPE_member_t *elm = &td->elements[edx]; void *memb_ptr; /* Pointer to the member */ void **memb_ptr2; /* Pointer to that pointer */ asn_random_fill_result_t tmpres; if(elm->optional && asn_random_between(0, 4) == 2) { /* Sometimes decide not to fill the optional value */ continue; } if(elm->flags & ATF_POINTER) { /* Member is a pointer to another structure */ memb_ptr2 = (void **)((char *)st + elm->memb_offset); } else { memb_ptr = (char *)st + elm->memb_offset; memb_ptr2 = &memb_ptr; } tmpres = elm->type->op->random_fill( elm->type, memb_ptr2, &elm->encoding_constraints, max_length > result_ok.length ? max_length - result_ok.length : 0); switch(tmpres.code) { case ARFILL_OK: result_ok.length += tmpres.length; continue; case ARFILL_SKIPPED: assert(!(elm->flags & ATF_POINTER) || *memb_ptr2 == NULL); continue; case ARFILL_FAILED: if(st == *sptr) { ASN_STRUCT_RESET(*td, st); } else { ASN_STRUCT_FREE(*td, st); } return tmpres; } } *sptr = st; return result_ok; }
716405.c
/***************************************************************************//**
  @file         main.c
  @author       Stephen Brennan
  @date         Created Wednesday, 10 June 2015
  @brief        Main program for tetris.
  @copyright    Copyright (c) 2015, Stephen Brennan.  Released under the Revised
                BSD License.  See LICENSE.txt for details.
*******************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <ncurses.h>
#include <string.h>

#if WITH_SDL
# include <SDL/SDL.h>
# include <SDL/SDL_mixer.h>
#endif

#include "tetris.h"
#include "util.h"

/* 2 columns per cell makes the game much nicer. */
#define COLS_PER_CELL 2
/* Macro to print a cell of a specific type to a window.
 * Emits two reversed-video spaces so each cell is square-ish on screen;
 * the cell type doubles as the ncurses color-pair index. */
#define ADD_BLOCK(w,x) waddch((w),' '|A_REVERSE|COLOR_PAIR(x)); \
                       waddch((w),' '|A_REVERSE|COLOR_PAIR(x))
/* Two plain spaces: one empty board cell. */
#define ADD_EMPTY(w) waddch((w), ' '); waddch((w), ' ')

/* Print the tetris board onto the ncurses window.
 * Draws a border, then one colored (or blank) 2-column cell per board cell.
 * Uses wnoutrefresh so the caller batches screen updates with doupdate(). */
void display_board(WINDOW *w, tetris_game *obj)
{
  int i, j;
  box(w, 0, 0);
  for (i = 0; i < obj->rows; i++) {
    wmove(w, 1 + i, 1);
    for (j = 0; j < obj->cols; j++) {
      if (TC_IS_FILLED(tg_get(obj, i, j))) {
        ADD_BLOCK(w,tg_get(obj, i, j));
      } else {
        ADD_EMPTY(w);
      }
    }
  }
  wnoutrefresh(w);
}

/* Display a tetris piece in a dedicated window.
 * A block.typ of -1 means "no piece" (e.g. empty hold slot): the window is
 * cleared and only the border is drawn. Otherwise each of the piece's TETRIS
 * cells is placed at its tetromino-relative location. */
void display_piece(WINDOW *w, tetris_block block)
{
  int b;
  tetris_location c;
  wclear(w);
  box(w, 0, 0);
  if (block.typ == -1) {
    wnoutrefresh(w);
    return;
  }
  for (b = 0; b < TETRIS; b++) {
    c = TETROMINOS[block.typ][block.ori][b];
    wmove(w, c.row + 1, c.col * COLS_PER_CELL + 1);
    ADD_BLOCK(w, TYPE_TO_CELL(block.typ));
  }
  wnoutrefresh(w);
}

/* Display score information in a dedicated window.
 * Shows points, current level, and lines remaining until the next level
 * (tg->lines_remaining counts down — see tetris.h for the exact semantics;
 * NOTE(review): label "Lines" assumes that meaning, confirm against tetris.c). */
void display_score(WINDOW *w, tetris_game *tg)
{
  wclear(w);
  box(w, 0, 0);
  wprintw(w, "Score\n%d\n", tg->points);
  wprintw(w, "Level\n%d\n", tg->level);
  wprintw(w, "Lines\n%d\n", tg->lines_remaining);
  wnoutrefresh(w);
}

/* Boss mode!  Make it look like you're doing work.
*/ void boss_mode(void) { clear(); #if WITH_SDL Mix_PauseMusic(); #endif printw("user@workstation-312:~/Documents/presentation $ ls -l\n" "total 528\n" "drwxr-xr-x 2 user users 4096 Jun 9 17:05 .\n" "drwxr-xr-x 4 user users 4096 Jun 10 09:52 ..\n" "-rw-r--r-- 1 user users 88583 Jun 9 14:13 figure1.png\n" "-rw-r--r-- 1 user users 65357 Jun 9 15:40 figure2.png\n" "-rw-r--r-- 1 user users 4469 Jun 9 16:17 presentation.aux\n" "-rw-r--r-- 1 user users 42858 Jun 9 16:17 presentation.log\n" "-rw-r--r-- 1 user users 2516 Jun 9 16:17 presentation.nav\n" "-rw-r--r-- 1 user users 183 Jun 9 16:17 presentation.out\n" "-rw-r--r-- 1 user users 349607 Jun 9 16:17 presentation.pdf\n" "-rw-r--r-- 1 user users 0 Jun 9 16:17 presentation.snm\n" "-rw-r--r-- 1 user users 9284 Jun 9 17:05 presentation.tex\n" "-rw-r--r-- 1 user users 229 Jun 9 16:17 presentation.toc\n" "\n" "user@workstation-312:~/Documents/presentation $ "); echo(); timeout(-1); while (getch() != KEY_F(1)); timeout(0); noecho(); clear(); #if WITH_SDL Mix_ResumeMusic(); #endif } /* Save and exit the game. */ void save(tetris_game *game, WINDOW *w) { FILE *f; wclear(w); box(w, 0, 0); // return the border wmove(w, 1, 1); wprintw(w, "Save and exit? [Y/n] "); wrefresh(w); timeout(-1); if (getch() == 'n') { timeout(0); return; } f = fopen("tetris.save", "w"); tg_save(game, f); fclose(f); tg_delete(game); endwin(); printf("Game saved to \"tetris.save\".\n"); printf("Resume by passing the filename as an argument to this program.\n"); exit(EXIT_SUCCESS); } /* Do the NCURSES initialization steps for color blocks. 
 */
void init_colors(void)
{
  start_color();
  //init_color(COLOR_ORANGE, 1000, 647, 0);
  /* Pair index == cell type, so ADD_BLOCK can use the cell value directly.
   * The L piece uses white because curses has no standard orange. */
  init_pair(TC_CELLI, COLOR_CYAN, COLOR_BLACK);
  init_pair(TC_CELLJ, COLOR_BLUE, COLOR_BLACK);
  init_pair(TC_CELLL, COLOR_WHITE, COLOR_BLACK);
  init_pair(TC_CELLO, COLOR_YELLOW, COLOR_BLACK);
  init_pair(TC_CELLS, COLOR_GREEN, COLOR_BLACK);
  init_pair(TC_CELLT, COLOR_MAGENTA, COLOR_BLACK);
  init_pair(TC_CELLZ, COLOR_RED, COLOR_BLACK);
}

/* Main tetris game!
 * Optional argv[1] is a save file produced by save(); otherwise a fresh
 * 22x10 game is created. Runs the curses UI loop until game over or quit. */
int main(int argc, char **argv)
{
  tetris_game *tg;
  tetris_move move = TM_NONE;
  bool running = true;
  WINDOW *board, *next, *hold, *score;
#if WITH_SDL
  Mix_Music *music;
#endif

  // Load file if given a filename.
  if (argc >= 2) {
    FILE *f = fopen(argv[1], "r");
    if (f == NULL) {
      perror("tetris");
      exit(EXIT_FAILURE);
    }
    tg = tg_load(f);
    fclose(f);
  } else {
    // Otherwise create new game.
    tg = tg_create(22, 10);
  }

#if WITH_SDL
  // Initialize music.  Missing tetris.mp3 is tolerated: music stays NULL
  // and playback is simply skipped.
  if (SDL_Init(SDL_INIT_AUDIO) < 0) {
    fprintf(stderr, "unable to initialize SDL\n");
    exit(EXIT_FAILURE);
  }
  if (Mix_Init(MIX_INIT_MP3) != MIX_INIT_MP3) {
    fprintf(stderr, "unable to initialize SDL_mixer\n");
    exit(EXIT_FAILURE);
  }
  if (Mix_OpenAudio(MIX_DEFAULT_FREQUENCY, MIX_DEFAULT_FORMAT, 2, 1024) != 0) {
    fprintf(stderr, "unable to initialize audio\n");
    exit(EXIT_FAILURE);
  }
  Mix_AllocateChannels(1); // only need background music
  music = Mix_LoadMUS("tetris.mp3");
  if (music) {
    Mix_PlayMusic(music, -1);
  }
#endif

  // NCURSES initialization:
  initscr();             // initialize curses
  cbreak();              // pass key presses to program, but not signals
  noecho();              // don't echo key presses to screen
  keypad(stdscr, TRUE);  // allow arrow keys
  timeout(0);            // no blocking on getch()
  curs_set(0);           // set the cursor to invisible
  init_colors();         // setup tetris colors

  // Create windows for each section of the interface.
  board = newwin(tg->rows + 2, 2 * tg->cols + 2, 0, 0);
  next  = newwin(6, 10, 0, 2 * (tg->cols + 1) + 1);
  hold  = newwin(6, 10, 7, 2 * (tg->cols + 1) + 1);
  score = newwin(6, 10, 14, 2 * (tg->cols + 1 ) + 1);

  // Game loop: tick the game, redraw everything, then poll one key.
  // The ~10 ms sleep caps the loop rate; getch() is non-blocking here.
  while (running) {
    running = tg_tick(tg, move);
    display_board(board, tg);
    display_piece(next, tg->next);
    display_piece(hold, tg->stored);
    display_score(score, tg);
    doupdate();
    sleep_milli(10);

    switch (getch()) {
    case KEY_LEFT: move = TM_LEFT; break;
    case KEY_RIGHT: move = TM_RIGHT; break;
    case KEY_UP: move = TM_CLOCK; break;
    case KEY_DOWN: move = TM_DROP; break;
    case 'q':
      running = false;
      move = TM_NONE;
      break;
    case 'p':
      // Pause: block on getch() until any key is pressed.
      wclear(board);
      box(board, 0, 0);
      wmove(board, tg->rows/2, (tg->cols*COLS_PER_CELL-6)/2);
      wprintw(board, "PAUSED");
      wrefresh(board);
      timeout(-1);
      getch();
      timeout(0);
      move = TM_NONE;
      break;
    case 'b':
      boss_mode();
      move = TM_NONE;
      break;
    case 's':
      // save() exits the process on success; returns here if declined.
      save(tg, board);
      move = TM_NONE;
      break;
    case ' ':
      move = TM_HOLD;
      break;
    default:
      move = TM_NONE;
    }
  }

  // Deinitialize NCurses
  wclear(stdscr);
  endwin();

#if WITH_SDL
  // Deinitialize Sound
  Mix_HaltMusic();
  Mix_FreeMusic(music);
  Mix_CloseAudio();
  Mix_Quit();
#endif

  // Output ending message.
  printf("Game over!\n");
  printf("You finished with %d points on level %d.\n", tg->points, tg->level);

  // Deinitialize Tetris
  tg_delete(tg);
  return 0;
}
21627.c
/* * Copyright 2019 Nikolay Sivov for CodeWeavers * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #define COBJMACROS #include "mfapi.h" #include "mfidl.h" #include "mf_private.h" #include "initguid.h" #include "mmdeviceapi.h" #include "audioclient.h" #include "wine/debug.h" #include "wine/list.h" WINE_DEFAULT_DEBUG_CHANNEL(mfplat); enum stream_state { STREAM_STATE_STOPPED = 0, STREAM_STATE_RUNNING, STREAM_STATE_PAUSED, }; enum audio_renderer_flags { SAR_SHUT_DOWN = 0x1, SAR_PREROLLED = 0x2, SAR_SAMPLE_REQUESTED = 0x4, }; enum queued_object_type { OBJECT_TYPE_SAMPLE, OBJECT_TYPE_MARKER, }; struct queued_object { struct list entry; enum queued_object_type type; union { struct { IMFSample *sample; unsigned int frame_offset; } sample; struct { MFSTREAMSINK_MARKER_TYPE type; PROPVARIANT context; } marker; } u; }; struct audio_renderer { IMFMediaSink IMFMediaSink_iface; IMFMediaSinkPreroll IMFMediaSinkPreroll_iface; IMFStreamSink IMFStreamSink_iface; IMFMediaTypeHandler IMFMediaTypeHandler_iface; IMFClockStateSink IMFClockStateSink_iface; IMFMediaEventGenerator IMFMediaEventGenerator_iface; IMFGetService IMFGetService_iface; IMFSimpleAudioVolume IMFSimpleAudioVolume_iface; IMFAudioStreamVolume IMFAudioStreamVolume_iface; IMFAudioPolicy IMFAudioPolicy_iface; IMFAsyncCallback render_callback; LONG refcount; 
IMFMediaEventQueue *event_queue; IMFMediaEventQueue *stream_event_queue; IMFPresentationClock *clock; IMFMediaType *media_type; IMFMediaType *current_media_type; IMMDevice *device; IAudioClient *audio_client; IAudioRenderClient *audio_render_client; IAudioStreamVolume *stream_volume; ISimpleAudioVolume *audio_volume; struct { unsigned int flags; GUID session_id; } stream_config; HANDLE buffer_ready_event; MFWORKITEM_KEY buffer_ready_key; unsigned int frame_size; struct list queue; enum stream_state state; unsigned int flags; CRITICAL_SECTION cs; }; static void release_pending_object(struct queued_object *object) { list_remove(&object->entry); switch (object->type) { case OBJECT_TYPE_SAMPLE: if (object->u.sample.sample) IMFSample_Release(object->u.sample.sample); break; case OBJECT_TYPE_MARKER: PropVariantClear(&object->u.marker.context); break; } free(object); } static struct audio_renderer *impl_from_IMFMediaSink(IMFMediaSink *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFMediaSink_iface); } static struct audio_renderer *impl_from_IMFMediaSinkPreroll(IMFMediaSinkPreroll *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFMediaSinkPreroll_iface); } static struct audio_renderer *impl_from_IMFClockStateSink(IMFClockStateSink *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFClockStateSink_iface); } static struct audio_renderer *impl_from_IMFMediaEventGenerator(IMFMediaEventGenerator *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFMediaEventGenerator_iface); } static struct audio_renderer *impl_from_IMFGetService(IMFGetService *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFGetService_iface); } static struct audio_renderer *impl_from_IMFSimpleAudioVolume(IMFSimpleAudioVolume *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFSimpleAudioVolume_iface); } static struct audio_renderer *impl_from_IMFAudioStreamVolume(IMFAudioStreamVolume *iface) { return 
CONTAINING_RECORD(iface, struct audio_renderer, IMFAudioStreamVolume_iface); } static struct audio_renderer *impl_from_IMFAudioPolicy(IMFAudioPolicy *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFAudioPolicy_iface); } static struct audio_renderer *impl_from_IMFStreamSink(IMFStreamSink *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFStreamSink_iface); } static struct audio_renderer *impl_from_IMFMediaTypeHandler(IMFMediaTypeHandler *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, IMFMediaTypeHandler_iface); } static struct audio_renderer *impl_from_render_callback_IMFAsyncCallback(IMFAsyncCallback *iface) { return CONTAINING_RECORD(iface, struct audio_renderer, render_callback); } static HRESULT WINAPI audio_renderer_sink_QueryInterface(IMFMediaSink *iface, REFIID riid, void **obj) { struct audio_renderer *renderer = impl_from_IMFMediaSink(iface); TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj); if (IsEqualIID(riid, &IID_IMFMediaSink) || IsEqualIID(riid, &IID_IUnknown)) { *obj = iface; } else if (IsEqualIID(riid, &IID_IMFMediaSinkPreroll)) { *obj = &renderer->IMFMediaSinkPreroll_iface; } else if (IsEqualIID(riid, &IID_IMFClockStateSink)) { *obj = &renderer->IMFClockStateSink_iface; } else if (IsEqualIID(riid, &IID_IMFMediaEventGenerator)) { *obj = &renderer->IMFMediaEventGenerator_iface; } else if (IsEqualIID(riid, &IID_IMFGetService)) { *obj = &renderer->IMFGetService_iface; } else { WARN("Unsupported %s.\n", debugstr_guid(riid)); *obj = NULL; return E_NOINTERFACE; } IUnknown_AddRef((IUnknown *)*obj); return S_OK; } static ULONG WINAPI audio_renderer_sink_AddRef(IMFMediaSink *iface) { struct audio_renderer *renderer = impl_from_IMFMediaSink(iface); ULONG refcount = InterlockedIncrement(&renderer->refcount); TRACE("%p, refcount %u.\n", iface, refcount); return refcount; } static void audio_renderer_release_audio_client(struct audio_renderer *renderer) { struct queued_object *obj, *obj2; 
    /* Tear down all WASAPI state and drop queued samples/markers.
     * NOTE(review): MFCancelWorkItem is called even when buffer_ready_key
     * is 0 — presumably a zero key is a harmless no-op/failure; confirm. */
    MFCancelWorkItem(renderer->buffer_ready_key);

    LIST_FOR_EACH_ENTRY_SAFE(obj, obj2, &renderer->queue, struct queued_object, entry)
    {
        release_pending_object(obj);
    }

    renderer->buffer_ready_key = 0;

    if (renderer->audio_client)
    {
        /* Stop streaming and discard pending device data before releasing. */
        IAudioClient_Stop(renderer->audio_client);
        IAudioClient_Reset(renderer->audio_client);
        IAudioClient_Release(renderer->audio_client);
    }
    renderer->audio_client = NULL;

    if (renderer->audio_render_client)
        IAudioRenderClient_Release(renderer->audio_render_client);
    renderer->audio_render_client = NULL;

    if (renderer->stream_volume)
        IAudioStreamVolume_Release(renderer->stream_volume);
    renderer->stream_volume = NULL;

    if (renderer->audio_volume)
        ISimpleAudioVolume_Release(renderer->audio_volume);
    renderer->audio_volume = NULL;

    /* Client is gone, so any preroll state is invalid. */
    renderer->flags &= ~SAR_PREROLLED;
}

/* IMFMediaSink::Release. On the final reference, releases every COM
 * dependency, the audio client state, and frees the renderer itself. */
static ULONG WINAPI audio_renderer_sink_Release(IMFMediaSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);
    ULONG refcount = InterlockedDecrement(&renderer->refcount);

    TRACE("%p, refcount %u.\n", iface, refcount);

    if (!refcount)
    {
        if (renderer->event_queue)
            IMFMediaEventQueue_Release(renderer->event_queue);
        if (renderer->stream_event_queue)
            IMFMediaEventQueue_Release(renderer->stream_event_queue);
        if (renderer->clock)
            IMFPresentationClock_Release(renderer->clock);
        if (renderer->device)
            IMMDevice_Release(renderer->device);
        if (renderer->media_type)
            IMFMediaType_Release(renderer->media_type);
        if (renderer->current_media_type)
            IMFMediaType_Release(renderer->current_media_type);
        audio_renderer_release_audio_client(renderer);
        CloseHandle(renderer->buffer_ready_event);
        DeleteCriticalSection(&renderer->cs);
        free(renderer);
    }

    return refcount;
}

/* IMFMediaSink::GetCharacteristics: the SAR is a fixed-stream sink that
 * supports preroll. Fails with MF_E_SHUTDOWN after Shutdown(). */
static HRESULT WINAPI audio_renderer_sink_GetCharacteristics(IMFMediaSink *iface, DWORD *flags)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);

    TRACE("%p, %p.\n", iface, flags);

    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_SHUTDOWN;

    *flags = MEDIASINK_FIXED_STREAMS | MEDIASINK_CAN_PREROLL;

    return S_OK;
}

static
HRESULT WINAPI audio_renderer_sink_AddStreamSink(IMFMediaSink *iface, DWORD stream_sink_id,
        IMFMediaType *media_type, IMFStreamSink **stream_sink)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);

    TRACE("%p, %#x, %p, %p.\n", iface, stream_sink_id, media_type, stream_sink);

    /* The sink exposes a single fixed stream; adding is never allowed. */
    return renderer->flags & SAR_SHUT_DOWN ? MF_E_SHUTDOWN : MF_E_STREAMSINKS_FIXED;
}

static HRESULT WINAPI audio_renderer_sink_RemoveStreamSink(IMFMediaSink *iface, DWORD stream_sink_id)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);

    TRACE("%p, %#x.\n", iface, stream_sink_id);

    /* Fixed-stream sink: removal is never allowed. */
    return renderer->flags & SAR_SHUT_DOWN ? MF_E_SHUTDOWN : MF_E_STREAMSINKS_FIXED;
}

static HRESULT WINAPI audio_renderer_sink_GetStreamSinkCount(IMFMediaSink *iface, DWORD *count)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);

    TRACE("%p, %p.\n", iface, count);

    if (!count)
        return E_POINTER;

    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_SHUTDOWN;

    /* Always exactly one stream. */
    *count = 1;

    return S_OK;
}

static HRESULT WINAPI audio_renderer_sink_GetStreamSinkByIndex(IMFMediaSink *iface, DWORD index,
        IMFStreamSink **stream)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %u, %p.\n", iface, index, stream);

    EnterCriticalSection(&renderer->cs);

    if (renderer->flags & SAR_SHUT_DOWN)
        hr = MF_E_SHUTDOWN;
    else if (index > 0)
        hr = MF_E_INVALIDINDEX;
    else
    {
        /* Only index 0 is valid; the returned stream shares the sink's refcount. */
        *stream = &renderer->IMFStreamSink_iface;
        IMFStreamSink_AddRef(*stream);
    }

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_sink_GetStreamSinkById(IMFMediaSink *iface, DWORD stream_sink_id,
        IMFStreamSink **stream)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %#x, %p.\n", iface, stream_sink_id, stream);

    EnterCriticalSection(&renderer->cs);

    if (renderer->flags & SAR_SHUT_DOWN)
        hr = MF_E_SHUTDOWN;
    else if (stream_sink_id > 0)
        hr = MF_E_INVALIDSTREAMNUMBER;
    else
    {
        /* The single stream always has identifier 0 (see GetIdentifier()). */
        *stream = &renderer->IMFStreamSink_iface;
        IMFStreamSink_AddRef(*stream);
    }

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

/* Swap the presentation clock, keeping the clock-state-sink registration in
 * step: unregister+release the old clock, then addref+register the new one.
 * Passing NULL simply detaches (used from Shutdown()). */
static void audio_renderer_set_presentation_clock(struct audio_renderer *renderer, IMFPresentationClock *clock)
{
    if (renderer->clock)
    {
        IMFPresentationClock_RemoveClockStateSink(renderer->clock, &renderer->IMFClockStateSink_iface);
        IMFPresentationClock_Release(renderer->clock);
    }
    renderer->clock = clock;
    if (renderer->clock)
    {
        IMFPresentationClock_AddRef(renderer->clock);
        IMFPresentationClock_AddClockStateSink(renderer->clock, &renderer->IMFClockStateSink_iface);
    }
}

static HRESULT WINAPI audio_renderer_sink_SetPresentationClock(IMFMediaSink *iface, IMFPresentationClock *clock)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, clock);

    EnterCriticalSection(&renderer->cs);

    if (renderer->flags & SAR_SHUT_DOWN)
        hr = MF_E_SHUTDOWN;
    else
        audio_renderer_set_presentation_clock(renderer, clock);

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_sink_GetPresentationClock(IMFMediaSink *iface, IMFPresentationClock **clock)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, clock);

    if (!clock)
        return E_POINTER;

    EnterCriticalSection(&renderer->cs);

    if (renderer->flags & SAR_SHUT_DOWN)
        hr = MF_E_SHUTDOWN;
    else if (renderer->clock)
    {
        *clock = renderer->clock;
        IMFPresentationClock_AddRef(*clock);
    }
    else
        /* No clock has been set yet. */
        hr = MF_E_NO_CLOCK;

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_sink_Shutdown(IMFMediaSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSink(iface);

    TRACE("%p.\n", iface);

    /* NOTE(review): this shutdown pre-check happens before taking the lock;
     * flags is set under the lock below — confirm double shutdown can't race. */
    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_SHUTDOWN;

    EnterCriticalSection(&renderer->cs);
    renderer->flags |= SAR_SHUT_DOWN;
    /* Shut down both event queues so pending GetEvent() calls fail,
     * detach from the clock, and release all WASAPI resources. */
    IMFMediaEventQueue_Shutdown(renderer->event_queue);
    IMFMediaEventQueue_Shutdown(renderer->stream_event_queue);
    audio_renderer_set_presentation_clock(renderer, NULL);
    audio_renderer_release_audio_client(renderer);
    LeaveCriticalSection(&renderer->cs);

    return S_OK;
}

static const IMFMediaSinkVtbl audio_renderer_sink_vtbl =
{
    audio_renderer_sink_QueryInterface,
    audio_renderer_sink_AddRef,
    audio_renderer_sink_Release,
    audio_renderer_sink_GetCharacteristics,
    audio_renderer_sink_AddStreamSink,
    audio_renderer_sink_RemoveStreamSink,
    audio_renderer_sink_GetStreamSinkCount,
    audio_renderer_sink_GetStreamSinkByIndex,
    audio_renderer_sink_GetStreamSinkById,
    audio_renderer_sink_SetPresentationClock,
    audio_renderer_sink_GetPresentationClock,
    audio_renderer_sink_Shutdown,
};

/* Kick off prerolling once: request two samples from upstream via
 * MEStreamSinkRequestSample events, then mark the sink prerolled. */
static void audio_renderer_preroll(struct audio_renderer *renderer)
{
    unsigned int i;

    if (renderer->flags & SAR_PREROLLED)
        return;

    for (i = 0; i < 2; ++i)
        IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkRequestSample,
                &GUID_NULL, S_OK, NULL);

    renderer->flags |= SAR_PREROLLED;
}

/* IMFMediaSinkPreroll methods forward QI/AddRef/Release to the media sink. */
static HRESULT WINAPI audio_renderer_preroll_QueryInterface(IMFMediaSinkPreroll *iface, REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSinkPreroll(iface);
    return IMFMediaSink_QueryInterface(&renderer->IMFMediaSink_iface, riid, obj);
}

static ULONG WINAPI audio_renderer_preroll_AddRef(IMFMediaSinkPreroll *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSinkPreroll(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_preroll_Release(IMFMediaSinkPreroll *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSinkPreroll(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_preroll_NotifyPreroll(IMFMediaSinkPreroll *iface, MFTIME start_time)
{
    struct audio_renderer *renderer = impl_from_IMFMediaSinkPreroll(iface);

    TRACE("%p, %s.\n", iface, debugstr_time(start_time));

    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_SHUTDOWN;

    /* Request initial samples, then immediately report MEStreamSinkPrerolled
     * (the event call continues in the next chunk). */
    audio_renderer_preroll(renderer);

    return
IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkPrerolled,
            &GUID_NULL, S_OK, NULL); }

static const IMFMediaSinkPrerollVtbl audio_renderer_preroll_vtbl =
{
    audio_renderer_preroll_QueryInterface,
    audio_renderer_preroll_AddRef,
    audio_renderer_preroll_Release,
    audio_renderer_preroll_NotifyPreroll,
};

/* IMFMediaEventGenerator: thin forwarding layer over renderer->event_queue,
 * with lifetime delegated to the media sink. */
static HRESULT WINAPI audio_renderer_events_QueryInterface(IMFMediaEventGenerator *iface, REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);
    return IMFMediaSink_QueryInterface(&renderer->IMFMediaSink_iface, riid, obj);
}

static ULONG WINAPI audio_renderer_events_AddRef(IMFMediaEventGenerator *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_events_Release(IMFMediaEventGenerator *iface)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_events_GetEvent(IMFMediaEventGenerator *iface, DWORD flags, IMFMediaEvent **event)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);

    TRACE("%p, %#x, %p.\n", iface, flags, event);

    return IMFMediaEventQueue_GetEvent(renderer->event_queue, flags, event);
}

static HRESULT WINAPI audio_renderer_events_BeginGetEvent(IMFMediaEventGenerator *iface, IMFAsyncCallback *callback,
        IUnknown *state)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);

    TRACE("%p, %p, %p.\n", iface, callback, state);

    return IMFMediaEventQueue_BeginGetEvent(renderer->event_queue, callback, state);
}

static HRESULT WINAPI audio_renderer_events_EndGetEvent(IMFMediaEventGenerator *iface, IMFAsyncResult *result,
        IMFMediaEvent **event)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);

    TRACE("%p, %p, %p.\n", iface, result, event);

    return IMFMediaEventQueue_EndGetEvent(renderer->event_queue, result, event);
}

static HRESULT WINAPI audio_renderer_events_QueueEvent(IMFMediaEventGenerator *iface, MediaEventType event_type,
        REFGUID ext_type, HRESULT hr, const PROPVARIANT *value)
{
    struct audio_renderer *renderer = impl_from_IMFMediaEventGenerator(iface);

    TRACE("%p, %u, %s, %#x, %p.\n", iface, event_type, debugstr_guid(ext_type), hr, value);

    return IMFMediaEventQueue_QueueEventParamVar(renderer->event_queue, event_type, ext_type, hr, value);
}

static const IMFMediaEventGeneratorVtbl audio_renderer_events_vtbl =
{
    audio_renderer_events_QueryInterface,
    audio_renderer_events_AddRef,
    audio_renderer_events_Release,
    audio_renderer_events_GetEvent,
    audio_renderer_events_BeginGetEvent,
    audio_renderer_events_EndGetEvent,
    audio_renderer_events_QueueEvent,
};

/* IMFClockStateSink: reacts to presentation-clock transitions by driving the
 * WASAPI client and mirroring state via MEStreamSink* events. */
static HRESULT WINAPI audio_renderer_clock_sink_QueryInterface(IMFClockStateSink *iface, REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    return IMFMediaSink_QueryInterface(&renderer->IMFMediaSink_iface, riid, obj);
}

static ULONG WINAPI audio_renderer_clock_sink_AddRef(IMFClockStateSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_clock_sink_Release(IMFClockStateSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_clock_sink_OnClockStart(IMFClockStateSink *iface, MFTIME systime, LONGLONG offset)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %s, %s.\n", iface, debugstr_time(systime), debugstr_time(offset));

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_client)
    {
        if (renderer->state == STREAM_STATE_STOPPED)
        {
            /* Start failure is logged but the state is advanced regardless. */
            if (FAILED(hr = IAudioClient_Start(renderer->audio_client)))
                WARN("Failed to start audio client, hr %#x.\n", hr);
            renderer->state = STREAM_STATE_RUNNING;
        }
    }
    else
        hr = MF_E_NOT_INITIALIZED;

    /* MEStreamSinkStarted is always queued, carrying the status hr. */
    IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkStarted, &GUID_NULL, hr, NULL);
    if (SUCCEEDED(hr))
        audio_renderer_preroll(renderer);

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_clock_sink_OnClockStop(IMFClockStateSink *iface, MFTIME systime)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %s.\n", iface, debugstr_time(systime));

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_client)
    {
        if (renderer->state != STREAM_STATE_STOPPED)
        {
            /* Stop first, then Reset to flush buffered audio; failures are
             * logged but the state transition still happens. */
            if (SUCCEEDED(hr = IAudioClient_Stop(renderer->audio_client)))
            {
                if (FAILED(hr = IAudioClient_Reset(renderer->audio_client)))
                    WARN("Failed to reset audio client, hr %#x.\n", hr);
            }
            else
                WARN("Failed to stop audio client, hr %#x.\n", hr);
            renderer->state = STREAM_STATE_STOPPED;
            /* A subsequent start must preroll again. */
            renderer->flags &= ~SAR_PREROLLED;
        }
    }
    else
        hr = MF_E_NOT_INITIALIZED;

    IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkStopped, &GUID_NULL, hr, NULL);

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_clock_sink_OnClockPause(IMFClockStateSink *iface, MFTIME systime)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    /* hr is assigned on every reachable path below (both branches of the
     * RUNNING check set it before use). */
    HRESULT hr;

    TRACE("%p, %s.\n", iface, debugstr_time(systime));

    EnterCriticalSection(&renderer->cs);
    if (renderer->state == STREAM_STATE_RUNNING)
    {
        if (renderer->audio_client)
        {
            /* Pause == Stop without Reset, so buffered data is preserved. */
            if (FAILED(hr = IAudioClient_Stop(renderer->audio_client)))
                WARN("Failed to stop audio client, hr %#x.\n", hr);
            renderer->state = STREAM_STATE_PAUSED;
        }
        else
            hr = MF_E_NOT_INITIALIZED;

        IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkPaused, &GUID_NULL, hr, NULL);
    }
    else
        /* Pausing is only valid from the running state. */
        hr = MF_E_INVALID_STATE_TRANSITION;
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI
audio_renderer_clock_sink_OnClockRestart(IMFClockStateSink *iface, MFTIME systime)
{
    struct audio_renderer *renderer = impl_from_IMFClockStateSink(iface);
    BOOL preroll = FALSE;
    HRESULT hr = S_OK;

    TRACE("%p, %s.\n", iface, debugstr_time(systime));

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_client)
    {
        /* Only preroll again when actually transitioning into RUNNING. */
        if ((preroll = (renderer->state != STREAM_STATE_RUNNING)))
        {
            if (FAILED(hr = IAudioClient_Start(renderer->audio_client)))
                WARN("Failed to start audio client, hr %#x.\n", hr);
            renderer->state = STREAM_STATE_RUNNING;
        }
    }
    else
        hr = MF_E_NOT_INITIALIZED;

    /* Restart reports MEStreamSinkStarted, same as a fresh start. */
    IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkStarted, &GUID_NULL, hr, NULL);

    if (preroll)
        audio_renderer_preroll(renderer);

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_clock_sink_OnClockSetRate(IMFClockStateSink *iface, MFTIME systime, float rate)
{
    FIXME("%p, %s, %f.\n", iface, debugstr_time(systime), rate);

    return E_NOTIMPL;
}

static const IMFClockStateSinkVtbl audio_renderer_clock_sink_vtbl =
{
    audio_renderer_clock_sink_QueryInterface,
    audio_renderer_clock_sink_AddRef,
    audio_renderer_clock_sink_Release,
    audio_renderer_clock_sink_OnClockStart,
    audio_renderer_clock_sink_OnClockStop,
    audio_renderer_clock_sink_OnClockPause,
    audio_renderer_clock_sink_OnClockRestart,
    audio_renderer_clock_sink_OnClockSetRate,
};

/* IMFGetService: lifetime forwarded to the media sink. */
static HRESULT WINAPI audio_renderer_get_service_QueryInterface(IMFGetService *iface, REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFGetService(iface);
    return IMFMediaSink_QueryInterface(&renderer->IMFMediaSink_iface, riid, obj);
}

static ULONG WINAPI audio_renderer_get_service_AddRef(IMFGetService *iface)
{
    struct audio_renderer *renderer = impl_from_IMFGetService(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_get_service_Release(IMFGetService *iface)
{
    struct audio_renderer *renderer = impl_from_IMFGetService(iface);
    return
IMFMediaSink_Release(&renderer->IMFMediaSink_iface); }

/* IMFGetService::GetService(): maps the three SAR service GUIDs to the
 * corresponding embedded volume/policy interfaces. */
static HRESULT WINAPI audio_renderer_get_service_GetService(IMFGetService *iface, REFGUID service,
        REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFGetService(iface);

    TRACE("%p, %s, %s, %p.\n", iface, debugstr_guid(service), debugstr_guid(riid), obj);

    *obj = NULL;

    if (IsEqualGUID(service, &MR_POLICY_VOLUME_SERVICE) && IsEqualIID(riid, &IID_IMFSimpleAudioVolume))
    {
        *obj = &renderer->IMFSimpleAudioVolume_iface;
    }
    else if (IsEqualGUID(service, &MR_STREAM_VOLUME_SERVICE) && IsEqualIID(riid, &IID_IMFAudioStreamVolume))
    {
        *obj = &renderer->IMFAudioStreamVolume_iface;
    }
    else if (IsEqualGUID(service, &MR_AUDIO_POLICY_SERVICE) && IsEqualIID(riid, &IID_IMFAudioPolicy))
    {
        *obj = &renderer->IMFAudioPolicy_iface;
    }
    else
        FIXME("Unsupported service %s, interface %s.\n", debugstr_guid(service), debugstr_guid(riid));

    if (*obj)
        IUnknown_AddRef((IUnknown *)*obj);

    return *obj ? S_OK : E_NOINTERFACE;
}

static const IMFGetServiceVtbl audio_renderer_get_service_vtbl =
{
    audio_renderer_get_service_QueryInterface,
    audio_renderer_get_service_AddRef,
    audio_renderer_get_service_Release,
    audio_renderer_get_service_GetService,
};

/* IMFSimpleAudioVolume: master volume/mute, proxied onto the session's
 * ISimpleAudioVolume when the audio client exists; otherwise a silent no-op. */
static HRESULT WINAPI audio_renderer_simple_volume_QueryInterface(IMFSimpleAudioVolume *iface, REFIID riid, void **obj)
{
    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IMFSimpleAudioVolume) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IMFSimpleAudioVolume_AddRef(iface);
        return S_OK;
    }

    WARN("Unsupported interface %s.\n", debugstr_guid(riid));
    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI audio_renderer_simple_volume_AddRef(IMFSimpleAudioVolume *iface)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_simple_volume_Release(IMFSimpleAudioVolume *iface)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_simple_volume_SetMasterVolume(IMFSimpleAudioVolume *iface, float level)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %f.\n", iface, level);

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_volume)
        hr = ISimpleAudioVolume_SetMasterVolume(renderer->audio_volume, level, NULL);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_simple_volume_GetMasterVolume(IMFSimpleAudioVolume *iface, float *level)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, level);

    if (!level)
        return E_POINTER;

    /* Default to silence when no session volume control exists yet. */
    *level = 0.0f;

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_volume)
        hr = ISimpleAudioVolume_GetMasterVolume(renderer->audio_volume, level);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_simple_volume_SetMute(IMFSimpleAudioVolume *iface, BOOL mute)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %d.\n", iface, mute);

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_volume)
        hr = ISimpleAudioVolume_SetMute(renderer->audio_volume, mute, NULL);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_simple_volume_GetMute(IMFSimpleAudioVolume *iface, BOOL *mute)
{
    struct audio_renderer *renderer = impl_from_IMFSimpleAudioVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, mute);

    if (!mute)
        return E_POINTER;

    *mute = FALSE;

    EnterCriticalSection(&renderer->cs);
    if (renderer->audio_volume)
        hr = ISimpleAudioVolume_GetMute(renderer->audio_volume, mute);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static const IMFSimpleAudioVolumeVtbl audio_renderer_simple_volume_vtbl =
{
    audio_renderer_simple_volume_QueryInterface,
    audio_renderer_simple_volume_AddRef,
    audio_renderer_simple_volume_Release,
    audio_renderer_simple_volume_SetMasterVolume,
    audio_renderer_simple_volume_GetMasterVolume,
    audio_renderer_simple_volume_SetMute,
    audio_renderer_simple_volume_GetMute,
};

/* IMFAudioStreamVolume: per-channel volume, proxied onto IAudioStreamVolume
 * when available; silent no-op otherwise. */
static HRESULT WINAPI audio_renderer_stream_volume_QueryInterface(IMFAudioStreamVolume *iface, REFIID riid, void **obj)
{
    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IMFAudioStreamVolume) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IMFAudioStreamVolume_AddRef(iface);
        return S_OK;
    }

    WARN("Unsupported interface %s.\n", debugstr_guid(riid));
    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI audio_renderer_stream_volume_AddRef(IMFAudioStreamVolume *iface)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_stream_volume_Release(IMFAudioStreamVolume *iface)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_stream_volume_GetChannelCount(IMFAudioStreamVolume *iface, UINT32 *count)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, count);

    if (!count)
        return E_POINTER;

    /* Zero channels until a stream volume control has been obtained. */
    *count = 0;

    EnterCriticalSection(&renderer->cs);
    if (renderer->stream_volume)
        hr = IAudioStreamVolume_GetChannelCount(renderer->stream_volume, count);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_stream_volume_SetChannelVolume(IMFAudioStreamVolume *iface, UINT32 index, float level)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %u, %f.\n", iface, index, level);

    EnterCriticalSection(&renderer->cs);
    if (renderer->stream_volume)
        hr = IAudioStreamVolume_SetChannelVolume(renderer->stream_volume, index, level);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_stream_volume_GetChannelVolume(IMFAudioStreamVolume *iface, UINT32 index, float *level)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %u, %p.\n", iface, index, level);

    if (!level)
        return E_POINTER;

    *level = 0.0f;

    EnterCriticalSection(&renderer->cs);
    if (renderer->stream_volume)
        hr = IAudioStreamVolume_GetChannelVolume(renderer->stream_volume, index, level);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_stream_volume_SetAllVolumes(IMFAudioStreamVolume *iface, UINT32 count,
        const float *volumes)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %u, %p.\n", iface, count, volumes);

    EnterCriticalSection(&renderer->cs);
    if (renderer->stream_volume)
        hr = IAudioStreamVolume_SetAllVolumes(renderer->stream_volume, count, volumes);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static HRESULT WINAPI audio_renderer_stream_volume_GetAllVolumes(IMFAudioStreamVolume *iface, UINT32 count,
        float *volumes)
{
    struct audio_renderer *renderer = impl_from_IMFAudioStreamVolume(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %u, %p.\n", iface, count, volumes);

    if (!volumes)
        return E_POINTER;

    /* Zero-fill the caller's buffer so it is defined even without a client. */
    if (count)
        memset(volumes, 0, sizeof(*volumes) * count);

    EnterCriticalSection(&renderer->cs);
    if (renderer->stream_volume)
        hr = IAudioStreamVolume_GetAllVolumes(renderer->stream_volume, count, volumes);
    LeaveCriticalSection(&renderer->cs);

    return hr;
}

static const IMFAudioStreamVolumeVtbl audio_renderer_stream_volume_vtbl =
{
    audio_renderer_stream_volume_QueryInterface,
    audio_renderer_stream_volume_AddRef,
    audio_renderer_stream_volume_Release,
    audio_renderer_stream_volume_GetChannelCount,
    audio_renderer_stream_volume_SetChannelVolume,
    audio_renderer_stream_volume_GetChannelVolume,
    audio_renderer_stream_volume_SetAllVolumes,
    audio_renderer_stream_volume_GetAllVolumes,
};
/* IMFAudioPolicy: currently all policy methods are unimplemented stubs. */
static HRESULT WINAPI audio_renderer_policy_QueryInterface(IMFAudioPolicy *iface, REFIID riid, void **obj)
{
    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IMFAudioPolicy) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IMFAudioPolicy_AddRef(iface);
        return S_OK;
    }

    WARN("Unsupported interface %s.\n", debugstr_guid(riid));
    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI audio_renderer_policy_AddRef(IMFAudioPolicy *iface)
{
    struct audio_renderer *renderer = impl_from_IMFAudioPolicy(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_policy_Release(IMFAudioPolicy *iface)
{
    struct audio_renderer *renderer = impl_from_IMFAudioPolicy(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_policy_SetGroupingParam(IMFAudioPolicy *iface, REFGUID param)
{
    FIXME("%p, %s.\n", iface, debugstr_guid(param));

    return E_NOTIMPL;
}

static HRESULT WINAPI audio_renderer_policy_GetGroupingParam(IMFAudioPolicy *iface, GUID *param)
{
    FIXME("%p, %p.\n", iface, param);

    return E_NOTIMPL;
}

static HRESULT WINAPI audio_renderer_policy_SetDisplayName(IMFAudioPolicy *iface, const WCHAR *name)
{
    FIXME("%p, %s.\n", iface, debugstr_w(name));

    return E_NOTIMPL;
}

static HRESULT WINAPI audio_renderer_policy_GetDisplayName(IMFAudioPolicy *iface, WCHAR **name)
{
    FIXME("%p, %p.\n", iface, name);

    return E_NOTIMPL;
}

static HRESULT WINAPI audio_renderer_policy_SetIconPath(IMFAudioPolicy *iface, const WCHAR *path)
{
    FIXME("%p, %s.\n", iface, debugstr_w(path));

    return E_NOTIMPL;
}

static HRESULT WINAPI audio_renderer_policy_GetIconPath(IMFAudioPolicy *iface, WCHAR **path)
{
    FIXME("%p, %p.\n", iface, path);

    return E_NOTIMPL;
}

static const IMFAudioPolicyVtbl audio_renderer_policy_vtbl =
{
    audio_renderer_policy_QueryInterface,
    audio_renderer_policy_AddRef,
    audio_renderer_policy_Release,
    audio_renderer_policy_SetGroupingParam,
    audio_renderer_policy_GetGroupingParam,
    audio_renderer_policy_SetDisplayName,
    audio_renderer_policy_GetDisplayName,
    audio_renderer_policy_SetIconPath,
    audio_renderer_policy_GetIconPath,
};

/* Resolve the MMDevice to render to, from optional creation attributes:
 * either an explicit endpoint id, or the default render endpoint for the
 * requested (or eMultimedia) role.  Also captures stream-config attributes
 * for the later IAudioClient initialization.  Any device-lookup failure is
 * mapped to MF_E_NO_AUDIO_PLAYBACK_DEVICE. */
static HRESULT sar_create_mmdevice(IMFAttributes *attributes, struct audio_renderer *renderer)
{
    WCHAR *endpoint;
    unsigned int length, role = eMultimedia;
    IMMDeviceEnumerator *devenum;
    HRESULT hr;

    if (attributes)
    {
        /* Mutually exclusive attributes. */
        if (SUCCEEDED(IMFAttributes_GetItem(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ROLE, NULL))
                && SUCCEEDED(IMFAttributes_GetItem(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ID, NULL)))
        {
            return E_INVALIDARG;
        }
    }

    if (FAILED(hr = CoCreateInstance(&CLSID_MMDeviceEnumerator, NULL, CLSCTX_INPROC_SERVER,
            &IID_IMMDeviceEnumerator, (void **)&devenum)))
    {
        return hr;
    }

    role = eMultimedia;
    if (attributes && SUCCEEDED(IMFAttributes_GetUINT32(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ROLE, &role)))
        TRACE("Specified role %d.\n", role);

    if (attributes && SUCCEEDED(IMFAttributes_GetAllocatedString(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ID,
            &endpoint, &length)))
    {
        TRACE("Specified end point %s.\n", debugstr_w(endpoint));
        hr = IMMDeviceEnumerator_GetDevice(devenum, endpoint, &renderer->device);
        CoTaskMemFree(endpoint);
    }
    else
        hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint(devenum, eRender, role, &renderer->device);

    /* Configuration attributes to be used later for audio client initialization. */
    if (attributes)
    {
        IMFAttributes_GetUINT32(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_FLAGS, &renderer->stream_config.flags);
        IMFAttributes_GetGUID(attributes, &MF_AUDIO_RENDERER_ATTRIBUTE_SESSION_ID, &renderer->stream_config.session_id);
    }

    if (FAILED(hr))
        hr = MF_E_NO_AUDIO_PLAYBACK_DEVICE;

    IMMDeviceEnumerator_Release(devenum);

    return hr;
}

/* IMFStreamSink::QueryInterface(): exposes the stream sink (which is also the
 * event generator) and its media type handler. */
static HRESULT WINAPI audio_renderer_stream_QueryInterface(IMFStreamSink *iface, REFIID riid, void **obj)
{
    struct audio_renderer *renderer = impl_from_IMFStreamSink(iface);

    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IMFStreamSink) ||
            IsEqualIID(riid, &IID_IMFMediaEventGenerator) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = &renderer->IMFStreamSink_iface;
    }
    else if (IsEqualIID(riid, &IID_IMFMediaTypeHandler))
    {
        *obj = &renderer->IMFMediaTypeHandler_iface;
    }
    else
    {
        WARN("Unsupported %s.\n", debugstr_guid(riid));
        *obj = NULL;
        return E_NOINTERFACE;
    }

    IUnknown_AddRef((IUnknown *)*obj);

    return S_OK;
}

static ULONG WINAPI audio_renderer_stream_AddRef(IMFStreamSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFStreamSink(iface);
    return IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_stream_Release(IMFStreamSink *iface)
{
    struct audio_renderer *renderer = impl_from_IMFStreamSink(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_stream_GetEvent(IMFStreamSink *iface, DWORD flags, IMFMediaEvent **event)
{
    struct audio_renderer *renderer = impl_from_IMFStreamSink(iface);

    TRACE("%p, %#x, %p.\n", iface, flags, event);

    /* Stream methods report MF_E_STREAMSINK_REMOVED (not MF_E_SHUTDOWN)
     * after the sink has shut down. */
    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_STREAMSINK_REMOVED;

    return IMFMediaEventQueue_GetEvent(renderer->stream_event_queue, flags, event);
}

static HRESULT WINAPI audio_renderer_stream_BeginGetEvent(IMFStreamSink *iface, IMFAsyncCallback *callback,
        IUnknown *state)
{
    struct audio_renderer *renderer = impl_from_IMFStreamSink(iface);

    TRACE("%p, %p, %p.\n", iface, callback,
state); if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; return IMFMediaEventQueue_BeginGetEvent(renderer->stream_event_queue, callback, state); } static HRESULT WINAPI audio_renderer_stream_EndGetEvent(IMFStreamSink *iface, IMFAsyncResult *result, IMFMediaEvent **event) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); TRACE("%p, %p, %p.\n", iface, result, event); if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; return IMFMediaEventQueue_EndGetEvent(renderer->stream_event_queue, result, event); } static HRESULT WINAPI audio_renderer_stream_QueueEvent(IMFStreamSink *iface, MediaEventType event_type, REFGUID ext_type, HRESULT hr, const PROPVARIANT *value) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); TRACE("%p, %u, %s, %#x, %p.\n", iface, event_type, debugstr_guid(ext_type), hr, value); if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; return IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, event_type, ext_type, hr, value); } static HRESULT WINAPI audio_renderer_stream_GetMediaSink(IMFStreamSink *iface, IMFMediaSink **sink) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); TRACE("%p, %p.\n", iface, sink); if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; *sink = &renderer->IMFMediaSink_iface; IMFMediaSink_AddRef(*sink); return S_OK; } static HRESULT WINAPI audio_renderer_stream_GetIdentifier(IMFStreamSink *iface, DWORD *identifier) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); TRACE("%p, %p.\n", iface, identifier); if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; *identifier = 0; return S_OK; } static HRESULT WINAPI audio_renderer_stream_GetMediaTypeHandler(IMFStreamSink *iface, IMFMediaTypeHandler **handler) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); TRACE("%p, %p.\n", iface, handler); if (!handler) return E_POINTER; if (renderer->flags & 
SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; *handler = &renderer->IMFMediaTypeHandler_iface; IMFMediaTypeHandler_AddRef(*handler); return S_OK; } static HRESULT stream_queue_sample(struct audio_renderer *renderer, IMFSample *sample) { struct queued_object *object; if (!(object = calloc(1, sizeof(*object)))) return E_OUTOFMEMORY; object->type = OBJECT_TYPE_SAMPLE; object->u.sample.sample = sample; IMFSample_AddRef(object->u.sample.sample); list_add_tail(&renderer->queue, &object->entry); return S_OK; } static HRESULT WINAPI audio_renderer_stream_ProcessSample(IMFStreamSink *iface, IMFSample *sample) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); HRESULT hr = S_OK; TRACE("%p, %p.\n", iface, sample); if (!sample) return E_POINTER; if (renderer->flags & SAR_SHUT_DOWN) return MF_E_STREAMSINK_REMOVED; EnterCriticalSection(&renderer->cs); if (renderer->state == STREAM_STATE_RUNNING) hr = stream_queue_sample(renderer, sample); renderer->flags &= ~SAR_SAMPLE_REQUESTED; LeaveCriticalSection(&renderer->cs); return hr; } static HRESULT stream_place_marker(struct audio_renderer *renderer, MFSTREAMSINK_MARKER_TYPE marker_type, const PROPVARIANT *context_value) { struct queued_object *marker; HRESULT hr = S_OK; if (!(marker = calloc(1, sizeof(*marker)))) return E_OUTOFMEMORY; marker->type = OBJECT_TYPE_MARKER; marker->u.marker.type = marker_type; PropVariantInit(&marker->u.marker.context); if (context_value) hr = PropVariantCopy(&marker->u.marker.context, context_value); if (SUCCEEDED(hr)) list_add_tail(&renderer->queue, &marker->entry); else release_pending_object(marker); return hr; } static HRESULT WINAPI audio_renderer_stream_PlaceMarker(IMFStreamSink *iface, MFSTREAMSINK_MARKER_TYPE marker_type, const PROPVARIANT *marker_value, const PROPVARIANT *context_value) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); HRESULT hr; TRACE("%p, %d, %p, %p.\n", iface, marker_type, marker_value, context_value); if (renderer->flags & SAR_SHUT_DOWN) 
return MF_E_STREAMSINK_REMOVED; EnterCriticalSection(&renderer->cs); hr = stream_place_marker(renderer, marker_type, context_value); LeaveCriticalSection(&renderer->cs); return hr; } static HRESULT WINAPI audio_renderer_stream_Flush(IMFStreamSink *iface) { struct audio_renderer *renderer = impl_from_IMFStreamSink(iface); struct queued_object *obj, *obj2; HRESULT hr = S_OK; TRACE("%p.\n", iface); EnterCriticalSection(&renderer->cs); if (renderer->flags & SAR_SHUT_DOWN) hr = MF_E_STREAMSINK_REMOVED; else { LIST_FOR_EACH_ENTRY_SAFE(obj, obj2, &renderer->queue, struct queued_object, entry) { if (obj->type == OBJECT_TYPE_MARKER) { IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkMarker, &GUID_NULL, S_OK, &obj->u.marker.context); } release_pending_object(obj); } } LeaveCriticalSection(&renderer->cs); return hr; } static const IMFStreamSinkVtbl audio_renderer_stream_vtbl = { audio_renderer_stream_QueryInterface, audio_renderer_stream_AddRef, audio_renderer_stream_Release, audio_renderer_stream_GetEvent, audio_renderer_stream_BeginGetEvent, audio_renderer_stream_EndGetEvent, audio_renderer_stream_QueueEvent, audio_renderer_stream_GetMediaSink, audio_renderer_stream_GetIdentifier, audio_renderer_stream_GetMediaTypeHandler, audio_renderer_stream_ProcessSample, audio_renderer_stream_PlaceMarker, audio_renderer_stream_Flush, }; static HRESULT WINAPI audio_renderer_stream_type_handler_QueryInterface(IMFMediaTypeHandler *iface, REFIID riid, void **obj) { struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface); return IMFStreamSink_QueryInterface(&renderer->IMFStreamSink_iface, riid, obj); } static ULONG WINAPI audio_renderer_stream_type_handler_AddRef(IMFMediaTypeHandler *iface) { struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface); return IMFStreamSink_AddRef(&renderer->IMFStreamSink_iface); } static ULONG WINAPI audio_renderer_stream_type_handler_Release(IMFMediaTypeHandler *iface) { struct audio_renderer 
*renderer = impl_from_IMFMediaTypeHandler(iface);
    return IMFStreamSink_Release(&renderer->IMFStreamSink_iface);
}

/* IMFMediaTypeHandler::IsMediaTypeSupported - only the device mix format
 * captured in renderer->media_type is accepted; out_type is never filled in. */
static HRESULT WINAPI audio_renderer_stream_type_handler_IsMediaTypeSupported(IMFMediaTypeHandler *iface,
        IMFMediaType *in_type, IMFMediaType **out_type)
{
    struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface);
    unsigned int flags;
    HRESULT hr;

    TRACE("%p, %p, %p.\n", iface, in_type, out_type);

    EnterCriticalSection(&renderer->cs);
    hr = IMFMediaType_IsEqual(renderer->media_type, in_type, &flags);
    LeaveCriticalSection(&renderer->cs);

    return hr != S_OK ? MF_E_INVALIDMEDIATYPE : hr;
}

/* Exactly one media type is exposed (see GetMediaTypeByIndex below). */
static HRESULT WINAPI audio_renderer_stream_type_handler_GetMediaTypeCount(IMFMediaTypeHandler *iface, DWORD *count)
{
    TRACE("%p, %p.\n", iface, count);

    *count = 1;

    return S_OK;
}

/* NOTE(review): for index != 0 this returns S_OK without touching
 * *media_type - presumably MF_E_NO_MORE_TYPES was intended; confirm. */
static HRESULT WINAPI audio_renderer_stream_type_handler_GetMediaTypeByIndex(IMFMediaTypeHandler *iface, DWORD index,
        IMFMediaType **media_type)
{
    struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface);

    TRACE("%p, %u, %p.\n", iface, index, media_type);

    if (index == 0)
    {
        *media_type = renderer->media_type;
        IMFMediaType_AddRef(*media_type);
    }

    return S_OK;
}

/* (Re)create the WASAPI audio client together with its volume and render
 * service interfaces, and park a waiting work item on the buffer-ready event
 * so the render callback fires whenever the device wants data. */
static HRESULT audio_renderer_create_audio_client(struct audio_renderer *renderer)
{
    IMFAsyncResult *result;
    unsigned int flags;
    WAVEFORMATEX *wfx;
    HRESULT hr;

    /* Tear down any previous client before building a new one. */
    audio_renderer_release_audio_client(renderer);

    hr = IMMDevice_Activate(renderer->device, &IID_IAudioClient, CLSCTX_INPROC_SERVER, NULL,
            (void **)&renderer->audio_client);
    if (FAILED(hr))
    {
        WARN("Failed to create audio client, hr %#x.\n", hr);
        return hr;
    }

    /* FIXME: for now always use default format.
*/
    if (FAILED(hr = IAudioClient_GetMixFormat(renderer->audio_client, &wfx)))
    {
        WARN("Failed to get audio format, hr %#x.\n", hr);
        return hr;
    }

    /* Bytes per interleaved frame across all channels. */
    renderer->frame_size = wfx->wBitsPerSample * wfx->nChannels / 8;

    flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
    if (renderer->stream_config.flags & MF_AUDIO_RENDERER_ATTRIBUTE_FLAGS_CROSSPROCESS)
        flags |= AUDCLNT_STREAMFLAGS_CROSSPROCESS;
    if (renderer->stream_config.flags & MF_AUDIO_RENDERER_ATTRIBUTE_FLAGS_NOPERSIST)
        flags |= AUDCLNT_STREAMFLAGS_NOPERSIST;

    /* Buffer duration 1000000 is in REFERENCE_TIME (100 ns) units, i.e. 100 ms. */
    hr = IAudioClient_Initialize(renderer->audio_client, AUDCLNT_SHAREMODE_SHARED, flags, 1000000, 0, wfx,
            &renderer->stream_config.session_id);
    CoTaskMemFree(wfx);
    if (FAILED(hr))
    {
        WARN("Failed to initialize audio client, hr %#x.\n", hr);
        return hr;
    }

    if (FAILED(hr = IAudioClient_GetService(renderer->audio_client, &IID_IAudioStreamVolume,
            (void **)&renderer->stream_volume)))
    {
        WARN("Failed to get stream volume control, hr %#x.\n", hr);
        return hr;
    }

    if (FAILED(hr = IAudioClient_GetService(renderer->audio_client, &IID_ISimpleAudioVolume,
            (void **)&renderer->audio_volume)))
    {
        WARN("Failed to get audio volume control, hr %#x.\n", hr);
        return hr;
    }

    if (FAILED(hr = IAudioClient_GetService(renderer->audio_client, &IID_IAudioRenderClient,
            (void **)&renderer->audio_render_client)))
    {
        WARN("Failed to get audio render client, hr %#x.\n", hr);
        return hr;
    }

    if (FAILED(hr = IAudioClient_SetEventHandle(renderer->audio_client, renderer->buffer_ready_event)))
    {
        WARN("Failed to set event handle, hr %#x.\n", hr);
        return hr;
    }

    /* Arm the first wait; audio_renderer_render() re-arms it on each pass. */
    if (SUCCEEDED(hr = MFCreateAsyncResult(NULL, &renderer->render_callback, NULL, &result)))
    {
        if (FAILED(hr = MFPutWaitingWorkItem(renderer->buffer_ready_event, 0, result, &renderer->buffer_ready_key)))
            WARN("Failed to submit wait item, hr %#x.\n", hr);
        IMFAsyncResult_Release(result);
    }

    return hr;
}

/* IMFMediaTypeHandler::SetCurrentMediaType - accept a type matching the
 * device format, then (re)build the audio client for it. */
static HRESULT WINAPI audio_renderer_stream_type_handler_SetCurrentMediaType(IMFMediaTypeHandler *iface,
        IMFMediaType *media_type)
{
    struct audio_renderer *renderer =
impl_from_IMFMediaTypeHandler(iface);
    const unsigned int test_flags = MF_MEDIATYPE_EQUAL_MAJOR_TYPES | MF_MEDIATYPE_EQUAL_FORMAT_TYPES;
    BOOL compare_result;
    unsigned int flags;
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, media_type);

    if (!media_type)
        return E_POINTER;

    EnterCriticalSection(&renderer->cs);

    /* Major type and format type must both match the device format. */
    if (SUCCEEDED(IMFMediaType_IsEqual(renderer->media_type, media_type, &flags))
            && ((flags & test_flags) == test_flags))
    {
        if (renderer->current_media_type)
            IMFMediaType_Release(renderer->current_media_type);
        renderer->current_media_type = media_type;
        IMFMediaType_AddRef(renderer->current_media_type);

        if (SUCCEEDED(hr = audio_renderer_create_audio_client(renderer)))
        {
            /* Attributes did not fully match: tell the pipeline the format
             * was invalidated and preroll again. */
            if (SUCCEEDED(IMFMediaType_Compare(renderer->media_type, (IMFAttributes *)media_type,
                    MF_ATTRIBUTES_MATCH_OUR_ITEMS, &compare_result)) && !compare_result)
            {
                IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkFormatInvalidated,
                        &GUID_NULL, S_OK, NULL);
                audio_renderer_preroll(renderer);
            }
        }
    }
    else
        hr = MF_E_INVALIDMEDIATYPE;

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

/* IMFMediaTypeHandler::GetCurrentMediaType - fails until SetCurrentMediaType
 * has succeeded once. */
static HRESULT WINAPI audio_renderer_stream_type_handler_GetCurrentMediaType(IMFMediaTypeHandler *iface,
        IMFMediaType **media_type)
{
    struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface);
    HRESULT hr = S_OK;

    TRACE("%p, %p.\n", iface, media_type);

    EnterCriticalSection(&renderer->cs);

    if (renderer->current_media_type)
    {
        *media_type = renderer->current_media_type;
        IMFMediaType_AddRef(*media_type);
    }
    else
        hr = MF_E_NOT_INITIALIZED;

    LeaveCriticalSection(&renderer->cs);

    return hr;
}

/* IMFMediaTypeHandler::GetMajorType - this sink always renders audio. */
static HRESULT WINAPI audio_renderer_stream_type_handler_GetMajorType(IMFMediaTypeHandler *iface, GUID *type)
{
    struct audio_renderer *renderer = impl_from_IMFMediaTypeHandler(iface);

    TRACE("%p, %p.\n", iface, type);

    if (!type)
        return E_POINTER;

    if (renderer->flags & SAR_SHUT_DOWN)
        return MF_E_STREAMSINK_REMOVED;

    memcpy(type, &MFMediaType_Audio, sizeof(*type));

    return S_OK;
}

static const IMFMediaTypeHandlerVtbl
audio_renderer_stream_type_handler_vtbl =
{
    audio_renderer_stream_type_handler_QueryInterface,
    audio_renderer_stream_type_handler_AddRef,
    audio_renderer_stream_type_handler_Release,
    audio_renderer_stream_type_handler_IsMediaTypeSupported,
    audio_renderer_stream_type_handler_GetMediaTypeCount,
    audio_renderer_stream_type_handler_GetMediaTypeByIndex,
    audio_renderer_stream_type_handler_SetCurrentMediaType,
    audio_renderer_stream_type_handler_GetCurrentMediaType,
    audio_renderer_stream_type_handler_GetMajorType,
};

/* Build renderer->media_type from the device mix format; this is the single
 * media type the handler advertises. */
static HRESULT audio_renderer_collect_supported_types(struct audio_renderer *renderer)
{
    IAudioClient *client;
    WAVEFORMATEX *format;
    HRESULT hr;

    if (FAILED(hr = MFCreateMediaType(&renderer->media_type)))
        return hr;

    /* A temporary client is activated purely to query the mix format. */
    hr = IMMDevice_Activate(renderer->device, &IID_IAudioClient, CLSCTX_INPROC_SERVER, NULL, (void **)&client);
    if (FAILED(hr))
    {
        WARN("Failed to create audio client, hr %#x.\n", hr);
        return hr;
    }

    /* FIXME: */
    hr = IAudioClient_GetMixFormat(client, &format);
    IAudioClient_Release(client);
    if (FAILED(hr))
    {
        WARN("Failed to get device audio format, hr %#x.\n", hr);
        return hr;
    }

    hr = MFInitMediaTypeFromWaveFormatEx(renderer->media_type, format, format->cbSize + sizeof(*format));
    CoTaskMemFree(format);
    if (FAILED(hr))
    {
        WARN("Failed to initialize media type, hr %#x.\n", hr);
        return hr;
    }

    IMFMediaType_DeleteItem(renderer->media_type, &MF_MT_AUDIO_PREFER_WAVEFORMATEX);

    return hr;
}

/* Async callback driving the render loop; only IUnknown/IMFAsyncCallback are
 * exposed through it. */
static HRESULT WINAPI audio_renderer_render_callback_QueryInterface(IMFAsyncCallback *iface, REFIID riid, void **obj)
{
    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IMFAsyncCallback) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IMFAsyncCallback_AddRef(iface);
        return S_OK;
    }

    WARN("Unsupported interface %s.\n", debugstr_guid(riid));
    *obj = NULL;
    return E_NOINTERFACE;
}

/* The callback's lifetime is tied to the containing sink object. */
static ULONG WINAPI audio_renderer_render_callback_AddRef(IMFAsyncCallback *iface)
{
    struct audio_renderer *renderer = impl_from_render_callback_IMFAsyncCallback(iface);
    return
IMFMediaSink_AddRef(&renderer->IMFMediaSink_iface);
}

static ULONG WINAPI audio_renderer_render_callback_Release(IMFAsyncCallback *iface)
{
    struct audio_renderer *renderer = impl_from_render_callback_IMFAsyncCallback(iface);
    return IMFMediaSink_Release(&renderer->IMFMediaSink_iface);
}

static HRESULT WINAPI audio_renderer_render_callback_GetParameters(IMFAsyncCallback *iface, DWORD *flags, DWORD *queue)
{
    return E_NOTIMPL;
}

/* Drain as much queued data as the device buffer currently accepts.
 * Completed markers raise MEStreamSinkMarker; a partially consumed sample
 * stays at the queue head with frame_offset advanced.  Runs with
 * renderer->cs held (see the Invoke method below). */
static void audio_renderer_render(struct audio_renderer *renderer, IMFAsyncResult *result)
{
    unsigned int src_frames, dst_frames, max_frames, pad_frames, src_len;
    struct queued_object *obj, *obj2;
    BOOL keep_sample = FALSE;
    IMFMediaBuffer *buffer;
    BYTE *dst, *src;
    HRESULT hr;

    LIST_FOR_EACH_ENTRY_SAFE(obj, obj2, &renderer->queue, struct queued_object, entry)
    {
        if (obj->type == OBJECT_TYPE_MARKER)
        {
            IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkMarker,
                    &GUID_NULL, S_OK, &obj->u.marker.context);
        }
        else if (obj->type == OBJECT_TYPE_SAMPLE)
        {
            if (SUCCEEDED(IMFSample_ConvertToContiguousBuffer(obj->u.sample.sample, &buffer)))
            {
                if (SUCCEEDED(IMFMediaBuffer_Lock(buffer, &src, NULL, &src_len)))
                {
                    if ((src_frames = src_len / renderer->frame_size))
                    {
                        if (SUCCEEDED(IAudioClient_GetBufferSize(renderer->audio_client, &max_frames)))
                        {
                            if (SUCCEEDED(IAudioClient_GetCurrentPadding(renderer->audio_client, &pad_frames)))
                            {
                                /* Free space = total device buffer minus frames still pending on it. */
                                max_frames -= pad_frames;
                                src_frames -= obj->u.sample.frame_offset;
                                dst_frames = min(src_frames, max_frames);

                                if (SUCCEEDED(hr = IAudioRenderClient_GetBuffer(renderer->audio_render_client, dst_frames, &dst)))
                                {
                                    memcpy(dst, src + obj->u.sample.frame_offset * renderer->frame_size,
                                            dst_frames * renderer->frame_size);

                                    IAudioRenderClient_ReleaseBuffer(renderer->audio_render_client, dst_frames, 0);

                                    obj->u.sample.frame_offset += dst_frames;
                                }

                                /* Keep the sample queued if the copy failed or only part of it fit. */
                                keep_sample = FAILED(hr) || src_frames > max_frames;
                            }
                        }
                    }
                    IMFMediaBuffer_Unlock(buffer);
                }
                IMFMediaBuffer_Release(buffer);
            }
        }

        if (keep_sample)
            break;

        list_remove(&obj->entry);
release_pending_object(obj);
    }

    /* Queue drained: ask the pipeline for another sample, but only once until
     * ProcessSample clears SAR_SAMPLE_REQUESTED again. */
    if (list_empty(&renderer->queue) && !(renderer->flags & SAR_SAMPLE_REQUESTED))
    {
        IMFMediaEventQueue_QueueEventParamVar(renderer->stream_event_queue, MEStreamSinkRequestSample,
                &GUID_NULL, S_OK, NULL);
        renderer->flags |= SAR_SAMPLE_REQUESTED;
    }

    /* Re-arm the wait on the device's buffer-ready event. */
    if (FAILED(hr = MFPutWaitingWorkItem(renderer->buffer_ready_event, 0, result, &renderer->buffer_ready_key)))
        WARN("Failed to submit wait item, hr %#x.\n", hr);
}

/* Buffer-ready callback: render under the sink lock unless shut down. */
static HRESULT WINAPI audio_renderer_render_callback_Invoke(IMFAsyncCallback *iface, IMFAsyncResult *result)
{
    struct audio_renderer *renderer = impl_from_render_callback_IMFAsyncCallback(iface);

    EnterCriticalSection(&renderer->cs);
    if (!(renderer->flags & SAR_SHUT_DOWN))
        audio_renderer_render(renderer, result);
    LeaveCriticalSection(&renderer->cs);

    return S_OK;
}

static const IMFAsyncCallbackVtbl audio_renderer_render_callback_vtbl =
{
    audio_renderer_render_callback_QueryInterface,
    audio_renderer_render_callback_AddRef,
    audio_renderer_render_callback_Release,
    audio_renderer_render_callback_GetParameters,
    audio_renderer_render_callback_Invoke,
};

/* Activation-object factory: build the SAR instance and wire up all its
 * interface vtables.  On failure the partially built object is torn down
 * through the normal Release path. */
static HRESULT sar_create_object(IMFAttributes *attributes, void *user_context, IUnknown **obj)
{
    struct audio_renderer *renderer;
    HRESULT hr;

    TRACE("%p, %p, %p.\n", attributes, user_context, obj);

    if (!(renderer = calloc(1, sizeof(*renderer))))
        return E_OUTOFMEMORY;

    renderer->IMFMediaSink_iface.lpVtbl = &audio_renderer_sink_vtbl;
    renderer->IMFMediaSinkPreroll_iface.lpVtbl = &audio_renderer_preroll_vtbl;
    renderer->IMFStreamSink_iface.lpVtbl = &audio_renderer_stream_vtbl;
    renderer->IMFMediaTypeHandler_iface.lpVtbl = &audio_renderer_stream_type_handler_vtbl;
    renderer->IMFClockStateSink_iface.lpVtbl = &audio_renderer_clock_sink_vtbl;
    renderer->IMFMediaEventGenerator_iface.lpVtbl = &audio_renderer_events_vtbl;
    renderer->IMFGetService_iface.lpVtbl = &audio_renderer_get_service_vtbl;
    renderer->IMFSimpleAudioVolume_iface.lpVtbl = &audio_renderer_simple_volume_vtbl;
renderer->IMFAudioStreamVolume_iface.lpVtbl = &audio_renderer_stream_volume_vtbl;
    renderer->IMFAudioPolicy_iface.lpVtbl = &audio_renderer_policy_vtbl;
    renderer->render_callback.lpVtbl = &audio_renderer_render_callback_vtbl;
    renderer->refcount = 1;
    InitializeCriticalSection(&renderer->cs);
    /* Auto-reset, initially unsignalled; handed to IAudioClient_SetEventHandle. */
    renderer->buffer_ready_event = CreateEventW(NULL, FALSE, FALSE, NULL);
    list_init(&renderer->queue);

    if (FAILED(hr = MFCreateEventQueue(&renderer->event_queue)))
        goto failed;

    if (FAILED(hr = MFCreateEventQueue(&renderer->stream_event_queue)))
        goto failed;

    if (FAILED(hr = sar_create_mmdevice(attributes, renderer)))
        goto failed;

    if (FAILED(hr = audio_renderer_collect_supported_types(renderer)))
        goto failed;

    *obj = (IUnknown *)&renderer->IMFMediaSink_iface;

    return S_OK;

failed:
    IMFMediaSink_Release(&renderer->IMFMediaSink_iface);

    return hr;
}

/* Shut the sink down when its activation object is shut down. */
static void sar_shutdown_object(void *user_context, IUnknown *obj)
{
    IMFMediaSink *sink;

    if (SUCCEEDED(IUnknown_QueryInterface(obj, &IID_IMFMediaSink, (void **)&sink)))
    {
        IMFMediaSink_Shutdown(sink);
        IMFMediaSink_Release(sink);
    }
}

static const struct activate_funcs sar_activate_funcs =
{
    .create_object = sar_create_object,
    .shutdown_object = sar_shutdown_object,
};

/***********************************************************************
 *      MFCreateAudioRendererActivate (mf.@)
 */
HRESULT WINAPI MFCreateAudioRendererActivate(IMFActivate **activate)
{
    TRACE("%p.\n", activate);

    if (!activate)
        return E_POINTER;

    return create_activation_object(NULL, &sar_activate_funcs, activate);
}

/***********************************************************************
 *      MFCreateAudioRenderer (mf.@)
 */
HRESULT WINAPI MFCreateAudioRenderer(IMFAttributes *attributes, IMFMediaSink **sink)
{
    IUnknown *object;
    HRESULT hr;

    TRACE("%p, %p.\n", attributes, sink);

    if (SUCCEEDED(hr = sar_create_object(attributes, NULL, &object)))
    {
        hr = IUnknown_QueryInterface(object, &IID_IMFMediaSink, (void **)sink);
        IUnknown_Release(object);
    }

    return hr;
}
556657.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> #include <ctype.h> void Softmax(float mem_block2_dense[1000], float bd3[1000], int class[1000], float result[1000]){ #pragma HLS INTERFACE s_axilite port=mem_block2_dense bundle=INPUT #pragma HLS INTERFACE s_axilite port=bd3 bundle=parameter #pragma HLS INTERFACE s_axilite port=class bundle=OUTPUT #pragma HLS INTERFACE s_axilite port=result bundle=OUTPUT #pragma HLS INTERFACE s_axilite port=return bundle=CONTROL int i; int counter =0; //softmax(); float max_val, sum; max_val = mem_block2_dense[0]; for (i = 1; i < 1000; i++) { #pragma HLS PIPELINE II=1 if (mem_block2_dense[i] > max_val) max_val = mem_block2_dense[i]; } sum = 0.0; for (i = 0; i < 1000; i++) { #pragma HLS PIPELINE II=2 mem_block2_dense[i] = exp(mem_block2_dense[i] - max_val); sum += mem_block2_dense[i]; } for (i = 0; i < 1000; i++) { #pragma HLS PIPELINE II=1 mem_block2_dense[i] /= sum; } for (i = 0; i < 1000; i++) { #pragma HLS PIPELINE II=2 if (mem_block2_dense[i]) { class[i] = counter; result[i] = mem_block2_dense[i]; } counter = counter + 1; } return; }
571111.c
/* OpenGL loader generated by glad 0.1.24a0 on Mon Jul 9 17:07:15 2018. Language/Generator: C/C++ Specification: gl APIs: gl=4.6 Profile: compatibility Extensions: Loader: True Local files: False Omit khrplatform: False Commandline: --profile="compatibility" --api="gl=4.6" --generator="c" --spec="gl" --extensions="" Online: http://glad.dav1d.de/#profile=compatibility&language=c&specification=gl&loader=on&api=gl%3D4.6 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <glad/glad.h> static void* get_proc(const char *namez); #if defined(_WIN32) || defined(__CYGWIN__) #include <windows.h> static HMODULE libGL; typedef void* (APIENTRYP PFNWGLGETPROCADDRESSPROC_PRIVATE)(const char*); static PFNWGLGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; #ifdef _MSC_VER #ifdef __has_include #if __has_include(<winapifamily.h>) #define HAVE_WINAPIFAMILY 1 #endif #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_ #define HAVE_WINAPIFAMILY 1 #endif #endif #ifdef HAVE_WINAPIFAMILY #include <winapifamily.h> #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) #define IS_UWP 1 #endif #endif static int open_gl(void) { #ifndef IS_UWP libGL = LoadLibraryW(L"opengl32.dll"); if(libGL != NULL) { gladGetProcAddressPtr = (PFNWGLGETPROCADDRESSPROC_PRIVATE)GetProcAddress( libGL, "wglGetProcAddress"); return gladGetProcAddressPtr != NULL; } #endif return 0; } static void close_gl(void) { if(libGL != NULL) { FreeLibrary((HMODULE) libGL); libGL = NULL; } } #else #include <dlfcn.h> static void* libGL; #ifndef __APPLE__ typedef void* (APIENTRYP PFNGLXGETPROCADDRESSPROC_PRIVATE)(const char*); static PFNGLXGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; #endif static int open_gl(void) { #ifdef __APPLE__ static const char *NAMES[] = { "../Frameworks/OpenGL.framework/OpenGL", "/Library/Frameworks/OpenGL.framework/OpenGL", "/System/Library/Frameworks/OpenGL.framework/OpenGL", 
"/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL"
    };
#else
    static const char *NAMES[] = {"libGL.so.1", "libGL.so"};
#endif

    unsigned int index = 0;

    for(index = 0; index < (sizeof(NAMES) / sizeof(NAMES[0])); index++) {
        libGL = dlopen(NAMES[index], RTLD_NOW | RTLD_GLOBAL);

        if(libGL != NULL) {
#ifdef __APPLE__
            /* macOS: no glXGetProcAddress; plain dlsym lookups suffice. */
            return 1;
#else
            gladGetProcAddressPtr = (PFNGLXGETPROCADDRESSPROC_PRIVATE)dlsym(libGL,
                "glXGetProcAddressARB");
            return gladGetProcAddressPtr != NULL;
#endif
        }
    }

    return 0;
}

static void close_gl(void) {
    if(libGL != NULL) {
        dlclose(libGL);
        libGL = NULL;
    }
}
#endif

/* Look an entry point up via the platform getProcAddress first, then fall
 * back to a plain symbol lookup in the loaded library. */
static void* get_proc(const char *namez) {
    void* result = NULL;
    if(libGL == NULL) return NULL;

#ifndef __APPLE__
    if(gladGetProcAddressPtr != NULL) {
        result = gladGetProcAddressPtr(namez);
    }
#endif
    if(result == NULL) {
#if defined(_WIN32) || defined(__CYGWIN__)
        result = (void*)GetProcAddress((HMODULE) libGL, namez);
#else
        result = dlsym(libGL, namez);
#endif
    }

    return result;
}

/* Convenience loader: open the system GL library, resolve every pointer via
 * gladLoadGLLoader, close the library again.  Non-zero on success. */
int gladLoadGL(void) {
    int status = 0;

    if(open_gl()) {
        status = gladLoadGLLoader(&get_proc);
        close_gl();
    }

    return status;
}

struct gladGLversionStruct GLVersion;

#if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0)
#define _GLAD_IS_SOME_NEW_VERSION 1
#endif

static int max_loaded_major;
static int max_loaded_minor;

/* Extension bookkeeping: legacy single string (GL < 3.0) vs. indexed list. */
static const char *exts = NULL;
static int num_exts_i = 0;
static char **exts_i = NULL;

static int get_exts(void) {
#ifdef _GLAD_IS_SOME_NEW_VERSION
    if(max_loaded_major < 3) {
#endif
        exts = (const char *)glGetString(GL_EXTENSIONS);
#ifdef _GLAD_IS_SOME_NEW_VERSION
    } else {
        unsigned int index;

        num_exts_i = 0;
        glGetIntegerv(GL_NUM_EXTENSIONS, &num_exts_i);
        if (num_exts_i > 0) {
            exts_i = (char **)realloc((void *)exts_i, (size_t)num_exts_i * (sizeof *exts_i));
        }

        if (exts_i == NULL) {
            return 0;
        }

        /* Copy each extension name so it survives later GL calls. */
        for(index = 0; index < (unsigned)num_exts_i; index++) {
            const char *gl_str_tmp = (const char*)glGetStringi(GL_EXTENSIONS, index);
            size_t len = strlen(gl_str_tmp);

            char *local_str = (char*)malloc((len+1) *
sizeof(char));
            if(local_str != NULL) {
#if _MSC_VER >= 1400
                strncpy_s(local_str, len+1, gl_str_tmp, len);
#else
                strncpy(local_str, gl_str_tmp, len+1);
#endif
            }

            exts_i[index] = local_str;
        }
    }
#endif
    return 1;
}

static void free_exts(void) {
    if (exts_i != NULL) {
        int index;
        for(index = 0; index < num_exts_i; index++) {
            free((char *)exts_i[index]);
        }
        free((void *)exts_i);
        exts_i = NULL;
    }
}

/* Test whether extension `ext` is advertised, handling both the legacy
 * space-separated extension string and the GL3+ indexed list. */
static int has_ext(const char *ext) {
#ifdef _GLAD_IS_SOME_NEW_VERSION
    if(max_loaded_major < 3) {
#endif
        const char *extensions;
        const char *loc;
        const char *terminator;
        extensions = exts;
        if(extensions == NULL || ext == NULL) {
            return 0;
        }

        while(1) {
            loc = strstr(extensions, ext);
            if(loc == NULL) {
                return 0;
            }

            terminator = loc + strlen(ext);
            /* Whole-word match only: `ext` must be delimited by spaces or
             * string boundaries, not be a substring of a longer name. */
            if((loc == extensions || *(loc - 1) == ' ') &&
                (*terminator == ' ' || *terminator == '\0')) {
                return 1;
            }
            extensions = terminator;
        }
#ifdef _GLAD_IS_SOME_NEW_VERSION
    } else {
        int index;
        if(exts_i == NULL) return 0;
        for(index = 0; index < num_exts_i; index++) {
            const char *e = exts_i[index];

            if(exts_i[index] != NULL && strcmp(e, ext) == 0) {
                return 1;
            }
        }
    }
#endif

    return 0;
}

/* Generated per-version availability flags and function-pointer storage,
 * filled in by the loader. */
int GLAD_GL_VERSION_1_0;
int GLAD_GL_VERSION_1_1;
int GLAD_GL_VERSION_1_2;
int GLAD_GL_VERSION_1_3;
int GLAD_GL_VERSION_1_4;
int GLAD_GL_VERSION_1_5;
int GLAD_GL_VERSION_2_0;
int GLAD_GL_VERSION_2_1;
int GLAD_GL_VERSION_3_0;
int GLAD_GL_VERSION_3_1;
int GLAD_GL_VERSION_3_2;
int GLAD_GL_VERSION_3_3;
int GLAD_GL_VERSION_4_0;
int GLAD_GL_VERSION_4_1;
int GLAD_GL_VERSION_4_2;
int GLAD_GL_VERSION_4_3;
int GLAD_GL_VERSION_4_4;
int GLAD_GL_VERSION_4_5;
int GLAD_GL_VERSION_4_6;
PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D;
PFNGLTEXTUREPARAMETERFPROC glad_glTextureParameterf;
PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui;
PFNGLVERTEXARRAYELEMENTBUFFERPROC glad_glVertexArrayElementBuffer;
PFNGLWINDOWPOS2SPROC glad_glWindowPos2s;
PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC glad_glTextureStorage3DMultisample;
PFNGLTEXTUREPARAMETERFVPROC glad_glTextureParameterfv;
PFNGLWINDOWPOS2IPROC
glad_glWindowPos2i; PFNGLWINDOWPOS2FPROC glad_glWindowPos2f; PFNGLWINDOWPOS2DPROC glad_glWindowPos2d; PFNGLVERTEX2FVPROC glad_glVertex2fv; PFNGLINDEXIPROC glad_glIndexi; PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer; PFNGLUNIFORMSUBROUTINESUIVPROC glad_glUniformSubroutinesuiv; PFNGLRECTDVPROC glad_glRectdv; PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D; PFNGLEVALCOORD2DPROC glad_glEvalCoord2d; PFNGLEVALCOORD2FPROC glad_glEvalCoord2f; PFNGLGETDOUBLEI_VPROC glad_glGetDoublei_v; PFNGLINDEXDPROC glad_glIndexd; PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv; PFNGLINDEXFPROC glad_glIndexf; PFNGLBINDSAMPLERPROC glad_glBindSampler; PFNGLLINEWIDTHPROC glad_glLineWidth; PFNGLCOLORP3UIVPROC glad_glColorP3uiv; PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v; PFNGLGETMAPFVPROC glad_glGetMapfv; PFNGLINDEXSPROC glad_glIndexs; PFNGLCOMPILESHADERPROC glad_glCompileShader; PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying; PFNGLWINDOWPOS2IVPROC glad_glWindowPos2iv; PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC glad_glDrawTransformFeedbackStreamInstanced; PFNGLINDEXFVPROC glad_glIndexfv; PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC glad_glGetCompressedTextureImage; PFNGLGETNMAPFVPROC glad_glGetnMapfv; PFNGLFOGIVPROC glad_glFogiv; PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate; PFNGLRASTERPOS2FVPROC glad_glRasterPos2fv; PFNGLLIGHTMODELIVPROC glad_glLightModeliv; PFNGLDEPTHRANGEFPROC glad_glDepthRangef; PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC glad_glMultiDrawElementsIndirectCount; PFNGLCOLOR4UIPROC glad_glColor4ui; PFNGLSECONDARYCOLOR3FVPROC glad_glSecondaryColor3fv; PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui; PFNGLMEMORYBARRIERBYREGIONPROC glad_glMemoryBarrierByRegion; PFNGLGETNAMEDBUFFERPARAMETERIVPROC glad_glGetNamedBufferParameteriv; PFNGLFOGFVPROC glad_glFogfv; PFNGLVERTEXP4UIPROC glad_glVertexP4ui; PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC glad_glDrawElementsInstancedBaseInstance; PFNGLENABLEIPROC glad_glEnablei; 
PFNGLPROGRAMUNIFORM3DVPROC glad_glProgramUniform3dv; PFNGLVERTEX4IVPROC glad_glVertex4iv; PFNGLEVALCOORD1FVPROC glad_glEvalCoord1fv; PFNGLWINDOWPOS2SVPROC glad_glWindowPos2sv; PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui; PFNGLCREATESHADERPROC glad_glCreateShader; PFNGLISBUFFERPROC glad_glIsBuffer; PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv; PFNGLPROGRAMUNIFORMMATRIX2DVPROC glad_glProgramUniformMatrix2dv; PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers; PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D; PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D; PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f; PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate; PFNGLVERTEX4FVPROC glad_glVertex4fv; PFNGLMINSAMPLESHADINGPROC glad_glMinSampleShading; PFNGLCLEARNAMEDFRAMEBUFFERFIPROC glad_glClearNamedFramebufferfi; PFNGLGETQUERYBUFFEROBJECTUIVPROC glad_glGetQueryBufferObjectuiv; PFNGLBINDTEXTUREPROC glad_glBindTexture; PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s; PFNGLTEXCOORD2FVPROC glad_glTexCoord2fv; PFNGLSAMPLEMASKIPROC glad_glSampleMaski; PFNGLVERTEXP2UIPROC glad_glVertexP2ui; PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex; PFNGLTEXCOORD4FVPROC glad_glTexCoord4fv; PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv; PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl; PFNGLPOINTSIZEPROC glad_glPointSize; PFNGLBINDTEXTUREUNITPROC glad_glBindTextureUnit; PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv; PFNGLDELETEPROGRAMPROC glad_glDeleteProgram; PFNGLCOLOR4BVPROC glad_glColor4bv; PFNGLRASTERPOS2FPROC glad_glRasterPos2f; PFNGLRASTERPOS2DPROC glad_glRasterPos2d; PFNGLLOADIDENTITYPROC glad_glLoadIdentity; PFNGLRASTERPOS2IPROC glad_glRasterPos2i; PFNGLMULTIDRAWARRAYSINDIRECTPROC glad_glMultiDrawArraysIndirect; PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage; PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv; PFNGLCOLOR3BPROC glad_glColor3b; PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv; PFNGLEDGEFLAGPROC 
glad_glEdgeFlag; PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers; PFNGLVERTEX3DPROC glad_glVertex3d; PFNGLVERTEX3FPROC glad_glVertex3f; PFNGLGETNMAPIVPROC glad_glGetnMapiv; PFNGLVERTEX3IPROC glad_glVertex3i; PFNGLCOLOR3IPROC glad_glColor3i; PFNGLUNIFORM3DPROC glad_glUniform3d; PFNGLUNIFORM3FPROC glad_glUniform3f; PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv; PFNGLCOLOR3SPROC glad_glColor3s; PFNGLVERTEX3SPROC glad_glVertex3s; PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui; PFNGLCOLORMASKIPROC glad_glColorMaski; PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi; PFNGLDRAWARRAYSINDIRECTPROC glad_glDrawArraysIndirect; PFNGLTEXCOORD1IVPROC glad_glTexCoord1iv; PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer; PFNGLPAUSETRANSFORMFEEDBACKPROC glad_glPauseTransformFeedback; PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui; PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC glad_glProgramUniformMatrix3x2dv; PFNGLCOPYNAMEDBUFFERSUBDATAPROC glad_glCopyNamedBufferSubData; PFNGLNAMEDFRAMEBUFFERTEXTUREPROC glad_glNamedFramebufferTexture; PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC glad_glProgramUniformMatrix3x2fv; PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv; PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex; PFNGLVERTEXATTRIBL4DPROC glad_glVertexAttribL4d; PFNGLBINDIMAGETEXTUREPROC glad_glBindImageTexture; PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f; PFNGLPROGRAMUNIFORMMATRIX4FVPROC glad_glProgramUniformMatrix4fv; PFNGLVERTEX2IVPROC glad_glVertex2iv; PFNGLGETQUERYBUFFEROBJECTI64VPROC glad_glGetQueryBufferObjecti64v; PFNGLCOLOR3SVPROC glad_glColor3sv; PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv; PFNGLACTIVESHADERPROGRAMPROC glad_glActiveShaderProgram; PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv; PFNGLUNIFORMMATRIX3DVPROC glad_glUniformMatrix3dv; PFNGLNORMALPOINTERPROC glad_glNormalPointer; PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv; PFNGLVERTEX4SVPROC glad_glVertex4sv; PFNGLVERTEXARRAYATTRIBLFORMATPROC glad_glVertexArrayAttribLFormat; PFNGLINVALIDATEBUFFERSUBDATAPROC 
glad_glInvalidateBufferSubData; PFNGLPASSTHROUGHPROC glad_glPassThrough; PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui; PFNGLFOGIPROC glad_glFogi; PFNGLBEGINPROC glad_glBegin; PFNGLEVALCOORD2DVPROC glad_glEvalCoord2dv; PFNGLCOLOR3UBVPROC glad_glColor3ubv; PFNGLVERTEXPOINTERPROC glad_glVertexPointer; PFNGLSECONDARYCOLOR3UIVPROC glad_glSecondaryColor3uiv; PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers; PFNGLDRAWARRAYSPROC glad_glDrawArrays; PFNGLUNIFORM1UIPROC glad_glUniform1ui; PFNGLGETTRANSFORMFEEDBACKIVPROC glad_glGetTransformFeedbackiv; PFNGLMULTITEXCOORD1DPROC glad_glMultiTexCoord1d; PFNGLMULTITEXCOORD1FPROC glad_glMultiTexCoord1f; PFNGLPROGRAMPARAMETERIPROC glad_glProgramParameteri; PFNGLLIGHTFVPROC glad_glLightfv; PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui; PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d; PFNGLCLEARPROC glad_glClear; PFNGLMULTITEXCOORD1IPROC glad_glMultiTexCoord1i; PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName; PFNGLMEMORYBARRIERPROC glad_glMemoryBarrier; PFNGLGETGRAPHICSRESETSTATUSPROC glad_glGetGraphicsResetStatus; PFNGLMULTITEXCOORD1SPROC glad_glMultiTexCoord1s; PFNGLISENABLEDPROC glad_glIsEnabled; PFNGLSTENCILOPPROC glad_glStencilOp; PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv; PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D; PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv; PFNGLTRANSLATEFPROC glad_glTranslatef; PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub; PFNGLTRANSLATEDPROC glad_glTranslated; PFNGLTEXCOORD3SVPROC glad_glTexCoord3sv; PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation; PFNGLGETTEXTUREPARAMETERIIVPROC glad_glGetTextureParameterIiv; PFNGLTEXIMAGE1DPROC glad_glTexImage1D; PFNGLCOPYTEXTURESUBIMAGE3DPROC glad_glCopyTextureSubImage3D; PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv; PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv; PFNGLVERTEXARRAYATTRIBIFORMATPROC glad_glVertexArrayAttribIFormat; PFNGLSECONDARYCOLOR3BVPROC glad_glSecondaryColor3bv; 
/*
 * Storage for OpenGL entry-point function pointers (glad_gl* globals).
 *
 * NOTE(review): this block appears to be glad-generated loader output (the
 * PFNGL...PROC typedefs and glad_ prefix match the glad GL loader); the
 * pointers are presumably assigned at runtime by the glad loading routine
 * (e.g. gladLoadGL / gladLoadGLLoader) — confirm against the generator
 * version used by this project. Do not edit these definitions by hand;
 * regenerate with glad instead. Individual declarations intentionally span
 * physical line breaks (type on one line, variable on the next), which is
 * valid C.
 */
PFNGLGETMATERIALFVPROC glad_glGetMaterialfv; PFNGLGETTEXIMAGEPROC glad_glGetTexImage; PFNGLFOGCOORDFVPROC glad_glFogCoordfv; PFNGLPIXELMAPUIVPROC glad_glPixelMapuiv; PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog; PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v; PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers; PFNGLCREATETEXTURESPROC glad_glCreateTextures; PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC glad_glTransformFeedbackBufferBase; PFNGLINDEXSVPROC glad_glIndexsv; PFNGLCLEARTEXSUBIMAGEPROC glad_glClearTexSubImage; PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC glad_glProgramUniformMatrix3x4dv; PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders; PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer; PFNGLVERTEX3IVPROC glad_glVertex3iv; PFNGLBITMAPPROC glad_glBitmap; PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog; PFNGLPROGRAMUNIFORM1UIVPROC glad_glProgramUniform1uiv; PFNGLMATERIALIPROC glad_glMateriali; PFNGLISVERTEXARRAYPROC glad_glIsVertexArray; PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray; PFNGLPROGRAMUNIFORM2IVPROC glad_glProgramUniform2iv; PFNGLGETQUERYIVPROC glad_glGetQueryiv; PFNGLTEXCOORD4FPROC glad_glTexCoord4f; PFNGLBLITNAMEDFRAMEBUFFERPROC glad_glBlitNamedFramebuffer; PFNGLTEXCOORD4DPROC glad_glTexCoord4d; PFNGLCREATEQUERIESPROC glad_glCreateQueries; PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv; PFNGLTEXCOORD4IPROC glad_glTexCoord4i; PFNGLSHADERSTORAGEBLOCKBINDINGPROC glad_glShaderStorageBlockBinding; PFNGLMATERIALFPROC glad_glMaterialf; PFNGLTEXCOORD4SPROC glad_glTexCoord4s; PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC glad_glProgramUniformMatrix4x2dv; PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices; PFNGLISSHADERPROC glad_glIsShader; PFNGLMULTITEXCOORD2SPROC glad_glMultiTexCoord2s; PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv; PFNGLVERTEX3DVPROC glad_glVertex3dv; PFNGLGETINTEGER64VPROC glad_glGetInteger64v; PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv; PFNGLGETNMINMAXPROC glad_glGetnMinmax; PFNGLENABLEPROC 
glad_glEnable; PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv; PFNGLCOLOR4FVPROC glad_glColor4fv; PFNGLTEXCOORD1FVPROC glad_glTexCoord1fv; PFNGLVERTEXARRAYATTRIBBINDINGPROC glad_glVertexArrayAttribBinding; PFNGLTEXTURESTORAGE1DPROC glad_glTextureStorage1D; PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup; PFNGLBLENDEQUATIONIPROC glad_glBlendEquationi; PFNGLTEXCOORD2SVPROC glad_glTexCoord2sv; PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv; PFNGLMULTITEXCOORD1DVPROC glad_glMultiTexCoord1dv; PFNGLGETPROGRAMINTERFACEIVPROC glad_glGetProgramInterfaceiv; PFNGLMULTITEXCOORD2IPROC glad_glMultiTexCoord2i; PFNGLTEXCOORD3FVPROC glad_glTexCoord3fv; PFNGLSECONDARYCOLOR3USVPROC glad_glSecondaryColor3usv; PFNGLTEXGENFPROC glad_glTexGenf; PFNGLMAPNAMEDBUFFERPROC glad_glMapNamedBuffer; PFNGLMULTITEXCOORDP3UIVPROC glad_glMultiTexCoordP3uiv; PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui; PFNGLVERTEXATTRIBL1DVPROC glad_glVertexAttribL1dv; PFNGLTEXTUREBUFFERRANGEPROC glad_glTextureBufferRange; PFNGLGETNUNIFORMDVPROC glad_glGetnUniformdv; PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui; PFNGLPROGRAMUNIFORM3UIPROC glad_glProgramUniform3ui; PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC glad_glTransformFeedbackBufferRange; PFNGLGETPOINTERVPROC glad_glGetPointerv; PFNGLVERTEXBINDINGDIVISORPROC glad_glVertexBindingDivisor; PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset; PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv; PFNGLNORMAL3FVPROC glad_glNormal3fv; PFNGLSECONDARYCOLOR3SPROC glad_glSecondaryColor3s; PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC glad_glNamedFramebufferDrawBuffers; PFNGLDEPTHRANGEPROC glad_glDepthRange; PFNGLFRUSTUMPROC glad_glFrustum; PFNGLMULTITEXCOORD4SVPROC glad_glMultiTexCoord4sv; PFNGLVERTEXARRAYBINDINGDIVISORPROC glad_glVertexArrayBindingDivisor; PFNGLDRAWBUFFERPROC glad_glDrawBuffer; PFNGLPUSHMATRIXPROC glad_glPushMatrix; PFNGLGETNPIXELMAPUSVPROC glad_glGetnPixelMapusv; PFNGLRASTERPOS3FVPROC glad_glRasterPos3fv; PFNGLORTHOPROC glad_glOrtho; PFNGLDRAWELEMENTSINSTANCEDPROC 
glad_glDrawElementsInstanced; PFNGLWINDOWPOS3SVPROC glad_glWindowPos3sv; PFNGLVERTEXATTRIBL4DVPROC glad_glVertexAttribL4dv; PFNGLPROGRAMUNIFORM1IPROC glad_glProgramUniform1i; PFNGLUNIFORM2DVPROC glad_glUniform2dv; PFNGLPROGRAMUNIFORM1DPROC glad_glProgramUniform1d; PFNGLPROGRAMUNIFORM1FPROC glad_glProgramUniform1f; PFNGLCLEARINDEXPROC glad_glClearIndex; PFNGLMAP1DPROC glad_glMap1d; PFNGLMAP1FPROC glad_glMap1f; PFNGLFLUSHPROC glad_glFlush; PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv; PFNGLBEGINQUERYINDEXEDPROC glad_glBeginQueryIndexed; PFNGLPROGRAMUNIFORM3IVPROC glad_glProgramUniform3iv; PFNGLINDEXIVPROC glad_glIndexiv; PFNGLNAMEDRENDERBUFFERSTORAGEPROC glad_glNamedRenderbufferStorage; PFNGLRASTERPOS3SVPROC glad_glRasterPos3sv; PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv; PFNGLPIXELZOOMPROC glad_glPixelZoom; PFNGLPOLYGONOFFSETCLAMPPROC glad_glPolygonOffsetClamp; PFNGLFENCESYNCPROC glad_glFenceSync; PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays; PFNGLCOLORP3UIPROC glad_glColorP3ui; PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC glad_glDrawElementsInstancedBaseVertexBaseInstance; PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC glad_glTextureStorage2DMultisample; PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv; PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender; PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup; PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat; PFNGLVALIDATEPROGRAMPIPELINEPROC glad_glValidateProgramPipeline; PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex; PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv; PFNGLLIGHTIPROC glad_glLighti; PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv; PFNGLVERTEXARRAYVERTEXBUFFERPROC glad_glVertexArrayVertexBuffer; PFNGLLIGHTFPROC glad_glLightf; PFNGLBINDVERTEXBUFFERSPROC glad_glBindVertexBuffers; PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation; PFNGLTEXSTORAGE3DMULTISAMPLEPROC glad_glTexStorage3DMultisample; 
PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate; PFNGLDISABLEVERTEXARRAYATTRIBPROC glad_glDisableVertexArrayAttrib; PFNGLGENSAMPLERSPROC glad_glGenSamplers; PFNGLCLAMPCOLORPROC glad_glClampColor; PFNGLUNIFORM4IVPROC glad_glUniform4iv; PFNGLCLEARSTENCILPROC glad_glClearStencil; PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv; PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC glad_glGetNamedRenderbufferParameteriv; PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC glad_glDrawTransformFeedbackInstanced; PFNGLSPECIALIZESHADERPROC glad_glSpecializeShader; PFNGLMULTITEXCOORD3FVPROC glad_glMultiTexCoord3fv; PFNGLGETPIXELMAPUIVPROC glad_glGetPixelMapuiv; PFNGLGENTEXTURESPROC glad_glGenTextures; PFNGLTEXCOORD4IVPROC glad_glTexCoord4iv; PFNGLDRAWTRANSFORMFEEDBACKPROC glad_glDrawTransformFeedback; PFNGLUNIFORM1DVPROC glad_glUniform1dv; PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv; PFNGLGETTRANSFORMFEEDBACKI_VPROC glad_glGetTransformFeedbacki_v; PFNGLINDEXPOINTERPROC glad_glIndexPointer; PFNGLGETNPOLYGONSTIPPLEPROC glad_glGetnPolygonStipple; PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv; PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC glad_glClearNamedFramebufferuiv; PFNGLGETVERTEXARRAYINDEXEDIVPROC glad_glGetVertexArrayIndexediv; PFNGLISSYNCPROC glad_glIsSync; PFNGLVERTEX2FPROC glad_glVertex2f; PFNGLVERTEX2DPROC glad_glVertex2d; PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers; PFNGLUNIFORM2IPROC glad_glUniform2i; PFNGLMAPGRID2DPROC glad_glMapGrid2d; PFNGLMAPGRID2FPROC glad_glMapGrid2f; PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui; PFNGLVERTEX2IPROC glad_glVertex2i; PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer; PFNGLPROGRAMUNIFORM1UIPROC glad_glProgramUniform1ui; PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer; PFNGLVERTEX2SPROC glad_glVertex2s; PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel; PFNGLTEXTUREPARAMETERIPROC glad_glTextureParameteri; PFNGLNORMAL3BVPROC glad_glNormal3bv; PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv; 
PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange; PFNGLPROGRAMUNIFORM2FVPROC glad_glProgramUniform2fv; PFNGLUNIFORMMATRIX2X3DVPROC glad_glUniformMatrix2x3dv; PFNGLPROGRAMUNIFORMMATRIX4DVPROC glad_glProgramUniformMatrix4dv; PFNGLSECONDARYCOLOR3SVPROC glad_glSecondaryColor3sv; PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC glad_glProgramUniformMatrix2x4dv; PFNGLDISPATCHCOMPUTEPROC glad_glDispatchCompute; PFNGLVERTEX3SVPROC glad_glVertex3sv; PFNGLGENQUERIESPROC glad_glGenQueries; PFNGLGETPIXELMAPFVPROC glad_glGetPixelMapfv; PFNGLTEXENVFPROC glad_glTexEnvf; PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui; PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D; PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v; PFNGLFOGCOORDDPROC glad_glFogCoordd; PFNGLFOGCOORDFPROC glad_glFogCoordf; PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D; PFNGLTEXENVIPROC glad_glTexEnvi; PFNGLMULTITEXCOORD1IVPROC glad_glMultiTexCoord1iv; PFNGLISENABLEDIPROC glad_glIsEnabledi; PFNGLBINDBUFFERSRANGEPROC glad_glBindBuffersRange; PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui; PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i; PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed; PFNGLCOPYIMAGESUBDATAPROC glad_glCopyImageSubData; PFNGLMULTITEXCOORD2DVPROC glad_glMultiTexCoord2dv; PFNGLUNIFORM2IVPROC glad_glUniform2iv; PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv; PFNGLGETINTERNALFORMATIVPROC glad_glGetInternalformativ; PFNGLUNIFORM4UIVPROC glad_glUniform4uiv; PFNGLMATRIXMODEPROC glad_glMatrixMode; PFNGLGETTEXTUREIMAGEPROC glad_glGetTextureImage; PFNGLFEEDBACKBUFFERPROC glad_glFeedbackBuffer; PFNGLPROGRAMUNIFORM2DVPROC glad_glProgramUniform2dv; PFNGLENDQUERYINDEXEDPROC glad_glEndQueryIndexed; PFNGLGETMAPIVPROC glad_glGetMapiv; PFNGLTEXTURESUBIMAGE3DPROC glad_glTextureSubImage3D; PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D; PFNGLUNIFORM4DPROC glad_glUniform4d; PFNGLGETSHADERIVPROC glad_glGetShaderiv; PFNGLMULTITEXCOORD2DPROC glad_glMultiTexCoord2d; 
PFNGLMULTITEXCOORD2FPROC glad_glMultiTexCoord2f; PFNGLPROGRAMUNIFORMMATRIX3FVPROC glad_glProgramUniformMatrix3fv; PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel; PFNGLINVALIDATEFRAMEBUFFERPROC glad_glInvalidateFramebuffer; PFNGLBINDTEXTURESPROC glad_glBindTextures; PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation; PFNGLNAMEDBUFFERSTORAGEPROC glad_glNamedBufferStorage; PFNGLSCISSORARRAYVPROC glad_glScissorArrayv; PFNGLPRIORITIZETEXTURESPROC glad_glPrioritizeTextures; PFNGLCALLLISTPROC glad_glCallList; PFNGLPATCHPARAMETERFVPROC glad_glPatchParameterfv; PFNGLSECONDARYCOLOR3UBVPROC glad_glSecondaryColor3ubv; PFNGLGETDOUBLEVPROC glad_glGetDoublev; PFNGLMULTITEXCOORD3IVPROC glad_glMultiTexCoord3iv; PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d; PFNGLUNIFORM4DVPROC glad_glUniform4dv; PFNGLLIGHTMODELFPROC glad_glLightModelf; PFNGLGETUNIFORMIVPROC glad_glGetUniformiv; PFNGLINVALIDATEBUFFERDATAPROC glad_glInvalidateBufferData; PFNGLVERTEX2SVPROC glad_glVertex2sv; PFNGLVERTEXARRAYVERTEXBUFFERSPROC glad_glVertexArrayVertexBuffers; PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC glad_glCompressedTextureSubImage1D; PFNGLLIGHTMODELIPROC glad_glLightModeli; PFNGLWINDOWPOS3IVPROC glad_glWindowPos3iv; PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv; PFNGLUNIFORM3FVPROC glad_glUniform3fv; PFNGLPIXELSTOREIPROC glad_glPixelStorei; PFNGLGETPROGRAMPIPELINEINFOLOGPROC glad_glGetProgramPipelineInfoLog; PFNGLCALLLISTSPROC glad_glCallLists; PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC glad_glProgramUniformMatrix3x4fv; PFNGLINVALIDATESUBFRAMEBUFFERPROC glad_glInvalidateSubFramebuffer; PFNGLMAPBUFFERPROC glad_glMapBuffer; PFNGLSECONDARYCOLOR3DPROC glad_glSecondaryColor3d; PFNGLTEXCOORD3IPROC glad_glTexCoord3i; PFNGLMULTITEXCOORD4FVPROC glad_glMultiTexCoord4fv; PFNGLRASTERPOS3IPROC glad_glRasterPos3i; PFNGLSECONDARYCOLOR3BPROC glad_glSecondaryColor3b; PFNGLRASTERPOS3DPROC glad_glRasterPos3d; PFNGLRASTERPOS3FPROC glad_glRasterPos3f; PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D; 
PFNGLTEXCOORD3FPROC glad_glTexCoord3f; PFNGLDELETESYNCPROC glad_glDeleteSync; PFNGLTEXCOORD3DPROC glad_glTexCoord3d; PFNGLGETTRANSFORMFEEDBACKI64_VPROC glad_glGetTransformFeedbacki64_v; PFNGLUNIFORMMATRIX4DVPROC glad_glUniformMatrix4dv; PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample; PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv; PFNGLUNIFORMMATRIX4X2DVPROC glad_glUniformMatrix4x2dv; PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements; PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv; PFNGLTEXCOORD3SPROC glad_glTexCoord3s; PFNGLUNIFORM3IVPROC glad_glUniform3iv; PFNGLRASTERPOS3SPROC glad_glRasterPos3s; PFNGLPOLYGONMODEPROC glad_glPolygonMode; PFNGLDRAWBUFFERSPROC glad_glDrawBuffers; PFNGLGETNHISTOGRAMPROC glad_glGetnHistogram; PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv; PFNGLARETEXTURESRESIDENTPROC glad_glAreTexturesResident; PFNGLPROGRAMUNIFORM2DPROC glad_glProgramUniform2d; PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC glad_glProgramUniformMatrix4x3dv; PFNGLISLISTPROC glad_glIsList; PFNGLPROGRAMUNIFORM4IVPROC glad_glProgramUniform4iv; PFNGLRASTERPOS2SVPROC glad_glRasterPos2sv; PFNGLRASTERPOS4SVPROC glad_glRasterPos4sv; PFNGLCOLOR4SPROC glad_glColor4s; PFNGLGETPROGRAMBINARYPROC glad_glGetProgramBinary; PFNGLUSEPROGRAMPROC glad_glUseProgram; PFNGLLINESTIPPLEPROC glad_glLineStipple; PFNGLMULTITEXCOORD1SVPROC glad_glMultiTexCoord1sv; PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog; PFNGLCLEARTEXIMAGEPROC glad_glClearTexImage; PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv; PFNGLMULTITEXCOORD2IVPROC glad_glMultiTexCoord2iv; PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv; PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray; PFNGLCOLOR4BPROC glad_glColor4b; PFNGLSECONDARYCOLOR3FPROC glad_glSecondaryColor3f; PFNGLCOLOR4FPROC glad_glColor4f; PFNGLCOLOR4DPROC glad_glColor4d; PFNGLCOLOR4IPROC glad_glColor4i; PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv; PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC 
glad_glMultiDrawElementsBaseVertex; PFNGLVERTEXATTRIBLFORMATPROC glad_glVertexAttribLFormat; PFNGLRASTERPOS3IVPROC glad_glRasterPos3iv; PFNGLTEXTURESTORAGE2DPROC glad_glTextureStorage2D; PFNGLGENERATETEXTUREMIPMAPPROC glad_glGenerateTextureMipmap; PFNGLVERTEX2DVPROC glad_glVertex2dv; PFNGLTEXCOORD4SVPROC glad_glTexCoord4sv; PFNGLUNIFORM2UIVPROC glad_glUniform2uiv; PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D; PFNGLFINISHPROC glad_glFinish; PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed; PFNGLGETBOOLEANVPROC glad_glGetBooleanv; PFNGLDELETESHADERPROC glad_glDeleteShader; PFNGLDRAWELEMENTSPROC glad_glDrawElements; PFNGLGETINTERNALFORMATI64VPROC glad_glGetInternalformati64v; PFNGLRASTERPOS2SPROC glad_glRasterPos2s; PFNGLCOPYTEXTURESUBIMAGE1DPROC glad_glCopyTextureSubImage1D; PFNGLGETMAPDVPROC glad_glGetMapdv; PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv; PFNGLMATERIALFVPROC glad_glMaterialfv; PFNGLTEXTUREPARAMETERIUIVPROC glad_glTextureParameterIuiv; PFNGLVIEWPORTPROC glad_glViewport; PFNGLUNIFORM1UIVPROC glad_glUniform1uiv; PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings; PFNGLINDEXDVPROC glad_glIndexdv; PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D; PFNGLTEXCOORD3IVPROC glad_glTexCoord3iv; PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback; PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i; PFNGLINVALIDATETEXIMAGEPROC glad_glInvalidateTexImage; PFNGLVERTEXATTRIBFORMATPROC glad_glVertexAttribFormat; PFNGLCLEARDEPTHPROC glad_glClearDepth; PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv; PFNGLTEXPARAMETERFPROC glad_glTexParameterf; PFNGLVERTEXATTRIBBINDINGPROC glad_glVertexAttribBinding; PFNGLTEXPARAMETERIPROC glad_glTexParameteri; PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC glad_glGetActiveSubroutineUniformiv; PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource; PFNGLCREATETRANSFORMFEEDBACKSPROC glad_glCreateTransformFeedbacks; PFNGLGETNTEXIMAGEPROC glad_glGetnTexImage; PFNGLTEXBUFFERPROC glad_glTexBuffer; 
PFNGLPOPNAMEPROC glad_glPopName; PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram; PFNGLPIXELSTOREFPROC glad_glPixelStoref; PFNGLUNIFORM3UIVPROC glad_glUniform3uiv; PFNGLVIEWPORTINDEXEDFPROC glad_glViewportIndexedf; PFNGLRASTERPOS4FVPROC glad_glRasterPos4fv; PFNGLEVALCOORD1DVPROC glad_glEvalCoord1dv; PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv; PFNGLGENPROGRAMPIPELINESPROC glad_glGenProgramPipelines; PFNGLRECTIPROC glad_glRecti; PFNGLCOLOR4UBPROC glad_glColor4ub; PFNGLMULTTRANSPOSEMATRIXFPROC glad_glMultTransposeMatrixf; PFNGLRECTFPROC glad_glRectf; PFNGLRECTDPROC glad_glRectd; PFNGLNORMAL3SVPROC glad_glNormal3sv; PFNGLNEWLISTPROC glad_glNewList; PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC glad_glProgramUniformMatrix2x3dv; PFNGLCOLOR4USPROC glad_glColor4us; PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv; PFNGLLINKPROGRAMPROC glad_glLinkProgram; PFNGLHINTPROC glad_glHint; PFNGLRECTSPROC glad_glRects; PFNGLTEXCOORD2DVPROC glad_glTexCoord2dv; PFNGLRASTERPOS4IVPROC glad_glRasterPos4iv; PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel; PFNGLPROGRAMUNIFORM2FPROC glad_glProgramUniform2f; PFNGLGETSTRINGPROC glad_glGetString; PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv; PFNGLEDGEFLAGVPROC glad_glEdgeFlagv; PFNGLDETACHSHADERPROC glad_glDetachShader; PFNGLPROGRAMUNIFORM3IPROC glad_glProgramUniform3i; PFNGLSCALEFPROC glad_glScalef; PFNGLENDQUERYPROC glad_glEndQuery; PFNGLSCALEDPROC glad_glScaled; PFNGLEDGEFLAGPOINTERPROC glad_glEdgeFlagPointer; PFNGLFRAMEBUFFERPARAMETERIPROC glad_glFramebufferParameteri; PFNGLGETPROGRAMRESOURCENAMEPROC glad_glGetProgramResourceName; PFNGLUNIFORMMATRIX4X3DVPROC glad_glUniformMatrix4x3dv; PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv; PFNGLCOPYPIXELSPROC glad_glCopyPixels; PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui; PFNGLGETPROGRAMRESOURCELOCATIONPROC glad_glGetProgramResourceLocation; PFNGLPOPATTRIBPROC glad_glPopAttrib; PFNGLDELETETEXTURESPROC glad_glDeleteTextures; PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC 
glad_glGetActiveAtomicCounterBufferiv; PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate; PFNGLGETTEXTUREPARAMETERIVPROC glad_glGetTextureParameteriv; PFNGLDELETEQUERIESPROC glad_glDeleteQueries; PFNGLNORMALP3UIVPROC glad_glNormalP3uiv; PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f; PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d; PFNGLVIEWPORTINDEXEDFVPROC glad_glViewportIndexedfv; PFNGLINITNAMESPROC glad_glInitNames; PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v; PFNGLCOLOR3DVPROC glad_glColor3dv; PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i; PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv; PFNGLWAITSYNCPROC glad_glWaitSync; PFNGLCREATEVERTEXARRAYSPROC glad_glCreateVertexArrays; PFNGLPROGRAMUNIFORM1DVPROC glad_glProgramUniform1dv; PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s; PFNGLCOLORMATERIALPROC glad_glColorMaterial; PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage; PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri; PFNGLCLEARBUFFERSUBDATAPROC glad_glClearBufferSubData; PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf; PFNGLTEXSTORAGE1DPROC glad_glTexStorage1D; PFNGLUNIFORM1FPROC glad_glUniform1f; PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv; PFNGLUNIFORM1DPROC glad_glUniform1d; PFNGLRENDERMODEPROC glad_glRenderMode; PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage; PFNGLGETNCOMPRESSEDTEXIMAGEPROC glad_glGetnCompressedTexImage; PFNGLWINDOWPOS2DVPROC glad_glWindowPos2dv; PFNGLUNIFORM1IPROC glad_glUniform1i; PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib; PFNGLUNIFORM3IPROC glad_glUniform3i; PFNGLPIXELTRANSFERIPROC glad_glPixelTransferi; PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D; PFNGLDISABLEPROC glad_glDisable; PFNGLLOGICOPPROC glad_glLogicOp; PFNGLEVALPOINT2PROC glad_glEvalPoint2; PFNGLPIXELTRANSFERFPROC glad_glPixelTransferf; PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC glad_glMultiDrawArraysIndirectCount; PFNGLSECONDARYCOLOR3IPROC glad_glSecondaryColor3i; PFNGLPROGRAMUNIFORM4UIVPROC glad_glProgramUniform4uiv; 
PFNGLUNIFORM4UIPROC glad_glUniform4ui; PFNGLCOLOR3FPROC glad_glColor3f; PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC glad_glNamedFramebufferReadBuffer; PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer; PFNGLGETTEXENVFVPROC glad_glGetTexEnvfv; PFNGLRECTFVPROC glad_glRectfv; PFNGLCULLFACEPROC glad_glCullFace; PFNGLGETLIGHTFVPROC glad_glGetLightfv; PFNGLGETNUNIFORMIVPROC glad_glGetnUniformiv; PFNGLCOLOR3DPROC glad_glColor3d; PFNGLPROGRAMUNIFORM4IPROC glad_glProgramUniform4i; PFNGLTEXGENDPROC glad_glTexGend; PFNGLPROGRAMUNIFORM4FPROC glad_glProgramUniform4f; PFNGLTEXGENIPROC glad_glTexGeni; PFNGLPROGRAMUNIFORM4DPROC glad_glProgramUniform4d; PFNGLTEXTUREPARAMETERIIVPROC glad_glTextureParameterIiv; PFNGLMULTITEXCOORD3SPROC glad_glMultiTexCoord3s; PFNGLGETSTRINGIPROC glad_glGetStringi; PFNGLGETTEXTUREPARAMETERFVPROC glad_glGetTextureParameterfv; PFNGLTEXTURESUBIMAGE2DPROC glad_glTextureSubImage2D; PFNGLMULTITEXCOORD3IPROC glad_glMultiTexCoord3i; PFNGLMULTITEXCOORD3FPROC glad_glMultiTexCoord3f; PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC glad_glDrawTransformFeedbackStream; PFNGLMULTITEXCOORD3DPROC glad_glMultiTexCoord3d; PFNGLATTACHSHADERPROC glad_glAttachShader; PFNGLFOGCOORDDVPROC glad_glFogCoorddv; PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv; PFNGLGETTEXGENFVPROC glad_glGetTexGenfv; PFNGLQUERYCOUNTERPROC glad_glQueryCounter; PFNGLFOGCOORDPOINTERPROC glad_glFogCoordPointer; PFNGLPROGRAMUNIFORMMATRIX3DVPROC glad_glProgramUniformMatrix3dv; PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex; PFNGLSHADERBINARYPROC glad_glShaderBinary; PFNGLUNMAPNAMEDBUFFERPROC glad_glUnmapNamedBuffer; PFNGLGETNCOLORTABLEPROC glad_glGetnColorTable; PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D; PFNGLTEXGENIVPROC glad_glTexGeniv; PFNGLRASTERPOS2DVPROC glad_glRasterPos2dv; PFNGLSECONDARYCOLOR3DVPROC glad_glSecondaryColor3dv; PFNGLCLIENTACTIVETEXTUREPROC glad_glClientActiveTexture; PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glNamedRenderbufferStorageMultisample; 
PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv; PFNGLCLEARNAMEDBUFFERDATAPROC glad_glClearNamedBufferData; PFNGLSECONDARYCOLOR3USPROC glad_glSecondaryColor3us; PFNGLNORMALP3UIPROC glad_glNormalP3ui; PFNGLTEXENVFVPROC glad_glTexEnvfv; PFNGLREADBUFFERPROC glad_glReadBuffer; PFNGLVIEWPORTARRAYVPROC glad_glViewportArrayv; PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv; PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced; PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap; PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC glad_glCompressedTextureSubImage2D; PFNGLPROGRAMUNIFORMMATRIX2FVPROC glad_glProgramUniformMatrix2fv; PFNGLWINDOWPOS3FVPROC glad_glWindowPos3fv; PFNGLUNIFORMMATRIX3X4DVPROC glad_glUniformMatrix3x4dv; PFNGLLIGHTMODELFVPROC glad_glLightModelfv; PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv; PFNGLDELETELISTSPROC glad_glDeleteLists; PFNGLGETCLIPPLANEPROC glad_glGetClipPlane; PFNGLVERTEX4DVPROC glad_glVertex4dv; PFNGLTEXCOORD2DPROC glad_glTexCoord2d; PFNGLPOPMATRIXPROC glad_glPopMatrix; PFNGLTEXCOORD2FPROC glad_glTexCoord2f; PFNGLCOLOR4IVPROC glad_glColor4iv; PFNGLINDEXUBVPROC glad_glIndexubv; PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC glad_glCheckNamedFramebufferStatus; PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer; PFNGLTEXCOORD2IPROC glad_glTexCoord2i; PFNGLRASTERPOS4DPROC glad_glRasterPos4d; PFNGLRASTERPOS4FPROC glad_glRasterPos4f; PFNGLPROGRAMUNIFORM1IVPROC glad_glProgramUniform1iv; PFNGLGETVERTEXARRAYIVPROC glad_glGetVertexArrayiv; PFNGLCOPYTEXTURESUBIMAGE2DPROC glad_glCopyTextureSubImage2D; PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s; PFNGLTEXCOORD2SPROC glad_glTexCoord2s; PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer; PFNGLVERTEX3FVPROC glad_glVertex3fv; PFNGLTEXCOORD4DVPROC glad_glTexCoord4dv; PFNGLMATERIALIVPROC glad_glMaterialiv; PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv; PFNGLGETPROGRAMSTAGEIVPROC glad_glGetProgramStageiv; PFNGLISPROGRAMPROC glad_glIsProgram; PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv; PFNGLVERTEX4SPROC glad_glVertex4s; 
PFNGLUNIFORMMATRIX3X2DVPROC glad_glUniformMatrix3x2dv; PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv; PFNGLNORMAL3DVPROC glad_glNormal3dv; PFNGLISTRANSFORMFEEDBACKPROC glad_glIsTransformFeedback; PFNGLUNIFORM4IPROC glad_glUniform4i; PFNGLACTIVETEXTUREPROC glad_glActiveTexture; PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray; PFNGLROTATEDPROC glad_glRotated; PFNGLISPROGRAMPIPELINEPROC glad_glIsProgramPipeline; PFNGLROTATEFPROC glad_glRotatef; PFNGLVERTEX4IPROC glad_glVertex4i; PFNGLREADPIXELSPROC glad_glReadPixels; PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv; PFNGLLOADNAMEPROC glad_glLoadName; PFNGLUNIFORM4FPROC glad_glUniform4f; PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample; PFNGLCREATEPROGRAMPIPELINESPROC glad_glCreateProgramPipelines; PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays; PFNGLSHADEMODELPROC glad_glShadeModel; PFNGLMAPGRID1DPROC glad_glMapGrid1d; PFNGLGETUNIFORMFVPROC glad_glGetUniformfv; PFNGLMAPGRID1FPROC glad_glMapGrid1f; PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv; PFNGLVERTEXATTRIBLPOINTERPROC glad_glVertexAttribLPointer; PFNGLDISABLECLIENTSTATEPROC glad_glDisableClientState; PFNGLMULTITEXCOORD3SVPROC glad_glMultiTexCoord3sv; PFNGLGETNUNIFORMFVPROC glad_glGetnUniformfv; PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex; PFNGLVERTEXATTRIBL2DVPROC glad_glVertexAttribL2dv; PFNGLMULTIDRAWELEMENTSINDIRECTPROC glad_glMultiDrawElementsIndirect; PFNGLENABLEVERTEXARRAYATTRIBPROC glad_glEnableVertexArrayAttrib; PFNGLSECONDARYCOLORPOINTERPROC glad_glSecondaryColorPointer; PFNGLALPHAFUNCPROC glad_glAlphaFunc; PFNGLUNIFORM1IVPROC glad_glUniform1iv; PFNGLCREATESHADERPROGRAMVPROC glad_glCreateShaderProgramv; PFNGLGETACTIVESUBROUTINENAMEPROC glad_glGetActiveSubroutineName; PFNGLMULTITEXCOORD4IVPROC glad_glMultiTexCoord4iv; PFNGLVERTEXATTRIBL2DPROC glad_glVertexAttribL2d; PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv; PFNGLSTENCILFUNCPROC glad_glStencilFunc; 
PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC glad_glInvalidateNamedFramebufferData; PFNGLMULTITEXCOORD1FVPROC glad_glMultiTexCoord1fv; PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding; PFNGLCOLOR4UIVPROC glad_glColor4uiv; PFNGLRECTIVPROC glad_glRectiv; PFNGLCOLORP4UIPROC glad_glColorP4ui; PFNGLUSEPROGRAMSTAGESPROC glad_glUseProgramStages; PFNGLRASTERPOS3DVPROC glad_glRasterPos3dv; PFNGLEVALMESH2PROC glad_glEvalMesh2; PFNGLEVALMESH1PROC glad_glEvalMesh1; PFNGLTEXCOORDPOINTERPROC glad_glTexCoordPointer; PFNGLPROGRAMUNIFORM3FPROC glad_glProgramUniform3f; PFNGLPROGRAMUNIFORM3DPROC glad_glProgramUniform3d; PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv; PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv; PFNGLGETPROGRAMPIPELINEIVPROC glad_glGetProgramPipelineiv; PFNGLTEXSTORAGE3DPROC glad_glTexStorage3D; PFNGLEVALCOORD2FVPROC glad_glEvalCoord2fv; PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC glad_glNamedFramebufferDrawBuffer; PFNGLGETQUERYINDEXEDIVPROC glad_glGetQueryIndexediv; PFNGLCOLOR4UBVPROC glad_glColor4ubv; PFNGLLOADTRANSPOSEMATRIXDPROC glad_glLoadTransposeMatrixd; PFNGLLOADTRANSPOSEMATRIXFPROC glad_glLoadTransposeMatrixf; PFNGLTEXTUREPARAMETERIVPROC glad_glTextureParameteriv; PFNGLOBJECTLABELPROC glad_glObjectLabel; PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i; PFNGLRASTERPOS2IVPROC glad_glRasterPos2iv; PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData; PFNGLGETVERTEXATTRIBLDVPROC glad_glGetVertexAttribLdv; PFNGLGETNUNIFORMUIVPROC glad_glGetnUniformuiv; PFNGLGETQUERYBUFFEROBJECTIVPROC glad_glGetQueryBufferObjectiv; PFNGLTEXENVIVPROC glad_glTexEnviv; PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate; PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui; PFNGLGENBUFFERSPROC glad_glGenBuffers; PFNGLSELECTBUFFERPROC glad_glSelectBuffer; PFNGLGETSUBROUTINEINDEXPROC glad_glGetSubroutineIndex; PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv; PFNGLSCISSORINDEXEDVPROC glad_glScissorIndexedv; PFNGLPUSHATTRIBPROC glad_glPushAttrib; PFNGLVERTEXATTRIBIPOINTERPROC 
glad_glVertexAttribIPointer; PFNGLBLENDFUNCPROC glad_glBlendFunc; PFNGLCREATEPROGRAMPROC glad_glCreateProgram; PFNGLNAMEDBUFFERSUBDATAPROC glad_glNamedBufferSubData; PFNGLTEXIMAGE3DPROC glad_glTexImage3D; PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer; PFNGLCLEARNAMEDFRAMEBUFFERFVPROC glad_glClearNamedFramebufferfv; PFNGLLIGHTIVPROC glad_glLightiv; PFNGLGETNAMEDBUFFERSUBDATAPROC glad_glGetNamedBufferSubData; PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC glad_glCompressedTextureSubImage3D; PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex; PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC glad_glFlushMappedNamedBufferRange; PFNGLINVALIDATETEXSUBIMAGEPROC glad_glInvalidateTexSubImage; PFNGLTEXGENFVPROC glad_glTexGenfv; PFNGLGETTEXTUREPARAMETERIUIVPROC glad_glGetTextureParameterIuiv; PFNGLGETNCONVOLUTIONFILTERPROC glad_glGetnConvolutionFilter; PFNGLBINDIMAGETEXTURESPROC glad_glBindImageTextures; PFNGLENDPROC glad_glEnd; PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers; PFNGLBINDPROGRAMPIPELINEPROC glad_glBindProgramPipeline; PFNGLSCISSORPROC glad_glScissor; PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv; PFNGLCLIPPLANEPROC glad_glClipPlane; PFNGLPUSHNAMEPROC glad_glPushName; PFNGLTEXGENDVPROC glad_glTexGendv; PFNGLINDEXUBPROC glad_glIndexub; PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetNamedFramebufferAttachmentParameteriv; PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC glad_glNamedFramebufferRenderbuffer; PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv; PFNGLSECONDARYCOLOR3IVPROC glad_glSecondaryColor3iv; PFNGLRASTERPOS4IPROC glad_glRasterPos4i; PFNGLMULTTRANSPOSEMATRIXDPROC glad_glMultTransposeMatrixd; PFNGLCLEARCOLORPROC glad_glClearColor; PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv; PFNGLNORMAL3SPROC glad_glNormal3s; PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv; PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC glad_glProgramUniformMatrix2x3fv; PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv; PFNGLPOINTPARAMETERIPROC glad_glPointParameteri; PFNGLPROGRAMUNIFORM4DVPROC glad_glProgramUniform4dv; 
PFNGLCOLORP4UIVPROC glad_glColorP4uiv; PFNGLBLENDCOLORPROC glad_glBlendColor; PFNGLGETNPIXELMAPUIVPROC glad_glGetnPixelMapuiv; PFNGLGETTEXTURELEVELPARAMETERIVPROC glad_glGetTextureLevelParameteriv; PFNGLWINDOWPOS3DPROC glad_glWindowPos3d; PFNGLPROGRAMUNIFORM3FVPROC glad_glProgramUniform3fv; PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv; PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC glad_glGetNamedFramebufferParameteriv; PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv; PFNGLUNIFORM3UIPROC glad_glUniform3ui; PFNGLPROGRAMUNIFORM3UIVPROC glad_glProgramUniform3uiv; PFNGLCOLOR4DVPROC glad_glColor4dv; PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv; PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv; PFNGLRESUMETRANSFORMFEEDBACKPROC glad_glResumeTransformFeedback; PFNGLUNIFORM2FVPROC glad_glUniform2fv; PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC glad_glGetActiveSubroutineUniformName; PFNGLGETPROGRAMRESOURCEINDEXPROC glad_glGetProgramResourceIndex; PFNGLSECONDARYCOLOR3UBPROC glad_glSecondaryColor3ub; PFNGLDRAWELEMENTSINDIRECTPROC glad_glDrawElementsIndirect; PFNGLGETTEXTURELEVELPARAMETERFVPROC glad_glGetTextureLevelParameterfv; PFNGLSECONDARYCOLOR3UIPROC glad_glSecondaryColor3ui; PFNGLTEXCOORD3DVPROC glad_glTexCoord3dv; PFNGLGETNAMEDBUFFERPOINTERVPROC glad_glGetNamedBufferPointerv; PFNGLDISPATCHCOMPUTEINDIRECTPROC glad_glDispatchComputeIndirect; PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC glad_glInvalidateNamedFramebufferSubData; PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv; PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange; PFNGLNORMAL3IVPROC glad_glNormal3iv; PFNGLTEXTURESUBIMAGE1DPROC glad_glTextureSubImage1D; PFNGLVERTEXATTRIBL3DVPROC glad_glVertexAttribL3dv; PFNGLGETUNIFORMDVPROC glad_glGetUniformdv; PFNGLWINDOWPOS3SPROC glad_glWindowPos3s; PFNGLPOINTPARAMETERFPROC glad_glPointParameterf; PFNGLCLEARDEPTHFPROC glad_glClearDepthf; PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv; PFNGLWINDOWPOS3IPROC glad_glWindowPos3i; 
PFNGLMULTITEXCOORD4SPROC glad_glMultiTexCoord4s; PFNGLGETTEXTURESUBIMAGEPROC glad_glGetTextureSubImage; PFNGLWINDOWPOS3FPROC glad_glWindowPos3f; PFNGLGENTRANSFORMFEEDBACKSPROC glad_glGenTransformFeedbacks; PFNGLCOLOR3USPROC glad_glColor3us; PFNGLCOLOR3UIVPROC glad_glColor3uiv; PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv; PFNGLGETLIGHTIVPROC glad_glGetLightiv; PFNGLDEPTHFUNCPROC glad_glDepthFunc; PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D; PFNGLLISTBASEPROC glad_glListBase; PFNGLMULTITEXCOORD4FPROC glad_glMultiTexCoord4f; PFNGLCOLOR3UBPROC glad_glColor3ub; PFNGLMULTITEXCOORD4DPROC glad_glMultiTexCoord4d; PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv; PFNGLBLENDEQUATIONSEPARATEIPROC glad_glBlendEquationSeparatei; PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv; PFNGLCOLOR3UIPROC glad_glColor3ui; PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC glad_glGetProgramResourceLocationIndex; PFNGLMULTITEXCOORD4IPROC glad_glMultiTexCoord4i; PFNGLBUFFERSTORAGEPROC glad_glBufferStorage; PFNGLGETPOLYGONSTIPPLEPROC glad_glGetPolygonStipple; PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync; PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui; PFNGLGETFLOATI_VPROC glad_glGetFloati_v; PFNGLMULTITEXCOORD4DVPROC glad_glMultiTexCoord4dv; PFNGLCOLORMASKPROC glad_glColorMask; PFNGLTEXTUREBUFFERPROC glad_glTextureBuffer; PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv; PFNGLBLENDEQUATIONPROC glad_glBlendEquation; PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation; PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv; PFNGLVERTEXARRAYATTRIBFORMATPROC glad_glVertexArrayAttribFormat; PFNGLREADNPIXELSPROC glad_glReadnPixels; PFNGLRASTERPOS4SPROC glad_glRasterPos4s; PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback; PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv; PFNGLGETUNIFORMSUBROUTINEUIVPROC glad_glGetUniformSubroutineuiv; PFNGLMULTITEXCOORD3DVPROC glad_glMultiTexCoord3dv; PFNGLBINDVERTEXBUFFERPROC glad_glBindVertexBuffer; PFNGLCOLOR4SVPROC 
glad_glColor4sv; PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert; PFNGLCREATESAMPLERSPROC glad_glCreateSamplers; PFNGLPOPCLIENTATTRIBPROC glad_glPopClientAttrib; PFNGLCLEARBUFFERDATAPROC glad_glClearBufferData; PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback; PFNGLFOGFPROC glad_glFogf; PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv; PFNGLPROGRAMBINARYPROC glad_glProgramBinary; PFNGLISSAMPLERPROC glad_glIsSampler; PFNGLVERTEXP3UIPROC glad_glVertexP3ui; PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor; PFNGLBINDSAMPLERSPROC glad_glBindSamplers; PFNGLCOLOR3IVPROC glad_glColor3iv; PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D; PFNGLDELETETRANSFORMFEEDBACKSPROC glad_glDeleteTransformFeedbacks; PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D; PFNGLTEXCOORD1IPROC glad_glTexCoord1i; PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus; PFNGLTEXCOORD1DPROC glad_glTexCoord1d; PFNGLTEXCOORD1FPROC glad_glTexCoord1f; PFNGLTEXTURESTORAGE3DPROC glad_glTextureStorage3D; PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender; PFNGLENABLECLIENTSTATEPROC glad_glEnableClientState; PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation; PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv; PFNGLUNIFORMMATRIX2DVPROC glad_glUniformMatrix2dv; PFNGLBLENDFUNCIPROC glad_glBlendFunci; PFNGLMULTITEXCOORD2SVPROC glad_glMultiTexCoord2sv; PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv; PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements; PFNGLTEXCOORD1SPROC glad_glTexCoord1s; PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase; PFNGLBUFFERSUBDATAPROC glad_glBufferSubData; PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv; PFNGLGENLISTSPROC glad_glGenLists; PFNGLCOLOR3BVPROC glad_glColor3bv; PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange; PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture; PFNGLBLENDFUNCSEPARATEIPROC glad_glBlendFuncSeparatei; PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC glad_glProgramUniformMatrix4x2fv; PFNGLVERTEXATTRIBL1DPROC 
glad_glVertexAttribL1d; PFNGLGETTEXGENDVPROC glad_glGetTexGendv; PFNGLCLEARNAMEDFRAMEBUFFERIVPROC glad_glClearNamedFramebufferiv; PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays; PFNGLENDLISTPROC glad_glEndList; PFNGLSCISSORINDEXEDPROC glad_glScissorIndexed; PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv; PFNGLUNIFORM2UIPROC glad_glUniform2ui; PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv; PFNGLGETNMAPDVPROC glad_glGetnMapdv; PFNGLCOLOR3USVPROC glad_glColor3usv; PFNGLWINDOWPOS2FVPROC glad_glWindowPos2fv; PFNGLTEXTUREVIEWPROC glad_glTextureView; PFNGLDISABLEIPROC glad_glDisablei; PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC glad_glProgramUniformMatrix2x4fv; PFNGLCREATERENDERBUFFERSPROC glad_glCreateRenderbuffers; PFNGLINDEXMASKPROC glad_glIndexMask; PFNGLPUSHCLIENTATTRIBPROC glad_glPushClientAttrib; PFNGLSHADERSOURCEPROC glad_glShaderSource; PFNGLGETNSEPARABLEFILTERPROC glad_glGetnSeparableFilter; PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName; PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv; PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler; PFNGLVERTEXATTRIBIFORMATPROC glad_glVertexAttribIFormat; PFNGLCREATEFRAMEBUFFERSPROC glad_glCreateFramebuffers; PFNGLCLEARACCUMPROC glad_glClearAccum; PFNGLGETSYNCIVPROC glad_glGetSynciv; PFNGLPROGRAMUNIFORM2UIVPROC glad_glProgramUniform2uiv; PFNGLGETNPIXELMAPFVPROC glad_glGetnPixelMapfv; PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv; PFNGLPATCHPARAMETERIPROC glad_glPatchParameteri; PFNGLPROGRAMUNIFORM2IPROC glad_glProgramUniform2i; PFNGLUNIFORM2FPROC glad_glUniform2f; PFNGLGETNAMEDBUFFERPARAMETERI64VPROC glad_glGetNamedBufferParameteri64v; PFNGLBEGINQUERYPROC glad_glBeginQuery; PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex; PFNGLBINDBUFFERPROC glad_glBindBuffer; PFNGLMAP2DPROC glad_glMap2d; PFNGLMAP2FPROC glad_glMap2f; PFNGLTEXSTORAGE2DMULTISAMPLEPROC glad_glTexStorage2DMultisample; PFNGLUNIFORM2DPROC glad_glUniform2d; PFNGLVERTEX4DPROC glad_glVertex4d; PFNGLUNIFORMMATRIX2FVPROC 
glad_glUniformMatrix2fv; PFNGLTEXCOORD1SVPROC glad_glTexCoord1sv; PFNGLBUFFERDATAPROC glad_glBufferData; PFNGLEVALPOINT1PROC glad_glEvalPoint1; PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv; PFNGLGETQUERYBUFFEROBJECTUI64VPROC glad_glGetQueryBufferObjectui64v; PFNGLTEXCOORD1DVPROC glad_glTexCoord1dv; PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui; PFNGLGETERRORPROC glad_glGetError; PFNGLGETTEXENVIVPROC glad_glGetTexEnviv; PFNGLGETPROGRAMIVPROC glad_glGetProgramiv; PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui; PFNGLGETFLOATVPROC glad_glGetFloatv; PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D; PFNGLMULTITEXCOORD2FVPROC glad_glMultiTexCoord2fv; PFNGLUNIFORMMATRIX2X4DVPROC glad_glUniformMatrix2x4dv; PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv; PFNGLEVALCOORD1DPROC glad_glEvalCoord1d; PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv; PFNGLEVALCOORD1FPROC glad_glEvalCoord1f; PFNGLPIXELMAPFVPROC glad_glPixelMapfv; PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv; PFNGLGETPIXELMAPUSVPROC glad_glGetPixelMapusv; PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv; PFNGLGETINTEGERVPROC glad_glGetIntegerv; PFNGLACCUMPROC glad_glAccum; PFNGLGETVERTEXARRAYINDEXED64IVPROC glad_glGetVertexArrayIndexed64iv; PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv; PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv; PFNGLRASTERPOS4DVPROC glad_glRasterPos4dv; PFNGLPROGRAMUNIFORM4FVPROC glad_glProgramUniform4fv; PFNGLTEXCOORD2IVPROC glad_glTexCoord2iv; PFNGLTEXTUREBARRIERPROC glad_glTextureBarrier; PFNGLISQUERYPROC glad_glIsQuery; PFNGLPROGRAMUNIFORM2UIPROC glad_glProgramUniform2ui; PFNGLPROGRAMUNIFORM4UIPROC glad_glProgramUniform4ui; PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv; PFNGLWINDOWPOS3DVPROC glad_glWindowPos3dv; PFNGLTEXIMAGE2DPROC glad_glTexImage2D; PFNGLSTENCILMASKPROC glad_glStencilMask; PFNGLDRAWPIXELSPROC glad_glDrawPixels; PFNGLMULTMATRIXDPROC glad_glMultMatrixd; PFNGLMULTMATRIXFPROC glad_glMultMatrixf; PFNGLISTEXTUREPROC glad_glIsTexture; 
PFNGLGETMATERIALIVPROC glad_glGetMaterialiv; PFNGLNAMEDBUFFERDATAPROC glad_glNamedBufferData; PFNGLUNIFORM1FVPROC glad_glUniform1fv; PFNGLLOADMATRIXFPROC glad_glLoadMatrixf; PFNGLTEXSTORAGE2DPROC glad_glTexStorage2D; PFNGLLOADMATRIXDPROC glad_glLoadMatrixd; PFNGLCLEARNAMEDBUFFERSUBDATAPROC glad_glClearNamedBufferSubData; PFNGLMAPNAMEDBUFFERRANGEPROC glad_glMapNamedBufferRange; PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC glad_glNamedFramebufferTextureLayer; PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv; PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv; PFNGLVERTEX4FPROC glad_glVertex4f; PFNGLRECTSVPROC glad_glRectsv; PFNGLCOLOR4USVPROC glad_glColor4usv; PFNGLUNIFORM3DVPROC glad_glUniform3dv; PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC glad_glProgramUniformMatrix4x3fv; PFNGLPOLYGONSTIPPLEPROC glad_glPolygonStipple; PFNGLBINDBUFFERSBASEPROC glad_glBindBuffersBase; PFNGLINTERLEAVEDARRAYSPROC glad_glInterleavedArrays; PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC glad_glGetSubroutineUniformLocation; PFNGLNORMAL3IPROC glad_glNormal3i; PFNGLNORMAL3FPROC glad_glNormal3f; PFNGLNORMAL3DPROC glad_glNormal3d; PFNGLNORMAL3BPROC glad_glNormal3b; PFNGLGETFRAMEBUFFERPARAMETERIVPROC glad_glGetFramebufferParameteriv; PFNGLPIXELMAPUSVPROC glad_glPixelMapusv; PFNGLGETTEXGENIVPROC glad_glGetTexGeniv; PFNGLARRAYELEMENTPROC glad_glArrayElement; PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC glad_glGetCompressedTextureSubImage; PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData; PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv; PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d; PFNGLBINDTRANSFORMFEEDBACKPROC glad_glBindTransformFeedback; PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f; PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv; PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v; PFNGLDEPTHMASKPROC glad_glDepthMask; PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s; PFNGLCOLOR3FVPROC glad_glColor3fv; PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample; PFNGLPROGRAMUNIFORM1FVPROC glad_glProgramUniform1fv; 
PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv; PFNGLUNIFORM4FVPROC glad_glUniform4fv; PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform; PFNGLCOLORPOINTERPROC glad_glColorPointer; PFNGLFRONTFACEPROC glad_glFrontFace; PFNGLTEXBUFFERRANGEPROC glad_glTexBufferRange; PFNGLCREATEBUFFERSPROC glad_glCreateBuffers; PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC glad_glNamedFramebufferParameteri; PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC glad_glDrawArraysInstancedBaseInstance; PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v; PFNGLVERTEXATTRIBL3DPROC glad_glVertexAttribL3d; PFNGLDELETEPROGRAMPIPELINESPROC glad_glDeleteProgramPipelines; PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv; PFNGLCLIPCONTROLPROC glad_glClipControl; PFNGLGETPROGRAMRESOURCEIVPROC glad_glGetProgramResourceiv; static void load_GL_VERSION_1_0(GLADloadproc load) { if(!GLAD_GL_VERSION_1_0) return; glad_glCullFace = (PFNGLCULLFACEPROC)load("glCullFace"); glad_glFrontFace = (PFNGLFRONTFACEPROC)load("glFrontFace"); glad_glHint = (PFNGLHINTPROC)load("glHint"); glad_glLineWidth = (PFNGLLINEWIDTHPROC)load("glLineWidth"); glad_glPointSize = (PFNGLPOINTSIZEPROC)load("glPointSize"); glad_glPolygonMode = (PFNGLPOLYGONMODEPROC)load("glPolygonMode"); glad_glScissor = (PFNGLSCISSORPROC)load("glScissor"); glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC)load("glTexParameterf"); glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC)load("glTexParameterfv"); glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC)load("glTexParameteri"); glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC)load("glTexParameteriv"); glad_glTexImage1D = (PFNGLTEXIMAGE1DPROC)load("glTexImage1D"); glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC)load("glTexImage2D"); glad_glDrawBuffer = (PFNGLDRAWBUFFERPROC)load("glDrawBuffer"); glad_glClear = (PFNGLCLEARPROC)load("glClear"); glad_glClearColor = (PFNGLCLEARCOLORPROC)load("glClearColor"); glad_glClearStencil = (PFNGLCLEARSTENCILPROC)load("glClearStencil"); glad_glClearDepth = (PFNGLCLEARDEPTHPROC)load("glClearDepth"); 
glad_glStencilMask = (PFNGLSTENCILMASKPROC)load("glStencilMask"); glad_glColorMask = (PFNGLCOLORMASKPROC)load("glColorMask"); glad_glDepthMask = (PFNGLDEPTHMASKPROC)load("glDepthMask"); glad_glDisable = (PFNGLDISABLEPROC)load("glDisable"); glad_glEnable = (PFNGLENABLEPROC)load("glEnable"); glad_glFinish = (PFNGLFINISHPROC)load("glFinish"); glad_glFlush = (PFNGLFLUSHPROC)load("glFlush"); glad_glBlendFunc = (PFNGLBLENDFUNCPROC)load("glBlendFunc"); glad_glLogicOp = (PFNGLLOGICOPPROC)load("glLogicOp"); glad_glStencilFunc = (PFNGLSTENCILFUNCPROC)load("glStencilFunc"); glad_glStencilOp = (PFNGLSTENCILOPPROC)load("glStencilOp"); glad_glDepthFunc = (PFNGLDEPTHFUNCPROC)load("glDepthFunc"); glad_glPixelStoref = (PFNGLPIXELSTOREFPROC)load("glPixelStoref"); glad_glPixelStorei = (PFNGLPIXELSTOREIPROC)load("glPixelStorei"); glad_glReadBuffer = (PFNGLREADBUFFERPROC)load("glReadBuffer"); glad_glReadPixels = (PFNGLREADPIXELSPROC)load("glReadPixels"); glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC)load("glGetBooleanv"); glad_glGetDoublev = (PFNGLGETDOUBLEVPROC)load("glGetDoublev"); glad_glGetError = (PFNGLGETERRORPROC)load("glGetError"); glad_glGetFloatv = (PFNGLGETFLOATVPROC)load("glGetFloatv"); glad_glGetIntegerv = (PFNGLGETINTEGERVPROC)load("glGetIntegerv"); glad_glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); glad_glGetTexImage = (PFNGLGETTEXIMAGEPROC)load("glGetTexImage"); glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC)load("glGetTexParameterfv"); glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC)load("glGetTexParameteriv"); glad_glGetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC)load("glGetTexLevelParameterfv"); glad_glGetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC)load("glGetTexLevelParameteriv"); glad_glIsEnabled = (PFNGLISENABLEDPROC)load("glIsEnabled"); glad_glDepthRange = (PFNGLDEPTHRANGEPROC)load("glDepthRange"); glad_glViewport = (PFNGLVIEWPORTPROC)load("glViewport"); glad_glNewList = (PFNGLNEWLISTPROC)load("glNewList"); 
glad_glEndList = (PFNGLENDLISTPROC)load("glEndList"); glad_glCallList = (PFNGLCALLLISTPROC)load("glCallList"); glad_glCallLists = (PFNGLCALLLISTSPROC)load("glCallLists"); glad_glDeleteLists = (PFNGLDELETELISTSPROC)load("glDeleteLists"); glad_glGenLists = (PFNGLGENLISTSPROC)load("glGenLists"); glad_glListBase = (PFNGLLISTBASEPROC)load("glListBase"); glad_glBegin = (PFNGLBEGINPROC)load("glBegin"); glad_glBitmap = (PFNGLBITMAPPROC)load("glBitmap"); glad_glColor3b = (PFNGLCOLOR3BPROC)load("glColor3b"); glad_glColor3bv = (PFNGLCOLOR3BVPROC)load("glColor3bv"); glad_glColor3d = (PFNGLCOLOR3DPROC)load("glColor3d"); glad_glColor3dv = (PFNGLCOLOR3DVPROC)load("glColor3dv"); glad_glColor3f = (PFNGLCOLOR3FPROC)load("glColor3f"); glad_glColor3fv = (PFNGLCOLOR3FVPROC)load("glColor3fv"); glad_glColor3i = (PFNGLCOLOR3IPROC)load("glColor3i"); glad_glColor3iv = (PFNGLCOLOR3IVPROC)load("glColor3iv"); glad_glColor3s = (PFNGLCOLOR3SPROC)load("glColor3s"); glad_glColor3sv = (PFNGLCOLOR3SVPROC)load("glColor3sv"); glad_glColor3ub = (PFNGLCOLOR3UBPROC)load("glColor3ub"); glad_glColor3ubv = (PFNGLCOLOR3UBVPROC)load("glColor3ubv"); glad_glColor3ui = (PFNGLCOLOR3UIPROC)load("glColor3ui"); glad_glColor3uiv = (PFNGLCOLOR3UIVPROC)load("glColor3uiv"); glad_glColor3us = (PFNGLCOLOR3USPROC)load("glColor3us"); glad_glColor3usv = (PFNGLCOLOR3USVPROC)load("glColor3usv"); glad_glColor4b = (PFNGLCOLOR4BPROC)load("glColor4b"); glad_glColor4bv = (PFNGLCOLOR4BVPROC)load("glColor4bv"); glad_glColor4d = (PFNGLCOLOR4DPROC)load("glColor4d"); glad_glColor4dv = (PFNGLCOLOR4DVPROC)load("glColor4dv"); glad_glColor4f = (PFNGLCOLOR4FPROC)load("glColor4f"); glad_glColor4fv = (PFNGLCOLOR4FVPROC)load("glColor4fv"); glad_glColor4i = (PFNGLCOLOR4IPROC)load("glColor4i"); glad_glColor4iv = (PFNGLCOLOR4IVPROC)load("glColor4iv"); glad_glColor4s = (PFNGLCOLOR4SPROC)load("glColor4s"); glad_glColor4sv = (PFNGLCOLOR4SVPROC)load("glColor4sv"); glad_glColor4ub = (PFNGLCOLOR4UBPROC)load("glColor4ub"); glad_glColor4ubv = 
(PFNGLCOLOR4UBVPROC)load("glColor4ubv"); glad_glColor4ui = (PFNGLCOLOR4UIPROC)load("glColor4ui"); glad_glColor4uiv = (PFNGLCOLOR4UIVPROC)load("glColor4uiv"); glad_glColor4us = (PFNGLCOLOR4USPROC)load("glColor4us"); glad_glColor4usv = (PFNGLCOLOR4USVPROC)load("glColor4usv"); glad_glEdgeFlag = (PFNGLEDGEFLAGPROC)load("glEdgeFlag"); glad_glEdgeFlagv = (PFNGLEDGEFLAGVPROC)load("glEdgeFlagv"); glad_glEnd = (PFNGLENDPROC)load("glEnd"); glad_glIndexd = (PFNGLINDEXDPROC)load("glIndexd"); glad_glIndexdv = (PFNGLINDEXDVPROC)load("glIndexdv"); glad_glIndexf = (PFNGLINDEXFPROC)load("glIndexf"); glad_glIndexfv = (PFNGLINDEXFVPROC)load("glIndexfv"); glad_glIndexi = (PFNGLINDEXIPROC)load("glIndexi"); glad_glIndexiv = (PFNGLINDEXIVPROC)load("glIndexiv"); glad_glIndexs = (PFNGLINDEXSPROC)load("glIndexs"); glad_glIndexsv = (PFNGLINDEXSVPROC)load("glIndexsv"); glad_glNormal3b = (PFNGLNORMAL3BPROC)load("glNormal3b"); glad_glNormal3bv = (PFNGLNORMAL3BVPROC)load("glNormal3bv"); glad_glNormal3d = (PFNGLNORMAL3DPROC)load("glNormal3d"); glad_glNormal3dv = (PFNGLNORMAL3DVPROC)load("glNormal3dv"); glad_glNormal3f = (PFNGLNORMAL3FPROC)load("glNormal3f"); glad_glNormal3fv = (PFNGLNORMAL3FVPROC)load("glNormal3fv"); glad_glNormal3i = (PFNGLNORMAL3IPROC)load("glNormal3i"); glad_glNormal3iv = (PFNGLNORMAL3IVPROC)load("glNormal3iv"); glad_glNormal3s = (PFNGLNORMAL3SPROC)load("glNormal3s"); glad_glNormal3sv = (PFNGLNORMAL3SVPROC)load("glNormal3sv"); glad_glRasterPos2d = (PFNGLRASTERPOS2DPROC)load("glRasterPos2d"); glad_glRasterPos2dv = (PFNGLRASTERPOS2DVPROC)load("glRasterPos2dv"); glad_glRasterPos2f = (PFNGLRASTERPOS2FPROC)load("glRasterPos2f"); glad_glRasterPos2fv = (PFNGLRASTERPOS2FVPROC)load("glRasterPos2fv"); glad_glRasterPos2i = (PFNGLRASTERPOS2IPROC)load("glRasterPos2i"); glad_glRasterPos2iv = (PFNGLRASTERPOS2IVPROC)load("glRasterPos2iv"); glad_glRasterPos2s = (PFNGLRASTERPOS2SPROC)load("glRasterPos2s"); glad_glRasterPos2sv = (PFNGLRASTERPOS2SVPROC)load("glRasterPos2sv"); glad_glRasterPos3d = 
(PFNGLRASTERPOS3DPROC)load("glRasterPos3d"); glad_glRasterPos3dv = (PFNGLRASTERPOS3DVPROC)load("glRasterPos3dv"); glad_glRasterPos3f = (PFNGLRASTERPOS3FPROC)load("glRasterPos3f"); glad_glRasterPos3fv = (PFNGLRASTERPOS3FVPROC)load("glRasterPos3fv"); glad_glRasterPos3i = (PFNGLRASTERPOS3IPROC)load("glRasterPos3i"); glad_glRasterPos3iv = (PFNGLRASTERPOS3IVPROC)load("glRasterPos3iv"); glad_glRasterPos3s = (PFNGLRASTERPOS3SPROC)load("glRasterPos3s"); glad_glRasterPos3sv = (PFNGLRASTERPOS3SVPROC)load("glRasterPos3sv"); glad_glRasterPos4d = (PFNGLRASTERPOS4DPROC)load("glRasterPos4d"); glad_glRasterPos4dv = (PFNGLRASTERPOS4DVPROC)load("glRasterPos4dv"); glad_glRasterPos4f = (PFNGLRASTERPOS4FPROC)load("glRasterPos4f"); glad_glRasterPos4fv = (PFNGLRASTERPOS4FVPROC)load("glRasterPos4fv"); glad_glRasterPos4i = (PFNGLRASTERPOS4IPROC)load("glRasterPos4i"); glad_glRasterPos4iv = (PFNGLRASTERPOS4IVPROC)load("glRasterPos4iv"); glad_glRasterPos4s = (PFNGLRASTERPOS4SPROC)load("glRasterPos4s"); glad_glRasterPos4sv = (PFNGLRASTERPOS4SVPROC)load("glRasterPos4sv"); glad_glRectd = (PFNGLRECTDPROC)load("glRectd"); glad_glRectdv = (PFNGLRECTDVPROC)load("glRectdv"); glad_glRectf = (PFNGLRECTFPROC)load("glRectf"); glad_glRectfv = (PFNGLRECTFVPROC)load("glRectfv"); glad_glRecti = (PFNGLRECTIPROC)load("glRecti"); glad_glRectiv = (PFNGLRECTIVPROC)load("glRectiv"); glad_glRects = (PFNGLRECTSPROC)load("glRects"); glad_glRectsv = (PFNGLRECTSVPROC)load("glRectsv"); glad_glTexCoord1d = (PFNGLTEXCOORD1DPROC)load("glTexCoord1d"); glad_glTexCoord1dv = (PFNGLTEXCOORD1DVPROC)load("glTexCoord1dv"); glad_glTexCoord1f = (PFNGLTEXCOORD1FPROC)load("glTexCoord1f"); glad_glTexCoord1fv = (PFNGLTEXCOORD1FVPROC)load("glTexCoord1fv"); glad_glTexCoord1i = (PFNGLTEXCOORD1IPROC)load("glTexCoord1i"); glad_glTexCoord1iv = (PFNGLTEXCOORD1IVPROC)load("glTexCoord1iv"); glad_glTexCoord1s = (PFNGLTEXCOORD1SPROC)load("glTexCoord1s"); glad_glTexCoord1sv = (PFNGLTEXCOORD1SVPROC)load("glTexCoord1sv"); glad_glTexCoord2d = 
(PFNGLTEXCOORD2DPROC)load("glTexCoord2d"); glad_glTexCoord2dv = (PFNGLTEXCOORD2DVPROC)load("glTexCoord2dv"); glad_glTexCoord2f = (PFNGLTEXCOORD2FPROC)load("glTexCoord2f"); glad_glTexCoord2fv = (PFNGLTEXCOORD2FVPROC)load("glTexCoord2fv"); glad_glTexCoord2i = (PFNGLTEXCOORD2IPROC)load("glTexCoord2i"); glad_glTexCoord2iv = (PFNGLTEXCOORD2IVPROC)load("glTexCoord2iv"); glad_glTexCoord2s = (PFNGLTEXCOORD2SPROC)load("glTexCoord2s"); glad_glTexCoord2sv = (PFNGLTEXCOORD2SVPROC)load("glTexCoord2sv"); glad_glTexCoord3d = (PFNGLTEXCOORD3DPROC)load("glTexCoord3d"); glad_glTexCoord3dv = (PFNGLTEXCOORD3DVPROC)load("glTexCoord3dv"); glad_glTexCoord3f = (PFNGLTEXCOORD3FPROC)load("glTexCoord3f"); glad_glTexCoord3fv = (PFNGLTEXCOORD3FVPROC)load("glTexCoord3fv"); glad_glTexCoord3i = (PFNGLTEXCOORD3IPROC)load("glTexCoord3i"); glad_glTexCoord3iv = (PFNGLTEXCOORD3IVPROC)load("glTexCoord3iv"); glad_glTexCoord3s = (PFNGLTEXCOORD3SPROC)load("glTexCoord3s"); glad_glTexCoord3sv = (PFNGLTEXCOORD3SVPROC)load("glTexCoord3sv"); glad_glTexCoord4d = (PFNGLTEXCOORD4DPROC)load("glTexCoord4d"); glad_glTexCoord4dv = (PFNGLTEXCOORD4DVPROC)load("glTexCoord4dv"); glad_glTexCoord4f = (PFNGLTEXCOORD4FPROC)load("glTexCoord4f"); glad_glTexCoord4fv = (PFNGLTEXCOORD4FVPROC)load("glTexCoord4fv"); glad_glTexCoord4i = (PFNGLTEXCOORD4IPROC)load("glTexCoord4i"); glad_glTexCoord4iv = (PFNGLTEXCOORD4IVPROC)load("glTexCoord4iv"); glad_glTexCoord4s = (PFNGLTEXCOORD4SPROC)load("glTexCoord4s"); glad_glTexCoord4sv = (PFNGLTEXCOORD4SVPROC)load("glTexCoord4sv"); glad_glVertex2d = (PFNGLVERTEX2DPROC)load("glVertex2d"); glad_glVertex2dv = (PFNGLVERTEX2DVPROC)load("glVertex2dv"); glad_glVertex2f = (PFNGLVERTEX2FPROC)load("glVertex2f"); glad_glVertex2fv = (PFNGLVERTEX2FVPROC)load("glVertex2fv"); glad_glVertex2i = (PFNGLVERTEX2IPROC)load("glVertex2i"); glad_glVertex2iv = (PFNGLVERTEX2IVPROC)load("glVertex2iv"); glad_glVertex2s = (PFNGLVERTEX2SPROC)load("glVertex2s"); glad_glVertex2sv = (PFNGLVERTEX2SVPROC)load("glVertex2sv"); 
glad_glVertex3d = (PFNGLVERTEX3DPROC)load("glVertex3d"); glad_glVertex3dv = (PFNGLVERTEX3DVPROC)load("glVertex3dv"); glad_glVertex3f = (PFNGLVERTEX3FPROC)load("glVertex3f"); glad_glVertex3fv = (PFNGLVERTEX3FVPROC)load("glVertex3fv"); glad_glVertex3i = (PFNGLVERTEX3IPROC)load("glVertex3i"); glad_glVertex3iv = (PFNGLVERTEX3IVPROC)load("glVertex3iv"); glad_glVertex3s = (PFNGLVERTEX3SPROC)load("glVertex3s"); glad_glVertex3sv = (PFNGLVERTEX3SVPROC)load("glVertex3sv"); glad_glVertex4d = (PFNGLVERTEX4DPROC)load("glVertex4d"); glad_glVertex4dv = (PFNGLVERTEX4DVPROC)load("glVertex4dv"); glad_glVertex4f = (PFNGLVERTEX4FPROC)load("glVertex4f"); glad_glVertex4fv = (PFNGLVERTEX4FVPROC)load("glVertex4fv"); glad_glVertex4i = (PFNGLVERTEX4IPROC)load("glVertex4i"); glad_glVertex4iv = (PFNGLVERTEX4IVPROC)load("glVertex4iv"); glad_glVertex4s = (PFNGLVERTEX4SPROC)load("glVertex4s"); glad_glVertex4sv = (PFNGLVERTEX4SVPROC)load("glVertex4sv"); glad_glClipPlane = (PFNGLCLIPPLANEPROC)load("glClipPlane"); glad_glColorMaterial = (PFNGLCOLORMATERIALPROC)load("glColorMaterial"); glad_glFogf = (PFNGLFOGFPROC)load("glFogf"); glad_glFogfv = (PFNGLFOGFVPROC)load("glFogfv"); glad_glFogi = (PFNGLFOGIPROC)load("glFogi"); glad_glFogiv = (PFNGLFOGIVPROC)load("glFogiv"); glad_glLightf = (PFNGLLIGHTFPROC)load("glLightf"); glad_glLightfv = (PFNGLLIGHTFVPROC)load("glLightfv"); glad_glLighti = (PFNGLLIGHTIPROC)load("glLighti"); glad_glLightiv = (PFNGLLIGHTIVPROC)load("glLightiv"); glad_glLightModelf = (PFNGLLIGHTMODELFPROC)load("glLightModelf"); glad_glLightModelfv = (PFNGLLIGHTMODELFVPROC)load("glLightModelfv"); glad_glLightModeli = (PFNGLLIGHTMODELIPROC)load("glLightModeli"); glad_glLightModeliv = (PFNGLLIGHTMODELIVPROC)load("glLightModeliv"); glad_glLineStipple = (PFNGLLINESTIPPLEPROC)load("glLineStipple"); glad_glMaterialf = (PFNGLMATERIALFPROC)load("glMaterialf"); glad_glMaterialfv = (PFNGLMATERIALFVPROC)load("glMaterialfv"); glad_glMateriali = (PFNGLMATERIALIPROC)load("glMateriali"); 
glad_glMaterialiv = (PFNGLMATERIALIVPROC)load("glMaterialiv"); glad_glPolygonStipple = (PFNGLPOLYGONSTIPPLEPROC)load("glPolygonStipple"); glad_glShadeModel = (PFNGLSHADEMODELPROC)load("glShadeModel"); glad_glTexEnvf = (PFNGLTEXENVFPROC)load("glTexEnvf"); glad_glTexEnvfv = (PFNGLTEXENVFVPROC)load("glTexEnvfv"); glad_glTexEnvi = (PFNGLTEXENVIPROC)load("glTexEnvi"); glad_glTexEnviv = (PFNGLTEXENVIVPROC)load("glTexEnviv"); glad_glTexGend = (PFNGLTEXGENDPROC)load("glTexGend"); glad_glTexGendv = (PFNGLTEXGENDVPROC)load("glTexGendv"); glad_glTexGenf = (PFNGLTEXGENFPROC)load("glTexGenf"); glad_glTexGenfv = (PFNGLTEXGENFVPROC)load("glTexGenfv"); glad_glTexGeni = (PFNGLTEXGENIPROC)load("glTexGeni"); glad_glTexGeniv = (PFNGLTEXGENIVPROC)load("glTexGeniv"); glad_glFeedbackBuffer = (PFNGLFEEDBACKBUFFERPROC)load("glFeedbackBuffer"); glad_glSelectBuffer = (PFNGLSELECTBUFFERPROC)load("glSelectBuffer"); glad_glRenderMode = (PFNGLRENDERMODEPROC)load("glRenderMode"); glad_glInitNames = (PFNGLINITNAMESPROC)load("glInitNames"); glad_glLoadName = (PFNGLLOADNAMEPROC)load("glLoadName"); glad_glPassThrough = (PFNGLPASSTHROUGHPROC)load("glPassThrough"); glad_glPopName = (PFNGLPOPNAMEPROC)load("glPopName"); glad_glPushName = (PFNGLPUSHNAMEPROC)load("glPushName"); glad_glClearAccum = (PFNGLCLEARACCUMPROC)load("glClearAccum"); glad_glClearIndex = (PFNGLCLEARINDEXPROC)load("glClearIndex"); glad_glIndexMask = (PFNGLINDEXMASKPROC)load("glIndexMask"); glad_glAccum = (PFNGLACCUMPROC)load("glAccum"); glad_glPopAttrib = (PFNGLPOPATTRIBPROC)load("glPopAttrib"); glad_glPushAttrib = (PFNGLPUSHATTRIBPROC)load("glPushAttrib"); glad_glMap1d = (PFNGLMAP1DPROC)load("glMap1d"); glad_glMap1f = (PFNGLMAP1FPROC)load("glMap1f"); glad_glMap2d = (PFNGLMAP2DPROC)load("glMap2d"); glad_glMap2f = (PFNGLMAP2FPROC)load("glMap2f"); glad_glMapGrid1d = (PFNGLMAPGRID1DPROC)load("glMapGrid1d"); glad_glMapGrid1f = (PFNGLMAPGRID1FPROC)load("glMapGrid1f"); glad_glMapGrid2d = (PFNGLMAPGRID2DPROC)load("glMapGrid2d"); 
glad_glMapGrid2f = (PFNGLMAPGRID2FPROC)load("glMapGrid2f"); glad_glEvalCoord1d = (PFNGLEVALCOORD1DPROC)load("glEvalCoord1d"); glad_glEvalCoord1dv = (PFNGLEVALCOORD1DVPROC)load("glEvalCoord1dv"); glad_glEvalCoord1f = (PFNGLEVALCOORD1FPROC)load("glEvalCoord1f"); glad_glEvalCoord1fv = (PFNGLEVALCOORD1FVPROC)load("glEvalCoord1fv"); glad_glEvalCoord2d = (PFNGLEVALCOORD2DPROC)load("glEvalCoord2d"); glad_glEvalCoord2dv = (PFNGLEVALCOORD2DVPROC)load("glEvalCoord2dv"); glad_glEvalCoord2f = (PFNGLEVALCOORD2FPROC)load("glEvalCoord2f"); glad_glEvalCoord2fv = (PFNGLEVALCOORD2FVPROC)load("glEvalCoord2fv"); glad_glEvalMesh1 = (PFNGLEVALMESH1PROC)load("glEvalMesh1"); glad_glEvalPoint1 = (PFNGLEVALPOINT1PROC)load("glEvalPoint1"); glad_glEvalMesh2 = (PFNGLEVALMESH2PROC)load("glEvalMesh2"); glad_glEvalPoint2 = (PFNGLEVALPOINT2PROC)load("glEvalPoint2"); glad_glAlphaFunc = (PFNGLALPHAFUNCPROC)load("glAlphaFunc"); glad_glPixelZoom = (PFNGLPIXELZOOMPROC)load("glPixelZoom"); glad_glPixelTransferf = (PFNGLPIXELTRANSFERFPROC)load("glPixelTransferf"); glad_glPixelTransferi = (PFNGLPIXELTRANSFERIPROC)load("glPixelTransferi"); glad_glPixelMapfv = (PFNGLPIXELMAPFVPROC)load("glPixelMapfv"); glad_glPixelMapuiv = (PFNGLPIXELMAPUIVPROC)load("glPixelMapuiv"); glad_glPixelMapusv = (PFNGLPIXELMAPUSVPROC)load("glPixelMapusv"); glad_glCopyPixels = (PFNGLCOPYPIXELSPROC)load("glCopyPixels"); glad_glDrawPixels = (PFNGLDRAWPIXELSPROC)load("glDrawPixels"); glad_glGetClipPlane = (PFNGLGETCLIPPLANEPROC)load("glGetClipPlane"); glad_glGetLightfv = (PFNGLGETLIGHTFVPROC)load("glGetLightfv"); glad_glGetLightiv = (PFNGLGETLIGHTIVPROC)load("glGetLightiv"); glad_glGetMapdv = (PFNGLGETMAPDVPROC)load("glGetMapdv"); glad_glGetMapfv = (PFNGLGETMAPFVPROC)load("glGetMapfv"); glad_glGetMapiv = (PFNGLGETMAPIVPROC)load("glGetMapiv"); glad_glGetMaterialfv = (PFNGLGETMATERIALFVPROC)load("glGetMaterialfv"); glad_glGetMaterialiv = (PFNGLGETMATERIALIVPROC)load("glGetMaterialiv"); glad_glGetPixelMapfv = 
(PFNGLGETPIXELMAPFVPROC)load("glGetPixelMapfv"); glad_glGetPixelMapuiv = (PFNGLGETPIXELMAPUIVPROC)load("glGetPixelMapuiv"); glad_glGetPixelMapusv = (PFNGLGETPIXELMAPUSVPROC)load("glGetPixelMapusv"); glad_glGetPolygonStipple = (PFNGLGETPOLYGONSTIPPLEPROC)load("glGetPolygonStipple"); glad_glGetTexEnvfv = (PFNGLGETTEXENVFVPROC)load("glGetTexEnvfv"); glad_glGetTexEnviv = (PFNGLGETTEXENVIVPROC)load("glGetTexEnviv"); glad_glGetTexGendv = (PFNGLGETTEXGENDVPROC)load("glGetTexGendv"); glad_glGetTexGenfv = (PFNGLGETTEXGENFVPROC)load("glGetTexGenfv"); glad_glGetTexGeniv = (PFNGLGETTEXGENIVPROC)load("glGetTexGeniv"); glad_glIsList = (PFNGLISLISTPROC)load("glIsList"); glad_glFrustum = (PFNGLFRUSTUMPROC)load("glFrustum"); glad_glLoadIdentity = (PFNGLLOADIDENTITYPROC)load("glLoadIdentity"); glad_glLoadMatrixf = (PFNGLLOADMATRIXFPROC)load("glLoadMatrixf"); glad_glLoadMatrixd = (PFNGLLOADMATRIXDPROC)load("glLoadMatrixd"); glad_glMatrixMode = (PFNGLMATRIXMODEPROC)load("glMatrixMode"); glad_glMultMatrixf = (PFNGLMULTMATRIXFPROC)load("glMultMatrixf"); glad_glMultMatrixd = (PFNGLMULTMATRIXDPROC)load("glMultMatrixd"); glad_glOrtho = (PFNGLORTHOPROC)load("glOrtho"); glad_glPopMatrix = (PFNGLPOPMATRIXPROC)load("glPopMatrix"); glad_glPushMatrix = (PFNGLPUSHMATRIXPROC)load("glPushMatrix"); glad_glRotated = (PFNGLROTATEDPROC)load("glRotated"); glad_glRotatef = (PFNGLROTATEFPROC)load("glRotatef"); glad_glScaled = (PFNGLSCALEDPROC)load("glScaled"); glad_glScalef = (PFNGLSCALEFPROC)load("glScalef"); glad_glTranslated = (PFNGLTRANSLATEDPROC)load("glTranslated"); glad_glTranslatef = (PFNGLTRANSLATEFPROC)load("glTranslatef"); } static void load_GL_VERSION_1_1(GLADloadproc load) { if(!GLAD_GL_VERSION_1_1) return; glad_glDrawArrays = (PFNGLDRAWARRAYSPROC)load("glDrawArrays"); glad_glDrawElements = (PFNGLDRAWELEMENTSPROC)load("glDrawElements"); glad_glGetPointerv = (PFNGLGETPOINTERVPROC)load("glGetPointerv"); glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC)load("glPolygonOffset"); 
glad_glCopyTexImage1D = (PFNGLCOPYTEXIMAGE1DPROC)load("glCopyTexImage1D"); glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC)load("glCopyTexImage2D"); glad_glCopyTexSubImage1D = (PFNGLCOPYTEXSUBIMAGE1DPROC)load("glCopyTexSubImage1D"); glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC)load("glCopyTexSubImage2D"); glad_glTexSubImage1D = (PFNGLTEXSUBIMAGE1DPROC)load("glTexSubImage1D"); glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC)load("glTexSubImage2D"); glad_glBindTexture = (PFNGLBINDTEXTUREPROC)load("glBindTexture"); glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC)load("glDeleteTextures"); glad_glGenTextures = (PFNGLGENTEXTURESPROC)load("glGenTextures"); glad_glIsTexture = (PFNGLISTEXTUREPROC)load("glIsTexture"); glad_glArrayElement = (PFNGLARRAYELEMENTPROC)load("glArrayElement"); glad_glColorPointer = (PFNGLCOLORPOINTERPROC)load("glColorPointer"); glad_glDisableClientState = (PFNGLDISABLECLIENTSTATEPROC)load("glDisableClientState"); glad_glEdgeFlagPointer = (PFNGLEDGEFLAGPOINTERPROC)load("glEdgeFlagPointer"); glad_glEnableClientState = (PFNGLENABLECLIENTSTATEPROC)load("glEnableClientState"); glad_glIndexPointer = (PFNGLINDEXPOINTERPROC)load("glIndexPointer"); glad_glInterleavedArrays = (PFNGLINTERLEAVEDARRAYSPROC)load("glInterleavedArrays"); glad_glNormalPointer = (PFNGLNORMALPOINTERPROC)load("glNormalPointer"); glad_glTexCoordPointer = (PFNGLTEXCOORDPOINTERPROC)load("glTexCoordPointer"); glad_glVertexPointer = (PFNGLVERTEXPOINTERPROC)load("glVertexPointer"); glad_glAreTexturesResident = (PFNGLARETEXTURESRESIDENTPROC)load("glAreTexturesResident"); glad_glPrioritizeTextures = (PFNGLPRIORITIZETEXTURESPROC)load("glPrioritizeTextures"); glad_glIndexub = (PFNGLINDEXUBPROC)load("glIndexub"); glad_glIndexubv = (PFNGLINDEXUBVPROC)load("glIndexubv"); glad_glPopClientAttrib = (PFNGLPOPCLIENTATTRIBPROC)load("glPopClientAttrib"); glad_glPushClientAttrib = (PFNGLPUSHCLIENTATTRIBPROC)load("glPushClientAttrib"); } static void load_GL_VERSION_1_2(GLADloadproc load) { 
if(!GLAD_GL_VERSION_1_2) return; glad_glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC)load("glDrawRangeElements"); glad_glTexImage3D = (PFNGLTEXIMAGE3DPROC)load("glTexImage3D"); glad_glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC)load("glTexSubImage3D"); glad_glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC)load("glCopyTexSubImage3D"); } static void load_GL_VERSION_1_3(GLADloadproc load) { if(!GLAD_GL_VERSION_1_3) return; glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC)load("glActiveTexture"); glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC)load("glSampleCoverage"); glad_glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC)load("glCompressedTexImage3D"); glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC)load("glCompressedTexImage2D"); glad_glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC)load("glCompressedTexImage1D"); glad_glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)load("glCompressedTexSubImage3D"); glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)load("glCompressedTexSubImage2D"); glad_glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)load("glCompressedTexSubImage1D"); glad_glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC)load("glGetCompressedTexImage"); glad_glClientActiveTexture = (PFNGLCLIENTACTIVETEXTUREPROC)load("glClientActiveTexture"); glad_glMultiTexCoord1d = (PFNGLMULTITEXCOORD1DPROC)load("glMultiTexCoord1d"); glad_glMultiTexCoord1dv = (PFNGLMULTITEXCOORD1DVPROC)load("glMultiTexCoord1dv"); glad_glMultiTexCoord1f = (PFNGLMULTITEXCOORD1FPROC)load("glMultiTexCoord1f"); glad_glMultiTexCoord1fv = (PFNGLMULTITEXCOORD1FVPROC)load("glMultiTexCoord1fv"); glad_glMultiTexCoord1i = (PFNGLMULTITEXCOORD1IPROC)load("glMultiTexCoord1i"); glad_glMultiTexCoord1iv = (PFNGLMULTITEXCOORD1IVPROC)load("glMultiTexCoord1iv"); glad_glMultiTexCoord1s = (PFNGLMULTITEXCOORD1SPROC)load("glMultiTexCoord1s"); glad_glMultiTexCoord1sv = (PFNGLMULTITEXCOORD1SVPROC)load("glMultiTexCoord1sv"); 
glad_glMultiTexCoord2d = (PFNGLMULTITEXCOORD2DPROC)load("glMultiTexCoord2d"); glad_glMultiTexCoord2dv = (PFNGLMULTITEXCOORD2DVPROC)load("glMultiTexCoord2dv"); glad_glMultiTexCoord2f = (PFNGLMULTITEXCOORD2FPROC)load("glMultiTexCoord2f"); glad_glMultiTexCoord2fv = (PFNGLMULTITEXCOORD2FVPROC)load("glMultiTexCoord2fv"); glad_glMultiTexCoord2i = (PFNGLMULTITEXCOORD2IPROC)load("glMultiTexCoord2i"); glad_glMultiTexCoord2iv = (PFNGLMULTITEXCOORD2IVPROC)load("glMultiTexCoord2iv"); glad_glMultiTexCoord2s = (PFNGLMULTITEXCOORD2SPROC)load("glMultiTexCoord2s"); glad_glMultiTexCoord2sv = (PFNGLMULTITEXCOORD2SVPROC)load("glMultiTexCoord2sv"); glad_glMultiTexCoord3d = (PFNGLMULTITEXCOORD3DPROC)load("glMultiTexCoord3d"); glad_glMultiTexCoord3dv = (PFNGLMULTITEXCOORD3DVPROC)load("glMultiTexCoord3dv"); glad_glMultiTexCoord3f = (PFNGLMULTITEXCOORD3FPROC)load("glMultiTexCoord3f"); glad_glMultiTexCoord3fv = (PFNGLMULTITEXCOORD3FVPROC)load("glMultiTexCoord3fv"); glad_glMultiTexCoord3i = (PFNGLMULTITEXCOORD3IPROC)load("glMultiTexCoord3i"); glad_glMultiTexCoord3iv = (PFNGLMULTITEXCOORD3IVPROC)load("glMultiTexCoord3iv"); glad_glMultiTexCoord3s = (PFNGLMULTITEXCOORD3SPROC)load("glMultiTexCoord3s"); glad_glMultiTexCoord3sv = (PFNGLMULTITEXCOORD3SVPROC)load("glMultiTexCoord3sv"); glad_glMultiTexCoord4d = (PFNGLMULTITEXCOORD4DPROC)load("glMultiTexCoord4d"); glad_glMultiTexCoord4dv = (PFNGLMULTITEXCOORD4DVPROC)load("glMultiTexCoord4dv"); glad_glMultiTexCoord4f = (PFNGLMULTITEXCOORD4FPROC)load("glMultiTexCoord4f"); glad_glMultiTexCoord4fv = (PFNGLMULTITEXCOORD4FVPROC)load("glMultiTexCoord4fv"); glad_glMultiTexCoord4i = (PFNGLMULTITEXCOORD4IPROC)load("glMultiTexCoord4i"); glad_glMultiTexCoord4iv = (PFNGLMULTITEXCOORD4IVPROC)load("glMultiTexCoord4iv"); glad_glMultiTexCoord4s = (PFNGLMULTITEXCOORD4SPROC)load("glMultiTexCoord4s"); glad_glMultiTexCoord4sv = (PFNGLMULTITEXCOORD4SVPROC)load("glMultiTexCoord4sv"); glad_glLoadTransposeMatrixf = 
(PFNGLLOADTRANSPOSEMATRIXFPROC)load("glLoadTransposeMatrixf"); glad_glLoadTransposeMatrixd = (PFNGLLOADTRANSPOSEMATRIXDPROC)load("glLoadTransposeMatrixd"); glad_glMultTransposeMatrixf = (PFNGLMULTTRANSPOSEMATRIXFPROC)load("glMultTransposeMatrixf"); glad_glMultTransposeMatrixd = (PFNGLMULTTRANSPOSEMATRIXDPROC)load("glMultTransposeMatrixd"); } static void load_GL_VERSION_1_4(GLADloadproc load) { if(!GLAD_GL_VERSION_1_4) return; glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC)load("glBlendFuncSeparate"); glad_glMultiDrawArrays = (PFNGLMULTIDRAWARRAYSPROC)load("glMultiDrawArrays"); glad_glMultiDrawElements = (PFNGLMULTIDRAWELEMENTSPROC)load("glMultiDrawElements"); glad_glPointParameterf = (PFNGLPOINTPARAMETERFPROC)load("glPointParameterf"); glad_glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC)load("glPointParameterfv"); glad_glPointParameteri = (PFNGLPOINTPARAMETERIPROC)load("glPointParameteri"); glad_glPointParameteriv = (PFNGLPOINTPARAMETERIVPROC)load("glPointParameteriv"); glad_glFogCoordf = (PFNGLFOGCOORDFPROC)load("glFogCoordf"); glad_glFogCoordfv = (PFNGLFOGCOORDFVPROC)load("glFogCoordfv"); glad_glFogCoordd = (PFNGLFOGCOORDDPROC)load("glFogCoordd"); glad_glFogCoorddv = (PFNGLFOGCOORDDVPROC)load("glFogCoorddv"); glad_glFogCoordPointer = (PFNGLFOGCOORDPOINTERPROC)load("glFogCoordPointer"); glad_glSecondaryColor3b = (PFNGLSECONDARYCOLOR3BPROC)load("glSecondaryColor3b"); glad_glSecondaryColor3bv = (PFNGLSECONDARYCOLOR3BVPROC)load("glSecondaryColor3bv"); glad_glSecondaryColor3d = (PFNGLSECONDARYCOLOR3DPROC)load("glSecondaryColor3d"); glad_glSecondaryColor3dv = (PFNGLSECONDARYCOLOR3DVPROC)load("glSecondaryColor3dv"); glad_glSecondaryColor3f = (PFNGLSECONDARYCOLOR3FPROC)load("glSecondaryColor3f"); glad_glSecondaryColor3fv = (PFNGLSECONDARYCOLOR3FVPROC)load("glSecondaryColor3fv"); glad_glSecondaryColor3i = (PFNGLSECONDARYCOLOR3IPROC)load("glSecondaryColor3i"); glad_glSecondaryColor3iv = (PFNGLSECONDARYCOLOR3IVPROC)load("glSecondaryColor3iv"); 
glad_glSecondaryColor3s = (PFNGLSECONDARYCOLOR3SPROC)load("glSecondaryColor3s"); glad_glSecondaryColor3sv = (PFNGLSECONDARYCOLOR3SVPROC)load("glSecondaryColor3sv"); glad_glSecondaryColor3ub = (PFNGLSECONDARYCOLOR3UBPROC)load("glSecondaryColor3ub"); glad_glSecondaryColor3ubv = (PFNGLSECONDARYCOLOR3UBVPROC)load("glSecondaryColor3ubv"); glad_glSecondaryColor3ui = (PFNGLSECONDARYCOLOR3UIPROC)load("glSecondaryColor3ui"); glad_glSecondaryColor3uiv = (PFNGLSECONDARYCOLOR3UIVPROC)load("glSecondaryColor3uiv"); glad_glSecondaryColor3us = (PFNGLSECONDARYCOLOR3USPROC)load("glSecondaryColor3us"); glad_glSecondaryColor3usv = (PFNGLSECONDARYCOLOR3USVPROC)load("glSecondaryColor3usv"); glad_glSecondaryColorPointer = (PFNGLSECONDARYCOLORPOINTERPROC)load("glSecondaryColorPointer"); glad_glWindowPos2d = (PFNGLWINDOWPOS2DPROC)load("glWindowPos2d"); glad_glWindowPos2dv = (PFNGLWINDOWPOS2DVPROC)load("glWindowPos2dv"); glad_glWindowPos2f = (PFNGLWINDOWPOS2FPROC)load("glWindowPos2f"); glad_glWindowPos2fv = (PFNGLWINDOWPOS2FVPROC)load("glWindowPos2fv"); glad_glWindowPos2i = (PFNGLWINDOWPOS2IPROC)load("glWindowPos2i"); glad_glWindowPos2iv = (PFNGLWINDOWPOS2IVPROC)load("glWindowPos2iv"); glad_glWindowPos2s = (PFNGLWINDOWPOS2SPROC)load("glWindowPos2s"); glad_glWindowPos2sv = (PFNGLWINDOWPOS2SVPROC)load("glWindowPos2sv"); glad_glWindowPos3d = (PFNGLWINDOWPOS3DPROC)load("glWindowPos3d"); glad_glWindowPos3dv = (PFNGLWINDOWPOS3DVPROC)load("glWindowPos3dv"); glad_glWindowPos3f = (PFNGLWINDOWPOS3FPROC)load("glWindowPos3f"); glad_glWindowPos3fv = (PFNGLWINDOWPOS3FVPROC)load("glWindowPos3fv"); glad_glWindowPos3i = (PFNGLWINDOWPOS3IPROC)load("glWindowPos3i"); glad_glWindowPos3iv = (PFNGLWINDOWPOS3IVPROC)load("glWindowPos3iv"); glad_glWindowPos3s = (PFNGLWINDOWPOS3SPROC)load("glWindowPos3s"); glad_glWindowPos3sv = (PFNGLWINDOWPOS3SVPROC)load("glWindowPos3sv"); glad_glBlendColor = (PFNGLBLENDCOLORPROC)load("glBlendColor"); glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC)load("glBlendEquation"); } static 
/* GL 1.5: occlusion queries and buffer objects ('static' for this definition sits at the end of the previous line). */ void load_GL_VERSION_1_5(GLADloadproc load) { if(!GLAD_GL_VERSION_1_5) return; glad_glGenQueries = (PFNGLGENQUERIESPROC)load("glGenQueries"); glad_glDeleteQueries = (PFNGLDELETEQUERIESPROC)load("glDeleteQueries"); glad_glIsQuery = (PFNGLISQUERYPROC)load("glIsQuery"); glad_glBeginQuery = (PFNGLBEGINQUERYPROC)load("glBeginQuery"); glad_glEndQuery = (PFNGLENDQUERYPROC)load("glEndQuery"); glad_glGetQueryiv = (PFNGLGETQUERYIVPROC)load("glGetQueryiv"); glad_glGetQueryObjectiv = (PFNGLGETQUERYOBJECTIVPROC)load("glGetQueryObjectiv"); glad_glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC)load("glGetQueryObjectuiv"); glad_glBindBuffer = (PFNGLBINDBUFFERPROC)load("glBindBuffer"); glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC)load("glDeleteBuffers"); glad_glGenBuffers = (PFNGLGENBUFFERSPROC)load("glGenBuffers"); glad_glIsBuffer = (PFNGLISBUFFERPROC)load("glIsBuffer"); glad_glBufferData = (PFNGLBUFFERDATAPROC)load("glBufferData"); glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC)load("glBufferSubData"); glad_glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC)load("glGetBufferSubData"); glad_glMapBuffer = (PFNGLMAPBUFFERPROC)load("glMapBuffer"); glad_glUnmapBuffer = (PFNGLUNMAPBUFFERPROC)load("glUnmapBuffer"); glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC)load("glGetBufferParameteriv"); glad_glGetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC)load("glGetBufferPointerv"); } /* GL 2.0: programmable shaders, uniforms, and generic vertex attributes (continues over the next several lines). */ static void load_GL_VERSION_2_0(GLADloadproc load) { if(!GLAD_GL_VERSION_2_0) return; glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC)load("glBlendEquationSeparate"); glad_glDrawBuffers = (PFNGLDRAWBUFFERSPROC)load("glDrawBuffers"); glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC)load("glStencilOpSeparate"); glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC)load("glStencilFuncSeparate"); glad_glStencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC)load("glStencilMaskSeparate"); glad_glAttachShader = (PFNGLATTACHSHADERPROC)load("glAttachShader"); glad_glBindAttribLocation
= (PFNGLBINDATTRIBLOCATIONPROC)load("glBindAttribLocation"); glad_glCompileShader = (PFNGLCOMPILESHADERPROC)load("glCompileShader"); glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC)load("glCreateProgram"); glad_glCreateShader = (PFNGLCREATESHADERPROC)load("glCreateShader"); glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC)load("glDeleteProgram"); glad_glDeleteShader = (PFNGLDELETESHADERPROC)load("glDeleteShader"); glad_glDetachShader = (PFNGLDETACHSHADERPROC)load("glDetachShader"); glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC)load("glDisableVertexAttribArray"); glad_glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC)load("glEnableVertexAttribArray"); glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC)load("glGetActiveAttrib"); glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC)load("glGetActiveUniform"); glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC)load("glGetAttachedShaders"); glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC)load("glGetAttribLocation"); glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC)load("glGetProgramiv"); glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC)load("glGetProgramInfoLog"); glad_glGetShaderiv = (PFNGLGETSHADERIVPROC)load("glGetShaderiv"); glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC)load("glGetShaderInfoLog"); glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC)load("glGetShaderSource"); glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)load("glGetUniformLocation"); glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC)load("glGetUniformfv"); glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC)load("glGetUniformiv"); glad_glGetVertexAttribdv = (PFNGLGETVERTEXATTRIBDVPROC)load("glGetVertexAttribdv"); glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC)load("glGetVertexAttribfv"); glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC)load("glGetVertexAttribiv"); glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC)load("glGetVertexAttribPointerv"); glad_glIsProgram =
(PFNGLISPROGRAMPROC)load("glIsProgram"); glad_glIsShader = (PFNGLISSHADERPROC)load("glIsShader"); glad_glLinkProgram = (PFNGLLINKPROGRAMPROC)load("glLinkProgram"); glad_glShaderSource = (PFNGLSHADERSOURCEPROC)load("glShaderSource"); glad_glUseProgram = (PFNGLUSEPROGRAMPROC)load("glUseProgram"); glad_glUniform1f = (PFNGLUNIFORM1FPROC)load("glUniform1f"); glad_glUniform2f = (PFNGLUNIFORM2FPROC)load("glUniform2f"); glad_glUniform3f = (PFNGLUNIFORM3FPROC)load("glUniform3f"); glad_glUniform4f = (PFNGLUNIFORM4FPROC)load("glUniform4f"); glad_glUniform1i = (PFNGLUNIFORM1IPROC)load("glUniform1i"); glad_glUniform2i = (PFNGLUNIFORM2IPROC)load("glUniform2i"); glad_glUniform3i = (PFNGLUNIFORM3IPROC)load("glUniform3i"); glad_glUniform4i = (PFNGLUNIFORM4IPROC)load("glUniform4i"); glad_glUniform1fv = (PFNGLUNIFORM1FVPROC)load("glUniform1fv"); glad_glUniform2fv = (PFNGLUNIFORM2FVPROC)load("glUniform2fv"); glad_glUniform3fv = (PFNGLUNIFORM3FVPROC)load("glUniform3fv"); glad_glUniform4fv = (PFNGLUNIFORM4FVPROC)load("glUniform4fv"); glad_glUniform1iv = (PFNGLUNIFORM1IVPROC)load("glUniform1iv"); glad_glUniform2iv = (PFNGLUNIFORM2IVPROC)load("glUniform2iv"); glad_glUniform3iv = (PFNGLUNIFORM3IVPROC)load("glUniform3iv"); glad_glUniform4iv = (PFNGLUNIFORM4IVPROC)load("glUniform4iv"); glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)load("glUniformMatrix2fv"); glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC)load("glUniformMatrix3fv"); glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC)load("glUniformMatrix4fv"); glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC)load("glValidateProgram"); glad_glVertexAttrib1d = (PFNGLVERTEXATTRIB1DPROC)load("glVertexAttrib1d"); glad_glVertexAttrib1dv = (PFNGLVERTEXATTRIB1DVPROC)load("glVertexAttrib1dv"); glad_glVertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC)load("glVertexAttrib1f"); glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC)load("glVertexAttrib1fv"); glad_glVertexAttrib1s = (PFNGLVERTEXATTRIB1SPROC)load("glVertexAttrib1s");
glad_glVertexAttrib1sv = (PFNGLVERTEXATTRIB1SVPROC)load("glVertexAttrib1sv"); glad_glVertexAttrib2d = (PFNGLVERTEXATTRIB2DPROC)load("glVertexAttrib2d"); glad_glVertexAttrib2dv = (PFNGLVERTEXATTRIB2DVPROC)load("glVertexAttrib2dv"); glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC)load("glVertexAttrib2f"); glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC)load("glVertexAttrib2fv"); glad_glVertexAttrib2s = (PFNGLVERTEXATTRIB2SPROC)load("glVertexAttrib2s"); glad_glVertexAttrib2sv = (PFNGLVERTEXATTRIB2SVPROC)load("glVertexAttrib2sv"); glad_glVertexAttrib3d = (PFNGLVERTEXATTRIB3DPROC)load("glVertexAttrib3d"); glad_glVertexAttrib3dv = (PFNGLVERTEXATTRIB3DVPROC)load("glVertexAttrib3dv"); glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC)load("glVertexAttrib3f"); glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC)load("glVertexAttrib3fv"); glad_glVertexAttrib3s = (PFNGLVERTEXATTRIB3SPROC)load("glVertexAttrib3s"); glad_glVertexAttrib3sv = (PFNGLVERTEXATTRIB3SVPROC)load("glVertexAttrib3sv"); glad_glVertexAttrib4Nbv = (PFNGLVERTEXATTRIB4NBVPROC)load("glVertexAttrib4Nbv"); glad_glVertexAttrib4Niv = (PFNGLVERTEXATTRIB4NIVPROC)load("glVertexAttrib4Niv"); glad_glVertexAttrib4Nsv = (PFNGLVERTEXATTRIB4NSVPROC)load("glVertexAttrib4Nsv"); glad_glVertexAttrib4Nub = (PFNGLVERTEXATTRIB4NUBPROC)load("glVertexAttrib4Nub"); glad_glVertexAttrib4Nubv = (PFNGLVERTEXATTRIB4NUBVPROC)load("glVertexAttrib4Nubv"); glad_glVertexAttrib4Nuiv = (PFNGLVERTEXATTRIB4NUIVPROC)load("glVertexAttrib4Nuiv"); glad_glVertexAttrib4Nusv = (PFNGLVERTEXATTRIB4NUSVPROC)load("glVertexAttrib4Nusv"); glad_glVertexAttrib4bv = (PFNGLVERTEXATTRIB4BVPROC)load("glVertexAttrib4bv"); glad_glVertexAttrib4d = (PFNGLVERTEXATTRIB4DPROC)load("glVertexAttrib4d"); glad_glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC)load("glVertexAttrib4dv"); glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC)load("glVertexAttrib4f"); glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC)load("glVertexAttrib4fv"); glad_glVertexAttrib4iv =
(PFNGLVERTEXATTRIB4IVPROC)load("glVertexAttrib4iv"); glad_glVertexAttrib4s = (PFNGLVERTEXATTRIB4SPROC)load("glVertexAttrib4s"); glad_glVertexAttrib4sv = (PFNGLVERTEXATTRIB4SVPROC)load("glVertexAttrib4sv"); glad_glVertexAttrib4ubv = (PFNGLVERTEXATTRIB4UBVPROC)load("glVertexAttrib4ubv"); glad_glVertexAttrib4uiv = (PFNGLVERTEXATTRIB4UIVPROC)load("glVertexAttrib4uiv"); glad_glVertexAttrib4usv = (PFNGLVERTEXATTRIB4USVPROC)load("glVertexAttrib4usv"); glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC)load("glVertexAttribPointer"); } static void load_GL_VERSION_2_1(GLADloadproc load) { if(!GLAD_GL_VERSION_2_1) return; glad_glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC)load("glUniformMatrix2x3fv"); glad_glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)load("glUniformMatrix3x2fv"); glad_glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC)load("glUniformMatrix2x4fv"); glad_glUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC)load("glUniformMatrix4x2fv"); glad_glUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC)load("glUniformMatrix3x4fv"); glad_glUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC)load("glUniformMatrix4x3fv"); } static void load_GL_VERSION_3_0(GLADloadproc load) { if(!GLAD_GL_VERSION_3_0) return; glad_glColorMaski = (PFNGLCOLORMASKIPROC)load("glColorMaski"); glad_glGetBooleani_v = (PFNGLGETBOOLEANI_VPROC)load("glGetBooleani_v"); glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); glad_glEnablei = (PFNGLENABLEIPROC)load("glEnablei"); glad_glDisablei = (PFNGLDISABLEIPROC)load("glDisablei"); glad_glIsEnabledi = (PFNGLISENABLEDIPROC)load("glIsEnabledi"); glad_glBeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC)load("glBeginTransformFeedback"); glad_glEndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC)load("glEndTransformFeedback"); glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); glad_glTransformFeedbackVaryings = 
/* load_GL_VERSION_3_0 continued: conditional render, integer vertex attribs, unsigned uniforms, renderbuffers/framebuffers, buffer-range mapping, VAO binding. */ (PFNGLTRANSFORMFEEDBACKVARYINGSPROC)load("glTransformFeedbackVaryings"); glad_glGetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)load("glGetTransformFeedbackVarying"); glad_glClampColor = (PFNGLCLAMPCOLORPROC)load("glClampColor"); glad_glBeginConditionalRender = (PFNGLBEGINCONDITIONALRENDERPROC)load("glBeginConditionalRender"); glad_glEndConditionalRender = (PFNGLENDCONDITIONALRENDERPROC)load("glEndConditionalRender"); glad_glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC)load("glVertexAttribIPointer"); glad_glGetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC)load("glGetVertexAttribIiv"); glad_glGetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC)load("glGetVertexAttribIuiv"); glad_glVertexAttribI1i = (PFNGLVERTEXATTRIBI1IPROC)load("glVertexAttribI1i"); glad_glVertexAttribI2i = (PFNGLVERTEXATTRIBI2IPROC)load("glVertexAttribI2i"); glad_glVertexAttribI3i = (PFNGLVERTEXATTRIBI3IPROC)load("glVertexAttribI3i"); glad_glVertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC)load("glVertexAttribI4i"); glad_glVertexAttribI1ui = (PFNGLVERTEXATTRIBI1UIPROC)load("glVertexAttribI1ui"); glad_glVertexAttribI2ui = (PFNGLVERTEXATTRIBI2UIPROC)load("glVertexAttribI2ui"); glad_glVertexAttribI3ui = (PFNGLVERTEXATTRIBI3UIPROC)load("glVertexAttribI3ui"); glad_glVertexAttribI4ui = (PFNGLVERTEXATTRIBI4UIPROC)load("glVertexAttribI4ui"); glad_glVertexAttribI1iv = (PFNGLVERTEXATTRIBI1IVPROC)load("glVertexAttribI1iv"); glad_glVertexAttribI2iv = (PFNGLVERTEXATTRIBI2IVPROC)load("glVertexAttribI2iv"); glad_glVertexAttribI3iv = (PFNGLVERTEXATTRIBI3IVPROC)load("glVertexAttribI3iv"); glad_glVertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC)load("glVertexAttribI4iv"); glad_glVertexAttribI1uiv = (PFNGLVERTEXATTRIBI1UIVPROC)load("glVertexAttribI1uiv"); glad_glVertexAttribI2uiv = (PFNGLVERTEXATTRIBI2UIVPROC)load("glVertexAttribI2uiv"); glad_glVertexAttribI3uiv = (PFNGLVERTEXATTRIBI3UIVPROC)load("glVertexAttribI3uiv"); glad_glVertexAttribI4uiv =
(PFNGLVERTEXATTRIBI4UIVPROC)load("glVertexAttribI4uiv"); glad_glVertexAttribI4bv = (PFNGLVERTEXATTRIBI4BVPROC)load("glVertexAttribI4bv"); glad_glVertexAttribI4sv = (PFNGLVERTEXATTRIBI4SVPROC)load("glVertexAttribI4sv"); glad_glVertexAttribI4ubv = (PFNGLVERTEXATTRIBI4UBVPROC)load("glVertexAttribI4ubv"); glad_glVertexAttribI4usv = (PFNGLVERTEXATTRIBI4USVPROC)load("glVertexAttribI4usv"); glad_glGetUniformuiv = (PFNGLGETUNIFORMUIVPROC)load("glGetUniformuiv"); glad_glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC)load("glBindFragDataLocation"); glad_glGetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC)load("glGetFragDataLocation"); glad_glUniform1ui = (PFNGLUNIFORM1UIPROC)load("glUniform1ui"); glad_glUniform2ui = (PFNGLUNIFORM2UIPROC)load("glUniform2ui"); glad_glUniform3ui = (PFNGLUNIFORM3UIPROC)load("glUniform3ui"); glad_glUniform4ui = (PFNGLUNIFORM4UIPROC)load("glUniform4ui"); glad_glUniform1uiv = (PFNGLUNIFORM1UIVPROC)load("glUniform1uiv"); glad_glUniform2uiv = (PFNGLUNIFORM2UIVPROC)load("glUniform2uiv"); glad_glUniform3uiv = (PFNGLUNIFORM3UIVPROC)load("glUniform3uiv"); glad_glUniform4uiv = (PFNGLUNIFORM4UIVPROC)load("glUniform4uiv"); glad_glTexParameterIiv = (PFNGLTEXPARAMETERIIVPROC)load("glTexParameterIiv"); glad_glTexParameterIuiv = (PFNGLTEXPARAMETERIUIVPROC)load("glTexParameterIuiv"); glad_glGetTexParameterIiv = (PFNGLGETTEXPARAMETERIIVPROC)load("glGetTexParameterIiv"); glad_glGetTexParameterIuiv = (PFNGLGETTEXPARAMETERIUIVPROC)load("glGetTexParameterIuiv"); glad_glClearBufferiv = (PFNGLCLEARBUFFERIVPROC)load("glClearBufferiv"); glad_glClearBufferuiv = (PFNGLCLEARBUFFERUIVPROC)load("glClearBufferuiv"); glad_glClearBufferfv = (PFNGLCLEARBUFFERFVPROC)load("glClearBufferfv"); glad_glClearBufferfi = (PFNGLCLEARBUFFERFIPROC)load("glClearBufferfi"); glad_glGetStringi = (PFNGLGETSTRINGIPROC)load("glGetStringi"); glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC)load("glIsRenderbuffer"); glad_glBindRenderbuffer =
(PFNGLBINDRENDERBUFFERPROC)load("glBindRenderbuffer"); glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC)load("glDeleteRenderbuffers"); glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC)load("glGenRenderbuffers"); glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC)load("glRenderbufferStorage"); glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC)load("glGetRenderbufferParameteriv"); glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC)load("glIsFramebuffer"); glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC)load("glBindFramebuffer"); glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC)load("glDeleteFramebuffers"); glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC)load("glGenFramebuffers"); glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC)load("glCheckFramebufferStatus"); glad_glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC)load("glFramebufferTexture1D"); glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC)load("glFramebufferTexture2D"); glad_glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DPROC)load("glFramebufferTexture3D"); glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC)load("glFramebufferRenderbuffer"); glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)load("glGetFramebufferAttachmentParameteriv"); glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC)load("glGenerateMipmap"); glad_glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC)load("glBlitFramebuffer"); glad_glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)load("glRenderbufferStorageMultisample"); glad_glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC)load("glFramebufferTextureLayer"); glad_glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC)load("glMapBufferRange"); glad_glFlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC)load("glFlushMappedBufferRange"); glad_glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC)load("glBindVertexArray");
glad_glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC)load("glDeleteVertexArrays"); glad_glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC)load("glGenVertexArrays"); glad_glIsVertexArray = (PFNGLISVERTEXARRAYPROC)load("glIsVertexArray"); } static void load_GL_VERSION_3_1(GLADloadproc load) { if(!GLAD_GL_VERSION_3_1) return; glad_glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC)load("glDrawArraysInstanced"); glad_glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC)load("glDrawElementsInstanced"); glad_glTexBuffer = (PFNGLTEXBUFFERPROC)load("glTexBuffer"); glad_glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC)load("glPrimitiveRestartIndex"); glad_glCopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC)load("glCopyBufferSubData"); glad_glGetUniformIndices = (PFNGLGETUNIFORMINDICESPROC)load("glGetUniformIndices"); glad_glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC)load("glGetActiveUniformsiv"); glad_glGetActiveUniformName = (PFNGLGETACTIVEUNIFORMNAMEPROC)load("glGetActiveUniformName"); glad_glGetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC)load("glGetUniformBlockIndex"); glad_glGetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC)load("glGetActiveUniformBlockiv"); glad_glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)load("glGetActiveUniformBlockName"); glad_glUniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC)load("glUniformBlockBinding"); glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); } static void load_GL_VERSION_3_2(GLADloadproc load) { if(!GLAD_GL_VERSION_3_2) return; glad_glDrawElementsBaseVertex = (PFNGLDRAWELEMENTSBASEVERTEXPROC)load("glDrawElementsBaseVertex"); glad_glDrawRangeElementsBaseVertex = (PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)load("glDrawRangeElementsBaseVertex"); glad_glDrawElementsInstancedBaseVertex = 
(PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)load("glDrawElementsInstancedBaseVertex"); glad_glMultiDrawElementsBaseVertex = (PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)load("glMultiDrawElementsBaseVertex"); glad_glProvokingVertex = (PFNGLPROVOKINGVERTEXPROC)load("glProvokingVertex"); glad_glFenceSync = (PFNGLFENCESYNCPROC)load("glFenceSync"); glad_glIsSync = (PFNGLISSYNCPROC)load("glIsSync"); glad_glDeleteSync = (PFNGLDELETESYNCPROC)load("glDeleteSync"); glad_glClientWaitSync = (PFNGLCLIENTWAITSYNCPROC)load("glClientWaitSync"); glad_glWaitSync = (PFNGLWAITSYNCPROC)load("glWaitSync"); glad_glGetInteger64v = (PFNGLGETINTEGER64VPROC)load("glGetInteger64v"); glad_glGetSynciv = (PFNGLGETSYNCIVPROC)load("glGetSynciv"); glad_glGetInteger64i_v = (PFNGLGETINTEGER64I_VPROC)load("glGetInteger64i_v"); glad_glGetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC)load("glGetBufferParameteri64v"); glad_glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC)load("glFramebufferTexture"); glad_glTexImage2DMultisample = (PFNGLTEXIMAGE2DMULTISAMPLEPROC)load("glTexImage2DMultisample"); glad_glTexImage3DMultisample = (PFNGLTEXIMAGE3DMULTISAMPLEPROC)load("glTexImage3DMultisample"); glad_glGetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC)load("glGetMultisamplefv"); glad_glSampleMaski = (PFNGLSAMPLEMASKIPROC)load("glSampleMaski"); } static void load_GL_VERSION_3_3(GLADloadproc load) { if(!GLAD_GL_VERSION_3_3) return; glad_glBindFragDataLocationIndexed = (PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)load("glBindFragDataLocationIndexed"); glad_glGetFragDataIndex = (PFNGLGETFRAGDATAINDEXPROC)load("glGetFragDataIndex"); glad_glGenSamplers = (PFNGLGENSAMPLERSPROC)load("glGenSamplers"); glad_glDeleteSamplers = (PFNGLDELETESAMPLERSPROC)load("glDeleteSamplers"); glad_glIsSampler = (PFNGLISSAMPLERPROC)load("glIsSampler"); glad_glBindSampler = (PFNGLBINDSAMPLERPROC)load("glBindSampler"); glad_glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC)load("glSamplerParameteri"); glad_glSamplerParameteriv = 
/* load_GL_VERSION_3_3 continued: sampler parameters, timer queries, attrib divisor, packed (P-type) vertex attribute setters. */ (PFNGLSAMPLERPARAMETERIVPROC)load("glSamplerParameteriv"); glad_glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC)load("glSamplerParameterf"); glad_glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC)load("glSamplerParameterfv"); glad_glSamplerParameterIiv = (PFNGLSAMPLERPARAMETERIIVPROC)load("glSamplerParameterIiv"); glad_glSamplerParameterIuiv = (PFNGLSAMPLERPARAMETERIUIVPROC)load("glSamplerParameterIuiv"); glad_glGetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC)load("glGetSamplerParameteriv"); glad_glGetSamplerParameterIiv = (PFNGLGETSAMPLERPARAMETERIIVPROC)load("glGetSamplerParameterIiv"); glad_glGetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC)load("glGetSamplerParameterfv"); glad_glGetSamplerParameterIuiv = (PFNGLGETSAMPLERPARAMETERIUIVPROC)load("glGetSamplerParameterIuiv"); glad_glQueryCounter = (PFNGLQUERYCOUNTERPROC)load("glQueryCounter"); glad_glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC)load("glGetQueryObjecti64v"); glad_glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC)load("glGetQueryObjectui64v"); glad_glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC)load("glVertexAttribDivisor"); glad_glVertexAttribP1ui = (PFNGLVERTEXATTRIBP1UIPROC)load("glVertexAttribP1ui"); glad_glVertexAttribP1uiv = (PFNGLVERTEXATTRIBP1UIVPROC)load("glVertexAttribP1uiv"); glad_glVertexAttribP2ui = (PFNGLVERTEXATTRIBP2UIPROC)load("glVertexAttribP2ui"); glad_glVertexAttribP2uiv = (PFNGLVERTEXATTRIBP2UIVPROC)load("glVertexAttribP2uiv"); glad_glVertexAttribP3ui = (PFNGLVERTEXATTRIBP3UIPROC)load("glVertexAttribP3ui"); glad_glVertexAttribP3uiv = (PFNGLVERTEXATTRIBP3UIVPROC)load("glVertexAttribP3uiv"); glad_glVertexAttribP4ui = (PFNGLVERTEXATTRIBP4UIPROC)load("glVertexAttribP4ui"); glad_glVertexAttribP4uiv = (PFNGLVERTEXATTRIBP4UIVPROC)load("glVertexAttribP4uiv"); glad_glVertexP2ui = (PFNGLVERTEXP2UIPROC)load("glVertexP2ui"); glad_glVertexP2uiv = (PFNGLVERTEXP2UIVPROC)load("glVertexP2uiv"); glad_glVertexP3ui = (PFNGLVERTEXP3UIPROC)load("glVertexP3ui");
glad_glVertexP3uiv = (PFNGLVERTEXP3UIVPROC)load("glVertexP3uiv"); glad_glVertexP4ui = (PFNGLVERTEXP4UIPROC)load("glVertexP4ui"); glad_glVertexP4uiv = (PFNGLVERTEXP4UIVPROC)load("glVertexP4uiv"); glad_glTexCoordP1ui = (PFNGLTEXCOORDP1UIPROC)load("glTexCoordP1ui"); glad_glTexCoordP1uiv = (PFNGLTEXCOORDP1UIVPROC)load("glTexCoordP1uiv"); glad_glTexCoordP2ui = (PFNGLTEXCOORDP2UIPROC)load("glTexCoordP2ui"); glad_glTexCoordP2uiv = (PFNGLTEXCOORDP2UIVPROC)load("glTexCoordP2uiv"); glad_glTexCoordP3ui = (PFNGLTEXCOORDP3UIPROC)load("glTexCoordP3ui"); glad_glTexCoordP3uiv = (PFNGLTEXCOORDP3UIVPROC)load("glTexCoordP3uiv"); glad_glTexCoordP4ui = (PFNGLTEXCOORDP4UIPROC)load("glTexCoordP4ui"); glad_glTexCoordP4uiv = (PFNGLTEXCOORDP4UIVPROC)load("glTexCoordP4uiv"); glad_glMultiTexCoordP1ui = (PFNGLMULTITEXCOORDP1UIPROC)load("glMultiTexCoordP1ui"); glad_glMultiTexCoordP1uiv = (PFNGLMULTITEXCOORDP1UIVPROC)load("glMultiTexCoordP1uiv"); glad_glMultiTexCoordP2ui = (PFNGLMULTITEXCOORDP2UIPROC)load("glMultiTexCoordP2ui"); glad_glMultiTexCoordP2uiv = (PFNGLMULTITEXCOORDP2UIVPROC)load("glMultiTexCoordP2uiv"); glad_glMultiTexCoordP3ui = (PFNGLMULTITEXCOORDP3UIPROC)load("glMultiTexCoordP3ui"); glad_glMultiTexCoordP3uiv = (PFNGLMULTITEXCOORDP3UIVPROC)load("glMultiTexCoordP3uiv"); glad_glMultiTexCoordP4ui = (PFNGLMULTITEXCOORDP4UIPROC)load("glMultiTexCoordP4ui"); glad_glMultiTexCoordP4uiv = (PFNGLMULTITEXCOORDP4UIVPROC)load("glMultiTexCoordP4uiv"); glad_glNormalP3ui = (PFNGLNORMALP3UIPROC)load("glNormalP3ui"); glad_glNormalP3uiv = (PFNGLNORMALP3UIVPROC)load("glNormalP3uiv"); glad_glColorP3ui = (PFNGLCOLORP3UIPROC)load("glColorP3ui"); glad_glColorP3uiv = (PFNGLCOLORP3UIVPROC)load("glColorP3uiv"); glad_glColorP4ui = (PFNGLCOLORP4UIPROC)load("glColorP4ui"); glad_glColorP4uiv = (PFNGLCOLORP4UIVPROC)load("glColorP4uiv"); glad_glSecondaryColorP3ui = (PFNGLSECONDARYCOLORP3UIPROC)load("glSecondaryColorP3ui"); glad_glSecondaryColorP3uiv = (PFNGLSECONDARYCOLORP3UIVPROC)load("glSecondaryColorP3uiv"); }
/* GL 4.0: per-buffer blending, indirect draws, double-precision uniforms, subroutines, tessellation patches, transform-feedback objects, indexed queries. */ static void load_GL_VERSION_4_0(GLADloadproc load) { if(!GLAD_GL_VERSION_4_0) return; glad_glMinSampleShading = (PFNGLMINSAMPLESHADINGPROC)load("glMinSampleShading"); glad_glBlendEquationi = (PFNGLBLENDEQUATIONIPROC)load("glBlendEquationi"); glad_glBlendEquationSeparatei = (PFNGLBLENDEQUATIONSEPARATEIPROC)load("glBlendEquationSeparatei"); glad_glBlendFunci = (PFNGLBLENDFUNCIPROC)load("glBlendFunci"); glad_glBlendFuncSeparatei = (PFNGLBLENDFUNCSEPARATEIPROC)load("glBlendFuncSeparatei"); glad_glDrawArraysIndirect = (PFNGLDRAWARRAYSINDIRECTPROC)load("glDrawArraysIndirect"); glad_glDrawElementsIndirect = (PFNGLDRAWELEMENTSINDIRECTPROC)load("glDrawElementsIndirect"); glad_glUniform1d = (PFNGLUNIFORM1DPROC)load("glUniform1d"); glad_glUniform2d = (PFNGLUNIFORM2DPROC)load("glUniform2d"); glad_glUniform3d = (PFNGLUNIFORM3DPROC)load("glUniform3d"); glad_glUniform4d = (PFNGLUNIFORM4DPROC)load("glUniform4d"); glad_glUniform1dv = (PFNGLUNIFORM1DVPROC)load("glUniform1dv"); glad_glUniform2dv = (PFNGLUNIFORM2DVPROC)load("glUniform2dv"); glad_glUniform3dv = (PFNGLUNIFORM3DVPROC)load("glUniform3dv"); glad_glUniform4dv = (PFNGLUNIFORM4DVPROC)load("glUniform4dv"); glad_glUniformMatrix2dv = (PFNGLUNIFORMMATRIX2DVPROC)load("glUniformMatrix2dv"); glad_glUniformMatrix3dv = (PFNGLUNIFORMMATRIX3DVPROC)load("glUniformMatrix3dv"); glad_glUniformMatrix4dv = (PFNGLUNIFORMMATRIX4DVPROC)load("glUniformMatrix4dv"); glad_glUniformMatrix2x3dv = (PFNGLUNIFORMMATRIX2X3DVPROC)load("glUniformMatrix2x3dv"); glad_glUniformMatrix2x4dv = (PFNGLUNIFORMMATRIX2X4DVPROC)load("glUniformMatrix2x4dv"); glad_glUniformMatrix3x2dv = (PFNGLUNIFORMMATRIX3X2DVPROC)load("glUniformMatrix3x2dv"); glad_glUniformMatrix3x4dv = (PFNGLUNIFORMMATRIX3X4DVPROC)load("glUniformMatrix3x4dv"); glad_glUniformMatrix4x2dv = (PFNGLUNIFORMMATRIX4X2DVPROC)load("glUniformMatrix4x2dv"); glad_glUniformMatrix4x3dv = (PFNGLUNIFORMMATRIX4X3DVPROC)load("glUniformMatrix4x3dv"); glad_glGetUniformdv = (PFNGLGETUNIFORMDVPROC)load("glGetUniformdv");
glad_glGetSubroutineUniformLocation = (PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC)load("glGetSubroutineUniformLocation"); glad_glGetSubroutineIndex = (PFNGLGETSUBROUTINEINDEXPROC)load("glGetSubroutineIndex"); glad_glGetActiveSubroutineUniformiv = (PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC)load("glGetActiveSubroutineUniformiv"); glad_glGetActiveSubroutineUniformName = (PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC)load("glGetActiveSubroutineUniformName"); glad_glGetActiveSubroutineName = (PFNGLGETACTIVESUBROUTINENAMEPROC)load("glGetActiveSubroutineName"); glad_glUniformSubroutinesuiv = (PFNGLUNIFORMSUBROUTINESUIVPROC)load("glUniformSubroutinesuiv"); glad_glGetUniformSubroutineuiv = (PFNGLGETUNIFORMSUBROUTINEUIVPROC)load("glGetUniformSubroutineuiv"); glad_glGetProgramStageiv = (PFNGLGETPROGRAMSTAGEIVPROC)load("glGetProgramStageiv"); glad_glPatchParameteri = (PFNGLPATCHPARAMETERIPROC)load("glPatchParameteri"); glad_glPatchParameterfv = (PFNGLPATCHPARAMETERFVPROC)load("glPatchParameterfv"); glad_glBindTransformFeedback = (PFNGLBINDTRANSFORMFEEDBACKPROC)load("glBindTransformFeedback"); glad_glDeleteTransformFeedbacks = (PFNGLDELETETRANSFORMFEEDBACKSPROC)load("glDeleteTransformFeedbacks"); glad_glGenTransformFeedbacks = (PFNGLGENTRANSFORMFEEDBACKSPROC)load("glGenTransformFeedbacks"); glad_glIsTransformFeedback = (PFNGLISTRANSFORMFEEDBACKPROC)load("glIsTransformFeedback"); glad_glPauseTransformFeedback = (PFNGLPAUSETRANSFORMFEEDBACKPROC)load("glPauseTransformFeedback"); glad_glResumeTransformFeedback = (PFNGLRESUMETRANSFORMFEEDBACKPROC)load("glResumeTransformFeedback"); glad_glDrawTransformFeedback = (PFNGLDRAWTRANSFORMFEEDBACKPROC)load("glDrawTransformFeedback"); glad_glDrawTransformFeedbackStream = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC)load("glDrawTransformFeedbackStream"); glad_glBeginQueryIndexed = (PFNGLBEGINQUERYINDEXEDPROC)load("glBeginQueryIndexed"); glad_glEndQueryIndexed = (PFNGLENDQUERYINDEXEDPROC)load("glEndQueryIndexed"); glad_glGetQueryIndexediv =
(PFNGLGETQUERYINDEXEDIVPROC)load("glGetQueryIndexediv"); } static void load_GL_VERSION_4_1(GLADloadproc load) { if(!GLAD_GL_VERSION_4_1) return; glad_glReleaseShaderCompiler = (PFNGLRELEASESHADERCOMPILERPROC)load("glReleaseShaderCompiler"); glad_glShaderBinary = (PFNGLSHADERBINARYPROC)load("glShaderBinary"); glad_glGetShaderPrecisionFormat = (PFNGLGETSHADERPRECISIONFORMATPROC)load("glGetShaderPrecisionFormat"); glad_glDepthRangef = (PFNGLDEPTHRANGEFPROC)load("glDepthRangef"); glad_glClearDepthf = (PFNGLCLEARDEPTHFPROC)load("glClearDepthf"); glad_glGetProgramBinary = (PFNGLGETPROGRAMBINARYPROC)load("glGetProgramBinary"); glad_glProgramBinary = (PFNGLPROGRAMBINARYPROC)load("glProgramBinary"); glad_glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC)load("glProgramParameteri"); glad_glUseProgramStages = (PFNGLUSEPROGRAMSTAGESPROC)load("glUseProgramStages"); glad_glActiveShaderProgram = (PFNGLACTIVESHADERPROGRAMPROC)load("glActiveShaderProgram"); glad_glCreateShaderProgramv = (PFNGLCREATESHADERPROGRAMVPROC)load("glCreateShaderProgramv"); glad_glBindProgramPipeline = (PFNGLBINDPROGRAMPIPELINEPROC)load("glBindProgramPipeline"); glad_glDeleteProgramPipelines = (PFNGLDELETEPROGRAMPIPELINESPROC)load("glDeleteProgramPipelines"); glad_glGenProgramPipelines = (PFNGLGENPROGRAMPIPELINESPROC)load("glGenProgramPipelines"); glad_glIsProgramPipeline = (PFNGLISPROGRAMPIPELINEPROC)load("glIsProgramPipeline"); glad_glGetProgramPipelineiv = (PFNGLGETPROGRAMPIPELINEIVPROC)load("glGetProgramPipelineiv"); glad_glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC)load("glProgramParameteri"); glad_glProgramUniform1i = (PFNGLPROGRAMUNIFORM1IPROC)load("glProgramUniform1i"); glad_glProgramUniform1iv = (PFNGLPROGRAMUNIFORM1IVPROC)load("glProgramUniform1iv"); glad_glProgramUniform1f = (PFNGLPROGRAMUNIFORM1FPROC)load("glProgramUniform1f"); glad_glProgramUniform1fv = (PFNGLPROGRAMUNIFORM1FVPROC)load("glProgramUniform1fv"); glad_glProgramUniform1d = (PFNGLPROGRAMUNIFORM1DPROC)load("glProgramUniform1d"); 
/* load_GL_VERSION_4_1 continued: glProgramUniform{2,3,4}* scalar/vector setters. */ glad_glProgramUniform1dv = (PFNGLPROGRAMUNIFORM1DVPROC)load("glProgramUniform1dv"); glad_glProgramUniform1ui = (PFNGLPROGRAMUNIFORM1UIPROC)load("glProgramUniform1ui"); glad_glProgramUniform1uiv = (PFNGLPROGRAMUNIFORM1UIVPROC)load("glProgramUniform1uiv"); glad_glProgramUniform2i = (PFNGLPROGRAMUNIFORM2IPROC)load("glProgramUniform2i"); glad_glProgramUniform2iv = (PFNGLPROGRAMUNIFORM2IVPROC)load("glProgramUniform2iv"); glad_glProgramUniform2f = (PFNGLPROGRAMUNIFORM2FPROC)load("glProgramUniform2f"); glad_glProgramUniform2fv = (PFNGLPROGRAMUNIFORM2FVPROC)load("glProgramUniform2fv"); glad_glProgramUniform2d = (PFNGLPROGRAMUNIFORM2DPROC)load("glProgramUniform2d"); glad_glProgramUniform2dv = (PFNGLPROGRAMUNIFORM2DVPROC)load("glProgramUniform2dv"); glad_glProgramUniform2ui = (PFNGLPROGRAMUNIFORM2UIPROC)load("glProgramUniform2ui"); glad_glProgramUniform2uiv = (PFNGLPROGRAMUNIFORM2UIVPROC)load("glProgramUniform2uiv"); glad_glProgramUniform3i = (PFNGLPROGRAMUNIFORM3IPROC)load("glProgramUniform3i"); glad_glProgramUniform3iv = (PFNGLPROGRAMUNIFORM3IVPROC)load("glProgramUniform3iv"); glad_glProgramUniform3f = (PFNGLPROGRAMUNIFORM3FPROC)load("glProgramUniform3f"); glad_glProgramUniform3fv = (PFNGLPROGRAMUNIFORM3FVPROC)load("glProgramUniform3fv"); glad_glProgramUniform3d = (PFNGLPROGRAMUNIFORM3DPROC)load("glProgramUniform3d"); glad_glProgramUniform3dv = (PFNGLPROGRAMUNIFORM3DVPROC)load("glProgramUniform3dv"); glad_glProgramUniform3ui = (PFNGLPROGRAMUNIFORM3UIPROC)load("glProgramUniform3ui"); glad_glProgramUniform3uiv = (PFNGLPROGRAMUNIFORM3UIVPROC)load("glProgramUniform3uiv"); glad_glProgramUniform4i = (PFNGLPROGRAMUNIFORM4IPROC)load("glProgramUniform4i"); glad_glProgramUniform4iv = (PFNGLPROGRAMUNIFORM4IVPROC)load("glProgramUniform4iv"); glad_glProgramUniform4f = (PFNGLPROGRAMUNIFORM4FPROC)load("glProgramUniform4f"); glad_glProgramUniform4fv = (PFNGLPROGRAMUNIFORM4FVPROC)load("glProgramUniform4fv"); glad_glProgramUniform4d = (PFNGLPROGRAMUNIFORM4DPROC)load("glProgramUniform4d");
glad_glProgramUniform4dv = (PFNGLPROGRAMUNIFORM4DVPROC)load("glProgramUniform4dv"); glad_glProgramUniform4ui = (PFNGLPROGRAMUNIFORM4UIPROC)load("glProgramUniform4ui"); glad_glProgramUniform4uiv = (PFNGLPROGRAMUNIFORM4UIVPROC)load("glProgramUniform4uiv"); glad_glProgramUniformMatrix2fv = (PFNGLPROGRAMUNIFORMMATRIX2FVPROC)load("glProgramUniformMatrix2fv"); glad_glProgramUniformMatrix3fv = (PFNGLPROGRAMUNIFORMMATRIX3FVPROC)load("glProgramUniformMatrix3fv"); glad_glProgramUniformMatrix4fv = (PFNGLPROGRAMUNIFORMMATRIX4FVPROC)load("glProgramUniformMatrix4fv"); glad_glProgramUniformMatrix2dv = (PFNGLPROGRAMUNIFORMMATRIX2DVPROC)load("glProgramUniformMatrix2dv"); glad_glProgramUniformMatrix3dv = (PFNGLPROGRAMUNIFORMMATRIX3DVPROC)load("glProgramUniformMatrix3dv"); glad_glProgramUniformMatrix4dv = (PFNGLPROGRAMUNIFORMMATRIX4DVPROC)load("glProgramUniformMatrix4dv"); glad_glProgramUniformMatrix2x3fv = (PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC)load("glProgramUniformMatrix2x3fv"); glad_glProgramUniformMatrix3x2fv = (PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC)load("glProgramUniformMatrix3x2fv"); glad_glProgramUniformMatrix2x4fv = (PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC)load("glProgramUniformMatrix2x4fv"); glad_glProgramUniformMatrix4x2fv = (PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC)load("glProgramUniformMatrix4x2fv"); glad_glProgramUniformMatrix3x4fv = (PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC)load("glProgramUniformMatrix3x4fv"); glad_glProgramUniformMatrix4x3fv = (PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC)load("glProgramUniformMatrix4x3fv"); glad_glProgramUniformMatrix2x3dv = (PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC)load("glProgramUniformMatrix2x3dv"); glad_glProgramUniformMatrix3x2dv = (PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC)load("glProgramUniformMatrix3x2dv"); glad_glProgramUniformMatrix2x4dv = (PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC)load("glProgramUniformMatrix2x4dv"); glad_glProgramUniformMatrix4x2dv = (PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC)load("glProgramUniformMatrix4x2dv"); glad_glProgramUniformMatrix3x4dv =
(PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC)load("glProgramUniformMatrix3x4dv"); glad_glProgramUniformMatrix4x3dv = (PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC)load("glProgramUniformMatrix4x3dv"); glad_glValidateProgramPipeline = (PFNGLVALIDATEPROGRAMPIPELINEPROC)load("glValidateProgramPipeline"); glad_glGetProgramPipelineInfoLog = (PFNGLGETPROGRAMPIPELINEINFOLOGPROC)load("glGetProgramPipelineInfoLog"); glad_glVertexAttribL1d = (PFNGLVERTEXATTRIBL1DPROC)load("glVertexAttribL1d"); glad_glVertexAttribL2d = (PFNGLVERTEXATTRIBL2DPROC)load("glVertexAttribL2d"); glad_glVertexAttribL3d = (PFNGLVERTEXATTRIBL3DPROC)load("glVertexAttribL3d"); glad_glVertexAttribL4d = (PFNGLVERTEXATTRIBL4DPROC)load("glVertexAttribL4d"); glad_glVertexAttribL1dv = (PFNGLVERTEXATTRIBL1DVPROC)load("glVertexAttribL1dv"); glad_glVertexAttribL2dv = (PFNGLVERTEXATTRIBL2DVPROC)load("glVertexAttribL2dv"); glad_glVertexAttribL3dv = (PFNGLVERTEXATTRIBL3DVPROC)load("glVertexAttribL3dv"); glad_glVertexAttribL4dv = (PFNGLVERTEXATTRIBL4DVPROC)load("glVertexAttribL4dv"); glad_glVertexAttribLPointer = (PFNGLVERTEXATTRIBLPOINTERPROC)load("glVertexAttribLPointer"); glad_glGetVertexAttribLdv = (PFNGLGETVERTEXATTRIBLDVPROC)load("glGetVertexAttribLdv"); glad_glViewportArrayv = (PFNGLVIEWPORTARRAYVPROC)load("glViewportArrayv"); glad_glViewportIndexedf = (PFNGLVIEWPORTINDEXEDFPROC)load("glViewportIndexedf"); glad_glViewportIndexedfv = (PFNGLVIEWPORTINDEXEDFVPROC)load("glViewportIndexedfv"); glad_glScissorArrayv = (PFNGLSCISSORARRAYVPROC)load("glScissorArrayv"); glad_glScissorIndexed = (PFNGLSCISSORINDEXEDPROC)load("glScissorIndexed"); glad_glScissorIndexedv = (PFNGLSCISSORINDEXEDVPROC)load("glScissorIndexedv"); glad_glDepthRangeArrayv = (PFNGLDEPTHRANGEARRAYVPROC)load("glDepthRangeArrayv"); glad_glDepthRangeIndexed = (PFNGLDEPTHRANGEINDEXEDPROC)load("glDepthRangeIndexed"); glad_glGetFloati_v = (PFNGLGETFLOATI_VPROC)load("glGetFloati_v"); glad_glGetDoublei_v = (PFNGLGETDOUBLEI_VPROC)load("glGetDoublei_v"); } /* 'static void' opens load_GL_VERSION_4_2; its name continues on the next line. */ static void
load_GL_VERSION_4_2(GLADloadproc load) { if(!GLAD_GL_VERSION_4_2) return; glad_glDrawArraysInstancedBaseInstance = (PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC)load("glDrawArraysInstancedBaseInstance"); glad_glDrawElementsInstancedBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC)load("glDrawElementsInstancedBaseInstance"); glad_glDrawElementsInstancedBaseVertexBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC)load("glDrawElementsInstancedBaseVertexBaseInstance"); glad_glGetInternalformativ = (PFNGLGETINTERNALFORMATIVPROC)load("glGetInternalformativ"); glad_glGetActiveAtomicCounterBufferiv = (PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC)load("glGetActiveAtomicCounterBufferiv"); glad_glBindImageTexture = (PFNGLBINDIMAGETEXTUREPROC)load("glBindImageTexture"); glad_glMemoryBarrier = (PFNGLMEMORYBARRIERPROC)load("glMemoryBarrier"); glad_glTexStorage1D = (PFNGLTEXSTORAGE1DPROC)load("glTexStorage1D"); glad_glTexStorage2D = (PFNGLTEXSTORAGE2DPROC)load("glTexStorage2D"); glad_glTexStorage3D = (PFNGLTEXSTORAGE3DPROC)load("glTexStorage3D"); glad_glDrawTransformFeedbackInstanced = (PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC)load("glDrawTransformFeedbackInstanced"); glad_glDrawTransformFeedbackStreamInstanced = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC)load("glDrawTransformFeedbackStreamInstanced"); } static void load_GL_VERSION_4_3(GLADloadproc load) { if(!GLAD_GL_VERSION_4_3) return; glad_glClearBufferData = (PFNGLCLEARBUFFERDATAPROC)load("glClearBufferData"); glad_glClearBufferSubData = (PFNGLCLEARBUFFERSUBDATAPROC)load("glClearBufferSubData"); glad_glDispatchCompute = (PFNGLDISPATCHCOMPUTEPROC)load("glDispatchCompute"); glad_glDispatchComputeIndirect = (PFNGLDISPATCHCOMPUTEINDIRECTPROC)load("glDispatchComputeIndirect"); glad_glCopyImageSubData = (PFNGLCOPYIMAGESUBDATAPROC)load("glCopyImageSubData"); glad_glFramebufferParameteri = (PFNGLFRAMEBUFFERPARAMETERIPROC)load("glFramebufferParameteri"); glad_glGetFramebufferParameteriv = 
(PFNGLGETFRAMEBUFFERPARAMETERIVPROC)load("glGetFramebufferParameteriv"); glad_glGetInternalformati64v = (PFNGLGETINTERNALFORMATI64VPROC)load("glGetInternalformati64v"); glad_glInvalidateTexSubImage = (PFNGLINVALIDATETEXSUBIMAGEPROC)load("glInvalidateTexSubImage"); glad_glInvalidateTexImage = (PFNGLINVALIDATETEXIMAGEPROC)load("glInvalidateTexImage"); glad_glInvalidateBufferSubData = (PFNGLINVALIDATEBUFFERSUBDATAPROC)load("glInvalidateBufferSubData"); glad_glInvalidateBufferData = (PFNGLINVALIDATEBUFFERDATAPROC)load("glInvalidateBufferData"); glad_glInvalidateFramebuffer = (PFNGLINVALIDATEFRAMEBUFFERPROC)load("glInvalidateFramebuffer"); glad_glInvalidateSubFramebuffer = (PFNGLINVALIDATESUBFRAMEBUFFERPROC)load("glInvalidateSubFramebuffer"); glad_glMultiDrawArraysIndirect = (PFNGLMULTIDRAWARRAYSINDIRECTPROC)load("glMultiDrawArraysIndirect"); glad_glMultiDrawElementsIndirect = (PFNGLMULTIDRAWELEMENTSINDIRECTPROC)load("glMultiDrawElementsIndirect"); glad_glGetProgramInterfaceiv = (PFNGLGETPROGRAMINTERFACEIVPROC)load("glGetProgramInterfaceiv"); glad_glGetProgramResourceIndex = (PFNGLGETPROGRAMRESOURCEINDEXPROC)load("glGetProgramResourceIndex"); glad_glGetProgramResourceName = (PFNGLGETPROGRAMRESOURCENAMEPROC)load("glGetProgramResourceName"); glad_glGetProgramResourceiv = (PFNGLGETPROGRAMRESOURCEIVPROC)load("glGetProgramResourceiv"); glad_glGetProgramResourceLocation = (PFNGLGETPROGRAMRESOURCELOCATIONPROC)load("glGetProgramResourceLocation"); glad_glGetProgramResourceLocationIndex = (PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC)load("glGetProgramResourceLocationIndex"); glad_glShaderStorageBlockBinding = (PFNGLSHADERSTORAGEBLOCKBINDINGPROC)load("glShaderStorageBlockBinding"); glad_glTexBufferRange = (PFNGLTEXBUFFERRANGEPROC)load("glTexBufferRange"); glad_glTexStorage2DMultisample = (PFNGLTEXSTORAGE2DMULTISAMPLEPROC)load("glTexStorage2DMultisample"); glad_glTexStorage3DMultisample = (PFNGLTEXSTORAGE3DMULTISAMPLEPROC)load("glTexStorage3DMultisample"); glad_glTextureView = 
(PFNGLTEXTUREVIEWPROC)load("glTextureView"); glad_glBindVertexBuffer = (PFNGLBINDVERTEXBUFFERPROC)load("glBindVertexBuffer"); glad_glVertexAttribFormat = (PFNGLVERTEXATTRIBFORMATPROC)load("glVertexAttribFormat"); glad_glVertexAttribIFormat = (PFNGLVERTEXATTRIBIFORMATPROC)load("glVertexAttribIFormat"); glad_glVertexAttribLFormat = (PFNGLVERTEXATTRIBLFORMATPROC)load("glVertexAttribLFormat"); glad_glVertexAttribBinding = (PFNGLVERTEXATTRIBBINDINGPROC)load("glVertexAttribBinding"); glad_glVertexBindingDivisor = (PFNGLVERTEXBINDINGDIVISORPROC)load("glVertexBindingDivisor"); glad_glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC)load("glDebugMessageControl"); glad_glDebugMessageInsert = (PFNGLDEBUGMESSAGEINSERTPROC)load("glDebugMessageInsert"); glad_glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC)load("glDebugMessageCallback"); glad_glGetDebugMessageLog = (PFNGLGETDEBUGMESSAGELOGPROC)load("glGetDebugMessageLog"); glad_glPushDebugGroup = (PFNGLPUSHDEBUGGROUPPROC)load("glPushDebugGroup"); glad_glPopDebugGroup = (PFNGLPOPDEBUGGROUPPROC)load("glPopDebugGroup"); glad_glObjectLabel = (PFNGLOBJECTLABELPROC)load("glObjectLabel"); glad_glGetObjectLabel = (PFNGLGETOBJECTLABELPROC)load("glGetObjectLabel"); glad_glObjectPtrLabel = (PFNGLOBJECTPTRLABELPROC)load("glObjectPtrLabel"); glad_glGetObjectPtrLabel = (PFNGLGETOBJECTPTRLABELPROC)load("glGetObjectPtrLabel"); glad_glGetPointerv = (PFNGLGETPOINTERVPROC)load("glGetPointerv"); } static void load_GL_VERSION_4_4(GLADloadproc load) { if(!GLAD_GL_VERSION_4_4) return; glad_glBufferStorage = (PFNGLBUFFERSTORAGEPROC)load("glBufferStorage"); glad_glClearTexImage = (PFNGLCLEARTEXIMAGEPROC)load("glClearTexImage"); glad_glClearTexSubImage = (PFNGLCLEARTEXSUBIMAGEPROC)load("glClearTexSubImage"); glad_glBindBuffersBase = (PFNGLBINDBUFFERSBASEPROC)load("glBindBuffersBase"); glad_glBindBuffersRange = (PFNGLBINDBUFFERSRANGEPROC)load("glBindBuffersRange"); glad_glBindTextures = (PFNGLBINDTEXTURESPROC)load("glBindTextures"); 
glad_glBindSamplers = (PFNGLBINDSAMPLERSPROC)load("glBindSamplers"); glad_glBindImageTextures = (PFNGLBINDIMAGETEXTURESPROC)load("glBindImageTextures"); glad_glBindVertexBuffers = (PFNGLBINDVERTEXBUFFERSPROC)load("glBindVertexBuffers"); } static void load_GL_VERSION_4_5(GLADloadproc load) { if(!GLAD_GL_VERSION_4_5) return; glad_glClipControl = (PFNGLCLIPCONTROLPROC)load("glClipControl"); glad_glCreateTransformFeedbacks = (PFNGLCREATETRANSFORMFEEDBACKSPROC)load("glCreateTransformFeedbacks"); glad_glTransformFeedbackBufferBase = (PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC)load("glTransformFeedbackBufferBase"); glad_glTransformFeedbackBufferRange = (PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC)load("glTransformFeedbackBufferRange"); glad_glGetTransformFeedbackiv = (PFNGLGETTRANSFORMFEEDBACKIVPROC)load("glGetTransformFeedbackiv"); glad_glGetTransformFeedbacki_v = (PFNGLGETTRANSFORMFEEDBACKI_VPROC)load("glGetTransformFeedbacki_v"); glad_glGetTransformFeedbacki64_v = (PFNGLGETTRANSFORMFEEDBACKI64_VPROC)load("glGetTransformFeedbacki64_v"); glad_glCreateBuffers = (PFNGLCREATEBUFFERSPROC)load("glCreateBuffers"); glad_glNamedBufferStorage = (PFNGLNAMEDBUFFERSTORAGEPROC)load("glNamedBufferStorage"); glad_glNamedBufferData = (PFNGLNAMEDBUFFERDATAPROC)load("glNamedBufferData"); glad_glNamedBufferSubData = (PFNGLNAMEDBUFFERSUBDATAPROC)load("glNamedBufferSubData"); glad_glCopyNamedBufferSubData = (PFNGLCOPYNAMEDBUFFERSUBDATAPROC)load("glCopyNamedBufferSubData"); glad_glClearNamedBufferData = (PFNGLCLEARNAMEDBUFFERDATAPROC)load("glClearNamedBufferData"); glad_glClearNamedBufferSubData = (PFNGLCLEARNAMEDBUFFERSUBDATAPROC)load("glClearNamedBufferSubData"); glad_glMapNamedBuffer = (PFNGLMAPNAMEDBUFFERPROC)load("glMapNamedBuffer"); glad_glMapNamedBufferRange = (PFNGLMAPNAMEDBUFFERRANGEPROC)load("glMapNamedBufferRange"); glad_glUnmapNamedBuffer = (PFNGLUNMAPNAMEDBUFFERPROC)load("glUnmapNamedBuffer"); glad_glFlushMappedNamedBufferRange = 
(PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC)load("glFlushMappedNamedBufferRange"); glad_glGetNamedBufferParameteriv = (PFNGLGETNAMEDBUFFERPARAMETERIVPROC)load("glGetNamedBufferParameteriv"); glad_glGetNamedBufferParameteri64v = (PFNGLGETNAMEDBUFFERPARAMETERI64VPROC)load("glGetNamedBufferParameteri64v"); glad_glGetNamedBufferPointerv = (PFNGLGETNAMEDBUFFERPOINTERVPROC)load("glGetNamedBufferPointerv"); glad_glGetNamedBufferSubData = (PFNGLGETNAMEDBUFFERSUBDATAPROC)load("glGetNamedBufferSubData"); glad_glCreateFramebuffers = (PFNGLCREATEFRAMEBUFFERSPROC)load("glCreateFramebuffers"); glad_glNamedFramebufferRenderbuffer = (PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC)load("glNamedFramebufferRenderbuffer"); glad_glNamedFramebufferParameteri = (PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC)load("glNamedFramebufferParameteri"); glad_glNamedFramebufferTexture = (PFNGLNAMEDFRAMEBUFFERTEXTUREPROC)load("glNamedFramebufferTexture"); glad_glNamedFramebufferTextureLayer = (PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC)load("glNamedFramebufferTextureLayer"); glad_glNamedFramebufferDrawBuffer = (PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC)load("glNamedFramebufferDrawBuffer"); glad_glNamedFramebufferDrawBuffers = (PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC)load("glNamedFramebufferDrawBuffers"); glad_glNamedFramebufferReadBuffer = (PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC)load("glNamedFramebufferReadBuffer"); glad_glInvalidateNamedFramebufferData = (PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC)load("glInvalidateNamedFramebufferData"); glad_glInvalidateNamedFramebufferSubData = (PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC)load("glInvalidateNamedFramebufferSubData"); glad_glClearNamedFramebufferiv = (PFNGLCLEARNAMEDFRAMEBUFFERIVPROC)load("glClearNamedFramebufferiv"); glad_glClearNamedFramebufferuiv = (PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC)load("glClearNamedFramebufferuiv"); glad_glClearNamedFramebufferfv = (PFNGLCLEARNAMEDFRAMEBUFFERFVPROC)load("glClearNamedFramebufferfv"); glad_glClearNamedFramebufferfi = 
(PFNGLCLEARNAMEDFRAMEBUFFERFIPROC)load("glClearNamedFramebufferfi"); glad_glBlitNamedFramebuffer = (PFNGLBLITNAMEDFRAMEBUFFERPROC)load("glBlitNamedFramebuffer"); glad_glCheckNamedFramebufferStatus = (PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC)load("glCheckNamedFramebufferStatus"); glad_glGetNamedFramebufferParameteriv = (PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC)load("glGetNamedFramebufferParameteriv"); glad_glGetNamedFramebufferAttachmentParameteriv = (PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC)load("glGetNamedFramebufferAttachmentParameteriv"); glad_glCreateRenderbuffers = (PFNGLCREATERENDERBUFFERSPROC)load("glCreateRenderbuffers"); glad_glNamedRenderbufferStorage = (PFNGLNAMEDRENDERBUFFERSTORAGEPROC)load("glNamedRenderbufferStorage"); glad_glNamedRenderbufferStorageMultisample = (PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC)load("glNamedRenderbufferStorageMultisample"); glad_glGetNamedRenderbufferParameteriv = (PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC)load("glGetNamedRenderbufferParameteriv"); glad_glCreateTextures = (PFNGLCREATETEXTURESPROC)load("glCreateTextures"); glad_glTextureBuffer = (PFNGLTEXTUREBUFFERPROC)load("glTextureBuffer"); glad_glTextureBufferRange = (PFNGLTEXTUREBUFFERRANGEPROC)load("glTextureBufferRange"); glad_glTextureStorage1D = (PFNGLTEXTURESTORAGE1DPROC)load("glTextureStorage1D"); glad_glTextureStorage2D = (PFNGLTEXTURESTORAGE2DPROC)load("glTextureStorage2D"); glad_glTextureStorage3D = (PFNGLTEXTURESTORAGE3DPROC)load("glTextureStorage3D"); glad_glTextureStorage2DMultisample = (PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC)load("glTextureStorage2DMultisample"); glad_glTextureStorage3DMultisample = (PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC)load("glTextureStorage3DMultisample"); glad_glTextureSubImage1D = (PFNGLTEXTURESUBIMAGE1DPROC)load("glTextureSubImage1D"); glad_glTextureSubImage2D = (PFNGLTEXTURESUBIMAGE2DPROC)load("glTextureSubImage2D"); glad_glTextureSubImage3D = (PFNGLTEXTURESUBIMAGE3DPROC)load("glTextureSubImage3D"); 
glad_glCompressedTextureSubImage1D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC)load("glCompressedTextureSubImage1D"); glad_glCompressedTextureSubImage2D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC)load("glCompressedTextureSubImage2D"); glad_glCompressedTextureSubImage3D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC)load("glCompressedTextureSubImage3D"); glad_glCopyTextureSubImage1D = (PFNGLCOPYTEXTURESUBIMAGE1DPROC)load("glCopyTextureSubImage1D"); glad_glCopyTextureSubImage2D = (PFNGLCOPYTEXTURESUBIMAGE2DPROC)load("glCopyTextureSubImage2D"); glad_glCopyTextureSubImage3D = (PFNGLCOPYTEXTURESUBIMAGE3DPROC)load("glCopyTextureSubImage3D"); glad_glTextureParameterf = (PFNGLTEXTUREPARAMETERFPROC)load("glTextureParameterf"); glad_glTextureParameterfv = (PFNGLTEXTUREPARAMETERFVPROC)load("glTextureParameterfv"); glad_glTextureParameteri = (PFNGLTEXTUREPARAMETERIPROC)load("glTextureParameteri"); glad_glTextureParameterIiv = (PFNGLTEXTUREPARAMETERIIVPROC)load("glTextureParameterIiv"); glad_glTextureParameterIuiv = (PFNGLTEXTUREPARAMETERIUIVPROC)load("glTextureParameterIuiv"); glad_glTextureParameteriv = (PFNGLTEXTUREPARAMETERIVPROC)load("glTextureParameteriv"); glad_glGenerateTextureMipmap = (PFNGLGENERATETEXTUREMIPMAPPROC)load("glGenerateTextureMipmap"); glad_glBindTextureUnit = (PFNGLBINDTEXTUREUNITPROC)load("glBindTextureUnit"); glad_glGetTextureImage = (PFNGLGETTEXTUREIMAGEPROC)load("glGetTextureImage"); glad_glGetCompressedTextureImage = (PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC)load("glGetCompressedTextureImage"); glad_glGetTextureLevelParameterfv = (PFNGLGETTEXTURELEVELPARAMETERFVPROC)load("glGetTextureLevelParameterfv"); glad_glGetTextureLevelParameteriv = (PFNGLGETTEXTURELEVELPARAMETERIVPROC)load("glGetTextureLevelParameteriv"); glad_glGetTextureParameterfv = (PFNGLGETTEXTUREPARAMETERFVPROC)load("glGetTextureParameterfv"); glad_glGetTextureParameterIiv = (PFNGLGETTEXTUREPARAMETERIIVPROC)load("glGetTextureParameterIiv"); glad_glGetTextureParameterIuiv = 
(PFNGLGETTEXTUREPARAMETERIUIVPROC)load("glGetTextureParameterIuiv"); glad_glGetTextureParameteriv = (PFNGLGETTEXTUREPARAMETERIVPROC)load("glGetTextureParameteriv"); glad_glCreateVertexArrays = (PFNGLCREATEVERTEXARRAYSPROC)load("glCreateVertexArrays"); glad_glDisableVertexArrayAttrib = (PFNGLDISABLEVERTEXARRAYATTRIBPROC)load("glDisableVertexArrayAttrib"); glad_glEnableVertexArrayAttrib = (PFNGLENABLEVERTEXARRAYATTRIBPROC)load("glEnableVertexArrayAttrib"); glad_glVertexArrayElementBuffer = (PFNGLVERTEXARRAYELEMENTBUFFERPROC)load("glVertexArrayElementBuffer"); glad_glVertexArrayVertexBuffer = (PFNGLVERTEXARRAYVERTEXBUFFERPROC)load("glVertexArrayVertexBuffer"); glad_glVertexArrayVertexBuffers = (PFNGLVERTEXARRAYVERTEXBUFFERSPROC)load("glVertexArrayVertexBuffers"); glad_glVertexArrayAttribBinding = (PFNGLVERTEXARRAYATTRIBBINDINGPROC)load("glVertexArrayAttribBinding"); glad_glVertexArrayAttribFormat = (PFNGLVERTEXARRAYATTRIBFORMATPROC)load("glVertexArrayAttribFormat"); glad_glVertexArrayAttribIFormat = (PFNGLVERTEXARRAYATTRIBIFORMATPROC)load("glVertexArrayAttribIFormat"); glad_glVertexArrayAttribLFormat = (PFNGLVERTEXARRAYATTRIBLFORMATPROC)load("glVertexArrayAttribLFormat"); glad_glVertexArrayBindingDivisor = (PFNGLVERTEXARRAYBINDINGDIVISORPROC)load("glVertexArrayBindingDivisor"); glad_glGetVertexArrayiv = (PFNGLGETVERTEXARRAYIVPROC)load("glGetVertexArrayiv"); glad_glGetVertexArrayIndexediv = (PFNGLGETVERTEXARRAYINDEXEDIVPROC)load("glGetVertexArrayIndexediv"); glad_glGetVertexArrayIndexed64iv = (PFNGLGETVERTEXARRAYINDEXED64IVPROC)load("glGetVertexArrayIndexed64iv"); glad_glCreateSamplers = (PFNGLCREATESAMPLERSPROC)load("glCreateSamplers"); glad_glCreateProgramPipelines = (PFNGLCREATEPROGRAMPIPELINESPROC)load("glCreateProgramPipelines"); glad_glCreateQueries = (PFNGLCREATEQUERIESPROC)load("glCreateQueries"); glad_glGetQueryBufferObjecti64v = (PFNGLGETQUERYBUFFEROBJECTI64VPROC)load("glGetQueryBufferObjecti64v"); glad_glGetQueryBufferObjectiv = 
(PFNGLGETQUERYBUFFEROBJECTIVPROC)load("glGetQueryBufferObjectiv"); glad_glGetQueryBufferObjectui64v = (PFNGLGETQUERYBUFFEROBJECTUI64VPROC)load("glGetQueryBufferObjectui64v"); glad_glGetQueryBufferObjectuiv = (PFNGLGETQUERYBUFFEROBJECTUIVPROC)load("glGetQueryBufferObjectuiv"); glad_glMemoryBarrierByRegion = (PFNGLMEMORYBARRIERBYREGIONPROC)load("glMemoryBarrierByRegion"); glad_glGetTextureSubImage = (PFNGLGETTEXTURESUBIMAGEPROC)load("glGetTextureSubImage"); glad_glGetCompressedTextureSubImage = (PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC)load("glGetCompressedTextureSubImage"); glad_glGetGraphicsResetStatus = (PFNGLGETGRAPHICSRESETSTATUSPROC)load("glGetGraphicsResetStatus"); glad_glGetnCompressedTexImage = (PFNGLGETNCOMPRESSEDTEXIMAGEPROC)load("glGetnCompressedTexImage"); glad_glGetnTexImage = (PFNGLGETNTEXIMAGEPROC)load("glGetnTexImage"); glad_glGetnUniformdv = (PFNGLGETNUNIFORMDVPROC)load("glGetnUniformdv"); glad_glGetnUniformfv = (PFNGLGETNUNIFORMFVPROC)load("glGetnUniformfv"); glad_glGetnUniformiv = (PFNGLGETNUNIFORMIVPROC)load("glGetnUniformiv"); glad_glGetnUniformuiv = (PFNGLGETNUNIFORMUIVPROC)load("glGetnUniformuiv"); glad_glReadnPixels = (PFNGLREADNPIXELSPROC)load("glReadnPixels"); glad_glGetnMapdv = (PFNGLGETNMAPDVPROC)load("glGetnMapdv"); glad_glGetnMapfv = (PFNGLGETNMAPFVPROC)load("glGetnMapfv"); glad_glGetnMapiv = (PFNGLGETNMAPIVPROC)load("glGetnMapiv"); glad_glGetnPixelMapfv = (PFNGLGETNPIXELMAPFVPROC)load("glGetnPixelMapfv"); glad_glGetnPixelMapuiv = (PFNGLGETNPIXELMAPUIVPROC)load("glGetnPixelMapuiv"); glad_glGetnPixelMapusv = (PFNGLGETNPIXELMAPUSVPROC)load("glGetnPixelMapusv"); glad_glGetnPolygonStipple = (PFNGLGETNPOLYGONSTIPPLEPROC)load("glGetnPolygonStipple"); glad_glGetnColorTable = (PFNGLGETNCOLORTABLEPROC)load("glGetnColorTable"); glad_glGetnConvolutionFilter = (PFNGLGETNCONVOLUTIONFILTERPROC)load("glGetnConvolutionFilter"); glad_glGetnSeparableFilter = (PFNGLGETNSEPARABLEFILTERPROC)load("glGetnSeparableFilter"); glad_glGetnHistogram = 
(PFNGLGETNHISTOGRAMPROC)load("glGetnHistogram"); glad_glGetnMinmax = (PFNGLGETNMINMAXPROC)load("glGetnMinmax"); glad_glTextureBarrier = (PFNGLTEXTUREBARRIERPROC)load("glTextureBarrier"); } static void load_GL_VERSION_4_6(GLADloadproc load) { if(!GLAD_GL_VERSION_4_6) return; glad_glSpecializeShader = (PFNGLSPECIALIZESHADERPROC)load("glSpecializeShader"); glad_glMultiDrawArraysIndirectCount = (PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC)load("glMultiDrawArraysIndirectCount"); glad_glMultiDrawElementsIndirectCount = (PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC)load("glMultiDrawElementsIndirectCount"); glad_glPolygonOffsetClamp = (PFNGLPOLYGONOFFSETCLAMPPROC)load("glPolygonOffsetClamp"); } static int find_extensionsGL(void) { if (!get_exts()) return 0; (void)&has_ext; free_exts(); return 1; } static void find_coreGL(void) { /* Thank you @elmindreda * https://github.com/elmindreda/greg/blob/master/templates/greg.c.in#L176 * https://github.com/glfw/glfw/blob/master/src/context.c#L36 */ int i, major, minor; const char* version; const char* prefixes[] = { "OpenGL ES-CM ", "OpenGL ES-CL ", "OpenGL ES ", NULL }; version = (const char*) glGetString(GL_VERSION); if (!version) return; for (i = 0; prefixes[i]; i++) { const size_t length = strlen(prefixes[i]); if (strncmp(version, prefixes[i], length) == 0) { version += length; break; } } /* PR #18 */ #ifdef _MSC_VER sscanf_s(version, "%d.%d", &major, &minor); #else sscanf(version, "%d.%d", &major, &minor); #endif GLVersion.major = major; GLVersion.minor = minor; max_loaded_major = major; max_loaded_minor = minor; GLAD_GL_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1; GLAD_GL_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1; GLAD_GL_VERSION_1_2 = (major == 1 && minor >= 2) || major > 1; GLAD_GL_VERSION_1_3 = (major == 1 && minor >= 3) || major > 1; GLAD_GL_VERSION_1_4 = (major == 1 && minor >= 4) || major > 1; GLAD_GL_VERSION_1_5 = (major == 1 && minor >= 5) || major > 1; GLAD_GL_VERSION_2_0 = (major == 2 && minor >= 0) || 
major > 2; GLAD_GL_VERSION_2_1 = (major == 2 && minor >= 1) || major > 2; GLAD_GL_VERSION_3_0 = (major == 3 && minor >= 0) || major > 3; GLAD_GL_VERSION_3_1 = (major == 3 && minor >= 1) || major > 3; GLAD_GL_VERSION_3_2 = (major == 3 && minor >= 2) || major > 3; GLAD_GL_VERSION_3_3 = (major == 3 && minor >= 3) || major > 3; GLAD_GL_VERSION_4_0 = (major == 4 && minor >= 0) || major > 4; GLAD_GL_VERSION_4_1 = (major == 4 && minor >= 1) || major > 4; GLAD_GL_VERSION_4_2 = (major == 4 && minor >= 2) || major > 4; GLAD_GL_VERSION_4_3 = (major == 4 && minor >= 3) || major > 4; GLAD_GL_VERSION_4_4 = (major == 4 && minor >= 4) || major > 4; GLAD_GL_VERSION_4_5 = (major == 4 && minor >= 5) || major > 4; GLAD_GL_VERSION_4_6 = (major == 4 && minor >= 6) || major > 4; if (GLVersion.major > 4 || (GLVersion.major >= 4 && GLVersion.minor >= 6)) { max_loaded_major = 4; max_loaded_minor = 6; } } int gladLoadGLLoader(GLADloadproc load) { GLVersion.major = 0; GLVersion.minor = 0; glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); if(glGetString == NULL) return 0; if(glGetString(GL_VERSION) == NULL) return 0; find_coreGL(); load_GL_VERSION_1_0(load); load_GL_VERSION_1_1(load); load_GL_VERSION_1_2(load); load_GL_VERSION_1_3(load); load_GL_VERSION_1_4(load); load_GL_VERSION_1_5(load); load_GL_VERSION_2_0(load); load_GL_VERSION_2_1(load); load_GL_VERSION_3_0(load); load_GL_VERSION_3_1(load); load_GL_VERSION_3_2(load); load_GL_VERSION_3_3(load); load_GL_VERSION_4_0(load); load_GL_VERSION_4_1(load); load_GL_VERSION_4_2(load); load_GL_VERSION_4_3(load); load_GL_VERSION_4_4(load); load_GL_VERSION_4_5(load); load_GL_VERSION_4_6(load); if (!find_extensionsGL()) return 0; return GLVersion.major != 0 || GLVersion.minor != 0; }
/* ==== concatenation marker (original filename): 135825.c ==== */
/**
 * @file skam437xx_pinmux.c
 *
 * @brief
 *  This is the pin configuration for IDK EVM AM437x.
 *
 * =============================================================================
 * Copyright (c) 2015, Texas Instruments Incorporated
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * *  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * *  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * *  Neither the name of Texas Instruments Incorporated nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "board_internal.h"
#include "types.h"
#include "hw_types.h"
#include "chipdb.h"
#include "pinmux.h"
#include "error.h"
#include "am43xx_pinmux.h"
#include "debug.h"

/*
 * Apply the board-level pinmux: UART0, I2C0/I2C1, MMCSD0, QSPI0, GPIO0/GPIO5
 * and CPSW0 pads are configured in sequence; after the first failure the
 * remaining modules are skipped.
 *
 * NOTE(review): `status` is accumulated through the chain but the function
 * unconditionally returns BOARD_SOK, so a pinmux failure is silently
 * swallowed — confirm whether an error code should propagate to the caller.
 */
Board_STATUS Board_pinmuxConfig (void)
{
    int32_t status;

    /* UART */
    status = PINMUXModuleConfig(CHIPDB_MOD_ID_UART, 0U, NULL);

    /* I2C */
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_I2C, 0U, NULL);
    }

    /* I2C */
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_I2C, 1U, NULL);
    }

    /* MMCSD */
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_MMCSD, 0U, NULL);
    }

    /* QSPI */
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_QSPI, 0U, NULL);
    }

    /*GPIO*/
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_GPIO, 0U, NULL);
    }

    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_GPIO, 5U, NULL);
    }

    /* CPSW */
    if(S_PASS == status)
    {
        status = PINMUXModuleConfig(CHIPDB_MOD_ID_CPSW, 0U, NULL);
    }

    return BOARD_SOK;
}

/*
 * Program the control-module pad registers for one module instance.
 *
 * moduleId  chip-database module identifier (CHIPDB_MOD_ID_*).
 * instNum   instance number of the module to configure.
 * pParam1   optional pointer to a uint16_t `optParam` selector: when non-NULL
 *           only the single pin whose optParam matches is written; when NULL
 *           every pin of the instance is written.
 *
 * Returns S_PASS on success, E_INVALID_MODULE_ID if the module is not in the
 * board pinmux table, E_INST_NOT_SUPP if the instance is not listed, or
 * E_FAIL if a requested optParam pin was not found.
 */
int32_t PINMUXModuleConfig(chipdbModuleID_t moduleId, uint32_t instNum,
                           void* pParam1)
{
    pinmuxModuleCfg_t* pModuleData = NULL;
    pinmuxPerCfg_t* pInstanceData = NULL;
    volatile const pinmuxBoardCfg_t* pPinmuxData = NULL;
    uint32_t ctrlModBase = CHIPDBBaseAddress(CHIPDB_MOD_ID_CONTROL_MODULE, 0);
    int32_t status = E_FAIL;
    uint32_t index = 0;

    /* Get module Data: linear scan of the board table, terminated by the
     * CHIPDB_MOD_ID_INVALID sentinel; the (S_PASS != status) condition stops
     * the scan at the first match. */
    pPinmuxData = gEvmskPinmuxData;
    ASSERT(NULL != pPinmuxData);

    status = E_INVALID_MODULE_ID;
    for(index = 0; ((S_PASS != status) &&
        (CHIPDB_MOD_ID_INVALID != pPinmuxData[index].moduleId)); index++)
    {
        if(pPinmuxData[index].moduleId == moduleId)
        {
            pModuleData = pPinmuxData[index].modulePinCfg;
            ASSERT(NULL != pModuleData);
            status = S_PASS;
        }
    }

    /* Get instance Data: same sentinel-terminated scan over the module's
     * instance list. */
    if(S_PASS == status)
    {
        status = E_INST_NOT_SUPP;
        for(index = 0; ((S_PASS != status) &&
            (CHIPDB_INVALID_INSTANCE_NUM != pModuleData[index].modInstNum));
            index++)
        {
            if(pModuleData[index].modInstNum == instNum)
            {
                pInstanceData = pModuleData[index].instPins;
                /* NOTE(review): no trailing semicolon after this ASSERT —
                 * harmless only if the ASSERT macro expands with its own
                 * terminator; confirm against the debug.h definition. */
                ASSERT(NULL != pInstanceData)
                status = S_PASS;
            }
        }
    }

    /* Configure Pinmux: write pinSettings into the control-module register
     * at pinOffset for each pin, or for the single optParam-selected pin. */
    if(S_PASS == status)
    {
        for(index = 0;
            ((uint16_t)PINMUX_INVALID_PIN != pInstanceData[index].pinOffset);
            index++)
        {
            if(NULL != pParam1)
            {
                if(pInstanceData[index].optParam == *(uint16_t*)pParam1)
                {
                    HW_WR_REG32((ctrlModBase + pInstanceData[index].pinOffset),
                                pInstanceData[index].pinSettings);
                    status = S_PASS;
                    break;
                }
            }
            else
            {
                HW_WR_REG32((ctrlModBase + pInstanceData[index].pinOffset),
                            pInstanceData[index].pinSettings);
            }
        }
        /* Requested pin selector never matched before the sentinel. */
        if((NULL != pParam1) &&
           ((uint16_t)PINMUX_INVALID_PIN == pInstanceData[index].pinOffset))
        {
            status = E_FAIL;
        }
    }
    return status;
}
/* ==== concatenation marker (original filename): 940146.c ==== */
/* * mini.c: The new Mono code generator. * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * Copyright 2002-2003 Ximian, Inc. * Copyright 2003-2010 Novell, Inc. * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) */ #define MONO_LLVM_IN_MINI 1 #include <config.h> #include <signal.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <math.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #include <mono/utils/memcheck.h> #include <mono/metadata/assembly.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/threads.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/io-layer/io-layer.h> #include "mono/metadata/profiler.h" #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/environment.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internal.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/verify.h> #include <mono/metadata/verify-internals.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/attach.h> #include <mono/metadata/runtime.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-logger-internal.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-tls.h> #include <mono/utils/dtrace.h> #include "mini.h" #include "mini-llvm.h" #include "tasklets.h" #include <string.h> #include <ctype.h> #include "trace.h" #include "version.h" #include "jit-icalls.h" #include "debug-mini.h" #include "mini-gc.h" #include "debugger-agent.h" static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex); static guint32 
default_opt = 0; static gboolean default_opt_set = FALSE; MonoNativeTlsKey mono_jit_tls_id; #ifdef MONO_HAVE_FAST_TLS MONO_FAST_TLS_DECLARE(mono_jit_tls); #endif #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT #define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1 #endif MonoTraceSpec *mono_jit_trace_calls = NULL; gboolean mono_compile_aot = FALSE; /* If this is set, no code is generated dynamically, everything is taken from AOT files */ gboolean mono_aot_only = FALSE; /* Whenever to use IMT */ #ifdef MONO_ARCH_HAVE_IMT gboolean mono_use_imt = TRUE; #else gboolean mono_use_imt = FALSE; #endif MonoMethodDesc *mono_inject_async_exc_method = NULL; int mono_inject_async_exc_pos; MonoMethodDesc *mono_break_at_bb_method = NULL; int mono_break_at_bb_bb_num; gboolean mono_do_x86_stack_align = TRUE; const char *mono_build_date; gboolean mono_do_signal_chaining; static gboolean mono_using_xdebug; static int mini_verbose = 0; /* * This flag controls whenever the runtime uses LLVM for JIT compilation, and whenever * it can load AOT code compiled by LLVM. */ gboolean mono_use_llvm = FALSE; #define mono_jit_lock() EnterCriticalSection (&jit_mutex) #define mono_jit_unlock() LeaveCriticalSection (&jit_mutex) static CRITICAL_SECTION jit_mutex; static MonoCodeManager *global_codeman = NULL; static GHashTable *jit_icall_name_hash = NULL; static MonoDebugOptions debug_options; #ifdef VALGRIND_JIT_REGISTER_MAP static int valgrind_register = 0; #endif /* * Table written to by the debugger with a 1-based index into the * mono_breakpoint_info table, which contains changes made to * the JIT instructions by the debugger. 
*/ gssize mono_breakpoint_info_index [MONO_BREAKPOINT_ARRAY_SIZE]; /* Whenever to check for pending exceptions in managed-to-native wrappers */ gboolean check_for_pending_exc = TRUE; /* Whenever to disable passing/returning small valuetypes in registers for managed methods */ gboolean disable_vtypes_in_regs = FALSE; gboolean mono_dont_free_global_codeman; gpointer mono_realloc_native_code (MonoCompile *cfg) { #if defined(__default_codegen__) return g_realloc (cfg->native_code, cfg->code_size); #elif defined(__native_client_codegen__) guint old_padding; gpointer native_code; guint alignment_check; /* Save the old alignment offset so we can re-align after the realloc. */ old_padding = (guint)(cfg->native_code - cfg->native_code_alloc); cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc, cfg->code_size + kNaClAlignment ); /* Align native_code to next nearest kNaClAlignment byte. */ native_code = (guint)cfg->native_code_alloc + kNaClAlignment; native_code = (guint)native_code & ~kNaClAlignmentMask; /* Shift the data to be 32-byte aligned again. */ memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size); alignment_check = (guint)native_code & kNaClAlignmentMask; g_assert (alignment_check == 0); return native_code; #else g_assert_not_reached (); return cfg->native_code; #endif } #ifdef __native_client_codegen__ /* Prevent instructions from straddling a 32-byte alignment boundary. */ /* Instructions longer than 32 bytes must be aligned internally. */ /* IN: pcode, instlen */ /* OUT: pcode */ void mono_nacl_align_inst(guint8 **pcode, int instlen) { int space_in_block; space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask); if (G_UNLIKELY (instlen >= kNaClAlignment)) { g_assert_not_reached(); } else if (instlen > space_in_block) { *pcode = mono_arch_nacl_pad(*pcode, space_in_block); } } /* Move emitted call sequence to the end of a kNaClAlignment-byte block. 
*/
/* IN: start pointer to start of call sequence */
/* IN: pcode pointer to end of call sequence (current "IP") */
/* OUT: start pointer to the start of the call sequence after padding */
/* OUT: pcode pointer to the end of the call sequence after padding */
void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
  const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
  guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
  guint8 *temp;

  /* Save the already-emitted call bytes, re-emit them after padding so the
   * call ends exactly at a bundle boundary (required so the return address
   * is bundle-aligned under NaCl). */
  const size_t length = (size_t)((*pcode)-(*start));
  g_assert(length < MAX_NACL_CALL_LENGTH);
  memcpy(copy_of_call, *start, length);
  temp = mono_nacl_pad_call(*start, (guint8)length);
  memcpy(temp, copy_of_call, length);
  (*start) = temp;
  (*pcode) = temp + length;
}

/* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
/*    code    pointer to buffer for emitting code */
/*    ilength length of call instruction */
guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength)
{
  /* Room left in the current kNaClAlignment-byte bundle. */
  int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
  int padding = freeSpaceInBlock - ilength;

  if (padding < 0) {
    /* There isn't enough space in this block for the instruction. */
    /* Fill this block and start a new one. 
*/ code = mono_arch_nacl_pad(code, freeSpaceInBlock); freeSpaceInBlock = kNaClAlignment; padding = freeSpaceInBlock - ilength; } g_assert(ilength > 0); g_assert(padding >= 0); g_assert(padding < kNaClAlignment); if (0 == padding) return code; return mono_arch_nacl_pad(code, padding); } guint8 *mono_nacl_align(guint8 *code) { int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask); if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding); return code; } void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji) { MonoJumpInfo *patch_info; for (patch_info = ji; patch_info; patch_info = patch_info->next) { unsigned char *ip = patch_info->ip.i + code; ip = mono_arch_nacl_skip_nops(ip); patch_info->ip.i = ip - code; } } #endif /* __native_client_codegen__ */ gboolean mono_running_on_valgrind (void) { if (RUNNING_ON_VALGRIND){ #ifdef VALGRIND_JIT_REGISTER_MAP valgrind_register = TRUE; #endif return TRUE; } else return FALSE; } typedef struct { MonoExceptionClause *clause; MonoBasicBlock *basic_block; int start_offset; } TryBlockHole; typedef struct { void *ip; MonoMethod *method; } FindTrampUserData; static void find_tramp (gpointer key, gpointer value, gpointer user_data) { FindTrampUserData *ud = (FindTrampUserData*)user_data; if (value == ud->ip) ud->method = (MonoMethod*)key; } /* debug function */ G_GNUC_UNUSED static char* get_method_from_ip (void *ip) { MonoJitInfo *ji; char *method; char *res; MonoDomain *domain = mono_domain_get (); MonoDebugSourceLocation *location; FindTrampUserData user_data; if (!domain) domain = mono_get_root_domain (); ji = mono_jit_info_table_find (domain, ip); if (!ji) { user_data.ip = ip; user_data.method = NULL; mono_domain_lock (domain); g_hash_table_foreach (domain_jit_info (domain)->jit_trampoline_hash, find_tramp, &user_data); mono_domain_unlock (domain); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); res = g_strdup_printf ("<%p - JIT trampoline for %s>", ip, 
mname); g_free (mname); return res; } else return NULL; } method = mono_method_full_name (ji->method, TRUE); /* FIXME: unused ? */ location = mono_debug_lookup_source_location (ji->method, (guint32)((guint8*)ip - (guint8*)ji->code_start), domain); res = g_strdup_printf (" %s + 0x%x (%p %p) [%p - %s]", method, (int)((char*)ip - (char*)ji->code_start), ji->code_start, (char*)ji->code_start + ji->code_size, domain, domain->friendly_name); mono_debug_free_source_location (location); g_free (method); return res; } /** * mono_pmip: * @ip: an instruction pointer address * * This method is used from a debugger to get the name of the * method at address @ip. This routine is typically invoked from * a debugger like this: * * (gdb) print mono_pmip ($pc) * * Returns: the name of the method at address @ip. */ G_GNUC_UNUSED char * mono_pmip (void *ip) { return get_method_from_ip (ip); } /** * mono_print_method_from_ip * @ip: an instruction pointer address * * This method is used from a debugger to get the name of the * method at address @ip. * * This prints the name of the method at address @ip in the standard * output. Unlike mono_pmip which returns a string, this routine * prints the value on the standard output. 
*/ #ifdef __GNUC__ /* Prevent the linker from optimizing this away in embedding setups to help debugging */ __attribute__((used)) #endif void mono_print_method_from_ip (void *ip) { MonoJitInfo *ji; char *method; MonoDebugSourceLocation *source; MonoDomain *domain = mono_domain_get (); MonoDomain *target_domain = mono_domain_get (); FindTrampUserData user_data; ji = mini_jit_info_table_find (domain, ip, &target_domain); if (!ji) { user_data.ip = ip; user_data.method = NULL; mono_domain_lock (domain); g_hash_table_foreach (domain_jit_info (domain)->jit_trampoline_hash, find_tramp, &user_data); mono_domain_unlock (domain); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); printf ("IP %p is a JIT trampoline for %s\n", ip, mname); g_free (mname); } else g_print ("No method at %p\n", ip); fflush (stdout); return; } method = mono_method_full_name (ji->method, TRUE); source = mono_debug_lookup_source_location (ji->method, (guint32)((guint8*)ip - (guint8*)ji->code_start), target_domain); g_print ("IP %p at offset 0x%x of method %s (%p %p)[domain %p - %s]\n", ip, (int)((char*)ip - (char*)ji->code_start), method, ji->code_start, (char*)ji->code_start + ji->code_size, target_domain, target_domain->friendly_name); if (source) g_print ("%s:%d\n", source->source_file, source->row); fflush (stdout); mono_debug_free_source_location (source); g_free (method); } /* * mono_method_same_domain: * * Determine whenever two compiled methods are in the same domain, thus * the address of the callee can be embedded in the caller. */ gboolean mono_method_same_domain (MonoJitInfo *caller, MonoJitInfo *callee) { if (!caller || !callee) return FALSE; /* * If the call was made from domain-neutral to domain-specific * code, we can't patch the call site. 
*/ if (caller->domain_neutral && !callee->domain_neutral) return FALSE; if ((caller->method->klass == mono_defaults.appdomain_class) && (strstr (caller->method->name, "InvokeInDomain"))) { /* The InvokeInDomain methods change the current appdomain */ return FALSE; } return TRUE; } /* * mono_global_codeman_reserve: * * Allocate code memory from the global code manager. */ void *mono_global_codeman_reserve (int size) { void *ptr; if (mono_aot_only) g_error ("Attempting to allocate from the global code manager while running with --aot-only.\n"); if (!global_codeman) { /* This can happen during startup */ global_codeman = mono_code_manager_new (); return mono_code_manager_reserve (global_codeman, size); } else { mono_jit_lock (); ptr = mono_code_manager_reserve (global_codeman, size); mono_jit_unlock (); return ptr; } } #if defined(__native_client_codegen__) && defined(__native_client__) /* Given the temporary buffer (allocated by mono_global_codeman_reserve) into * which we are generating code, return a pointer to the destination in the * dynamic code segment into which the code will be copied when * mono_global_codeman_commit is called. * LOCKING: Acquires the jit lock. */ void* nacl_global_codeman_get_dest (void *data) { void *dest; mono_jit_lock (); dest = nacl_code_manager_get_code_dest (global_codeman, data); mono_jit_unlock (); return dest; } void mono_global_codeman_commit (void *data, int size, int newsize) { mono_jit_lock (); mono_code_manager_commit (global_codeman, data, size, newsize); mono_jit_unlock (); } /* * Convenience function which calls mono_global_codeman_commit to validate and * copy the code. The caller sets *buf_base and *buf_size to the start and size * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to * the byte after the last instruction byte. On return, *buf_base will point to * the start of the copied in the code segment, and *code_end will point after * the end of the copied code. 
*/
void
nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
{
	guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);

	mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
	/* Relocate the caller's pointers into the validated code segment. */
	*code_end = tmp + (*code_end - *buf_base);
	*buf_base = tmp;
}
#else
/* no-op versions of Native Client functions */
void*
nacl_global_codeman_get_dest (void *data)
{
	return data;
}

void
mono_global_codeman_commit (void *data, int size, int newsize)
{
}

void
nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
{
}

#endif /* __native_client__ */

/**
 * mono_create_unwind_op:
 *
 *   Create an unwind op with the given parameters.
 * The result is heap-allocated with g_new0; the caller owns it.
 */
MonoUnwindOp*
mono_create_unwind_op (int when, int tag, int reg, int val)
{
	MonoUnwindOp *op = g_new0 (MonoUnwindOp, 1);

	op->op = tag;
	op->reg = reg;
	op->val = val;
	op->when = when;

	return op;
}

/**
 * mono_emit_unwind_op:
 *
 *   Add an unwind op with the given parameters for the list of unwind ops stored in
 * cfg->unwind_ops.
 */
void
mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
{
	/* Allocated from the method mempool, so it lives as long as the MonoCompile. */
	MonoUnwindOp *op = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));

	op->op = tag;
	op->reg = reg;
	op->val = val;
	op->when = when;

	cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
}

/* Create a jump-info token from the mempool; when CONTEXT is non-NULL a copy
 * of it is stored in the token and has_context is set. */
MonoJumpInfoToken *
mono_jump_info_token_new2 (MonoMemPool *mp, MonoImage *image, guint32 token, MonoGenericContext *context)
{
	MonoJumpInfoToken *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoToken));
	res->image = image;
	res->token = token;
	res->has_context = context != NULL;
	if (context)
		memcpy (&res->context, context, sizeof (MonoGenericContext));

	return res;
}

/* Same as mono_jump_info_token_new2 () but with no generic context. */
MonoJumpInfoToken *
mono_jump_info_token_new (MonoMemPool *mp, MonoImage *image, guint32 token)
{
	return mono_jump_info_token_new2 (mp, image, token, NULL);
}

/*
 * mono_tramp_info_create:
 *
 *   Create a MonoTrampInfo structure from the arguments. This function assumes ownership
 * of NAME, JI, and UNWIND_OPS. 
*/
MonoTrampInfo*
mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops)
{
	MonoTrampInfo *info = g_new0 (MonoTrampInfo, 1);

	/* The cast is intentional: ownership of NAME transfers to the info and it
	 * is released in mono_tramp_info_free (). */
	info->name = (char*)name;
	info->code = code;
	info->code_size = code_size;
	info->ji = ji;
	info->unwind_ops = unwind_ops;

	return info;
}

/* Free INFO together with its name and unwind op list (ji is currently not freed). */
void
mono_tramp_info_free (MonoTrampInfo *info)
{
	GSList *l;

	g_free (info->name);

	// FIXME: ji
	for (l = info->unwind_ops; l; l = l->next)
		g_free (l->data);
	g_slist_free (info->unwind_ops);
	g_free (info);
}

/* Reset a MonoMethodVar to its "unassigned" defaults: no register, index ID. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)

/**
 * mono_unlink_bblock:
 *
 *   Unlink two basic blocks.
 */
void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	int i, pos;
	gboolean found;

	/* Remove TO from FROM's out-edge array, compacting the remaining entries. */
	found = FALSE;
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (found) {
		pos = 0;
		for (i = 0; i < from->out_count; ++i) {
			if (from->out_bb [i] != to)
				from->out_bb [pos ++] = from->out_bb [i];
		}
		g_assert (pos == from->out_count - 1);
		from->out_count--;
	}

	/* Symmetrically remove FROM from TO's in-edge array. */
	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (found) {
		pos = 0;
		for (i = 0; i < to->in_count; ++i) {
			if (to->in_bb [i] != from)
				to->in_bb [pos ++] = to->in_bb [i];
		}
		g_assert (pos == to->in_count - 1);
		to->in_count--;
	}
}

/*
 * mono_bblocks_linked:
 *
 *   Return whenever BB1 and BB2 are linked in the CFG. 
*/
gboolean
mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
{
	int i;

	/* Linked means BB2 appears among BB1's out-edges. */
	for (i = 0; i < bb1->out_count; ++i) {
		if (bb1->out_bb [i] == bb2)
			return TRUE;
	}

	return FALSE;
}

/* Return the encoded region id (((clause_index + 1) << 8) | kind | flags) for
 * OFFSET when it lies inside a filter or handler of some exception clause,
 * or -1 when it lies in none. Try bodies themselves are not reported here. */
static int
mono_find_block_region_notry (MonoCompile *cfg, int offset)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* Filter code runs between filter_offset and handler_offset. */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;

		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
			else
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
		}
	}

	return -1;
}

/*
 * mono_get_block_region_notry:
 *
 *   Return the region corresponding to REGION, ignoring try clauses nested inside
 * finally clauses.
 */
int
mono_get_block_region_notry (MonoCompile *cfg, int region)
{
	if ((region & (0xf << 4)) == MONO_REGION_TRY) {
		MonoMethodHeader *header = cfg->header;

		/*
		 * This can happen if a try clause is nested inside a finally clause. 
*/ int clause_index = (region >> 8) - 1; g_assert (clause_index >= 0 && clause_index < header->num_clauses); region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset); } return region; } MonoInst * mono_find_spvar_for_region (MonoCompile *cfg, int region) { region = mono_get_block_region_notry (cfg, region); return g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region)); } static void df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array) { int i; array [*dfn] = start; /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */ for (i = 0; i < start->out_count; ++i) { if (start->out_bb [i]->dfn) continue; (*dfn)++; start->out_bb [i]->dfn = *dfn; start->out_bb [i]->df_parent = start; array [*dfn] = start->out_bb [i]; df_visit (start->out_bb [i], dfn, array); } } guint32 mono_reverse_branch_op (guint32 opcode) { static const int reverse_map [] = { CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE, CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN }; static const int reverse_fmap [] = { OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE, OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN }; static const int reverse_lmap [] = { OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE, OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN }; static const int reverse_imap [] = { OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE, OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN }; if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) { opcode = reverse_map [opcode - CEE_BEQ]; } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) { opcode = reverse_fmap [opcode - OP_FBEQ]; } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) { opcode = reverse_lmap [opcode - OP_LBEQ]; } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) { opcode = reverse_imap [opcode - OP_IBEQ]; } else g_assert_not_reached (); return opcode; } guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type) { if (type->byref) return 
OP_STORE_MEMBASE_REG; handle_enum: switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return OP_STOREI1_MEMBASE_REG; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: return OP_STOREI2_MEMBASE_REG; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_STOREI4_MEMBASE_REG; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return OP_STORE_MEMBASE_REG; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return OP_STORE_MEMBASE_REG; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_STOREI8_MEMBASE_REG; case MONO_TYPE_R4: return OP_STORER4_MEMBASE_REG; case MONO_TYPE_R8: return OP_STORER8_MEMBASE_REG; case MONO_TYPE_VALUETYPE: if (type->data.klass->enumtype) { type = mono_class_enum_basetype (type->data.klass); goto handle_enum; } if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type))) return OP_STOREX_MEMBASE; return OP_STOREV_MEMBASE; case MONO_TYPE_TYPEDBYREF: return OP_STOREV_MEMBASE; case MONO_TYPE_GENERICINST: type = &type->data.generic_class->container_class->byval_arg; goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* FIXME: all the arguments must be references for now, * later look inside cfg and see if the arg num is * really a reference */ g_assert (cfg->generic_sharing_context); return OP_STORE_MEMBASE_REG; default: g_error ("unknown type 0x%02x in type_to_store_membase", type->type); } return -1; } guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type) { if (type->byref) return OP_LOAD_MEMBASE; type = mono_type_get_underlying_type (type); switch (type->type) { case MONO_TYPE_I1: return OP_LOADI1_MEMBASE; case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return OP_LOADU1_MEMBASE; case MONO_TYPE_I2: return OP_LOADI2_MEMBASE; case MONO_TYPE_U2: case MONO_TYPE_CHAR: return OP_LOADU2_MEMBASE; case MONO_TYPE_I4: return OP_LOADI4_MEMBASE; case MONO_TYPE_U4: return OP_LOADU4_MEMBASE; case MONO_TYPE_I: case MONO_TYPE_U: 
case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return OP_LOAD_MEMBASE; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return OP_LOAD_MEMBASE; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_LOADI8_MEMBASE; case MONO_TYPE_R4: return OP_LOADR4_MEMBASE; case MONO_TYPE_R8: return OP_LOADR8_MEMBASE; case MONO_TYPE_VALUETYPE: if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type))) return OP_LOADX_MEMBASE; case MONO_TYPE_TYPEDBYREF: return OP_LOADV_MEMBASE; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (type)) return OP_LOADV_MEMBASE; else return OP_LOAD_MEMBASE; break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* FIXME: all the arguments must be references for now, * later look inside cfg and see if the arg num is * really a reference */ g_assert (cfg->generic_sharing_context); return OP_LOAD_MEMBASE; default: g_error ("unknown type 0x%02x in type_to_load_membase", type->type); } return -1; } static guint mini_type_to_ldind (MonoCompile* cfg, MonoType *type) { if (cfg->generic_sharing_context && !type->byref) { /* FIXME: all the arguments must be references for now, * later look inside cfg and see if the arg num is * really a reference */ if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) return CEE_LDIND_REF; } return mono_type_to_ldind (type); } guint mini_type_to_stind (MonoCompile* cfg, MonoType *type) { if (cfg->generic_sharing_context && !type->byref) { /* FIXME: all the arguments must be references for now, * later look inside cfg and see if the arg num is * really a reference */ if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) return CEE_STIND_REF; } return mono_type_to_stind (type); } int mono_op_imm_to_op (int opcode) { switch (opcode) { case OP_ADD_IMM: #if SIZEOF_REGISTER == 4 return OP_IADD; #else return OP_LADD; #endif case OP_IADD_IMM: return OP_IADD; case OP_LADD_IMM: return OP_LADD; case OP_ISUB_IMM: return OP_ISUB; case OP_LSUB_IMM: 
return OP_LSUB;
	case OP_IMUL_IMM:
		return OP_IMUL;
	/* For the generic (register-sized) _IMM opcodes the result depends on the
	 * native register width: 32-bit targets get the I variant, 64-bit the L one. */
	case OP_AND_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IAND;
#else
		return OP_LAND;
#endif
	case OP_OR_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IOR;
#else
		return OP_LOR;
#endif
	case OP_XOR_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IXOR;
#else
		return OP_LXOR;
#endif
	case OP_IAND_IMM:
		return OP_IAND;
	case OP_LAND_IMM:
		return OP_LAND;
	case OP_IOR_IMM:
		return OP_IOR;
	case OP_LOR_IMM:
		return OP_LOR;
	case OP_IXOR_IMM:
		return OP_IXOR;
	case OP_LXOR_IMM:
		return OP_LXOR;
	case OP_ISHL_IMM:
		return OP_ISHL;
	case OP_LSHL_IMM:
		return OP_LSHL;
	case OP_ISHR_IMM:
		return OP_ISHR;
	case OP_LSHR_IMM:
		return OP_LSHR;
	case OP_ISHR_UN_IMM:
		return OP_ISHR_UN;
	case OP_LSHR_UN_IMM:
		return OP_LSHR_UN;
	case OP_IDIV_IMM:
		return OP_IDIV;
	case OP_IDIV_UN_IMM:
		return OP_IDIV_UN;
	case OP_IREM_UN_IMM:
		return OP_IREM_UN;
	case OP_IREM_IMM:
		return OP_IREM;
	case OP_DIV_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IDIV;
#else
		return OP_LDIV;
#endif
	case OP_REM_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IREM;
#else
		return OP_LREM;
#endif
	case OP_ADDCC_IMM:
		return OP_ADDCC;
	case OP_ADC_IMM:
		return OP_ADC;
	case OP_SUBCC_IMM:
		return OP_SUBCC;
	case OP_SBB_IMM:
		return OP_SBB;
	case OP_IADC_IMM:
		return OP_IADC;
	case OP_ISBB_IMM:
		return OP_ISBB;
	case OP_COMPARE_IMM:
		return OP_COMPARE;
	case OP_ICOMPARE_IMM:
		return OP_ICOMPARE;
	case OP_LOCALLOC_IMM:
		return OP_LOCALLOC;
	default:
		/* Unhandled _IMM opcode: print its name for diagnosis and abort. */
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return -1;
	}
}

/*
 * mono_decompose_op_imm:
 *
 *   Replace the OP_.._IMM INS with its non IMM variant. 
*/
void
mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
	MonoInst *temp;

	/* Materialize the immediate into a fresh integer vreg just before INS. */
	MONO_INST_NEW (cfg, temp, OP_ICONST);
	temp->inst_c0 = ins->inst_imm;
	temp->dreg = mono_alloc_ireg (cfg);
	mono_bblock_insert_before_ins (bb, ins, temp);
	ins->opcode = mono_op_imm_to_op (ins->opcode);
	/* OP_LOCALLOC takes its (former immediate) size in sreg1; all other
	 * decomposed opcodes take the immediate as their second source. */
	if (ins->opcode == OP_LOCALLOC)
		ins->sreg1 = temp->dreg;
	else
		ins->sreg2 = temp->dreg;

	bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
}

/* Record INST as the definition of VREG, growing the vreg_to_inst table on demand. */
static void
set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
{
	if (vreg >= cfg->vreg_to_inst_len) {
		MonoInst **tmp = cfg->vreg_to_inst;
		int size = cfg->vreg_to_inst_len;

		/* Double the capacity (starting at 32) until VREG fits, then copy the
		 * old entries over; allocation is zeroed so new slots read as NULL. */
		while (vreg >= cfg->vreg_to_inst_len)
			cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
		cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
		if (size)
			memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
	}
	cfg->vreg_to_inst [vreg] = inst;
}

/* True for non-byref 8-byte integer types (after unwrapping enums). */
#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
/* True for non-byref floating point types. */
#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))

#ifdef DISABLE_JIT

MonoInst*
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
	return NULL;
}

#else

MonoInst*
mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
	MonoInst *inst;
	int num = cfg->num_varinfo;
	gboolean regpair;

	if ((num + 1) >= cfg->varinfo_count) {
		int orig_count = cfg->varinfo_count;
		cfg->varinfo_count = cfg->varinfo_count ? 
(cfg->varinfo_count * 2) : 64; cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count); cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count); memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar)); } cfg->stat_allocate_var++; MONO_INST_NEW (cfg, inst, opcode); inst->inst_c0 = num; inst->inst_vtype = type; inst->klass = mono_class_from_mono_type (type); type_to_eval_stack_type (cfg, type, inst); /* if set to 1 the variable is native */ inst->backend.is_pinvoke = 0; inst->dreg = vreg; if (cfg->compute_gc_maps) { if (type->byref) { mono_mark_vreg_as_mp (cfg, vreg); } else { MonoType *t = mini_type_get_underlying_type (NULL, type); if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || mini_type_is_reference (cfg, t)) { inst->flags |= MONO_INST_GC_TRACK; mono_mark_vreg_as_ref (cfg, vreg); } } } cfg->varinfo [num] = inst; MONO_INIT_VARINFO (&cfg->vars [num], num); MONO_VARINFO (cfg, num)->vreg = vreg; if (vreg != -1) set_vreg_to_inst (cfg, vreg, inst); #if SIZEOF_REGISTER == 4 #ifdef MONO_ARCH_SOFT_FLOAT regpair = mono_type_is_long (type) || mono_type_is_float (type); #else regpair = mono_type_is_long (type); #endif #else regpair = FALSE; #endif if (regpair) { MonoInst *tree; /* * These two cannot be allocated using create_var_for_vreg since that would * put it into the cfg->varinfo array, confusing many parts of the JIT. */ /* * Set flags to VOLATILE so SSA skips it. 
*/ if (cfg->verbose_level >= 4) { printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2); } #ifdef MONO_ARCH_SOFT_FLOAT if (cfg->opt & MONO_OPT_SSA) { if (mono_type_is_float (type)) inst->flags = MONO_INST_VOLATILE; } #endif /* Allocate a dummy MonoInst for the first vreg */ MONO_INST_NEW (cfg, tree, OP_LOCAL); tree->dreg = inst->dreg + 1; if (cfg->opt & MONO_OPT_SSA) tree->flags = MONO_INST_VOLATILE; tree->inst_c0 = num; tree->type = STACK_I4; tree->inst_vtype = &mono_defaults.int32_class->byval_arg; tree->klass = mono_class_from_mono_type (tree->inst_vtype); set_vreg_to_inst (cfg, inst->dreg + 1, tree); /* Allocate a dummy MonoInst for the second vreg */ MONO_INST_NEW (cfg, tree, OP_LOCAL); tree->dreg = inst->dreg + 2; if (cfg->opt & MONO_OPT_SSA) tree->flags = MONO_INST_VOLATILE; tree->inst_c0 = num; tree->type = STACK_I4; tree->inst_vtype = &mono_defaults.int32_class->byval_arg; tree->klass = mono_class_from_mono_type (tree->inst_vtype); set_vreg_to_inst (cfg, inst->dreg + 2, tree); } cfg->num_varinfo++; if (cfg->verbose_level > 2) g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type)); return inst; } MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode) { int dreg; if (mono_type_is_long (type)) dreg = mono_alloc_dreg (cfg, STACK_I8); #ifdef MONO_ARCH_SOFT_FLOAT else if (mono_type_is_float (type)) dreg = mono_alloc_dreg (cfg, STACK_R8); #endif else /* All the others are unified */ dreg = mono_alloc_preg (cfg); return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg); } /* * Transform a MonoInst into a load from the variable of index var_index. 
*/
void
mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index)
{
	memset (dest, 0, sizeof (MonoInst));
	dest->inst_i0 = cfg->varinfo [var_index];
	dest->opcode = mini_type_to_ldind (cfg, dest->inst_i0->inst_vtype);
	type_to_eval_stack_type (cfg, dest->inst_i0->inst_vtype, dest);
	dest->klass = dest->inst_i0->klass;
}

#endif

/* Mark VREG as holding a GC reference, growing the vreg_is_ref table on demand. */
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
	if (vreg >= cfg->vreg_is_ref_len) {
		gboolean *tmp = cfg->vreg_is_ref;
		int size = cfg->vreg_is_ref_len;

		/* Double the capacity (starting at 32) until VREG fits. */
		while (vreg >= cfg->vreg_is_ref_len)
			cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
		cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
		if (size)
			memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
	}
	cfg->vreg_is_ref [vreg] = TRUE;
}

/* Mark VREG as holding a managed pointer (set for byref vars by the caller),
 * growing the vreg_is_mp table on demand. */
void
mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
{
	if (vreg >= cfg->vreg_is_mp_len) {
		gboolean *tmp = cfg->vreg_is_mp;
		int size = cfg->vreg_is_mp_len;

		while (vreg >= cfg->vreg_is_mp_len)
			cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
		cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
		if (size)
			memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
	}
	cfg->vreg_is_mp [vreg] = TRUE;
}

/* Map an evaluation-stack type tag back to a MonoType. */
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
	case STACK_MP:
		/*
		 * this if used to be commented without any specific reason, but
		 * it breaks #80235 when commented
		 */
		if (ins->klass)
			return &ins->klass->this_arg;
		else
			return &mono_defaults.object_class->this_arg;
	case STACK_OBJ:
		/* ins->klass may not be set for ldnull. 
* Also, if we have a boxed valuetype, we want an object lass, * not the valuetype class */ if (ins->klass && !ins->klass->valuetype) return &ins->klass->byval_arg; return &mono_defaults.object_class->byval_arg; case STACK_VTYPE: return &ins->klass->byval_arg; default: g_error ("stack type %d to montype not handled\n", ins->type); } return NULL; } MonoType* mono_type_from_stack_type (MonoInst *ins) { return type_from_stack_type (ins); } /* * mono_add_ins_to_end: * * Same as MONO_ADD_INS, but add INST before any branches at the end of BB. */ void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst) { int opcode; if (!bb->code) { MONO_ADD_INS (bb, inst); return; } switch (bb->last_ins->opcode) { case OP_BR: case OP_BR_REG: case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: case OP_SWITCH: mono_bblock_insert_before_ins (bb, bb->last_ins, inst); break; default: if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) { /* Need to insert the ins before the compare */ if (bb->code == bb->last_ins) { mono_bblock_insert_before_ins (bb, bb->last_ins, inst); return; } if (bb->code->next == bb->last_ins) { /* Only two instructions */ opcode = bb->code->opcode; if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) { /* NEW IR */ mono_bblock_insert_before_ins (bb, bb->code, inst); } else { mono_bblock_insert_before_ins (bb, bb->last_ins, inst); } } else { opcode = bb->last_ins->prev->opcode; if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) { /* NEW IR */ mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst); } else { mono_bblock_insert_before_ins (bb, bb->last_ins, inst); } } } else 
MONO_ADD_INS (bb, inst); break; } } void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks) { MonoJumpInfo *ji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo)); MonoJumpInfoBBTable *table; table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable)); table->table = bbs; table->table_size = num_blocks; ji->ip.label = label; ji->type = MONO_PATCH_INFO_SWITCH; ji->data.table = table; ji->next = cfg->patch_info; cfg->patch_info = ji; } static MonoMethodSignature * mono_get_array_new_va_signature (int arity) { static GHashTable *sighash = NULL; MonoMethodSignature *res; int i; mono_jit_lock (); if (!sighash) { sighash = g_hash_table_new (NULL, NULL); } else if ((res = g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) { mono_jit_unlock (); return res; } res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1); res->pinvoke = 1; #ifdef MONO_ARCH_VARARG_ICALLS /* Only set this only some archs since not all backends can handle varargs+pinvoke */ res->call_convention = MONO_CALL_VARARG; #endif #ifdef TARGET_WIN32 res->call_convention = MONO_CALL_C; #endif res->params [0] = &mono_defaults.int_class->byval_arg; for (i = 0; i < arity; i++) res->params [i + 1] = &mono_defaults.int_class->byval_arg; res->ret = &mono_defaults.object_class->byval_arg; g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res); mono_jit_unlock (); return res; } MonoJitICallInfo * mono_get_array_new_va_icall (int rank) { MonoMethodSignature *esig; char icall_name [256]; char *name; MonoJitICallInfo *info; /* Need to register the icall so it gets an icall wrapper */ sprintf (icall_name, "ves_array_new_va_%d", rank); mono_jit_lock (); info = mono_find_jit_icall_by_name (icall_name); if (info == NULL) { esig = mono_get_array_new_va_signature (rank); name = g_strdup (icall_name); info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE); g_hash_table_insert (jit_icall_name_hash, name, name); } 
mono_jit_unlock (); return info; } gboolean mini_class_is_system_array (MonoClass *klass) { if (klass->parent == mono_defaults.array_class) return TRUE; else return FALSE; } gboolean mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method) { MonoAssembly *assembly = method->klass->image->assembly; if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) return FALSE; if (assembly->in_gac || assembly->image == mono_defaults.corlib) return FALSE; if (mono_security_get_mode () != MONO_SECURITY_MODE_NONE) return FALSE; return mono_assembly_has_skip_verification (assembly); } /* * mini_method_verify: * * Verify the method using the new verfier. * * Returns true if the method is invalid. */ static gboolean mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile) { GSList *tmp, *res; gboolean is_fulltrust; MonoLoaderError *error; if (method->verification_success) return FALSE; if (!mono_verifier_is_enabled_for_method (method)) return FALSE; /*skip verification implies the assembly must be */ is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method); res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust); if ((error = mono_loader_get_last_error ())) { if (fail_compile) cfg->exception_type = error->exception_type; else mono_loader_clear_error (); if (res) mono_free_verify_list (res); return TRUE; } if (res) { for (tmp = res; tmp; tmp = tmp->next) { MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data; if (info->info.status == MONO_VERIFY_ERROR) { if (fail_compile) { char *method_name = mono_method_full_name (method, TRUE); cfg->exception_type = info->exception_type; cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message); g_free (method_name); } mono_free_verify_list (res); return TRUE; } if (info->info.status == 
MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) { if (fail_compile) { char *method_name = mono_method_full_name (method, TRUE); cfg->exception_type = info->exception_type; cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message); g_free (method_name); } mono_free_verify_list (res); return TRUE; } } mono_free_verify_list (res); } method->verification_success = 1; return FALSE; } /*Returns true if something went wrong*/ gboolean mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile) { MonoMethod *method_definition = method; gboolean dont_verify = method->klass->image->assembly->corlib_internal; while (method_definition->is_inflated) { MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition; method_definition = imethod->declaring; } return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile); } static gconstpointer mono_icall_get_wrapper_full (MonoJitICallInfo* callinfo, gboolean do_compile) { char *name; MonoMethod *wrapper; gconstpointer trampoline; MonoDomain *domain = mono_get_root_domain (); if (callinfo->wrapper) { return callinfo->wrapper; } if (callinfo->trampoline) return callinfo->trampoline; /* * We use the lock on the root domain instead of the JIT lock to protect * callinfo->trampoline, since we do a lot of stuff inside the critical section. 
*/ mono_loader_lock (); /*FIXME mono_compile_method requires the loader lock, by large.*/ mono_domain_lock (domain); if (callinfo->trampoline) { mono_domain_unlock (domain); mono_loader_unlock (); return callinfo->trampoline; } name = g_strdup_printf ("__icall_wrapper_%s", callinfo->name); wrapper = mono_marshal_get_icall_wrapper (callinfo->sig, name, callinfo->func, check_for_pending_exc); g_free (name); if (do_compile) trampoline = mono_compile_method (wrapper); else trampoline = mono_create_ftnptr (domain, mono_create_jit_trampoline_in_domain (domain, wrapper)); mono_register_jit_icall_wrapper (callinfo, trampoline); callinfo->trampoline = trampoline; mono_domain_unlock (domain); mono_loader_unlock (); return callinfo->trampoline; } gconstpointer mono_icall_get_wrapper (MonoJitICallInfo* callinfo) { return mono_icall_get_wrapper_full (callinfo, FALSE); } static void mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji) { if (!domain_jit_info (domain)->dynamic_code_hash) domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL); g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji); } static MonoJitDynamicMethodInfo* mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method) { MonoJitDynamicMethodInfo *res; if (domain_jit_info (domain)->dynamic_code_hash) res = g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method); else res = NULL; return res; } typedef struct { MonoClass *vtype; GList *active, *inactive; GSList *slots; } StackSlotInfo; static gint compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b) { MonoMethodVar *v1 = (MonoMethodVar*)a; MonoMethodVar *v2 = (MonoMethodVar*)b; if (v1 == v2) return 0; else if (v1->interval->range && v2->interval->range) return v1->interval->range->from - v2->interval->range->from; else if (v1->interval->range) return -1; else return 1; } #ifndef DISABLE_JIT #if 0 #define LSCAN_DEBUG(a) do { a; 
} while (0) #else #define LSCAN_DEBUG(a) #endif static gint32* mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align) { int i, slot, offset, size; guint32 align; MonoMethodVar *vmv; MonoInst *inst; gint32 *offsets; GList *vars = NULL, *l, *unhandled; StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info; MonoType *t; int nvtypes; gboolean reuse_slot; LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE))); scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED); vtype_stack_slots = NULL; nvtypes = 0; offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo); for (i = 0; i < cfg->num_varinfo; ++i) offsets [i] = -1; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { inst = cfg->varinfo [i]; vmv = MONO_VARINFO (cfg, i); if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET) continue; vars = g_list_prepend (vars, vmv); } vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func); /* Sanity check */ /* i = 0; for (unhandled = vars; unhandled; unhandled = unhandled->next) { MonoMethodVar *current = unhandled->data; if (current->interval->range) { g_assert (current->interval->range->from >= i); i = current->interval->range->from; } } */ offset = 0; *stack_align = 0; for (unhandled = vars; unhandled; unhandled = unhandled->next) { MonoMethodVar *current = unhandled->data; vmv = current; inst = cfg->varinfo [vmv->idx]; /* inst->backend.is_pinvoke indicates native sized value types, this is used by the * pinvoke wrappers when they call functions returning structures */ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) { size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align); } else { int ialign; size = mono_type_size (inst->inst_vtype, 
&ialign); align = ialign; if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (inst->inst_vtype))) align = 16; } reuse_slot = TRUE; if (cfg->disable_reuse_stack_slots) reuse_slot = FALSE; t = mono_type_get_underlying_type (inst->inst_vtype); switch (t->type) { case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) { slot_info = &scalar_stack_slots [t->type]; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: if (!vtype_stack_slots) vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256); for (i = 0; i < nvtypes; ++i) if (t->data.klass == vtype_stack_slots [i].vtype) break; if (i < nvtypes) slot_info = &vtype_stack_slots [i]; else { g_assert (nvtypes < 256); vtype_stack_slots [nvtypes].vtype = t->data.klass; slot_info = &vtype_stack_slots [nvtypes]; nvtypes ++; } if (cfg->disable_reuse_ref_stack_slots) reuse_slot = FALSE; break; case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: #if SIZEOF_VOID_P == 4 case MONO_TYPE_I4: #else case MONO_TYPE_I8: #endif if (cfg->disable_ref_noref_stack_slot_share) { slot_info = &scalar_stack_slots [MONO_TYPE_I]; break; } /* Fall through */ case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: case MONO_TYPE_STRING: /* Share non-float stack slots of the same size */ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS]; if (cfg->disable_reuse_ref_stack_slots) reuse_slot = FALSE; break; default: slot_info = &scalar_stack_slots [t->type]; } slot = 0xffffff; if (cfg->comp_done & MONO_COMP_LIVENESS) { int pos; gboolean changed; //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos); if (!current->interval->range) { if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) pos = ~0; else { /* Dead */ inst->flags |= MONO_INST_IS_DEAD; continue; } } else pos = current->interval->range->from; LSCAN_DEBUG (printf ("process R%d ", inst->dreg)); if (current->interval->range) LSCAN_DEBUG 
(mono_linterval_print (current->interval)); LSCAN_DEBUG (printf ("\n")); /* Check for intervals in active which expired or inactive */ changed = TRUE; /* FIXME: Optimize this */ while (changed) { changed = FALSE; for (l = slot_info->active; l != NULL; l = l->next) { MonoMethodVar *v = (MonoMethodVar*)l->data; if (v->interval->last_range->to < pos) { slot_info->active = g_list_delete_link (slot_info->active, l); slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx])); LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx])); changed = TRUE; break; } else if (!mono_linterval_covers (v->interval, pos)) { slot_info->inactive = g_list_append (slot_info->inactive, v); slot_info->active = g_list_delete_link (slot_info->active, l); LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg)); changed = TRUE; break; } } } /* Check for intervals in inactive which expired or active */ changed = TRUE; /* FIXME: Optimize this */ while (changed) { changed = FALSE; for (l = slot_info->inactive; l != NULL; l = l->next) { MonoMethodVar *v = (MonoMethodVar*)l->data; if (v->interval->last_range->to < pos) { slot_info->inactive = g_list_delete_link (slot_info->inactive, l); // FIXME: Enabling this seems to cause impossible to debug crashes //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx])); LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx])); changed = TRUE; break; } else if (mono_linterval_covers (v->interval, pos)) { slot_info->active = g_list_append (slot_info->active, v); slot_info->inactive = g_list_delete_link (slot_info->inactive, l); LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg)); changed = TRUE; break; } } } /* * This also handles the case when the variable is used in 
an * exception region, as liveness info is not computed there. */ /* * FIXME: All valuetypes are marked as INDIRECT because of LDADDR * opcodes. */ if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) { if (slot_info->slots) { slot = GPOINTER_TO_INT (slot_info->slots->data); slot_info->slots = slot_info->slots->next; } /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */ slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE); } } #if 0 { static int count = 0; count ++; if (count == atoi (getenv ("COUNT3"))) printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE)); if (count > atoi (getenv ("COUNT3"))) slot = 0xffffff; else { mono_print_ins (inst); } } #endif LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot)); if (inst->flags & MONO_INST_LMF) { size = sizeof (MonoLMF); align = sizeof (mgreg_t); reuse_slot = FALSE; } if (!reuse_slot) slot = 0xffffff; if (slot == 0xffffff) { /* * Allways allocate valuetypes to sizeof (gpointer) to allow more * efficient copying (and to work around the fact that OP_MEMCPY * and OP_MEMSET ignores alignment). 
*/ if (MONO_TYPE_ISSTRUCT (t)) { align = MAX (align, sizeof (gpointer)); align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t))); } if (backward) { offset += size; offset += align - 1; offset &= ~(align - 1); slot = offset; } else { offset += align - 1; offset &= ~(align - 1); slot = offset; offset += size; } if (*stack_align == 0) *stack_align = align; } offsets [vmv->idx] = slot; } g_list_free (vars); for (i = 0; i < MONO_TYPE_PINNED; ++i) { if (scalar_stack_slots [i].active) g_list_free (scalar_stack_slots [i].active); } for (i = 0; i < nvtypes; ++i) { if (vtype_stack_slots [i].active) g_list_free (vtype_stack_slots [i].active); } cfg->stat_locals_stack_size += offset; *stack_size = offset; return offsets; } /* * mono_allocate_stack_slots: * * Allocate stack slots for all non register allocated variables using a * linear scan algorithm. * Returns: an array of stack offsets. * STACK_SIZE is set to the amount of stack space needed. * STACK_ALIGN is set to the alignment needed by the locals area. 
*/ gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align) { int i, slot, offset, size; guint32 align; MonoMethodVar *vmv; MonoInst *inst; gint32 *offsets; GList *vars = NULL, *l; StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info; MonoType *t; int nvtypes; gboolean reuse_slot; if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval) return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align); scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED); vtype_stack_slots = NULL; nvtypes = 0; offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo); for (i = 0; i < cfg->num_varinfo; ++i) offsets [i] = -1; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { inst = cfg->varinfo [i]; vmv = MONO_VARINFO (cfg, i); if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET) continue; vars = g_list_prepend (vars, vmv); } vars = mono_varlist_sort (cfg, vars, 0); offset = 0; *stack_align = sizeof(mgreg_t); for (l = vars; l; l = l->next) { vmv = l->data; inst = cfg->varinfo [vmv->idx]; /* inst->backend.is_pinvoke indicates native sized value types, this is used by the * pinvoke wrappers when they call functions returning structures */ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) { size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align); } else { int ialign; size = mono_type_size (inst->inst_vtype, &ialign); align = ialign; if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (inst->inst_vtype))) align = 16; } reuse_slot = TRUE; if (cfg->disable_reuse_stack_slots) reuse_slot = FALSE; t = mono_type_get_underlying_type (inst->inst_vtype); if (t->byref) { slot_info = &scalar_stack_slots [MONO_TYPE_I]; } else { switch (t->type) { case MONO_TYPE_GENERICINST: if 
(!mono_type_generic_inst_is_valuetype (t)) { slot_info = &scalar_stack_slots [t->type]; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: if (!vtype_stack_slots) vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256); for (i = 0; i < nvtypes; ++i) if (t->data.klass == vtype_stack_slots [i].vtype) break; if (i < nvtypes) slot_info = &vtype_stack_slots [i]; else { g_assert (nvtypes < 256); vtype_stack_slots [nvtypes].vtype = t->data.klass; slot_info = &vtype_stack_slots [nvtypes]; nvtypes ++; } if (cfg->disable_reuse_ref_stack_slots) reuse_slot = FALSE; break; case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: #if SIZEOF_VOID_P == 4 case MONO_TYPE_I4: #else case MONO_TYPE_I8: #endif if (cfg->disable_ref_noref_stack_slot_share) { slot_info = &scalar_stack_slots [MONO_TYPE_I]; break; } /* Fall through */ case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: case MONO_TYPE_STRING: /* Share non-float stack slots of the same size */ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS]; if (cfg->disable_reuse_ref_stack_slots) reuse_slot = FALSE; break; default: slot_info = &scalar_stack_slots [t->type]; } } slot = 0xffffff; if (cfg->comp_done & MONO_COMP_LIVENESS) { //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos); /* expire old intervals in active */ while (slot_info->active) { MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data; if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos) break; //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg); slot_info->active = g_list_delete_link (slot_info->active, slot_info->active); slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx])); } /* * This also handles the case when the variable is used in an * exception region, as liveness info is not 
computed there. */ /* * FIXME: All valuetypes are marked as INDIRECT because of LDADDR * opcodes. */ if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) { if (slot_info->slots) { slot = GPOINTER_TO_INT (slot_info->slots->data); slot_info->slots = slot_info->slots->next; } slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE); } } { static int count = 0; count ++; /* if (count == atoi (getenv ("COUNT"))) printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE)); if (count > atoi (getenv ("COUNT"))) slot = 0xffffff; else { mono_print_ins (inst); } */ } if (inst->flags & MONO_INST_LMF) { /* * This variable represents a MonoLMF structure, which has no corresponding * CLR type, so hard-code its size/alignment. */ size = sizeof (MonoLMF); align = sizeof (mgreg_t); reuse_slot = FALSE; } if (!reuse_slot) slot = 0xffffff; if (slot == 0xffffff) { /* * Allways allocate valuetypes to sizeof (gpointer) to allow more * efficient copying (and to work around the fact that OP_MEMCPY * and OP_MEMSET ignores alignment). */ if (MONO_TYPE_ISSTRUCT (t)) { align = MAX (align, sizeof (gpointer)); align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t))); /* * Align the size too so the code generated for passing vtypes in * registers doesn't overwrite random locals. 
*/ size = (size + (align - 1)) & ~(align -1); } if (backward) { offset += size; offset += align - 1; offset &= ~(align - 1); slot = offset; } else { offset += align - 1; offset &= ~(align - 1); slot = offset; offset += size; } *stack_align = MAX (*stack_align, align); } offsets [vmv->idx] = slot; } g_list_free (vars); for (i = 0; i < MONO_TYPE_PINNED; ++i) { if (scalar_stack_slots [i].active) g_list_free (scalar_stack_slots [i].active); } for (i = 0; i < nvtypes; ++i) { if (vtype_stack_slots [i].active) g_list_free (vtype_stack_slots [i].active); } cfg->stat_locals_stack_size += offset; *stack_size = offset; return offsets; } #else gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align) { g_assert_not_reached (); return NULL; } #endif /* DISABLE_JIT */ #define EMUL_HIT_SHIFT 3 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1) /* small hit bitmap cache */ static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0}; static short emul_opcode_num = 0; static short emul_opcode_alloced = 0; static short *emul_opcode_opcodes = NULL; static MonoJitICallInfo **emul_opcode_map = NULL; MonoJitICallInfo * mono_find_jit_opcode_emulation (int opcode) { g_assert (opcode >= 0 && opcode <= OP_LAST); if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) { int i; for (i = 0; i < emul_opcode_num; ++i) { if (emul_opcode_opcodes [i] == opcode) return emul_opcode_map [i]; } } return NULL; } void mono_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, gboolean no_throw) { MonoJitICallInfo *info; MonoMethodSignature *sig = mono_create_icall_signature (sigstr); g_assert (!sig->hasthis); g_assert (sig->param_count < 3); info = mono_register_jit_icall (func, name, sig, no_throw); if (emul_opcode_num >= emul_opcode_alloced) { int incr = emul_opcode_alloced? 
emul_opcode_alloced/2: 16; emul_opcode_alloced += incr; emul_opcode_map = g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced); emul_opcode_opcodes = g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced); } emul_opcode_map [emul_opcode_num] = info; emul_opcode_opcodes [emul_opcode_num] = opcode; emul_opcode_num++; emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK)); } static void register_icall (gpointer func, const char *name, const char *sigstr, gboolean save) { MonoMethodSignature *sig; if (sigstr) sig = mono_create_icall_signature (sigstr); else sig = NULL; mono_register_jit_icall (func, name, sig, save); } static void print_dfn (MonoCompile *cfg) { int i, j; char *code; MonoBasicBlock *bb; MonoInst *c; { char *method_name = mono_method_full_name (cfg->method, TRUE); g_print ("IR code for method %s\n", method_name); g_free (method_name); } for (i = 0; i < cfg->num_bblocks; ++i) { bb = cfg->bblocks [i]; /*if (bb->cil_code) { char* code1, *code2; code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL); if (bb->last_ins->cil_code) code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL); else code2 = g_strdup (""); code1 [strlen (code1) - 1] = 0; code = g_strdup_printf ("%s -> %s", code1, code2); g_free (code1); g_free (code2); } else*/ code = g_strdup ("\n"); g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code); MONO_BB_FOR_EACH_INS (bb, c) { mono_print_ins_index (-1, c); } g_print ("\tprev:"); for (j = 0; j < bb->in_count; ++j) { g_print (" BB%d", bb->in_bb [j]->block_num); } g_print ("\t\tsucc:"); for (j = 0; j < bb->out_count; ++j) { g_print (" BB%d", bb->out_bb [j]->block_num); } g_print ("\n\tidom: BB%d\n", bb->idom? 
bb->idom->block_num: -1); if (bb->idom) g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn)); if (bb->dominators) mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1); if (bb->dfrontier) mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1); g_free (code); } g_print ("\n"); } void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst) { MONO_ADD_INS (bb, inst); } void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert) { if (ins == NULL) { ins = bb->code; bb->code = ins_to_insert; /* Link with next */ ins_to_insert->next = ins; if (ins) ins->prev = ins_to_insert; if (bb->last_ins == NULL) bb->last_ins = ins_to_insert; } else { /* Link with next */ ins_to_insert->next = ins->next; if (ins->next) ins->next->prev = ins_to_insert; /* Link with previous */ ins->next = ins_to_insert; ins_to_insert->prev = ins; if (bb->last_ins == ins) bb->last_ins = ins_to_insert; } } void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert) { if (ins == NULL) { ins = bb->code; if (ins) ins->prev = ins_to_insert; bb->code = ins_to_insert; ins_to_insert->next = ins; if (bb->last_ins == NULL) bb->last_ins = ins_to_insert; } else { /* Link with previous */ if (ins->prev) ins->prev->next = ins_to_insert; ins_to_insert->prev = ins->prev; /* Link with next */ ins->prev = ins_to_insert; ins_to_insert->next = ins; if (bb->code == ins) bb->code = ins_to_insert; } } /* * mono_verify_bblock: * * Verify that the next and prev pointers are consistent inside the instructions in BB. 
*/ void mono_verify_bblock (MonoBasicBlock *bb) { MonoInst *ins, *prev; prev = NULL; for (ins = bb->code; ins; ins = ins->next) { g_assert (ins->prev == prev); prev = ins; } if (bb->last_ins) g_assert (!bb->last_ins->next); } /* * mono_verify_cfg: * * Perform consistency checks on the JIT data structures and the IR */ void mono_verify_cfg (MonoCompile *cfg) { MonoBasicBlock *bb; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) mono_verify_bblock (bb); } void mono_destroy_compile (MonoCompile *cfg) { GSList *l; if (cfg->header) mono_metadata_free_mh (cfg->header); //mono_mempool_stats (cfg->mempool); mono_free_loop_info (cfg); if (cfg->rs) mono_regstate_free (cfg->rs); if (cfg->spvars) g_hash_table_destroy (cfg->spvars); if (cfg->exvars) g_hash_table_destroy (cfg->exvars); for (l = cfg->headers_to_free; l; l = l->next) mono_metadata_free_mh (l->data); g_list_free (cfg->ldstr_list); g_hash_table_destroy (cfg->token_info_hash); if (cfg->abs_patches) g_hash_table_destroy (cfg->abs_patches); mono_mempool_destroy (cfg->mempool); mono_debug_free_method (cfg); g_free (cfg->varinfo); g_free (cfg->vars); g_free (cfg->exception_message); g_free (cfg); } #ifdef MONO_HAVE_FAST_TLS MONO_FAST_TLS_DECLARE(mono_lmf_addr); #ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR /* * When this is defined, the current lmf is stored in this tls variable instead of in * jit_tls->lmf. 
MONO_FAST_TLS_DECLARE(mono_lmf);
#endif
#endif

/* Return the native TLS key under which the per-thread MonoJitTlsData lives. */
MonoNativeTlsKey
mono_get_jit_tls_key (void)
{
	return mono_jit_tls_id;
}

/* TLS offset of the mono_jit_tls variable, for generated code that reads it directly. */
gint32
mono_get_jit_tls_offset (void)
{
	int offset;
	MONO_THREAD_VAR_OFFSET (mono_jit_tls, offset);
	return offset;
}

/* TLS offset of the current LMF, or -1 when the fast mono_lmf variable is not enabled. */
gint32
mono_get_lmf_tls_offset (void)
{
#if defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	int offset;
	MONO_THREAD_VAR_OFFSET(mono_lmf,offset);
	return offset;
#else
	return -1;
#endif
}

/* TLS offset of the location holding the address of the current LMF. */
gint32
mono_get_lmf_addr_tls_offset (void)
{
	int offset;
	MONO_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
	return offset;
}

/* Return the current thread's last-managed-frame (LMF), or NULL when the
 * thread has no JIT TLS data yet. */
MonoLMF *
mono_get_lmf (void)
{
#if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	return MONO_FAST_TLS_GET (mono_lmf);
#else
	MonoJitTlsData *jit_tls;

	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return jit_tls->lmf;
	/*
	 * We do not assert here because this function can be called from
	 * mini-gc.c on a thread that has not executed any managed code, yet
	 * (the thread object allocation can trigger a collection).
	 */
	return NULL;
#endif
}

/* Return the address of the location holding the current thread's LMF,
 * attaching the thread to the runtime first if necessary. */
MonoLMF **
mono_get_lmf_addr (void)
{
#ifdef MONO_HAVE_FAST_TLS
	return MONO_FAST_TLS_GET (mono_lmf_addr);
#else
	MonoJitTlsData *jit_tls;

	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return &jit_tls->lmf;

	/*
	 * When resolving the call to mono_jit_thread_attach full-aot will look
	 * in the plt, which causes a call into the generic trampoline, which in turn
	 * tries to resolve the lmf_addr creating a cyclic dependency. We cannot
	 * call mono_jit_thread_attach from the native-to-managed wrapper, without
	 * mono_get_lmf_addr, and mono_get_lmf_addr requires the thread to be attached.
	 */
	mono_jit_thread_attach (NULL);

	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return &jit_tls->lmf;

	g_assert_not_reached ();
	return NULL;
#endif
}

/* Set the current thread's LMF, updating both the fast TLS variable (when
 * available) and the location returned by mono_get_lmf_addr (). */
void
mono_set_lmf (MonoLMF *lmf)
{
#if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	MONO_FAST_TLS_SET (mono_lmf, lmf);
#endif

	(*mono_get_lmf_addr ()) = lmf;
}

/* Store JIT_TLS in the native TLS slot and, when available, the fast TLS variable. */
static void
mono_set_jit_tls (MonoJitTlsData *jit_tls)
{
	mono_native_tls_set_value (mono_jit_tls_id, jit_tls);

#ifdef MONO_HAVE_FAST_TLS
	MONO_FAST_TLS_SET (mono_jit_tls, jit_tls);
#endif
}

/* Record the address of the current LMF location in fast TLS (no-op without fast TLS). */
static void
mono_set_lmf_addr (gpointer lmf_addr)
{
#ifdef MONO_HAVE_FAST_TLS
	MONO_FAST_TLS_SET (mono_lmf_addr, lmf_addr);
#endif
}

/*
 * mono_jit_thread_attach:
 *
 * Called by native->managed wrappers. Returns the original domain which needs to be
 * restored, or NULL.
 */
MonoDomain*
mono_jit_thread_attach (MonoDomain *domain)
{
	MonoDomain *orig;

	if (!domain)
		/*
		 * Happens when called from AOTed code which is only used in the root
		 * domain.
		 */
		domain = mono_get_root_domain ();

	/* Attach the thread on its first transition into managed code; the
	 * presence of the TLS data is used as the "already attached" marker. */
#ifdef MONO_HAVE_FAST_TLS
	if (!MONO_FAST_TLS_GET (mono_lmf_addr)) {
		mono_thread_attach (domain);
		/* #678164 */
		mono_thread_set_state (mono_thread_internal_current (), ThreadState_Background);
	}
#else
	if (!mono_native_tls_get_value (mono_jit_tls_id)) {
		mono_thread_attach (domain);
		mono_thread_set_state (mono_thread_internal_current (), ThreadState_Background);
	}
#endif
	orig = mono_domain_get ();
	if (orig != domain)
		mono_domain_set (domain, TRUE);

	return orig != domain ?
orig : NULL; } /* Called by native->managed wrappers */ void mono_jit_set_domain (MonoDomain *domain) { if (domain) mono_domain_set (domain, TRUE); } /** * mono_thread_abort: * @obj: exception object * * abort the thread, print exception information and stack trace */ static void mono_thread_abort (MonoObject *obj) { /* MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id); */ /* handle_remove should be eventually called for this thread, too g_free (jit_tls);*/ if ((mono_runtime_unhandled_exception_policy_get () == MONO_UNHANDLED_POLICY_LEGACY) || (obj->vtable->klass == mono_defaults.threadabortexception_class)) { mono_thread_exit (); } else { MonoObject *other = NULL; MonoString *str = mono_object_to_string (obj, &other); if (str) { char *msg = mono_string_to_utf8 (str); fprintf (stderr, "[ERROR] FATAL UNHANDLED EXCEPTION: %s\n", msg); fflush (stderr); g_free (msg); } exit (mono_environment_exitcode_get ()); } } static void* setup_jit_tls_data (gpointer stack_start, gpointer abort_func) { MonoJitTlsData *jit_tls; MonoLMF *lmf; jit_tls = mono_native_tls_get_value (mono_jit_tls_id); if (jit_tls) return jit_tls; jit_tls = g_new0 (MonoJitTlsData, 1); jit_tls->abort_func = abort_func; jit_tls->end_of_stack = stack_start; mono_set_jit_tls (jit_tls); lmf = g_new0 (MonoLMF, 1); MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf); jit_tls->first_lmf = lmf; #if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR) /* jit_tls->lmf is unused */ MONO_FAST_TLS_SET (mono_lmf, lmf); mono_set_lmf_addr (MONO_FAST_TLS_ADDR (mono_lmf)); #else mono_set_lmf_addr (&jit_tls->lmf); jit_tls->lmf = lmf; #endif mono_setup_altstack (jit_tls); return jit_tls; } static void free_jit_tls_data (MonoJitTlsData *jit_tls) { mono_arch_free_jit_tls_data (jit_tls); mono_free_altstack (jit_tls); g_free (jit_tls->first_lmf); g_free (jit_tls); } static void mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func) { MonoInternalThread *thread; void *jit_tls = 
setup_jit_tls_data (stack_start, mono_thread_abort); thread = mono_thread_internal_current (); mono_debugger_thread_created (tid, thread->root_domain_thread, jit_tls, func); if (thread) thread->jit_data = jit_tls; mono_arch_cpu_init (); } void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL; static void mono_thread_abort_dummy (MonoObject *obj) { if (mono_thread_attach_aborted_cb) mono_thread_attach_aborted_cb (obj); else mono_thread_abort (obj); } static void mono_thread_attach_cb (intptr_t tid, gpointer stack_start) { MonoInternalThread *thread; void *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort_dummy); thread = mono_thread_internal_current (); mono_debugger_thread_created (tid, thread->root_domain_thread, (MonoJitTlsData *) jit_tls, NULL); if (thread) thread->jit_data = jit_tls; if (mono_profiler_get_events () & MONO_PROFILE_STATISTICAL) mono_runtime_setup_stat_profiler (); mono_arch_cpu_init (); } static void mini_thread_cleanup (MonoInternalThread *thread) { MonoJitTlsData *jit_tls = thread->jit_data; if (jit_tls) { mono_debugger_thread_cleanup (jit_tls); /* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff * It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's * not a trivial thing. * * The current offender is mono_thread_manage which cleanup threads from the outside. 
*/ if (thread == mono_thread_internal_current ()) mono_set_jit_tls (NULL); /* If we attach a thread but never call into managed land, we might never get an lmf.*/ if (mono_get_lmf ()) { mono_set_lmf (NULL); mono_set_lmf_addr (NULL); } free_jit_tls_data (jit_tls); thread->jit_data = NULL; } } static MonoInst* mono_create_tls_get (MonoCompile *cfg, int offset) { #ifdef MONO_ARCH_HAVE_TLS_GET if (MONO_ARCH_HAVE_TLS_GET) { MonoInst* ins; if (offset == -1) return NULL; MONO_INST_NEW (cfg, ins, OP_TLS_GET); ins->dreg = mono_alloc_preg (cfg); ins->inst_offset = offset; return ins; } #endif return NULL; } MonoInst* mono_get_jit_tls_intrinsic (MonoCompile *cfg) { return mono_create_tls_get (cfg, mono_get_jit_tls_offset ()); } MonoInst* mono_get_domain_intrinsic (MonoCompile* cfg) { return mono_create_tls_get (cfg, mono_domain_get_tls_offset ()); } MonoInst* mono_get_thread_intrinsic (MonoCompile* cfg) { return mono_create_tls_get (cfg, mono_thread_get_tls_offset ()); } MonoInst* mono_get_lmf_intrinsic (MonoCompile* cfg) { return mono_create_tls_get (cfg, mono_get_lmf_tls_offset ()); } void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo)); ji->ip.i = ip; ji->type = type; ji->data.target = target; ji->next = cfg->patch_info; cfg->patch_info = ji; } MonoJumpInfo * mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = g_new0 (MonoJumpInfo, 1); ji->ip.i = ip; ji->type = type; ji->data.target = target; ji->next = list; return ji; } void mono_remove_patch_info (MonoCompile *cfg, int ip) { MonoJumpInfo **ji = &cfg->patch_info; while (*ji) { if ((*ji)->ip.i == ip) *ji = (*ji)->next; else ji = &((*ji)->next); } } /** * mono_patch_info_dup_mp: * * Make a copy of PATCH_INFO, allocating memory from the mempool MP. 
 */
MonoJumpInfo*
mono_patch_info_dup_mp (MonoMemPool *mp, MonoJumpInfo *patch_info)
{
	MonoJumpInfo *res = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	/* Shallow copy first; then deep-copy the union members that point to
	 * auxiliary structures, per patch type. */
	memcpy (res, patch_info, sizeof (MonoJumpInfo));

	switch (patch_info->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		/* These carry a MonoJumpInfoToken. */
		res->data.token = mono_mempool_alloc (mp, sizeof (MonoJumpInfoToken));
		memcpy (res->data.token, patch_info->data.token, sizeof (MonoJumpInfoToken));
		break;
	case MONO_PATCH_INFO_SWITCH:
		/* Copy the basic-block table header plus its table array. */
		res->data.table = mono_mempool_alloc (mp, sizeof (MonoJumpInfoBBTable));
		memcpy (res->data.table, patch_info->data.table, sizeof (MonoJumpInfoBBTable));
		res->data.table->table = mono_mempool_alloc (mp, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size);
		memcpy (res->data.table->table, patch_info->data.table->table, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size);
		break;
	case MONO_PATCH_INFO_RGCTX_FETCH:
		/* The rgctx entry contains a nested MonoJumpInfo; recurse to copy it. */
		res->data.rgctx_entry = mono_mempool_alloc (mp, sizeof (MonoJumpInfoRgctxEntry));
		memcpy (res->data.rgctx_entry, patch_info->data.rgctx_entry, sizeof (MonoJumpInfoRgctxEntry));
		res->data.rgctx_entry->data = mono_patch_info_dup_mp (mp, res->data.rgctx_entry->data);
		break;
	default:
		/* Remaining types hold plain pointers/values; the shallow copy suffices. */
		break;
	}

	return res;
}

/*
 * mono_patch_info_hash:
 *
 * Hash function over MonoJumpInfo for use with GHashTable. The patch type is
 * folded into the high bits; type-specific data supplies the low bits.
 */
guint
mono_patch_info_hash (gconstpointer data)
{
	const MonoJumpInfo *ji = (MonoJumpInfo*)data;

	switch (ji->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		return (ji->type << 8) | ji->data.token->token;
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		/* Also mix in the generic context when present. */
		return (ji->type << 8) | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
	case MONO_PATCH_INFO_INTERNAL_METHOD:
		return (ji->type << 8) | g_str_hash (ji->data.name);
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_ADJUSTED_IID:
	case MONO_PATCH_INFO_CLASS_INIT:
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_METHOD:
	case MONO_PATCH_INFO_METHOD_JUMP:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_SEQ_POINT_INFO:
		/* Hash the raw target pointer. */
		return (ji->type << 8) | (gssize)ji->data.target;
	default:
		/* Fall back to the type alone (weak but correct). */
		return (ji->type << 8);
	}
}

/*
 * mono_patch_info_equal:
 *
 * This might fail to recognize equivalent patches, i.e. floats, so its only
 * usable in those cases where this is not a problem, i.e. sharing GOT slots
 * in AOT.
 */
gint
mono_patch_info_equal (gconstpointer ka, gconstpointer kb)
{
	const MonoJumpInfo *ji1 = (MonoJumpInfo*)ka;
	const MonoJumpInfo *ji2 = (MonoJumpInfo*)kb;

	if (ji1->type != ji2->type)
		return 0;

	switch (ji1->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		/* Token patches compare image, token and full generic context. */
		if ((ji1->data.token->image != ji2->data.token->image) ||
			(ji1->data.token->token != ji2->data.token->token) ||
			(ji1->data.token->has_context != ji2->data.token->has_context) ||
			(ji1->data.token->context.class_inst != ji2->data.token->context.class_inst) ||
			(ji1->data.token->context.method_inst != ji2->data.token->context.method_inst))
			return 0;
		break;
	case MONO_PATCH_INFO_INTERNAL_METHOD:
		return g_str_equal (ji1->data.name, ji2->data.name);
	case MONO_PATCH_INFO_RGCTX_FETCH: {
		MonoJumpInfoRgctxEntry *e1 = ji1->data.rgctx_entry;
		MonoJumpInfoRgctxEntry *e2 = ji2->data.rgctx_entry;

		/* Structural comparison, recursing into the nested patch info. */
		return e1->method == e2->method && e1->in_mrgctx == e2->in_mrgctx && e1->info_type == e2->info_type && mono_patch_info_equal (e1->data, e2->data);
	}
	default:
		if (ji1->data.target != ji2->data.target)
			return 0;
		break;
	}
	return 1;
}

/*
 * mono_resolve_patch_target:
 *
 * Compute the runtime address/value a patch of PATCH_INFO->type should be
 * resolved to, for METHOD compiled into CODE inside DOMAIN. RUN_CCTORS
 * distinguishes JIT (TRUE) from AOT (FALSE) resolution: when FALSE, class
 * constructors are not run and some targets are left NULL for the AOT
 * compiler to handle. Returns the resolved target (may be NULL for some
 * patch types).
 */
gpointer
mono_resolve_patch_target (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors)
{
	unsigned char *ip = patch_info->ip.i + code;
	gconstpointer target = NULL;

	switch (patch_info->type) {
	case MONO_PATCH_INFO_BB:
		/*
		 * FIXME: This could be hit for methods without a prolog. Should use -1
		 * but too much code depends on a 0 initial value.
		 */
		//g_assert (patch_info->data.bb->native_offset);
		target = patch_info->data.bb->native_offset + code;
		break;
	case MONO_PATCH_INFO_ABS:
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_LABEL:
		target = patch_info->data.inst->inst_c0 + code;
		break;
	case MONO_PATCH_INFO_IP:
#if defined(__native_client__) && defined(__native_client_codegen__)
		/* Need to transform to the destination address, it's */
		/* emitted as an immediate in the code. */
		target = nacl_inverse_modify_patch_target(ip);
#else
		target = ip;
#endif
		break;
	case MONO_PATCH_INFO_METHOD_REL:
		target = code + patch_info->data.offset;
		break;
	case MONO_PATCH_INFO_INTERNAL_METHOD: {
		/* Look up the registered internal call by name; unknown names are fatal. */
		MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
		if (!mi) {
			g_warning ("unknown MONO_PATCH_INFO_INTERNAL_METHOD %s", patch_info->data.name);
			g_assert_not_reached ();
		}
		target = mono_icall_get_wrapper (mi);
		break;
	}
	case MONO_PATCH_INFO_METHOD_JUMP:
		target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
#if defined(__native_client__) && defined(__native_client_codegen__)
#if defined(TARGET_AMD64)
		/* This target is an absolute address, not relative to the */
		/* current code being emitted on AMD64.
		 */
		target = nacl_inverse_modify_patch_target(target);
#endif
#endif
		break;
	case MONO_PATCH_INFO_METHOD:
		if (patch_info->data.method == method) {
			/* Self-reference: resolves to the method's own code. */
			target = code;
		} else {
			/* get the trampoline to the method from the domain */
			target = mono_create_jit_trampoline_in_domain (domain, patch_info->data.method);
		}
		break;
	case MONO_PATCH_INFO_SWITCH: {
		/* Materialize the switch jump table: one absolute code address per case. */
		gpointer *jump_table;
		int i;
#if defined(__native_client__) && defined(__native_client_codegen__)
		/* This memory will leak, but we don't care if we're */
		/* not deleting JIT'd methods anyway */
		jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
#else
		if (method && method->dynamic) {
			/* Dynamic methods get the table from their private code manager
			 * so it can be freed with the method. */
			jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
		} else {
			if (mono_aot_only) {
				jump_table = mono_domain_alloc (domain, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}
		}
#endif

		for (i = 0; i < patch_info->data.table->table_size; i++) {
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* 'code' is relative to the current code blob, we */
			/* need to do this transform on it to make the */
			/* pointers in this table absolute */
			jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
#else
			jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
#endif
		}

#if defined(__native_client__) && defined(__native_client_codegen__)
		/* jump_table is in the data section, we need to transform */
		/* it here so when it gets modified in amd64_patch it will */
		/* then point back to the absolute data address */
		target = nacl_inverse_modify_patch_target (jump_table);
#else
		target = jump_table;
#endif
		break;
	}
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_SIGNATURE:
		/* The metadata pointer itself is the target. */
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_IID:
		mono_class_init (patch_info->data.klass);
		target = GINT_TO_POINTER ((int)patch_info->data.klass->interface_id);
		break;
	case MONO_PATCH_INFO_ADJUSTED_IID:
		/* Negative, size-scaled interface id (used for IMT-style tables). */
		mono_class_init (patch_info->data.klass);
		target = GINT_TO_POINTER ((int)(-((patch_info->data.klass->interface_id + 1) * SIZEOF_VOID_P)));
		break;
	case MONO_PATCH_INFO_VTABLE:
		target = mono_class_vtable (domain, patch_info->data.klass);
		g_assert (target);
		break;
	case MONO_PATCH_INFO_CLASS_INIT: {
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.klass);

		g_assert (vtable);
		target = mono_create_class_init_trampoline (vtable);
		break;
	}
	case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE:
		target = mono_create_delegate_trampoline (domain, patch_info->data.klass);
		break;
	case MONO_PATCH_INFO_SFLDA: {
		/* Static field address; special-static (thread/context) fields resolve
		 * through the domain's lookup table instead of vtable data. */
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.field->parent);

		if (mono_class_field_is_special_static (patch_info->data.field)) {
			gpointer addr = NULL;

			mono_domain_lock (domain);
			if (domain->special_static_fields)
				addr = g_hash_table_lookup (domain->special_static_fields, patch_info->data.field);
			mono_domain_unlock (domain);
			g_assert (addr);
			return addr;
		}

		g_assert (vtable);
		if (!vtable->initialized && !(vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) && (method && mono_class_needs_cctor_run (vtable->klass, method)))
			/* Done by the generated code */
			;
		else {
			if (run_cctors)
				mono_runtime_class_init (vtable);
		}
		target = (char*)mono_vtable_get_static_field_data (vtable) + patch_info->data.field->offset;
		break;
	}
	case MONO_PATCH_INFO_RVA: {
		guint32 field_index = mono_metadata_token_index (patch_info->data.token->token);
		guint32 rva;

		mono_metadata_field_info (patch_info->data.token->image, field_index - 1, NULL, &rva, NULL);
		target = mono_image_rva_map (patch_info->data.token->image, rva);
		break;
	}
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		target = patch_info->data.name;
		break;
	case MONO_PATCH_INFO_LDSTR:
		target = mono_ldstr (domain, patch_info->data.token->image, mono_metadata_token_index (patch_info->data.token->token));
		break;
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE: {
		/* Resolve the token to a reflection object (System.Type etc.). */
		gpointer handle;
		MonoClass *handle_class;

		handle = mono_ldtoken (patch_info->data.token->image, patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? &patch_info->data.token->context : NULL);
		mono_class_init (handle_class);
		mono_class_init (mono_class_from_mono_type (handle));

		target = mono_type_get_object (domain, handle);
		break;
	}
	case MONO_PATCH_INFO_LDTOKEN: {
		gpointer handle;
		MonoClass *handle_class;

		handle = mono_ldtoken (patch_info->data.token->image, patch_info->data.token->token, &handle_class, NULL);
		mono_class_init (handle_class);

		target = handle;
		break;
	}
	case MONO_PATCH_INFO_DECLSEC:
		/* +2 skips the blob length prefix — TODO confirm against metadata layout. */
		target = (mono_metadata_blob_heap (patch_info->data.token->image, patch_info->data.token->token) + 2);
		break;
	case MONO_PATCH_INFO_ICALL_ADDR:
		/* run_cctors == 0 -> AOT */
		if (patch_info->data.method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
			if (run_cctors) {
				target = mono_lookup_pinvoke_call (patch_info->data.method, NULL, NULL);
				if (!target)
					g_error ("Unable to resolve pinvoke method '%s' Re-run with MONO_LOG_LEVEL=debug for more information.\n", mono_method_full_name (patch_info->data.method, TRUE));
			} else {
				target = NULL;
			}
		} else {
			target = mono_lookup_internal_call (patch_info->data.method);

			if (!target && run_cctors)
				g_error ("Unregistered icall '%s'\n", mono_method_full_name (patch_info->data.method, TRUE));
		}
		break;
	case MONO_PATCH_INFO_JIT_ICALL_ADDR: {
		MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
		if (!mi) {
			g_warning ("unknown MONO_PATCH_INFO_JIT_ICALL_ADDR %s", patch_info->data.name);
			g_assert_not_reached ();
		}
		target = mi->func;
		break;
	}
	case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
		target = mono_thread_interruption_request_flag ();
		break;
	case MONO_PATCH_INFO_METHOD_RGCTX: {
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.method->klass);
		g_assert (vtable);

		target = mono_method_lookup_rgctx (vtable, mini_method_get_context (patch_info->data.method)->method_inst);
		break;
	}
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
	case MONO_PATCH_INFO_GOT_OFFSET:
	case MONO_PATCH_INFO_NONE:
		/* Handled elsewhere / no runtime target. */
		break;
	case MONO_PATCH_INFO_RGCTX_FETCH: {
		/* Register (or look up) the rgctx slot described by the nested patch
		 * info, then resolve to the lazy-fetch trampoline for that slot. */
		MonoJumpInfoRgctxEntry *entry = patch_info->data.rgctx_entry;
		guint32 slot = -1;

		switch (entry->data->type) {
		case MONO_PATCH_INFO_CLASS:
			slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, &entry->data->data.klass->byval_arg, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_METHOD:
		case MONO_PATCH_INFO_METHODCONST:
			slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, entry->data->data.method, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_FIELD:
			slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, entry->data->data.field, entry->info_type, mono_method_get_context (entry->method));
			break;
		default:
			g_assert_not_reached ();
			break;
		}

		target = mono_create_rgctx_lazy_fetch_trampoline (slot);
		break;
	}
	case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
		target = mono_create_generic_class_init_trampoline ();
		break;
	case MONO_PATCH_INFO_MONITOR_ENTER:
		target = mono_create_monitor_enter_trampoline ();
		break;
	case MONO_PATCH_INFO_MONITOR_EXIT:
		target = mono_create_monitor_exit_trampoline ();
		break;
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
	case MONO_PATCH_INFO_SEQ_POINT_INFO:
		if (!run_cctors)
			/* AOT, not needed */
			target = NULL;
		else
			target = mono_arch_get_seq_point_info (domain, code);
		break;
#endif
	case MONO_PATCH_INFO_LLVM_IMT_TRAMPOLINE:
#ifdef MONO_ARCH_LLVM_SUPPORTED
		g_assert (mono_use_llvm);
		target = mono_create_llvm_imt_trampoline (domain, patch_info->data.imt_tramp->method,
 patch_info->data.imt_tramp->vt_offset);
#else
		g_assert_not_reached ();
#endif
		break;
	case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
		int card_table_shift_bits;
		gpointer card_table_mask;

		/* Only the table address is needed here; shift/mask are discarded. */
		target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
		break;
	}
	case MONO_PATCH_INFO_CASTCLASS_CACHE: {
		/* One pointer-sized, zero-initialized cache slot per patch. */
		target = mono_domain_alloc0 (domain, sizeof (gpointer));
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return (gpointer)target;
}

/*
 * mono_add_seq_point:
 *
 * Record sequence-point instruction INS at NATIVE_OFFSET: append it to the
 * per-method list (cfg->seq_points) and prepend it to BB's list, making it
 * the block's last seq point.
 */
void
mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
{
	ins->inst_offset = native_offset;
	g_ptr_array_add (cfg->seq_points, ins);
	bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
	bb->last_seq_point = ins;
}

/*
 * mono_add_var_location:
 *
 * Record that VAR lives in register REG (IS_REG) or at stack OFFSET over the
 * native-code range [FROM, TO). Only 'this' (cfg->args [0]) and the rgctx
 * variable are tracked; other variables are ignored.
 */
void
mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
{
	MonoDwarfLocListEntry *entry = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));

	if (is_reg)
		g_assert (offset == 0);
	entry->is_reg = is_reg;
	entry->reg = reg;
	entry->offset = offset;
	entry->from = from;
	entry->to = to;

	if (var == cfg->args [0])
		cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
	else if (var == cfg->rgctx_var)
		cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}

#ifndef DISABLE_JIT

/*
 * mono_compile_create_vars:
 *
 * Create the MonoInst variables for the method being compiled: the return
 * value, 'this', the formal parameters and the IL locals, then let the
 * backend add its own (mono_arch_create_vars).
 */
static void
mono_compile_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	if (!MONO_TYPE_IS_VOID (sig->ret)) {
		cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
		/* Inhibit optimizations */
		cfg->ret->flags |= MONO_INST_VOLATILE;
	}
	if (cfg->verbose_level > 2)
		g_print ("creating vars\n");

	cfg->args = mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));

	if (sig->hasthis)
		cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);

	/* Formal parameters follow 'this' (if any) in cfg->args. */
	for (i = 0; i < sig->param_count; ++i) {
		cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
	}

	if (cfg->verbose_level > 2) {
		if (cfg->ret) {
			printf ("\treturn : ");
			mono_print_ins (cfg->ret);
		}

		if (sig->hasthis) {
			printf ("\tthis: ");
			mono_print_ins (cfg->args [0]);
		}

		for (i = 0; i < sig->param_count; ++i) {
			printf ("\targ [%d]: ", i);
			mono_print_ins (cfg->args [i + sig->hasthis]);
		}
	}

	cfg->locals_start = cfg->num_varinfo;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));

	if (cfg->verbose_level > 2)
		g_print ("creating locals\n");

	for (i = 0; i < header->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);

	if (cfg->verbose_level > 2)
		g_print ("locals done\n");

	/* Backend-specific variables (e.g. for the calling convention). */
	mono_arch_create_vars (cfg);
}

#endif /* #ifndef DISABLE_JIT */

/*
 * mono_print_code:
 *
 * Debug helper: print every basic block of CFG, tagged with MSG.
 */
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
	MonoBasicBlock *bb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
		mono_print_bb (bb, msg);
}

#ifndef DISABLE_JIT

/*
 * mono_postprocess_patches:
 *
 * Post-codegen fixups of cfg->patch_info: rewrite ABS patches into symbolic
 * icall patches, convert switch tables from basic blocks to native offsets,
 * and register jump targets with the domain.
 */
static void
mono_postprocess_patches (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_ABS: {
			MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);

			/*
			 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
			 * absolute address.
			 */
			if (info) {
				//printf ("TEST %s %p\n", info->name, patch_info->data.target);
				// FIXME: CLEAN UP THIS MESS.
				if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
					strstr (cfg->method->name, info->name)) {
					/*
					 * This is an icall wrapper, and this is a call to the
					 * wrapped function.
					 */
					if (cfg->compile_aot) {
						patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ADDR;
						patch_info->data.name = info->name;
					}
				} else {
					/* for these array methods we currently register the same function pointer
					 * since it's a vararg function.
 But this means that mono_find_jit_icall_by_addr ()
					 * will return the incorrect one depending on the order they are registered.
					 * See tests/test-arr.cs
					 */
					if (strstr (info->name, "ves_array_new_va_") == NULL &&
							strstr (info->name, "ves_array_element_address_") == NULL) {
						patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
						patch_info->data.name = info->name;
					}
				}
			}

			if (patch_info->type == MONO_PATCH_INFO_ABS) {
				/* Still an ABS patch: try the per-compile abs_patches table. */
				if (cfg->abs_patches) {
					MonoJumpInfo *abs_ji = g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
					if (abs_ji) {
						patch_info->type = abs_ji->type;
						patch_info->data.target = abs_ji->data.target;
					}
				}
			}
			break;
		}
		case MONO_PATCH_INFO_SWITCH: {
			/* Replace the basic-block table with a table of native offsets. */
			gpointer *table;
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* This memory will leak. */
			/* TODO: can we free this when */
			/* making the final jump table? */
			table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
#else
			if (cfg->method->dynamic) {
				table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}
#endif

			for (i = 0; i < patch_info->data.table->table_size; i++) {
				/* Might be NULL if the switch is eliminated */
				if (patch_info->data.table->table [i]) {
					g_assert (patch_info->data.table->table [i]->native_offset);
					table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
				} else {
					table [i] = NULL;
				}
			}
			patch_info->data.table->table = (MonoBasicBlock**)table;
			break;
		}
		case MONO_PATCH_INFO_METHOD_JUMP: {
			/* Register this jump site so it can be repatched when the target
			 * method is (re)compiled. */
			MonoJumpList *jlist;
			MonoDomain *domain = cfg->domain;
			unsigned char *ip = cfg->native_code + patch_info->ip.i;
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* When this jump target gets evaluated, the method */
			/* will be installed in the dynamic code section, */
			/* not at the location of cfg->native_code.
			 */
			ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
#endif
			mono_domain_lock (domain);
			jlist = g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
			if (!jlist) {
				jlist = mono_domain_alloc0 (domain, sizeof (MonoJumpList));
				g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
			}
			jlist->list = g_slist_prepend (jlist->list, ip);
			mono_domain_unlock (domain);
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}
}

/*
 * collect_pred_seq_points:
 *
 * For sequence point INS in BB, walk BB's predecessors and link each
 * predecessor's last seq point to INS in the NEXT table (indexed by the
 * seq-point indices stashed in backend.size). Predecessors without a seq
 * point are searched recursively, bounded at depth 5.
 */
static void
collect_pred_seq_points (MonoBasicBlock *bb, MonoInst *ins, GSList **next, int depth)
{
	int i;
	MonoBasicBlock *in_bb;
	GSList *l;

	for (i = 0; i < bb->in_count; ++i) {
		in_bb = bb->in_bb [i];

		if (in_bb->last_seq_point) {
			int src_index = in_bb->last_seq_point->backend.size;
			int dst_index = ins->backend.size;

			/* bb->in_bb might contain duplicates */
			for (l = next [src_index]; l; l = l->next)
				if (GPOINTER_TO_UINT (l->data) == dst_index)
					break;
			if (!l)
				next [src_index] = g_slist_append (next [src_index], GUINT_TO_POINTER (dst_index));
		} else {
			/* Have to look at its predecessors */
			if (depth < 5)
				collect_pred_seq_points (in_bb, ins, next, depth + 1);
		}
	}
}

/*
 * mono_save_seq_point_info:
 *
 * Build the MonoSeqPointInfo for the compiled method: copy il/native offsets
 * of every sequence point, compute per-point successor lists (for debugger
 * 'step over'), and register the result with the domain.
 */
static void
mono_save_seq_point_info (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	GSList *bb_seq_points, *l;
	MonoInst *last;
	MonoDomain *domain = cfg->domain;
	int i;
	MonoSeqPointInfo *info;
	GSList **next;

	if (!cfg->seq_points)
		return;

	info = g_malloc0 (sizeof (MonoSeqPointInfo) + (cfg->seq_points->len - MONO_ZERO_LEN_ARRAY) * sizeof (SeqPoint));
	info->len = cfg->seq_points->len;
	for (i = 0; i < cfg->seq_points->len; ++i) {
		SeqPoint *sp = &info->seq_points [i];
		MonoInst *ins = g_ptr_array_index (cfg->seq_points, i);

		sp->il_offset = ins->inst_imm;
		sp->native_offset = ins->inst_offset;

		/* Used below */
		ins->backend.size = i;
	}

	/*
	 * For each sequence point, compute the list of sequence points immediately
	 * following it, this is needed to implement 'step over' in the debugger agent.
	 */
	next = g_new0 (GSList*, cfg->seq_points->len);
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		/* bb->seq_points was built by prepending; reverse to program order. */
		bb_seq_points = g_slist_reverse (bb->seq_points);
		last = NULL;
		for (l = bb_seq_points; l; l = l->next) {
			MonoInst *ins = l->data;

			if (ins->inst_imm == METHOD_ENTRY_IL_OFFSET || ins->inst_imm == METHOD_EXIT_IL_OFFSET)
				/* Used to implement method entry/exit events */
				continue;

			if (last != NULL) {
				/* Link with the previous seq point in the same bb */
				next [last->backend.size] = g_slist_append (next [last->backend.size], GUINT_TO_POINTER (ins->backend.size));
			} else {
				/* Link with the last bb in the previous bblocks */
				collect_pred_seq_points (bb, ins, next, 0);
			}

			last = ins;
		}
	}

	if (cfg->verbose_level > 2) {
		printf ("\nSEQ POINT MAP: \n");
	}

	/* Flatten the successor lists into the fixed-size sp->next arrays. */
	for (i = 0; i < cfg->seq_points->len; ++i) {
		SeqPoint *sp = &info->seq_points [i];
		GSList *l;
		int j, next_index;

		sp->next_len = g_slist_length (next [i]);
		sp->next = g_new (int, sp->next_len);
		j = 0;
		if (cfg->verbose_level > 2 && next [i]) {
			printf ("\t0x%x ->", sp->il_offset);
			for (l = next [i]; l; l = l->next) {
				next_index = GPOINTER_TO_UINT (l->data);
				printf (" 0x%x", info->seq_points [next_index].il_offset);
			}
			printf ("\n");
		}
		for (l = next [i]; l; l = l->next) {
			next_index = GPOINTER_TO_UINT (l->data);
			sp->next [j ++] = next_index;
		}
		g_slist_free (next [i]);
	}
	g_free (next);

	cfg->seq_point_info = info;

	// FIXME: dynamic methods
	if (!cfg->compile_aot) {
		mono_domain_lock (domain);
		// FIXME: How can the lookup succeed ?
		if (!g_hash_table_lookup (domain_jit_info (domain)->seq_points, cfg->method_to_register))
			g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
		mono_domain_unlock (domain);
	}

	g_ptr_array_free (cfg->seq_points, TRUE);
	cfg->seq_points = NULL;
}

/*
 * mono_codegen:
 *
 * Back end of the JIT pipeline: run per-block lowering/peephole/register
 * allocation, emit prolog, basic blocks, epilog and exception code into a
 * temporary buffer, then copy the result into domain (or dynamic-method)
 * code memory, post-process and apply patches, and flush the icache.
 */
void
mono_codegen (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	int max_epilog_size;
	guint8 *code;
	MonoDomain *code_domain;

	if (mono_using_xdebug)
		/*
		 * Recent gdb versions have trouble processing symbol files containing
		 * overlapping address ranges, so allocate all code from the code manager
		 * of the root domain. (#666152).
		 */
		code_domain = mono_get_root_domain ();
	else
		code_domain = cfg->domain;

#if defined(__native_client_codegen__) && defined(__native_client__)
	void *code_dest;

	/* This keeps patch targets from being transformed during
	 * ordinary method compilation, for local branches and jumps. */
	nacl_allow_target_modification (FALSE);
#endif

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		cfg->spill_count = 0;
		/* we reuse dfn here */
		/* bb->dfn = bb_count++; */

		mono_arch_lowering_pass (cfg, bb);

		if (cfg->opt & MONO_OPT_PEEPHOLE)
			mono_arch_peephole_pass_1 (cfg, bb);

		if (!cfg->globalra)
			mono_local_regalloc (cfg, bb);

		if (cfg->opt & MONO_OPT_PEEPHOLE)
			mono_arch_peephole_pass_2 (cfg, bb);
	}

	if (cfg->prof_options & MONO_PROFILE_COVERAGE)
		cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, cfg->num_bblocks);

	code = mono_arch_emit_prolog (cfg);

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		code = mono_arch_instrument_prolog (cfg, mono_profiler_method_enter, code, FALSE);

	cfg->code_len = code - cfg->native_code;
	cfg->prolog_end = cfg->code_len;

	mono_debug_open_method (cfg);

	/* emit code all basic blocks */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		bb->native_offset = cfg->code_len;
		bb->real_native_offset = cfg->code_len;
		//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
			mono_arch_output_basic_block (cfg, bb);
		bb->native_length = cfg->code_len -
 bb->native_offset;

		if (bb == cfg->bb_exit) {
			/* The exit block carries the epilog (and optional leave hook). */
			cfg->epilog_begin = cfg->code_len;

			if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
				code = cfg->native_code + cfg->code_len;
				code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
				cfg->code_len = code - cfg->native_code;
				g_assert (cfg->code_len < cfg->code_size);
			}

			mono_arch_emit_epilog (cfg);
		}
	}

#ifdef __native_client_codegen__
	mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
	mono_arch_emit_exceptions (cfg);

	max_epilog_size = 0;

	/* we always allocate code in cfg->domain->code_mp to increase locality */
	cfg->code_size = cfg->code_len + max_epilog_size;
	/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */

	if (cfg->method->dynamic) {
		guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
		unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
		/* Allocate the code into a separate memory pool so it can be freed */
		cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
		cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
		mono_domain_lock (cfg->domain);
		mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
		mono_domain_unlock (cfg->domain);

		if (mono_using_xdebug)
			/* See the comment for cfg->code_domain */
			code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
		else
			code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
	} else {
		guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
		unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
		code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
	}

#if defined(__native_client_codegen__) && defined(__native_client__)
	nacl_allow_target_modification (TRUE);
#endif

	g_assert (code);

	/* Move the emitted code from the temporary buffer into executable memory. */
	memcpy (code, cfg->native_code, cfg->code_len);
#if defined(__default_codegen__)
	g_free (cfg->native_code);
#elif defined(__native_client_codegen__)
	if (cfg->native_code_alloc) {
		g_free (cfg->native_code_alloc);
		cfg->native_code_alloc = 0;
	}
	else if (cfg->native_code) {
		g_free (cfg->native_code);
	}
#endif /* __native_client_codegen__ */

	cfg->native_code = code;
	code = cfg->native_code + cfg->code_len;

	/* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
	mono_postprocess_patches (cfg);

#ifdef VALGRIND_JIT_REGISTER_MAP
if (valgrind_register){
		char* nm = mono_method_full_name (cfg->method, TRUE);
		VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
		g_free (nm);
	}
#endif

	if (cfg->verbose_level > 0) {
		char* nm = mono_method_full_name (cfg->method, TRUE);
		g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
				 nm,
				 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
		g_free (nm);
	}

	{
		/* Sanity check: a shared-generic compile must be on a generic method. */
		gboolean is_generic = FALSE;

		if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
				cfg->method->klass->generic_container || cfg->method->klass->generic_class) {
			is_generic = TRUE;
		}

		if (cfg->generic_sharing_context)
			g_assert (is_generic);
	}

#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
	mono_arch_save_unwind_info (cfg);
#endif

#if defined(__native_client_codegen__) && defined(__native_client__)
	if (!cfg->compile_aot) {
		if (cfg->method->dynamic) {
			code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
		} else {
			code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
		}
	}
#endif

#if defined(__native_client_codegen__)
	mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif

	mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->dynamic_info ? cfg->dynamic_info->code_mp : NULL, cfg->run_cctors);

	if (cfg->method->dynamic) {
		if (mono_using_xdebug)
			mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
		else
			mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
	} else {
		mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
	}
#if defined(__native_client_codegen__) && defined(__native_client__)
	cfg->native_code = code_dest;
#endif
	mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);

	mono_arch_flush_icache (cfg->native_code, cfg->code_len);

	mono_debug_close_method (cfg);

#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
	mono_arch_unwindinfo_install_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
#endif
}

/*
 * compute_reachable:
 *
 * Depth-first walk from BB, setting BB_VISITED on every block reachable
 * through out-edges. Already-visited blocks terminate the recursion.
 */
static void
compute_reachable (MonoBasicBlock *bb)
{
	int i;

	if (!(bb->flags & BB_VISITED)) {
		bb->flags |= BB_VISITED;
		for (i = 0; i < bb->out_count; ++i)
			compute_reachable (bb->out_bb [i]);
	}
}

/*
 * mono_handle_out_of_line_bblock:
 *
 * When a block falls through into an out-of-line successor, append an
 * explicit OP_BR so the fall-through survives block reordering.
 */
static void
mono_handle_out_of_line_bblock (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_BR);
			MONO_ADD_INS (bb, ins);
			ins->inst_target_bb = bb->next_bb;
		}
	}
}

static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
	GSList *tmp;
	MonoMethodHeader *header;
	MonoJitInfo *jinfo;
	int num_clauses;
	int generic_info_size, arch_eh_info_size = 0;
	int holes_size = 0, num_holes = 0;
	guint32 stack_size = 0;

	g_assert (method_to_compile == cfg->method);
	header = cfg->header;

	if (cfg->generic_sharing_context)
		generic_info_size = sizeof (MonoGenericJitInfo);
	else
		generic_info_size = 0;

	if (cfg->arch_eh_jit_info) {
		MonoJitArgumentInfo *arg_info;
		MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);

		/*
		 * This cannot be
computed during stack walking, as * mono_arch_get_argument_info () is not signal safe. */ arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1); stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info); if (stack_size) arch_eh_info_size = sizeof (MonoArchEHJitInfo); } if (cfg->try_block_holes) { for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) { TryBlockHole *hole = tmp->data; MonoExceptionClause *ec = hole->clause; int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length; MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len]; g_assert (clause_last_bb); /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/ if (clause_last_bb->native_offset != hole_end) ++num_holes; } if (num_holes) holes_size = sizeof (MonoTryBlockHoleTableJitInfo) + num_holes * sizeof (MonoTryBlockHoleJitInfo); if (G_UNLIKELY (cfg->verbose_level >= 4)) printf ("Number of try block holes %d\n", num_holes); } if (COMPILE_LLVM (cfg)) num_clauses = cfg->llvm_ex_info_len; else num_clauses = header->num_clauses; if (cfg->method->dynamic) { jinfo = g_malloc0 (MONO_SIZEOF_JIT_INFO + (num_clauses * sizeof (MonoJitExceptionInfo)) + generic_info_size + holes_size + arch_eh_info_size); } else { jinfo = mono_domain_alloc0 (cfg->domain, MONO_SIZEOF_JIT_INFO + (num_clauses * sizeof (MonoJitExceptionInfo)) + generic_info_size + holes_size + arch_eh_info_size); } jinfo->method = cfg->method_to_register; jinfo->code_start = cfg->native_code; jinfo->code_size = cfg->code_len; jinfo->used_regs = cfg->used_int_regs; jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0; jinfo->cas_inited = FALSE; /* initialization delayed at the first stalk walk using this method */ jinfo->num_clauses = num_clauses; if (COMPILE_LLVM (cfg)) jinfo->from_llvm = TRUE; if (cfg->generic_sharing_context) { MonoInst *inst; MonoGenericJitInfo *gi; GSList *loclist = NULL; jinfo->has_generic_jit_info = 
1; gi = mono_jit_info_get_generic_jit_info (jinfo); g_assert (gi); gi->generic_sharing_context = cfg->generic_sharing_context; if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) || mini_method_get_context (method_to_compile)->method_inst || method_to_compile->klass->valuetype) { g_assert (cfg->rgctx_var); } gi->has_this = 1; if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) || mini_method_get_context (method_to_compile)->method_inst || method_to_compile->klass->valuetype) { inst = cfg->rgctx_var; if (!COMPILE_LLVM (cfg)) g_assert (inst->opcode == OP_REGOFFSET); loclist = cfg->rgctx_loclist; } else { inst = cfg->args [0]; loclist = cfg->this_loclist; } if (loclist) { /* Needed to handle async exceptions */ GSList *l; int i; gi->nlocs = g_slist_length (loclist); if (cfg->method->dynamic) gi->locations = g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry)); else gi->locations = mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry)); i = 0; for (l = loclist; l; l = l->next) { memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry)); i ++; } } if (COMPILE_LLVM (cfg)) { g_assert (cfg->llvm_this_reg != -1); gi->this_in_reg = 0; gi->this_reg = cfg->llvm_this_reg; gi->this_offset = cfg->llvm_this_offset; } else if (inst->opcode == OP_REGVAR) { gi->this_in_reg = 1; gi->this_reg = inst->dreg; } else { g_assert (inst->opcode == OP_REGOFFSET); #ifdef TARGET_X86 g_assert (inst->inst_basereg == X86_EBP); #elif defined(TARGET_AMD64) g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP); #endif g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32); gi->this_in_reg = 0; gi->this_reg = inst->inst_basereg; gi->this_offset = inst->inst_offset; } } if (num_holes) { MonoTryBlockHoleTableJitInfo *table; int i; jinfo->has_try_block_holes = 1; table = mono_jit_info_get_try_block_hole_table_info (jinfo); table->num_holes = (guint16)num_holes; i = 0; for (tmp = cfg->try_block_holes; tmp; tmp = 
tmp->next) { guint32 start_bb_offset; MonoTryBlockHoleJitInfo *hole; TryBlockHole *hole_data = tmp->data; MonoExceptionClause *ec = hole_data->clause; int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length; MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len]; g_assert (clause_last_bb); /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/ if (clause_last_bb->native_offset == hole_end) continue; start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset; hole = &table->holes [i++]; hole->clause = hole_data->clause - &header->clauses [0]; hole->offset = (guint32)hole_data->start_offset; hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset); if (G_UNLIKELY (cfg->verbose_level >= 4)) printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length); } g_assert (i == num_holes); } if (arch_eh_info_size) { MonoArchEHJitInfo *info; jinfo->has_arch_eh_info = 1; info = mono_jit_info_get_arch_eh_info (jinfo); info->stack_size = stack_size; } if (COMPILE_LLVM (cfg)) { if (num_clauses) memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo)); } else if (header->num_clauses) { int i; for (i = 0; i < header->num_clauses; i++) { MonoExceptionClause *ec = &header->clauses [i]; MonoJitExceptionInfo *ei = &jinfo->clauses [i]; MonoBasicBlock *tblock; MonoInst *exvar; ei->flags = ec->flags; exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset); ei->exvar_offset = exvar ? 
exvar->inst_offset : 0; if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) { tblock = cfg->cil_offset_to_bb [ec->data.filter_offset]; g_assert (tblock); ei->data.filter = cfg->native_code + tblock->native_offset; } else { ei->data.catch_class = ec->data.catch_class; } tblock = cfg->cil_offset_to_bb [ec->try_offset]; g_assert (tblock); g_assert (tblock->native_offset); ei->try_start = cfg->native_code + tblock->native_offset; if (tblock->extend_try_block) { /* * Extend the try block backwards to include parts of the previous call * instruction. */ ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT; } tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len]; g_assert (tblock); if (!tblock->native_offset) { int j, end; for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) { MonoBasicBlock *bb = cfg->cil_offset_to_bb [j]; if (bb && bb->native_offset) { tblock = bb; break; } } } ei->try_end = cfg->native_code + tblock->native_offset; g_assert (tblock->native_offset); tblock = cfg->cil_offset_to_bb [ec->handler_offset]; g_assert (tblock); ei->handler_start = cfg->native_code + tblock->native_offset; for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) { TryBlockHole *hole = tmp->data; gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length); if (hole->clause == ec && hole_end == ei->try_end) { if (G_UNLIKELY (cfg->verbose_level >= 4)) printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset); ei->try_end = cfg->native_code + hole->start_offset; break; } } if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) { int end_offset; if (ec->handler_offset + ec->handler_len < header->code_size) { tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len]; g_assert (tblock); end_offset = tblock->native_offset; } else { end_offset = cfg->epilog_begin; } ei->data.handler_end = cfg->native_code + end_offset; } } 
	}

	if (G_UNLIKELY (cfg->verbose_level >= 4)) {
		int i;

		/* Debug dump of the final EH clause table; offsets are relative to the
		 * start of the emitted native code. */
		for (i = 0; i < jinfo->num_clauses; i++) {
			MonoJitExceptionInfo *ei = &jinfo->clauses [i];
			int start = (guint8*)ei->try_start - cfg->native_code;
			int end = (guint8*)ei->try_end - cfg->native_code;
			int handler = (guint8*)ei->handler_start - cfg->native_code;

			printf ("JitInfo EH clause %d flags %x try %x-%x handler %x\n", i, ei->flags, start, end, handler);
		}
	}

	/*
	 * Its possible to generate dwarf unwind info for xdebug etc, but not actually
	 * using it during runtime, hence the define.
	 */
#ifdef MONO_ARCH_HAVE_XP_UNWIND
	if (cfg->encoded_unwind_ops) {
		/* Unwind ops were already encoded during compilation: cache the blob and
		 * free the per-compile scratch copy. */
		jinfo->used_regs = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
		g_free (cfg->encoded_unwind_ops);
	} else if (cfg->unwind_ops) {
		/* Encode the op list now, cache the result, then drop the temporary. */
		guint32 info_len;
		guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);

		jinfo->used_regs = mono_cache_unwind_info (unwind_info, info_len);
		g_free (unwind_info);
	}
#endif

	return jinfo;
}
#endif

/*
 * mini_get_shared_method:
 *
 * Return the method which is actually compiled/registered when doing generic sharing.
 */
MonoMethod*
mini_get_shared_method (MonoMethod *method)
{
	MonoGenericContext shared_context;
	MonoMethod *declaring_method, *res;
	int i;
	gboolean partial = FALSE;

	/* Walk back to the generic definition METHOD was inflated from (or METHOD
	 * itself if it already is the definition). */
	if (method->is_generic || method->klass->generic_container)
		declaring_method = method;
	else
		declaring_method = mono_method_get_declaring_generic_method (method);

	/* Start from the definition's own context, i.e. the context made up of its
	 * formal type parameters. */
	if (declaring_method->is_generic)
		shared_context = mono_method_get_generic_container (declaring_method)->context;
	else
		shared_context = declaring_method->klass->generic_container->context;

	/* Handle partial sharing */
	if (method != declaring_method && method->is_inflated && !mono_method_is_generic_sharable_impl_full (method, FALSE, FALSE)) {
		MonoGenericContext *context = mono_method_get_context (method);
		MonoGenericInst *inst;
		MonoType **type_argv;

		/*
		 * Create the shared context by replacing the ref type arguments with
		 * type parameters, and keeping the rest.
		 */
		partial = TRUE;

		/* Class-level type arguments. */
		inst = context->class_inst;
		if (inst) {
			type_argv = g_new0 (MonoType*, inst->type_argc);
			for (i = 0; i < inst->type_argc; ++i) {
				/* Reference types and type variables collapse onto the
				 * definition's own parameter; valuetype args are kept as-is. */
				if (MONO_TYPE_IS_REFERENCE (inst->type_argv [i]) || inst->type_argv [i]->type == MONO_TYPE_VAR || inst->type_argv [i]->type == MONO_TYPE_MVAR)
					type_argv [i] = shared_context.class_inst->type_argv [i];
				else
					type_argv [i] = inst->type_argv [i];
			}

			shared_context.class_inst = mono_metadata_get_generic_inst (inst->type_argc, type_argv);
			g_free (type_argv);
		}

		/* Method-level type arguments: same replacement rule. */
		inst = context->method_inst;
		if (inst) {
			type_argv = g_new0 (MonoType*, inst->type_argc);
			for (i = 0; i < inst->type_argc; ++i) {
				if (MONO_TYPE_IS_REFERENCE (inst->type_argv [i]) || inst->type_argv [i]->type == MONO_TYPE_VAR || inst->type_argv [i]->type == MONO_TYPE_MVAR)
					type_argv [i] = shared_context.method_inst->type_argv [i];
				else
					type_argv [i] = inst->type_argv [i];
			}

			shared_context.method_inst = mono_metadata_get_generic_inst (inst->type_argc, type_argv);
			g_free (type_argv);
		}
	}

	res = mono_class_inflate_generic_method (declaring_method, &shared_context);
	if (!partial) {
		/* The result should be an inflated method whose parent is not inflated */
		g_assert (!res->klass->is_inflated);
	}
	return res;
}

#ifndef DISABLE_JIT

/*
 * mini_method_compile:
 * @method: the method to compile
 * @opts: the optimization flags to use
 * @domain: the domain where the method will be compiled in
 * @run_cctors: whether we should run type ctors if possible
 * @compile_aot: whether this is an AOT compilation
 * @parts: debug flag
 *
 * Returns: a MonoCompile* pointer. Caller must check the exception_type
 * field in the returned struct to see if compilation succeded.
 */
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, gboolean run_cctors, gboolean compile_aot, int parts)
{
	MonoMethodHeader *header;
	MonoMethodSignature *sig;
	MonoError err;
	guint8 *ip;
	MonoCompile *cfg;
	int dfn, i, code_size_ratio;
	gboolean deadce_has_run = FALSE;
	gboolean try_generic_shared, try_llvm = FALSE;
	MonoMethod *method_to_compile, *method_to_register;

	InterlockedIncrement (&mono_jit_stats.methods_compiled);
	if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
		mono_profiler_method_jit (method);
	if (MONO_PROBE_METHOD_COMPILE_BEGIN_ENABLED ())
		MONO_PROBE_METHOD_COMPILE_BEGIN (method);

	/* Decide whether this method should be compiled as its shared (generic)
	 * version; AOT uses a wider test than JIT. */
	if (compile_aot)
		/*
		 * We might get passed the original generic method definition or
		 * instances with type parameters.
		 * FIXME: Remove the method->klass->generic_class limitation.
		 */
		try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
			(opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) ||
			(!method->klass->generic_class && mono_method_is_generic_sharable_impl (method, TRUE)));
	else
		try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
			(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method, FALSE);

	if (opts & MONO_OPT_GSHARED) {
		if (try_generic_shared)
			mono_stats.generics_sharable_methods++;
		else if (mono_method_is_generic_impl (method))
			mono_stats.generics_unsharable_methods++;
	}

#ifdef ENABLE_LLVM
	try_llvm = mono_use_llvm;
#endif

	/* Compilation can restart here with sharing or LLVM disabled when the
	 * first attempt fails for those reasons. */
 restart_compile:
	if (try_generic_shared) {
		method_to_compile = mini_get_shared_method (method);
		g_assert (method_to_compile);
	} else {
		method_to_compile = method;
	}

	/* Build the per-compilation state object. */
	cfg = g_new0 (MonoCompile, 1);
	cfg->method = method_to_compile;
	cfg->header = mono_method_get_header (cfg->method);
	cfg->mempool = mono_mempool_new ();
	cfg->opt = opts;
	cfg->prof_options = mono_profiler_get_events ();
	cfg->run_cctors = run_cctors;
	cfg->domain = domain;
	cfg->verbose_level = mini_verbose;
	cfg->compile_aot = compile_aot;
	cfg->skip_visibility = method->skip_visibility;
	cfg->orig_method = method;
	cfg->gen_seq_points = debug_options.gen_seq_points;
	cfg->explicit_null_checks = debug_options.explicit_null_checks;
	cfg->soft_breakpoints = debug_options.soft_breakpoints;
	if (try_generic_shared)
		/* Non-NULL marker; the pointer value itself is not dereferenced as a
		 * real context here — it points at its own field. */
		cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->generic_sharing_context;
	cfg->compile_llvm = try_llvm;
	cfg->token_info_hash = g_hash_table_new (NULL, NULL);

	if (cfg->gen_seq_points)
		cfg->seq_points = g_ptr_array_new ();

	/* AOT cannot compile open generic definitions directly. */
	if (cfg->compile_aot && !try_generic_shared && (method->is_generic || method->klass->generic_container)) {
		cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
		return cfg;
	}

	if (cfg->generic_sharing_context) {
		method_to_register = method_to_compile;
	} else {
		g_assert (method == method_to_compile);
		method_to_register = method;
	}
	cfg->method_to_register = method_to_register;

	/* Resolve the signature; failure is reported through cfg, not thrown. */
	mono_error_init (&err);
	sig = mono_method_signature_checked (cfg->method, &err);
	if (!sig) {
		cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
		cfg->exception_message = g_strdup (mono_error_get_message (&err));
		mono_error_cleanup (&err);
		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
		return cfg;
	}

	header = cfg->header;
	if (!header) {
		MonoLoaderError *error;

		if ((error = mono_loader_get_last_error ())) {
			cfg->exception_type = error->exception_type;
		} else {
			cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
			cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
		}
		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
		return cfg;
	}

#ifdef ENABLE_LLVM
	{
		static gboolean inited;

		if (!inited) {
			inited = TRUE;
		}

		/*
		 * Check for methods which cannot be compiled by LLVM early, to avoid
		 * the extra compilation pass.
		 */
		if (COMPILE_LLVM (cfg)) {
			mono_llvm_check_method_supported (cfg);
			if (cfg->disable_llvm) {
				if (cfg->verbose_level >= 1) {
					//nm = mono_method_full_name (cfg->method, TRUE);
					printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
					//g_free (nm);
				}
				/* Retry the whole compilation with the regular JIT backend. */
				mono_destroy_compile (cfg);
				try_llvm = FALSE;
				goto restart_compile;
			}
		}
	}
#endif

	/* The debugger has no liveness information, so avoid sharing registers/stack slots */
	if (mono_debug_using_mono_debugger () || debug_options.mdb_optimizations) {
		cfg->disable_reuse_registers = TRUE;
		cfg->disable_reuse_stack_slots = TRUE;
		/*
		 * This decreases the change the debugger will read registers/stack slots which are
		 * not yet initialized.
		 */
		cfg->disable_initlocals_opt = TRUE;

		cfg->extend_live_ranges = TRUE;

		/* Temporarily disable this when running in the debugger until we have support
		 * for this in the debugger. */
		/* This is no longer needed with sdb */
		//cfg->disable_omit_fp = TRUE;

		/* The debugger needs all locals to be on the stack or in a global register */
		cfg->disable_vreg_to_lvreg = TRUE;

		/* Don't remove unused variables when running inside the debugger since the user
		 * may still want to view them. */
		cfg->disable_deadce_vars = TRUE;

		// cfg->opt |= MONO_OPT_SHARED;
		cfg->opt &= ~MONO_OPT_DEADCE;
		cfg->opt &= ~MONO_OPT_INLINE;
		cfg->opt &= ~MONO_OPT_COPYPROP;
		cfg->opt &= ~MONO_OPT_CONSPROP;
		/* This is no longer needed with sdb */
		//cfg->opt &= ~MONO_OPT_GSHARED;

		/* This is needed for the soft debugger, which doesn't like code after the epilog */
		cfg->disable_out_of_line_bblocks = TRUE;
	}

	if (mono_using_xdebug) {
		/*
		 * Make each variable use its own register/stack slot and extend
		 * their liveness to cover the whole method, making them displayable
		 * in gdb even after they are dead.
		 */
		cfg->disable_reuse_registers = TRUE;
		cfg->disable_reuse_stack_slots = TRUE;
		cfg->extend_live_ranges = TRUE;
		cfg->compute_precise_live_ranges = TRUE;
	}

	mini_gc_init_cfg (cfg);

	if (COMPILE_LLVM (cfg)) {
		cfg->opt |= MONO_OPT_ABCREM;
	}

	/* Debug hook: raise verbosity for a single method named in the env var
	 * (full name with '.'/':' separators, or a bare method name). */
	if (getenv ("MONO_VERBOSE_METHOD")) {
		char *name = getenv ("MONO_VERBOSE_METHOD");

		if ((strchr (name, '.') > name) || strchr (name, ':')) {
			MonoMethodDesc *desc;

			desc = mono_method_desc_new (name, TRUE);
			if (mono_method_desc_full_match (desc, cfg->method)) {
				cfg->verbose_level = 4;
			}
			mono_method_desc_free (desc);
		} else {
			if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
				cfg->verbose_level = 4;
		}
	}

	ip = (guint8 *)header->code;

	cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);

	if (cfg->verbose_level > 0) {
		char *method_name;

		if (COMPILE_LLVM (cfg))
			g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
		else if (cfg->generic_sharing_context)
			g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
		else
			g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
		g_free (method_name);
	}

	/* ABCREM/SSAPRE run on SSA form, so force SSA on. */
	if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
		cfg->opt |= MONO_OPT_SSA;

	/*
	if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
		cfg->globalra = TRUE;
	*/

	//cfg->globalra = TRUE;

	//if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
	//	cfg->globalra = TRUE;

	{
		static int count = 0;
		count ++;

		/*
		if (getenv ("COUNT2")) {
			cfg->globalra = TRUE;
			if (count == atoi (getenv ("COUNT2")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (getenv ("COUNT2")))
				cfg->globalra = FALSE;
		}
		*/
	}

	/* Global register allocation is disabled for cases it cannot handle. */
	if (header->clauses)
		cfg->globalra = FALSE;

	if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		/* The code in the prolog clobbers caller saved registers */
		cfg->globalra = FALSE;

	// FIXME: Disable globalra in case of tracing/profiling

	if (cfg->method->save_lmf)
		/* The LMF saving code might clobber caller saved registers */
		cfg->globalra = FALSE;

	if (header->code_size > 5000)
		// FIXME:
		/* Too large bblocks could overflow the ins positions */
		cfg->globalra = FALSE;

	cfg->rs = mono_regstate_new ();
	if (cfg->globalra)
		cfg->rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
	cfg->next_vreg = cfg->rs->next_vreg;

	/* FIXME: Fix SSA to handle branches inside bblocks */
	if (cfg->opt & MONO_OPT_SSA)
		cfg->enable_extended_bblocks = FALSE;

	/*
	 * FIXME: This confuses liveness analysis because variables which are assigned after
	 * a branch inside a bblock become part of the kill set, even though the assignment
	 * might not get executed. This causes the optimize_initlocals pass to delete some
	 * assignments which are needed.
	 * Also, the mono_if_conversion pass needs to be modified to recognize the code
	 * created by this.
	 */
	//cfg->enable_extended_bblocks = TRUE;

	/*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
	if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
		if (mini_get_debug_options ()->break_on_unverified)
			G_BREAKPOINT ();
		return cfg;
	}

	/*
	 * create MonoInst* which represents arguments and local variables
	 */
	mono_compile_create_vars (cfg);

	/* SSAPRE is not supported on linear IR */
	cfg->opt &= ~MONO_OPT_SSAPRE;

	/* Phase: IL -> linear IR conversion. Negative result means failure,
	 * details are left in cfg for the caller. */
	i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, NULL, 0, FALSE);

	if (i < 0) {
		if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
			if (compile_aot) {
				if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
					MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
				return cfg;
			}
			/* Sharing failed: retry as a normal (unshared) compilation. */
			mono_destroy_compile (cfg);
			try_generic_shared = FALSE;
			goto restart_compile;
		}
		g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);

		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
		/* cfg contains the details of the failure, so let the caller cleanup */
		return cfg;
	}

	cfg->stat_basic_blocks += cfg->num_bblocks;

	if (COMPILE_LLVM (cfg)) {
		MonoInst *ins;

		/* The IR has to be in SSA form for LLVM */
		cfg->opt |= MONO_OPT_SSA;

		// FIXME:
		if (cfg->ret) {
			// Allow SSA on the result value
			cfg->ret->flags &= ~MONO_INST_VOLATILE;

			// Add an explicit return instruction referencing the return value
			MONO_INST_NEW (cfg, ins, OP_SETRET);
			ins->sreg1 = cfg->ret->dreg;
			MONO_ADD_INS (cfg->bb_exit, ins);
		}

		cfg->opt &= ~MONO_OPT_LINEARS;

		/* FIXME: */
		cfg->opt &= ~MONO_OPT_BRANCH;
	}

	/* todo: remove code when we have verified that the liveness for try/catch blocks
	 * works perfectly
	 */
	/*
	 * Currently, this can't be commented out since exception blocks are not
	 * processed during liveness analysis.
	 * It is also needed, because otherwise the local optimization passes would
	 * delete assignments in cases like this:
	 * r1 <- 1
	 * <something which throws>
	 * r1 <- 2
	 * This also allows SSA to be run on methods containing exception clauses, since
	 * SSA will ignore variables marked VOLATILE.
	 */
	mono_liveness_handle_exception_clauses (cfg);

	mono_handle_out_of_line_bblock (cfg);

	/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/

	/* Phase: local optimizations on the linear IR. */
	if (!COMPILE_LLVM (cfg))
		mono_decompose_long_opts (cfg);

	/* Should be done before branch opts */
	if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
		mono_local_cprop (cfg);

	if (cfg->opt & MONO_OPT_BRANCH)
		mono_optimize_branches (cfg);

	/* This must be done _before_ global reg alloc and _after_ decompose */
	mono_handle_global_vregs (cfg);
	if (cfg->opt & MONO_OPT_DEADCE)
		mono_local_deadce (cfg);
	/* Disable this for LLVM to make the IR easier to handle */
	if (!COMPILE_LLVM (cfg))
		mono_if_conversion (cfg);

	if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
		mono_remove_critical_edges (cfg);

	/* Depth-first ordering on basic blocks */
	cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));

	cfg->max_block_num = cfg->num_bblocks;

	dfn = 0;
	df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
	if (cfg->num_bblocks != dfn + 1) {
		MonoBasicBlock *bb;

		cfg->num_bblocks = dfn + 1;

		/* remove unreachable code, because the code in them may be
		 * inconsistent  (access to dead variables for example) */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
		compute_reachable (cfg->bb_entry);
		/* Exception handlers are entered out-of-line, so treat them as roots. */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			if (bb->flags & BB_EXCEPTION_HANDLER)
				compute_reachable (bb);
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			if (!(bb->flags & BB_VISITED)) {
				if (cfg->verbose_level > 1)
					g_print ("found unreachable code in BB%d\n", bb->block_num);
				bb->code = bb->last_ins = NULL;
				while (bb->out_count)
					mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
			}
		}
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
	}

	if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
		/*
		 * we disable some optimizations if there are too many variables
		 * because JIT time may become too expensive. The actual number needs
		 * to be tweaked and eventually the non-linear algorithms should be fixed.
		 */
		cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
		cfg->disable_ssa = TRUE;
	}

	if (cfg->opt & MONO_OPT_LOOP) {
		mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM);
		mono_compute_natural_loops (cfg);
	}

	/* after method_to_ir */
	if (parts == 1) {
		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
		return cfg;
	}

	/*
	  if (header->num_clauses)
	  cfg->disable_ssa = TRUE;
	*/

//#define DEBUGSSA "logic_run"
#define DEBUGSSA_CLASS "Tests"
#ifdef DEBUGSSA

	if (!cfg->disable_ssa) {
		mono_local_cprop (cfg);

#ifndef DISABLE_SSA
		mono_ssa_compute (cfg);
#endif
	}
#else
	/* Phase: convert to SSA form if requested and allowed. */
	if (cfg->opt & MONO_OPT_SSA) {
		if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
#ifndef DISABLE_SSA
			mono_ssa_compute (cfg);
#endif

			if (cfg->verbose_level >= 2) {
				print_dfn (cfg);
			}
		}
	}
#endif

	/* after SSA translation */
	if (parts == 2) {
		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
		return cfg;
	}

	if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
		if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
#ifndef DISABLE_SSA
			mono_ssa_cprop (cfg);
#endif
		}
	}

#ifndef DISABLE_SSA
	/* Phase: SSA-based optimizations, then leave SSA form. */
	if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
		//mono_ssa_strength_reduction (cfg);

		if (cfg->opt & MONO_OPT_SSAPRE) {
			mono_perform_ssapre (cfg);
			//mono_local_cprop (cfg);
		}

		if (cfg->opt & MONO_OPT_DEADCE) {
			mono_ssa_deadce (cfg);
			deadce_has_run = TRUE;
		}

		if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
			mono_perform_abc_removal (cfg);

		mono_ssa_remove (cfg);
		mono_local_cprop (cfg);
		mono_handle_global_vregs (cfg);
		if (cfg->opt & MONO_OPT_DEADCE)
			mono_local_deadce (cfg);

		if (cfg->opt & MONO_OPT_BRANCH) {
			MonoBasicBlock *bb;

			mono_optimize_branches (cfg);

			/* Have to recompute cfg->bblocks and bb->dfn */
			if (cfg->globalra) {
				mono_remove_critical_edges (cfg);

				for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
					bb->dfn = 0;

				/* Depth-first ordering on basic blocks */
				cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));

				dfn = 0;
				df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
				cfg->num_bblocks = dfn + 1;
			}
		}
	}
#endif

	if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
		/* This removes MONO_INST_FAULT flags too so perform it unconditionally */
		if (cfg->opt & MONO_OPT_ABCREM)
			mono_perform_abc_removal (cfg);
	}

	/* after SSA removal */
	if (parts == 3) {
		if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
			MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
		return cfg;
	}

	/* Phase: lowering/decomposition passes for the native backend. */
#ifdef MONO_ARCH_SOFT_FLOAT
	if (!COMPILE_LLVM (cfg))
		mono_decompose_soft_float (cfg);
#endif
	if (!COMPILE_LLVM (cfg))
		mono_decompose_vtype_opts (cfg);
	if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
		mono_decompose_array_access_opts (cfg);

	if (cfg->got_var) {
#ifndef MONO_ARCH_GOT_REG
		GList *regs;
#endif
		int got_reg;

		g_assert (cfg->got_var_allocated);

		/*
		 * Allways allocate the GOT var to a register, because keeping it
		 * in memory will increase the number of live temporaries in some
		 * code created by inssel.brg, leading to the well known spills+
		 * branches problem. Testcase: mcs crash in
		 * System.MonoCustomAttrs:GetCustomAttributes.
		 */
#ifdef MONO_ARCH_GOT_REG
		got_reg = MONO_ARCH_GOT_REG;
#else
		regs = mono_arch_get_global_int_regs (cfg);
		g_assert (regs);
		got_reg = GPOINTER_TO_INT (regs->data);
		g_list_free (regs);
#endif
		cfg->got_var->opcode = OP_REGVAR;
		cfg->got_var->dreg = got_reg;
		cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
	}

	/*
	 * Have to call this again to process variables added since the first call.
	 */
	mono_liveness_handle_exception_clauses (cfg);

	/* Phase: register allocation (global or linear scan). */
	if (cfg->globalra) {
		MonoBasicBlock *bb;

		/* Have to do this before regalloc since it can create vregs */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			mono_arch_lowering_pass (cfg, bb);

		mono_global_regalloc (cfg);
	}

	if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
		GList *vars, *regs, *l;

		/* fixme: maybe we can avoid to compute livenesss here if already computed ? */
		cfg->comp_done &= ~MONO_COMP_LIVENESS;
		if (!(cfg->comp_done & MONO_COMP_LIVENESS))
			mono_analyze_liveness (cfg);

		if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
			regs = mono_arch_get_global_int_regs (cfg);
			/* Remove the reg reserved for holding the GOT address */
			if (cfg->got_var) {
				for (l = regs; l; l = l->next) {
					if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
						regs = g_list_delete_link (regs, l);
						break;
					}
				}
			}
			mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs);
		}
	}

	//mono_print_code (cfg, "");

    //print_dfn (cfg);

	/* variables are allocated after decompose, since decompose could create temps */
	if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
		mono_arch_allocate_vars (cfg);
		if (cfg->exception_type)
			return cfg;
	}

	{
		MonoBasicBlock *bb;
		gboolean need_local_opts;

		if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
			mono_spill_global_vars (cfg, &need_local_opts);

			if (need_local_opts || cfg->compile_aot) {
				/* To optimize code created by spill_global_vars */
				mono_local_cprop (cfg);

				if (cfg->opt & MONO_OPT_DEADCE)
					mono_local_deadce (cfg);
			}
		}

		/* Add branches between non-consecutive bblocks */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
				bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
				/* we are careful when inverting, since bugs like #59580
				 * could show up when dealing with NaNs.
				 */
				if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
					/* Invert the condition so the fall-through path needs no branch. */
					MonoBasicBlock *tmp =  bb->last_ins->inst_true_bb;
					bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
					bb->last_ins->inst_false_bb = tmp;

					bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
				} else {
					/* Append an explicit unconditional branch to the false target. */
					MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
					inst->opcode = OP_BR;
					inst->inst_target_bb = bb->last_ins->inst_false_bb;
					mono_bblock_add_inst (bb, inst);
				}
			}
		}

		if (cfg->verbose_level >= 4 && !cfg->globalra) {
			for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
				MonoInst *tree = bb->code;
				g_print ("DUMP BLOCK %d:\n", bb->block_num);
				if (!tree)
					continue;
				for (; tree; tree = tree->next) {
					mono_print_ins_index (-1, tree);
				}
			}
		}

		/* FIXME: */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			bb->max_vreg = cfg->next_vreg;
		}
	}

	/* Phase: native code emission, via LLVM or the built-in backend. */
	if (COMPILE_LLVM (cfg)) {
#ifdef ENABLE_LLVM
		char *nm;

		/* The IR has to be in SSA form for LLVM */
		if (!(cfg->comp_done & MONO_COMP_SSA)) {
			cfg->exception_message = g_strdup ("SSA disabled.");
			cfg->disable_llvm = TRUE;
		}

		if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
			mono_decompose_array_access_opts (cfg);

		if (!cfg->disable_llvm)
			mono_llvm_emit_method (cfg);
		if (cfg->disable_llvm) {
			if (cfg->verbose_level >= 1) {
				//nm = mono_method_full_name (cfg->method, TRUE);
				printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
				//g_free (nm);
			}
			/* Fall back to the regular JIT for this method. */
			mono_destroy_compile (cfg);
			try_llvm = FALSE;
			goto restart_compile;
		}

		if (cfg->verbose_level > 0 && !cfg->compile_aot) {
			nm = mono_method_full_name (cfg->method, TRUE);
			g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
					 nm,
					 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
			g_free (nm);
		}
#endif
	} else {
		mono_codegen (cfg);
	}

	if (COMPILE_LLVM (cfg))
		InterlockedIncrement (&mono_jit_stats.methods_with_llvm);
	else
		InterlockedIncrement (&mono_jit_stats.methods_without_llvm);

	cfg->jit_info = create_jit_info (cfg, method_to_compile);

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	if (cfg->extend_live_ranges) {
		/* Extend live ranges to cover the whole method */
		for (i = 0; i < cfg->num_varinfo; ++i)
			MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
	}
#endif

	if (!cfg->compile_aot)
		mono_save_xdebug_info (cfg);

	mini_gc_create_gc_map (cfg);

	mono_save_seq_point_info (cfg);

	if (cfg->verbose_level >= 2) {
		/* NOTE(review): 'id + 3' skips the first three characters of the full
		 * name — presumably a fixed-width prefix; confirm against
		 * mono_method_full_name's output format. */
		char *id =  mono_method_full_name (cfg->method, FALSE);
		mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
		g_free (id);
	}

	/* Publish the compiled method so stack walks / EH can find it. */
	if (!cfg->compile_aot) {
		mono_domain_lock (cfg->domain);
		mono_jit_info_table_add (cfg->domain, cfg->jit_info);

		if (cfg->method->dynamic)
			mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
		mono_domain_unlock (cfg->domain);
	}

	/* collect statistics */
	mono_perfcounters->jit_methods++;
	mono_perfcounters->jit_bytes += header->code_size;
	mono_jit_stats.allocated_code_size += cfg->code_len;
	code_size_ratio = cfg->code_len;
	if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
		mono_jit_stats.biggest_method_size = code_size_ratio;
		g_free (mono_jit_stats.biggest_method);
		/* NOTE(review): the trailing ')' in this format string looks like a
		 * stray leftover — stats output will read "Klass::name)". Verify and
		 * fix in a behavior-affecting change if confirmed. */
		mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
	}
	code_size_ratio = (code_size_ratio * 100) / header->code_size;
	if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
		mono_jit_stats.max_code_size_ratio = code_size_ratio;
		g_free (mono_jit_stats.max_ratio_method);
		/* NOTE(review): same stray ')' as above. */
		mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
	}
	mono_jit_stats.native_code_size += cfg->code_len;

	if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
		MONO_PROBE_METHOD_COMPILE_END (method, TRUE);

	return cfg;
}

#else

/* Stub used when the JIT is compiled out (DISABLE_JIT). */
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, gboolean run_cctors, gboolean compile_aot, int parts)
{
	g_assert_not_reached ();
	return NULL;
}

#endif /* DISABLE_JIT */

MonoJitInfo*
mono_domain_lookup_shared_generic (MonoDomain *domain, MonoMethod *method)
{
	static gboolean inited = FALSE;
	static int lookups = 0;
	static int failed_lookups = 0;
	MonoJitInfo *ji;

	/* Look up the shared (generic) version of METHOD in the domain's JIT code
	 * hash; only entries carrying generic JIT info count as hits. */
	ji = mono_internal_hash_table_lookup (&domain->jit_code_hash, mini_get_shared_method (method));
	if (ji && !ji->has_generic_jit_info)
		ji = NULL;

	/* Lazily register the statistics counters on first use. */
	if (!inited) {
		mono_counters_register ("Shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &lookups);
		mono_counters_register ("Failed shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &failed_lookups);
		inited = TRUE;
	}

	++lookups;
	if (!ji)
		++failed_lookups;

	return ji;
}

/*
 * LOCKING: Assumes domain->jit_code_hash_lock is held.
 */
static MonoJitInfo*
lookup_method_inner (MonoDomain *domain, MonoMethod *method)
{
	/* Exact match first; fall back to the shared generic version when the
	 * method is sharable. */
	MonoJitInfo *ji = mono_internal_hash_table_lookup (&domain->jit_code_hash, method);

	if (ji)
		return ji;

	if (!mono_method_is_generic_sharable_impl (method, FALSE))
		return NULL;
	return mono_domain_lookup_shared_generic (domain, method);
}

/* Locked wrapper around lookup_method_inner. */
static MonoJitInfo*
lookup_method (MonoDomain *domain, MonoMethod *method)
{
	MonoJitInfo *info;

	mono_loader_lock (); /*FIXME lookup_method_inner acquired it*/
	mono_domain_jit_code_hash_lock (domain);
	info = lookup_method_inner (domain, method);
	mono_domain_jit_code_hash_unlock (domain);
	mono_loader_unlock ();

	return info;
}

#if ENABLE_JIT_MAP
/* Handle for the perf(1) JIT map file (/tmp/perf-PID.map); NULL when the
 * jit map is disabled. */
static FILE* perf_map_file = NULL;

/* Create (truncate) the per-process perf map file; idempotent. */
void
mono_enable_jit_map (void)
{
	if (!perf_map_file) {
		char name [64];
		g_snprintf (name, sizeof (name), "/tmp/perf-%d.map", getpid ());
		unlink (name);
		perf_map_file = fopen (name, "w");
	}
}

/* Append one "<start-addr> <size> <name>" record to the perf map. */
void
mono_emit_jit_tramp (void *start, int size, const char *desc)
{
	if (perf_map_file)
		fprintf (perf_map_file, "%llx %x %s\n", (long long unsigned int)(gsize)start, size, desc);
}

/* Record a compiled method's code range under its full name. */
void
mono_emit_jit_map (MonoJitInfo *jinfo)
{
	if (perf_map_file) {
		char *name = mono_method_full_name (jinfo->method, TRUE);
		mono_emit_jit_tramp (jinfo->code_start, jinfo->code_size, name);
		g_free (name);
	}
}

gboolean
mono_jit_map_is_enabled
(void)
{
	return perf_map_file != NULL;
}
#endif

/*
 * mono_jit_compile_method_inner:
 *
 *   Produce native code for METHOD in TARGET_DOMAIN with optimization flags OPT.
 * Handles AOT lookup, icall/pinvoke wrappers, and runtime-implemented (delegate)
 * methods before falling back to the JIT proper. On failure returns NULL and
 * stores the exception in *JIT_EX. On success the method's class cctor is run.
 */
static gpointer
mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoException **jit_ex)
{
	MonoCompile *cfg;
	gpointer code = NULL;
	MonoJitInfo *jinfo, *info;
	MonoVTable *vtable;
	MonoException *ex = NULL;
	guint32 prof_options;
	GTimer *jit_timer;
	MonoMethod *prof_method;

#ifdef MONO_USE_AOT_COMPILER
	/* Prefer precompiled AOT code when available */
	if (opt & MONO_OPT_AOT) {
		MonoDomain *domain = mono_domain_get ();

		mono_class_init (method->klass);

		if ((code = mono_aot_get_method (domain, method))) {
			vtable = mono_class_vtable (domain, method->klass);
			g_assert (vtable);
			mono_runtime_class_init (vtable);
			return code;
		}
	}
#endif

	/* Internal calls and pinvokes are compiled via a marshalling wrapper, not JITted directly */
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
		MonoMethod *nm;
		MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;

		if (!piinfo->addr) {
			if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
				piinfo->addr = mono_lookup_internal_call (method);
			else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
#ifdef HOST_WIN32
				g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
#else
				g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
#endif
			else
				mono_lookup_pinvoke_call (method, NULL, NULL);
		}
		nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, FALSE);
		code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
		jinfo = mono_jit_info_table_find (target_domain, code);
		if (!jinfo)
			jinfo = mono_jit_info_table_find (mono_domain_get (), code);
		if (jinfo)
			mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
		return code;

		//if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
		//mono_debug_add_wrapper (method, nm);
	} else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
		/* Runtime-implemented methods: only the delegate special methods are recognized */
		const char *name = method->name;
		char *full_name, *msg;
		MonoMethod *nm;

		if (method->klass->parent == mono_defaults.multicastdelegate_class) {
			if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
				MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("mono_delegate_ctor");
				g_assert (mi);
				/*
				 * We need to make sure this wrapper
				 * is compiled because it might end up
				 * in an (M)RGCTX if generic sharing
				 * is enabled, and would be called
				 * indirectly.  If it were a
				 * trampoline we'd try to patch that
				 * indirect call, which is not
				 * possible.
				 */
				return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
			} else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
				return mono_create_delegate_trampoline (target_domain, method->klass);
#else
				nm = mono_marshal_get_delegate_invoke (method, NULL);
				return mono_get_addr_from_ftnptr (mono_compile_method (nm));
#endif
			} else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
				nm = mono_marshal_get_delegate_begin_invoke (method);
				return mono_get_addr_from_ftnptr (mono_compile_method (nm));
			} else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
				nm = mono_marshal_get_delegate_end_invoke (method);
				return mono_get_addr_from_ftnptr (mono_compile_method (nm));
			}
		}

		full_name = mono_method_full_name (method, TRUE);
		msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
		*jit_ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
		g_free (full_name);
		g_free (msg);
		return NULL;
	}

	/* In --aot-only mode JITting is forbidden: surface an ExecutionEngineException */
	if (mono_aot_only) {
		char *fullname = mono_method_full_name (method, TRUE);
		char *msg = g_strdup_printf ("Attempting to JIT compile method '%s' while running with --aot-only.\n", fullname);

		*jit_ex = mono_get_exception_execution_engine (msg);

		g_free (fullname);
		g_free (msg);

		return NULL;
	}

	jit_timer = g_timer_new ();

	cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
	prof_method = cfg->method;

	g_timer_stop (jit_timer);
	mono_jit_stats.jit_time += g_timer_elapsed (jit_timer, NULL);
	g_timer_destroy (jit_timer);

	/* Map a failed compilation onto the corresponding managed exception */
	switch (cfg->exception_type) {
	case MONO_EXCEPTION_NONE:
		break;
	case MONO_EXCEPTION_TYPE_LOAD:
	case MONO_EXCEPTION_MISSING_FIELD:
	case MONO_EXCEPTION_MISSING_METHOD:
	case MONO_EXCEPTION_FILE_NOT_FOUND:
	case MONO_EXCEPTION_BAD_IMAGE: {
		/* Throw a type load exception if needed */
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (error) {
			ex = mono_loader_error_prepare_exception (error);
		} else {
			if (cfg->exception_ptr) {
				ex = mono_class_get_exception_for_failure (cfg->exception_ptr);
			} else {
				if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
					ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
				else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
					ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
				else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
					ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
				else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
					ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
				else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
					ex = mono_get_exception_bad_image_format (cfg->exception_message);
				else
					g_assert_not_reached ();
			}
		}
		break;
	}
	case MONO_EXCEPTION_INVALID_PROGRAM:
		ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
		break;
	case MONO_EXCEPTION_UNVERIFIABLE_IL:
		ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.Security", "VerificationException", cfg->exception_message);
		break;
	case MONO_EXCEPTION_METHOD_ACCESS:
		ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MethodAccessException", cfg->exception_message);
		break;
	case MONO_EXCEPTION_FIELD_ACCESS:
		ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FieldAccessException", cfg->exception_message);
		break;
	/* this can only be set if the security manager is active */
	case MONO_EXCEPTION_SECURITY_LINKDEMAND: {
		MonoSecurityManager* secman = mono_security_manager_get_methods ();
		MonoObject *exc = NULL;
		gpointer args [2];

		args [0] = &cfg->exception_data;
		args [1] = &method;
		mono_runtime_invoke (secman->linkdemandsecurityexception, NULL, args, &exc);

		ex = (MonoException*)exc;
		break;
	}
	case MONO_EXCEPTION_OBJECT_SUPPLIED: {
		MonoException *exp = cfg->exception_ptr;
		MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);

		ex = exp;
		break;
	}
	case MONO_EXCEPTION_OUT_OF_MEMORY:
		ex = mono_domain_get ()->out_of_memory_ex;
		break;
	default:
		g_assert_not_reached ();
	}

	if (ex) {
		if (cfg->prof_options & MONO_PROFILE_JIT_COMPILATION)
			mono_profiler_method_end_jit (method, NULL, MONO_PROFILE_FAILED);

		mono_destroy_compile (cfg);
		*jit_ex = ex;

		return NULL;
	}

	mono_loader_lock (); /*FIXME lookup_method_inner requires the loader lock*/
	mono_domain_lock (target_domain);

	/* Check if some other thread already did the job. In this case, we can
       discard the code this thread generated. */

	mono_domain_jit_code_hash_lock (target_domain);
	info = lookup_method_inner (target_domain, method);
	if (info) {
		/* We can't use a domain specific method in another domain */
		if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
			code = info->code_start;
//			printf("Discarding code for method %s\n", method->name);
		}
	}
	if (code == NULL) {
		/* We won the race: publish our code in the domain's JIT code hash */
		mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->method, cfg->jit_info);
		mono_domain_jit_code_hash_unlock (target_domain);
		code = cfg->native_code;

		if (cfg->generic_sharing_context && mono_method_is_generic_sharable_impl (method, FALSE))
			mono_stats.generics_shared_methods++;
	} else {
		mono_domain_jit_code_hash_unlock (target_domain);
	}

	jinfo = cfg->jit_info;

	prof_options = cfg->prof_options;

	/*
	 * Update global stats while holding a lock, instead of doing many
	 * InterlockedIncrement operations during JITting.
	 */
	mono_jit_stats.allocate_var += cfg->stat_allocate_var;
	mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
	mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
	mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
	mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
	mono_jit_stats.regvars += cfg->stat_n_regvars;
	mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
	mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
	mono_jit_stats.cas_demand_generation += cfg->stat_cas_demand_generation;
	mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;

	mono_destroy_compile (cfg);

#ifndef DISABLE_JIT
	/* Back-patch any pending jump sites that targeted this method */
	if (domain_jit_info (target_domain)->jump_target_hash) {
		MonoJumpInfo patch_info;
		MonoJumpList *jlist;
		GSList *tmp;
		jlist = g_hash_table_lookup (domain_jit_info (target_domain)->jump_target_hash, method);
		if (jlist) {
			patch_info.next = NULL;
			patch_info.ip.i = 0;
			patch_info.type = MONO_PATCH_INFO_METHOD_JUMP;
			patch_info.data.method = method;
			g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
			for (tmp = jlist->list; tmp; tmp = tmp->next)
				mono_arch_patch_code (NULL, target_domain, tmp->data, &patch_info, NULL, TRUE);
		}
	}

	mono_emit_jit_map (jinfo);
#endif
	mono_domain_unlock (target_domain);
	mono_loader_unlock ();

	vtable = mono_class_vtable (target_domain, method->klass);
	if (!vtable) {
		ex = mono_class_get_exception_for_failure (method->klass);
		g_assert (ex);
		*jit_ex = ex;
		return NULL;
	}

	if (prof_options & MONO_PROFILE_JIT_COMPILATION) {
		if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
			if (mono_marshal_method_from_wrapper (method)) {
				/* Native func wrappers have no method */
				/* The profiler doesn't know about wrappers, so pass the original icall method */
				mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
			}
		}
		mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
		if (prof_method != method) {
			mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
		}
	}

	ex = mono_runtime_class_init_full (vtable, FALSE);
	if (ex) {
		*jit_ex = ex;
		return NULL;
	}
	return code;
}

/*
 * mono_jit_compile_method_with_opt:
 *
 *   Front end for JIT compilation: decides the target domain (root domain for
 * MONO_OPT_SHARED code), reuses already-compiled code when possible, and wraps
 * the result in an ftnptr. Also registers icall wrappers once, globally.
 */
static gpointer
mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex)
{
	MonoDomain *target_domain, *domain = mono_domain_get ();
	MonoJitInfo *info;
	gpointer code, p;
	MonoJitICallInfo *callinfo = NULL;

	/*
	 * ICALL wrappers are handled specially, since there is only one copy of them
	 * shared by all appdomains.
	 */
	if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
		const char *icall_name;

		icall_name = method->name + strlen ("__icall_wrapper_");
		g_assert (icall_name);
		callinfo = mono_find_jit_icall_by_name (icall_name);
		g_assert (callinfo);

		/* Must be domain neutral since there is only one copy */
		opt |= MONO_OPT_SHARED;
	}

	if (method->dynamic)
		opt &= ~MONO_OPT_SHARED;

	/* These methods can become invalid when a domain is unloaded */
	if (method->klass->image != mono_get_corlib () || method->is_inflated)
		opt &= ~MONO_OPT_SHARED;

	if (opt & MONO_OPT_SHARED)
		target_domain = mono_get_root_domain ();
	else
		target_domain = domain;

	if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (method);

		g_assert (info);
		if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER)
			method = info->d.synchronized_inner.method;
	}

	info = lookup_method (target_domain, method);
	if (info) {
		/* We can't use a domain specific method in another domain */
		if (! ((domain != target_domain) && !info->domain_neutral)) {
			MonoVTable *vtable;
			MonoException *tmpEx;

			mono_jit_stats.methods_lookups++;
			vtable = mono_class_vtable (domain, method->klass);
			g_assert (vtable);
			tmpEx = mono_runtime_class_init_full (vtable, ex == NULL);
			if (tmpEx) {
				*ex = tmpEx;
				return NULL;
			}
			return mono_create_ftnptr (target_domain, info->code_start);
		}
	}

	code = mono_jit_compile_method_inner (method, target_domain, opt, ex);
	if (!code)
		return NULL;

	p = mono_create_ftnptr (target_domain, code);

	if (callinfo) {
		/*mono_register_jit_icall_wrapper takes the loader lock, so we take it on the outside. */
		mono_loader_lock ();
		mono_jit_lock ();
		if (!callinfo->wrapper) {
			callinfo->wrapper = p;
			mono_register_jit_icall_wrapper (callinfo, p);
			mono_debug_add_icall_wrapper (method, callinfo);
		}
		mono_jit_unlock ();
		mono_loader_unlock ();
	}

	return p;
}

/*
 * mono_jit_compile_method:
 *
 *   Compile METHOD with the default optimization flags. Raises the pending
 * exception on failure (does not return NULL to the caller).
 */
gpointer
mono_jit_compile_method (MonoMethod *method)
{
	MonoException *ex = NULL;
	gpointer code;

	code = mono_jit_compile_method_with_opt (method, default_opt, &ex);
	if (!code) {
		g_assert (ex);
		mono_raise_exception (ex);
	}

	return code;
}

#ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD
/* Target installed over freed delegate wrappers (keep-delegates mode): aborts with a diagnostic. */
static void
invalidated_delegate_trampoline (char *desc)
{
	g_error ("Unmanaged code called delegate of type %s which was already garbage collected.\n"
		 "See http://www.go-mono.com/delegate.html for an explanation and ways to fix this.", desc);
}
#endif

/*
 * mono_jit_free_method:
 *
 *  Free all memory allocated by the JIT for METHOD.
 */
/*
 * Frees JIT state for a *dynamic* method: removes it from every per-domain hash,
 * unlinks pending jump targets that point into its code range, and finally
 * destroys its code manager (unless keep-delegates redirected the code instead).
 */
static void
mono_jit_free_method (MonoDomain *domain, MonoMethod *method)
{
	MonoJitDynamicMethodInfo *ji;
	gboolean destroy = TRUE;
	GHashTableIter iter;
	MonoJumpList *jlist;

	g_assert (method->dynamic);

	mono_domain_lock (domain);
	ji = mono_dynamic_code_hash_lookup (domain, method);
	mono_domain_unlock (domain);

	if (!ji)
		return;

	mono_debug_remove_method (method, domain);

	mono_domain_lock (domain);
	g_hash_table_remove (domain_jit_info (domain)->dynamic_code_hash, method);
	mono_internal_hash_table_remove (&domain->jit_code_hash, method);
	g_hash_table_remove (domain_jit_info (domain)->jump_trampoline_hash, method);
	g_hash_table_remove (domain_jit_info (domain)->runtime_invoke_hash, method);

	/* Remove jump targets in this method */
	g_hash_table_iter_init (&iter, domain_jit_info (domain)->jump_target_hash);
	while (g_hash_table_iter_next (&iter, NULL, (void**)&jlist)) {
		GSList *tmp, *remove;

		remove = NULL;
		for (tmp = jlist->list; tmp; tmp = tmp->next) {
			guint8 *ip = tmp->data;

			/* Collect entries pointing into this method's code range */
			if (ip >= (guint8*)ji->ji->code_start && ip < (guint8*)ji->ji->code_start + ji->ji->code_size)
				remove = g_slist_prepend (remove, tmp);
		}
		for (tmp = remove; tmp; tmp = tmp->next) {
			jlist->list = g_slist_delete_link (jlist->list, tmp->data);
		}
		g_slist_free (remove);
	}

	mono_domain_unlock (domain);

#ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD
	if (debug_options.keep_delegates && method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		/*
		 * Instead of freeing the code, change it to call an error routine
		 * so people can fix their code.
		 */
		char *type = mono_type_full_name (&method->klass->byval_arg);
		char *type_and_method = g_strdup_printf ("%s.%s", type, method->name);

		g_free (type);
		mono_arch_invalidate_method (ji->ji, invalidated_delegate_trampoline, type_and_method);
		destroy = FALSE;
	}
#endif

	/*
	 * This needs to be done before freeing code_mp, since the code address is the
	 * key in the table, so if we free the code_mp first, another thread can grab the
	 * same code address and replace our entry in the table.
	 */
	mono_jit_info_table_remove (domain, ji->ji);

	if (destroy)
		mono_code_manager_destroy (ji->code_mp);
	g_free (ji);
}

/*
 * mono_jit_find_compiled_method_with_jit_info:
 *
 *   Return already-compiled code for METHOD, or NULL if none. Optionally
 * returns the MonoJitInfo via JI (set to NULL when no usable code is found).
 * Shared (MONO_OPT_SHARED) code is looked up in the root domain.
 */
gpointer
mono_jit_find_compiled_method_with_jit_info (MonoDomain *domain, MonoMethod *method, MonoJitInfo **ji)
{
	MonoDomain *target_domain;
	MonoJitInfo *info;

	if (default_opt & MONO_OPT_SHARED)
		target_domain = mono_get_root_domain ();
	else
		target_domain = domain;

	info = lookup_method (target_domain, method);
	if (info) {
		/* We can't use a domain specific method in another domain */
		if (! ((domain != target_domain) && !info->domain_neutral)) {
			mono_jit_stats.methods_lookups++;
			if (ji)
				*ji = info;
			return info->code_start;
		}
	}

	if (ji)
		*ji = NULL;

	return NULL;
}

/* Convenience wrapper: compiled-code lookup without the MonoJitInfo out-parameter. */
gpointer
mono_jit_find_compiled_method (MonoDomain *domain, MonoMethod *method)
{
	return mono_jit_find_compiled_method_with_jit_info (domain, method, NULL);
}

/* Per-method cache entry for mono_jit_runtime_invoke (keyed by method in
 * domain_info->runtime_invoke_hash). */
typedef struct {
	MonoMethod *method;           /* the target method */
	gpointer compiled_method;     /* native code for the target (NULL for array accessors) */
	gpointer runtime_invoke;      /* compiled runtime-invoke wrapper (unused in dyn-call mode) */
	MonoVTable *vtable;           /* vtable of the declaring class, used to run its cctor */
	MonoDynCallInfo *dyn_call_info; /* non-NULL when the generic OP_DYN_CALL path is used */
	MonoClass *ret_box_class;     /* class used to box value-type returns on the dyn-call path */
} RuntimeInvokeInfo;

/**
 * mono_jit_runtime_invoke:
 * @method: the method to invoke
 * @obj: this pointer
 * @params: array of parameter values.
 * @exc: used to catch exceptions objects
 */
static MonoObject*
mono_jit_runtime_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc)
{
	MonoMethod *invoke;
	MonoObject *(*runtime_invoke) (MonoObject *this, void **params, MonoObject **exc, void* compiled_method);
	MonoDomain *domain = mono_domain_get ();
	MonoJitDomainInfo *domain_info;
	RuntimeInvokeInfo *info, *info2;

	/* Instance call on a NULL receiver: warn and bail out rather than crash */
	if (obj == NULL && !(method->flags & METHOD_ATTRIBUTE_STATIC) && !method->string_ctor && (method->wrapper_type == 0)) {
		g_warning ("Ignoring invocation of an instance method on a NULL instance.\n");
		return NULL;
	}

	domain_info = domain_jit_info (domain);

	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_info->runtime_invoke_hash, method);
	mono_domain_unlock (domain);

	if (!info) {
		if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
			/*
			 * This might be redundant since mono_class_vtable () already does this,
			 * but keep it just in case for moonlight.
			 */
			mono_class_setup_vtable (method->klass);
			if (method->klass->exception_type != MONO_EXCEPTION_NONE) {
				if (exc)
					*exc = (MonoObject*)mono_class_get_exception_for_failure (method->klass);
				else
					mono_raise_exception (mono_class_get_exception_for_failure (method->klass));
				return NULL;
			}
		}

		info = g_new0 (RuntimeInvokeInfo, 1);

		invoke = mono_marshal_get_runtime_invoke (method, FALSE);
		info->vtable = mono_class_vtable_full (domain, method->klass, TRUE);
		g_assert (info->vtable);

		if (method->klass->rank && (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) &&
			(method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
			/*
			 * Array Get/Set/Address methods. The JIT implements them using inline code
			 * inside the runtime invoke wrappers, so no need to compile them.
			 */
			info->compiled_method = NULL;
		} else {
			MonoException *jit_ex = NULL;

			info->compiled_method = mono_jit_compile_method_with_opt (method, default_opt, &jit_ex);
			if (!info->compiled_method) {
				g_free (info);
				g_assert (jit_ex);
				if (exc) {
					*exc = (MonoObject*)jit_ex;
					return NULL;
				} else {
					mono_raise_exception (jit_ex);
				}
			}

			if (mono_method_needs_static_rgctx_invoke (method, FALSE))
				info->compiled_method = mono_create_static_rgctx_trampoline (method, info->compiled_method);
		}

		/*
		 * We want to avoid AOTing 1000s of runtime-invoke wrappers when running
		 * in full-aot mode, so we use a slower, but more generic wrapper if
		 * possible, built on top of the OP_DYN_CALL opcode provided by the JIT.
		 */
#ifdef MONO_ARCH_DYN_CALL_SUPPORTED
		if (mono_aot_only || debug_options.dyn_runtime_invoke) {
			MonoMethodSignature *sig = mono_method_signature (method);
			gboolean supported = TRUE;
			int i;

			if (method->string_ctor)
				sig = mono_marshal_get_string_ctor_signature (method);

			/* Nullable parameters are not supported by the dyn-call path */
			for (i = 0; i < sig->param_count; ++i) {
				MonoType *t = sig->params [i];

				if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type (t)))
					supported = FALSE;
			}

			if (method->klass->contextbound || !info->compiled_method)
				supported = FALSE;

			if (supported)
				info->dyn_call_info = mono_arch_dyn_call_prepare (sig);

			if (info->dyn_call_info) {
				/* Decide how the return value must be boxed */
				switch (sig->ret->type) {
				case MONO_TYPE_VOID:
					break;
				case MONO_TYPE_I1:
				case MONO_TYPE_U1:
				case MONO_TYPE_I2:
				case MONO_TYPE_U2:
				case MONO_TYPE_I4:
				case MONO_TYPE_U4:
				case MONO_TYPE_I:
				case MONO_TYPE_U:
				case MONO_TYPE_I8:
				case MONO_TYPE_U8:
				case MONO_TYPE_BOOLEAN:
				case MONO_TYPE_CHAR:
				case MONO_TYPE_R4:
				case MONO_TYPE_R8:
					info->ret_box_class = mono_class_from_mono_type (sig->ret);
					break;
				case MONO_TYPE_PTR:
					info->ret_box_class = mono_defaults.int_class;
					break;
				case MONO_TYPE_STRING:
				case MONO_TYPE_CLASS:
				case MONO_TYPE_ARRAY:
				case MONO_TYPE_SZARRAY:
				case MONO_TYPE_OBJECT:
					/* Reference types need no boxing */
					break;
				case MONO_TYPE_GENERICINST:
					if (!MONO_TYPE_IS_REFERENCE (sig->ret))
						info->ret_box_class = mono_class_from_mono_type (sig->ret);
					break;
				case MONO_TYPE_VALUETYPE:
					info->ret_box_class = mono_class_from_mono_type (sig->ret);
					break;
				default:
					g_assert_not_reached ();
					break;
				}
			}
		}
#endif

		if (!info->dyn_call_info)
			info->runtime_invoke = mono_jit_compile_method (invoke);

		/* Another thread may have populated the cache while we compiled; keep theirs */
		mono_domain_lock (domain);
		info2 = g_hash_table_lookup (domain_info->runtime_invoke_hash, method);
		if (info2) {
			g_free (info);
			info = info2;
		} else {
			g_hash_table_insert (domain_info->runtime_invoke_hash, method, info);
		}
		mono_domain_unlock (domain);
	}

	runtime_invoke = info->runtime_invoke;

	/*
	 * We need this here because mono_marshal_get_runtime_invoke can place
	 * the helper method in System.Object and not the target class.
	 */
	if (exc) {
		*exc = (MonoObject*)mono_runtime_class_init_full (info->vtable, FALSE);
		if (*exc)
			return NULL;
	} else {
		mono_runtime_class_init (info->vtable);
	}

	/* The wrappers expect this to be initialized to NULL */
	if (exc)
		*exc = NULL;

#ifdef MONO_ARCH_DYN_CALL_SUPPORTED
	if (info->dyn_call_info) {
		MonoMethodSignature *sig = mono_method_signature (method);
		gpointer *args;
		static RuntimeInvokeDynamicFunction dyn_runtime_invoke;
		int i, pindex;
		guint8 buf [128];
		guint8 retval [128];

		if (!dyn_runtime_invoke) {
			invoke = mono_marshal_get_runtime_invoke_dynamic ();
			dyn_runtime_invoke = mono_jit_compile_method (invoke);
		}

		/* Convert the arguments to the format expected by start_dyn_call () */
		args = g_alloca ((sig->param_count + sig->hasthis) * sizeof (gpointer));
		pindex = 0;
		if (sig->hasthis)
			args [pindex ++] = &obj;
		for (i = 0; i < sig->param_count; ++i) {
			MonoType *t = sig->params [i];

			if (t->byref) {
				args [pindex ++] = &params [i];
			} else if (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_PTR) {
				args [pindex ++] = &params [i];
			} else {
				/* Value types are passed as a pointer to their data */
				args [pindex ++] = params [i];
			}
		}

		//printf ("M: %s\n", mono_method_full_name (method, TRUE));

		mono_arch_start_dyn_call (info->dyn_call_info, (gpointer**)args, retval, buf, sizeof (buf));

		dyn_runtime_invoke (buf, exc, info->compiled_method);

		mono_arch_finish_dyn_call (info->dyn_call_info, buf);

		if (info->ret_box_class)
			return mono_value_box (domain, info->ret_box_class, retval);
		else
			return *(MonoObject**)retval;
	}
#endif

	return runtime_invoke (obj, params, exc, info->compiled_method);
}

/*
 * SIGFPE handler: maps the fault to DivideByZeroException (or OverflowException
 * on architectures that can distinguish integer overflow).
 */
void
SIG_HANDLER_SIGNATURE (mono_sigfpe_signal_handler)
{
	MonoException *exc = NULL;
	MonoJitInfo *ji;
#if !(defined(MONO_ARCH_USE_SIGACTION) || defined(HOST_WIN32))
	void *info = NULL;
#endif
	GET_CONTEXT;

	ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context (ctx));

#if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW)
	if (mono_arch_is_int_overflow (ctx, info))
		/*
		 * The spec says this throws ArithmeticException, but MS throws the derived
		 * OverflowException.
		 */
		exc = mono_get_exception_overflow ();
	else
		exc = mono_get_exception_divide_by_zero ();
#else
	exc = mono_get_exception_divide_by_zero ();
#endif

	if (!ji) {
		if (mono_chain_signal (SIG_HANDLER_PARAMS))
			return;

		/* NOTE(review): SIGSEGV is passed here even though this is the SIGFPE handler — same pattern as the SIGSEGV handler below; confirm intended */
		mono_handle_native_sigsegv (SIGSEGV, ctx);
	}

	mono_arch_handle_exception (ctx, exc);
}

/* SIGILL handler: converts an illegal instruction into an ExecutionEngineException. */
void
SIG_HANDLER_SIGNATURE (mono_sigill_signal_handler)
{
	MonoException *exc;
	GET_CONTEXT;

	exc = mono_get_exception_execution_engine ("SIGILL");

	mono_arch_handle_exception (ctx, exc);
}

#if defined(MONO_ARCH_USE_SIGACTION) || defined(HOST_WIN32)
#define HAVE_SIG_INFO
#endif

/*
 * SIGSEGV handler: dispatches debugger single-step/breakpoint events, AOT page
 * faults, soft/hard stack overflow, and managed NullReferenceException; chains
 * to the previously installed handler for faults outside managed code.
 */
void
SIG_HANDLER_SIGNATURE (mono_sigsegv_signal_handler)
{
	MonoJitInfo *ji;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	gpointer fault_addr = NULL;

	GET_CONTEXT;

#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED) && defined(HAVE_SIG_INFO)
	if (mono_arch_is_single_step_event (info, ctx)) {
		mono_debugger_agent_single_step_event (ctx);
		return;
	} else if (mono_arch_is_breakpoint_event (info, ctx)) {
		mono_debugger_agent_breakpoint_hit (ctx);
		return;
	}
#endif

#if !defined(HOST_WIN32) && defined(HAVE_SIG_INFO)
	fault_addr = info->si_addr;
	if (mono_aot_is_pagefault (info->si_addr)) {
		mono_aot_handle_pagefault (info->si_addr);
		return;
	}
#endif

	/* The thread might not be registered with the runtime */
	if (!mono_domain_get () || !jit_tls) {
		if (mono_chain_signal (SIG_HANDLER_PARAMS))
			return;
		mono_handle_native_sigsegv (SIGSEGV, ctx);
	}

	ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context (ctx));

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
	if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr))
		return;

#ifdef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
	/* info->si_addr seems to be NULL on some kernels when handling stack overflows */
	fault_addr = info->si_addr;
	if (fault_addr == NULL) {
		MonoContext mctx;

		mono_arch_sigctx_to_monoctx (ctx, &mctx);

		fault_addr = MONO_CONTEXT_GET_SP (&mctx);
	}
#endif

	if (jit_tls->stack_size &&
		ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
		/*
		 * The hard-guard page has been hit: there is not much we can do anymore
		 * Print a hopefully clear message and abort.
		 */
		mono_handle_hard_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr);
		g_assert_not_reached ();
	} else {
		/* The original handler might not like that it is executed on an altstack... */
		if (!ji && mono_chain_signal (SIG_HANDLER_PARAMS))
			return;

		mono_arch_handle_altstack_exception (ctx, info->si_addr, FALSE);
	}
#else

	if (!ji) {
		if (mono_chain_signal (SIG_HANDLER_PARAMS))
			return;

		mono_handle_native_sigsegv (SIGSEGV, ctx);
	}

	mono_arch_handle_exception (ctx, NULL);
#endif
}

/* SIGINT handler: surfaces the interrupt as an ExecutionEngineException. */
void
SIG_HANDLER_SIGNATURE (mono_sigint_signal_handler)
{
	MonoException *exc;
	GET_CONTEXT;

	exc = mono_get_exception_execution_engine ("Interrupted (SIGINT).");

	mono_arch_handle_exception (ctx, exc);
}

/* mono_jit_create_remoting_trampoline:
 * @method: pointer to the method info
 *
 * Creates a trampoline which calls the remoting functions. This
 * is used in the vtable of transparent proxies.
 *
 * Returns: a pointer to the newly created code
 */
static gpointer
mono_jit_create_remoting_trampoline (MonoDomain *domain, MonoMethod *method, MonoRemotingTarget target)
{
	MonoMethod *nm;
	guint8 *addr = NULL;

	/* Generic virtual calls on proxies go through a dedicated trampoline */
	if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && mono_method_signature (method)->generic_param_count) {
		return mono_create_specific_trampoline (method, MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING,
			domain, NULL);
	}

	if ((method->flags & METHOD_ATTRIBUTE_ABSTRACT) ||
	    (mono_method_signature (method)->hasthis && (method->klass->marshalbyref || method->klass == mono_defaults.object_class))) {
		/* Marshal-by-ref instance methods get a remoting-invoke wrapper */
		nm = mono_marshal_get_remoting_invoke_for_target (method, target);
		addr = mono_compile_method (nm);
	} else {
		addr = mono_compile_method (method);
	}
	return mono_get_addr_from_ftnptr (addr);
}

/* Lazily-grown table of vtable/IMT call trampolines, indexed by slot + MONO_IMT_SIZE
 * (IMT slots use negative slot_index). Guarded by the JIT lock for growth. */
static gpointer *vtable_trampolines;
static int vtable_trampolines_size;

/*
 * mini_get_vtable_trampoline:
 *
 *   Return (creating on demand) the shared trampoline for vtable slot SLOT_INDEX.
 * SLOT_INDEX may be negative down to -MONO_IMT_SIZE for IMT slots.
 */
gpointer
mini_get_vtable_trampoline (int slot_index)
{
	int index = slot_index + MONO_IMT_SIZE;

	g_assert (slot_index >= - MONO_IMT_SIZE);
	if (!vtable_trampolines || slot_index + MONO_IMT_SIZE >= vtable_trampolines_size) {
		mono_jit_lock ();
		/* Re-check under the lock (double-checked growth) */
		if (!vtable_trampolines || index >= vtable_trampolines_size) {
			int new_size;
			gpointer new_table;

			new_size = vtable_trampolines_size ? vtable_trampolines_size * 2 : 128;
			while (new_size <= index)
				new_size *= 2;
			new_table = g_new0 (gpointer, new_size);

			if (vtable_trampolines)
				memcpy (new_table, vtable_trampolines, vtable_trampolines_size * sizeof (gpointer));
			g_free (vtable_trampolines);
			/* Publish the table before the size so racing readers never index past the data */
			mono_memory_barrier ();
			vtable_trampolines = new_table;
			vtable_trampolines_size = new_size;
		}
		mono_jit_unlock ();
	}

	if (!vtable_trampolines [index])
		vtable_trampolines [index] = mono_create_specific_trampoline (GUINT_TO_POINTER (slot_index), MONO_TRAMPOLINE_VCALL, mono_get_root_domain (), NULL);
	return vtable_trampolines [index];
}

/* IMT slots share the vtable trampoline table, offset into the negative range. */
static gpointer
mini_get_imt_trampoline (int slot_index)
{
	return mini_get_vtable_trampoline (slot_index - MONO_IMT_SIZE);
}

/*
 * mini_parse_debug_options:
 *
 *   Parse the comma-separated MONO_DEBUG environment variable into
 * debug_options flags; exits with an error message on an unknown option.
 */
static void
mini_parse_debug_options (void)
{
	char *options = getenv ("MONO_DEBUG");
	gchar **args, **ptr;

	if (!options)
		return;

	args = g_strsplit (options, ",", -1);

	for (ptr = args; ptr && *ptr; ptr++) {
		const char *arg = *ptr;

		if (!strcmp (arg, "handle-sigint"))
			debug_options.handle_sigint = TRUE;
		else if (!strcmp (arg, "keep-delegates"))
			debug_options.keep_delegates = TRUE;
		else if (!strcmp (arg, "reverse-pinvoke-exceptions"))
			debug_options.reverse_pinvoke_exceptions = TRUE;
		else if (!strcmp (arg, "collect-pagefault-stats"))
			debug_options.collect_pagefault_stats = TRUE;
		else if (!strcmp (arg, "break-on-unverified"))
			debug_options.break_on_unverified = TRUE;
		else if (!strcmp (arg, "no-gdb-backtrace"))
			debug_options.no_gdb_backtrace = TRUE;
		else if (!strcmp (arg, "suspend-on-sigsegv"))
			debug_options.suspend_on_sigsegv = TRUE;
		else if (!strcmp (arg, "suspend-on-unhandled"))
			debug_options.suspend_on_unhandled = TRUE;
		else if (!strcmp (arg, "dont-free-domains"))
			mono_dont_free_domains = TRUE;
		else if (!strcmp (arg, "dyn-runtime-invoke"))
			debug_options.dyn_runtime_invoke = TRUE;
		else if (!strcmp (arg, "gdb"))
			debug_options.gdb = TRUE;
		else if (!strcmp (arg, "explicit-null-checks"))
			debug_options.explicit_null_checks = TRUE;
		else if (!strcmp (arg, "gen-seq-points"))
			debug_options.gen_seq_points = TRUE;
		else if (!strcmp (arg, "init-stacks"))
			debug_options.init_stacks = TRUE;
		else if (!strcmp (arg, "casts"))
			debug_options.better_cast_details = TRUE;
		else if (!strcmp (arg, "soft-breakpoints"))
			debug_options.soft_breakpoints = TRUE;
		else {
			/* NOTE(review): the usage text omits 'gen-seq-points', 'casts' and 'soft-breakpoints' although they are accepted above */
			fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
			fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'suspend-on-unhandled', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
			exit (1);
		}
	}

	g_strfreev (args);
}

/* Accessor for the global debug options (populated by mini_parse_debug_options). */
MonoDebugOptions *
mini_get_debug_options (void)
{
	return &debug_options;
}

/*
 * mini_create_ftnptr:
 *
 *   Wrap a code ADDR into a function pointer as required by the ABI. On most
 * architectures this is the address itself; ia64/ppc64 use a function
 * descriptor, cached per-domain in ftnptrs_hash.
 */
static gpointer
mini_create_ftnptr (MonoDomain *domain, gpointer addr)
{
#if !defined(__ia64__) && !defined(__ppc64__) && !defined(__powerpc64__)
	return addr;
#else
	gpointer* desc = NULL;

	if ((desc = g_hash_table_lookup (domain->ftnptrs_hash, addr)))
		return desc;
#	ifdef __ia64__
	desc = mono_domain_code_reserve (domain, 2 * sizeof (gpointer));

	desc [0] = addr;
	desc [1] = NULL;
#	elif defined(__ppc64__) || defined(__powerpc64__)
	/* NOTE(review): this re-declares 'desc', shadowing/duplicating the outer one — only compiled on ppc64; verify it builds there */
	gpointer *desc;

	desc = mono_domain_alloc0 (domain, 3 * sizeof (gpointer));
	desc [0] = addr;
	desc [1] = NULL;
	desc [2] = NULL;
#	endif

	g_hash_table_insert (domain->ftnptrs_hash, addr, desc);

	return desc;
#endif
}

/* Inverse of mini_create_ftnptr: extract the code address from a function pointer. */
static gpointer
mini_get_addr_from_ftnptr (gpointer descr)
{
#if defined(__ia64__) || defined(__ppc64__) || defined(__powerpc64__)
	return *(gpointer*)descr;
#else
	return descr;
#endif
}

/* Register all global JIT statistics with the counters subsystem. */
static void
register_jit_stats (void)
{
	mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_compiled);
	mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_aot);
	mono_counters_register ("Methods JITted using mono JIT", MONO_COUNTER_JIT | MONO_COUNTER_INT,
				&mono_jit_stats.methods_without_llvm);
	mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm);
	mono_counters_register ("Total time spent JITting (sec)", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &mono_jit_stats.jit_time);
	mono_counters_register ("Basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.basic_blocks);
	mono_counters_register ("Max basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.max_basic_blocks);
	mono_counters_register ("Allocated vars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocate_var);
	mono_counters_register ("Code reallocs", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.code_reallocs);
	mono_counters_register ("Allocated code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_code_size);
	mono_counters_register ("Inlineable methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlineable_methods);
	mono_counters_register ("Inlined methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlined_methods);
	mono_counters_register ("Regvars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.regvars);
	mono_counters_register ("Locals stack size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.locals_stack_size);
	mono_counters_register ("Method cache lookups", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_lookups);
	mono_counters_register ("Compiled CIL code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.cil_code_size);
	mono_counters_register ("Native code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.native_code_size);
}

/* Forward declaration: destroy-notify used by runtime_invoke_hash below. */
static void runtime_invoke_info_free (gpointer value);

/*
 * mini_create_jit_domain_info:
 *
 *   Allocate and attach the per-domain JIT state (trampoline caches, runtime
 * invoke cache, sequence-point tables, jump-target list).
 */
static void
mini_create_jit_domain_info (MonoDomain *domain)
{
	MonoJitDomainInfo *info = g_new0 (MonoJitDomainInfo, 1);

	info->class_init_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	info->jump_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	info->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	info->delegate_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	info->llvm_vcall_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	/* Values are RuntimeInvokeInfo, freed via runtime_invoke_info_free */
	info->runtime_invoke_hash = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free);
	info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, g_free);
	info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL);
	info->jump_target_hash = g_hash_table_new (NULL, NULL);

	domain->runtime_info = info;
}

/* GHashTable foreach callback: free the jump list held as a hash value.
   NOTE(review): frees jlist->list but not jlist itself — confirm the MonoJumpList is owned elsewhere */
static void
delete_jump_list (gpointer key, gpointer value, gpointer user_data)
{
	MonoJumpList *jlist = value;

	g_slist_free (jlist->list);
}

/* GHashTable foreach callback: destroy a dynamic method's code manager and record. */
static void
dynamic_method_info_free (gpointer key, gpointer value, gpointer user_data)
{
	MonoJitDynamicMethodInfo *di = value;

	mono_code_manager_destroy (di->code_mp);
	g_free (di);
}

/* Destroy-notify for runtime_invoke_hash values. */
static void
runtime_invoke_info_free (gpointer value)
{
	RuntimeInvokeInfo *info = (RuntimeInvokeInfo*)value;

#ifdef MONO_ARCH_DYN_CALL_SUPPORTED
	if (info->dyn_call_info)
		mono_arch_dyn_call_free (info->dyn_call_info);
#endif
	g_free (info);
}

/*
 * mini_free_jit_domain_info:
 *
 *   Tear down everything mini_create_jit_domain_info allocated (plus the
 * optional hashes created lazily elsewhere) when DOMAIN is unloaded.
 */
static void
mini_free_jit_domain_info (MonoDomain *domain)
{
	MonoJitDomainInfo *info = domain_jit_info (domain);

	g_hash_table_foreach (info->jump_target_hash, delete_jump_list, NULL);
	g_hash_table_destroy (info->jump_target_hash);
	if (info->jump_target_got_slot_hash) {
		g_hash_table_foreach (info->jump_target_got_slot_hash, delete_jump_list, NULL);
		g_hash_table_destroy (info->jump_target_got_slot_hash);
	}
	if (info->dynamic_code_hash) {
		g_hash_table_foreach (info->dynamic_code_hash, dynamic_method_info_free, NULL);
		g_hash_table_destroy (info->dynamic_code_hash);
	}
	if (info->method_code_hash)
		g_hash_table_destroy (info->method_code_hash);
	g_hash_table_destroy (info->class_init_trampoline_hash);
	g_hash_table_destroy (info->jump_trampoline_hash);
	g_hash_table_destroy (info->jit_trampoline_hash);
	g_hash_table_destroy (info->delegate_trampoline_hash);
	if (info->static_rgctx_trampoline_hash)
		g_hash_table_destroy (info->static_rgctx_trampoline_hash);
	g_hash_table_destroy (info->llvm_vcall_trampoline_hash);
	g_hash_table_destroy (info->runtime_invoke_hash);
	g_hash_table_destroy (info->seq_points);
	g_hash_table_destroy (info->arch_seq_points);

	if (info->agent_info)
		mono_debugger_agent_free_domain_info (domain);

	g_free (domain->runtime_info);
	domain->runtime_info = NULL;
}

/*
 * mini_init:
 *
 *   Runtime/JIT bootstrap. (Definition continues beyond this chunk.)
 */
MonoDomain *
mini_init (const char *filename, const char *runtime_version)
{
	MonoDomain *domain;
	MonoRuntimeCallbacks callbacks;
	MonoThreadInfoRuntimeCallbacks ticallbacks;

	MONO_PROBE_VES_INIT_BEGIN ();

#if defined(__linux__) && !defined(__native_client__)
	if (access ("/proc/self/maps", F_OK) != 0) {
		g_print ("Mono requires /proc to be mounted.\n");
		exit (1);
	}
#endif

	/* Happens when using the embedding interface */
	if (!default_opt_set)
		default_opt = mono_parse_default_optimizations (NULL);

	InitializeCriticalSection (&jit_mutex);

#ifdef MONO_DEBUGGER_SUPPORTED
	if (mini_debug_running_inside_mdb ())
		mini_debugger_init ();
#endif

#ifdef MONO_HAVE_FAST_TLS
	MONO_FAST_TLS_INIT (mono_jit_tls);
	MONO_FAST_TLS_INIT (mono_lmf_addr);
#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
	MONO_FAST_TLS_INIT (mono_lmf);
#endif
#endif

#ifdef MONO_ARCH_HAVE_TLS_GET
	mono_runtime_set_has_tls_get (MONO_ARCH_HAVE_TLS_GET);
#else
	mono_runtime_set_has_tls_get (FALSE);
#endif

	if (!global_codeman)
		global_codeman = mono_code_manager_new ();
	jit_icall_name_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);

	memset (&callbacks, 0, sizeof (callbacks));
	callbacks.create_ftnptr = mini_create_ftnptr;
	callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr;
	callbacks.get_runtime_build_info = mono_get_runtime_build_info;
	callbacks.set_cast_details = mono_set_cast_details;
	callbacks.debug_log = mono_debugger_agent_debug_log;
	callbacks.debug_log_is_enabled = mono_debugger_agent_debug_log_is_enabled;

#ifdef MONO_ARCH_HAVE_IMT
	if (mono_use_imt) {
callbacks.get_vtable_trampoline = mini_get_vtable_trampoline;
		callbacks.get_imt_trampoline = mini_get_imt_trampoline;
	}
#endif

	mono_install_callbacks (&callbacks);

	/* Callbacks used by the threading/suspend machinery. */
	memset (&ticallbacks, 0, sizeof (ticallbacks));
	ticallbacks.setup_async_callback = mono_setup_async_callback;
	ticallbacks.thread_state_init_from_sigctx = mono_thread_state_init_from_sigctx;
	ticallbacks.thread_state_init_from_handle = mono_thread_state_init_from_handle;

	mono_threads_runtime_init (&ticallbacks);

	if (getenv ("MONO_DEBUG") != NULL)
		mini_parse_debug_options ();

	mono_arch_cpu_init ();

	mono_arch_init ();

	mono_unwind_init ();

	if (getenv ("MONO_XDEBUG")) {
		char *xdebug_opts = getenv ("MONO_XDEBUG");
		mono_xdebug_init (xdebug_opts);
		/* So methods for multiple domains don't have the same address */
		mono_dont_free_domains = TRUE;
		mono_using_xdebug = TRUE;
	} else if (mini_get_debug_options ()->gdb) {
		mono_xdebug_init ((char*)"gdb");
		mono_dont_free_domains = TRUE;
		mono_using_xdebug = TRUE;
	}

#ifdef ENABLE_LLVM
	if (mono_use_llvm) {
		if (!mono_llvm_load (NULL)) {
			mono_use_llvm = FALSE;
			fprintf (stderr, "Mono Warning: llvm support could not be loaded.\n");
		}
	}
	if (mono_use_llvm)
		mono_llvm_init ();
#endif

	mono_trampolines_init ();

	if (!g_thread_supported ())
		g_thread_init (NULL);

	mono_native_tls_alloc (&mono_jit_tls_id, NULL);
	setup_jit_tls_data ((gpointer)-1, mono_thread_abort);

	if (default_opt & MONO_OPT_AOT)
		mono_aot_init ();

	mono_debugger_agent_init ();

#ifdef MONO_ARCH_GSHARED_SUPPORTED
	mono_set_generic_sharing_supported (TRUE);
#endif

#ifndef MONO_CROSS_COMPILE
	mono_runtime_install_handlers ();
#endif
	mono_threads_install_cleanup (mini_thread_cleanup);

#ifdef MONO_ARCH_HAVE_NOTIFY_PENDING_EXC
	// This is experimental code so provide an env var to switch it off
	if (getenv ("MONO_DISABLE_PENDING_EXCEPTIONS")) {
		printf ("MONO_DISABLE_PENDING_EXCEPTIONS env var set.\n");
	} else {
		check_for_pending_exc = FALSE;
		mono_threads_install_notify_pending_exc (mono_arch_notify_pending_exc);
	}
#endif

	/* Install the JIT's implementations into the metadata layer's hooks. */
#define JIT_TRAMPOLINES_WORK
#ifdef JIT_TRAMPOLINES_WORK
	mono_install_compile_method (mono_jit_compile_method);
	mono_install_free_method (mono_jit_free_method);
	mono_install_trampoline (mono_create_jit_trampoline);
	mono_install_jump_trampoline (mono_create_jump_trampoline);
	mono_install_remoting_trampoline (mono_jit_create_remoting_trampoline);
	mono_install_delegate_trampoline (mono_create_delegate_trampoline);
	mono_install_create_domain_hook (mini_create_jit_domain_info);
	mono_install_free_domain_hook (mini_free_jit_domain_info);
#endif
#define JIT_INVOKE_WORKS
#ifdef JIT_INVOKE_WORKS
	mono_install_runtime_invoke (mono_jit_runtime_invoke);
#endif
	mono_install_get_cached_class_info (mono_aot_get_cached_class_info);
	mono_install_get_class_from_name (mono_aot_get_class_from_name);
	mono_install_jit_info_find_in_aot (mono_aot_find_jit_info);

	if (debug_options.collect_pagefault_stats) {
		mono_aot_set_make_unreadable (TRUE);
	}

	/* Create the root domain; this is where managed execution begins. */
	if (runtime_version)
		domain = mono_init_version (filename, runtime_version);
	else
		domain = mono_init_from_assembly (filename, filename);

	if (mono_aot_only) {
		/* This helps catch code allocation requests */
		mono_code_manager_set_read_only (domain->code_mp);
	}

#ifdef MONO_ARCH_HAVE_IMT
	if (mono_use_imt) {
		if (mono_aot_only)
			mono_install_imt_thunk_builder (mono_aot_get_imt_thunk);
		else
			mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
	}
#endif

	/*Init arch tls information only after the metadata side is inited to make sure we see dynamic appdomain tls keys*/
	mono_arch_finish_init ();

	/* This must come after mono_init () in the aot-only case */
	mono_exceptions_init ();

	mono_icall_init ();

	/* This should come after mono_init () too */
	mini_gc_init ();

	mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info", ves_icall_get_frame_info);
	mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace", ves_icall_get_trace);
	mono_add_internal_call ("System.Exception::get_trace", ves_icall_System_Exception_get_trace);
	mono_add_internal_call
("System.Security.SecurityFrame::_GetSecurityFrame", ves_icall_System_Security_SecurityFrame_GetSecurityFrame); mono_add_internal_call ("System.Security.SecurityFrame::_GetSecurityStack", ves_icall_System_Security_SecurityFrame_GetSecurityStack); mono_add_internal_call ("Mono.Runtime::mono_runtime_install_handlers", mono_runtime_install_handlers); mono_create_helper_signatures (); register_jit_stats (); #define JIT_CALLS_WORK #ifdef JIT_CALLS_WORK /* Needs to be called here since register_jit_icall depends on it */ mono_marshal_init (); mono_arch_register_lowlevel_calls (); register_icall (mono_profiler_method_enter, "mono_profiler_method_enter", NULL, TRUE); register_icall (mono_profiler_method_leave, "mono_profiler_method_leave", NULL, TRUE); register_icall (mono_trace_enter_method, "mono_trace_enter_method", NULL, TRUE); register_icall (mono_trace_leave_method, "mono_trace_leave_method", NULL, TRUE); register_icall (mono_get_lmf_addr, "mono_get_lmf_addr", "ptr", TRUE); register_icall (mono_jit_thread_attach, "mono_jit_thread_attach", "ptr ptr", TRUE); register_icall (mono_jit_set_domain, "mono_jit_set_domain", "void ptr", TRUE); register_icall (mono_domain_get, "mono_domain_get", "ptr", TRUE); register_icall (mono_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE); register_icall (mono_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE); register_icall (mono_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception", "void ptr", TRUE); register_icall (mono_thread_get_undeniable_exception, "mono_thread_get_undeniable_exception", "object", FALSE); register_icall (mono_thread_interruption_checkpoint, "mono_thread_interruption_checkpoint", "void", FALSE); register_icall (mono_thread_force_interruption_checkpoint, "mono_thread_force_interruption_checkpoint", "void", FALSE); register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE); register_icall 
(mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE); #if defined(__native_client__) || defined(__native_client_codegen__) register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE); #endif /* * NOTE, NOTE, NOTE, NOTE: * when adding emulation for some opcodes, remember to also add a dummy * rule to the burg files, because we need the arity information to be correct. */ #ifndef MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS mono_register_opcode_emulation (OP_LMUL, "__emul_lmul", "long long long", mono_llmult, TRUE); mono_register_opcode_emulation (OP_LDIV, "__emul_ldiv", "long long long", mono_lldiv, FALSE); mono_register_opcode_emulation (OP_LDIV_UN, "__emul_ldiv_un", "long long long", mono_lldiv_un, FALSE); mono_register_opcode_emulation (OP_LREM, "__emul_lrem", "long long long", mono_llrem, FALSE); mono_register_opcode_emulation (OP_LREM_UN, "__emul_lrem_un", "long long long", mono_llrem_un, FALSE); mono_register_opcode_emulation (OP_LMUL_OVF_UN, "__emul_lmul_ovf_un", "long long long", mono_llmult_ovf_un, FALSE); mono_register_opcode_emulation (OP_LMUL_OVF, "__emul_lmul_ovf", "long long long", mono_llmult_ovf, FALSE); #endif #ifndef MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS mono_register_opcode_emulation (OP_LSHL, "__emul_lshl", "long long int32", mono_lshl, TRUE); mono_register_opcode_emulation (OP_LSHR, "__emul_lshr", "long long int32", mono_lshr, TRUE); mono_register_opcode_emulation (OP_LSHR_UN, "__emul_lshr_un", "long long int32", mono_lshr_un, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV) mono_register_opcode_emulation (OP_IDIV, "__emul_op_idiv", "int32 int32 int32", mono_idiv, FALSE); mono_register_opcode_emulation (OP_IDIV_UN, "__emul_op_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE); mono_register_opcode_emulation (OP_IREM, "__emul_op_irem", "int32 int32 int32", mono_irem, FALSE); mono_register_opcode_emulation (OP_IREM_UN, "__emul_op_irem_un", "int32 int32 int32", mono_irem_un, 
FALSE); #endif #ifdef MONO_ARCH_EMULATE_MUL_DIV mono_register_opcode_emulation (OP_IMUL, "__emul_op_imul", "int32 int32 int32", mono_imul, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_MUL_OVF) mono_register_opcode_emulation (OP_IMUL_OVF, "__emul_op_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE); mono_register_opcode_emulation (OP_IMUL_OVF_UN, "__emul_op_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT) mono_register_opcode_emulation (OP_FDIV, "__emul_fdiv", "double double double", mono_fdiv, FALSE); #endif mono_register_opcode_emulation (OP_FCONV_TO_U8, "__emul_fconv_to_u8", "ulong double", mono_fconv_u8, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_U4, "__emul_fconv_to_u4", "uint32 double", mono_fconv_u4, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_OVF_I8, "__emul_fconv_to_ovf_i8", "long double", mono_fconv_ovf_i8, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_OVF_U8, "__emul_fconv_to_ovf_u8", "ulong double", mono_fconv_ovf_u8, FALSE); #ifdef MONO_ARCH_EMULATE_FCONV_TO_I8 mono_register_opcode_emulation (OP_FCONV_TO_I8, "__emul_fconv_to_i8", "long double", mono_fconv_i8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_CONV_R8_UN mono_register_opcode_emulation (OP_ICONV_TO_R_UN, "__emul_iconv_to_r_un", "double int32", mono_conv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8 mono_register_opcode_emulation (OP_LCONV_TO_R8, "__emul_lconv_to_r8", "double long", mono_lconv_to_r8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R4 mono_register_opcode_emulation (OP_LCONV_TO_R4, "__emul_lconv_to_r4", "float long", mono_lconv_to_r4, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8_UN mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FREM #if defined(__default_codegen__) mono_register_opcode_emulation (OP_FREM, 
"__emul_frem", "double double double", fmod, FALSE); #elif defined(__native_client_codegen__) mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE); #endif #endif #ifdef MONO_ARCH_SOFT_FLOAT mono_register_opcode_emulation (OP_FSUB, "__emul_fsub", "double double double", mono_fsub, FALSE); mono_register_opcode_emulation (OP_FADD, "__emul_fadd", "double double double", mono_fadd, FALSE); mono_register_opcode_emulation (OP_FMUL, "__emul_fmul", "double double double", mono_fmul, FALSE); mono_register_opcode_emulation (OP_FNEG, "__emul_fneg", "double double", mono_fneg, FALSE); mono_register_opcode_emulation (OP_ICONV_TO_R8, "__emul_iconv_to_r8", "double int32", mono_conv_to_r8, FALSE); mono_register_opcode_emulation (OP_ICONV_TO_R4, "__emul_iconv_to_r4", "double int32", mono_conv_to_r4, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_R4, "__emul_fconv_to_r4", "double double", mono_fconv_r4, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_I1, "__emul_fconv_to_i1", "int8 double", mono_fconv_i1, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_I2, "__emul_fconv_to_i2", "int16 double", mono_fconv_i2, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_I4, "__emul_fconv_to_i4", "int32 double", mono_fconv_i4, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_U1, "__emul_fconv_to_u1", "uint8 double", mono_fconv_u1, FALSE); mono_register_opcode_emulation (OP_FCONV_TO_U2, "__emul_fconv_to_u2", "uint16 double", mono_fconv_u2, FALSE); #if SIZEOF_VOID_P == 4 mono_register_opcode_emulation (OP_FCONV_TO_I, "__emul_fconv_to_i", "int32 double", mono_fconv_i4, FALSE); #endif mono_register_opcode_emulation (OP_FBEQ, "__emul_fcmp_eq", "uint32 double double", mono_fcmp_eq, FALSE); mono_register_opcode_emulation (OP_FBLT, "__emul_fcmp_lt", "uint32 double double", mono_fcmp_lt, FALSE); mono_register_opcode_emulation (OP_FBGT, "__emul_fcmp_gt", "uint32 double double", mono_fcmp_gt, FALSE); mono_register_opcode_emulation (OP_FBLE, 
"__emul_fcmp_le", "uint32 double double", mono_fcmp_le, FALSE); mono_register_opcode_emulation (OP_FBGE, "__emul_fcmp_ge", "uint32 double double", mono_fcmp_ge, FALSE); mono_register_opcode_emulation (OP_FBNE_UN, "__emul_fcmp_ne_un", "uint32 double double", mono_fcmp_ne_un, FALSE); mono_register_opcode_emulation (OP_FBLT_UN, "__emul_fcmp_lt_un", "uint32 double double", mono_fcmp_lt_un, FALSE); mono_register_opcode_emulation (OP_FBGT_UN, "__emul_fcmp_gt_un", "uint32 double double", mono_fcmp_gt_un, FALSE); mono_register_opcode_emulation (OP_FBLE_UN, "__emul_fcmp_le_un", "uint32 double double", mono_fcmp_le_un, FALSE); mono_register_opcode_emulation (OP_FBGE_UN, "__emul_fcmp_ge_un", "uint32 double double", mono_fcmp_ge_un, FALSE); mono_register_opcode_emulation (OP_FCEQ, "__emul_fcmp_ceq", "uint32 double double", mono_fceq, FALSE); mono_register_opcode_emulation (OP_FCGT, "__emul_fcmp_cgt", "uint32 double double", mono_fcgt, FALSE); mono_register_opcode_emulation (OP_FCGT_UN, "__emul_fcmp_cgt_un", "uint32 double double", mono_fcgt_un, FALSE); mono_register_opcode_emulation (OP_FCLT, "__emul_fcmp_clt", "uint32 double double", mono_fclt, FALSE); mono_register_opcode_emulation (OP_FCLT_UN, "__emul_fcmp_clt_un", "uint32 double double", mono_fclt_un, FALSE); register_icall (mono_fload_r4, "mono_fload_r4", "double ptr", FALSE); register_icall (mono_fstore_r4, "mono_fstore_r4", "void double ptr", FALSE); register_icall (mono_fload_r4_arg, "mono_fload_r4_arg", "uint32 double", FALSE); register_icall (mono_isfinite, "mono_isfinite", "uint32 double", FALSE); #endif #ifdef COMPRESSED_INTERFACE_BITMAP register_icall (mono_class_interface_match, "mono_class_interface_match", "uint32 ptr int32", TRUE); #endif #if SIZEOF_REGISTER == 4 mono_register_opcode_emulation (OP_FCONV_TO_U, "__emul_fconv_to_u", "uint32 double", mono_fconv_u4, TRUE); #endif /* other jit icalls */ register_icall (mono_delegate_ctor, "mono_delegate_ctor", "void object object ptr", FALSE); register_icall 
(mono_class_static_field_address , "mono_class_static_field_address", "ptr ptr ptr", FALSE); register_icall (mono_ldtoken_wrapper, "mono_ldtoken_wrapper", "ptr ptr ptr ptr", FALSE); register_icall (mono_ldtoken_wrapper_generic_shared, "mono_ldtoken_wrapper_generic_shared", "ptr ptr ptr ptr", FALSE); register_icall (mono_get_special_static_data, "mono_get_special_static_data", "ptr int", FALSE); register_icall (mono_ldstr, "mono_ldstr", "object ptr ptr int32", FALSE); register_icall (mono_helper_stelem_ref_check, "helper_stelem_ref_check", "void object object", FALSE); register_icall (mono_object_new, "mono_object_new", "object ptr ptr", FALSE); register_icall (mono_object_new_specific, "mono_object_new_specific", "object ptr", FALSE); register_icall (mono_array_new, "mono_array_new", "object ptr ptr int32", FALSE); register_icall (mono_array_new_specific, "mono_array_new_specific", "object ptr int32", FALSE); register_icall (mono_runtime_class_init, "mono_runtime_class_init", "void ptr", FALSE); register_icall (mono_ldftn, "mono_ldftn", "ptr ptr", FALSE); register_icall (mono_ldvirtfn, "mono_ldvirtfn", "ptr object ptr", FALSE); register_icall (mono_ldvirtfn_gshared, "mono_ldvirtfn_gshared", "ptr object ptr", FALSE); register_icall (mono_helper_compile_generic_method, "compile_generic_method", "ptr object ptr ptr", FALSE); register_icall (mono_helper_ldstr, "helper_ldstr", "object ptr int", FALSE); register_icall (mono_helper_ldstr_mscorlib, "helper_ldstr_mscorlib", "object int", FALSE); register_icall (mono_helper_newobj_mscorlib, "helper_newobj_mscorlib", "object int", FALSE); register_icall (mono_value_copy, "mono_value_copy", "void ptr ptr ptr", FALSE); register_icall (mono_object_castclass, "mono_object_castclass", "object object ptr", FALSE); register_icall (mono_break, "mono_break", NULL, TRUE); register_icall (mono_create_corlib_exception_0, "mono_create_corlib_exception_0", "object int", TRUE); register_icall (mono_create_corlib_exception_1, 
"mono_create_corlib_exception_1", "object int object", TRUE); register_icall (mono_create_corlib_exception_2, "mono_create_corlib_exception_2", "object int object object", TRUE); register_icall (mono_array_new_1, "mono_array_new_1", "object ptr int", FALSE); register_icall (mono_array_new_2, "mono_array_new_2", "object ptr int int", FALSE); register_icall (mono_array_new_3, "mono_array_new_3", "object ptr int int int", FALSE); register_icall (mono_get_native_calli_wrapper, "mono_get_native_calli_wrapper", "ptr ptr ptr ptr", FALSE); register_icall (mono_resume_unwind, "mono_resume_unwind", "void", TRUE); register_icall (mono_gc_wbarrier_value_copy_bitmap, "mono_gc_wbarrier_value_copy_bitmap", "void ptr ptr int int", FALSE); register_icall (mono_object_castclass_with_cache, "mono_object_castclass_with_cache", "object object ptr ptr", FALSE); register_icall (mono_object_isinst_with_cache, "mono_object_isinst_with_cache", "object object ptr ptr", FALSE); register_icall (mono_debugger_agent_user_break, "mono_debugger_agent_user_break", "void", FALSE); #endif mono_generic_sharing_init (); #ifdef MONO_ARCH_SIMD_INTRINSICS mono_simd_intrinsics_init (); #endif #if MONO_SUPPORT_TASKLETS mono_tasklets_init (); #endif if (mono_compile_aot) /* * Avoid running managed code when AOT compiling, since the platform * might only support aot-only execution. 
		 */
		mono_runtime_set_no_exec (TRUE);

#define JIT_RUNTIME_WORKS
#ifdef JIT_RUNTIME_WORKS
	mono_install_runtime_cleanup ((MonoDomainFunc)mini_cleanup);
	mono_runtime_init (domain, mono_thread_start_cb, mono_thread_attach_cb);
	mono_thread_attach (domain);
#endif

	mono_profiler_runtime_initialized ();

	MONO_PROBE_VES_INIT_END ();

	return domain;
}

MonoJitStats mono_jit_stats = {0};

/*
 * print_jit_stats:
 *
 * Dump the statistics accumulated in mono_jit_stats/mono_stats to stdout,
 * then free the heap-allocated method-name strings.  No-op unless
 * mono_jit_stats.enabled was set (the --stats command line option).
 */
static void
print_jit_stats (void)
{
	if (mono_jit_stats.enabled) {
		g_print ("Mono Jit statistics\n");
		g_print ("Max code size ratio: %.2f (%s)\n", mono_jit_stats.max_code_size_ratio/100.0, mono_jit_stats.max_ratio_method);
		g_print ("Biggest method: %ld (%s)\n", mono_jit_stats.biggest_method_size, mono_jit_stats.biggest_method);

		g_print ("\nCreated object count: %ld\n", mono_stats.new_object_count);
		g_print ("Delegates created: %ld\n", mono_stats.delegate_creations);
		g_print ("Initialized classes: %ld\n", mono_stats.initialized_class_count);
		g_print ("Used classes: %ld\n", mono_stats.used_class_count);
		g_print ("Generic vtables: %ld\n", mono_stats.generic_vtable_count);
		g_print ("Methods: %ld\n", mono_stats.method_count);
		g_print ("Static data size: %ld\n", mono_stats.class_static_data_size);
		g_print ("VTable data size: %ld\n", mono_stats.class_vtable_size);
		g_print ("Mscorlib mempool size: %d\n", mono_mempool_get_allocated (mono_defaults.corlib->mempool));

		g_print ("\nInitialized classes: %ld\n", mono_stats.generic_class_count);
		g_print ("Inflated types: %ld\n", mono_stats.inflated_type_count);
		g_print ("Generics virtual invokes: %ld\n", mono_jit_stats.generic_virtual_invocations);

		g_print ("Sharable generic methods: %ld\n", mono_stats.generics_sharable_methods);
		g_print ("Unsharable generic methods: %ld\n", mono_stats.generics_unsharable_methods);
		g_print ("Shared generic methods: %ld\n", mono_stats.generics_shared_methods);

		g_print ("Dynamic code allocs: %ld\n", mono_stats.dynamic_code_alloc_count);
		g_print ("Dynamic code bytes: %ld\n", mono_stats.dynamic_code_bytes_count);
		g_print ("Dynamic code frees: %ld\n", mono_stats.dynamic_code_frees_count);

		g_print ("IMT tables size: %ld\n", mono_stats.imt_tables_size);
		g_print ("IMT number of tables: %ld\n", mono_stats.imt_number_of_tables);
		g_print ("IMT number of methods: %ld\n", mono_stats.imt_number_of_methods);
		g_print ("IMT used slots: %ld\n", mono_stats.imt_used_slots);
		g_print ("IMT colliding slots: %ld\n", mono_stats.imt_slots_with_collisions);
		g_print ("IMT max collisions: %ld\n", mono_stats.imt_max_collisions_in_slot);
		g_print ("IMT methods at max col: %ld\n", mono_stats.imt_method_count_when_max_collisions);
		g_print ("IMT thunks size: %ld\n", mono_stats.imt_thunks_size);

		g_print ("JIT info table inserts: %ld\n", mono_stats.jit_info_table_insert_count);
		g_print ("JIT info table removes: %ld\n", mono_stats.jit_info_table_remove_count);
		g_print ("JIT info table lookups: %ld\n", mono_stats.jit_info_table_lookup_count);

		g_print ("Hazardous pointers: %ld\n", mono_stats.hazardous_pointer_count);
		g_print ("Minor GC collections: %ld\n", mono_stats.minor_gc_count);
		g_print ("Major GC collections: %ld\n", mono_stats.major_gc_count);
		g_print ("Minor GC time in msecs: %lf\n", (double)mono_stats.minor_gc_time_usecs / 1000.0);
		g_print ("Major GC time in msecs: %lf\n", (double)mono_stats.major_gc_time_usecs / 1000.0);

		if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
			g_print ("\nDecl security check : %ld\n", mono_jit_stats.cas_declsec_check);
			g_print ("LinkDemand (user) : %ld\n", mono_jit_stats.cas_linkdemand);
			g_print ("LinkDemand (icall) : %ld\n", mono_jit_stats.cas_linkdemand_icall);
			g_print ("LinkDemand (pinvoke) : %ld\n", mono_jit_stats.cas_linkdemand_pinvoke);
			g_print ("LinkDemand (aptc) : %ld\n", mono_jit_stats.cas_linkdemand_aptc);
			g_print ("Demand (code gen) : %ld\n", mono_jit_stats.cas_demand_generation);
		}

		g_free (mono_jit_stats.max_ratio_method);
		mono_jit_stats.max_ratio_method = NULL;
		g_free (mono_jit_stats.biggest_method);
		mono_jit_stats.biggest_method = NULL;
	}
}

/* mini_cleanup: full JIT/runtime teardown, the inverse of mini_init ().
   Installed as the runtime cleanup hook via mono_install_runtime_cleanup (). */
void mini_cleanup
(MonoDomain *domain)
{
	/* Teardown order matters: finalization needs a fully working engine,
	   stats need metadata, and TLS/locks go last. */
	mono_runtime_shutdown_stat_profiler ();

#ifndef DISABLE_COM
	cominterop_release_all_rcws ();
#endif

#ifndef MONO_CROSS_COMPILE
	mono_runtime_shutdown ();
	/*
	 * mono_runtime_cleanup() and mono_domain_finalize () need to
	 * be called early since they need the execution engine still
	 * fully working (mono_domain_finalize may invoke managed finalizers
	 * and mono_runtime_cleanup will wait for other threads to finish).
	 */
	mono_domain_finalize (domain, 2000);
#endif

	/* This accesses metadata so needs to be called before runtime shutdown */
	print_jit_stats ();

	mono_profiler_shutdown ();

#ifndef MONO_CROSS_COMPILE
	mono_runtime_cleanup (domain);
#endif

	free_jit_tls_data (mono_native_tls_get_value (mono_jit_tls_id));

	mono_icall_cleanup ();

	mono_runtime_cleanup_handlers ();

	mono_domain_free (domain, TRUE);

	mono_debugger_cleanup ();

#ifdef ENABLE_LLVM
	if (mono_use_llvm)
		mono_llvm_cleanup ();
#endif

	mono_aot_cleanup ();

	mono_trampolines_cleanup ();

	mono_unwind_cleanup ();

	if (!mono_dont_free_global_codeman)
		mono_code_manager_destroy (global_codeman);
	g_hash_table_destroy (jit_icall_name_hash);
	g_free (emul_opcode_map);
	g_free (emul_opcode_opcodes);
	g_free (vtable_trampolines);

	mono_arch_cleanup ();

	mono_generic_sharing_cleanup ();

	mono_cleanup ();

	mono_trace_cleanup ();

	mono_counters_dump (-1, stdout);

	if (mono_inject_async_exc_method)
		mono_method_desc_free (mono_inject_async_exc_method);

	mono_native_tls_free (mono_jit_tls_id);

	DeleteCriticalSection (&jit_mutex);

	DeleteCriticalSection (&mono_delegate_section);
}

/* Set both the verbosity level and the default optimization flags
   (used by the embedding API before mini_init ()). */
void
mono_set_defaults (int verbose_level, guint32 opts)
{
	mini_verbose = verbose_level;
	default_opt = opts;
	default_opt_set = TRUE;
}

/* Remove OPTS from the current default optimization set. */
void
mono_disable_optimizations (guint32 opts)
{
	default_opt &= ~opts;
}

/* Replace the default optimization set wholesale. */
void
mono_set_optimizations (guint32 opts)
{
	default_opt = opts;
	default_opt_set = TRUE;
}

void
mono_set_verbose_level (guint32 level)
{
	mini_verbose = level;
}

/*
 * mono_get_runtime_build_info:
 *
 * Return the runtime version + build date in string format.
 * The returned string is owned by the caller.
 */
char*
mono_get_runtime_build_info (void)
{
	if (mono_build_date)
		return g_strdup_printf ("%s (%s %s)", VERSION, FULL_VERSION, mono_build_date);
	else
		return g_strdup_printf ("%s (%s)", VERSION, FULL_VERSION);
}

/*
 * mono_precompile_assembly:
 *
 * GFunc callback: JIT every non-abstract method of ASS (plus the runtime
 * invoke wrapper for Finalize and the remoting-check wrapper for
 * MarshalByRef instance methods), then recurse into the assembly references.
 * USER_DATA is a GHashTable used as a visited set so each assembly is
 * precompiled at most once.
 */
static void
mono_precompile_assembly (MonoAssembly *ass, void *user_data)
{
	GHashTable *assemblies = (GHashTable*)user_data;
	MonoImage *image = mono_assembly_get_image (ass);
	MonoMethod *method, *invoke;
	int i, count = 0;

	if (g_hash_table_lookup (assemblies, ass))
		return;

	g_hash_table_insert (assemblies, ass, ass);

	if (mini_verbose > 0)
		printf ("PRECOMPILE: %s.\n", mono_image_get_filename (image));

	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
		/* Metadata tokens are 1-based, hence i + 1. */
		method = mono_get_method (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL);
		if (method->flags & METHOD_ATTRIBUTE_ABSTRACT)
			continue;
		count++;
		if (mini_verbose > 1) {
			char * desc = mono_method_full_name (method, TRUE);
			g_print ("Compiling %d %s\n", count, desc);
			g_free (desc);
		}
		mono_compile_method (method);
		if (strcmp (method->name, "Finalize") == 0) {
			invoke = mono_marshal_get_runtime_invoke (method, FALSE);
			mono_compile_method (invoke);
		}
		if (method->klass->marshalbyref && mono_method_signature (method)->hasthis) {
			invoke = mono_marshal_get_remoting_invoke_with_check (method);
			mono_compile_method (invoke);
		}
	}

	/* Load and precompile referenced assemblies as well */
	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_ASSEMBLYREF); ++i) {
		mono_assembly_load_reference (image, i);
		if (image->references [i])
			mono_precompile_assembly (image->references [i], assemblies);
	}
}

/* Precompile every currently loaded assembly (and, transitively, its references). */
void mono_precompile_assemblies ()
{
	GHashTable *assemblies = g_hash_table_new (NULL, NULL);

	mono_assembly_foreach ((GFunc)mono_precompile_assembly, assemblies);

	g_hash_table_destroy (assemblies);
}

#ifndef DISABLE_JIT

/* Convenience wrapper: epilog instrumentation with preserve_argument_registers = FALSE. */
void*
mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
	return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
}

/* Record a "hole" (a basic block excluded from CLAUSE's try range) for later
   native exception-table construction.  Allocated from the cfg mempool, so no
   explicit free is needed. */
void
mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
{
	TryBlockHole *hole = mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
	hole->clause = clause;
	hole->start_offset = start - cfg->native_code;
	hole->basic_block = bb;

	cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
}

/* Mark the compilation as failed with the given MONO_EXCEPTION_* code. */
void
mono_cfg_set_exception (MonoCompile *cfg, int type)
{
	cfg->exception_type = type;
}

#endif
967600.c
/* chainAntiRepeat - Get rid of chains that are primarily the results of repeats and degenerate DNA. */
#include "common.h"
#include "linefile.h"
#include "hash.h"
#include "options.h"
#include "jksql.h"
#include "dnautil.h"
#include "dnaseq.h"
#include "nibTwo.h"
#include "chain.h"

static char const rcsid[] = "$Id: chainAntiRepeat.c,v 1.5 2006/06/20 15:50:58 angie Exp $";

/* Command-line configurable thresholds. */
int minScore = 5000;		/* Minimum (possibly repeat-adjusted) score to keep a chain. */
int noCheckScore = 200000;	/* Chains scoring at least this bypass the filters entirely. */

void usage()
/* Explain usage and exit. */
{
errAbort(
  "chainAntiRepeat - Get rid of chains that are primarily the results of repeats and degenerate DNA\n"
  "usage:\n"
  " chainAntiRepeat tNibDir qNibDir inChain outChain\n"
  "options:\n"
  " -minScore=N - minimum score (after repeat stuff) to pass\n"
  " -noCheckScore=N - score that will pass without checks (speed tweak)\n"
  );
}

static struct optionSpec options[] = {
   {"minScore", OPTION_INT},
   {"noCheckScore", OPTION_INT},
   {NULL, 0},
};

static int isLowerDna[256];	/* Per-character flag: TRUE for lower-case (soft-masked repeat) bases. */

boolean degeneracyFilter(struct dnaSeq *tSeq, struct dnaSeq *qSeq, struct chain *chain)
/* Returns FALSE if matches seem to be degenerate mostly. */
{
struct cBlock *b;
int countBuf[5], *counts = countBuf+1;	/* counts[-1] safely absorbs ntVal's -1 for non-ACGT chars */
int totalMatches = 0;
int sum2, best2 = 0;
double okBest2 = 0.80;	/* Top two bases may cover up to this fraction of matches without penalty. */
double observedBest2, overOk;
double maxOverOk = 1.0 - okBest2;
countBuf[0] = countBuf[1] = countBuf[2] = countBuf[3] = countBuf[4] = 0;

/* Count up number of each nucleotide that is in a match. */
for (b = chain->blockList; b != NULL; b = b->next)
    {
    DNA *q = qSeq->dna + b->qStart - chain->qStart;
    DNA *t = tSeq->dna + b->tStart - chain->tStart;
    int size = b->tEnd - b->tStart;
    int i;
    for (i=0; i<size; ++i)
	{
	int qb = ntVal[(int)q[i]];
	if (qb == ntVal[(int)t[i]])
	    counts[qb] += 1;
	}
    }
totalMatches = counts[0] + counts[1] + counts[2] + counts[3];
if (totalMatches == 0)
    return FALSE;	/* No informative matches; also avoids 0/0 (NaN) in the ratio below. */

/* best2 = largest total among the six possible two-base combinations. */
best2 = counts[0] + counts[1];
sum2 = counts[0] + counts[2];
if (best2 < sum2) best2 = sum2;
sum2 = counts[0] + counts[3];
if (best2 < sum2) best2 = sum2;
sum2 = counts[1] + counts[2];
if (best2 < sum2) best2 = sum2;
sum2 = counts[1] + counts[3];
if (best2 < sum2) best2 = sum2;
sum2 = counts[2] + counts[3];
if (best2 < sum2) best2 = sum2;

/* We allow the best two bases to cover up to 80% (okBest2) of the matches.
 * Above that we reduce the score proportionally, and return FALSE if
 * the adjusted score falls below minScore. */
observedBest2 = (double)best2/(double)totalMatches;
overOk = observedBest2 - okBest2;
if (overOk <= 0)
    return TRUE;
else
    {
    double adjustFactor = 1.01 - overOk/maxOverOk;
    double adjustedScore = chain->score * adjustFactor;
    return adjustedScore >= minScore;
    }
}

boolean repeatFilter(struct dnaSeq *tSeq, struct dnaSeq *qSeq, struct chain *chain)
/* Returns FALSE if matches seem to be almost entirely repeat-driven. */
{
struct cBlock *b;
int repCount = 0, total=0;
double adjustedScore;

/* Count aligned bases where either side is repeat-masked (lower case). */
for (b = chain->blockList; b != NULL; b = b->next)
    {
    DNA *q = qSeq->dna + b->qStart - chain->qStart;
    DNA *t = tSeq->dna + b->tStart - chain->tStart;
    int size = b->tEnd - b->tStart;
    int i;
    for (i=0; i<size; ++i)
	{
	if (isLowerDna[(int)q[i]] || isLowerDna[(int)t[i]])
	    ++repCount;
	}
    total += size;
    }
if (total == 0)
    return FALSE;	/* Chain with no aligned bases; also avoids division by zero below. */
adjustedScore = (chain->score * 2.0 * (total - repCount) / total);
return adjustedScore >= minScore;
}

void chainAntiRepeat(char *tNibDir, char *qNibDir, char *inName, char *outName)
/* chainAntiRepeat - Get rid of chains that are primarily the results
 * of repeats and degenerate DNA.  Reads chains from inName, writes the
 * ones that pass both filters to outName.  Sequence is fetched lazily
 * from the nib/2bit caches, and only for chains below noCheckScore. */
{
struct lineFile *lf = lineFileOpen(inName, TRUE);
FILE *f = mustOpen(outName, "w");
struct chain *chain;
struct nibTwoCache *qc, *tc;

lineFileSetMetaDataOutput(lf, f);
/* Only lower-case bases count as repeat-masked. */
isLowerDna['a'] = isLowerDna['c'] = isLowerDna['g'] = isLowerDna['t'] = isLowerDna['n'] = TRUE;
tc = nibTwoCacheNew(tNibDir);
qc = nibTwoCacheNew(qNibDir);
while ((chain = chainRead(lf)) != NULL)
    {
    boolean pass = TRUE;
    if (chain->score < noCheckScore)
	{
	struct dnaSeq *tSeq = nibTwoCacheSeqPart(tc, chain->tName,
		chain->tStart, chain->tEnd - chain->tStart, NULL);
	struct dnaSeq *qSeq;
	int qFragSize = chain->qEnd - chain->qStart;
	if (chain->qStrand == '-')
	    {
	    /* Chain coordinates on '-' are on the reverse strand: fetch the
	     * forward-strand range and reverse-complement it. */
	    int qStart;
	    qStart = chain->qSize - chain->qEnd;
	    qSeq = nibTwoCacheSeqPart(qc, chain->qName, qStart, qFragSize, NULL);
	    reverseComplement(qSeq->dna, qFragSize);
	    }
	else
	    {
	    qSeq = nibTwoCacheSeqPart(qc, chain->qName, chain->qStart, qFragSize, NULL);
	    }
	pass = degeneracyFilter(tSeq, qSeq, chain);
	if (pass)
	    pass = repeatFilter(tSeq, qSeq, chain);
	dnaSeqFree(&qSeq);
	dnaSeqFree(&tSeq);
	}
    if (pass)
	{
	chainWrite(chain, f);
	}
    chainFree(&chain);
    }

/* Clean up time. */
nibTwoCacheFree(&tc);
nibTwoCacheFree(&qc);
lineFileClose(&lf);
carefulClose(&f);
}

int main(int argc, char *argv[])
/* Process command line. */
{
dnaUtilOpen();
optionInit(&argc, argv, options);
if (argc != 5)
    usage();
minScore = optionInt("minScore", minScore);
noCheckScore = optionInt("noCheckScore", noCheckScore);
chainAntiRepeat(argv[1], argv[2], argv[3], argv[4]);
return 0;
}
849133.c
// // vorticity.c // // // Created by Shawn Shadden. // Copyright 2010 Illinois Institute of Technology. All rights reserved. // #include <math.h> #include <stdio.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include "globals.h" #include "io.h" #include "macros.h" #include "memory.h" #include "mesh.h" #include "vorticity.h" #include "structs.h" #include "tracers.h" #include "velocity.h" void AllocateVorticityFieldData(void) { /* * Allocates memory for vorticity field data for 2 time frames (moving buffer of data) */ printf("\n Allocating memory for vorticity data..."); fflush(stdout); if (Data_MeshType == CARTESIAN) { Vel_CartVorArray_wx = Calloc4D(Vel_CartVorArray_wx, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1, Vel_CartMesh.ZRes - 1); Vel_CartVorArray_wy = Calloc4D(Vel_CartVorArray_wy, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1, Vel_CartMesh.ZRes - 1); Vel_CartVorArray_wz = Calloc4D(Vel_CartVorArray_wz, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1, Vel_CartMesh.ZRes - 1); } else { Vel_UnstructVorArray_wx = Calloc2D(Vel_UnstructVorArray_wx, 2, Vel_MeshNumElements); Vel_UnstructVorArray_wy = Calloc2D(Vel_UnstructVorArray_wy, 2, Vel_MeshNumElements); Vel_UnstructVorArray_wz = Calloc2D(Vel_UnstructVorArray_wz, 2, Vel_MeshNumElements); } printf("OK!\n"); fflush(stdout); } void FreeVorticityData(void) { /*** Deallocate memory for vorticity field for 2 time frames (moving buffer of data) ***/ if (Data_MeshType == CARTESIAN) { Free4D(Vel_CartVorArray_wx, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1); Free4D(Vel_CartVorArray_wy, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1); Free4D(Vel_CartVorArray_wz, 2, Vel_CartMesh.XRes - 1, Vel_CartMesh.YRes - 1); } else { Free2D(Vel_UnstructVorArray_wx, 2); Free2D(Vel_UnstructVorArray_wy, 2); Free2D(Vel_UnstructVorArray_wz, 2); } } void LoadVorticityDataFrame(int frame) { if(Data_MeshType == CARTESIAN) LoadVorticityDataFrame(frame); else if(Data_MeshType == UNSTRUCTURED) 
LoadUnstructVorticityDataFrame(frame); else FatalError("Unknown value for Data_MeshType in LoadVorticityDataFrame()"); } void LoadCartVorticityDataFrame(int frame) { int i, j, k, ModVal, slot1, slot2; double ***tempptr = NULL, timestamp; char Data_BinFilePath[LONGSTRING]; FILE *Data_BinFileID; if(Int_TimeDirection > 0) { slot1 = 0; slot2 = 1; } else { slot1 = 1; slot2 = 0; } ModVal = fmod(frame, Data_TRes - 1); if(ModVal < 0) { ModVal += Data_TRes - 1; } if(Vel_CartVorArray_wx == NULL || Vel_CartVorArray_wy == NULL || Vel_CartVorArray_wz == NULL) AllocateVorticityFieldData(); /* Allocate memory for tempptr, used as placeholder when swapping pointers to data as window of loaded velocity data changes */ if((tempptr = (double ***)malloc((Vel_CartMesh.XRes - 1) * sizeof(double **))) == NULL) FatalError("Malloc failed for tempptr"); for(i = 0; i < Vel_CartMesh.XRes - 1; i++) if((tempptr[i] = (double **)malloc((Vel_CartMesh.YRes - 1) * sizeof(double *))) == NULL) FatalError("Malloc failed for tempptr[%d]", i); /*** Swap addresses ***/ /* wx */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) tempptr[i][j] = Vel_CartVorArray_wx[slot1][i][j]; /* Save address of slot1 */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wx[slot1][i][j] = Vel_CartVorArray_wx[slot2][i][j]; /* Change address of slot1 to address of slot2 */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wx[slot2][i][j] = tempptr[i][j]; /* Change address of slot2 to former address of slot1 */ /* wy */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) tempptr[i][j] = Vel_CartVorArray_wy[slot1][i][j]; /* Save address of slot1 */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wy[slot1][i][j] = Vel_CartVorArray_wy[slot2][i][j]; /* Change address of slot1 to address of slot2 */ for(i 
= 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wy[slot2][i][j] = tempptr[i][j]; /* Change address of slot2 to former address of slot1 */ /* wz */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) tempptr[i][j] = Vel_CartVorArray_wz[slot1][i][j]; /* Save address of slot1 */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wz[slot1][i][j] = Vel_CartVorArray_wz[slot2][i][j]; /* Change address of slot1 to address of slot2 */ for(i = 0; i < Vel_CartMesh.XRes - 1; i++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) Vel_CartVorArray_wz[slot2][i][j] = tempptr[i][j]; /* Change address of slot2 to former address of slot1 */ /*** Read in new time slice of data ***/ /* Open file */ sprintf(Data_BinFilePath, "%s%s_vorticity.%d.bin", Path_Data, Data_InFilePrefix, Data_SuffixTMin + ModVal * Data_SuffixTDelta); if((Data_BinFileID = fopen(Data_BinFilePath, "rb")) == NULL) FatalError("Could not open file %s", Data_BinFilePath); /* Read time stamp */ if(fread(&timestamp, sizeof(double), 1, Data_BinFileID) < 1) FatalError("Could not read time stamp from file %s", Data_BinFilePath); printf("\nLoading vorticity data from %s (time stamp = %g)...", Data_BinFilePath, timestamp); fflush(stdout); /* Read vorticity data */ for(k = 0; k < Vel_CartMesh.ZRes - 1; k++) for(j = 0; j < Vel_CartMesh.YRes - 1; j++) for(i = 0; i < Vel_CartMesh.XRes - 1; i++) if((fread(&Vel_CartVorArray_wx[slot2][i][j][k], sizeof(double), 1, Data_BinFileID) < 1) || (fread(&Vel_CartVorArray_wy[slot2][i][j][k], sizeof(double), 1, Data_BinFileID) < 1) || (fread(&Vel_CartVorArray_wz[slot2][i][j][k], sizeof(double), 1, Data_BinFileID) < 1)) /* Read data from file to address of slot2 */ FatalError("Could not load vorticity data for index [%d][%d][%d] from %s", i, j, k, Data_BinFilePath); /* Close file */ fclose(Data_BinFileID); /* Clean up local arrays */ for(i = 0; i < Vel_CartMesh.XRes - 
1; i++) { free(tempptr[i]); tempptr[i] = NULL; } free(tempptr); tempptr = NULL; printf("OK!\n"); fflush(stdout); } void LoadUnstructVorticityDataFrame(int frame) { int i, ModVal, slot1, slot2; double *tempptr_U, *tempptr_V, *tempptr_W, timestamp; char Data_BinFilePath[LONGSTRING]; FILE *Data_BinFileID; if(Int_TimeDirection > 0) { slot1 = 0; slot2 = 1; } else { slot1 = 1; slot2 = 0; } ModVal = fmod(frame, Data_TRes - 1); if(ModVal < 0) { ModVal += Data_TRes - 1; } /* Allocate memory for velocity data if needed */ if(Vel_UnstructVorArray_wx == NULL || Vel_UnstructVorArray_wy == NULL || Vel_UnstructVorArray_wz == NULL) AllocateVorticityFieldData(); /* Open binary velocity file for reading */ sprintf(Data_BinFilePath, "%s%s_vorticity.%d.bin", Path_Data, Data_InFilePrefix, Data_SuffixTMin + ModVal * Data_SuffixTDelta); if((Data_BinFileID = fopen(Data_BinFilePath, "rb")) == NULL) FatalError("Could not open file %s", Data_BinFilePath); /* Read time stamp */ if(fread(&timestamp, sizeof(double), 1, Data_BinFileID) < 1) FatalError("Could not read time stamp from file %s", Data_BinFilePath); printf("\nLoading vorticity data from %s (time stamp = %g)...", Data_BinFilePath, timestamp); fflush(stdout); /* Virtual data shift */ tempptr_U = Vel_UnstructVorArray_wx[slot1]; tempptr_V = Vel_UnstructVorArray_wy[slot1]; tempptr_W = Vel_UnstructVorArray_wz[slot1]; Vel_UnstructVorArray_wx[slot1] = Vel_UnstructVorArray_wx[slot2]; Vel_UnstructVorArray_wy[slot1] = Vel_UnstructVorArray_wy[slot2]; Vel_UnstructVorArray_wz[slot1] = Vel_UnstructVorArray_wz[slot2]; Vel_UnstructVorArray_wx[slot2] = tempptr_U; Vel_UnstructVorArray_wy[slot2] = tempptr_V; Vel_UnstructVorArray_wz[slot2] = tempptr_W; /* Read data */ for(i = 0; i < Vel_MeshNumElements; i++) if(fread(&Vel_UnstructVorArray_wx[slot2][i], sizeof(double), 1, Data_BinFileID) < 1 || fread(&Vel_UnstructVorArray_wy[slot2][i], sizeof(double), 1, Data_BinFileID) < 1 || fread(&Vel_UnstructVorArray_wz[slot2][i], sizeof(double), 1, Data_BinFileID) < 
1) FatalError("Could not read vorticity data from file %s", Data_BinFilePath); fclose(Data_BinFileID); printf("OK!\n"); fflush(stdout); } void SetVorticity(double tc, LagrangianPoint *pt) { if(Data_MeshType == CARTESIAN) SetVorticity_Cartesian(tc, pt); else if(Data_MeshType == UNSTRUCTURED) SetVorticity_Unstructured(tc, pt); else if(Data_MeshType == ANALYTIC) FatalError("Data_MeshType == ANALYTIC in SetVorticity() unsupported"); else FatalError("Unrecognized value for Data_MeshType in SetVorticity()"); } void SetVorticity_Cartesian(double time, LagrangianPoint *pt) { double tloc; int i, j, k; if(TestOutsideDomain(pt->X)) FatalError("Attempting to interpolate vorticity at point that has left the domain"); tloc = (time - Data_LoadedTMin) / (Data_LoadedTMax - Data_LoadedTMin); assert((tloc < (1 + TINY)) && (tloc > (0 - TINY))); i = (int)floor((pt->X[0] - Vel_CartMesh.XMin) / Vel_CartMesh.XDelta); assert((i >= 0) && (i <= (Vel_CartMesh.XRes - 1))); if(i == (Vel_CartMesh.XRes - 1)) i = Vel_CartMesh.XRes - 2; j = (int)floor((pt->X[1] - Vel_CartMesh.YMin) / Vel_CartMesh.YDelta); assert((j >= 0) && (j <= (Vel_CartMesh.XRes - 1))); if(j == (Vel_CartMesh.YRes - 1)) j = Vel_CartMesh.YRes - 2; if(Dimensions == 3) { k = (int)floor((pt->X[2] - Vel_CartMesh.ZMin) / Vel_CartMesh.ZDelta); assert((k >= 0) && (k <= (Vel_CartMesh.ZRes - 1))); if(k == (Vel_CartMesh.ZRes - 1)) k = Vel_CartMesh.ZRes - 2; } else k = 0; pt->vorticity[0] = ((1 - tloc) * Vel_CartVorArray_wx[0][i][j][k] + tloc * Vel_CartVorArray_wx[1][i][j][k]); pt->vorticity[1] = ((1 - tloc) * Vel_CartVorArray_wy[0][i][j][k] + tloc * Vel_CartVorArray_wy[1][i][j][k]); pt->vorticity[2] = ((1 - tloc) * Vel_CartVorArray_wz[0][i][j][k] + tloc * Vel_CartVorArray_wz[1][i][j][k]); } void SetVorticity_Unstructured(double time, LagrangianPoint *pt) { double tloc; pt->ElementIndex = Get_Element_Local_Search(pt->X, pt->ElementIndex); if(pt->ElementIndex == -1) FatalError("Attempting to interpolate vorticity at point with 
pt->ElementIndex = -1"); /* Set tloc defining where in between loaded data to interpolate in time */ tloc = (time - Data_LoadedTMin) / (Data_LoadedTMax - Data_LoadedTMin); if((tloc > (1 + TINY)) || (tloc < (0 - TINY))) FatalError("tloc = %f and must be between 0 and 1", tloc); /* Interpolate vorticity at element in time */ pt->vorticity[0] = (1 - tloc) * Vel_UnstructVorArray_wx[0][pt->ElementIndex] + tloc * Vel_UnstructVorArray_wx[1][pt->ElementIndex]; pt->vorticity[1] = (1 - tloc) * Vel_UnstructVorArray_wy[0][pt->ElementIndex] + tloc * Vel_UnstructVorArray_wy[1][pt->ElementIndex]; pt->vorticity[2] = (1 - tloc) * Vel_UnstructVorArray_wz[0][pt->ElementIndex] + tloc * Vel_UnstructVorArray_wz[1][pt->ElementIndex]; /* printf("vorticity = (%f, %f, %f)\n", pt->vorticity[0], pt->vorticity[1], pt->vorticity[2]);*/ }
966588.c
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200 and G400 * * (c) 1998-2002 Petr Vandrovec <[email protected]> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.65 2002/08/14 * * MTRR stuff: 1998 Tom Rini <[email protected]> * * Contributors: "menion?" <[email protected]> * Betatesting, fixes, ideas * * "Kurt Garloff" <[email protected]> * Betatesting, fixes, ideas, videomodes, videomodes timmings * * "Tom Rini" <[email protected]> * MTRR stuff, PPC cleanups, betatesting, fixes, ideas * * "Bibek Sahu" <[email protected]> * Access device through readb|w|l and write b|w|l * Extensive debugging stuff * * "Daniel Haun" <[email protected]> * Testing, hardware cursor fixes * * "Scott Wood" <[email protected]> * Fixes * * "Gerd Knorr" <[email protected]> * Betatesting * * "Kelly French" <[email protected]> * "Fernando Herrera" <[email protected]> * Betatesting, bug reporting * * "Pablo Bianucci" <[email protected]> * Fixes, ideas, betatesting * * "Inaky Perez Gonzalez" <[email protected]> * Fixes, enhandcements, ideas, betatesting * * "Ryuichi Oikawa" <[email protected]> * PPC betatesting, PPC support, backward compatibility * * "Paul Womar" <[email protected]> * "Owen Waller" <[email protected]> * PPC betatesting * * "Thomas Pornin" <[email protected]> * Alpha betatesting * * "Pieter van Leuven" <[email protected]> * "Ulf Jaenicke-Roessler" <[email protected]> * G100 testing * * "H. Peter Arvin" <[email protected]> * Ideas * * "Cort Dougan" <[email protected]> * CHRP fixes and PReP cleanup * * "Mark Vojkovich" <[email protected]> * G400 support * * "David C. Hansen" <[email protected]> * Fixes * * "Ian Romanick" <[email protected]> * Find PInS data in BIOS on PowerPC systems. 
* * (following author is not in any relation with this code, but his code * is included in this driver) * * Based on framebuffer driver for VBE 2.0 compliant graphic boards * (c) 1998 Gerd Knorr <[email protected]> * * (following author is not in any relation with this code, but his ideas * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" <[email protected]> * */ #include "matroxfb_misc.h" #include <linux/interrupt.h> #include <linux/matroxfb.h> void matroxfb_DAC_out(const struct matrox_fb_info *minfo, int reg, int val) { DBG_REG(__func__) mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg); mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val); } int matroxfb_DAC_in(const struct matrox_fb_info *minfo, int reg) { DBG_REG(__func__) mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg); return mga_inb(M_RAMDAC_BASE+M_X_DATAREG); } void matroxfb_var2my(struct fb_var_screeninfo* var, struct my_timming* mt) { unsigned int pixclock = var->pixclock; DBG(__func__) if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */ mt->pixclock = 1000000000 / pixclock; if (mt->pixclock < 1) mt->pixclock = 1; mt->mnp = -1; mt->dblscan = var->vmode & FB_VMODE_DOUBLE; mt->interlaced = var->vmode & FB_VMODE_INTERLACED; mt->HDisplay = var->xres; mt->HSyncStart = mt->HDisplay + var->right_margin; mt->HSyncEnd = mt->HSyncStart + var->hsync_len; mt->HTotal = mt->HSyncEnd + var->left_margin; mt->VDisplay = var->yres; mt->VSyncStart = mt->VDisplay + var->lower_margin; mt->VSyncEnd = mt->VSyncStart + var->vsync_len; mt->VTotal = mt->VSyncEnd + var->upper_margin; mt->sync = var->sync; } int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int freq, unsigned int fmax, unsigned int* in, unsigned int* feed, unsigned int* post) { unsigned int bestdiff = ~0; unsigned int bestvco = 0; unsigned int fxtal = pll->ref_freq; unsigned int fwant; unsigned int p; DBG(__func__) fwant = freq; #ifdef DEBUG printk(KERN_ERR "post_shift_max: %d\n", pll->post_shift_max); printk(KERN_ERR "ref_freq: %d\n", 
pll->ref_freq); printk(KERN_ERR "freq: %d\n", freq); printk(KERN_ERR "vco_freq_min: %d\n", pll->vco_freq_min); printk(KERN_ERR "in_div_min: %d\n", pll->in_div_min); printk(KERN_ERR "in_div_max: %d\n", pll->in_div_max); printk(KERN_ERR "feed_div_min: %d\n", pll->feed_div_min); printk(KERN_ERR "feed_div_max: %d\n", pll->feed_div_max); printk(KERN_ERR "fmax: %d\n", fmax); #endif for (p = 1; p <= pll->post_shift_max; p++) { if (fwant * 2 > fmax) break; fwant *= 2; } if (fwant < pll->vco_freq_min) fwant = pll->vco_freq_min; if (fwant > fmax) fwant = fmax; for (; p-- > 0; fwant >>= 1, bestdiff >>= 1) { unsigned int m; if (fwant < pll->vco_freq_min) break; for (m = pll->in_div_min; m <= pll->in_div_max; m++) { unsigned int diff, fvco; unsigned int n; n = (fwant * (m + 1) + (fxtal >> 1)) / fxtal - 1; if (n > pll->feed_div_max) break; if (n < pll->feed_div_min) n = pll->feed_div_min; fvco = (fxtal * (n + 1)) / (m + 1); if (fvco < fwant) diff = fwant - fvco; else diff = fvco - fwant; if (diff < bestdiff) { bestdiff = diff; *post = p; *in = m; *feed = n; bestvco = fvco; } } } dprintk(KERN_ERR "clk: %02X %02X %02X %d %d %d\n", *in, *feed, *post, fxtal, bestvco, fwant); return bestvco; } int matroxfb_vgaHWinit(struct matrox_fb_info *minfo, struct my_timming *m) { unsigned int hd, hs, he, hbe, ht; unsigned int vd, vs, ve, vt, lc; unsigned int wd; unsigned int divider; int i; struct matrox_hw_state * const hw = &minfo->hw; DBG(__func__) hw->SEQ[0] = 0x00; hw->SEQ[1] = 0x01; /* or 0x09 */ hw->SEQ[2] = 0x0F; /* bitplanes */ hw->SEQ[3] = 0x00; hw->SEQ[4] = 0x0E; /* CRTC 0..7, 9, 16..19, 21, 22 are reprogrammed by Matrox Millennium code... 
Hope that by MGA1064 too */ if (m->dblscan) { m->VTotal <<= 1; m->VDisplay <<= 1; m->VSyncStart <<= 1; m->VSyncEnd <<= 1; } if (m->interlaced) { m->VTotal >>= 1; m->VDisplay >>= 1; m->VSyncStart >>= 1; m->VSyncEnd >>= 1; } /* GCTL is ignored when not using 0xA0000 aperture */ hw->GCTL[0] = 0x00; hw->GCTL[1] = 0x00; hw->GCTL[2] = 0x00; hw->GCTL[3] = 0x00; hw->GCTL[4] = 0x00; hw->GCTL[5] = 0x40; hw->GCTL[6] = 0x05; hw->GCTL[7] = 0x0F; hw->GCTL[8] = 0xFF; /* Whole ATTR is ignored in PowerGraphics mode */ for (i = 0; i < 16; i++) hw->ATTR[i] = i; hw->ATTR[16] = 0x41; hw->ATTR[17] = 0xFF; hw->ATTR[18] = 0x0F; hw->ATTR[19] = 0x00; hw->ATTR[20] = 0x00; hd = m->HDisplay >> 3; hs = m->HSyncStart >> 3; he = m->HSyncEnd >> 3; ht = m->HTotal >> 3; /* standard timmings are in 8pixels, but for interleaved we cannot */ /* do it for 4bpp (because of (4bpp >> 1(interleaved))/4 == 0) */ /* using 16 or more pixels per unit can save us */ divider = minfo->curr.final_bppShift; while (divider & 3) { hd >>= 1; hs >>= 1; he >>= 1; ht >>= 1; divider <<= 1; } divider = divider / 4; /* divider can be from 1 to 8 */ while (divider > 8) { hd <<= 1; hs <<= 1; he <<= 1; ht <<= 1; divider >>= 1; } hd = hd - 1; hs = hs - 1; he = he - 1; ht = ht - 1; vd = m->VDisplay - 1; vs = m->VSyncStart - 1; ve = m->VSyncEnd - 1; vt = m->VTotal - 2; lc = vd; /* G200 cannot work with (ht & 7) == 6 */ if (((ht & 0x07) == 0x06) || ((ht & 0x0F) == 0x04)) ht++; hbe = ht; wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64; hw->CRTCEXT[0] = 0; hw->CRTCEXT[5] = 0; if (m->interlaced) { hw->CRTCEXT[0] = 0x80; hw->CRTCEXT[5] = (hs + he - ht) >> 1; if (!m->dblscan) wd <<= 1; vt &= ~1; } hw->CRTCEXT[0] |= (wd & 0x300) >> 4; hw->CRTCEXT[1] = (((ht - 4) & 0x100) >> 8) | ((hd & 0x100) >> 7) | /* blanking */ ((hs & 0x100) >> 6) | /* sync start */ (hbe & 0x040); /* end hor. 
blanking */ if (minfo->outputs[1].src == MATROXFB_SRC_CRTC1) hw->CRTCEXT[1] |= 0x88; /* enable horizontal and vertical vidrst */ hw->CRTCEXT[2] = ((vt & 0xC00) >> 10) | ((vd & 0x400) >> 8) | /* disp end */ ((vd & 0xC00) >> 7) | /* vblanking start */ ((vs & 0xC00) >> 5) | ((lc & 0x400) >> 3); hw->CRTCEXT[3] = (divider - 1) | 0x80; hw->CRTCEXT[4] = 0; hw->CRTC[0] = ht-4; hw->CRTC[1] = hd; hw->CRTC[2] = hd; hw->CRTC[3] = (hbe & 0x1F) | 0x80; hw->CRTC[4] = hs; hw->CRTC[5] = ((hbe & 0x20) << 2) | (he & 0x1F); hw->CRTC[6] = vt & 0xFF; hw->CRTC[7] = ((vt & 0x100) >> 8) | ((vd & 0x100) >> 7) | ((vs & 0x100) >> 6) | ((vd & 0x100) >> 5) | ((lc & 0x100) >> 4) | ((vt & 0x200) >> 4) | ((vd & 0x200) >> 3) | ((vs & 0x200) >> 2); hw->CRTC[8] = 0x00; hw->CRTC[9] = ((vd & 0x200) >> 4) | ((lc & 0x200) >> 3); if (m->dblscan && !m->interlaced) hw->CRTC[9] |= 0x80; for (i = 10; i < 16; i++) hw->CRTC[i] = 0x00; hw->CRTC[16] = vs /* & 0xFF */; hw->CRTC[17] = (ve & 0x0F) | 0x20; hw->CRTC[18] = vd /* & 0xFF */; hw->CRTC[19] = wd /* & 0xFF */; hw->CRTC[20] = 0x00; hw->CRTC[21] = vd /* & 0xFF */; hw->CRTC[22] = (vt + 1) /* & 0xFF */; hw->CRTC[23] = 0xC3; hw->CRTC[24] = lc; return 0; }; void matroxfb_vgaHWrestore(struct matrox_fb_info *minfo) { int i; struct matrox_hw_state * const hw = &minfo->hw; CRITFLAGS DBG(__func__) dprintk(KERN_INFO "MiscOutReg: %02X\n", hw->MiscOutReg); dprintk(KERN_INFO "SEQ regs: "); for (i = 0; i < 5; i++) dprintk("%02X:", hw->SEQ[i]); dprintk("\n"); dprintk(KERN_INFO "GDC regs: "); for (i = 0; i < 9; i++) dprintk("%02X:", hw->GCTL[i]); dprintk("\n"); dprintk(KERN_INFO "CRTC regs: "); for (i = 0; i < 25; i++) dprintk("%02X:", hw->CRTC[i]); dprintk("\n"); dprintk(KERN_INFO "ATTR regs: "); for (i = 0; i < 21; i++) dprintk("%02X:", hw->ATTR[i]); dprintk("\n"); CRITBEGIN mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, 0); mga_outb(M_MISC_REG, hw->MiscOutReg); for (i = 1; i < 5; i++) mga_setr(M_SEQ_INDEX, i, hw->SEQ[i]); mga_setr(M_CRTC_INDEX, 17, hw->CRTC[17] & 0x7F); for 
(i = 0; i < 25; i++) mga_setr(M_CRTC_INDEX, i, hw->CRTC[i]); for (i = 0; i < 9; i++) mga_setr(M_GRAPHICS_INDEX, i, hw->GCTL[i]); for (i = 0; i < 21; i++) { mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, i); mga_outb(M_ATTR_INDEX, hw->ATTR[i]); } mga_outb(M_PALETTE_MASK, 0xFF); mga_outb(M_DAC_REG, 0x00); for (i = 0; i < 768; i++) mga_outb(M_DAC_VAL, hw->DACpal[i]); mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, 0x20); CRITEND } static void get_pins(unsigned char __iomem* pins, struct matrox_bios* bd) { unsigned int b0 = readb(pins); if (b0 == 0x2E && readb(pins+1) == 0x41) { unsigned int pins_len = readb(pins+2); unsigned int i; unsigned char cksum; unsigned char* dst = bd->pins; if (pins_len < 3 || pins_len > 128) { return; } *dst++ = 0x2E; *dst++ = 0x41; *dst++ = pins_len; cksum = 0x2E + 0x41 + pins_len; for (i = 3; i < pins_len; i++) { cksum += *dst++ = readb(pins+i); } if (cksum) { return; } bd->pins_len = pins_len; } else if (b0 == 0x40 && readb(pins+1) == 0x00) { unsigned int i; unsigned char* dst = bd->pins; *dst++ = 0x40; *dst++ = 0; for (i = 2; i < 0x40; i++) { *dst++ = readb(pins+i); } bd->pins_len = 0x40; } } static void get_bios_version(unsigned char __iomem * vbios, struct matrox_bios* bd) { unsigned int pcir_offset; pcir_offset = readb(vbios + 24) | (readb(vbios + 25) << 8); if (pcir_offset >= 26 && pcir_offset < 0xFFE0 && readb(vbios + pcir_offset ) == 'P' && readb(vbios + pcir_offset + 1) == 'C' && readb(vbios + pcir_offset + 2) == 'I' && readb(vbios + pcir_offset + 3) == 'R') { unsigned char h; h = readb(vbios + pcir_offset + 0x12); bd->version.vMaj = (h >> 4) & 0xF; bd->version.vMin = h & 0xF; bd->version.vRev = readb(vbios + pcir_offset + 0x13); } else { unsigned char h; h = readb(vbios + 5); bd->version.vMaj = (h >> 4) & 0xF; bd->version.vMin = h & 0xF; bd->version.vRev = 0; } } static void get_bios_output(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned char b; b = readb(vbios + 0x7FF1); if (b == 0xFF) { b = 0; } bd->output.state = 
b; } static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned int i; /* Check for 'IBM .*(V....TVO' string - it means TVO BIOS */ bd->output.tvout = 0; if (readb(vbios + 0x1D) != 'I' || readb(vbios + 0x1E) != 'B' || readb(vbios + 0x1F) != 'M' || readb(vbios + 0x20) != ' ') { return; } for (i = 0x2D; i < 0x2D + 128; i++) { unsigned char b = readb(vbios + i); if (b == '(' && readb(vbios + i + 1) == 'V') { if (readb(vbios + i + 6) == 'T' && readb(vbios + i + 7) == 'V' && readb(vbios + i + 8) == 'O') { bd->output.tvout = 1; } return; } if (b == 0) break; } } static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned int pins_offset; if (readb(vbios) != 0x55 || readb(vbios + 1) != 0xAA) { return; } bd->bios_valid = 1; get_bios_version(vbios, bd); get_bios_output(vbios, bd); get_bios_tvout(vbios, bd); #if defined(__powerpc__) /* On PowerPC cards, the PInS offset isn't stored at the end of the * BIOS image. Instead, you must search the entire BIOS image for * the magic PInS signature. * * This actually applies to all OpenFirmware base cards. Since these * cards could be put in a MIPS or SPARC system, should the condition * be something different? 
*/ for ( pins_offset = 0 ; pins_offset <= 0xFF80 ; pins_offset++ ) { unsigned char header[3]; header[0] = readb(vbios + pins_offset); header[1] = readb(vbios + pins_offset + 1); header[2] = readb(vbios + pins_offset + 2); if ( (header[0] == 0x2E) && (header[1] == 0x41) && ((header[2] == 0x40) || (header[2] == 0x80)) ) { printk(KERN_INFO "PInS data found at offset %u\n", pins_offset); get_pins(vbios + pins_offset, bd); break; } } #else pins_offset = readb(vbios + 0x7FFC) | (readb(vbios + 0x7FFD) << 8); if (pins_offset <= 0xFF80) { get_pins(vbios + pins_offset, bd); } #endif } static int parse_pins1(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int maxdac; switch (bd->pins[22]) { case 0: maxdac = 175000; break; case 1: maxdac = 220000; break; default: maxdac = 240000; break; } if (get_unaligned_le16(bd->pins + 24)) { maxdac = get_unaligned_le16(bd->pins + 24) * 10; } minfo->limits.pixel.vcomax = maxdac; minfo->values.pll.system = get_unaligned_le16(bd->pins + 28) ? get_unaligned_le16(bd->pins + 28) * 10 : 50000; /* ignore 4MB, 8MB, module clocks */ minfo->features.pll.ref_freq = 14318; minfo->values.reg.mctlwtst = 0x00030101; return 0; } static void default_pins1(struct matrox_fb_info *minfo) { /* Millennium */ minfo->limits.pixel.vcomax = 220000; minfo->values.pll.system = 50000; minfo->features.pll.ref_freq = 14318; minfo->values.reg.mctlwtst = 0x00030101; } static int parse_pins2(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = (bd->pins[41] == 0xFF) ? 230000 : ((bd->pins[41] + 100) * 1000); minfo->values.reg.mctlwtst = ((bd->pins[51] & 0x01) ? 0x00000001 : 0) | ((bd->pins[51] & 0x02) ? 0x00000100 : 0) | ((bd->pins[51] & 0x04) ? 0x00010000 : 0) | ((bd->pins[51] & 0x08) ? 0x00020000 : 0); minfo->values.pll.system = (bd->pins[43] == 0xFF) ? 
50000 : ((bd->pins[43] + 100) * 1000); minfo->features.pll.ref_freq = 14318; return 0; } static void default_pins2(struct matrox_fb_info *minfo) { /* Millennium II, Mystique */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 230000; minfo->values.reg.mctlwtst = 0x00030101; minfo->values.pll.system = 50000; minfo->features.pll.ref_freq = 14318; } static int parse_pins3(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21 : get_unaligned_le32(bd->pins + 48); /* memory config */ minfo->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | ((bd->pins[57] << 22) & 0x00C00000) | ((bd->pins[56] << 1) & 0x000001E0) | ( bd->pins[56] & 0x0000000F); minfo->values.reg.opt = (bd->pins[54] & 7) << 10; minfo->values.reg.opt2 = bd->pins[58] << 12; minfo->features.pll.ref_freq = (bd->pins[52] & 0x20) ? 14318 : 27000; return 0; } static void default_pins3(struct matrox_fb_info *minfo) { /* G100, G200 */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 230000; minfo->values.reg.mctlwtst = 0x01250A21; minfo->values.reg.memrdbk = 0x00000000; minfo->values.reg.opt = 0x00000C00; minfo->values.reg.opt2 = 0x00000000; minfo->features.pll.ref_freq = 27000; } static int parse_pins4(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; minfo->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? 
minfo->limits.pixel.vcomax : bd->pins[ 38] * 4000; minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71); minfo->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | ((bd->pins[87] << 22) & 0x00C00000) | ((bd->pins[86] << 1) & 0x000001E0) | ( bd->pins[86] & 0x0000000F); minfo->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | ((bd->pins[53] << 22) & 0x10000000) | ((bd->pins[53] << 7) & 0x00001C00); minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 67); minfo->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; minfo->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; return 0; } static void default_pins4(struct matrox_fb_info *minfo) { /* G400 */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 252000; minfo->values.reg.mctlwtst = 0x04A450A1; minfo->values.reg.memrdbk = 0x000000E7; minfo->values.reg.opt = 0x10000400; minfo->values.reg.opt3 = 0x0190A419; minfo->values.pll.system = 200000; minfo->features.pll.ref_freq = 27000; } static int parse_pins5(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int mult; mult = bd->pins[4]?8000:6000; minfo->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult; minfo->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 36] * mult; minfo->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? minfo->limits.system.vcomax : bd->pins[ 37] * mult; minfo->limits.pixel.vcomin = (bd->pins[123] == 0xFF) ? 256000 : bd->pins[123] * mult; minfo->limits.system.vcomin = (bd->pins[121] == 0xFF) ? minfo->limits.pixel.vcomin : bd->pins[121] * mult; minfo->limits.video.vcomin = (bd->pins[122] == 0xFF) ? minfo->limits.system.vcomin : bd->pins[122] * mult; minfo->values.pll.system = minfo->values.pll.video = (bd->pins[ 92] == 0xFF) ? 
284000 : bd->pins[ 92] * 4000; minfo->values.reg.opt = get_unaligned_le32(bd->pins + 48); minfo->values.reg.opt2 = get_unaligned_le32(bd->pins + 52); minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 94); minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98); minfo->values.reg.memmisc = get_unaligned_le32(bd->pins + 102); minfo->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106); minfo->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; minfo->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; minfo->values.memory.dll = (bd->pins[115] & 0x02) != 0; minfo->values.memory.emrswen = (bd->pins[115] & 0x01) != 0; minfo->values.reg.maccess = minfo->values.memory.emrswen ? 0x00004000 : 0x00000000; if (bd->pins[115] & 4) { minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst; } else { u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 }; minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) | wtst_xlat[minfo->values.reg.mctlwtst & 7]; } minfo->max_pixel_clock_panellink = bd->pins[47] * 4000; return 0; } static void default_pins5(struct matrox_fb_info *minfo) { /* Mine 16MB G450 with SDRAM DDR */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = minfo->limits.video.vcomax = 600000; minfo->limits.pixel.vcomin = minfo->limits.system.vcomin = minfo->limits.video.vcomin = 256000; minfo->values.pll.system = minfo->values.pll.video = 284000; minfo->values.reg.opt = 0x404A1160; minfo->values.reg.opt2 = 0x0000AC00; minfo->values.reg.opt3 = 0x0090A409; minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst = 0x0C81462B; minfo->values.reg.memmisc = 0x80000004; minfo->values.reg.memrdbk = 0x01001103; minfo->features.pll.ref_freq = 27000; minfo->values.memory.ddr = 1; minfo->values.memory.dll = 1; minfo->values.memory.emrswen = 1; minfo->values.reg.maccess = 0x00004000; } static int matroxfb_set_limits(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int pins_version; static const unsigned int 
pinslen[] = { 64, 64, 64, 128, 128 }; switch (minfo->chip) { case MGA_2064: default_pins1(minfo); break; case MGA_2164: case MGA_1064: case MGA_1164: default_pins2(minfo); break; case MGA_G100: case MGA_G200: default_pins3(minfo); break; case MGA_G400: default_pins4(minfo); break; case MGA_G450: case MGA_G550: default_pins5(minfo); break; } if (!bd->bios_valid) { printk(KERN_INFO "matroxfb: Your Matrox device does not have BIOS\n"); return -1; } if (bd->pins_len < 64) { printk(KERN_INFO "matroxfb: BIOS on your Matrox device does not contain powerup info\n"); return -1; } if (bd->pins[0] == 0x2E && bd->pins[1] == 0x41) { pins_version = bd->pins[5]; if (pins_version < 2 || pins_version > 5) { printk(KERN_INFO "matroxfb: Unknown version (%u) of powerup info\n", pins_version); return -1; } } else { pins_version = 1; } if (bd->pins_len != pinslen[pins_version - 1]) { printk(KERN_INFO "matroxfb: Invalid powerup info\n"); return -1; } switch (pins_version) { case 1: return parse_pins1(minfo, bd); case 2: return parse_pins2(minfo, bd); case 3: return parse_pins3(minfo, bd); case 4: return parse_pins4(minfo, bd); case 5: return parse_pins5(minfo, bd); default: printk(KERN_DEBUG "matroxfb: Powerup info version %u is not yet supported\n", pins_version); return -1; } } void matroxfb_read_pins(struct matrox_fb_info *minfo) { u32 opt; u32 biosbase; u32 fbbase; struct pci_dev *pdev = minfo->pcidev; memset(&minfo->bios, 0, sizeof(minfo->bios)); pci_read_config_dword(pdev, PCI_OPTION_REG, &opt); pci_write_config_dword(pdev, PCI_OPTION_REG, opt | PCI_OPTION_ENABLE_ROM); pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &biosbase); pci_read_config_dword(pdev, minfo->devflags.fbResource, &fbbase); pci_write_config_dword(pdev, PCI_ROM_ADDRESS, (fbbase & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); parse_bios(vaddr_va(minfo->video.vbase), &minfo->bios); pci_write_config_dword(pdev, PCI_ROM_ADDRESS, biosbase); pci_write_config_dword(pdev, PCI_OPTION_REG, opt); #ifdef CONFIG_X86 if 
(!minfo->bios.bios_valid) { unsigned char __iomem* b; b = ioremap(0x000C0000, 65536); if (!b) { printk(KERN_INFO "matroxfb: Unable to map legacy BIOS\n"); } else { unsigned int ven = readb(b+0x64+0) | (readb(b+0x64+1) << 8); unsigned int dev = readb(b+0x64+2) | (readb(b+0x64+3) << 8); if (ven != pdev->vendor || dev != pdev->device) { printk(KERN_INFO "matroxfb: Legacy BIOS is for %04X:%04X, while this device is %04X:%04X\n", ven, dev, pdev->vendor, pdev->device); } else { parse_bios(b, &minfo->bios); } iounmap(b); } } #endif matroxfb_set_limits(minfo, &minfo->bios); printk(KERN_INFO "PInS memtype = %u\n", (minfo->values.reg.opt & 0x1C00) >> 10); } EXPORT_SYMBOL(matroxfb_DAC_in); EXPORT_SYMBOL(matroxfb_DAC_out); EXPORT_SYMBOL(matroxfb_var2my); EXPORT_SYMBOL(matroxfb_PLL_calcclock); EXPORT_SYMBOL(matroxfb_vgaHWinit); /* DAC1064, Ti3026 */ EXPORT_SYMBOL(matroxfb_vgaHWrestore); /* DAC1064, Ti3026 */ EXPORT_SYMBOL(matroxfb_read_pins); MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <[email protected]>"); MODULE_DESCRIPTION("Miscellaneous support for Matrox video cards"); MODULE_LICENSE("GPL");
435078.c
// // Простой программатор микроконтроллеров AVR для Raspberry Pi // для программирования используются стандартные выводы шины GPIO // (MOSI - 19, MISO - 21, SCK - 23) + 15й пин для подачи сигнала RESET // // Written on Raspberry Pi for Raspberry Pi // // (c) Alex V. Zolotov <[email protected]>, 2013 // Fill free to copy, to compile, to use, to redistribute and etc on your own risk. // #include <bcm2835.h> #include <stdio.h> #include <time.h> enum { AT_ACT_INFO, AT_ACT_CHECK, AT_ACT_WRITE, AT_ACT_ERASE, AT_ACT_READ_FUSE, AT_ACT_WRITE_FUSE_LO, AT_ACT_WRITE_FUSE_HI, AT_ACT_WRITE_FUSE_EX }; #define AT_PB3 RPI_V2_GPIO_P1_11 #define AT_PB4 RPI_V2_GPIO_P1_13 #define AT_PWR RPI_V2_GPIO_P1_17 #define AT_RESET RPI_V2_GPIO_P1_18 #define AT_MOSI RPI_V2_GPIO_P1_19 #define AT_MISO RPI_V2_GPIO_P1_21 #define AT_SCK RPI_V2_GPIO_P1_23 #define AT_GND RPI_V2_GPIO_P1_25 #define GPIO_OUT(pin) bcm2835_gpio_fsel(pin, BCM2835_GPIO_FSEL_OUTP) #define GPIO_INP(pin) bcm2835_gpio_fsel(pin, BCM2835_GPIO_FSEL_INPT) /** * Действие которое надо выполнить */ int action; /** * Имя hex-файла */ const char *fname; unsigned int fuse_bits; /** * Нужно ли выводить дополнительный отладочный вывод или быть тихим? 
*/ int verbose = 0; /** * Приостановить процесс на указанное число надосекунд */ void at_nanosleep(unsigned int ns) { struct timespec ts; ts.tv_sec = 0; ts.tv_nsec = ns; nanosleep(&ts, 0); } /** * Считать значение с пина */ int at_read(unsigned int pin) { return bcm2835_gpio_lev(pin); } /** * Выставить значение пина */ void at_write(unsigned int pin, unsigned int value) { bcm2835_gpio_write(pin, value); } /** * Инициализация пинов программатора */ void at_init() { // GPIO_INP(AT_PB3); // GPIO_INP(AT_PB4); GPIO_OUT(AT_RESET); at_write(AT_RESET, HIGH); bcm2835_spi_begin(); //Set CS pins polarity to low bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS0, 0); bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS1, 0); bcm2835_spi_setClockDivider(BCM2835_SPI_CLOCK_DIVIDER_2048); bcm2835_spi_setDataMode(BCM2835_SPI_MODE0); bcm2835_spi_chipSelect(BCM2835_SPI_CS0); // GPIO_INP(AT_PWR); // GPIO_OUT(AT_MOSI); // at_write(AT_MOSI, LOW); // GPIO_INP(AT_MISO); // GPIO_OUT(AT_SCK); // at_write(AT_SCK, LOW); // GPIO_INP(AT_GND); } /** * Формирование тактов SPI */ void at_set_clk() { at_write(AT_SCK, HIGH); at_nanosleep(1000); } /** * Формирование тактов SPI */ void at_clr_clk() { at_write(AT_SCK, LOW); at_nanosleep(1000); } /** * Отправить байт по SPI и прочитать ответный */ unsigned char at_spi_io(unsigned char data) { unsigned int result = 0; unsigned int bit; int i; for(i = 0; i < 8; i++) { bit = (data & 0x80) ? 1 : 0; at_write(AT_MOSI, bit ? 
HIGH : LOW); at_set_clk(); data <<=1; bit = at_read(AT_MISO); result = result * 2 + bit; at_clr_clk(); } return result; } /** * Отправить байт по SPI и прочитать ответный */ unsigned char at_spi_io_hw(unsigned char data) { return bcm2835_spi_transfer((uint8_t)data); } /** * Отправить комманду микроконтроллеру и прочтать ответ * Все комманды размером 4 байта */ unsigned int at_io(unsigned int cmd) { unsigned int result = 0; int i; for(i = 0; i < 4; i++) { unsigned char byte = cmd >> 24; byte = at_spi_io_hw(byte); result = result * 256 + byte; cmd <<= 8; } return result; } /** * Подать сигнал RESET, чтобы запустить устройство используй at_run() */ void at_reset() { if ( verbose ) { printf("reset device\n"); } at_write(AT_RESET, HIGH); } /** * Запустить устройство - вывести из состояния RESET */ void at_run() { if ( verbose ) { printf("run device\n"); } at_write(AT_RESET, LOW); } /** * Перезагрузить устройство */ void at_reboot() { if ( verbose ) { printf("reboot device\n"); } at_write(AT_RESET, HIGH); bcm2835_delay(1); at_write(AT_RESET, LOW); } /** * Подать сигнал RESET и командду "Programming Enable" */ int at_program_enable() { // at_write(AT_SCK, LOW); // at_write(AT_MOSI, LOW); at_write(AT_RESET, LOW); bcm2835_delay(1); at_write(AT_RESET, HIGH); bcm2835_delay(1); at_write(AT_RESET, LOW); bcm2835_delay(1); unsigned int r = at_io(0xAC530000); int status = (r & 0xFF00) == 0x5300; if ( verbose ) { const char *s = status ? "ok" : "fault"; printf("at_program_enable(): %s\n", s); } return status; } /** * Подать команду "Read Device Code" */ unsigned int at_chip_info() { unsigned int sig = 0; sig = sig * 256 + (at_io(0x30000000) & 0xFF); sig = sig * 256 + (at_io(0x30000100) & 0xFF); sig = sig * 256 + (at_io(0x30000200) & 0xFF); if ( verbose ) { printf("at_chip_info(): %08X\n", sig); } return sig; } /** * Прочитать байт прошивки из устройства */ unsigned char at_read_memory(unsigned int addr) { unsigned int cmd = (addr & 1) ? 
0x28 : 0x20; unsigned int offset = (addr >> 1) & 0xFFFF; unsigned int byte = at_io( (cmd << 24) | (offset << 8 ) ) & 0xFF; return byte; } static active_page = 0; /** * Записать байт прошивки в устройство * NOTE: байт сначала записывается в специальный буфер, фиксация * данных происходит в функции at_flush(). Функция at_write_memory() * сама переодически вызывает at_flush() поэтому нет необходимости * часто вызывать at_flush() и необходимо только в конце, чтобы * убедиться что последние данные будут записаны в устройство */ int at_write_memory(unsigned int addr, unsigned char byte) { unsigned int cmd = (addr & 1) ? 0x48 : 0x40; unsigned int offset = (addr >> 1) & 0xFFFF; unsigned int page = (addr >> 1) & 0xFFF0; unsigned int x = 0; if ( page != active_page ) { at_flush(); active_page = page; } unsigned int result = at_io(x = (cmd << 24) | (offset << 8 ) | (byte & 0xFF) ); unsigned int r = (result >> 16) & 0xFF; int status = r == cmd; if ( verbose ) { printf("."); fflush(stdout); //printf("[%04X]=%02X%s ", offset, byte, (status ? "+" : "-")); } return status; } /** * Завершить запись данных */ int at_flush() { unsigned int cmd = 0x4C; unsigned int offset = active_page & 0xFFF0; unsigned int x = 0; unsigned int result = at_io(x = (cmd << 24) | (offset << 8 ) ); unsigned int r = (result >> 16) & 0xFF; int status = r == cmd; if ( verbose ) { printf("FLUSH[%04X]%s\n", offset, (status ? "+" : "-")); } bcm2835_delay(10); return status; } /** * выдать сообщение об ошибке в hex-файле */ void at_wrong_file() { printf("wrong hex-file\n"); } /** * Конверировать шестнадцатеричную цифру в число */ unsigned char at_hex_digit(char ch) { if ( ch >= '0' && ch <= '9' ) return ch - '0'; if ( ch >= 'A' && ch <= 'F' ) return ch - 'A' + 10; if ( ch >= 'a' && ch <= 'f' ) return ch - 'a' + 10; // TODO somethink... 
return 0; } unsigned int at_hex_to_int(const char *s) { unsigned int r = 0; while ( *s ) { char ch = *s++; unsigned int hex = 0x10; if ( ch >= '0' && ch <= '9' ) hex = ch - '0'; else if ( ch >= 'A' && ch <= 'F' ) hex = ch - 'A' + 10; else if ( ch >= 'a' && ch <= 'f' ) hex = ch - 'a' + 10; else hex = 0x10; if ( hex > 0xF ) return r; r = r * 0x10 + hex; } return r; } /** * Прочитать байт */ unsigned char at_hex_get_byte(const char *line, int i) { int offset = i * 2 + 1; // TODO index limit checks return at_hex_digit(line[offset]) * 16 + at_hex_digit(line[offset+1]); } /** * Прочитать слово (два байта) */ unsigned int at_hex_get_word(const char *line, int i) { return at_hex_get_byte(line, i) * 256 + at_hex_get_byte(line, i+1); } /** * Прочитать байт данных (читает из секции данных) */ unsigned char at_hex_get_data(const char *line, int i) { return at_hex_get_byte(line, i + 4); } /** * Сверить прошивку с данными из файла */ int at_check_firmware(const char *fname) { FILE *f = fopen(fname, "r"); if ( f ) { int lineno = 0; int result = 1; int bytes = 0; while ( 1 ) { char line[1024]; const char *s = fgets(line, sizeof(line), f); if ( s == NULL ) break; //printf("%s", line); lineno++; if ( line[0] != ':' ) { at_wrong_file(); break; } unsigned char len = at_hex_get_byte(line, 0); unsigned int addr = at_hex_get_word(line, 1); unsigned char type = at_hex_get_byte(line, 3); unsigned char cc = at_hex_get_byte(line, 4 + len); //printf("len: %u, addr: %u, type: %u, cc: %u\n", len, addr, type, cc); if ( type == 0 ) { int i; for(i = 0; i < len; i++) { bytes ++; unsigned char fbyte = at_hex_get_data(line, i); unsigned char dbyte = at_read_memory(addr + i); int r = (fbyte == dbyte); result = result && r; if ( verbose ) { printf("%02X%s ", fbyte, (r ? "+" : "-")); fflush(stdout); } } if ( verbose ) printf("\n"); } if ( type == 1 ) { if ( verbose) printf("end of hex-file\n"); break; } } fclose(f); if ( verbose ) { char *st = result ? 
"same" : "differ"; printf("firmware has checked: %s, bytes: %d\n", st, bytes); } return result; } return 0; } /** * Стереть чип */ int at_chip_erase() { if ( verbose ) { printf("erase device's firmware\n"); } unsigned int r = at_io(0xAC800000); int ok = ((r >> 16) & 0xFF) == 0xAC; if ( ok ) { bcm2835_delay(10); at_reboot(); at_program_enable(); } return ok; } /** * Записать прошивку в устройство */ int at_write_firmware(const char *fname) { FILE *f = fopen(fname, "r"); if ( f ) { int lineno = 0; int result = 1; int bytes = 0; while ( 1 ) { char line[1024]; const char *s = fgets(line, sizeof(line), f); if ( s == NULL ) break; //printf("%s", line); lineno++; if ( line[0] != ':' ) { at_wrong_file(); break; } unsigned char len = at_hex_get_byte(line, 0); unsigned int addr = at_hex_get_word(line, 1); unsigned char type = at_hex_get_byte(line, 3); unsigned char cc = at_hex_get_byte(line, 4 + len); //printf("len: %u, addr: %u, type: %u, cc: %u\n", len, addr, type, cc); if ( type == 0 ) { int i; for(i = 0; i < len; i++) { bytes++; unsigned char fbyte = at_hex_get_data(line, i); int r = at_write_memory(addr + i, fbyte); result = result && r; //printf("%02X%s ", fbyte, (r ? "+" : "-")); } //printf("\n"); } if ( type == 1 ) { if ( verbose ) printf("\nend of hex-file\n"); break; } } at_flush(); fclose(f); if ( verbose ) { char *st = result ? 
"ok" : "fail"; printf("memory write: %s, bytes: %d\n", st, bytes); } return result; } return 0; } /** * Действие - вывести информацию об устройстве */ int at_act_info() { unsigned int info = at_chip_info(); printf("chip signature: 0x%02X, 0x%02X, 0x%02X\n", (info >> 16) & 0xFF, (info >> 8) & 0xFF, info & 0xFF); return 1; } /** * Действие - сверить прошивку в устрействе с файлом */ int at_act_check() { int r = at_check_firmware(fname); if ( r ) printf("firmware is same\n"); else printf("firmware differ\n"); return 0; } /** * Действие - записать прошивку в устройство */ int at_act_write() { int r = at_chip_erase(); if ( r ) { r = at_write_firmware(fname); } else { printf("firmware erase fault\n"); } printf("firmware write: %s\n", (r ? "ok" : "fail")); return r; } /** * Действие - стереть чип */ int at_act_erase() { int r = at_chip_erase(); printf("chip erase: %s\n", (r ? "ok" : "fail")); } /** * Действие - прочитать биты fuse */ int at_act_read_fuse() { if ( verbose ) { printf("read device's fuses\n"); } unsigned int fuse_lo; unsigned int fuse_hi; unsigned int fuse_ex; fuse_lo = at_io(0x50000000); fuse_hi = at_io(0x58080000); fuse_ex = at_io(0x50080000); printf("fuse[lo]: %02X (%08X)\n", (fuse_lo % 0x100), fuse_lo); printf("fuse[hi]: %02X (%08X)\n", (fuse_hi % 0x100), fuse_hi); printf("fuse[ex]: %02X (%08X)\n", (fuse_ex % 0x100), fuse_ex); return 1; } /** * Действие - записать младшие биты fuse */ int at_act_write_fuse_lo() { if ( verbose ) { printf("write device's low fuse bits (0x%02X)\n", fuse_bits); } if ( fuse_bits > 0xFF ) { printf("wrong fuse bits\n"); return 0; } unsigned int r = at_io(0xACA00000 + (fuse_bits & 0xFF)); int ok = ((r >> 16) & 0xFF) == 0xAC; if ( verbose ) { const char *status = ok ? 
"[ ok ]" : "[ fail ]"; printf("write device's low fuse bits %s\n", status); } return ok; } /** * Действие - записать старшие биты fuse */ int at_act_write_fuse_hi() { if ( verbose ) { printf("write device's high fuse bits (0x%02X)\n", fuse_bits); } if ( fuse_bits > 0xFF ) { printf("wrong fuse bits\n"); return 0; } unsigned int r = at_io(0xACA80000 + (fuse_bits & 0xFF)); int ok = ((r >> 16) & 0xFF) == 0xAC; if ( verbose ) { const char *status = ok ? "[ ok ]" : "[ fail ]"; printf("write device's high fuse bits %s\n", status); } return ok; } /** * Действие - записать расширеные биты fuse */ int at_act_write_fuse_ex() { if ( verbose ) { printf("write device's extended fuse bits (0x%02X)\n", fuse_bits); } if ( fuse_bits > 0xFF ) { printf("wrong fuse bits\n"); return 0; } unsigned int r = at_io(0xACA40000 + (fuse_bits & 0xFF)); int ok = ((r >> 16) & 0xFF) == 0xAC; if ( verbose ) { const char *status = ok ? "[ ok ]" : "[ fail ]"; printf("write device's extended fuse bits %s\n", status); } return ok; } int run() { switch ( action ) { case AT_ACT_INFO: return at_act_info(); case AT_ACT_CHECK: return at_act_check(); case AT_ACT_WRITE: return at_act_write(); case AT_ACT_ERASE: return at_act_erase(); case AT_ACT_READ_FUSE: return at_act_read_fuse(); case AT_ACT_WRITE_FUSE_LO: return at_act_write_fuse_lo(); case AT_ACT_WRITE_FUSE_HI: return at_act_write_fuse_hi(); case AT_ACT_WRITE_FUSE_EX: return at_act_write_fuse_ex(); } printf("Victory!\n"); return 0; } int help() { printf("pigro :action: :filename: :verbose|quiet:\n"); printf(" action:\n"); printf(" info - read chip info\n"); printf(" check - read file and compare with device\n"); printf(" write - read file and write to device\n"); printf(" erase - just erase chip\n"); printf(" rfuse - read fuses\n"); printf(" wfuse_lo - write low fuse bits\n"); printf(" wfuse_hi - write high fuse bits\n"); printf(" wfuse_ex - write extended fuse bits\n"); return 0; } int main(int argc, char *argv[]) { int status = 0; if ( argc <= 1 ) 
return help(); if ( strcmp(argv[1], "info") == 0 ) action = AT_ACT_INFO; else if ( strcmp(argv[1], "check") == 0 ) action = AT_ACT_CHECK; else if ( strcmp(argv[1], "write") == 0 ) action = AT_ACT_WRITE; else if ( strcmp(argv[1], "erase") == 0 ) action = AT_ACT_ERASE; else if ( strcmp(argv[1], "rfuse") == 0 ) action = AT_ACT_READ_FUSE; else if ( strcmp(argv[1], "wfuse_lo") == 0 ) action = AT_ACT_WRITE_FUSE_LO; else if ( strcmp(argv[1], "wfuse_hi") == 0 ) action = AT_ACT_WRITE_FUSE_HI; else if ( strcmp(argv[1], "wfuse_ex") == 0 ) action = AT_ACT_WRITE_FUSE_EX; else return help(); fuse_bits = 0x100; if ( argc > 2 ) { fuse_bits = at_hex_to_int(argv[2]); } fname = argc > 2 ? argv[2] : "firmware.hex"; verbose = argc > 3 && strcmp(argv[3], "verbose") == 0; if ( verbose ) { printf("fname: %s\n", fname); } if ( !bcm2835_init() ) { printf("bcm2835_init() fault, try under root\n"); return 1; } at_init(); if ( at_program_enable() ) { status = run(); at_run(); } else { fprintf(stderr, "ac_program_enable() failed\n"); } // GPIO_INP(AT_PB3); // GPIO_INP(AT_PB4); GPIO_INP(AT_RESET); // GPIO_INP(AT_PWR); // GPIO_INP(AT_MOSI); // GPIO_INP(AT_MISO); // GPIO_INP(AT_SCK); // GPIO_INP(AT_GND); bcm2835_spi_end(); if ( verbose ) { printf("main() = %d\n", status); } return status; }
987346.c
/* * Copyright (C) Igor Sysoev * Copyright (C) 2007 Manlio Perillo ([email protected]) * Copyright (c) 2010-2017 Phusion Holding B.V. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> #include <sys/types.h> #include <pwd.h> #include <stdlib.h> #include <assert.h> #include "ngx_http_passenger_module.h" #include "Configuration.h" #include "ContentHandler.h" #include "ConfigGeneral/AutoGeneratedManifestDefaultsInitialization.c" #include "ConfigGeneral/AutoGeneratedSetterFuncs.c" #include "ConfigGeneral/ManifestGeneration.c" #include "MainConfig/AutoGeneratedCreateFunction.c" #include "MainConfig/AutoGeneratedManifestGeneration.c" #include "LocationConfig/AutoGeneratedCreateFunction.c" #include "LocationConfig/AutoGeneratedManifestGeneration.c" #include "cxx_supportlib/Constants.h" #include "cxx_supportlib/vendor-modified/modp_b64.h" static ngx_str_t headers_to_hide[] = { /* NOTE: Do not hide the "Status" header; some broken HTTP clients * expect this header. http://code.google.com/p/phusion-passenger/issues/detail?id=177 */ ngx_string("X-Accel-Expires"), ngx_string("X-Accel-Redirect"), ngx_string("X-Accel-Limit-Rate"), ngx_string("X-Accel-Buffering"), ngx_null_string }; passenger_main_conf_t passenger_main_conf; static ngx_int_t merge_headers(ngx_conf_t *cf, passenger_loc_conf_t *conf, passenger_loc_conf_t *prev); static ngx_int_t merge_string_array(ngx_conf_t *cf, ngx_array_t **prev, ngx_array_t **conf); static ngx_int_t merge_string_keyval_table(ngx_conf_t *cf, ngx_array_t **prev, ngx_array_t **conf); #include "LocationConfig/AutoGeneratedMergeFunction.c" #include "LocationConfig/AutoGeneratedHeaderSerialization.c" void * passenger_create_main_conf(ngx_conf_t *cf) { passenger_main_conf_t *conf; conf = ngx_pcalloc(cf->pool, sizeof(passenger_main_conf_t)); if (conf == NULL) { return NGX_CONF_ERROR; } conf->default_ruby.data = NULL; conf->default_ruby.len = 0; passenger_create_autogenerated_main_conf(&conf->autogenerated); return conf; } char * passenger_init_main_conf(ngx_conf_t *cf, void *conf_pointer) { passenger_main_conf_t *conf; struct passwd *user_entry; struct group 
*group_entry; char buf[128]; conf = &passenger_main_conf; *conf = *((passenger_main_conf_t *) conf_pointer); if (conf->autogenerated.abort_on_startup_error == NGX_CONF_UNSET) { conf->autogenerated.abort_on_startup_error = 0; } if (conf->autogenerated.show_version_in_header == NGX_CONF_UNSET) { conf->autogenerated.show_version_in_header = 1; } if (conf->autogenerated.default_user.len == 0) { conf->autogenerated.default_user.len = sizeof(DEFAULT_WEB_APP_USER) - 1; conf->autogenerated.default_user.data = (u_char *) DEFAULT_WEB_APP_USER; } if (conf->autogenerated.default_user.len > sizeof(buf) - 1) { return "Value for 'passenger_default_user' is too long."; } memcpy(buf, conf->autogenerated.default_user.data, conf->autogenerated.default_user.len); buf[conf->autogenerated.default_user.len] = '\0'; user_entry = getpwnam(buf); if (user_entry == NULL) { return "The user specified by the 'passenger_default_user' option does not exist."; } if (conf->autogenerated.default_group.len > 0) { if (conf->autogenerated.default_group.len > sizeof(buf) - 1) { return "Value for 'passenger_default_group' is too long."; } memcpy(buf, conf->autogenerated.default_group.data, conf->autogenerated.default_group.len); buf[conf->autogenerated.default_group.len] = '\0'; group_entry = getgrnam(buf); if (group_entry == NULL) { return "The group specified by the 'passenger_default_group' option does not exist."; } } return NGX_CONF_OK; } void * passenger_create_loc_conf(ngx_conf_t *cf) { passenger_loc_conf_t *conf; conf = ngx_pcalloc(cf->pool, sizeof(passenger_loc_conf_t)); if (conf == NULL) { return NGX_CONF_ERROR; } /* * set by ngx_pcalloc(): * * conf->upstream_config.bufs.num = 0; * conf->upstream_config.next_upstream = 0; * conf->upstream_config.temp_path = NULL; * conf->upstream_config.hide_headers_hash = { NULL, 0 }; * conf->upstream_config.hide_headers = NULL; * conf->upstream_config.pass_headers = NULL; * conf->upstream_config.uri = { 0, NULL }; * conf->upstream_config.location = NULL; * 
conf->upstream_config.store_lengths = NULL; * conf->upstream_config.store_values = NULL; */ conf->parent = NULL; if (ngx_array_init(&conf->children, cf->pool, 8, sizeof(passenger_loc_conf_t *)) != NGX_OK) { return NGX_CONF_ERROR; } if (cf->conf_file == NULL) { conf->context_source_file.data = (u_char *) NULL; conf->context_source_file.len = 0; conf->context_source_line = 0; } else if (cf->conf_file->file.fd == NGX_INVALID_FILE) { conf->context_source_file.data = (u_char *) "(command line)"; conf->context_source_file.len = sizeof("(command line)") - 1; conf->context_source_line = 0; } else { conf->context_source_file = cf->conf_file->file.name; conf->context_source_line = cf->conf_file->line; } conf->cscf = NULL; conf->clcf = NULL; passenger_create_autogenerated_loc_conf(&conf->autogenerated); /******************************/ /******************************/ conf->upstream_config.pass_headers = NGX_CONF_UNSET_PTR; conf->upstream_config.hide_headers = NGX_CONF_UNSET_PTR; conf->upstream_config.store = NGX_CONF_UNSET; conf->upstream_config.store_access = NGX_CONF_UNSET_UINT; #if NGINX_VERSION_NUM >= 1007005 conf->upstream_config.next_upstream_tries = NGX_CONF_UNSET_UINT; #endif conf->upstream_config.buffering = NGX_CONF_UNSET; conf->upstream_config.request_buffering = NGX_CONF_UNSET; conf->upstream_config.ignore_client_abort = NGX_CONF_UNSET; #if NGINX_VERSION_NUM >= 1007007 conf->upstream_config.force_ranges = NGX_CONF_UNSET; #endif conf->upstream_config.local = NGX_CONF_UNSET_PTR; conf->upstream_config.connect_timeout = NGX_CONF_UNSET_MSEC; conf->upstream_config.send_timeout = NGX_CONF_UNSET_MSEC; conf->upstream_config.read_timeout = NGX_CONF_UNSET_MSEC; #if NGINX_VERSION_NUM >= 1007005 conf->upstream_config.next_upstream_timeout = NGX_CONF_UNSET_MSEC; #endif conf->upstream_config.send_lowat = NGX_CONF_UNSET_SIZE; conf->upstream_config.buffer_size = NGX_CONF_UNSET_SIZE; #if NGINX_VERSION_NUM >= 1007007 conf->upstream_config.limit_rate = NGX_CONF_UNSET_SIZE; #endif 
conf->upstream_config.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; conf->upstream_config.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; conf->upstream_config.temp_file_write_size_conf = NGX_CONF_UNSET_SIZE; conf->upstream_config.pass_request_headers = NGX_CONF_UNSET; conf->upstream_config.pass_request_body = NGX_CONF_UNSET; #if (NGX_HTTP_CACHE) #if NGINX_VERSION_NUM >= 1007009 conf->upstream_config.cache = NGX_CONF_UNSET; #else conf->upstream_config.cache = NGX_CONF_UNSET_PTR; #endif conf->upstream_config.cache_min_uses = NGX_CONF_UNSET_UINT; conf->upstream_config.cache_bypass = NGX_CONF_UNSET_PTR; conf->upstream_config.no_cache = NGX_CONF_UNSET_PTR; conf->upstream_config.cache_valid = NGX_CONF_UNSET_PTR; conf->upstream_config.cache_lock = NGX_CONF_UNSET; conf->upstream_config.cache_lock_timeout = NGX_CONF_UNSET_MSEC; #if NGINX_VERSION_NUM >= 1007008 conf->upstream_config.cache_lock_age = NGX_CONF_UNSET_MSEC; #endif #if NGINX_VERSION_NUM >= 1006000 conf->upstream_config.cache_revalidate = NGX_CONF_UNSET; #endif #endif conf->upstream_config.intercept_errors = NGX_CONF_UNSET; conf->upstream_config.cyclic_temp_file = 0; conf->upstream_config.change_buffering = 1; ngx_str_set(&conf->upstream_config.module, "passenger"); conf->options_cache.data = NULL; conf->options_cache.len = 0; conf->env_vars_cache.data = NULL; conf->env_vars_cache.len = 0; return conf; } static ngx_int_t serialize_loc_conf_to_headers(ngx_conf_t *cf, passenger_loc_conf_t *conf) { ngx_uint_t i; ngx_keyval_t *env_vars; size_t unencoded_len; u_char *unencoded_buf; if (passenger_serialize_autogenerated_loc_conf_to_headers(cf, conf) == 0) { return NGX_ERROR; } if (conf->autogenerated.env_vars != NULL) { size_t len = 0; u_char *buf; u_char *pos; /* Cache env vars data as base64-serialized string. * First, calculate the length of the unencoded data. 
*/ unencoded_len = 0; env_vars = (ngx_keyval_t *) conf->autogenerated.env_vars->elts; for (i = 0; i < conf->autogenerated.env_vars->nelts; i++) { unencoded_len += env_vars[i].key.len + 1 + env_vars[i].value.len + 1; } /* Create the unecoded data. */ unencoded_buf = pos = (u_char *) malloc(unencoded_len); if (unencoded_buf == NULL) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "cannot allocate buffer of %z bytes for environment variables data", unencoded_len); return NGX_ERROR; } for (i = 0; i < conf->autogenerated.env_vars->nelts; i++) { pos = ngx_copy(pos, env_vars[i].key.data, env_vars[i].key.len); *pos = '\0'; pos++; pos = ngx_copy(pos, env_vars[i].value.data, env_vars[i].value.len); *pos = '\0'; pos++; } assert((size_t) (pos - unencoded_buf) == unencoded_len); /* Create base64-serialized string. */ buf = ngx_palloc(cf->pool, modp_b64_encode_len(unencoded_len)); if (buf == NULL) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "cannot allocate buffer of %z bytes for base64 encoding", modp_b64_encode_len(unencoded_len)); return NGX_ERROR; } len = modp_b64_encode((char *) buf, (const char *) unencoded_buf, unencoded_len); if (len == (size_t) -1) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "error during base64 encoding"); free(unencoded_buf); return NGX_ERROR; } conf->env_vars_cache.data = buf; conf->env_vars_cache.len = len; free(unencoded_buf); } return NGX_OK; } char * passenger_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) { passenger_loc_conf_t *prev = parent; passenger_loc_conf_t *conf = child; passenger_loc_conf_t **children_elem; ngx_http_core_loc_conf_t *clcf; size_t size; ngx_hash_init_t hash; clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); /* The following works for all contexts within the http{} block, but does * not work for the http{} block itself. 
To obtain the ngx_http_core_(loc|srv)_conf_t * associated with the http{} block itself, we also set conf->(cscf|clcf) * from record_loc_conf_source_location(), which is called from the various * configuration setter functions. */ conf->cscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_core_module); conf->clcf = clcf; if (passenger_merge_autogenerated_loc_conf(&conf->autogenerated, &prev->autogenerated, cf) == 0) { return NGX_CONF_ERROR; } conf->parent = prev; children_elem = ngx_array_push(&prev->children); if (children_elem == NULL) { ngx_conf_log_error(NGX_LOG_EMERG, cf, ngx_errno, "cannot allocate memory"); return NGX_CONF_ERROR; } *children_elem = conf; if (prev->options_cache.data == NULL) { if (serialize_loc_conf_to_headers(cf, prev) != NGX_OK) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "cannot create " PROGRAM_NAME " configuration serialization cache"); return NGX_CONF_ERROR; } } /******************************/ /******************************/ #if (NGX_HTTP_CACHE) && NGINX_VERSION_NUM >= 1007009 if (conf->upstream_config.store > 0) { conf->upstream_config.cache = 0; } if (conf->upstream_config.cache > 0) { conf->upstream_config.store = 0; } #endif #if NGINX_VERSION_NUM >= 1007009 if (conf->upstream_config.store == NGX_CONF_UNSET) { ngx_conf_merge_value(conf->upstream_config.store, prev->upstream_config.store, 0); conf->upstream_config.store_lengths = prev->upstream_config.store_lengths; conf->upstream_config.store_values = prev->upstream_config.store_values; } #else if (conf->upstream_config.store != 0) { ngx_conf_merge_value(conf->upstream_config.store, prev->upstream_config.store, 0); if (conf->upstream_config.store_lengths == NULL) { conf->upstream_config.store_lengths = prev->upstream_config.store_lengths; conf->upstream_config.store_values = prev->upstream_config.store_values; } } #endif ngx_conf_merge_uint_value(conf->upstream_config.store_access, prev->upstream_config.store_access, 0600); #if NGINX_VERSION_NUM >= 1007005 
ngx_conf_merge_uint_value(conf->upstream_config.next_upstream_tries, prev->upstream_config.next_upstream_tries, 0); #endif ngx_conf_merge_value(conf->upstream_config.buffering, prev->upstream_config.buffering, 0); ngx_conf_merge_value(conf->upstream_config.request_buffering, prev->upstream_config.request_buffering, 1); ngx_conf_merge_value(conf->upstream_config.ignore_client_abort, prev->upstream_config.ignore_client_abort, 0); #if NGINX_VERSION_NUM >= 1007007 ngx_conf_merge_value(conf->upstream_config.force_ranges, prev->upstream_config.force_ranges, 0); #endif ngx_conf_merge_ptr_value(conf->upstream_config.local, prev->upstream_config.local, NULL); ngx_conf_merge_msec_value(conf->upstream_config.connect_timeout, prev->upstream_config.connect_timeout, 12000000); ngx_conf_merge_msec_value(conf->upstream_config.send_timeout, prev->upstream_config.send_timeout, 12000000); ngx_conf_merge_msec_value(conf->upstream_config.read_timeout, prev->upstream_config.read_timeout, 12000000); #if NGINX_VERSION_NUM >= 1007005 ngx_conf_merge_msec_value(conf->upstream_config.next_upstream_timeout, prev->upstream_config.next_upstream_timeout, 0); #endif ngx_conf_merge_size_value(conf->upstream_config.send_lowat, prev->upstream_config.send_lowat, 0); ngx_conf_merge_size_value(conf->upstream_config.buffer_size, prev->upstream_config.buffer_size, 16 * 1024); #if NGINX_VERSION_NUM >= 1007007 ngx_conf_merge_size_value(conf->upstream_config.limit_rate, prev->upstream_config.limit_rate, 0); #endif ngx_conf_merge_bufs_value(conf->upstream_config.bufs, prev->upstream_config.bufs, 8, 16 * 1024); if (conf->upstream_config.bufs.num < 2) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "there must be at least 2 \"passenger_buffers\""); return NGX_CONF_ERROR; } size = conf->upstream_config.buffer_size; if (size < conf->upstream_config.bufs.size) { size = conf->upstream_config.bufs.size; } ngx_conf_merge_size_value(conf->upstream_config.busy_buffers_size_conf, prev->upstream_config.busy_buffers_size_conf, 
NGX_CONF_UNSET_SIZE); if (conf->upstream_config.busy_buffers_size_conf == NGX_CONF_UNSET_SIZE) { conf->upstream_config.busy_buffers_size = 2 * size; } else { conf->upstream_config.busy_buffers_size = conf->upstream_config.busy_buffers_size_conf; } if (conf->upstream_config.busy_buffers_size < size) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"passenger_busy_buffers_size\" must be equal to or greater " "than the maximum of the value of \"passenger_buffer_size\" and " "one of the \"passenger_buffers\""); return NGX_CONF_ERROR; } if (conf->upstream_config.busy_buffers_size > (conf->upstream_config.bufs.num - 1) * conf->upstream_config.bufs.size) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"passenger_busy_buffers_size\" must be less than " "the size of all \"passenger_buffers\" minus one buffer"); return NGX_CONF_ERROR; } ngx_conf_merge_size_value(conf->upstream_config.temp_file_write_size_conf, prev->upstream_config.temp_file_write_size_conf, NGX_CONF_UNSET_SIZE); if (conf->upstream_config.temp_file_write_size_conf == NGX_CONF_UNSET_SIZE) { conf->upstream_config.temp_file_write_size = 2 * size; } else { conf->upstream_config.temp_file_write_size = conf->upstream_config.temp_file_write_size_conf; } if (conf->upstream_config.temp_file_write_size < size) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"passenger_temp_file_write_size\" must be equal to or greater than " "the maximum of the value of \"passenger_buffer_size\" and " "one of the \"passenger_buffers\""); return NGX_CONF_ERROR; } ngx_conf_merge_size_value(conf->upstream_config.max_temp_file_size_conf, prev->upstream_config.max_temp_file_size_conf, NGX_CONF_UNSET_SIZE); if (conf->upstream_config.max_temp_file_size_conf == NGX_CONF_UNSET_SIZE) { conf->upstream_config.max_temp_file_size = 1024 * 1024 * 1024; } else { conf->upstream_config.max_temp_file_size = conf->upstream_config.max_temp_file_size_conf; } if (conf->upstream_config.max_temp_file_size != 0 && conf->upstream_config.max_temp_file_size < size) { 
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"passenger_max_temp_file_size\" must be equal to zero to disable " "temporary files usage or must be equal to or greater than " "the maximum of the value of \"passenger_buffer_size\" and " "one of the \"passenger_buffers\""); return NGX_CONF_ERROR; } ngx_conf_merge_bitmask_value(conf->upstream_config.ignore_headers, prev->upstream_config.ignore_headers, NGX_CONF_BITMASK_SET); ngx_conf_merge_bitmask_value(conf->upstream_config.next_upstream, prev->upstream_config.next_upstream, (NGX_CONF_BITMASK_SET |NGX_HTTP_UPSTREAM_FT_ERROR |NGX_HTTP_UPSTREAM_FT_TIMEOUT)); if (conf->upstream_config.next_upstream & NGX_HTTP_UPSTREAM_FT_OFF) { conf->upstream_config.next_upstream = NGX_CONF_BITMASK_SET |NGX_HTTP_UPSTREAM_FT_OFF; } #if (NGX_HTTP_CACHE) #if NGINX_VERSION_NUM >= 1007009 if (conf->upstream_config.cache == NGX_CONF_UNSET) { ngx_conf_merge_value(conf->upstream_config.cache, prev->upstream_config.cache, 0); conf->upstream_config.cache_zone = prev->upstream_config.cache_zone; conf->upstream_config.cache_value = prev->upstream_config.cache_value; } if (conf->upstream_config.cache_zone && conf->upstream_config.cache_zone->data == NULL) { ngx_shm_zone_t *shm_zone; shm_zone = conf->upstream_config.cache_zone; ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"scgi_cache\" zone \"%V\" is unknown", &shm_zone->shm.name); return NGX_CONF_ERROR; } #else ngx_conf_merge_ptr_value(conf->upstream_config.cache, prev->upstream_config.cache, NULL); if (conf->upstream_config.cache && conf->upstream_config.cache->data == NULL) { ngx_shm_zone_t *shm_zone; shm_zone = conf->upstream_config.cache; ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"scgi_cache\" zone \"%V\" is unknown", &shm_zone->shm.name); return NGX_CONF_ERROR; } #endif ngx_conf_merge_uint_value(conf->upstream_config.cache_min_uses, prev->upstream_config.cache_min_uses, 1); ngx_conf_merge_bitmask_value(conf->upstream_config.cache_use_stale, prev->upstream_config.cache_use_stale, 
(NGX_CONF_BITMASK_SET | NGX_HTTP_UPSTREAM_FT_OFF)); if (conf->upstream_config.cache_use_stale & NGX_HTTP_UPSTREAM_FT_OFF) { conf->upstream_config.cache_use_stale = NGX_CONF_BITMASK_SET | NGX_HTTP_UPSTREAM_FT_OFF; } if (conf->upstream_config.cache_use_stale & NGX_HTTP_UPSTREAM_FT_ERROR) { conf->upstream_config.cache_use_stale |= NGX_HTTP_UPSTREAM_FT_NOLIVE; } if (conf->upstream_config.cache_methods == 0) { conf->upstream_config.cache_methods = prev->upstream_config.cache_methods; } conf->upstream_config.cache_methods |= NGX_HTTP_GET | NGX_HTTP_HEAD; ngx_conf_merge_ptr_value(conf->upstream_config.cache_bypass, prev->upstream_config.cache_bypass, NULL); ngx_conf_merge_ptr_value(conf->upstream_config.no_cache, prev->upstream_config.no_cache, NULL); ngx_conf_merge_ptr_value(conf->upstream_config.cache_valid, prev->upstream_config.cache_valid, NULL); if (conf->cache_key.value.data == NULL) { conf->cache_key = prev->cache_key; } ngx_conf_merge_value(conf->upstream_config.cache_lock, prev->upstream_config.cache_lock, 0); ngx_conf_merge_msec_value(conf->upstream_config.cache_lock_timeout, prev->upstream_config.cache_lock_timeout, 5000); ngx_conf_merge_value(conf->upstream_config.cache_revalidate, prev->upstream_config.cache_revalidate, 0); #if NGINX_VERSION_NUM >= 1007008 ngx_conf_merge_msec_value(conf->upstream_config.cache_lock_age, prev->upstream_config.cache_lock_age, 5000); #endif #if NGINX_VERSION_NUM >= 1006000 ngx_conf_merge_value(conf->upstream_config.cache_revalidate, prev->upstream_config.cache_revalidate, 0); #endif #endif ngx_conf_merge_value(conf->upstream_config.pass_request_headers, prev->upstream_config.pass_request_headers, 1); ngx_conf_merge_value(conf->upstream_config.pass_request_body, prev->upstream_config.pass_request_body, 1); ngx_conf_merge_value(conf->upstream_config.intercept_errors, prev->upstream_config.intercept_errors, 0); hash.max_size = 512; hash.bucket_size = ngx_align(64, ngx_cacheline_size); hash.name = "passenger_hide_headers_hash"; if 
(ngx_http_upstream_hide_headers_hash(cf, &conf->upstream_config, &prev->upstream_config, headers_to_hide, &hash) != NGX_OK) { return NGX_CONF_ERROR; } if (conf->upstream_config.upstream == NULL) { conf->upstream_config.upstream = prev->upstream_config.upstream; } if (conf->autogenerated.enabled == 1 /* and not NGX_CONF_UNSET */ && passenger_main_conf.autogenerated.root_dir.len != 0 && clcf->handler == NULL /* no handler set by other modules */) { clcf->handler = passenger_content_handler; } conf->autogenerated.headers_hash_bucket_size = ngx_align( conf->autogenerated.headers_hash_bucket_size, ngx_cacheline_size); hash.max_size = conf->autogenerated.headers_hash_max_size; hash.bucket_size = conf->autogenerated.headers_hash_bucket_size; hash.name = "passenger_headers_hash"; if (merge_headers(cf, conf, prev) != NGX_OK) { return NGX_CONF_ERROR; } if (serialize_loc_conf_to_headers(cf, conf) != NGX_OK) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "cannot create " PROGRAM_NAME " configuration serialization cache"); return NGX_CONF_ERROR; } return NGX_CONF_OK; } static ngx_int_t merge_headers(ngx_conf_t *cf, passenger_loc_conf_t *conf, passenger_loc_conf_t *prev) { u_char *p; size_t size; uintptr_t *code; ngx_uint_t i; ngx_array_t headers_names, headers_merged; ngx_keyval_t *src, *s; ngx_hash_key_t *hk; ngx_hash_init_t hash; ngx_http_script_compile_t sc; ngx_http_script_copy_code_t *copy; if (conf->autogenerated.headers_source == NULL) { conf->flushes = prev->flushes; conf->headers_set_len = prev->headers_set_len; conf->headers_set = prev->headers_set; conf->headers_set_hash = prev->headers_set_hash; conf->autogenerated.headers_source = prev->autogenerated.headers_source; } if (conf->headers_set_hash.buckets #if (NGX_HTTP_CACHE) #if NGINX_VERSION_NUM >= 1007009 && ((conf->upstream_config.cache == NGX_CONF_UNSET) == (prev->upstream_config.cache == NGX_CONF_UNSET)) #else && ((conf->upstream_config.cache == NGX_CONF_UNSET_PTR) == (prev->upstream_config.cache == 
NGX_CONF_UNSET_PTR)) #endif #endif ) { return NGX_OK; } if (ngx_array_init(&headers_names, cf->temp_pool, 4, sizeof(ngx_hash_key_t)) != NGX_OK) { return NGX_ERROR; } if (ngx_array_init(&headers_merged, cf->temp_pool, 4, sizeof(ngx_keyval_t)) != NGX_OK) { return NGX_ERROR; } if (conf->autogenerated.headers_source == NULL) { conf->autogenerated.headers_source = ngx_array_create(cf->pool, 4, sizeof(ngx_keyval_t)); if (conf->autogenerated.headers_source == NULL) { return NGX_ERROR; } } conf->headers_set_len = ngx_array_create(cf->pool, 64, 1); if (conf->headers_set_len == NULL) { return NGX_ERROR; } conf->headers_set = ngx_array_create(cf->pool, 512, 1); if (conf->headers_set == NULL) { return NGX_ERROR; } src = conf->autogenerated.headers_source->elts; for (i = 0; i < conf->autogenerated.headers_source->nelts; i++) { s = ngx_array_push(&headers_merged); if (s == NULL) { return NGX_ERROR; } *s = src[i]; } src = headers_merged.elts; for (i = 0; i < headers_merged.nelts; i++) { hk = ngx_array_push(&headers_names); if (hk == NULL) { return NGX_ERROR; } hk->key = src[i].key; hk->key_hash = ngx_hash_key_lc(src[i].key.data, src[i].key.len); hk->value = (void *) 1; if (src[i].value.len == 0) { continue; } if (ngx_http_script_variables_count(&src[i].value) == 0) { copy = ngx_array_push_n(conf->headers_set_len, sizeof(ngx_http_script_copy_code_t)); if (copy == NULL) { return NGX_ERROR; } copy->code = (ngx_http_script_code_pt) (void *) ngx_http_script_copy_len_code; copy->len = src[i].key.len + sizeof(": ") - 1 + src[i].value.len + sizeof(CRLF) - 1; size = (sizeof(ngx_http_script_copy_code_t) + src[i].key.len + sizeof(": ") - 1 + src[i].value.len + sizeof(CRLF) - 1 + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1); copy = ngx_array_push_n(conf->headers_set, size); if (copy == NULL) { return NGX_ERROR; } copy->code = ngx_http_script_copy_code; copy->len = src[i].key.len + sizeof(": ") - 1 + src[i].value.len + sizeof(CRLF) - 1; p = (u_char *) copy + 
sizeof(ngx_http_script_copy_code_t); p = ngx_cpymem(p, src[i].key.data, src[i].key.len); *p++ = ':'; *p++ = ' '; p = ngx_cpymem(p, src[i].value.data, src[i].value.len); *p++ = CR; *p = LF; } else { copy = ngx_array_push_n(conf->headers_set_len, sizeof(ngx_http_script_copy_code_t)); if (copy == NULL) { return NGX_ERROR; } copy->code = (ngx_http_script_code_pt) (void *) ngx_http_script_copy_len_code; copy->len = src[i].key.len + sizeof(": ") - 1; size = (sizeof(ngx_http_script_copy_code_t) + src[i].key.len + sizeof(": ") - 1 + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1); copy = ngx_array_push_n(conf->headers_set, size); if (copy == NULL) { return NGX_ERROR; } copy->code = ngx_http_script_copy_code; copy->len = src[i].key.len + sizeof(": ") - 1; p = (u_char *) copy + sizeof(ngx_http_script_copy_code_t); p = ngx_cpymem(p, src[i].key.data, src[i].key.len); *p++ = ':'; *p = ' '; ngx_memzero(&sc, sizeof(ngx_http_script_compile_t)); sc.cf = cf; sc.source = &src[i].value; sc.flushes = &conf->flushes; sc.lengths = &conf->headers_set_len; sc.values = &conf->headers_set; if (ngx_http_script_compile(&sc) != NGX_OK) { return NGX_ERROR; } copy = ngx_array_push_n(conf->headers_set_len, sizeof(ngx_http_script_copy_code_t)); if (copy == NULL) { return NGX_ERROR; } copy->code = (ngx_http_script_code_pt) (void *) ngx_http_script_copy_len_code; copy->len = sizeof(CRLF) - 1; size = (sizeof(ngx_http_script_copy_code_t) + sizeof(CRLF) - 1 + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1); copy = ngx_array_push_n(conf->headers_set, size); if (copy == NULL) { return NGX_ERROR; } copy->code = ngx_http_script_copy_code; copy->len = sizeof(CRLF) - 1; p = (u_char *) copy + sizeof(ngx_http_script_copy_code_t); *p++ = CR; *p = LF; } code = ngx_array_push_n(conf->headers_set_len, sizeof(uintptr_t)); if (code == NULL) { return NGX_ERROR; } *code = (uintptr_t) NULL; code = ngx_array_push_n(conf->headers_set, sizeof(uintptr_t)); if (code == NULL) { return NGX_ERROR; } *code = (uintptr_t) 
NULL; } code = ngx_array_push_n(conf->headers_set_len, sizeof(uintptr_t)); if (code == NULL) { return NGX_ERROR; } *code = (uintptr_t) NULL; hash.hash = &conf->headers_set_hash; hash.key = ngx_hash_key_lc; hash.max_size = conf->autogenerated.headers_hash_max_size; hash.bucket_size = conf->autogenerated.headers_hash_bucket_size; hash.name = "passenger_headers_hash"; hash.pool = cf->pool; hash.temp_pool = NULL; return ngx_hash_init(&hash, headers_names.elts, headers_names.nelts); } static ngx_int_t merge_string_array(ngx_conf_t *cf, ngx_array_t **prev, ngx_array_t **conf) { ngx_str_t *prev_elems, *elem; ngx_uint_t i; if (*prev != NGX_CONF_UNSET_PTR) { if (*conf == NGX_CONF_UNSET_PTR) { *conf = ngx_array_create(cf->pool, 4, sizeof(ngx_str_t)); if (*conf == NULL) { return NGX_ERROR; } } prev_elems = (ngx_str_t *) (*prev)->elts; for (i = 0; i < (*prev)->nelts; i++) { elem = (ngx_str_t *) ngx_array_push(*conf); if (elem == NULL) { return NGX_ERROR; } *elem = prev_elems[i]; } } return NGX_OK; } ngx_int_t passenger_postprocess_config(ngx_conf_t *cf) { ngx_http_conf_ctx_t *http_ctx; passenger_loc_conf_t *toplevel_plcf; ngx_pool_cleanup_t *manifest_cleanup; char *dump_path, *dump_content; FILE *dump_file; u_char *end; http_ctx = cf->ctx; toplevel_plcf = http_ctx->loc_conf[ngx_http_passenger_module.ctx_index]; passenger_main_conf.default_ruby = toplevel_plcf->autogenerated.ruby; if (passenger_main_conf.default_ruby.len == 0) { passenger_main_conf.default_ruby.data = (u_char *) DEFAULT_RUBY; passenger_main_conf.default_ruby.len = strlen(DEFAULT_RUBY); } passenger_main_conf.manifest = generate_config_manifest(cf, toplevel_plcf); manifest_cleanup = ngx_pool_cleanup_add(cf->pool, 0); manifest_cleanup->handler = (ngx_pool_cleanup_pt) psg_json_value_free; manifest_cleanup->data = passenger_main_conf.manifest; if (passenger_main_conf.autogenerated.dump_config_manifest.len != 0) { dump_path = (char *) ngx_pnalloc(cf->temp_pool, 
passenger_main_conf.autogenerated.dump_config_manifest.len + 1); end = ngx_copy(dump_path, passenger_main_conf.autogenerated.dump_config_manifest.data, passenger_main_conf.autogenerated.dump_config_manifest.len); *end = '\0'; dump_file = fopen(dump_path, "w"); if (dump_file != NULL) { dump_content = psg_json_value_to_styled_string( passenger_main_conf.manifest); ssize_t ret = fwrite(dump_content, 1, strlen(dump_content), dump_file); (void) ret; // Ignore compilation warning. fclose(dump_file); free(dump_content); } else { ngx_conf_log_error(NGX_LOG_ALERT, cf, 0, "Error dumping " PROGRAM_NAME " configuration manifest to %V", &passenger_main_conf.autogenerated.dump_config_manifest); } } return NGX_OK; } static int string_keyval_has_key(ngx_array_t *table, ngx_str_t *key) { ngx_keyval_t *elems; ngx_uint_t i; elems = (ngx_keyval_t *) table->elts; for (i = 0; i < table->nelts; i++) { if (elems[i].key.len == key->len && memcmp(elems[i].key.data, key->data, key->len) == 0) { return 1; } } return 0; } static ngx_int_t merge_string_keyval_table(ngx_conf_t *cf, ngx_array_t **prev, ngx_array_t **conf) { ngx_keyval_t *prev_elems, *elem; ngx_uint_t i; if (*prev != NULL) { if (*conf == NULL) { *conf = ngx_array_create(cf->pool, 4, sizeof(ngx_keyval_t)); if (*conf == NULL) { return NGX_ERROR; } } prev_elems = (ngx_keyval_t *) (*prev)->elts; for (i = 0; i < (*prev)->nelts; i++) { if (!string_keyval_has_key(*conf, &prev_elems[i].key)) { elem = (ngx_keyval_t *) ngx_array_push(*conf); if (elem == NULL) { return NGX_ERROR; } *elem = prev_elems[i]; } } } return NGX_OK; } #ifndef PASSENGER_IS_ENTERPRISE static char * passenger_enterprise_only(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { return ": this feature is only available in Phusion Passenger Enterprise. " "You are currently running the open source Phusion Passenger. 
" "Please learn more about and/or buy Phusion Passenger Enterprise at https://www.phusionpassenger.com/enterprise ;"; } #endif static char * passenger_enabled(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { passenger_loc_conf_t *passenger_conf = conf; ngx_http_core_loc_conf_t *clcf; ngx_str_t *value; ngx_url_t upstream_url; passenger_conf->autogenerated.enabled_explicitly_set = 1; record_loc_conf_source_location(cf, passenger_conf, &passenger_conf->autogenerated.enabled_source_file, &passenger_conf->autogenerated.enabled_source_line); value = cf->args->elts; if (ngx_strcasecmp(value[1].data, (u_char *) "on") == 0) { passenger_conf->autogenerated.enabled = 1; /* Register a placeholder value as upstream address. The real upstream * address (the Passenger core socket filename) will be set while processing * requests, because we can't start the watchdog (and thus the Passenger core) * until config loading is done. */ ngx_memzero(&upstream_url, sizeof(ngx_url_t)); upstream_url.url = pp_placeholder_upstream_address; upstream_url.no_resolve = 1; passenger_conf->upstream_config.upstream = ngx_http_upstream_add(cf, &upstream_url, 0); if (passenger_conf->upstream_config.upstream == NULL) { return NGX_CONF_ERROR; } clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); clcf->handler = passenger_content_handler; if (clcf->name.data != NULL && clcf->name.data[clcf->name.len - 1] == '/') { clcf->auto_redirect = 1; } } else if (ngx_strcasecmp(value[1].data, (u_char *) "off") == 0) { passenger_conf->autogenerated.enabled = 0; } else { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "\"passenger_enabled\" must be either set to \"on\" " "or \"off\""); return NGX_CONF_ERROR; } return NGX_CONF_OK; } static char * passenger_conf_set_request_buffering(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { #ifdef NGINX_NO_SEND_REQUEST_BODY_INFINITE_LOOP_BUG passenger_loc_conf_t *passenger_conf = conf; passenger_conf->autogenerated.upstream_config_request_buffering_explicitly_set = 1; 
record_loc_conf_source_location(cf, passenger_conf, &passenger_conf->autogenerated.upstream_config_request_buffering_source_file, &passenger_conf->autogenerated.upstream_config_request_buffering_source_line); return ngx_conf_set_flag_slot(cf, cmd, conf); #else return "config cannot be set in Nginx < 1.15.3 due to this bug: https://trac.nginx.org/nginx/ticket/1618"; #endif /* NGINX_NO_SEND_REQUEST_BODY_INFINITE_LOOP_BUG */ } static char * rails_framework_spawner_idle_time(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_conf_log_error(NGX_LOG_ALERT, cf, 0, "The 'rails_framework_spawner_idle_time' " "directive is deprecated; please set 'passenger_max_preloader_idle_time' instead"); return NGX_CONF_OK; } static char * passenger_use_global_queue(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_conf_log_error(NGX_LOG_ALERT, cf, 0, "The 'passenger_use_global_queue' " "directive is obsolete and doesn't do anything anymore. Global queuing " "is now always enabled. Please remove this configuration directive."); return NGX_CONF_OK; } static char * passenger_obsolete_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_conf_log_error(NGX_LOG_ALERT, cf, 0, "The '%V' directive is obsolete " "and doesn't do anything anymore.", &cmd->name); return NGX_CONF_OK; } PsgJsonValue * psg_json_value_set_str_array(PsgJsonValue *doc, const char *name, ngx_array_t *ary) { PsgJsonValue *subdoc = psg_json_value_new_with_type(PSG_JSON_VALUE_TYPE_ARRAY); PsgJsonValue *elem, *result; ngx_str_t *values; ngx_uint_t i; if (ary != NULL) { values = (ngx_str_t *) ary->elts; for (i = 0; i < ary->nelts; i++) { elem = psg_json_value_new_str( (const char *) values[i].data, values[i].len); psg_json_value_append_val(subdoc, elem); psg_json_value_free(elem); } } result = psg_json_value_set_value(doc, name, -1, subdoc); psg_json_value_free(subdoc); return result; } PsgJsonValue * psg_json_value_set_str_keyval(PsgJsonValue *doc, const char *name, ngx_array_t *ary) { PsgJsonValue *subdoc = 
psg_json_value_new_with_type(PSG_JSON_VALUE_TYPE_OBJECT); PsgJsonValue *elem, *result; ngx_keyval_t *values; ngx_uint_t i; if (ary != NULL) { values = (ngx_keyval_t *) ary->elts; for (i = 0; i < ary->nelts; i++) { elem = psg_json_value_new_str( (const char *) values[i].value.data, values[i].value.len); psg_json_value_set_value(subdoc, (const char *) values[i].key.data, values[i].key.len, elem); psg_json_value_free(elem); } } result = psg_json_value_set_value(doc, name, -1, subdoc); psg_json_value_free(subdoc); return result; } const ngx_command_t passenger_commands[] = { #include "ConfigGeneral/AutoGeneratedDefinitions.c" ngx_null_command };
233706.c
/***********************************************************************************************************************
* DISCLAIMER
* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products.
* No other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all
* applicable laws, including copyright laws.
* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING THIS SOFTWARE, WHETHER EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM EXTENT PERMITTED NOT PROHIBITED BY
* LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES SHALL BE LIABLE FOR ANY DIRECT,
* INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS SOFTWARE, EVEN IF RENESAS OR
* ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability
* of this software. By using this software, you agree to the additional terms and conditions found by accessing the
* following link:
* http://www.renesas.com/disclaimer
*
* Copyright (C) 2011, 2017 Renesas Electronics Corporation. All rights reserved.
***********************************************************************************************************************/

/***********************************************************************************************************************
* File Name    : r_cg_it.c
* Version      : CodeGenerator for RL78/G13 V2.05.00.06 [10 Nov 2017]
* Device(s)    : R5F100LE
* Tool-Chain   : CCRL
* Description  : This file implements device driver for IT module.
* Creation Date: ***********************************************************************************************************************/ /*********************************************************************************************************************** Includes ***********************************************************************************************************************/ #include "r_cg_macrodriver.h" #include "r_cg_it.h" /* Start user code for include. Do not edit comment generated here */ /* End user code. Do not edit comment generated here */ #include "r_cg_userdefine.h" /*********************************************************************************************************************** Pragma directive ***********************************************************************************************************************/ /* Start user code for pragma. Do not edit comment generated here */ /* End user code. Do not edit comment generated here */ /*********************************************************************************************************************** Global variables and functions ***********************************************************************************************************************/ /* Start user code for global. Do not edit comment generated here */ /* End user code. Do not edit comment generated here */ /*********************************************************************************************************************** * Function Name: R_IT_Create * Description : This function initializes the IT module. 
* Arguments    : None
* Return Value : None
***********************************************************************************************************************/
void R_IT_Create(void)
{
    RTCEN = 1U;                           /* supply IT clock (peripheral enable bit shared with RTC) */
    ITMC = _0000_IT_OPERATION_DISABLE;    /* disable IT operation while it is being configured */
    ITMK = 1U;                            /* disable (mask) the INTIT interrupt */
    ITIF = 0U;                            /* clear any pending INTIT interrupt flag */
    /* Set INTIT low priority (ITPR1:ITPR0 = 1:1 is the lowest level) */
    ITPR1 = 1U;
    ITPR0 = 1U;
    /* Load the interval-timer compare value (0x000E -> 15 counts per interrupt).
     * NOTE(review): the resulting period depends on the IT clock source selected
     * by the code generator -- confirm against r_cg_it.h / tool settings. */
    ITMC = _000E_ITMCMP_VALUE;
}

/***********************************************************************************************************************
* Function Name: R_IT_Start
* Description  : This function starts IT module operation.
* Arguments    : None
* Return Value : None
***********************************************************************************************************************/
void R_IT_Start(void)
{
    ITIF = 0U;    /* clear INTIT interrupt flag so a stale request can't fire immediately */
    ITMK = 0U;    /* enable (unmask) the INTIT interrupt */
    ITMC |= _8000_IT_OPERATION_ENABLE;    /* set the RINTE bit: start counting */
}

/***********************************************************************************************************************
* Function Name: R_IT_Stop
* Description  : This function stops IT module operation.
* Arguments    : None
* Return Value : None
***********************************************************************************************************************/
void R_IT_Stop(void)
{
    ITMK = 1U;    /* disable (mask) the INTIT interrupt */
    ITIF = 0U;    /* clear INTIT interrupt flag */
    /* Clear only the RINTE (operation-enable) bit; the compare value is preserved */
    ITMC &= (uint16_t)~_8000_IT_OPERATION_ENABLE;    /* disable IT operation */
}

/* Start user code for adding. Do not edit comment generated here */
/* End user code. Do not edit comment generated here */
214040.c
// Inheritable to control the listings of members in guilds.
// Basically automated through initiate and remove commands
// this inheritable allows wiz control over messed up lists,
// and allows the lists to be read.
// If a new guild is added, this must be changed, specifically the #define
// below to include the new guild.
// created by Grendel@ShadowGate 3/7/98
#include <std.h>
#include <daemons.h>

inherit ROOM;

// guild_name: key used for all GUILDS_D lookups; guild_object: path of the
// insignia object handed out by "request".
string guild_name, guild_object;

void sort(string *stuff);
private void swap(int i, int j, string *stuff);
void set_guild_name(string str);
void set_guild_object(string str);

// Setter: record which guild this room administers.
void set_guild_name(string str)
{
    guild_name = str;
    return;
}

// Setter: record the file path of the guild's insignia object.
void set_guild_object(string str){
    guild_object = str;
    return;
}

void create()
{
    ::create();
}

// Register player commands; leader/HC commands are only added when the
// current player has the matching rank (or is a wiz/avatar).
void init()
{
    ::init();
    add_action("read_fcn", "read");
    add_action("return_obj", "request");
    if(GUILDS_D->is_leader(guild_name, TPQN) || wizardp(TP)){
        add_action("appoint_hc", "appoint");
        add_action("demote_hc", "demote");
    }
    if(GUILDS_D->is_hc(guild_name,TPQN) || avatarp(TP)){
        add_action("add_fcn", "add");
        add_action("remove_fcn", "remove");
    }
}

// "read sign" shows usage help; "read list" prints leader, HC and member
// rosters from GUILDS_D, each alphabetized via sort().
int read_fcn(string str)
{
    string guild;
    string *list;
    int i;
    if (!str) return notify_fail("Read what?\n");
    if (str != "sign" && str != "list")
        return notify_fail("That is not here.\n");
    if (str == "sign") {
        tell_object(TP, "%^BOLD%^%^RED%^In order to read a listing of your fellow guild members, type 'read list'. If you are not on this list, or know someone that should be, please contact a wiz.%^RESET%^\n");
        tell_object(TP, "%^BOLD%^%^YELLOW%^To get your guild object back, type 'request'");
        if(GUILDS_D->is_leader(guild_name, TPQN) || wizardp(TP)){
            tell_object(TP, "%^BOLD%^%^BLUE%^Leader commands:\n");
            tell_object(TP, "%^BOLD%^%^BLUE%^ appoint <person>: Assigns <person> as HC");
            tell_object(TP, "%^BOLD%^%^BLUE%^ demote <person>: Removes <person> as HC");
            tell_object(TP, "%^BOLD%^%^BLUE%^ add <name>: adds named person to the guild's list,");
            tell_object(TP, "%^BOLD%^%^BLUE%^ remove <name>: removes name from the guild's list.");
        }
        return 1;
    }
    if (str == "list") {
        if (!guild_name) {
            // Misconfigured room: set_guild_name() was never called.
            tell_object(TP, "%^BOLD%^ERROR: Notify a Wiz please!");
            return 1;
        }
        if (member_array(guild_name, (string *)GUILDS_D->query_all_guilds()) == -1) {
            tell_object(TP, "ERROR: Guild name is not one of the legal guilds.");
            return 1;
        }
        list = (string *)GUILDS_D->query_guild_members(guild_name);
        if (list == ({})) {
            tell_object(TP, "%^BOLD%^The list is currently empty. Please contact a wiz.");
            return 1;
        }
        tell_object(TP, "For the guild "+guild_name+":");
        tell_object(TP, "Your leader is: "+capitalize((string)GUILDS_D->query_guild_leader(guild_name)));
        tell_object(TP, "Your HC are:");
        list = (string *)GUILDS_D->query_guild_hc(guild_name);
        sort(list);
        for (i=0;i<sizeof(list);i++) {
            tell_object(TP, "%^BOLD%^ "+capitalize(list[i]));
        }
        list = (string *)GUILDS_D->query_guild_members(guild_name);
        tell_object(TP, "%^BOLD%^%^BLUE%^The members of your guild are:");
        sort(list);
        for (i=0;i<sizeof(list);i++) {
            tell_object(TP, "%^BOLD%^ "+capitalize(list[i]));
        }
        return 1;
    }
}

// HC command: add a named player to the guild's member list in GUILDS_D.
int add_fcn(string str)
{
    string who;
    if (!str) return notify_fail("Add who? More info please.\n");
    who = lower_case(str);
    if (member_array(guild_name, (string *)GUILDS_D->query_all_guilds()) == -1) {
        tell_object(TP, "That guild does not exist.");
        return 1;
    }
    if (GUILDS_D->is_member(guild_name, who)) {
        tell_object(TP, "That person is already on the list.");
        return 1;
    }
    GUILDS_D->add_guild_member(guild_name, who);
    tell_object(TP, "%^BOLD%^ "+who+" has been added to guild list "+guild_name+".");
    return 1;
}

// HC command: remove a member from the daemon list, and — if the player is
// currently logged in — strip the guild from their live player object too.
int remove_fcn(string str)
{
    string who;
    object ob;
    if (!str) return notify_fail("Remove who from what? More info please.\n");
    who = lower_case(str);
    if (member_array(guild_name, (string *)GUILDS_D->query_all_guilds()) == -1) {
        tell_object(TP, "That guild does not exist.");
        return 1;
    }
    if (!GUILDS_D->is_member(guild_name, who)) {
        tell_object(TP, "That person not on the list.");
        return 1;
    }
    GUILDS_D->remove_guild_member(guild_name, who);
    if(ob = find_player(who)){
        ob->remove_guild(guild_name);
    }
    tell_object(TP, "%^BOLD%^ "+who+" has been removed from guild list "+guild_name+".");
    return 1;
}

// Leader command: promote an existing member to HC.
int appoint_hc(string str){
    if(!str) return notify_fail("Who?\n");
    str = lower_case(str);
    if(!GUILDS_D->is_leader(guild_name, TPQN) && !wizardp(TP)) return 0;
    if(GUILDS_D->is_hc(guild_name, str)) return notify_fail("They are already HC.\n");
    if(!GUILDS_D->is_member(guild_name, str)) return notify_fail("They are not a member of this guild.");
    GUILDS_D->add_guild_hc(guild_name, str);
    tell_object(TP, str+" is now a HC of "+guild_name+".");
    return 1;
}

// Leader command: demote an HC.
// NOTE(review): the permission check below has no return/braces, so it only
// gates the is_hc check; a non-leader appears able to fall through to
// remove_guild_hc(). Presumably "return 0;" was intended — confirm and fix.
int demote_hc(string str){
    if(!str) return notify_fail("Who?\n");
    str = lower_case(str);
    if(!GUILDS_D->is_leader(guild_name, TPQN) && !wizardp(TP))
        if(!GUILDS_D->is_hc(guild_name, str)) return notify_fail("They are not HC.\n");
    GUILDS_D->remove_guild_hc(guild_name, str);
    tell_object(TP, str+" is no longer HC of "+guild_name+".");
    return 1;
}

// "request": clone a fresh guild insignia for a member who lost theirs,
// refusing if they already carry one.
int return_obj(string str){
    object ob;
    if(!guild_object || guild_object == "") return notify_fail("There is no object set. Contact a wiz.\n");
    if(!TP->in_guild(guild_name)) return 0;
    ob = new(guild_object);
    if(present(((string *)ob->query_id())[0], TP)) return notify_fail("You already have one!\n");
    tell_object(TP, "You request a new guild insignia");
    ob->move(TP);
    return 1;
}

// Simple in-place bubble sort used to alphabetize the rosters.
void sort(string *stuff)
{
    int i,j;
    for (j=0;j<sizeof(stuff);j++)
        for (i=sizeof(stuff)-1;i>j;i--) {
            if (stuff[i] < stuff[i-1]) {
                swap(i-1,i,stuff);
            }
        }
}

// Exchange two elements of the array being sorted.
private void swap(int i, int j, string* stuff)
{
    string tmp;
    tmp = stuff[i];
    stuff[i]=stuff[j];
    stuff[j]=tmp;
}
999182.c
/* * Copyright (c) 2009-2010 Hypertriton, Inc. <http://hypertriton.com/> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <config/have_sdl.h> #include <core/core.h> #include <core/config.h> #include "geometry.h" #include "surface.h" #include "gui_math.h" #include <stdio.h> #include <string.h> #include <fcntl.h> const char *agBlendFuncNames[] = { "dst+src", "src", "dst", "1-dst", "1-src", NULL }; AG_PixelFormat *agSurfaceFmt = NULL; /* Recommended format for new surfaces */ #define COMPUTE_SHIFTLOSS(mask, shift, loss) \ shift = 0; \ loss = 8; \ if (mask) { \ for (m = mask ; !(m & 0x01); m >>= 1) { \ shift++; \ } \ for (; (m & 0x01); m >>= 1) { \ loss--; \ } \ } /* Specify a packed-pixel format from three 32-bit bitmasks. 
*/ AG_PixelFormat * AG_PixelFormatRGB(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask) { AG_PixelFormat *pf; Uint32 m; Uint32 Amask = 0; if ((pf = TryMalloc(sizeof(AG_PixelFormat))) == NULL) { return (NULL); } pf->BitsPerPixel = bpp; pf->BytesPerPixel = (bpp+7)/8; pf->colorkey = 0; pf->alpha = AG_ALPHA_OPAQUE; pf->palette = NULL; COMPUTE_SHIFTLOSS(Rmask, pf->Rshift, pf->Rloss); COMPUTE_SHIFTLOSS(Gmask, pf->Gshift, pf->Gloss); COMPUTE_SHIFTLOSS(Bmask, pf->Bshift, pf->Bloss); COMPUTE_SHIFTLOSS(Amask, pf->Ashift, pf->Aloss); pf->Rmask = Rmask; pf->Gmask = Gmask; pf->Bmask = Bmask; pf->Amask = 0; return (pf); } /* Specify a packed-pixel format from four 32-bit bitmasks. */ AG_PixelFormat * AG_PixelFormatRGBA(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { AG_PixelFormat *pf; Uint32 m; if ((pf = TryMalloc(sizeof(AG_PixelFormat))) == NULL) { return (NULL); } pf->BitsPerPixel = bpp; pf->BytesPerPixel = (bpp+7)/8; pf->colorkey = 0; pf->alpha = AG_ALPHA_OPAQUE; pf->palette = NULL; COMPUTE_SHIFTLOSS(Rmask, pf->Rshift, pf->Rloss); COMPUTE_SHIFTLOSS(Gmask, pf->Gshift, pf->Gloss); COMPUTE_SHIFTLOSS(Bmask, pf->Bshift, pf->Bloss); COMPUTE_SHIFTLOSS(Amask, pf->Ashift, pf->Aloss); pf->Rmask = Rmask; pf->Gmask = Gmask; pf->Bmask = Bmask; pf->Amask = Amask; return (pf); } /* * Specify an indexed pixel format. If bpp=2, the palette is initialized to * [0 = white] and [1 = black], otherwise the palette is initialized to all * black. 
 */
AG_PixelFormat *
AG_PixelFormatIndexed(int bpp)
{
	AG_PixelFormat *pf;
	AG_Palette *pal;

	if ((pf = TryMalloc(sizeof(AG_PixelFormat))) == NULL) {
		return (NULL);
	}
	pf->BitsPerPixel = bpp;
	pf->BytesPerPixel = (bpp+7)/8;
	pf->colorkey = 0;
	pf->alpha = AG_ALPHA_OPAQUE;

	/* Allocate the palette; roll back all prior allocations on failure. */
	if ((pal = pf->palette = TryMalloc(sizeof(AG_Palette))) == NULL) {
		Free(pf);
		return (NULL);
	}
	pal->nColors = 1<<bpp;                 /* 2^bpp palette entries */
	if ((pal->colors = TryMalloc(pal->nColors*sizeof(AG_Color))) == NULL) {
		Free(pf->palette);
		Free(pf);
		return (NULL);
	}
	/*
	 * NOTE(review): the white/black preset is installed when bpp == 2
	 * (a 4-color format); presumably bpp == 1 (monochrome) was intended,
	 * per the comment above — confirm before changing.
	 */
	if (bpp == 2) {
		pal->colors[0].r = 255;
		pal->colors[0].g = 255;
		pal->colors[0].b = 255;
		pal->colors[1].r = 0;
		pal->colors[1].g = 0;
		pal->colors[1].b = 0;
	} else {
		memset(pal->colors, 0, pal->nColors*sizeof(AG_Color));
	}

	/* Indexed formats carry no packed-channel masks. */
	pf->Rmask = pf->Gmask = pf->Bmask = pf->Amask = 0;
	pf->Rloss = pf->Gloss = pf->Bloss = pf->Aloss = 8;
	pf->Rshift = pf->Gshift = pf->Bshift = pf->Ashift = 0;
	return (pf);
}

/* Return a newly-allocated duplicate of an AG_PixelFormat structure. */
AG_PixelFormat *
AG_PixelFormatDup(const AG_PixelFormat *pf)
{
	AG_PixelFormat *pfd;

	if ((pfd = TryMalloc(sizeof(AG_PixelFormat))) == NULL) {
		return (NULL);
	}
	/* Deep-copy the palette (if any) before the scalar fields. */
	if (pf->palette != NULL) {
		if ((pfd->palette = TryMalloc(sizeof(AG_Palette))) == NULL) {
			goto fail;
		}
		if ((pfd->palette->colors =
		    TryMalloc(pf->palette->nColors*sizeof(AG_Color)))
		    == NULL) {
			Free(pfd->palette);
			goto fail;
		}
		pfd->palette->nColors = pf->palette->nColors;
		memcpy(pfd->palette->colors, pf->palette->colors,
		    pf->palette->nColors*sizeof(AG_Color));
	} else {
		pfd->palette = NULL;
	}
	pfd->BitsPerPixel = pf->BitsPerPixel;
	pfd->BytesPerPixel = pf->BytesPerPixel;
	pfd->colorkey = pf->colorkey;
	pfd->alpha = pf->alpha;
	pfd->Rloss = pf->Rloss;
	pfd->Gloss = pf->Gloss;
	pfd->Bloss = pf->Bloss;
	pfd->Aloss = pf->Aloss;
	pfd->Rshift = pf->Rshift;
	pfd->Gshift = pf->Gshift;
	pfd->Bshift = pf->Bshift;
	pfd->Ashift = pf->Ashift;
	pfd->Rmask = pf->Rmask;
	pfd->Gmask = pf->Gmask;
	pfd->Bmask = pf->Bmask;
	pfd->Amask = pf->Amask;
	return (pfd);
fail:
	Free(pfd);
	return (NULL);
}

/* Release
an AG_PixelFormat structure. */ void AG_PixelFormatFree(AG_PixelFormat *pf) { if (pf->palette != NULL) { Free(pf->palette->colors); Free(pf->palette); } Free(pf); } /* Compare two palettes. */ int AG_PixelFormatComparePalettes(const AG_Palette *pal1, const AG_Palette *pal2) { if (pal1->nColors != pal2->nColors) { return (1); } return memcmp(pal1->colors, pal2->colors, pal1->nColors*sizeof(AG_Color)); } #undef COMPUTE_SHIFTLOSS /* Create a new surface of the specified pixel format. */ AG_Surface * AG_SurfaceNew(enum ag_surface_type type, Uint w, Uint h, const AG_PixelFormat *pf, Uint flags) { AG_Surface *s; if ((s = TryMalloc(sizeof(AG_Surface))) == NULL) { return (NULL); } if ((s->format = AG_PixelFormatDup(pf)) == NULL) { Free(s); return (NULL); } s->type = type; s->flags = flags; s->w = w; s->h = h; s->pitch = w*pf->BytesPerPixel; s->clipRect = AG_RECT(0,0,w,h); if (h*s->pitch > 0) { if ((s->pixels = TryMalloc(h*s->pitch)) == NULL) goto fail; } else { s->pixels = NULL; } return (s); fail: AG_PixelFormatFree(s->format); Free(s); return (NULL); } /* Create an empty surface. */ AG_Surface * AG_SurfaceEmpty(void) { return AG_SurfaceNew(AG_SURFACE_PACKED, 0,0, agSurfaceFmt, 0); } /* Create a new color-index surface of given dimensions and depth. */ AG_Surface * AG_SurfaceIndexed(Uint w, Uint h, int bpp, Uint flags) { AG_PixelFormat *pf; AG_Surface *s; if ((pf = AG_PixelFormatIndexed(bpp)) == NULL) { return (NULL); } s = AG_SurfaceNew(AG_SURFACE_INDEXED, w,h, pf, 0); AG_PixelFormatFree(pf); return (s); } /* Create a new packed-pixel surface with the specified RGB pixel format. */ AG_Surface * AG_SurfaceRGB(Uint w, Uint h, int bpp, Uint flags, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask) { AG_PixelFormat *pf; AG_Surface *s; if ((pf = AG_PixelFormatRGB(bpp, Rmask, Gmask, Bmask)) == NULL) { return (NULL); } s = AG_SurfaceNew(AG_SURFACE_PACKED, w,h, pf, 0); AG_PixelFormatFree(pf); return (s); } /* * Create a new packed-pixel surface with the specified RGBA pixel format. 
* The SRCALPHA flag is set implicitely. */ AG_Surface * AG_SurfaceRGBA(Uint w, Uint h, int bpp, Uint flags, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { AG_PixelFormat *pf; AG_Surface *s; if ((pf = AG_PixelFormatRGBA(bpp, Rmask, Gmask, Bmask, Amask)) == NULL) { return (NULL); } s = AG_SurfaceNew(AG_SURFACE_PACKED, w,h, pf, AG_SRCALPHA); AG_PixelFormatFree(pf); return (s); } /* Create a new surface from pixel data in the specified packed RGB format. */ AG_Surface * AG_SurfaceFromPixelsRGB(const void *pixels, Uint w, Uint h, int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask) { AG_PixelFormat *pf; AG_Surface *s; if ((pf = AG_PixelFormatRGB(bpp, Rmask, Gmask, Bmask)) == NULL) { return (NULL); } s = AG_SurfaceNew(AG_SURFACE_PACKED, w,h, pf, 0); memcpy(s->pixels, pixels, h*s->pitch); AG_PixelFormatFree(pf); return (s); } /* * Create a new surface from pixel data in the specified packed RGBA format. * The SRCALPHA flag is set implicitely. */ AG_Surface * AG_SurfaceFromPixelsRGBA(const void *pixels, Uint w, Uint h, int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { AG_PixelFormat *pf; AG_Surface *s; if ((pf = AG_PixelFormatRGBA(bpp, Rmask, Gmask, Bmask, Amask)) == NULL) { return (NULL); } s = AG_SurfaceNew(AG_SURFACE_PACKED, w,h, pf, AG_SRCALPHA); memcpy(s->pixels, pixels, h*s->pitch); AG_PixelFormatFree(pf); return (s); } /* Set one or more entries in an indexed surface's palette. */ int AG_SurfaceSetPalette(AG_Surface *su, AG_Color *c, Uint offs, Uint count) { Uint i; if (su->type != AG_SURFACE_INDEXED) { AG_SetError("Not an indexed surface"); return (-1); } if (offs >= su->format->palette->nColors || offs+count >= su->format->palette->nColors) { AG_SetError("Bad palette offset/count"); return (-1); } for (i = 0; i < count; i++) { su->format->palette->colors[offs+i] = c[i]; } return (0); } /* Return a newly-allocated duplicate of a surface. 
*/ AG_Surface * AG_SurfaceDup(const AG_Surface *ss) { AG_Surface *s; s = AG_SurfaceNew(ss->type, ss->w, ss->h, ss->format, (ss->flags & AG_SAVED_SURFACE_FLAGS)); if (s == NULL) { return (NULL); } memcpy(s->pixels, ss->pixels, ss->h*ss->pitch); return (s); } /* Return a newly-allocated duplicate of a surface, in specified format. */ AG_Surface * AG_SurfaceConvert(const AG_Surface *ss, const AG_PixelFormat *pf) { AG_Surface *ds; ds = AG_SurfaceNew(ss->type, ss->w, ss->h, pf, (ss->flags & AG_SAVED_SURFACE_FLAGS)); AG_SurfaceCopy(ds, ss); return (ds); } /* * Copy pixel data from a source to a destination surface. Pixel formats * and surface dimensions of the two surfaces may differ. The destination * surface's clipping rectangle and alpha/colorkey settings are ignored. */ void AG_SurfaceCopy(AG_Surface *ds, const AG_Surface *ss) { int w, h, x, y, skipDst, skipSrc; const Uint8 *pSrc; Uint8 *pDst; if (ds->w > ss->w) { w = ss->w; skipDst = (ds->w - ss->w)*ds->format->BytesPerPixel; skipSrc = 0; } else if (ds->w < ss->w) { w = ds->w; skipDst = 0; skipSrc = (ss->w - ds->w)*ss->format->BytesPerPixel; } else { w = ds->w; skipSrc = 0; skipDst = 0; } h = MIN(ss->h, ds->h); pSrc = (Uint8 *)ss->pixels; pDst = (Uint8 *)ds->pixels; if (AG_PixelFormatCompare(ss->format, ds->format) == 0) { for (y = 0; y < h; y++) { memcpy(pDst, pSrc, w*ds->format->BytesPerPixel); pDst += w*ds->format->BytesPerPixel + skipDst; pSrc += w*ss->format->BytesPerPixel + skipSrc; } } else { /* Format conversion */ Uint32 px; AG_Color C; for (y = 0; y < h; y++) { for (x = 0; x < w; x++) { px = AG_GET_PIXEL(ss,pSrc); C = AG_GetColorRGBA(px, ss->format); AG_PUT_PIXEL(ds,pDst, AG_MapColorRGBA(ds->format, C)); pSrc += ss->format->BytesPerPixel; pDst += ds->format->BytesPerPixel; } pDst += skipDst; pSrc += skipSrc; } } } /* * Copy a region of pixels srcRect from source surface ss to destination * surface ds, at destination coordinates xDst,yDst. It is safe to exceed * the dimensions of ds. 
Unlike AG_SurfaceCopy(), blending and colorkey * tests are done. If srcRect is passed NULL, the entire surface is copied. */ void AG_SurfaceBlit(const AG_Surface *ss, const AG_Rect *srcRect, AG_Surface *ds, int xDst, int yDst) { Uint32 pixel; Uint8 *pSrc, *pDst; AG_Color C; AG_Rect sr, dr; Uint x, y; /* Compute the effective source and destination rectangles. */ if (srcRect != NULL) { sr = *srcRect; if (sr.x < 0) { sr.x = 0; } if (sr.y < 0) { sr.y = 0; } if (sr.x+sr.w >= ss->w) { sr.w = ss->w - sr.x; } if (sr.y+sr.h >= ss->h) { sr.h = ss->h - sr.y; } } else { sr.x = 0; sr.y = 0; sr.w = ss->w; sr.h = ss->h; } dr.x = MAX(xDst, ds->clipRect.x); dr.y = MAX(yDst, ds->clipRect.y); dr.w = (dr.x+sr.w > ds->clipRect.x+ds->clipRect.w) ? (ds->clipRect.x+ds->clipRect.w - dr.x) : sr.w; dr.h = (dr.y+sr.h > ds->clipRect.y+ds->clipRect.h) ? (ds->clipRect.y+ds->clipRect.h - dr.y) : sr.h; /* XXX TODO optimized cases */ /* XXX TODO per-surface alpha */ for (y = 0; y < dr.h; y++) { pSrc = (Uint8 *)ss->pixels + (sr.y+y)*ss->pitch; pDst = (Uint8 *)ds->pixels + (dr.y+y)*ds->pitch + dr.x*ds->format->BytesPerPixel; for (x = 0; x < dr.w; x++) { pixel = AG_GET_PIXEL(ss, pSrc); if ((ss->flags & AG_SRCCOLORKEY) && (ss->format->colorkey == pixel)) { pSrc += ss->format->BytesPerPixel; pDst += ds->format->BytesPerPixel; continue; } C = AG_GetColorRGBA(pixel, ss->format); if ((C.a != AG_ALPHA_OPAQUE) && (ss->flags & AG_SRCALPHA)) { AG_SurfaceBlendPixel(ds, pDst, C, AG_ALPHA_SRC); } else { AG_PUT_PIXEL(ds, pDst, AG_MapColorRGB(ds->format, C)); } pSrc += ss->format->BytesPerPixel; pDst += ds->format->BytesPerPixel; } } } /* Resize a surface; pixels are left uninitialized. 
*/ int AG_SurfaceResize(AG_Surface *s, Uint w, Uint h) { Uint8 *pixelsNew; int pitchNew = w*s->format->BytesPerPixel; if ((pixelsNew = TryRealloc(s->pixels, h*pitchNew)) == NULL) { return (-1); } s->pixels = pixelsNew; s->pitch = pitchNew; s->w = w; s->h = h; s->clipRect = AG_RECT(0,0,w,h); return (0); } /* Free the specified surface. */ void AG_SurfaceFree(AG_Surface *s) { AG_PixelFormatFree(s->format); Free(s->pixels); Free(s); } /* * Blend the specified components with the pixel at s:[x,y], using the * given alpha function. No clipping is done. */ void AG_SurfaceBlendPixel(AG_Surface *s, Uint8 *pDst, AG_Color Cnew, AG_BlendFn fn) { Uint32 pxDst; AG_Color Cdst; Uint8 a; pxDst = AG_GET_PIXEL(s, pDst); if ((s->flags & AG_SRCCOLORKEY) && (pxDst == s->format->colorkey)) { AG_SurfacePutPixel(s, pDst, AG_MapColorRGBA(s->format, Cnew)); } else { Cdst = AG_GetColorRGBA(pxDst, s->format); switch (fn) { case AG_ALPHA_DST: a = Cdst.a; break; case AG_ALPHA_SRC: a = Cnew.a; break; case AG_ALPHA_ZERO: a = 0; break; case AG_ALPHA_OVERLAY: a = (Uint8)((Cdst.a+Cnew.a) > 255) ? 255 : (Cdst.a+Cnew.a); break; case AG_ALPHA_ONE_MINUS_DST: a = 255-Cdst.a; break; case AG_ALPHA_ONE_MINUS_SRC: a = 255-Cnew.a; break; case AG_ALPHA_ONE: default: a = 255; break; } AG_SurfacePutPixel(s, pDst, AG_MapPixelRGBA(s->format, (((Cnew.r - Cdst.r)*Cnew.a) >> 8) + Cdst.r, (((Cnew.g - Cdst.g)*Cnew.a) >> 8) + Cdst.g, (((Cnew.b - Cdst.b)*Cnew.a) >> 8) + Cdst.b, Cdst.a)); } } /* * Obtain the hue/saturation/value of a given RGB triplet. * Note that the hue is lost as saturation approaches 0. */ void AG_RGB2HSV(Uint8 r, Uint8 g, Uint8 b, float *h, float *s, float *v) { float vR, vG, vB; float vMin, vMax, deltaMax; float deltaR, deltaG, deltaB; vR = (float)r/255.0F; vG = (float)g/255.0F; vB = (float)b/255.0F; vMin = MIN3(vR, vG, vB); vMax = MAX3(vR, vG, vB); deltaMax = vMax - vMin; *v = vMax; if (deltaMax == 0.0) { /* This is a gray color (zero hue, no saturation). 
*/ *h = 0.0; *s = 0.0; } else { *s = deltaMax / vMax; deltaR = ((vMax - vR)/6.0F + deltaMax/2.0F) / deltaMax; deltaG = ((vMax - vG)/6.0F + deltaMax/2.0F) / deltaMax; deltaB = ((vMax - vB)/6.0F + deltaMax/2.0F) / deltaMax; if (vR == vMax) { *h = (deltaB - deltaG)*360.0F; } else if (vG == vMax) { *h = 120.0F + (deltaR - deltaB)*360.0F; /* 1/3 */ } else if (vB == vMax) { *h = 240.0F + (deltaG - deltaR)*360.0F; /* 2/3 */ } if (*h < 0.0F) (*h)++; if (*h > 360.0F) (*h)--; } } /* Convert hue/saturation/value to RGB. */ void AG_HSV2RGB(float h, float s, float v, Uint8 *r, Uint8 *g, Uint8 *b) { float var[3]; float vR, vG, vB, hv; int iv; if (s == 0.0) { *r = (Uint8)v*255; *g = (Uint8)v*255; *b = (Uint8)v*255; return; } hv = h/60.0F; iv = Floor(hv); var[0] = v * (1.0F - s); var[1] = v * (1.0F - s*(hv - iv)); var[2] = v * (1.0F - s*(1.0F - (hv - iv))); switch (iv) { case 0: vR = v; vG = var[2]; vB = var[0]; break; case 1: vR = var[1]; vG = v; vB = var[0]; break; case 2: vR = var[0]; vG = v; vB = var[2]; break; case 3: vR = var[0]; vG = var[1]; vB = v; break; case 4: vR = var[2]; vG = var[0]; vB = v; break; default: vR = v; vG = var[0]; vB = var[1]; break; } *r = vR*255; *g = vG*255; *b = vB*255; } /* * Allocate a new surface containing a pixmap of ss scaled to wxh. 
* XXX TODO optimize; filtering */ int AG_ScaleSurface(const AG_Surface *ss, Uint16 w, Uint16 h, AG_Surface **ds) { Uint8 *pDst; int x, y; int sameFormat; if (*ds == NULL) { *ds = AG_SurfaceNew( AG_SURFACE_PACKED, w, h, ss->format, ss->flags & (AG_SRCALPHA|AG_SRCCOLORKEY)); if (*ds == NULL) { return (-1); } (*ds)->format->alpha = ss->format->alpha; (*ds)->format->colorkey = ss->format->colorkey; sameFormat = 1; } else { //sameFormat = !AG_PixelFormatCompare((*ds)->format, ss->format); sameFormat = 0; } if (ss->w == w && ss->h == h) { AG_SurfaceCopy(*ds, ss); return (0); } pDst = (Uint8 *)(*ds)->pixels; for (y = 0; y < (*ds)->h; y++) { for (x = 0; x < (*ds)->w; x++) { Uint8 *pSrc = (Uint8 *)ss->pixels + (y*ss->h/(*ds)->h)*ss->pitch + (x*ss->w/(*ds)->w)*ss->format->BytesPerPixel; Uint32 pxSrc, pxDst; AG_Color C; pxSrc = AG_GET_PIXEL(ss,pSrc); if (sameFormat) { pxDst = pxSrc; } else { C = AG_GetColorRGBA(pxSrc, ss->format); pxDst = AG_MapColorRGBA((*ds)->format, C); } AG_SurfacePutPixel((*ds), pDst, pxDst); pDst += (*ds)->format->BytesPerPixel; } } return (0); } /* Set the alpha value of all pixels in a surface where a != 0. */ void AG_SetAlphaPixels(AG_Surface *su, Uint8 alpha) { Uint8 *pDst = (Uint8 *)su->pixels; int x, y; AG_Color C; for (y = 0; y < su->h; y++) { for (x = 0; x < su->w; x++) { /* XXX unnecessary conversion */ C = AG_GetColorRGBA(AG_GET_PIXEL(su,pDst), su->format); if (C.a != 0) { C.a = alpha; } AG_SurfacePutPixel(su, pDst, AG_MapColorRGBA(su->format, C)); pDst += su->format->BytesPerPixel; } } } /* Fill a rectangle with pixels of the specified color. 
 */
void
AG_FillRect(AG_Surface *su, const AG_Rect *rDst, AG_Color C)
{
	int x, y;
	Uint32 px;
	AG_Rect r;

	if (rDst != NULL) {
		/* Clamp the requested rectangle to the clipping rectangle. */
		r = *rDst;
		if (r.x < su->clipRect.x) { r.x = su->clipRect.x; }
		if (r.y < su->clipRect.y) { r.y = su->clipRect.y; }
		if (r.x+r.w >= su->clipRect.x+su->clipRect.w)
			r.w = su->clipRect.x+su->clipRect.w - r.x;
		if (r.y+r.h >= su->clipRect.y+su->clipRect.h)
			r.h = su->clipRect.y+su->clipRect.h - r.y;
	} else {
		/* NULL rectangle: fill the entire clipping rectangle. */
		r = su->clipRect;
	}
	/* Map the color once; all pixels get the same packed value. */
	px = AG_MapColorRGBA(su->format, C);

	/* XXX TODO optimize */
	for (y = 0; y < r.h; y++) {
		for (x = 0; x < r.w; x++) {
			AG_PUT_PIXEL2(su,
			    r.x + x,
			    r.y + y,
			    px);
		}
	}
}

/* Called by AG_MapPixelRGB() for color-index surfaces. */
Uint32
AG_MapPixelIndexedRGB(const AG_PixelFormat *pf, Uint8 r, Uint8 g, Uint8 b)
{
	Uint i, iMin = 0;
	int err, errMin = 255*3;

	/*
	 * Nearest-color search: sum of absolute per-channel differences.
	 * NOTE(review): err is an int but Fabs() presumably returns a
	 * floating-point value, so the sum is truncated on assignment —
	 * confirm Fabs's definition.
	 */
	for (i = 0; i < pf->palette->nColors; i++) {
		AG_Color *C = &pf->palette->colors[i];

		err = Fabs(C->r - r) +
		      Fabs(C->g - g) +
		      Fabs(C->b - b);
		if (err < errMin) {
			errMin = err;
			iMin = i;
		}
	}
	/* Return the index of the closest palette entry. */
	return (Uint32)iMin;
}

/* Called by AG_MapPixelRGBA() for color-index surfaces. */
Uint32
AG_MapPixelIndexedRGBA(const AG_PixelFormat *pf, Uint8 r, Uint8 g, Uint8 b,
    Uint8 a)
{
	Uint i, iMin = 0;
	int err, errMin = 255*4;

	/* Same nearest-color search as above, with alpha included. */
	for (i = 0; i < pf->palette->nColors; i++) {
		AG_Color *C = &pf->palette->colors[i];

		err = Fabs(C->r - r) +
		      Fabs(C->g - g) +
		      Fabs(C->b - b) +
		      Fabs(C->a - a);
		if (err < errMin) {
			errMin = err;
			iMin = i;
		}
	}
	return (Uint32)iMin;
}

#ifdef AG_LEGACY
/* Backward-compatibility wrappers for the pre-1.4 surface API. */
void AG_SurfaceLock(AG_Surface *su) { /* No-op */ }
void AG_SurfaceUnlock(AG_Surface *su) { /* No-op */ }
Uint32 AG_MapRGB(const AG_PixelFormat *pf, Uint8 r, Uint8 g, Uint8 b)
{
	return AG_MapPixelRGB(pf, r,g,b);
}
Uint32 AG_MapRGBA(const AG_PixelFormat *pf, Uint8 r, Uint8 g, Uint8 b,
    Uint8 a)
{
	return AG_MapPixelRGBA(pf, r,g,b,a);
}
AG_Surface *AG_DupSurface(AG_Surface *su)
{
	return AG_SurfaceDup((const AG_Surface *)su);
}
void AG_GetRGB(Uint32 px, const AG_PixelFormat *pf, Uint8 *r, Uint8 *g,
    Uint8 *b)
{
	AG_GetPixelRGB(px, pf, r,g,b);
}
void AG_GetRGBA(Uint32 px, const AG_PixelFormat *pf, Uint8 *r, Uint8 *g,
    Uint8 *b, Uint8 *a)
{
	AG_GetPixelRGBA(px, pf, r,g,b,a);
}
int AG_SamePixelFmt(const AG_Surface *s1, const AG_Surface *s2)
{
	return (AG_PixelFormatCompare(s1->format, s2->format)) == 0;
}
#endif /* AG_LEGACY */
602924.c
/*******************************************************************************
 *
 * Copyright (c) 2000-2003 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * - Neither name of Intel Corporation nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/*!
 * \file
 *
 * Purpose: This file contains functions for copying strings based on
 * different options.
 */

#include "config.h"

#include "upnp.h"
#include "upnputil.h"

#include <string.h>

/*!
 * Copy src into the fixed-size buffer dest (LINE_SIZE bytes), truncating
 * if necessary. The result is always NUL-terminated.
 */
void linecopy(char dest[LINE_SIZE], const char *src)
{
	strncpy(dest, src, LINE_SIZE - 1);
	/* null-terminate if len(src) >= LINE_SIZE. */
	dest[LINE_SIZE - 1] = '\0';
}

/*!
 * Copy src into the fixed-size buffer dest (NAME_SIZE bytes), truncating
 * if necessary. The result is always NUL-terminated.
 */
void namecopy(char dest[NAME_SIZE], const char *src)
{
	strncpy(dest, src, NAME_SIZE - 1);
	/* null-terminate if len(src) >= NAME_SIZE. */
	dest[NAME_SIZE - 1] = '\0';
}

/*!
 * Copy at most srclen bytes of src into dest (LINE_SIZE bytes), truncating
 * to LINE_SIZE - 1 bytes if srclen is larger. strncpy() stops early at a
 * NUL in src, so shorter sources are handled too. The result is always
 * NUL-terminated.
 */
void linecopylen(char dest[LINE_SIZE], const char *src, size_t srclen)
{
	size_t len;

	/* Clamp the copy length so the terminator always fits. */
	len = srclen < (LINE_SIZE - 1) ? srclen : (LINE_SIZE - 1);
	strncpy(dest, src, len);
	dest[len] = '\0';
}
93331.c
/**************************** Module Header ********************************\ * Module Name: sbctl.c * * Copyright (c) 1985 - 1999, Microsoft Corporation * * Scroll bar internal routines * * History: * 11/21/90 JimA Created. * 02-04-91 IanJa Revalidaion added \***************************************************************************/ #include "precomp.h" #pragma hdrstop void CalcSBStuff( PWND pwnd, PSBCALC pSBCalc, BOOL fVert); #define IsScrollBarControl(h) (GETFNID(h) == FNID_SCROLLBAR) /* * Now it is possible to selectively Enable/Disable just one arrow of a Window * scroll bar; Various bits in the 7th word in the rgwScroll array indicates which * one of these arrows are disabled; The following masks indicate which bit of the * word indicates which arrow; */ #define WSB_HORZ_LF 0x0001 // Represents the Left arrow of the horizontal scroll bar. #define WSB_HORZ_RT 0x0002 // Represents the Right arrow of the horizontal scroll bar. #define WSB_VERT_UP 0x0004 // Represents the Up arrow of the vert scroll bar. #define WSB_VERT_DN 0x0008 // Represents the Down arrow of the vert scroll bar. #define WSB_VERT (WSB_VERT_UP | WSB_VERT_DN) #define WSB_HORZ (WSB_HORZ_LF | WSB_HORZ_RT) void DrawCtlThumb(PSBWND); /* * RETURN_IF_PSBTRACK_INVALID: * This macro tests whether the pSBTrack we have is invalid, which can happen * if it gets freed during a callback. * This protects agains the original pSBTrack being freed and no new one * being allocated or a new one being allocated at a different address. * This does not protect against the original pSBTrack being freed and a new * one being allocated at the same address. * If pSBTrack has changed, we assert that there is not already a new one * because we are really not expecting this. 
*/ #define RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd) \ if ((pSBTrack) != PWNDTOPSBTRACK(pwnd)) { \ UserAssert(PWNDTOPSBTRACK(pwnd) == NULL); \ return; \ } /* * REEVALUATE_PSBTRACK * This macro just refreshes the local variable pSBTrack, in case it has * been changed during a callback. After performing this operation, pSBTrack * should be tested to make sure it is not now NULL. */ #if DBG #define REEVALUATE_PSBTRACK(pSBTrack, pwnd, str) \ if ((pSBTrack) != PWNDTOPSBTRACK(pwnd)) { \ RIPMSG3(RIP_WARNING, \ "%s: pSBTrack changed from %#p to %#p", \ (str), (pSBTrack), PWNDTOPSBTRACK(pwnd)); \ } \ (pSBTrack) = PWNDTOPSBTRACK(pwnd) #else #define REEVALUATE_PSBTRACK(pSBTrack, pwnd, str) \ (pSBTrack) = PWNDTOPSBTRACK(pwnd) #endif /***************************************************************************\ * HitTestScrollBar * * 11/15/96 vadimg ported from Memphis sources \***************************************************************************/ int HitTestScrollBar(PWND pwnd, BOOL fVert, POINT pt) { UINT wDisable; int px; BOOL fCtl = IsScrollBarControl(pwnd); SBCALC SBCalc, *pSBCalc; if (fCtl) { wDisable = ((PSBWND)pwnd)->wDisableFlags; } else { #ifdef USE_MIRRORING // // Reflect the click coordinates on the horizontal // scroll bar if the window is mirrored // if (TestWF(pwnd,WEFLAYOUTRTL) && !fVert) { pt.x = pwnd->rcWindow.right - pt.x; } else #endif pt.x -= pwnd->rcWindow.left; pt.y -= pwnd->rcWindow.top; wDisable = GetWndSBDisableFlags(pwnd, fVert); } if ((wDisable & SB_DISABLE_MASK) == SB_DISABLE_MASK) { return HTERROR; } if (fCtl) { pSBCalc = &(((PSBWND)pwnd)->SBCalc); } else { pSBCalc = &SBCalc; CalcSBStuff(pwnd, pSBCalc, fVert); } px = fVert ? 
pt.y : pt.x; if (px < pSBCalc->pxUpArrow) { if (wDisable & LTUPFLAG) { return HTERROR; } return HTSCROLLUP; } else if (px >= pSBCalc->pxDownArrow) { if (wDisable & RTDNFLAG) { return HTERROR; } return HTSCROLLDOWN; } else if (px < pSBCalc->pxThumbTop) { return HTSCROLLUPPAGE; } else if (px < pSBCalc->pxThumbBottom) { return HTSCROLLTHUMB; } else if (px < pSBCalc->pxDownArrow) { return HTSCROLLDOWNPAGE; } return HTERROR; } BOOL _SBGetParms( PWND pwnd, int code, PSBDATA pw, LPSCROLLINFO lpsi) { PSBTRACK pSBTrack; pSBTrack = PWNDTOPSBTRACK(pwnd); if (lpsi->fMask & SIF_RANGE) { lpsi->nMin = pw->posMin; lpsi->nMax = pw->posMax; } if (lpsi->fMask & SIF_PAGE) lpsi->nPage = pw->page; if (lpsi->fMask & SIF_POS) { lpsi->nPos = pw->pos; } if (lpsi->fMask & SIF_TRACKPOS) { if (pSBTrack && (pSBTrack->nBar == code) && (pSBTrack->spwndTrack == pwnd)) { // posNew is in the context of psbiSB's window and bar code lpsi->nTrackPos = pSBTrack->posNew; } else { lpsi->nTrackPos = pw->pos; } } return ((lpsi->fMask & SIF_ALL) ? TRUE : FALSE); } /***************************************************************************\ * GetWndSBDisableFlags * * This returns the scroll bar Disable flags of the scroll bars of a * given Window. * * * History: * 4-18-91 MikeHar Ported for the 31 merge \***************************************************************************/ UINT GetWndSBDisableFlags( PWND pwnd, // The window whose scroll bar Disable Flags are to be returned; BOOL fVert) // If this is TRUE, it means Vertical scroll bar. { PSBINFO pw; if ((pw = pwnd->pSBInfo) == NULL) { RIPERR0(ERROR_NO_SCROLLBARS, RIP_VERBOSE, ""); return 0; } return (fVert ? 
(pw->WSBflags & WSB_VERT) >> 2 : pw->WSBflags & WSB_HORZ); } /***************************************************************************\ * xxxEnableSBCtlArrows() * * This function can be used to selectively Enable/Disable * the arrows of a scroll bar Control * * History: * 04-18-91 MikeHar Ported for the 31 merge \***************************************************************************/ BOOL xxxEnableSBCtlArrows( PWND pwnd, UINT wArrows) { UINT wOldFlags; CheckLock(pwnd); UserAssert(IsWinEventNotifyDeferredOK()); wOldFlags = ((PSBWND)pwnd)->wDisableFlags; // Get the original status if (wArrows == ESB_ENABLE_BOTH) { // Enable both the arrows ((PSBWND)pwnd)->wDisableFlags &= ~SB_DISABLE_MASK; } else { ((PSBWND)pwnd)->wDisableFlags |= wArrows; } /* * Check if the status has changed because of this call */ if (wOldFlags == ((PSBWND)pwnd)->wDisableFlags) return FALSE; /* * Else, redraw the scroll bar control to reflect the new state */ if (IsVisible(pwnd)) xxxInvalidateRect(pwnd, NULL, TRUE); if (FWINABLE()) { UINT wNewFlags = ((PSBWND)pwnd)->wDisableFlags; /* * state change notifications */ if ((wOldFlags & ESB_DISABLE_UP) != (wNewFlags & ESB_DISABLE_UP)) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_CLIENT, INDEX_SCROLLBAR_UP, WEF_USEPWNDTHREAD); } if ((wOldFlags & ESB_DISABLE_DOWN) != (wNewFlags & ESB_DISABLE_DOWN)) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_CLIENT, INDEX_SCROLLBAR_DOWN, WEF_USEPWNDTHREAD); } } return TRUE; } /***************************************************************************\ * xxxEnableWndSBArrows() * * This function can be used to selectively Enable/Disable * the arrows of a Window Scroll bar(s) * * History: * 4-18-91 MikeHar Ported for the 31 merge \***************************************************************************/ BOOL xxxEnableWndSBArrows( PWND pwnd, UINT wSBflags, UINT wArrows) { INT wOldFlags; PSBINFO pw; BOOL bRetValue = FALSE; HDC hdc; CheckLock(pwnd); UserAssert(IsWinEventNotifyDeferredOK()); if 
((pw = pwnd->pSBInfo) != NULL) { wOldFlags = pw->WSBflags; } else { /* * Originally everything is enabled; Check to see if this function is * asked to disable anything; Otherwise, no change in status; So, must * return immediately; */ if(!wArrows) return FALSE; // No change in status! wOldFlags = 0; // Both are originally enabled; if((pw = _InitPwSB(pwnd)) == NULL) // Allocate the pSBInfo for hWnd return FALSE; } if((hdc = _GetWindowDC(pwnd)) == NULL) return FALSE; /* * First Take care of the Horizontal Scroll bar, if one exists. */ if((wSBflags == SB_HORZ) || (wSBflags == SB_BOTH)) { if(wArrows == ESB_ENABLE_BOTH) // Enable both the arrows pw->WSBflags &= ~SB_DISABLE_MASK; else pw->WSBflags |= wArrows; /* * Update the display of the Horizontal Scroll Bar; */ if(pw->WSBflags != wOldFlags) { bRetValue = TRUE; wOldFlags = pw->WSBflags; if (TestWF(pwnd, WFHPRESENT) && !TestWF(pwnd, WFMINIMIZED) && IsVisible(pwnd)) { xxxDrawScrollBar(pwnd, hdc, FALSE); // Horizontal Scroll Bar. } } if (FWINABLE()) { // Left button if ((wOldFlags & ESB_DISABLE_LEFT) != (pw->WSBflags & ESB_DISABLE_LEFT)) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_HSCROLL, INDEX_SCROLLBAR_UP, WEF_USEPWNDTHREAD); } // Right button if ((wOldFlags & ESB_DISABLE_RIGHT) != (pw->WSBflags & ESB_DISABLE_RIGHT)) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_HSCROLL, INDEX_SCROLLBAR_DOWN, WEF_USEPWNDTHREAD); } } } /* * Then take care of the Vertical Scroll bar, if one exists. 
*/ if((wSBflags == SB_VERT) || (wSBflags == SB_BOTH)) { if(wArrows == ESB_ENABLE_BOTH) // Enable both the arrows pw->WSBflags &= ~(SB_DISABLE_MASK << 2); else pw->WSBflags |= (wArrows << 2); /* * Update the display of the Vertical Scroll Bar; */ if(pw->WSBflags != wOldFlags) { bRetValue = TRUE; if (TestWF(pwnd, WFVPRESENT) && !TestWF(pwnd, WFMINIMIZED) && IsVisible(pwnd)) { xxxDrawScrollBar(pwnd, hdc, TRUE); // Vertical Scroll Bar } if (FWINABLE()) { // Up button if ((wOldFlags & (ESB_DISABLE_UP << 2)) != (pw->WSBflags & (ESB_DISABLE_UP << 2))) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_VSCROLL, INDEX_SCROLLBAR_UP, WEF_USEPWNDTHREAD); } // Down button if ((wOldFlags & (ESB_DISABLE_DOWN << 2)) != (pw->WSBflags & (ESB_DISABLE_DOWN << 2))) { xxxWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, OBJID_VSCROLL, INDEX_SCROLLBAR_DOWN, WEF_USEPWNDTHREAD); } } } } _ReleaseDC(hdc); return bRetValue; } /***************************************************************************\ * EnableScrollBar() * * This function can be used to selectively Enable/Disable * the arrows of a scroll bar; It could be used with Windows Scroll * bars as well as scroll bar controls * * History: * 4-18-91 MikeHar Ported for the 31 merge \***************************************************************************/ BOOL xxxEnableScrollBar( PWND pwnd, UINT wSBflags, // Whether it is a Window Scroll Bar; if so, HORZ or VERT? // Possible values are SB_HORZ, SB_VERT, SB_CTL or SB_BOTH UINT wArrows) // Which arrows must be enabled/disabled: // ESB_ENABLE_BOTH = > Enable both arrows. 
// ESB_DISABLE_LTUP = > Disable Left/Up arrow; // ESB_DISABLE_RTDN = > DIsable Right/Down arrow; // ESB_DISABLE_BOTH = > Disable both the arrows; { #define ES_NOTHING 0 #define ES_DISABLE 1 #define ES_ENABLE 2 UINT wOldFlags; UINT wEnableWindow; CheckLock(pwnd); if(wSBflags != SB_CTL) { return xxxEnableWndSBArrows(pwnd, wSBflags, wArrows); } /* * Let us assume that we don't have to call EnableWindow */ wEnableWindow = ES_NOTHING; wOldFlags = ((PSBWND)pwnd)->wDisableFlags & (UINT)SB_DISABLE_MASK; /* * Check if the present state of the arrows is exactly the same * as what the caller wants: */ if (wOldFlags == wArrows) return FALSE ; // If so, nothing needs to be done; /* * Check if the caller wants to disable both the arrows */ if (wArrows == ESB_DISABLE_BOTH) { wEnableWindow = ES_DISABLE; // Yes! So, disable the whole SB Ctl. } else { /* * Check if the caller wants to enable both the arrows */ if(wArrows == ESB_ENABLE_BOTH) { /* * We need to enable the SB Ctl only if it was already disabled. */ if(wOldFlags == ESB_DISABLE_BOTH) wEnableWindow = ES_ENABLE;// EnableWindow(.., TRUE); } else { /* * Now, Caller wants to disable only one arrow; * Check if one of the arrows was already disabled and we want * to disable the other;If so, the whole SB Ctl will have to be * disabled; Check if this is the case: */ if((wOldFlags | wArrows) == ESB_DISABLE_BOTH) wEnableWindow = ES_DISABLE; // EnableWindow(, FALSE); } } if(wEnableWindow != ES_NOTHING) { /* * EnableWindow returns old state of the window; We must return * TRUE only if the Old state is different from new state. 
*/ if(xxxEnableWindow(pwnd, (BOOL)(wEnableWindow == ES_ENABLE))) { return !(TestWF(pwnd, WFDISABLED)); } else { return TestWF(pwnd, WFDISABLED); } } return (BOOL)xxxSendMessage(pwnd, SBM_ENABLE_ARROWS, (DWORD)wArrows, 0); #undef ES_NOTHING #undef ES_DISABLE #undef ES_ENABLE } /***************************************************************************\ * * DrawSize() - * \***************************************************************************/ void FAR DrawSize(PWND pwnd, HDC hdc, int cxFrame,int cyFrame) { int x, y; //HBRUSH hbrSave; if (TestWF(pwnd, WEFLEFTSCROLL)) { x = cxFrame; } else { x = pwnd->rcWindow.right - pwnd->rcWindow.left - cxFrame - SYSMET(CXVSCROLL); } y = pwnd->rcWindow.bottom - pwnd->rcWindow.top - cyFrame - SYSMET(CYHSCROLL); // If we have a scrollbar control, or the sizebox is not associated with // a sizeable window, draw the flat gray sizebox. Otherwise, use the // sizing grip. if (IsScrollBarControl(pwnd)) { if (TestWF(pwnd, SBFSIZEGRIP)) goto DrawSizeGrip; else goto DrawBox; } else if (!SizeBoxHwnd(pwnd)) { DrawBox: { //hbrSave = GreSelectBrush(hdc, SYSHBR(3DFACE)); //GrePatBlt(hdc, x, y, SYSMET(CXVSCROLL), SYSMET(CYHSCROLL), PATCOPY); //GreSelectBrush(hdc, hbrSave); POLYPATBLT PolyData; PolyData.x = x; PolyData.y = y; PolyData.cx = SYSMET(CXVSCROLL); PolyData.cy = SYSMET(CYHSCROLL); PolyData.BrClr.hbr = SYSHBR(3DFACE); GrePolyPatBlt(hdc,PATCOPY,&PolyData,1,PPB_BRUSH); } } else { DrawSizeGrip: // Blt out the grip bitmap. BitBltSysBmp(hdc, x, y, TestWF(pwnd, WEFLEFTSCROLL) ? OBI_NCGRIP_L : OBI_NCGRIP); } } /***************************************************************************\ * xxxSelectColorObjects * * * * History: \***************************************************************************/ HBRUSH xxxGetColorObjects( PWND pwnd, HDC hdc) { HBRUSH hbrRet; CheckLock(pwnd); // Use the scrollbar color even if the scrollbar is disabeld. 
if (!IsScrollBarControl(pwnd)) hbrRet = (HBRUSH)xxxDefWindowProc(pwnd, WM_CTLCOLORSCROLLBAR, (WPARAM)hdc, (LPARAM)HWq(pwnd)); else { // B#12770 - GetControlBrush sends a WM_CTLCOLOR message to the // owner. If the app doesn't process the message, DefWindowProc32 // will always return the appropriate system brush. If the app. // returns an invalid object, GetControlBrush will call DWP for // the default brush. Thus hbrRet doesn't need any validation // here. hbrRet = xxxGetControlBrush(pwnd, hdc, WM_CTLCOLORSCROLLBAR); } return hbrRet; } /***************************************************************************\ * * DrawGroove() * * Draws lines & middle of thumb groove * Note that pw points into prc. Moreover, note that both pw & prc are * NEAR pointers, so *prc better not be on the stack. * \***************************************************************************/ void NEAR DrawGroove(HDC hdc, HBRUSH hbr, LPRECT prc, BOOL fVert) { if ((hbr == SYSHBR(3DHILIGHT)) || (hbr == gpsi->hbrGray)) FillRect(hdc, prc, hbr); else { RECT rc; // Draw sides CopyRect(&rc, prc); DrawEdge(hdc, &rc, EDGE_SUNKEN, BF_ADJUST | BF_FLAT | (fVert ? BF_LEFT | BF_RIGHT : BF_TOP | BF_BOTTOM)); // Fill middle FillRect(hdc, &rc, hbr); } } /***************************************************************************\ * CalcTrackDragRect * * Give the rectangle for a scrollbar in pSBTrack->pSBCalc, * calculate pSBTrack->rcTrack, the rectangle where tracking * may occur without cancelling the thumbdrag operation. * \***************************************************************************/ void CalcTrackDragRect(PSBTRACK pSBTrack) { int cx; int cy; LPINT pwX, pwY; // // Point pwX and pwY at the parts of the rectangle // corresponding to pSBCalc->pxLeft, pxTop, etc. // // pSBTrack->pSBCalc->pxLeft is the left edge of a vertical // scrollbar and the top edge of horizontal one. // pSBTrack->pSBCalc->pxTop is the top of a vertical // scrollbar and the left of horizontal one. // etc... 
// // Point pwX and pwY to the corresponding parts // of pSBTrack->rcTrack. // pwX = pwY = (LPINT)&pSBTrack->rcTrack; if (pSBTrack->fTrackVert) { cy = SYSMET(CYVTHUMB); pwY++; } else { cy = SYSMET(CXHTHUMB); pwX++; } /* * Later5.0 GerardoB: People keep complaining about this tracking region * being too narrow so let's make it wider while PM decides what to do * about it. * We also used to have some hard coded min and max values but that should * depend on some metric, if at all needed. */ cx = (pSBTrack->pSBCalc->pxRight - pSBTrack->pSBCalc->pxLeft) * 8; cy *= 2; *(pwX + 0) = pSBTrack->pSBCalc->pxLeft - cx; *(pwY + 0) = pSBTrack->pSBCalc->pxTop - cy; *(pwX + 2) = pSBTrack->pSBCalc->pxRight + cx; *(pwY + 2) = pSBTrack->pSBCalc->pxBottom + cy; } void RecalcTrackRect(PSBTRACK pSBTrack) { LPINT pwX, pwY; RECT rcSB; if (!pSBTrack->fCtlSB) CalcSBStuff(pSBTrack->spwndTrack, pSBTrack->pSBCalc, pSBTrack->fTrackVert); pwX = (LPINT)&rcSB; pwY = pwX + 1; if (!pSBTrack->fTrackVert) pwX = pwY--; *(pwX + 0) = pSBTrack->pSBCalc->pxLeft; *(pwY + 0) = pSBTrack->pSBCalc->pxTop; *(pwX + 2) = pSBTrack->pSBCalc->pxRight; *(pwY + 2) = pSBTrack->pSBCalc->pxBottom; switch(pSBTrack->cmdSB) { case SB_LINEUP: *(pwY + 2) = pSBTrack->pSBCalc->pxUpArrow; break; case SB_LINEDOWN: *(pwY + 0) = pSBTrack->pSBCalc->pxDownArrow; break; case SB_PAGEUP: *(pwY + 0) = pSBTrack->pSBCalc->pxUpArrow; *(pwY + 2) = pSBTrack->pSBCalc->pxThumbTop; break; case SB_THUMBPOSITION: CalcTrackDragRect(pSBTrack); break; case SB_PAGEDOWN: *(pwY + 0) = pSBTrack->pSBCalc->pxThumbBottom; *(pwY + 2) = pSBTrack->pSBCalc->pxDownArrow; break; } if (pSBTrack->cmdSB != SB_THUMBPOSITION) { CopyRect(&pSBTrack->rcTrack, &rcSB); } } /***************************************************************************\ * DrawThumb2 * * * * History: * 01-03-94 FritzS Chicago changes \***************************************************************************/ void DrawThumb2( PWND pwnd, PSBCALC pSBCalc, HDC hdc, HBRUSH hbr, BOOL fVert, UINT 
wDisable) /* Disabled flags for the scroll bar */ { int *pLength; int *pWidth; RECT rcSB; PSBTRACK pSBTrack; // // Bail out if the scrollbar has an empty rect // if ((pSBCalc->pxTop >= pSBCalc->pxBottom) || (pSBCalc->pxLeft >= pSBCalc->pxRight)) return; pLength = (LPINT)&rcSB; if (fVert) pWidth = pLength++; else pWidth = pLength + 1; pWidth[0] = pSBCalc->pxLeft; pWidth[2] = pSBCalc->pxRight; /* * If both scroll bar arrows are disabled, then we should not draw * the thumb. So, quit now! */ if (((wDisable & LTUPFLAG) && (wDisable & RTDNFLAG)) || ((pSBCalc->pxDownArrow - pSBCalc->pxUpArrow) < pSBCalc->cpxThumb)) { pLength[0] = pSBCalc->pxUpArrow; pLength[2] = pSBCalc->pxDownArrow; DrawGroove(hdc, hbr, &rcSB, fVert); return; } if (pSBCalc->pxUpArrow < pSBCalc->pxThumbTop) { // Fill in space above Thumb pLength[0] = pSBCalc->pxUpArrow; pLength[2] = pSBCalc->pxThumbTop; DrawGroove(hdc, hbr, &rcSB, fVert); } if (pSBCalc->pxThumbBottom < pSBCalc->pxDownArrow) { // Fill in space below Thumb pLength[0] = pSBCalc->pxThumbBottom; pLength[2] = pSBCalc->pxDownArrow; DrawGroove(hdc, hbr, &rcSB, fVert); } // // Draw elevator // pLength[0] = pSBCalc->pxThumbTop; pLength[2] = pSBCalc->pxThumbBottom; // Not soft! DrawPushButton(hdc, &rcSB, 0, 0); /* * If we're tracking a page scroll, then we've obliterated the hilite. * We need to correct the hiliting rectangle, and rehilite it. 
*/ pSBTrack = PWNDTOPSBTRACK(pwnd); if (pSBTrack && (pSBTrack->cmdSB == SB_PAGEUP || pSBTrack->cmdSB == SB_PAGEDOWN) && (pwnd == pSBTrack->spwndTrack) && (BOOL)pSBTrack->fTrackVert == fVert) { if (pSBTrack->fTrackRecalc) { RecalcTrackRect(pSBTrack); pSBTrack->fTrackRecalc = FALSE; } pLength = (int *)&pSBTrack->rcTrack; if (fVert) pLength++; if (pSBTrack->cmdSB == SB_PAGEUP) pLength[2] = pSBCalc->pxThumbTop; else pLength[0] = pSBCalc->pxThumbBottom; if (pLength[0] < pLength[2]) InvertRect(hdc, &pSBTrack->rcTrack); } } /***************************************************************************\ * xxxDrawSB2 * * * * History: \***************************************************************************/ void xxxDrawSB2( PWND pwnd, PSBCALC pSBCalc, HDC hdc, BOOL fVert, UINT wDisable) { int cLength; int cWidth; int *pwX; int *pwY; HBRUSH hbr; HBRUSH hbrSave; int cpxArrow; RECT rc, rcSB; COLORREF crText, crBk; CheckLock(pwnd); cLength = (pSBCalc->pxBottom - pSBCalc->pxTop) / 2; cWidth = (pSBCalc->pxRight - pSBCalc->pxLeft); if ((cLength <= 0) || (cWidth <= 0)) { return; } if (fVert) cpxArrow = SYSMET(CYVSCROLL); else cpxArrow = SYSMET(CXHSCROLL); /* * Save background and DC color, since they get changed in * xxxGetColorObjects. Restore before we return. */ crBk = GreGetBkColor(hdc); crText = GreGetTextColor(hdc); hbr = xxxGetColorObjects(pwnd, hdc); if (cLength > cpxArrow) cLength = cpxArrow; pwX = (int *)&rcSB; pwY = pwX + 1; if (!fVert) pwX = pwY--; pwX[0] = pSBCalc->pxLeft; pwY[0] = pSBCalc->pxTop; pwX[2] = pSBCalc->pxRight; pwY[2] = pSBCalc->pxBottom; hbrSave = GreSelectBrush(hdc, SYSHBR(BTNTEXT)); // // BOGUS // Draw scrollbar arrows as disabled if the scrollbar itself is // disabled OR if the window it is a part of is disabled? // if (fVert) { if ((cLength == SYSMET(CYVSCROLL)) && (cWidth == SYSMET(CXVSCROLL))) { BitBltSysBmp(hdc, rcSB.left, rcSB.top, (wDisable & LTUPFLAG) ? 
OBI_UPARROW_I : OBI_UPARROW); BitBltSysBmp(hdc, rcSB.left, rcSB.bottom - cLength, (wDisable & RTDNFLAG) ? OBI_DNARROW_I : OBI_DNARROW); } else { CopyRect(&rc, &rcSB); rc.bottom = rc.top + cLength; DrawFrameControl(hdc, &rc, DFC_SCROLL, DFCS_SCROLLUP | ((wDisable & LTUPFLAG) ? DFCS_INACTIVE : 0)); rc.bottom = rcSB.bottom; rc.top = rcSB.bottom - cLength; DrawFrameControl(hdc, &rc, DFC_SCROLL, DFCS_SCROLLDOWN | ((wDisable & RTDNFLAG) ? DFCS_INACTIVE : 0)); } } else { if ((cLength == SYSMET(CXHSCROLL)) && (cWidth == SYSMET(CYHSCROLL))) { BitBltSysBmp(hdc, rcSB.left, rcSB.top, (wDisable & LTUPFLAG) ? OBI_LFARROW_I : OBI_LFARROW); BitBltSysBmp(hdc, rcSB.right - cLength, rcSB.top, (wDisable & RTDNFLAG) ? OBI_RGARROW_I : OBI_RGARROW); } else { CopyRect(&rc, &rcSB); rc.right = rc.left + cLength; DrawFrameControl(hdc, &rc, DFC_SCROLL, DFCS_SCROLLLEFT | ((wDisable & LTUPFLAG) ? DFCS_INACTIVE : 0)); rc.right = rcSB.right; rc.left = rcSB.right - cLength; DrawFrameControl(hdc, &rc, DFC_SCROLL, DFCS_SCROLLRIGHT | ((wDisable & RTDNFLAG) ? DFCS_INACTIVE : 0)); } } hbrSave = GreSelectBrush(hdc, hbrSave); DrawThumb2(pwnd, pSBCalc, hdc, hbr, fVert, wDisable); GreSelectBrush(hdc, hbrSave); GreSetBkColor(hdc, crBk); GreSetTextColor(hdc, crText); } /***************************************************************************\ * zzzSetSBCaretPos * * * * History: \***************************************************************************/ void zzzSetSBCaretPos( PSBWND psbwnd) { if ((PWND)psbwnd == PtiCurrent()->pq->spwndFocus) { zzzSetCaretPos((psbwnd->fVert ? psbwnd->SBCalc.pxLeft : psbwnd->SBCalc.pxThumbTop) + SYSMET(CXEDGE), (psbwnd->fVert ? 
                      psbwnd->SBCalc.pxThumbTop : psbwnd->SBCalc.pxLeft) + SYSMET(CYEDGE));
    }
}

/***************************************************************************\
* CalcSBStuff2
*
* Fills in a SBCALC from a scroll bar rectangle and SBDATA (pos/page/
* range): pixel extents of the bar, arrow sizes, thumb size and thumb
* position.  For horizontal bars the rectangle coordinates are swapped so
* the rest of the code can treat both orientations identically ("top"
* always means the start of the scrolling axis).
*
* History:
\***************************************************************************/

void CalcSBStuff2(
    PSBCALC pSBCalc,
    LPRECT lprc,
    CONST PSBDATA pw,
    BOOL fVert)
{
    int cpx;
    DWORD dwRange;
    int denom;

    if (fVert) {
        pSBCalc->pxTop = lprc->top;
        pSBCalc->pxBottom = lprc->bottom;
        pSBCalc->pxLeft = lprc->left;
        pSBCalc->pxRight = lprc->right;
        pSBCalc->cpxThumb = SYSMET(CYVSCROLL);
    } else {
        /*
         * For horiz scroll bars, "left" & "right" are "top" and "bottom",
         * and vice versa.
         */
        pSBCalc->pxTop = lprc->left;
        pSBCalc->pxBottom = lprc->right;
        pSBCalc->pxLeft = lprc->top;
        pSBCalc->pxRight = lprc->bottom;
        pSBCalc->cpxThumb = SYSMET(CXHSCROLL);
    }

    // Snapshot the scroll data so painting/tracking work from one
    // consistent view.
    pSBCalc->pos = pw->pos;
    pSBCalc->page = pw->page;
    pSBCalc->posMin = pw->posMin;
    pSBCalc->posMax = pw->posMax;

    dwRange = ((DWORD)(pSBCalc->posMax - pSBCalc->posMin)) + 1;

    //
    // For the case of short scroll bars that don't have enough
    // room to fit the full-sized up and down arrows, shorten
    // their sizes to make 'em fit
    //
    cpx = min((pSBCalc->pxBottom - pSBCalc->pxTop) / 2, pSBCalc->cpxThumb);

    pSBCalc->pxUpArrow   = pSBCalc->pxTop    + cpx;
    pSBCalc->pxDownArrow = pSBCalc->pxBottom - cpx;

    if ((pw->page != 0) && (dwRange != 0)) {

        // JEFFBOG -- This is the one and only place where we should
        // see 'range'.  Elsewhere it should be 'range - page'.

        /*
         * The minimum thumb size used to depend on the frame/edge metrics.
         * People that increase the scrollbar width/height expect the minimum
         * to grow proportionally.  So NT5 bases the minimum on
         * CXH/YVSCROLL, which is set by default in cpxThumb.
         */

        /*
         * i is used to keep the macro "max" from executing EngMulDiv twice.
*/ int i = EngMulDiv(pSBCalc->pxDownArrow - pSBCalc->pxUpArrow, pw->page, dwRange); pSBCalc->cpxThumb = max(pSBCalc->cpxThumb / 2, i); } pSBCalc->pxMin = pSBCalc->pxTop + cpx; pSBCalc->cpx = pSBCalc->pxBottom - cpx - pSBCalc->cpxThumb - pSBCalc->pxMin; denom = dwRange - (pw->page ? pw->page : 1); if (denom) pSBCalc->pxThumbTop = EngMulDiv(pw->pos - pw->posMin, pSBCalc->cpx, denom) + pSBCalc->pxMin; else pSBCalc->pxThumbTop = pSBCalc->pxMin - 1; pSBCalc->pxThumbBottom = pSBCalc->pxThumbTop + pSBCalc->cpxThumb; } /***************************************************************************\ * SBCtlSetup * * * * History: \***************************************************************************/ void SBCtlSetup( PSBWND psbwnd) { RECT rc; GetRect((PWND)psbwnd, &rc, GRECT_CLIENT | GRECT_CLIENTCOORDS); CalcSBStuff2(&psbwnd->SBCalc, &rc, (PSBDATA)&psbwnd->SBCalc, psbwnd->fVert); } /***************************************************************************\ * HotTrackSB * \***************************************************************************/ #ifdef COLOR_HOTTRACKING DWORD GetTrackFlags(int ht, BOOL fDraw) { if (fDraw) { switch(ht) { case HTSCROLLUP: case HTSCROLLUPPAGE: return LTUPFLAG; case HTSCROLLDOWN: case HTSCROLLDOWNPAGE: return RTDNFLAG; case HTSCROLLTHUMB: return LTUPFLAG | RTDNFLAG; default: return 0; } } else { return 0; } } BOOL xxxHotTrackSB(PWND pwnd, int htEx, BOOL fDraw) { SBCALC SBCalc; HDC hdc; BOOL fVert = HIWORD(htEx); int ht = LOWORD(htEx); DWORD dwTrack = GetTrackFlags(ht, fDraw); CheckLock(pwnd); /* * xxxDrawSB2 does not callback or leave the critical section when it's * not a SB control and the window belongs to a different thread. It * calls xxxDefWindowProc which simply returns the brush color. 
*/ CalcSBStuff(pwnd, &SBCalc, fVert); hdc = _GetDCEx(pwnd, NULL, DCX_WINDOW | DCX_USESTYLE | DCX_CACHE); xxxDrawSB2(pwnd, &SBCalc, hdc, fVert, GetWndSBDisableFlags(pwnd, fVert), dwTrack); _ReleaseDC(hdc); return TRUE; } void xxxHotTrackSBCtl(PSBWND psbwnd, int ht, BOOL fDraw) { DWORD dwTrack = GetTrackFlags(ht, fDraw); HDC hdc; CheckLock(psbwnd); SBCtlSetup(psbwnd); hdc = _GetDCEx((PWND)psbwnd, NULL, DCX_WINDOW | DCX_USESTYLE | DCX_CACHE); xxxDrawSB2((PWND)psbwnd, &psbwnd->SBCalc, hdc, psbwnd->fVert, psbwnd->wDisableFlags, dwTrack); _ReleaseDC(hdc); } #endif // COLOR_HOTTRACKING BOOL SBSetParms(PSBDATA pw, LPSCROLLINFO lpsi, LPBOOL lpfScroll, LPLONG lplres) { // pass the struct because we modify the struct but don't want that // modified version to get back to the calling app BOOL fChanged = FALSE; if (lpsi->fMask & SIF_RETURNOLDPOS) // save previous position *lplres = pw->pos; if (lpsi->fMask & SIF_RANGE) { // if the range MAX is below the range MIN -- then treat is as a // zero range starting at the range MIN. if (lpsi->nMax < lpsi->nMin) lpsi->nMax = lpsi->nMin; if ((pw->posMin != lpsi->nMin) || (pw->posMax != lpsi->nMax)) { pw->posMin = lpsi->nMin; pw->posMax = lpsi->nMax; if (!(lpsi->fMask & SIF_PAGE)) { lpsi->fMask |= SIF_PAGE; lpsi->nPage = pw->page; } if (!(lpsi->fMask & SIF_POS)) { lpsi->fMask |= SIF_POS; lpsi->nPos = pw->pos; } fChanged = TRUE; } } if (lpsi->fMask & SIF_PAGE) { DWORD dwMaxPage = (DWORD) abs(pw->posMax - pw->posMin) + 1; // Clip page to 0, posMax - posMin + 1 if (lpsi->nPage > dwMaxPage) lpsi->nPage = dwMaxPage; if (pw->page != (int)(lpsi->nPage)) { pw->page = lpsi->nPage; if (!(lpsi->fMask & SIF_POS)) { lpsi->fMask |= SIF_POS; lpsi->nPos = pw->pos; } fChanged = TRUE; } } if (lpsi->fMask & SIF_POS) { int iMaxPos = pw->posMax - ((pw->page) ? pw->page - 1 : 0); // Clip pos to posMin, posMax - (page - 1). 
if (lpsi->nPos < pw->posMin) lpsi->nPos = pw->posMin; else if (lpsi->nPos > iMaxPos) lpsi->nPos = iMaxPos; if (pw->pos != lpsi->nPos) { pw->pos = lpsi->nPos; fChanged = TRUE; } } if (!(lpsi->fMask & SIF_RETURNOLDPOS)) { // Return the new position *lplres = pw->pos; } /* * This was added by JimA as Cairo merge but will conflict * with the documentation for SetScrollPos */ /* else if (*lplres == pw->pos) *lplres = 0; */ if (lpsi->fMask & SIF_RANGE) { if (*lpfScroll = (pw->posMin != pw->posMax)) goto checkPage; } else if (lpsi->fMask & SIF_PAGE) checkPage: *lpfScroll = (pw->page <= (pw->posMax - pw->posMin)); return fChanged; } /***************************************************************************\ * CalcSBStuff * * * * History: \***************************************************************************/ void CalcSBStuff( PWND pwnd, PSBCALC pSBCalc, BOOL fVert) { RECT rcT; RECT rcClient; #ifdef USE_MIRRORING int cx, iTemp; #endif // // Get client rectangle. We know that scrollbars always align to the right // and to the bottom of the client area. // GetRect(pwnd, &rcClient, GRECT_CLIENT | GRECT_WINDOWCOORDS); #ifdef USE_MIRRORING if (TestWF(pwnd, WEFLAYOUTRTL)) { cx = pwnd->rcWindow.right - pwnd->rcWindow.left; iTemp = rcClient.left; rcClient.left = cx - rcClient.right; rcClient.right = cx - iTemp; } #endif if (fVert) { // Only add on space if vertical scrollbar is really there. if (TestWF(pwnd, WEFLEFTSCROLL)) { rcT.right = rcT.left = rcClient.left; if (TestWF(pwnd, WFVPRESENT)) rcT.left -= SYSMET(CXVSCROLL); } else { rcT.right = rcT.left = rcClient.right; if (TestWF(pwnd, WFVPRESENT)) rcT.right += SYSMET(CXVSCROLL); } rcT.top = rcClient.top; rcT.bottom = rcClient.bottom; } else { // Only add on space if horizontal scrollbar is really there. 
rcT.bottom = rcT.top = rcClient.bottom; if (TestWF(pwnd, WFHPRESENT)) rcT.bottom += SYSMET(CYHSCROLL); rcT.left = rcClient.left; rcT.right = rcClient.right; } // If InitPwSB stuff fails (due to our heap being full) there isn't anything reasonable // we can do here, so just let it go through. We won't fault but the scrollbar won't work // properly either... if (_InitPwSB(pwnd)) CalcSBStuff2(pSBCalc, &rcT, (fVert) ? &pwnd->pSBInfo->Vert : &pwnd->pSBInfo->Horz, fVert); } /***************************************************************************\ * * DrawCtlThumb() * \***************************************************************************/ void DrawCtlThumb(PSBWND psb) { HBRUSH hbr, hbrSave; HDC hdc = (HDC) _GetWindowDC((PWND) psb); SBCtlSetup(psb); hbrSave = GreSelectBrush(hdc, hbr = xxxGetColorObjects((PWND) psb, hdc)); DrawThumb2((PWND) psb, &psb->SBCalc, hdc, hbr, psb->fVert, psb->wDisableFlags); GreSelectBrush(hdc, hbrSave); _ReleaseDC(hdc); } /***************************************************************************\ * xxxDrawThumb * * * * History: \***************************************************************************/ void xxxDrawThumb( PWND pwnd, PSBCALC pSBCalc, BOOL fVert) { HBRUSH hbr, hbrSave; HDC hdc; UINT wDisableFlags; SBCALC SBCalc; CheckLock(pwnd); if (!pSBCalc) pSBCalc = &SBCalc; hdc = (HDC)_GetWindowDC(pwnd); CalcSBStuff(pwnd, &SBCalc, fVert); wDisableFlags = GetWndSBDisableFlags(pwnd, fVert); hbrSave = GreSelectBrush(hdc, hbr = xxxGetColorObjects(pwnd, hdc)); DrawThumb2(pwnd, &SBCalc, hdc, hbr, fVert, wDisableFlags); GreSelectBrush(hdc, hbrSave); /* * Won't hurt even if DC is already released (which happens automatically * if window is destroyed during xxxSelectColorObjects) */ _ReleaseDC(hdc); } /***************************************************************************\ * xxxSetScrollBar * * * * History: \***************************************************************************/ LONG xxxSetScrollBar( PWND pwnd, int code, 
LPSCROLLINFO lpsi, BOOL fRedraw) { BOOL fVert; PSBDATA pw; PSBINFO pSBInfo; BOOL fOldScroll; BOOL fScroll; WORD wfScroll; LONG lres; BOOL fNewScroll; CheckLock(pwnd); UserAssert(IsWinEventNotifyDeferredOK()); if (fRedraw) // window must be visible to redraw fRedraw = IsVisible(pwnd); if (code == SB_CTL) #ifdef FE_SB // xxxSetScrollBar() // scroll bar control; send the control a message if(GETPTI(pwnd)->TIF_flags & TIF_16BIT) { // // If the target application is 16bit apps, we don't pass win40's message. // This fix for Ichitaro v6.3. It eats the message. It never forwards // the un-processed messages to original windows procedure via // CallWindowProc(). // // Is this from xxxSetScrollPos() ? if(lpsi->fMask == (SIF_POS|SIF_RETURNOLDPOS)) { return (int)xxxSendMessage(pwnd, SBM_SETPOS, lpsi->nPos, fRedraw); // Is this from xxxSetScrollRange() ? } else if(lpsi->fMask == SIF_RANGE) { xxxSendMessage(pwnd, SBM_SETRANGE, lpsi->nMin, lpsi->nMax); return TRUE; // Others... } else { return (LONG)xxxSendMessage(pwnd, SBM_SETSCROLLINFO, (WPARAM) fRedraw, (LPARAM) lpsi); } } else { return (LONG)xxxSendMessage(pwnd, SBM_SETSCROLLINFO, (WPARAM) fRedraw, (LPARAM) lpsi); } #else // scroll bar control; send the control a message return (LONG)xxxSendMessage(pwnd, SBM_SETSCROLLINFO, (WPARAM) fRedraw, (LPARAM) lpsi); #endif // FE_SB fVert = (code != SB_HORZ); wfScroll = (fVert) ? WFVSCROLL : WFHSCROLL; fScroll = fOldScroll = (TestWF(pwnd, wfScroll)) ? TRUE : FALSE; /* * Don't do anything if we're setting position of a nonexistent scroll bar. */ if (!(lpsi->fMask & SIF_RANGE) && !fOldScroll && (pwnd->pSBInfo == NULL)) { RIPERR0(ERROR_NO_SCROLLBARS, RIP_VERBOSE, ""); return 0; } if (fNewScroll = !(pSBInfo = pwnd->pSBInfo)) { if ((pSBInfo = _InitPwSB(pwnd)) == NULL) return 0; } pw = (fVert) ? 
&(pSBInfo->Vert) : &(pSBInfo->Horz); if (!SBSetParms(pw, lpsi, &fScroll, &lres) && !fNewScroll) { // no change -- but if REDRAW is specified and there's a scrollbar, // redraw the thumb if (fOldScroll && fRedraw) goto redrawAfterSet; return lres; } ClrWF(pwnd, wfScroll); if (fScroll) SetWF(pwnd, wfScroll); else if (!TestWF(pwnd, (WFHSCROLL | WFVSCROLL))) { // if neither scroll bar is set and both ranges are 0, then free up the // scroll info pSBInfo = pwnd->pSBInfo; if ((pSBInfo->Horz.posMin == pSBInfo->Horz.posMax) && (pSBInfo->Vert.posMin == pSBInfo->Vert.posMax)) { DesktopFree(pwnd->head.rpdesk, (HANDLE)(pwnd->pSBInfo)); pwnd->pSBInfo = NULL; } } if (lpsi->fMask & SIF_DISABLENOSCROLL) { if (fOldScroll) { SetWF(pwnd, wfScroll); xxxEnableWndSBArrows(pwnd, code, (fScroll) ? ESB_ENABLE_BOTH : ESB_DISABLE_BOTH); } } else if (fOldScroll ^ fScroll) { PSBTRACK pSBTrack = PWNDTOPSBTRACK(pwnd); if (pSBTrack && (pwnd == pSBTrack->spwndTrack)) { pSBTrack->fTrackRecalc = TRUE; } xxxRedrawFrame(pwnd); // Note: after xxx, pSBTrack may no longer be valid (but we return now) return lres; } if (fScroll && fRedraw && (fVert ? TestWF(pwnd, WFVPRESENT) : TestWF(pwnd, WFHPRESENT))) { PSBTRACK pSBTrack; redrawAfterSet: if (FWINABLE()) { xxxWindowEvent(EVENT_OBJECT_VALUECHANGE, pwnd, (fVert ? OBJID_VSCROLL : OBJID_HSCROLL), INDEX_SCROLLBAR_SELF, WEF_USEPWNDTHREAD); } pSBTrack = PWNDTOPSBTRACK(pwnd); // Bail out if the caller is trying to change the position of // a scrollbar that is in the middle of tracking. We'll hose // TrackThumb() otherwise. 
        if (pSBTrack && (pwnd == pSBTrack->spwndTrack) &&
                ((BOOL)(pSBTrack->fTrackVert) == fVert) &&
                (pSBTrack->xxxpfnSB == xxxTrackThumb)) {
            return lres;
        }

        xxxDrawThumb(pwnd, NULL, fVert);
        // Note: after xxx, pSBTrack may no longer be valid (but we return now)
    }

    return lres;
}

/***************************************************************************\
* xxxDrawScrollBar
*
* Paints one window scroll bar (arrows, groove and thumb).  If this bar is
* currently being tracked, the tracking SBCALC is reused so the picture
* matches the in-progress drag; otherwise a fresh SBCALC is computed.
*
* History:
\***************************************************************************/

void xxxDrawScrollBar(
    PWND pwnd,
    HDC hdc,
    BOOL fVert)
{
    SBCALC SBCalc;
    PSBCALC pSBCalc;
    PSBTRACK pSBTrack = PWNDTOPSBTRACK(pwnd);

    CheckLock(pwnd);

    if (pSBTrack && (pwnd == pSBTrack->spwndTrack) &&
            (pSBTrack->fCtlSB == FALSE) && (fVert == (BOOL)pSBTrack->fTrackVert)) {
        pSBCalc = pSBTrack->pSBCalc;
    } else {
        pSBCalc = &SBCalc;
    }

    CalcSBStuff(pwnd, pSBCalc, fVert);
    xxxDrawSB2(pwnd, pSBCalc, hdc, fVert, GetWndSBDisableFlags(pwnd, fVert));
}

/***************************************************************************\
* SBPosFromPx
*
* Compute scroll bar position from pixel location
*
* Clamps to posMin below the track and to posMax - (page - 1) above it;
* in between the position is interpolated linearly over cpx pixels.
*
* History:
\***************************************************************************/

int SBPosFromPx(
    PSBCALC pSBCalc,
    int px)
{
    if (px < pSBCalc->pxMin) {
        return pSBCalc->posMin;
    }
    if (px >= pSBCalc->pxMin + pSBCalc->cpx) {
        return (pSBCalc->posMax - (pSBCalc->page ? pSBCalc->page - 1 : 0));
    }
    if (pSBCalc->cpx)
        return (pSBCalc->posMin +
                EngMulDiv(pSBCalc->posMax - pSBCalc->posMin -
                    (pSBCalc->page ? pSBCalc->page - 1 : 0),
                    px - pSBCalc->pxMin, pSBCalc->cpx));
    else
        // Degenerate track: no pixels to map, return an out-of-range value.
        return (pSBCalc->posMin - 1);
}

/***************************************************************************\
* InvertScrollHilite
*
* Toggles the page-scroll hilite by inverting the current tracking
* rectangle.
*
* History:
\***************************************************************************/

void InvertScrollHilite(
    PWND pwnd,
    PSBTRACK pSBTrack)
{
    HDC hdc;

    /*
     * Don't invert if the thumb is all the way at the top or bottom
     * or you will end up inverting the line between the arrow and the thumb.
     */
    if (!IsRectEmpty(&pSBTrack->rcTrack)) {
        // Refresh rcTrack if the window geometry changed while tracking.
        if (pSBTrack->fTrackRecalc) {
            RecalcTrackRect(pSBTrack);
            pSBTrack->fTrackRecalc = FALSE;
        }
        hdc = (HDC)_GetWindowDC(pwnd);
        InvertRect(hdc, &pSBTrack->rcTrack);
        _ReleaseDC(hdc);
    }
}

/***************************************************************************\
* xxxDoScroll
*
* Sends scroll notification to the scroll bar owner
*
* Sends WM_VSCROLL/WM_HSCROLL with the scroll code in the low word of
* wParam, the position in the high word, and the scroll bar's hwnd in
* lParam (NULL for window scroll bars).
*
* History:
\***************************************************************************/

void xxxDoScroll(
    PWND pwnd,
    PWND pwndNotify,
    int cmd,
    int pos,
    BOOL fVert
    )
{
    TL tlpwndNotify;

    /*
     * Special case!!!! this routine is always passed pwnds that are
     * not thread locked, so they need to be thread locked here.  The
     * callers always know that by the time DoScroll() returns,
     * pwnd and pwndNotify could be invalid.
     */
    ThreadLock(pwndNotify, &tlpwndNotify);
    xxxSendMessage(pwndNotify, (UINT)(fVert ? WM_VSCROLL : WM_HSCROLL),
            MAKELONG(cmd, pos), (LPARAM)HW(pwnd));
    ThreadUnlock(&tlpwndNotify);
}

// -------------------------------------------------------------------------
//
//  CheckScrollRecalc()
//
// -------------------------------------------------------------------------
//void CheckScrollRecalc(PWND pwnd, PSBSTATE pSBState, PSBCALC pSBCalc)
//{
//    if ((pSBState->pwndCalc != pwnd) ||
//        ((pSBState->nBar != SB_CTL) && (pSBState->nBar != ((pSBState->fVertSB) ?
SB_VERT : SB_HORZ)))) // { // // Calculate SB stuff based on whether it's a control or in a window // if (pSBState->fCtlSB) // SBCtlSetup((PSBWND) pwnd); // else // CalcSBStuff(pwnd, pSBCalc, pSBState->fVertSB); // } //} /***************************************************************************\ * xxxMoveThumb * * History: \***************************************************************************/ void xxxMoveThumb( PWND pwnd, PSBCALC pSBCalc, int px) { HBRUSH hbr, hbrSave; HDC hdc; PSBTRACK pSBTrack; CheckLock(pwnd); pSBTrack = PWNDTOPSBTRACK(pwnd); if ((pSBTrack == NULL) || (px == pSBTrack->pxOld)) return; pxReCalc: pSBTrack->posNew = SBPosFromPx(pSBCalc, px); /* Tentative position changed -- notify the guy. */ if (pSBTrack->posNew != pSBTrack->posOld) { if (pSBTrack->spwndSBNotify != NULL) { xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify, SB_THUMBTRACK, pSBTrack->posNew, pSBTrack->fTrackVert ); } // After xxxDoScroll, re-evaluate pSBTrack REEVALUATE_PSBTRACK(pSBTrack, pwnd, "xxxMoveThumb(1)"); if ((pSBTrack == NULL) || (pSBTrack->xxxpfnSB == NULL)) return; pSBTrack->posOld = pSBTrack->posNew; // // Anything can happen after the SendMessage above! // Make sure that the SBINFO structure contains data for the // window being tracked -- if not, recalculate data in SBINFO // // CheckScrollRecalc(pwnd, pSBState, pSBCalc); // when we yield, our range can get messed with // so make sure we handle this if (px >= pSBCalc->pxMin + pSBCalc->cpx) { px = pSBCalc->pxMin + pSBCalc->cpx; goto pxReCalc; } } hdc = _GetWindowDC(pwnd); pSBCalc->pxThumbTop = px; pSBCalc->pxThumbBottom = pSBCalc->pxThumbTop + pSBCalc->cpxThumb; // at this point, the disable flags are always going to be 0 -- // we're in the middle of tracking. 
hbrSave = GreSelectBrush(hdc, hbr = xxxGetColorObjects(pwnd, hdc)); // After xxxGetColorObjects, re-evaluate pSBTrack REEVALUATE_PSBTRACK(pSBTrack, pwnd, "xxxMoveThumb(2)"); if (pSBTrack == NULL) { RIPMSG1(RIP_ERROR, "Did we use to leak hdc %#p?", hdc) ; _ReleaseDC(hdc); return; } DrawThumb2(pwnd, pSBCalc, hdc, hbr, pSBTrack->fTrackVert, 0); GreSelectBrush(hdc, hbrSave); _ReleaseDC(hdc); pSBTrack->pxOld = px; } /***************************************************************************\ * zzzDrawInvertScrollArea * * * * History: \***************************************************************************/ void zzzDrawInvertScrollArea( PWND pwnd, PSBTRACK pSBTrack, BOOL fHit, UINT cmd) { HDC hdc; RECT rcTemp; int cx, cy; UINT bm; if ((cmd != SB_LINEUP) && (cmd != SB_LINEDOWN)) { // not hitting on arrow -- just invert the area and return InvertScrollHilite(pwnd, pSBTrack); if (cmd == SB_PAGEUP) { if (fHit) SetWF(pwnd, WFPAGEUPBUTTONDOWN); else ClrWF(pwnd, WFPAGEUPBUTTONDOWN); } else { if (fHit) SetWF(pwnd, WFPAGEDNBUTTONDOWN); else ClrWF(pwnd, WFPAGEDNBUTTONDOWN); } if (FWINABLE()) { zzzWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, (pSBTrack->fCtlSB ? OBJID_CLIENT : (pSBTrack->fTrackVert ? OBJID_VSCROLL : OBJID_HSCROLL)), ((cmd == SB_PAGEUP) ? INDEX_SCROLLBAR_UPPAGE : INDEX_SCROLLBAR_DOWNPAGE), WEF_USEPWNDTHREAD); // Note: after zzz, pSBTrack may no longer be valid (but we return now) } return; } if (pSBTrack->fTrackRecalc) { RecalcTrackRect(pSBTrack); pSBTrack->fTrackRecalc = FALSE; } CopyRect(&rcTemp, &pSBTrack->rcTrack); hdc = _GetWindowDC(pwnd); if (pSBTrack->fTrackVert) { cx = SYSMET(CXVSCROLL); cy = SYSMET(CYVSCROLL); } else { cx = SYSMET(CXHSCROLL); cy = SYSMET(CYHSCROLL); } if ((cx == (rcTemp.right - rcTemp.left)) && (cy == (rcTemp.bottom - rcTemp.top))) { if (cmd == SB_LINEUP) bm = (pSBTrack->fTrackVert) ? OBI_UPARROW : OBI_LFARROW; else // SB_LINEDOWN bm = (pSBTrack->fTrackVert) ? 
OBI_DNARROW : OBI_RGARROW; if (fHit) bm += DOBI_PUSHED; BitBltSysBmp(hdc, rcTemp.left, rcTemp.top, bm); } else { DrawFrameControl(hdc, &rcTemp, DFC_SCROLL, ((pSBTrack->fTrackVert) ? DFCS_SCROLLVERT : DFCS_SCROLLHORZ) | ((fHit) ? DFCS_PUSHED | DFCS_FLAT : 0) | ((cmd == SB_LINEUP) ? DFCS_SCROLLMIN : DFCS_SCROLLMAX)); } _ReleaseDC(hdc); if (cmd == SB_LINEUP) { if (fHit) SetWF(pwnd, WFLINEUPBUTTONDOWN); else ClrWF(pwnd, WFLINEUPBUTTONDOWN); } else { if (fHit) SetWF(pwnd, WFLINEDNBUTTONDOWN); else ClrWF(pwnd, WFLINEDNBUTTONDOWN); } if (FWINABLE()) { zzzWindowEvent(EVENT_OBJECT_STATECHANGE, pwnd, (pSBTrack->fCtlSB ? OBJID_CLIENT : (pSBTrack->fTrackVert ? OBJID_VSCROLL : OBJID_HSCROLL)), (cmd == SB_LINEUP ? INDEX_SCROLLBAR_UP : INDEX_SCROLLBAR_DOWN), WEF_USEPWNDTHREAD); // Note: after zzz, pSBTrack may no longer be valid (but we return now) } } /***************************************************************************\ * xxxEndScroll * * * * History: \***************************************************************************/ void xxxEndScroll( PWND pwnd, BOOL fCancel) { UINT oldcmd; PSBTRACK pSBTrack; CheckLock(pwnd); UserAssert(!IsWinEventNotifyDeferred()); pSBTrack = PWNDTOPSBTRACK(pwnd); if (pSBTrack && PtiCurrent()->pq->spwndCapture == pwnd && pSBTrack->xxxpfnSB != NULL) { oldcmd = pSBTrack->cmdSB; pSBTrack->cmdSB = 0; xxxReleaseCapture(); // After xxxReleaseCapture, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); if (pSBTrack->xxxpfnSB == xxxTrackThumb) { if (fCancel) { pSBTrack->posOld = pSBTrack->pSBCalc->pos; } /* * DoScroll does thread locking on these two pwnds - * this is ok since they are not used after this * call. 
*/ if (pSBTrack->spwndSBNotify != NULL) { xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify, SB_THUMBPOSITION, pSBTrack->posOld, pSBTrack->fTrackVert ); // After xxxDoScroll, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); } if (pSBTrack->fCtlSB) { DrawCtlThumb((PSBWND) pwnd); } else { xxxDrawThumb(pwnd, pSBTrack->pSBCalc, pSBTrack->fTrackVert); // Note: after xxx, pSBTrack may no longer be valid } } else if (pSBTrack->xxxpfnSB == xxxTrackBox) { DWORD lParam; POINT ptMsg; if (pSBTrack->hTimerSB != 0) { _KillSystemTimer(pwnd, IDSYS_SCROLL); pSBTrack->hTimerSB = 0; } lParam = _GetMessagePos(); #ifdef USE_MIRRORING if (TestWF(pwnd, WEFLAYOUTRTL)) { ptMsg.x = pwnd->rcWindow.right - GET_X_LPARAM(lParam); } else #endif { ptMsg.x = GET_X_LPARAM(lParam) - pwnd->rcWindow.left; } ptMsg.y = GET_Y_LPARAM(lParam) - pwnd->rcWindow.top; if (PtInRect(&pSBTrack->rcTrack, ptMsg)) { zzzDrawInvertScrollArea(pwnd, pSBTrack, FALSE, oldcmd); // Note: after zzz, pSBTrack may no longer be valid } } /* * Always send SB_ENDSCROLL message. * * DoScroll does thread locking on these two pwnds - * this is ok since they are not used after this * call. */ // After xxxDrawThumb or zzzDrawInvertScrollArea, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); if (pSBTrack->spwndSBNotify != NULL) { xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify, SB_ENDSCROLL, 0, pSBTrack->fTrackVert); // After xxxDoScroll, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); } ClrWF(pwnd, WFSCROLLBUTTONDOWN); ClrWF(pwnd, WFVERTSCROLLTRACK); if (FWINABLE()) { xxxWindowEvent(EVENT_SYSTEM_SCROLLINGEND, pwnd, (pSBTrack->fCtlSB ? OBJID_CLIENT : (pSBTrack->fTrackVert ? OBJID_VSCROLL : OBJID_HSCROLL)), INDEXID_CONTAINER, 0); // After xxxWindowEvent, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); } /* * If this is a Scroll Bar Control, turn the caret back on. 
     */

    if (pSBTrack->spwndSB != NULL) {
        zzzShowCaret(pSBTrack->spwndSB);

        // After zzz, revalidate pSBTrack
        RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd);
    }

    // Clear the tracking callback so no further scroll messages are routed
    // through this (about to be freed) track structure.
    pSBTrack->xxxpfnSB = NULL;

    /*
     * Unlock structure members so they are no longer holding down windows.
     */
    Unlock(&pSBTrack->spwndSB);
    Unlock(&pSBTrack->spwndSBNotify);
    Unlock(&pSBTrack->spwndTrack);

    // Free the per-window track state and clear the window's pointer to it,
    // so later RETURN_IF_PSBTRACK_INVALID checks see NULL rather than a
    // dangling pointer.
    UserFreePool(pSBTrack);
    PWNDTOPSBTRACK(pwnd) = NULL;
    }
}

/***************************************************************************\
* xxxContScroll
*
* System-timer callback used for auto-repeat scrolling while the mouse
* button is held on a scroll arrow or page region: re-hit-tests the current
* cursor position via xxxTrackBox, and if the cursor is still over the hit
* area, re-arms the timer and sends another scroll notification.
*
* History:
\***************************************************************************/

VOID xxxContScroll(
    PWND pwnd,
    UINT message,
    UINT_PTR ID,
    LPARAM lParam)
{
    LONG pt;
    PSBTRACK pSBTrack = PWNDTOPSBTRACK(pwnd);

    UNREFERENCED_PARAMETER(message);
    UNREFERENCED_PARAMETER(ID);
    UNREFERENCED_PARAMETER(lParam);

    // Tracking may already have been torn down (e.g. by xxxEndScroll)
    // before this timer fired.
    if (pSBTrack == NULL)
        return;

    CheckLock(pwnd);

    // Convert the current screen cursor position to window-relative
    // coordinates (mirrored horizontally for RTL layouts).
    pt = _GetMessagePos();
#ifdef USE_MIRRORING
    if (TestWF(pwnd, WEFLAYOUTRTL)) {
        pt = MAKELONG(pwnd->rcWindow.right - GET_X_LPARAM(pt),
                GET_Y_LPARAM(pt) - pwnd->rcWindow.top);
    } else
#endif
    {
        pt = MAKELONG(GET_X_LPARAM(pt) - pwnd->rcWindow.left,
                GET_Y_LPARAM(pt) - pwnd->rcWindow.top);
    }

    xxxTrackBox(pwnd, WM_NULL, 0, pt, NULL);

    // After xxxTrackBox, revalidate pSBTrack
    RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd);

    if (pSBTrack->fHitOld) {
        // Cursor is still over the hit area: re-arm the auto-repeat timer
        // at the fast repeat rate (1/8 of the initial scroll delay).
        pSBTrack->hTimerSB = _SetSystemTimer(pwnd, IDSYS_SCROLL,
                gpsi->dtScroll / 8, xxxContScroll);

        /*
         * DoScroll does thread locking on these two pwnds -
         * this is ok since they are not used after this
         * call.
         */
        if (pSBTrack->spwndSBNotify != NULL) {
            xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify,
                    pSBTrack->cmdSB, 0, pSBTrack->fTrackVert);
            // Note: after xxx, pSBTrack may no longer be valid (but we return now)
        }
    }

    return;
}

/***************************************************************************\
* xxxTrackBox
*
* Handles mouse input while tracking a scroll-bar arrow or page region:
* hit-tests the cursor against the tracking rectangle, inverts/restores the
* hit area when the cursor crosses its boundary, and starts/continues the
* auto-repeat scroll timer.
*
* History:
\***************************************************************************/

void xxxTrackBox(
    PWND pwnd,
    UINT message,
    WPARAM wParam,
    LPARAM lParam,
    PSBCALC pSBCalc)
{
    BOOL fHit;
    POINT ptHit;
    PSBTRACK pSBTrack = PWNDTOPSBTRACK(pwnd);
    int cmsTimer;

    UNREFERENCED_PARAMETER(wParam);
    UNREFERENCED_PARAMETER(pSBCalc);

    CheckLock(pwnd);
    UserAssert(IsWinEventNotifyDeferredOK());

    if (pSBTrack == NULL)
        return;

    // Only WM_NULL (timer-driven poll) and mouse messages are meaningful
    // here; ignore anything else forwarded by the track loop.
    if (message != WM_NULL && HIBYTE(message) != HIBYTE(WM_MOUSEFIRST))
        return;

    if (pSBTrack->fTrackRecalc) {
        RecalcTrackRect(pSBTrack);
        pSBTrack->fTrackRecalc = FALSE;
    }

    ptHit.x = GET_X_LPARAM(lParam);
    ptHit.y = GET_Y_LPARAM(lParam);
    fHit = PtInRect(&pSBTrack->rcTrack, ptHit);

    // Invert/restore the arrow or page area only when the hit state
    // actually changes, to avoid flicker.
    if (fHit != (BOOL)pSBTrack->fHitOld) {
        zzzDrawInvertScrollArea(pwnd, pSBTrack, fHit, pSBTrack->cmdSB);

        // After zzz, pSBTrack may no longer be valid
        RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd);
    }

    // Default to the fast auto-repeat rate; a fresh button-down below uses
    // the longer initial delay instead.
    cmsTimer = gpsi->dtScroll / 8;

    switch (message) {
    case WM_LBUTTONUP:
        xxxEndScroll(pwnd, FALSE);
        // Note: after xxx, pSBTrack may no longer be valid
        break;

    case WM_LBUTTONDOWN:
        pSBTrack->hTimerSB = 0;
        cmsTimer = gpsi->dtScroll;

        /*
         *** FALL THRU **
         */

    case WM_MOUSEMOVE:
        if (fHit && fHit != (BOOL)pSBTrack->fHitOld) {

            /*
             * We moved back into the normal rectangle: reset timer
             */
            pSBTrack->hTimerSB = _SetSystemTimer(pwnd, IDSYS_SCROLL,
                    cmsTimer, xxxContScroll);

            /*
             * DoScroll does thread locking on these two pwnds -
             * this is ok since they are not used after this
             * call.
             */
            if (pSBTrack->spwndSBNotify != NULL) {
                xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify,
                        pSBTrack->cmdSB, 0, pSBTrack->fTrackVert);
                // Note: after xxx, pSBTrack may no longer be valid
            }
        }
    }

    // After xxxDoScroll or xxxEndScroll, revalidate pSBTrack
    RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd);

    pSBTrack->fHitOld = fHit;
}

/***************************************************************************\
* xxxTrackThumb
*
* Handles mouse input while dragging the scroll-bar thumb: maps the cursor
* position to a pixel offset (snapping back to the drag origin when the
* cursor leaves the tracking rectangle), moves the thumb, and ends the
* scroll when the button is released.
*
* History:
\***************************************************************************/

void xxxTrackThumb(
    PWND pwnd,
    UINT message,
    WPARAM wParam,
    LPARAM lParam,
    PSBCALC pSBCalc)
{
    int px;
    PSBTRACK pSBTrack = PWNDTOPSBTRACK(pwnd);
    POINT pt;

    UNREFERENCED_PARAMETER(wParam);

    CheckLock(pwnd);

    // Only mouse messages drive thumb tracking.
    if (HIBYTE(message) != HIBYTE(WM_MOUSEFIRST))
        return;

    if (pSBTrack == NULL)
        return;

    // Make sure that the SBINFO structure contains data for the
    // window being tracked -- if not, recalculate data in SBINFO
    //    CheckScrollRecalc(pwnd, pSBState, pSBCalc);
    if (pSBTrack->fTrackRecalc) {
        RecalcTrackRect(pSBTrack);
        pSBTrack->fTrackRecalc = FALSE;
    }

    pt.y = GET_Y_LPARAM(lParam);
    pt.x = GET_X_LPARAM(lParam);

    if (!PtInRect(&pSBTrack->rcTrack, pt))
        // Cursor left the tracking rectangle: snap the thumb back to where
        // the drag started.
        px = pSBCalc->pxStart;
    else {
        // Translate cursor position into a thumb offset, clamped to the
        // scroll-bar's pixel range.
        px = (pSBTrack->fTrackVert ? pt.y : pt.x) + pSBTrack->dpxThumb;
        if (px < pSBCalc->pxMin)
            px = pSBCalc->pxMin;
        else if (px >= pSBCalc->pxMin + pSBCalc->cpx)
            px = pSBCalc->pxMin + pSBCalc->cpx;
    }

    xxxMoveThumb(pwnd, pSBCalc, px);

    /*
     * We won't get the WM_LBUTTONUP message if we got here through
     * the scroll menu, so test the button state directly.
*/ if (message == WM_LBUTTONUP || _GetKeyState(VK_LBUTTON) >= 0) { xxxEndScroll(pwnd, FALSE); } } /***************************************************************************\ * xxxSBTrackLoop * * * * History: \***************************************************************************/ void xxxSBTrackLoop( PWND pwnd, LPARAM lParam, PSBCALC pSBCalc) { MSG msg; UINT cmd; PTHREADINFO ptiCurrent; VOID (*xxxpfnSB)(PWND, UINT, WPARAM, LPARAM, PSBCALC); PSBTRACK pSBTrack; CheckLock(pwnd); UserAssert(IsWinEventNotifyDeferredOK()); pSBTrack = PWNDTOPSBTRACK(pwnd); if ((pSBTrack == NULL) || (NULL == (xxxpfnSB = pSBTrack->xxxpfnSB))) // mode cancelled -- exit track loop return; if (pSBTrack->fTrackVert) SetWF(pwnd, WFVERTSCROLLTRACK); if (FWINABLE()) { xxxWindowEvent(EVENT_SYSTEM_SCROLLINGSTART, pwnd, (pSBTrack->fCtlSB ? OBJID_CLIENT : (pSBTrack->fTrackVert ? OBJID_VSCROLL : OBJID_HSCROLL)), INDEXID_CONTAINER, 0); // Note: after xxx, pSBTrack may no longer be valid } (*xxxpfnSB)(pwnd, WM_LBUTTONDOWN, 0, lParam, pSBCalc); // Note: after xxx, pSBTrack may no longer be valid ptiCurrent = PtiCurrent(); while (ptiCurrent->pq->spwndCapture == pwnd) { if (!xxxGetMessage(&msg, NULL, 0, 0)) { // Note: after xxx, pSBTrack may no longer be valid break; } if (!_CallMsgFilter(&msg, MSGF_SCROLLBAR)) { cmd = msg.message; if (msg.hwnd == HWq(pwnd) && ((cmd >= WM_MOUSEFIRST && cmd <= WM_MOUSELAST) || (cmd >= WM_KEYFIRST && cmd <= WM_KEYLAST))) { cmd = SystoChar(cmd, msg.lParam); // After xxxWindowEvent, xxxpfnSB, xxxTranslateMessage or // xxxDispatchMessage, re-evaluate pSBTrack. 
REEVALUATE_PSBTRACK(pSBTrack, pwnd, "xxxTrackLoop"); if ((pSBTrack == NULL) || (NULL == (xxxpfnSB = pSBTrack->xxxpfnSB))) // mode cancelled -- exit track loop return; (*xxxpfnSB)(pwnd, cmd, msg.wParam, msg.lParam, pSBCalc); } else { xxxTranslateMessage(&msg, 0); xxxDispatchMessage(&msg); } } } } /***************************************************************************\ * xxxSBTrackInit * * History: \***************************************************************************/ void xxxSBTrackInit( PWND pwnd, LPARAM lParam, int curArea, UINT uType) { int px; LPINT pwX; LPINT pwY; UINT wDisable; // Scroll bar disable flags; SBCALC SBCalc; PSBCALC pSBCalc; RECT rcSB; PSBTRACK pSBTrack; CheckLock(pwnd); if (PWNDTOPSBTRACK(pwnd)) { RIPMSG1(RIP_WARNING, "xxxSBTrackInit: PWNDTOPSBTRACK(pwnd) == %#p", PWNDTOPSBTRACK(pwnd)); return; } pSBTrack = (PSBTRACK)UserAllocPoolWithQuota(sizeof(*pSBTrack), TAG_SCROLLTRACK); if (pSBTrack == NULL) return; pSBTrack->hTimerSB = 0; pSBTrack->fHitOld = FALSE; pSBTrack->xxxpfnSB = xxxTrackBox; pSBTrack->spwndTrack = NULL; pSBTrack->spwndSB = NULL; pSBTrack->spwndSBNotify = NULL; Lock(&pSBTrack->spwndTrack, pwnd); PWNDTOPSBTRACK(pwnd) = pSBTrack; pSBTrack->fCtlSB = (!curArea); if (pSBTrack->fCtlSB) { /* * This is a scroll bar control. */ Lock(&pSBTrack->spwndSB, pwnd); pSBTrack->fTrackVert = ((PSBWND)pwnd)->fVert; Lock(&pSBTrack->spwndSBNotify, pwnd->spwndParent); wDisable = ((PSBWND)pwnd)->wDisableFlags; pSBCalc = &((PSBWND)pwnd)->SBCalc; pSBTrack->nBar = SB_CTL; } else { /* * This is a scroll bar that is part of the window frame. 
*/ #ifdef USE_MIRRORING // // Mirror the window coord of the scroll bar, // if it is a mirrored one // if (TestWF(pwnd,WEFLAYOUTRTL)) { lParam = MAKELONG( pwnd->rcWindow.right - GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam) - pwnd->rcWindow.top); } else { #endif lParam = MAKELONG( GET_X_LPARAM(lParam) - pwnd->rcWindow.left, GET_Y_LPARAM(lParam) - pwnd->rcWindow.top); #ifdef USE_MIRRORING } #endif Lock(&pSBTrack->spwndSBNotify, pwnd); Lock(&pSBTrack->spwndSB, NULL); pSBTrack->fTrackVert = (curArea - HTHSCROLL); wDisable = GetWndSBDisableFlags(pwnd, pSBTrack->fTrackVert); pSBCalc = &SBCalc; pSBTrack->nBar = (curArea - HTHSCROLL) ? SB_VERT : SB_HORZ; } pSBTrack->pSBCalc = pSBCalc; /* * Check if the whole scroll bar is disabled */ if((wDisable & SB_DISABLE_MASK) == SB_DISABLE_MASK) { Unlock(&pSBTrack->spwndSBNotify); Unlock(&pSBTrack->spwndSB); Unlock(&pSBTrack->spwndTrack); UserFreePool(pSBTrack); PWNDTOPSBTRACK(pwnd) = NULL; return; // It is a disabled scroll bar; So, do not respond. } if (!pSBTrack->fCtlSB) { CalcSBStuff(pwnd, pSBCalc, pSBTrack->fTrackVert); } pwX = (LPINT)&rcSB; pwY = pwX + 1; if (!pSBTrack->fTrackVert) pwX = pwY--; px = (pSBTrack->fTrackVert ? GET_Y_LPARAM(lParam) : GET_X_LPARAM(lParam)); *(pwX + 0) = pSBCalc->pxLeft; *(pwY + 0) = pSBCalc->pxTop; *(pwX + 2) = pSBCalc->pxRight; *(pwY + 2) = pSBCalc->pxBottom; pSBTrack->cmdSB = (UINT)-1; if (px < pSBCalc->pxUpArrow) { /* * The click occurred on Left/Up arrow; Check if it is disabled */ if(wDisable & LTUPFLAG) { if(pSBTrack->fCtlSB) { // If this is a scroll bar control, zzzShowCaret(pSBTrack->spwndSB); // show the caret before returning; // After zzzShowCaret, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); } Unlock(&pSBTrack->spwndSBNotify); Unlock(&pSBTrack->spwndSB); Unlock(&pSBTrack->spwndTrack); UserFreePool(pSBTrack); PWNDTOPSBTRACK(pwnd) = NULL; return; // Yes! disabled. Do not respond. 
} // LINEUP -- make rcSB the Up Arrow's Rectangle pSBTrack->cmdSB = SB_LINEUP; *(pwY + 2) = pSBCalc->pxUpArrow; } else if (px >= pSBCalc->pxDownArrow) { /* * The click occurred on Right/Down arrow; Check if it is disabled */ if (wDisable & RTDNFLAG) { if (pSBTrack->fCtlSB) { // If this is a scroll bar control, zzzShowCaret(pSBTrack->spwndSB); // show the caret before returning; // After zzzShowCaret, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); } Unlock(&pSBTrack->spwndSBNotify); Unlock(&pSBTrack->spwndSB); Unlock(&pSBTrack->spwndTrack); UserFreePool(pSBTrack); PWNDTOPSBTRACK(pwnd) = NULL; return;// Yes! disabled. Do not respond. } // LINEDOWN -- make rcSB the Down Arrow's Rectangle pSBTrack->cmdSB = SB_LINEDOWN; *(pwY + 0) = pSBCalc->pxDownArrow; } else if (px < pSBCalc->pxThumbTop) { // PAGEUP -- make rcSB the rectangle between Up Arrow and Thumb pSBTrack->cmdSB = SB_PAGEUP; *(pwY + 0) = pSBCalc->pxUpArrow; *(pwY + 2) = pSBCalc->pxThumbTop; } else if (px < pSBCalc->pxThumbBottom) { DoThumbPos: /* * Elevator isn't there if there's no room. */ if (pSBCalc->pxDownArrow - pSBCalc->pxUpArrow <= pSBCalc->cpxThumb) { Unlock(&pSBTrack->spwndSBNotify); Unlock(&pSBTrack->spwndSB); Unlock(&pSBTrack->spwndTrack); UserFreePool(pSBTrack); PWNDTOPSBTRACK(pwnd) = NULL; return; } // THUMBPOSITION -- we're tracking with the thumb pSBTrack->cmdSB = SB_THUMBPOSITION; CalcTrackDragRect(pSBTrack); pSBTrack->xxxpfnSB = xxxTrackThumb; pSBTrack->pxOld = pSBCalc->pxStart = pSBCalc->pxThumbTop; pSBTrack->posNew = pSBTrack->posOld = pSBCalc->pos; pSBTrack->dpxThumb = pSBCalc->pxStart - px; xxxCapture(PtiCurrent(), pwnd, WINDOW_CAPTURE); // After xxxCapture, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); /* * DoScroll does thread locking on these two pwnds - * this is ok since they are not used after this * call. 
*/ if (pSBTrack->spwndSBNotify != NULL) { xxxDoScroll(pSBTrack->spwndSB, pSBTrack->spwndSBNotify, SB_THUMBTRACK, pSBTrack->posOld, pSBTrack->fTrackVert ); // Note: after xxx, pSBTrack may no longer be valid } } else if (px < pSBCalc->pxDownArrow) { // PAGEDOWN -- make rcSB the rectangle between Thumb and Down Arrow pSBTrack->cmdSB = SB_PAGEDOWN; *(pwY + 0) = pSBCalc->pxThumbBottom; *(pwY + 2) = pSBCalc->pxDownArrow; } /* * If the shift key is down, we'll position the thumb directly so it's * centered on the click point. */ if ((uType == SCROLL_DIRECT && pSBTrack->cmdSB != SB_LINEUP && pSBTrack->cmdSB != SB_LINEDOWN) || (uType == SCROLL_MENU)) { if (pSBTrack->cmdSB != SB_THUMBPOSITION) { goto DoThumbPos; } pSBTrack->dpxThumb = -(pSBCalc->cpxThumb / 2); } xxxCapture(PtiCurrent(), pwnd, WINDOW_CAPTURE); // After xxxCapture, revalidate pSBTrack RETURN_IF_PSBTRACK_INVALID(pSBTrack, pwnd); if (pSBTrack->cmdSB != SB_THUMBPOSITION) { CopyRect(&pSBTrack->rcTrack, &rcSB); } xxxSBTrackLoop(pwnd, lParam, pSBCalc); // After xxx, re-evaluate pSBTrack REEVALUATE_PSBTRACK(pSBTrack, pwnd, "xxxTrackLoop"); if (pSBTrack) { Unlock(&pSBTrack->spwndSBNotify); Unlock(&pSBTrack->spwndSB); Unlock(&pSBTrack->spwndTrack); UserFreePool(pSBTrack); PWNDTOPSBTRACK(pwnd) = NULL; } } /***************************************************************************\ * GetScrollMenu * * History: \***************************************************************************/ PMENU xxxGetScrollMenu( PWND pwnd, BOOL fVert) { PMENU pMenu; PMENU *ppDesktopMenu; /* * Grab the menu from the desktop. If the desktop menu * has not been loaded and this is not a system thread, * load it now. Callbacks cannot be made from a system * thread or when a thread is in cleanup. 
*/ if (fVert) { ppDesktopMenu = &pwnd->head.rpdesk->spmenuVScroll; } else { ppDesktopMenu = &pwnd->head.rpdesk->spmenuHScroll; } pMenu = *ppDesktopMenu; if (pMenu == NULL && !(PtiCurrent()->TIF_flags & (TIF_SYSTEMTHREAD | TIF_INCLEANUP))) { UNICODE_STRING strMenuName; RtlInitUnicodeStringOrId(&strMenuName, fVert ? MAKEINTRESOURCE(ID_VSCROLLMENU) : MAKEINTRESOURCE(ID_HSCROLLMENU)); pMenu = xxxClientLoadMenu(NULL, &strMenuName); LockDesktopMenu(ppDesktopMenu, pMenu); } /* * Return the handle to the scroll menu. */ if (pMenu != NULL) { return _GetSubMenu(pMenu, 0); } return NULL; } /***************************************************************************\ * xxxDoScrollMenu * * History: \***************************************************************************/ VOID xxxDoScrollMenu( PWND pwndNotify, PWND pwndSB, BOOL fVert, LPARAM lParam) { PMENU pMenu; SBCALC SBCalc, *pSBCalc; UINT cmd; POINT pt; TL tlpmenu; UINT wDisable; /* * Check the compatibility flag. Word 6.0 AV's when selecting an item * in this menu. * NOTE: If this hack is to be extended for other apps we should use * another bit for GACF_NOSCROLLBARCTXMENU as the current one is re-used * MCostea #119380 */ if (GetAppCompatFlags(NULL) & GACF_NOSCROLLBARCTXMENU) { return; } /* * Initialize some stuff. */ POINTSTOPOINT(pt, lParam); if (pwndSB) { SBCtlSetup((PSBWND)pwndSB); pSBCalc = &(((PSBWND)pwndSB)->SBCalc); wDisable = ((PSBWND)pwndSB)->wDisableFlags; pt.x -= pwndSB->rcWindow.left; pt.y -= pwndSB->rcWindow.top; } else { pSBCalc = &SBCalc; CalcSBStuff(pwndNotify, pSBCalc, fVert); wDisable = GetWndSBDisableFlags(pwndNotify, fVert); pt.x -= pwndNotify->rcWindow.left; pt.y -= pwndNotify->rcWindow.top; } /* * Make sure the scrollbar isn't disabled. */ if ((wDisable & SB_DISABLE_MASK) == SB_DISABLE_MASK) { return; } /* * Put up a menu and scroll accordingly. 
*/ if ((pMenu = xxxGetScrollMenu(pwndNotify, fVert)) != NULL) { ThreadLockAlways(pMenu, &tlpmenu); cmd = xxxTrackPopupMenuEx(pMenu, TPM_RIGHTBUTTON | TPM_RETURNCMD | TPM_NONOTIFY, GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam), pwndNotify, NULL); ThreadUnlock(&tlpmenu); if (cmd) { if ((cmd & 0x00FF) == SB_THUMBPOSITION) { if (pwndSB) { xxxSBTrackInit(pwndSB, MAKELPARAM(pt.x, pt.y), 0, SCROLL_MENU); } else { xxxSBTrackInit(pwndNotify, lParam, fVert ? HTVSCROLL : HTHSCROLL, SCROLL_MENU); } } else { xxxDoScroll(pwndSB, pwndNotify, cmd & 0x00FF, 0, fVert ); xxxDoScroll(pwndSB, pwndNotify, SB_ENDSCROLL, 0, fVert ); } } } } /***************************************************************************\ * xxxSBWndProc * * History: * 08-15-95 jparsons Added guard against NULL lParam [51986] \***************************************************************************/ LRESULT xxxSBWndProc( PSBWND psbwnd, UINT message, WPARAM wParam, LPARAM lParam) { LONG l; LONG lres; int cx; int cy; UINT cmd; UINT uSide; HDC hdc; RECT rc; POINT pt; BOOL fSizeReal; HBRUSH hbrSave; BOOL fSize; PAINTSTRUCT ps; UINT style; TL tlpwndParent; SCROLLINFO si; LPSCROLLINFO lpsi = &si; BOOL fRedraw = FALSE; BOOL fScroll; CheckLock(psbwnd); UserAssert(IsWinEventNotifyDeferredOK()); VALIDATECLASSANDSIZE(((PWND)psbwnd), message, wParam, lParam, FNID_SCROLLBAR, WM_CREATE); style = LOBYTE(psbwnd->wnd.style); fSize = ((style & (SBS_SIZEBOX | SBS_SIZEGRIP)) != 0); switch (message) { case WM_CREATE: /* * Guard against lParam being NULL since the thunk allows it [51986] */ if (lParam) { rc.right = (rc.left = ((LPCREATESTRUCT)lParam)->x) + ((LPCREATESTRUCT)lParam)->cx; rc.bottom = (rc.top = ((LPCREATESTRUCT)lParam)->y) + ((LPCREATESTRUCT)lParam)->cy; // This is because we can't just rev CardFile -- we should fix the // problem here in case anyone else happened to have some EXTRA // scroll styles on their scroll bar controls (jeffbog 03/21/94) if (!TestWF((PWND)psbwnd, WFWIN40COMPAT)) psbwnd->wnd.style &= 
~(WS_HSCROLL | WS_VSCROLL); if (!fSize) { l = PtrToLong(((LPCREATESTRUCT)lParam)->lpCreateParams); psbwnd->SBCalc.pos = psbwnd->SBCalc.posMin = LOWORD(l); psbwnd->SBCalc.posMax = HIWORD(l); psbwnd->fVert = ((LOBYTE(psbwnd->wnd.style) & SBS_VERT) != 0); psbwnd->SBCalc.page = 0; } if (psbwnd->wnd.style & WS_DISABLED) psbwnd->wDisableFlags = SB_DISABLE_MASK; if (style & (SBS_TOPALIGN | SBS_BOTTOMALIGN)) { if (fSize) { if (style & SBS_SIZEBOXBOTTOMRIGHTALIGN) { rc.left = rc.right - SYSMET(CXVSCROLL); rc.top = rc.bottom - SYSMET(CYHSCROLL); } rc.right = rc.left + SYSMET(CXVSCROLL); rc.bottom = rc.top + SYSMET(CYHSCROLL); } else { if (style & SBS_VERT) { if (style & SBS_LEFTALIGN) rc.right = rc.left + SYSMET(CXVSCROLL); else rc.left = rc.right - SYSMET(CXVSCROLL); } else { if (style & SBS_TOPALIGN) rc.bottom = rc.top + SYSMET(CYHSCROLL); else rc.top = rc.bottom - SYSMET(CYHSCROLL); } } xxxMoveWindow((PWND)psbwnd, rc.left, rc.top, rc.right - rc.left, rc.bottom - rc.top, FALSE); } } /* if */ else { RIPERR0(ERROR_INVALID_PARAMETER, RIP_WARNING, "xxxSBWndProc - NULL lParam for WM_CREATE\n") ; } /* else */ break; case WM_SIZE: if (PtiCurrent()->pq->spwndFocus != (PWND)psbwnd) break; // scroll bar has the focus -- recalc it's thumb caret size // no need to DeferWinEventNotify() - see xxxCreateCaret below. zzzDestroyCaret(); // | | // | FALL THRU | // V V case WM_SETFOCUS: SBCtlSetup(psbwnd); cx = (psbwnd->fVert ? psbwnd->wnd.rcWindow.right - psbwnd->wnd.rcWindow.left : psbwnd->SBCalc.cpxThumb) - 2 * SYSMET(CXEDGE); cy = (psbwnd->fVert ? psbwnd->SBCalc.cpxThumb : psbwnd->wnd.rcWindow.bottom - psbwnd->wnd.rcWindow.top) - 2 * SYSMET(CYEDGE); xxxCreateCaret((PWND)psbwnd, (HBITMAP)1, cx, cy); zzzSetSBCaretPos(psbwnd); zzzShowCaret((PWND)psbwnd); break; case WM_KILLFOCUS: zzzDestroyCaret(); break; case WM_ERASEBKGND: /* * Do nothing, but don't let DefWndProc() do it either. * It will be erased when its painted. 
*/ return (LONG)TRUE; case WM_PRINTCLIENT: case WM_PAINT: if ((hdc = (HDC)wParam) == NULL) { hdc = xxxBeginPaint((PWND)psbwnd, (LPPAINTSTRUCT)&ps); } if (!fSize) { SBCtlSetup(psbwnd); xxxDrawSB2((PWND)psbwnd, &psbwnd->SBCalc, hdc, psbwnd->fVert, psbwnd->wDisableFlags); } else { fSizeReal = TestWF((PWND)psbwnd, WFSIZEBOX); if (!fSizeReal) SetWF((PWND)psbwnd, WFSIZEBOX); DrawSize((PWND)psbwnd, hdc, 0, 0); if (!fSizeReal) ClrWF((PWND)psbwnd, WFSIZEBOX); } if (wParam == 0L) xxxEndPaint((PWND)psbwnd, (LPPAINTSTRUCT)&ps); break; case WM_GETDLGCODE: return DLGC_WANTARROWS; case WM_CONTEXTMENU: ThreadLock(psbwnd->wnd.spwndParent, &tlpwndParent); xxxDoScrollMenu(psbwnd->wnd.spwndParent, (PWND)psbwnd, psbwnd->fVert, lParam); ThreadUnlock(&tlpwndParent); break; case WM_NCHITTEST: if (style & SBS_SIZEGRIP) { #ifdef USE_MIRRORING /* * If the scroll bar is RTL mirrored, then * mirror the hittest of the grip location. */ if (TestWF((PWND)psbwnd, WEFLAYOUTRTL)) return HTBOTTOMLEFT; else #endif return HTBOTTOMRIGHT; } else { goto DoDefault; } break; #ifdef COLOR_HOTTRACKING case WM_MOUSELEAVE: xxxHotTrackSBCtl(psbwnd, 0, FALSE); psbwnd->ht = 0; break; case WM_MOUSEMOVE: { int ht; if (psbwnd->ht == 0) { TRACKMOUSEEVENT tme = {sizeof(TRACKMOUSEEVENT), TME_LEAVE, HWq(psbwnd), 0}; TrackMouseEvent(&tme); } pt.x = GET_X_LPARAM(lParam); pt.y = GET_Y_LPARAM(lParam); ht = HitTestScrollBar((PWND)psbwnd, psbwnd->fVert, pt); if (psbwnd->ht != ht) { xxxHotTrackSBCtl(psbwnd, ht, TRUE); psbwnd->ht = ht; } } break; #endif // COLOR_HOTTRACKING case WM_LBUTTONDBLCLK: cmd = SC_ZOOM; if (fSize) goto postmsg; /* *** FALL THRU ** */ case WM_LBUTTONDOWN: // // Note that SBS_SIZEGRIP guys normally won't ever see button // downs. This is because they return HTBOTTOMRIGHT to // WindowHitTest handling. This will walk up the parent chain // to the first sizeable ancestor, bailing out at caption windows // of course. That dude, if he exists, will handle the sizing // instead. 
// if (!fSize) { if (TestWF((PWND)psbwnd, WFTABSTOP)) { xxxSetFocus((PWND)psbwnd); } zzzHideCaret((PWND)psbwnd); SBCtlSetup(psbwnd); /* * SBCtlSetup enters SEM_SB, and xxxSBTrackInit leaves it. */ xxxSBTrackInit((PWND)psbwnd, lParam, 0, (_GetKeyState(VK_SHIFT) < 0) ? SCROLL_DIRECT : SCROLL_NORMAL); break; } else { cmd = SC_SIZE; postmsg: pt.x = GET_X_LPARAM(lParam); pt.y = GET_Y_LPARAM(lParam); _ClientToScreen((PWND)psbwnd, &pt); lParam = MAKELONG(pt.x, pt.y); /* * convert HT value into a move value. This is bad, * but this is purely temporary. */ #ifdef USE_MIRRORING if (TestWF(((PWND)psbwnd)->spwndParent,WEFLAYOUTRTL)) { uSide = HTBOTTOMLEFT; } else #endif { uSide = HTBOTTOMRIGHT; } ThreadLock(((PWND)psbwnd)->spwndParent, &tlpwndParent); xxxSendMessage(((PWND)psbwnd)->spwndParent, WM_SYSCOMMAND, (cmd | (uSide - HTSIZEFIRST + 1)), lParam); ThreadUnlock(&tlpwndParent); } break; case WM_KEYUP: switch (wParam) { case VK_HOME: case VK_END: case VK_PRIOR: case VK_NEXT: case VK_LEFT: case VK_UP: case VK_RIGHT: case VK_DOWN: /* * Send end scroll message when user up clicks on keyboard * scrolling. * * DoScroll does thread locking on these two pwnds - * this is ok since they are not used after this * call. */ xxxDoScroll((PWND)psbwnd, psbwnd->wnd.spwndParent, SB_ENDSCROLL, 0, psbwnd->fVert ); break; default: break; } break; case WM_KEYDOWN: switch (wParam) { case VK_HOME: wParam = SB_TOP; goto KeyScroll; case VK_END: wParam = SB_BOTTOM; goto KeyScroll; case VK_PRIOR: wParam = SB_PAGEUP; goto KeyScroll; case VK_NEXT: wParam = SB_PAGEDOWN; goto KeyScroll; case VK_LEFT: case VK_UP: wParam = SB_LINEUP; goto KeyScroll; case VK_RIGHT: case VK_DOWN: wParam = SB_LINEDOWN; KeyScroll: /* * DoScroll does thread locking on these two pwnds - * this is ok since they are not used after this * call. 
*/ xxxDoScroll((PWND)psbwnd, psbwnd->wnd.spwndParent, (int)wParam, 0, psbwnd->fVert ); break; default: break; } break; case WM_ENABLE: return xxxSendMessage((PWND)psbwnd, SBM_ENABLE_ARROWS, (wParam ? ESB_ENABLE_BOTH : ESB_DISABLE_BOTH), 0); case SBM_ENABLE_ARROWS: /* * This is used to enable/disable the arrows in a SB ctrl */ return (LONG)xxxEnableSBCtlArrows((PWND)psbwnd, (UINT)wParam); case SBM_GETPOS: return (LONG)psbwnd->SBCalc.pos; case SBM_GETRANGE: *((LPINT)wParam) = psbwnd->SBCalc.posMin; *((LPINT)lParam) = psbwnd->SBCalc.posMax; return MAKELRESULT(LOWORD(psbwnd->SBCalc.posMin), LOWORD(psbwnd->SBCalc.posMax)); case SBM_GETSCROLLINFO: return (LONG)_SBGetParms((PWND)psbwnd, SB_CTL, (PSBDATA)&psbwnd->SBCalc, (LPSCROLLINFO) lParam); case SBM_SETRANGEREDRAW: fRedraw = TRUE; case SBM_SETRANGE: // Save the old values of Min and Max for return value si.cbSize = sizeof(si); // si.nMin = LOWORD(lParam); // si.nMax = HIWORD(lParam); si.nMin = (int)wParam; si.nMax = (int)lParam; si.fMask = SIF_RANGE | SIF_RETURNOLDPOS; goto SetInfo; case SBM_SETPOS: fRedraw = (BOOL) lParam; si.cbSize = sizeof(si); si.fMask = SIF_POS | SIF_RETURNOLDPOS; si.nPos = (int)wParam; goto SetInfo; case SBM_SETSCROLLINFO: lpsi = (LPSCROLLINFO) lParam; fRedraw = (BOOL) wParam; SetInfo: fScroll = TRUE; if (SBSetParms((PSBDATA)&psbwnd->SBCalc, lpsi, &fScroll, &lres) && FWINABLE()) { xxxWindowEvent(EVENT_OBJECT_VALUECHANGE, (PWND)psbwnd, OBJID_CLIENT, INDEX_SCROLLBAR_SELF, WEF_USEPWNDTHREAD); } if (!fRedraw) return lres; /* * We must set the new position of the caret irrespective of * whether the window is visible or not; * Still, this will work only if the app has done a xxxSetScrollPos * with fRedraw = TRUE; * Fix for Bug #5188 --SANKAR-- 10-15-89 * No need to DeferWinEventNotify since psbwnd is locked. 
*/ zzzHideCaret((PWND)psbwnd); SBCtlSetup(psbwnd); zzzSetSBCaretPos(psbwnd); /* ** The following zzzShowCaret() must be done after the DrawThumb2(), ** otherwise this caret will be erased by DrawThumb2() resulting ** in this bug: ** Fix for Bug #9263 --SANKAR-- 02-09-90 * */ /* *********** zzzShowCaret((PWND)psbwnd); ****** */ if (_FChildVisible((PWND)psbwnd) && fRedraw) { UINT wDisable; HBRUSH hbrUse; if (!fScroll) fScroll = !(lpsi->fMask & SIF_DISABLENOSCROLL); wDisable = (fScroll) ? ESB_ENABLE_BOTH : ESB_DISABLE_BOTH; xxxEnableScrollBar((PWND) psbwnd, SB_CTL, wDisable); hdc = _GetWindowDC((PWND)psbwnd); hbrSave = GreSelectBrush(hdc, hbrUse = xxxGetColorObjects((PWND)psbwnd, hdc)); /* * Before we used to only hideshowthumb() if the mesage was * not SBM_SETPOS. I am not sure why but this case was ever * needed for win 3.x but on NT it resulted in trashing the border * of the scrollbar when the app called SetScrollPos() during * scrollbar tracking. - mikehar 8/26 */ DrawThumb2((PWND)psbwnd, &psbwnd->SBCalc, hdc, hbrUse, psbwnd->fVert, psbwnd->wDisableFlags); GreSelectBrush(hdc, hbrSave); _ReleaseDC(hdc); } /* * This zzzShowCaret() has been moved to this place from above * Fix for Bug #9263 --SANKAR-- 02-09-90 */ zzzShowCaret((PWND)psbwnd); return lres; default: DoDefault: return xxxDefWindowProc((PWND)psbwnd, message, wParam, lParam); } return 0L; }
/* ==== concatenation artifact: begin file 871739.c ==== */
/* * Copyright (C) 2017 - 2019 Xilinx, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
* */ #include <stdio.h> #include "xparameters.h" #include "netif/xadapter.h" #include "platform.h" #include "platform_config.h" #include "lwipopts.h" #include "xil_printf.h" #include "sleep.h" #include "lwip/priv/tcp_priv.h" #include "lwip/init.h" #include "lwip/inet.h" #include "xil_cache.h" #if LWIP_DHCP==1 #include "lwip/dhcp.h" extern volatile int dhcp_timoutcntr; #endif extern volatile int TcpFastTmrFlag; extern volatile int TcpSlowTmrFlag; #define DEFAULT_IP_ADDRESS "192.168.1.10" #define DEFAULT_IP_MASK "255.255.255.0" #define DEFAULT_GW_ADDRESS "192.168.1.1" void platform_enable_interrupts(void); void start_application(void); void print_app_header(void); #if defined (__arm__) && !defined (ARMR5) #if XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT == 1 || \ XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT == 1 int ProgramSi5324(void); int ProgramSfpPhy(void); #endif #endif #ifdef XPS_BOARD_ZCU102 #ifdef XPAR_XIICPS_0_DEVICE_ID int IicPhyReset(void); #endif #endif struct netif server_netif; static void print_ip(char *msg, ip_addr_t *ip) { print(msg); xil_printf("%d.%d.%d.%d\r\n", ip4_addr1(ip), ip4_addr2(ip), ip4_addr3(ip), ip4_addr4(ip)); } static void print_ip_settings(ip_addr_t *ip, ip_addr_t *mask, ip_addr_t *gw) { print_ip("Board IP: ", ip); print_ip("Netmask : ", mask); print_ip("Gateway : ", gw); } static void assign_default_ip(ip_addr_t *ip, ip_addr_t *mask, ip_addr_t *gw) { int err; xil_printf("Configuring default IP %s \r\n", DEFAULT_IP_ADDRESS); err = inet_aton(DEFAULT_IP_ADDRESS, ip); if (!err) xil_printf("Invalid default IP address: %d\r\n", err); err = inet_aton(DEFAULT_IP_MASK, mask); if (!err) xil_printf("Invalid default IP MASK: %d\r\n", err); err = inet_aton(DEFAULT_GW_ADDRESS, gw); if (!err) xil_printf("Invalid default gateway address: %d\r\n", err); } int main(void) { struct netif *netif; /* the mac address of the board. 
this should be unique per board */
	unsigned char mac_ethernet_address[] = {
		0x00, 0x0a, 0x35, 0x00, 0x01, 0x02 };

	netif = &server_netif;

#if defined (__arm__) && !defined (ARMR5)
#if XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT == 1 || \
		XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT == 1
	/* Board-specific clock/PHY programming for SGMII/1000BASE-X links. */
	ProgramSi5324();
	ProgramSfpPhy();
#endif
#endif

/* Define this board specific macro in order perform PHY reset
 * on ZCU102
 */
#ifdef XPS_BOARD_ZCU102
	IicPhyReset();
#endif

	init_platform();

	xil_printf("\r\n\r\n");
	xil_printf("-----lwIP RAW Mode UDP Server Application-----\r\n");

	/* initialize lwIP */
	lwip_init();

	/* Add network interface to the netif_list, and set it as default */
	if (!xemac_add(netif, NULL, NULL, NULL, mac_ethernet_address,
				PLATFORM_EMAC_BASEADDR)) {
		xil_printf("Error adding N/W interface\r\n");
		return -1;
	}

	netif_set_default(netif);

	/* now enable interrupts */
	platform_enable_interrupts();

	/* specify that the network if is up */
	netif_set_up(netif);

#if (LWIP_DHCP==1)
	/* Create a new DHCP client for this interface.
	 * Note: you must call dhcp_fine_tmr() and dhcp_coarse_tmr() at
	 * the predefined regular intervals after starting the client.
	 */
	dhcp_start(netif);
	/* Poll the MAC until an address is bound or the timeout counter
	 * (decremented by the platform timer interrupt) expires. */
	dhcp_timoutcntr = 24;
	while (((netif->ip_addr.addr) == 0) && (dhcp_timoutcntr > 0))
		xemacif_input(netif);

	if (dhcp_timoutcntr <= 0) {
		if ((netif->ip_addr.addr) == 0) {
			/* DHCP failed: fall back to the static defaults. */
			xil_printf("ERROR: DHCP request timed out\r\n");
			assign_default_ip(&(netif->ip_addr),
					&(netif->netmask), &(netif->gw));
		}
	}

	/* print IP address, netmask and gateway */
#else
	assign_default_ip(&(netif->ip_addr), &(netif->netmask), &(netif->gw));
#endif
	print_ip_settings(&(netif->ip_addr), &(netif->netmask), &(netif->gw));

	xil_printf("\r\n");

	/* print app header */
	print_app_header();

	/* start the application*/
	start_application();
	xil_printf("\r\n");

	/* Main receive loop.  The TCP timer flags are serviced here even in a
	 * UDP application — presumably because the lwIP build has TCP compiled
	 * in and its timers must still run; TODO confirm against lwipopts.h. */
	while (1) {
		if (TcpFastTmrFlag) {
			tcp_fasttmr();
			TcpFastTmrFlag = 0;
		}
		if (TcpSlowTmrFlag) {
			tcp_slowtmr();
			TcpSlowTmrFlag = 0;
		}
		xemacif_input(netif);
	}

	/* never reached */
	cleanup_platform();

	return 0;
}
/* ==== concatenation artifact: begin file 783655.c ==== */
/* @(#)xdr.c 2.1 88/07/29 4.0 RPCSRC */ /* * Sun RPC is a product of Sun Microsystems, Inc. and is provided for * unrestricted use provided that this legend is included on all tape * media and as a part of the software program in whole or part. Users * may copy or modify Sun RPC without charge, but are not authorized * to license or distribute it to anyone else except as part of a product or * program developed by the user. * * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. * * Sun RPC is provided with no support and without any obligation on the * part of Sun Microsystems, Inc. to assist in its use, correction, * modification or enhancement. * * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC * OR ANY PART THEREOF. * * In no event will Sun Microsystems, Inc. be liable for any lost revenue * or profits or other special, indirect and consequential damages, even if * Sun has been advised of the possibility of such damages. * * Sun Microsystems, Inc. * 2550 Garcia Avenue * Mountain View, California 94043 */ #if !defined(lint) && defined(SCCSIDS) static char sccsid[] = "@(#)xdr.c 1.35 87/08/12"; #endif /* * xdr.c, Generic XDR routines implementation. * * Copyright (C) 1986, Sun Microsystems, Inc. * * These are the "generic" xdr routines used to serialize and de-serialize * most common data items. See xdr.h for more info on the interface to * xdr. 
*/ #include <stdio.h> #include <string.h> #include <gssrpc/types.h> #include <gssrpc/xdr.h> /* * constants specific to the xdr "protocol" */ #define XDR_FALSE ((rpc_int32) 0) #define XDR_TRUE ((rpc_int32) 1) #define LASTUNSIGNED ((unsigned int) 0-1) /* * for unit alignment */ static char xdr_zero[BYTES_PER_XDR_UNIT] = { 0, 0, 0, 0 }; /* * Free a data structure using XDR * Not a filter, but a convenient utility nonetheless */ void gssrpc_xdr_free(proc, objp) xdrproc_t proc; char *objp; { XDR x; x.x_op = XDR_FREE; (*proc)(&x, objp); } /* * XDR nothing */ bool_t xdr_void(/* xdrs, addr */) /* XDR *xdrs; */ /* caddr_t addr; */ { return (TRUE); } /* * XDR integers */ bool_t xdr_int(xdrs, ip) XDR *xdrs; int *ip; { #ifdef lint (void) (xdr_short(xdrs, (short *)ip)); return (xdr_long(xdrs, (rpc_int32 *)ip)); #else if (sizeof (int) >= 4) { long l; switch (xdrs->x_op) { case XDR_ENCODE: l = *ip; return (xdr_long(xdrs, &l)); case XDR_DECODE: if (!xdr_long(xdrs, &l)) { return (FALSE); } *ip = l; return (TRUE); case XDR_FREE: return (TRUE); } } else { return (xdr_short(xdrs, (short *)ip)); } #endif } /* * XDR unsigned integers */ bool_t xdr_u_int(xdrs, up) XDR *xdrs; unsigned int *up; { #ifdef lint (void) (xdr_short(xdrs, (short *)up)); return (xdr_u_long(xdrs, (rpc_u_int32 *)up)); #else if (sizeof (unsigned int) >= 4) { unsigned long l; switch (xdrs->x_op) { case XDR_ENCODE: l = *up; return (xdr_u_long(xdrs, &l)); case XDR_DECODE: if (!xdr_u_long(xdrs, &l)) { return (FALSE); } *up = l; return (TRUE); case XDR_FREE: return (TRUE); } } else { return (xdr_short(xdrs, (short *)up)); } #endif } /* * XDR long integers * same as xdr_u_long - open coded to save a proc call! */ bool_t xdr_long(xdrs, lp) register XDR *xdrs; long *lp; { if (xdrs->x_op == XDR_ENCODE) { if (sizeof (long) > 4) { /* See if the dereferenced value fits in 4 bytes. If not, return FALSE. * Check by loading value into a rpc_int32, then loading back and comparing * results. 
*/ rpc_int32 i = (int) *lp; long l = i; if (l != *lp) { return (FALSE); } } return (XDR_PUTLONG(xdrs, lp)); } if (xdrs->x_op == XDR_DECODE) return (XDR_GETLONG(xdrs, lp)); if (xdrs->x_op == XDR_FREE) return (TRUE); return (FALSE); } /* * XDR unsigned long integers * same as xdr_long - open coded to save a proc call! */ bool_t xdr_u_long(xdrs, ulp) register XDR *xdrs; unsigned long *ulp; { if (xdrs->x_op == XDR_ENCODE) { if (sizeof (unsigned long) > 4) { /* See if the dereferenced value fits in 4 bytes. If not, return FALSE. * Check by loading value into a rpc_int32, then loading back and comparing * results. */ unsigned int ui = *ulp; unsigned long ul = ui; if (ul != *ulp) { return (FALSE); } } return (XDR_PUTLONG(xdrs, ulp)); } if (xdrs->x_op == XDR_DECODE) { return (XDR_GETLONG(xdrs, (long *)ulp)); } if (xdrs->x_op == XDR_FREE) return (TRUE); return (FALSE); } /* * XDR short integers */ bool_t xdr_short(xdrs, sp) register XDR *xdrs; short *sp; { long l; switch (xdrs->x_op) { case XDR_ENCODE: l = (long) *sp; return (XDR_PUTLONG(xdrs, &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &l)) { return (FALSE); } *sp = (short) l; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR unsigned short integers */ bool_t xdr_u_short(xdrs, usp) register XDR *xdrs; unsigned short *usp; { unsigned long l; switch (xdrs->x_op) { case XDR_ENCODE: l = (unsigned long) *usp; return (XDR_PUTLONG(xdrs, &l)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &l)) { return (FALSE); } *usp = (unsigned short) l; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR a char */ bool_t xdr_char(xdrs, cp) XDR *xdrs; char *cp; { int i; i = (*cp); if (!xdr_int(xdrs, &i)) { return (FALSE); } *cp = i; return (TRUE); } /* * XDR an unsigned char */ bool_t xdr_u_char(xdrs, cp) XDR *xdrs; char *cp; { unsigned int u; u = (*cp); if (!xdr_u_int(xdrs, &u)) { return (FALSE); } *cp = u; return (TRUE); } /* * XDR booleans */ bool_t xdr_bool(xdrs, bp) register XDR *xdrs; bool_t *bp; { 
long lb; switch (xdrs->x_op) { case XDR_ENCODE: lb = *bp ? XDR_TRUE : XDR_FALSE; return (XDR_PUTLONG(xdrs, &lb)); case XDR_DECODE: if (!XDR_GETLONG(xdrs, &lb)) { return (FALSE); } *bp = (lb == XDR_FALSE) ? FALSE : TRUE; return (TRUE); case XDR_FREE: return (TRUE); } return (FALSE); } /* * XDR enumerations */ bool_t xdr_enum(xdrs, ep) XDR *xdrs; enum_t *ep; { #ifndef lint enum sizecheck { SIZEVAL }; /* used to find the size of an enum */ /* * enums are treated as ints */ if (sizeof (enum sizecheck) == sizeof (rpc_int32)) { return (xdr_int32(xdrs, (rpc_int32 *)ep)); } else if (sizeof (enum sizecheck) == sizeof (short)) { return (xdr_short(xdrs, (short *)ep)); } else { return (FALSE); } #else (void) (xdr_short(xdrs, (short *)ep)); return (xdr_long(xdrs, (long *)ep)); #endif } /* * XDR opaque data * Allows the specification of a fixed size sequence of opaque bytes. * cp points to the opaque object and cnt gives the byte length. */ bool_t xdr_opaque(xdrs, cp, cnt) register XDR *xdrs; caddr_t cp; register unsigned int cnt; { register unsigned int rndup; static crud[BYTES_PER_XDR_UNIT]; /* * if no data we are done */ if (cnt == 0) return (TRUE); /* * round byte count to full xdr units */ rndup = cnt % BYTES_PER_XDR_UNIT; if (rndup > 0) rndup = BYTES_PER_XDR_UNIT - rndup; if (xdrs->x_op == XDR_DECODE) { if (!XDR_GETBYTES(xdrs, cp, cnt)) { return (FALSE); } if (rndup == 0) return (TRUE); return (XDR_GETBYTES(xdrs, crud, rndup)); } if (xdrs->x_op == XDR_ENCODE) { if (!XDR_PUTBYTES(xdrs, cp, cnt)) { return (FALSE); } if (rndup == 0) return (TRUE); return (XDR_PUTBYTES(xdrs, xdr_zero, rndup)); } if (xdrs->x_op == XDR_FREE) { return (TRUE); } return (FALSE); } /* * XDR counted bytes * *cpp is a pointer to the bytes, *sizep is the count. 
* If *cpp is NULL maxsize bytes are allocated */ bool_t xdr_bytes(xdrs, cpp, sizep, maxsize) register XDR *xdrs; char **cpp; register unsigned int *sizep; unsigned int maxsize; { register char *sp = *cpp; /* sp is the actual string pointer */ register unsigned int nodesize; /* * first deal with the length since xdr bytes are counted */ if (! xdr_u_int(xdrs, sizep)) { return (FALSE); } nodesize = *sizep; if ((nodesize > maxsize) && (xdrs->x_op != XDR_FREE)) { return (FALSE); } /* * now deal with the actual bytes */ switch (xdrs->x_op) { case XDR_DECODE: if (nodesize == 0) { return (TRUE); } if (sp == NULL) { *cpp = sp = (char *)mem_alloc(nodesize); } if (sp == NULL) { (void) fprintf(stderr, "xdr_bytes: out of memory\n"); return (FALSE); } /* fall into ... */ case XDR_ENCODE: return (xdr_opaque(xdrs, sp, nodesize)); case XDR_FREE: if (sp != NULL) { mem_free(sp, nodesize); *cpp = NULL; } return (TRUE); } return (FALSE); } /* * Implemented here due to commonality of the object. */ bool_t xdr_netobj(xdrs, np) XDR *xdrs; struct netobj *np; { return (xdr_bytes(xdrs, &np->n_bytes, &np->n_len, MAX_NETOBJ_SZ)); } bool_t xdr_int32(xdrs, ip) XDR *xdrs; rpc_int32 *ip; { long l; switch (xdrs->x_op) { case XDR_ENCODE: l = *ip; return (xdr_long(xdrs, &l)); case XDR_DECODE: if (!xdr_long(xdrs, &l)) { return (FALSE); } *ip = l; return (TRUE); case XDR_FREE: return (TRUE); } } xdr_u_int32(xdrs, up) XDR *xdrs; rpc_u_int32 *up; { unsigned long ul; switch (xdrs->x_op) { case XDR_ENCODE: ul = *up; return (xdr_u_long(xdrs, &ul)); case XDR_DECODE: if (!xdr_u_long(xdrs, &ul)) { return (FALSE); } *up = ul; return (TRUE); case XDR_FREE: return (TRUE); } } /* * XDR a descriminated union * Support routine for discriminated unions. * You create an array of xdrdiscrim structures, terminated with * an entry with a null procedure pointer. The routine gets * the discriminant value and then searches the array of xdrdiscrims * looking for that value. 
It calls the procedure given in the xdrdiscrim * to handle the discriminant. If there is no specific routine a default * routine may be called. * If there is no specific or default routine an error is returned. */ bool_t xdr_union(xdrs, dscmp, unp, choices, dfault) register XDR *xdrs; enum_t *dscmp; /* enum to decide which arm to work on */ char *unp; /* the union itself */ struct xdr_discrim *choices; /* [value, xdr proc] for each arm */ xdrproc_t dfault; /* default xdr routine */ { register enum_t dscm; /* * we deal with the discriminator; it's an enum */ if (! xdr_enum(xdrs, dscmp)) { return (FALSE); } dscm = *dscmp; /* * search choices for a value that matches the discriminator. * if we find one, execute the xdr routine for that value. */ for (; choices->proc != NULL_xdrproc_t; choices++) { if (choices->value == dscm) return ((*(choices->proc))(xdrs, unp, LASTUNSIGNED)); } /* * no match - execute the default xdr routine if there is one */ return ((dfault == NULL_xdrproc_t) ? FALSE : (*dfault)(xdrs, unp, LASTUNSIGNED)); } /* * Non-portable xdr primitives. * Care should be taken when moving these routines to new architectures. */ /* * XDR null terminated ASCII strings * xdr_string deals with "C strings" - arrays of bytes that are * terminated by a NULL character. The parameter cpp references a * pointer to storage; If the pointer is null, then the necessary * storage is allocated. The last parameter is the max allowed length * of the string as specified by a protocol. */ bool_t xdr_string(xdrs, cpp, maxsize) register XDR *xdrs; char **cpp; unsigned int maxsize; { register char *sp = *cpp; /* sp is the actual string pointer */ unsigned int size; unsigned int nodesize; /* * first deal with the length since xdr strings are counted-strings */ switch (xdrs->x_op) { case XDR_FREE: if (sp == NULL) { return(TRUE); /* already free */ } /* fall through... */ case XDR_ENCODE: size = strlen(sp); break; } if (! 
xdr_u_int(xdrs, &size)) { return (FALSE); } if (size > maxsize) { return (FALSE); } nodesize = size + 1; /* * now deal with the actual bytes */ switch (xdrs->x_op) { case XDR_DECODE: if (nodesize == 0) { return (TRUE); } if (sp == NULL) *cpp = sp = (char *)mem_alloc(nodesize); if (sp == NULL) { (void) fprintf(stderr, "xdr_string: out of memory\n"); return (FALSE); } sp[size] = 0; /* fall into ... */ case XDR_ENCODE: return (xdr_opaque(xdrs, sp, size)); case XDR_FREE: mem_free(sp, nodesize); *cpp = NULL; return (TRUE); } return (FALSE); } /* * Wrapper for xdr_string that can be called directly from * routines like clnt_call */ bool_t xdr_wrapstring(xdrs, cpp) XDR *xdrs; char **cpp; { if (xdr_string(xdrs, cpp, LASTUNSIGNED)) { return (TRUE); } return (FALSE); }
485976.c
/* * Copyright (C) 2018 Gunar Schorcht * * This file is subject to the terms and conditions of the GNU Lesser * General Public License v2.1. See the file LICENSE in the top level * directory for more details. */ /** * @ingroup drivers_hmc5883l * @brief HMC5883L adaption to the RIOT actuator/sensor interface * @author Gunar Schorcht <[email protected]> * @file */ #include <string.h> #include "saul.h" #include "hmc5883l.h" static int read(const void *dev, phydat_t *res) { hmc5883l_data_t data; int ret = hmc5883l_read((const hmc5883l_t *)dev, &data); if (ret < 0) { return -ECANCELED; } res->val[0] = data.x; res->val[1] = data.y; res->val[2] = data.z; res->unit = UNIT_GS; res->scale = -3; return 3; } const saul_driver_t hmc5883l_saul_driver = { .read = read, .write = saul_notsup, .type = SAUL_SENSE_MAG, };
302179.c
/* +----------------------------------------------------------------------+ | pthreads | +----------------------------------------------------------------------+ | Copyright (c) Joe Watkins 2012 - 2015 | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | [email protected] so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Joe Watkins <[email protected]> | +----------------------------------------------------------------------+ */ #ifndef HAVE_PTHREADS_PREPARE #define HAVE_PTHREADS_PREPARE #ifndef HAVE_PTHREADS_PREPARE_H # include <src/prepare.h> #endif #ifndef HAVE_PTHREADS_OBJECT_H # include <src/object.h> #endif #ifndef HAVE_PTHREADS_RESOURCES_H # include <src/resources.h> #endif #ifndef HAVE_PTHREADS_GLOBALS_H # include <src/globals.h> #endif #ifndef HAVE_PTHREADS_COPY_H # include <src/copy.h> #endif #define PTHREADS_PREPARATION_BEGIN_CRITICAL() pthreads_globals_lock(); #define PTHREADS_PREPARATION_END_CRITICAL() pthreads_globals_unlock() /* {{{ */ static zend_trait_alias * pthreads_preparation_copy_trait_alias(pthreads_object_t* thread, zend_trait_alias *alias); static zend_trait_precedence * pthreads_preparation_copy_trait_precedence(pthreads_object_t* thread, zend_trait_precedence *precedence); static zend_trait_method_reference * pthreads_preparation_copy_trait_method_reference(pthreads_object_t* thread, zend_trait_method_reference *reference); static void pthreads_prepared_resource_dtor(zval *zv); /* }}} */ /* {{{ */ static void prepare_class_constants(pthreads_object_t* thread, zend_class_entry 
*candidate, zend_class_entry *prepared) { zend_string *key; zval *value; ZEND_HASH_FOREACH_STR_KEY_VAL(&candidate->constants_table, key, value) { zend_string *name; zval separated; if (zend_hash_exists(&prepared->constants_table, key)) { continue; } switch (Z_TYPE_P(value)) { case IS_PTR: { zend_class_constant *zc = Z_PTR_P(value), rc; memcpy(&rc, zc, sizeof(zend_class_constant)); if (pthreads_store_separate(&zc->value, &rc.value, 1) == SUCCESS) { if (zc->doc_comment != NULL) { rc.doc_comment = zend_string_new(zc->doc_comment); } rc.ce = pthreads_prepared_entry(thread, zc->ce); name = zend_string_new(key); zend_hash_add_mem(&prepared->constants_table, name, &rc, sizeof(zend_class_constant)); zend_string_release(name); } continue; } case IS_STRING: case IS_ARRAY: { if (pthreads_store_separate(value, &separated, 1) != SUCCESS) { continue; } } break; default: ZVAL_COPY(&separated, value); } name = zend_string_new(key); zend_hash_update(&prepared->constants_table, name, &separated); zend_string_release(name); } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static void prepare_class_statics(pthreads_object_t* thread, zend_class_entry *candidate, zend_class_entry *prepared) { if (candidate->default_static_members_count) { int i; if(prepared->default_static_members_table != NULL) { efree(prepared->default_static_members_table); } prepared->default_static_members_table = (zval*) ecalloc( sizeof(zval), candidate->default_static_members_count); prepared->default_static_members_count = candidate->default_static_members_count; memcpy(prepared->default_static_members_table, candidate->default_static_members_table, sizeof(zval) * candidate->default_static_members_count); for (i=0; i<prepared->default_static_members_count; i++) { pthreads_store_separate( &candidate->default_static_members_table[i], &prepared->default_static_members_table[i], 0); } prepared->static_members_table = prepared->default_static_members_table; } else prepared->default_static_members_count = 0; } /* }}} 
*/ /* {{{ */ static void prepare_class_function_table(zend_class_entry *candidate, zend_class_entry *prepared) { zend_string *key; zend_function *value; ZEND_HASH_FOREACH_STR_KEY_PTR(&candidate->function_table, key, value) { if (!zend_hash_exists(&prepared->function_table, key)) { zend_string *name = zend_string_new(key); value = pthreads_copy_function(value); zend_hash_add_ptr(&prepared->function_table, name, value); zend_string_release(name); } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static void prepare_class_property_table(pthreads_object_t* thread, zend_class_entry *candidate, zend_class_entry *prepared) { zend_property_info *info; zend_string *name; ZEND_HASH_FOREACH_STR_KEY_PTR(&candidate->properties_info, name, info) { zend_property_info dup = *info; if (info->doc_comment) { if (thread->options & PTHREADS_INHERIT_COMMENTS) { dup.doc_comment = zend_string_new(info->doc_comment); } else dup.doc_comment = NULL; } if (info->ce) { if (info->ce == candidate) { dup.ce = prepared; } else dup.ce = pthreads_prepared_entry(thread, info->ce); } if (!zend_hash_str_add_mem(&prepared->properties_info, name->val, name->len, &dup, sizeof(zend_property_info))) { if (dup.doc_comment) zend_string_release(dup.doc_comment); } } ZEND_HASH_FOREACH_END(); if (candidate->default_properties_count) { int i; if(prepared->default_properties_table != NULL) { efree(prepared->default_properties_table); } prepared->default_properties_table = emalloc( sizeof(zval) * candidate->default_properties_count); memcpy( prepared->default_properties_table, candidate->default_properties_table, sizeof(zval) * candidate->default_properties_count); for (i=0; i<candidate->default_properties_count; i++) { if (Z_REFCOUNTED(prepared->default_properties_table[i])) { pthreads_store_separate( &candidate->default_properties_table[i], &prepared->default_properties_table[i], 1); } } prepared->default_properties_count = candidate->default_properties_count; } else prepared->default_properties_count = 0; } /* 
}}} */ /* {{{ */ static void prepare_class_handlers(zend_class_entry *candidate, zend_class_entry *prepared) { prepared->create_object = candidate->create_object; prepared->serialize = candidate->serialize; prepared->unserialize = candidate->unserialize; prepared->get_iterator = candidate->get_iterator; prepared->iterator_funcs = candidate->iterator_funcs; prepared->interface_gets_implemented = candidate->interface_gets_implemented; prepared->get_static_method = candidate->get_static_method; } /* }}} */ static void prepare_class_interceptors(zend_class_entry *candidate, zend_class_entry *prepared) { zend_function *func; if (!prepared->constructor && zend_hash_num_elements(&prepared->function_table)) { if ((func = zend_hash_str_find_ptr(&prepared->function_table, "__construct", sizeof("__construct")-1))) { prepared->constructor = func; } else { if ((func = zend_hash_find_ptr(&prepared->function_table, prepared->name))) { prepared->constructor = func; } } } #define FIND_AND_SET(f, n) do {\ if (!prepared->f && zend_hash_num_elements(&prepared->function_table)) { \ if ((func = zend_hash_str_find_ptr(&prepared->function_table, n, sizeof(n)-1))) { \ prepared->f = func; \ } \ } \ } \ while(0) FIND_AND_SET(clone, "__clone"); FIND_AND_SET(__get, "__get"); FIND_AND_SET(__set, "__set"); FIND_AND_SET(__unset, "__unset"); FIND_AND_SET(__isset, "__isset"); FIND_AND_SET(__call, "__call"); FIND_AND_SET(__callstatic, "__callstatic"); FIND_AND_SET(serialize_func, "serialize"); FIND_AND_SET(unserialize_func, "unserialize"); FIND_AND_SET(__tostring, "__tostring"); FIND_AND_SET(destructor, "__destruct"); #undef FIND_AND_SET #define SET_ITERATOR_FUNC(f) do { \ if (candidate->iterator_funcs.f) { \ prepared->iterator_funcs.f = zend_hash_index_find_ptr( \ &PTHREADS_ZG(resolve), (zend_ulong) candidate->iterator_funcs.f); \ } \ } while (0) memcpy(&prepared->iterator_funcs, &candidate->iterator_funcs, sizeof(zend_class_iterator_funcs)); SET_ITERATOR_FUNC(zf_new_iterator); 
SET_ITERATOR_FUNC(zf_valid); SET_ITERATOR_FUNC(zf_current); SET_ITERATOR_FUNC(zf_key); SET_ITERATOR_FUNC(zf_next); SET_ITERATOR_FUNC(zf_rewind); #undef SET_ITERATOR_FUNC } /* }}} */ /* {{{ */ static void prepare_class_traits(pthreads_object_t* thread, zend_class_entry *candidate, zend_class_entry *prepared) { if (candidate->num_traits) { uint trait; prepared->traits = emalloc(sizeof(zend_class_entry*) * candidate->num_traits); for (trait=0; trait<candidate->num_traits; trait++) prepared->traits[trait] = pthreads_prepared_entry(thread, candidate->traits[trait]); prepared->num_traits = candidate->num_traits; if (candidate->trait_aliases) { size_t alias = 0; while (candidate->trait_aliases[alias]) { alias++; } prepared->trait_aliases = emalloc(sizeof(zend_trait_alias*) * (alias+1)); alias = 0; while (candidate->trait_aliases[alias]) { prepared->trait_aliases[alias] = pthreads_preparation_copy_trait_alias( thread, candidate->trait_aliases[alias] ); alias++; } prepared->trait_aliases[alias]=NULL; } else prepared->trait_aliases = NULL; if (candidate->trait_precedences) { size_t precedence = 0; while (candidate->trait_precedences[precedence]) { precedence++; } prepared->trait_precedences = emalloc(sizeof(zend_trait_precedence*) * (precedence+1)); precedence = 0; while (candidate->trait_precedences[precedence]) { prepared->trait_precedences[precedence] = pthreads_preparation_copy_trait_precedence( thread, candidate->trait_precedences[precedence] ); precedence++; } prepared->trait_precedences[precedence]=NULL; } else prepared->trait_precedences = NULL; } else { prepared->num_traits = 0; prepared->trait_aliases = 0; prepared->trait_precedences = 0; } } /* }}} */ /* {{{ */ static zend_class_entry* pthreads_complete_entry(pthreads_object_t* thread, zend_class_entry *candidate, zend_class_entry *prepared) { if (candidate->parent) { prepared->parent = pthreads_prepared_entry(thread, candidate->parent); } if (candidate->num_interfaces) { uint interface; prepared->interfaces = 
emalloc(sizeof(zend_class_entry*) * candidate->num_interfaces); for(interface=0; interface<candidate->num_interfaces; interface++) prepared->interfaces[interface] = pthreads_prepared_entry(thread, candidate->interfaces[interface]); prepared->num_interfaces = candidate->num_interfaces; } else prepared->num_interfaces = 0; prepare_class_traits(thread, candidate, prepared); prepare_class_handlers(candidate, prepared); // if this is an unbound anonymous class, then this will be the second copy, // where all inherited functions will be copied prepare_class_function_table(candidate, prepared); prepare_class_interceptors(candidate, prepared); return prepared; } /* }}} */ /* {{{ */ static zend_class_entry* pthreads_copy_entry(pthreads_object_t* thread, zend_class_entry *candidate) { zend_class_entry *prepared; prepared = zend_arena_alloc(&CG(arena), sizeof(zend_class_entry)); prepared->name = zend_string_new(candidate->name); prepared->type = candidate->type; zend_initialize_class_data(prepared, 1); prepared->ce_flags = candidate->ce_flags; prepared->refcount = 1; memcpy(&prepared->info.user, &candidate->info.user, sizeof(candidate->info.user)); if ((thread->options & PTHREADS_INHERIT_COMMENTS) && (candidate->info.user.doc_comment)) { prepared->info.user.doc_comment = zend_string_new(candidate->info.user.doc_comment); } else prepared->info.user.doc_comment = NULL; if (prepared->info.user.filename) { prepared->info.user.filename = zend_string_new(candidate->info.user.filename); } prepare_class_property_table(thread, candidate, prepared); if (candidate->ce_flags & ZEND_ACC_ANON_CLASS && !(prepared->ce_flags & ZEND_ACC_ANON_BOUND)) { // this first copy will copy all declared functions on the unbound anonymous class prepare_class_function_table(candidate, prepared); prepare_class_interceptors(candidate, prepared); return prepared; } return pthreads_complete_entry(thread, candidate, prepared); } /* }}} */ /* {{{ */ static inline int pthreads_prepared_entry_function_prepare(zval 
*bucket, int argc, va_list argv, zend_hash_key *key) { zend_function *function = (zend_function*) Z_PTR_P(bucket); pthreads_object_t* thread = va_arg(argv, pthreads_object_t*); zend_class_entry *prepared = va_arg(argv, zend_class_entry*); zend_class_entry *candidate = va_arg(argv, zend_class_entry*); zend_class_entry *scope = function->common.scope; if (function->type == ZEND_USER_FUNCTION) { if (scope == candidate) { function->common.scope = prepared; } else { if (function->common.scope->type == ZEND_USER_CLASS) { function->common.scope = pthreads_prepared_entry(thread, function->common.scope); } } /* runtime cache relies on immutable scope, so if scope changed, reallocate runtime cache */ /* IT WOULD BE NICE IF THIS WERE DOCUMENTED SOMEWHERE OTHER THAN PHP-SRC */ if (!function->op_array.run_time_cache || function->common.scope != scope) { zend_op_array *op_array = &function->op_array; op_array->run_time_cache = emalloc(op_array->cache_size); memset(op_array->run_time_cache, 0, op_array->cache_size); op_array->fn_flags |= ZEND_ACC_NO_RT_ARENA; } } return ZEND_HASH_APPLY_KEEP; } /* }}} */ /* {{{ */ static inline void pthreads_prepare_closures(pthreads_object_t *thread) { Bucket *bucket; ZEND_HASH_FOREACH_BUCKET(PTHREADS_CG(thread->creator.ls, function_table), bucket) { zend_function *function = Z_PTR(bucket->val), *prepared; zend_string *named; if (function->common.fn_flags & ZEND_ACC_CLOSURE) { if (zend_hash_exists(CG(function_table), bucket->key)) { continue; } named = zend_string_new(bucket->key); prepared = pthreads_copy_function(function); if (!zend_hash_add_ptr(CG(function_table), named, prepared)) { destroy_op_array((zend_op_array*) prepared); } zend_string_release(named); } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ zend_class_entry* pthreads_prepared_entry(pthreads_object_t* thread, zend_class_entry *candidate) { return pthreads_create_entry(thread, candidate, 1); } /* }}} */ /* {{{ */ zend_class_entry* pthreads_create_entry(pthreads_object_t* 
thread, zend_class_entry *candidate, int do_late_bindings) { zend_class_entry *prepared = NULL; zend_string *lookup = NULL; if (!candidate) { return NULL; } if (candidate->type == ZEND_INTERNAL_CLASS) { return zend_lookup_class(candidate->name); } lookup = zend_string_tolower(candidate->name); if ((prepared = zend_hash_find_ptr(EG(class_table), lookup))) { zend_string_release(lookup); if(prepared->create_object == NULL && candidate->create_object != NULL) { return pthreads_complete_entry(thread, candidate, prepared); } return prepared; } if (!(prepared = pthreads_copy_entry(thread, candidate))) { zend_string_release(lookup); return NULL; } zend_hash_update_ptr(EG(class_table), lookup, prepared); if(do_late_bindings) { pthreads_prepared_entry_late_bindings(thread, candidate, prepared); } pthreads_prepare_closures(thread); zend_hash_apply_with_arguments( &prepared->function_table, pthreads_prepared_entry_function_prepare, 3, thread, prepared, candidate); zend_string_release(lookup); return prepared; } /* }}} */ /* {{{ */ void pthreads_prepared_entry_late_bindings(pthreads_object_t* thread, zend_class_entry *candidate, zend_class_entry *prepared) { prepare_class_statics(thread, candidate, prepared); prepare_class_constants(thread, candidate, prepared); } /* }}} */ /* {{{ */ void pthreads_context_late_bindings(pthreads_object_t* thread) { zend_class_entry *entry; zend_string *name; ZEND_HASH_FOREACH_STR_KEY_PTR(PTHREADS_CG(thread->local.ls, class_table), name, entry) { if (entry->type != ZEND_INTERNAL_CLASS) { pthreads_prepared_entry_late_bindings(thread, zend_hash_find_ptr(PTHREADS_CG(thread->creator.ls, class_table), name), entry); } } ZEND_HASH_FOREACH_END(); } /* {{{ */ static inline zend_bool pthreads_constant_exists(zend_string *name) { int retval = 1; zend_string *lookup; if (!zend_hash_exists(EG(zend_constants), name)) { lookup = zend_string_tolower(name); retval = zend_hash_exists(EG(zend_constants), lookup); zend_string_release(lookup); } return retval; } /* 
}}} */ /* {{{ */ static inline void pthreads_prepare_ini(pthreads_object_t* thread) { zend_ini_entry *entry[2]; zend_string *name; HashTable *table[2] = {PTHREADS_EG(thread->creator.ls, ini_directives), EG(ini_directives)}; if (!(thread->options & PTHREADS_ALLOW_HEADERS)) { zend_alter_ini_entry_chars( PTHREADS_G(strings).session.cache_limiter, "nocache", sizeof("nocache")-1, PHP_INI_USER, PHP_INI_STAGE_ACTIVATE); zend_alter_ini_entry_chars( PTHREADS_G(strings).session.use_cookies, "0", sizeof("0")-1, PHP_INI_USER, PHP_INI_STAGE_ACTIVATE); } ZEND_HASH_FOREACH_STR_KEY_PTR(table[0], name, entry[0]) { if ((entry[1] = zend_hash_find_ptr(table[1], name))) { if (entry[0]->value && entry[1]->value) { if (strcmp(ZSTR_VAL(entry[0]->value), ZSTR_VAL(entry[1]->value)) != SUCCESS) { zend_bool resmod = entry[1]->modifiable; zend_string *copied = zend_string_new(name); if (!EG(modified_ini_directives)) { ALLOC_HASHTABLE(EG(modified_ini_directives)); zend_hash_init(EG(modified_ini_directives), 8, NULL, NULL, 0); } if (!entry[1]->modified) { entry[1]->orig_value = entry[1]->value; entry[1]->orig_modifiable = entry[1]->modifiable; entry[1]->modified = 1; zend_hash_add_ptr(EG(modified_ini_directives), copied, entry[1]); } entry[1]->modifiable = 1; entry[1]->on_modify(entry[1], entry[0]->value, entry[1]->mh_arg1, entry[1]->mh_arg2, entry[1]->mh_arg3, ZEND_INI_SYSTEM); if (entry[1]->modified && entry[1]->orig_value != entry[1]->value) { zend_string_release(entry[1]->value); } entry[1]->value = zend_string_new(entry[0]->value); entry[1]->modifiable = resmod; zend_string_release(copied); } } } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static inline void pthreads_prepare_constants(pthreads_object_t* thread) { zend_constant *zconstant; zend_string *name; ZEND_HASH_FOREACH_STR_KEY_PTR(PTHREADS_EG(thread->creator.ls, zend_constants), name, zconstant) { if (zconstant->name) { if (strncmp(name->val, "STDIN", name->len-1)==0|| strncmp(name->val, "STDOUT", name->len-1)==0|| 
strncmp(name->val, "STDERR", name->len-1)==0){ continue; } else { zend_constant constant; if (!pthreads_constant_exists(name)) { constant.flags = zconstant->flags; constant.module_number = zconstant->module_number; constant.name = zend_string_new(name); switch((Z_TYPE_INFO(constant.value)=Z_TYPE(zconstant->value))) { case IS_TRUE: case IS_FALSE: case IS_LONG: { Z_LVAL(constant.value)=Z_LVAL(zconstant->value); } break; case IS_DOUBLE: Z_DVAL(constant.value)=Z_DVAL(zconstant->value); break; case IS_STRING: { #if PHP_VERSION_ID >= 70300 Z_STR(constant.value)=Z_STR(zconstant->value); #else ZVAL_NEW_STR(&constant.value, zend_string_new(Z_STR(zconstant->value))); #endif } break; case IS_ARRAY: { pthreads_store_separate(&zconstant->value, &constant.value, 1); } break; } zend_register_constant(&constant); } } } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static inline void pthreads_prepare_functions(pthreads_object_t* thread) { zend_string *key, *name; zend_function *value = NULL, *prepared = NULL; ZEND_HASH_FOREACH_STR_KEY_PTR(PTHREADS_CG(thread->creator.ls, function_table), key, value) { if (value->type == ZEND_INTERNAL_FUNCTION || zend_hash_exists(PTHREADS_CG(thread->local.ls, function_table), key)) continue; name = zend_string_new(key); prepared = pthreads_copy_function(value); if (!zend_hash_add_ptr(CG(function_table), name, prepared)) { destroy_op_array((zend_op_array*)prepared); } zend_string_release(name); } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static inline void pthreads_prepare_classes(pthreads_object_t* thread) { zend_class_entry *entry; zend_string *name; ZEND_HASH_FOREACH_STR_KEY_PTR(PTHREADS_CG(thread->creator.ls, class_table), name, entry) { if (!zend_hash_exists(PTHREADS_CG(thread->local.ls, class_table), name) && ZSTR_VAL(name)[0] != '\0') { pthreads_create_entry(thread, entry, 0); } } ZEND_HASH_FOREACH_END(); pthreads_context_late_bindings(thread); } /* }}} */ /* {{{ */ static inline void pthreads_prepare_includes(pthreads_object_t* thread) 
{ zend_string *file; ZEND_HASH_FOREACH_STR_KEY(&PTHREADS_EG(thread->creator.ls, included_files), file) { zend_string *name = zend_string_new(file); zend_hash_add_empty_element(&EG(included_files), name); zend_string_release(name); } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ */ static inline void pthreads_prepare_exception_handler(pthreads_object_t* thread) { zval *handler = &PTHREADS_EG(thread->creator.ls, user_exception_handler); if (thread->options & (PTHREADS_INHERIT_CLASSES|PTHREADS_INHERIT_FUNCTIONS)) { if (Z_TYPE_P(handler) != IS_UNDEF) { if (Z_TYPE_P(handler) == IS_ARRAY) { if (zend_hash_num_elements(Z_ARRVAL_P(handler)) > 1) { if (!(thread->options & PTHREADS_INHERIT_CLASSES)) { return; } } else if(!(thread->options & PTHREADS_INHERIT_FUNCTIONS)) { return; } } pthreads_store_separate(handler, &EG(user_exception_handler), 1); } } } /* }}} */ /* {{{ */ static inline void pthreads_prepare_resource_destructor(pthreads_object_t* thread) { if (!PTHREADS_G(default_resource_dtor)) PTHREADS_G(default_resource_dtor)=(EG(regular_list).pDestructor); EG(regular_list).pDestructor = (dtor_func_t) pthreads_prepared_resource_dtor; } /* }}} */ /* {{{ */ static inline void pthreads_prepare_sapi(pthreads_object_t* thread) { SG(sapi_started) = 0; if (!(thread->options & PTHREADS_ALLOW_HEADERS)) { SG(headers_sent)=1; SG(request_info).no_headers = 1; } } /* }}} */ /* {{{ */ static inline void pthreads_rebuild_object(zval *zv) { if (Z_TYPE_P(zv) == IS_OBJECT) { rebuild_object_properties(Z_OBJ_P(zv)); } else if (Z_TYPE_P(zv) == IS_ARRAY) { zval *object = zend_hash_index_find(Z_ARRVAL_P(zv), 0); if (object && Z_TYPE_P(object) == IS_OBJECT) { rebuild_object_properties(Z_OBJ_P(object)); } } } /* }}} */ /* {{{ */ void pthreads_prepare_parent(pthreads_object_t *thread) { if (Z_TYPE(EG(user_exception_handler)) != IS_UNDEF) pthreads_rebuild_object(&EG(user_exception_handler)); } /* }}} */ /* {{{ */ int pthreads_prepared_startup(pthreads_object_t* thread, pthreads_monitor_t *ready) { 
PTHREADS_PREPARATION_BEGIN_CRITICAL() { thread->local.id = pthreads_self(); thread->local.ls = ts_resource(0); TSRMLS_CACHE_UPDATE(); SG(server_context) = PTHREADS_SG(thread->creator.ls, server_context); PG(expose_php) = 0; PG(auto_globals_jit) = 0; php_request_startup(); pthreads_prepare_sapi(thread); if (thread->options & PTHREADS_INHERIT_INI) pthreads_prepare_ini(thread); if (thread->options & PTHREADS_INHERIT_CONSTANTS) pthreads_prepare_constants(thread); if (thread->options & PTHREADS_INHERIT_FUNCTIONS) pthreads_prepare_functions(thread); else pthreads_prepare_closures(thread); if (thread->options & PTHREADS_INHERIT_CLASSES) { pthreads_prepare_classes(thread); } else { pthreads_create_entry(thread, thread->std.ce, 0); pthreads_context_late_bindings(thread); } if (thread->options & PTHREADS_INHERIT_INCLUDES) pthreads_prepare_includes(thread); pthreads_prepare_exception_handler(thread); pthreads_prepare_resource_destructor(thread); pthreads_monitor_add(ready, PTHREADS_MONITOR_READY); } PTHREADS_PREPARATION_END_CRITICAL(); return SUCCESS; } /* }}} */ /* {{{ */ static inline int pthreads_resources_cleanup(zval *bucket) { if (pthreads_resources_kept(Z_RES_P(bucket))) { return ZEND_HASH_APPLY_REMOVE; } else return ZEND_HASH_APPLY_KEEP; } /* }}} */ /* {{{ */ int pthreads_prepared_shutdown(void) { PTHREADS_PREPARATION_BEGIN_CRITICAL() { zend_hash_apply(&EG(regular_list), pthreads_resources_cleanup); PG(report_memleaks) = 0; php_request_shutdown((void*)NULL); ts_free_thread(); } PTHREADS_PREPARATION_END_CRITICAL(); return SUCCESS; } /* }}} */ /* {{{ */ static zend_trait_alias * pthreads_preparation_copy_trait_alias(pthreads_object_t* thread, zend_trait_alias *alias) { zend_trait_alias *copy = ecalloc(1, sizeof(zend_trait_alias)); if (copy->trait_method) { copy->trait_method = pthreads_preparation_copy_trait_method_reference(thread, alias->trait_method); } if (copy->alias) { copy->alias = zend_string_new(alias->alias); } copy->modifiers = alias->modifiers; return copy; 
} /* }}} */ /* {{{ */ static zend_trait_precedence * pthreads_preparation_copy_trait_precedence(pthreads_object_t* thread, zend_trait_precedence *precedence) { zend_trait_precedence *copy = ecalloc(1, sizeof(zend_trait_precedence)); copy->trait_method = pthreads_preparation_copy_trait_method_reference(thread, precedence->trait_method); if (precedence->exclude_from_classes) { copy->exclude_from_classes = emalloc(sizeof(*copy->exclude_from_classes)); copy->exclude_from_classes->ce = pthreads_prepared_entry( thread, precedence->exclude_from_classes->ce ); copy->exclude_from_classes->class_name = zend_string_new(precedence->exclude_from_classes->class_name); } return copy; } /* }}} */ /* {{{ */ static zend_trait_method_reference * pthreads_preparation_copy_trait_method_reference(pthreads_object_t* thread, zend_trait_method_reference *reference) { zend_trait_method_reference *copy = ecalloc(1, sizeof(zend_trait_method_reference)); copy->method_name = zend_string_new(reference->method_name); if (reference->class_name) { copy->class_name = zend_string_new(reference->class_name); } copy->ce = pthreads_prepared_entry(thread, (zend_class_entry*) reference->ce); return copy; } /* }}} */ /* {{{ */ static void pthreads_prepared_resource_dtor(zval *zv) { zend_try { if (!pthreads_resources_kept(Z_RES_P(zv)) && PTHREADS_G(default_resource_dtor)){ PTHREADS_G(default_resource_dtor)(zv); } } zend_end_try(); } /* }}} */ #endif
523141.c
/*
 * Renesas SCP/MCP Software
 * Copyright (c) 2020-2021, Renesas Electronics Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <rcar_scmi.h>
#include <internal/scmi.h>
#include <mod_scmi.h>
#include <mod_smt.h>
#include <fwk_element.h>
#include <fwk_id.h>
#include <fwk_macros.h>
#include <fwk_module.h>
#include <fwk_module_idx.h>

/*
 * One SCMI service (mailbox channel) per agent. Every entry binds the
 * service to its SMT shared-memory transport element of the same index and
 * records which agent owns the channel. The table is terminated by a
 * zero-filled entry at RCAR_SCMI_SERVICE_IDX_COUNT, as the framework expects.
 */
static const struct fwk_element element_table[] = {
    [RCAR_SCMI_SERVICE_IDX_PSCI] = {
        .name = "PSCI",
        .data = &(struct mod_scmi_service_config) {
            .transport_id = FWK_ID_ELEMENT_INIT(
                FWK_MODULE_IDX_SMT,
                RCAR_SCMI_SERVICE_IDX_PSCI),
            .transport_api_id = FWK_ID_API_INIT(
                FWK_MODULE_IDX_SMT,
                MOD_SMT_API_IDX_SCMI_TRANSPORT),
            .transport_notification_init_id =
                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
            .scmi_agent_id = SCMI_AGENT_ID_PSCI,
        },
    },
    [RCAR_SCMI_SERVICE_IDX_OSPM] = {
        .name = "OSPM",
        .data = &(struct mod_scmi_service_config) {
            .transport_id = FWK_ID_ELEMENT_INIT(
                FWK_MODULE_IDX_SMT,
                RCAR_SCMI_SERVICE_IDX_OSPM),
            .transport_api_id = FWK_ID_API_INIT(
                FWK_MODULE_IDX_SMT,
                MOD_SMT_API_IDX_SCMI_TRANSPORT),
            .transport_notification_init_id =
                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
            .scmi_agent_id = SCMI_AGENT_ID_OSPM,
        },
    },
    [RCAR_SCMI_SERVICE_IDX_VMM] = {
        .name = "VMM",
        .data = &(struct mod_scmi_service_config) {
            .transport_id = FWK_ID_ELEMENT_INIT(
                FWK_MODULE_IDX_SMT,
                RCAR_SCMI_SERVICE_IDX_VMM),
            .transport_api_id = FWK_ID_API_INIT(
                FWK_MODULE_IDX_SMT,
                MOD_SMT_API_IDX_SCMI_TRANSPORT),
            .transport_notification_init_id =
                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
            .scmi_agent_id = SCMI_AGENT_ID_VMM,
        },
    },
    [RCAR_SCMI_SERVICE_IDX_VM1] = {
        .name = "VM1",
        .data = &(struct mod_scmi_service_config) {
            .transport_id = FWK_ID_ELEMENT_INIT(
                FWK_MODULE_IDX_SMT,
                RCAR_SCMI_SERVICE_IDX_VM1),
            .transport_api_id = FWK_ID_API_INIT(
                FWK_MODULE_IDX_SMT,
                MOD_SMT_API_IDX_SCMI_TRANSPORT),
            .transport_notification_init_id =
                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
            .scmi_agent_id = SCMI_AGENT_ID_VM1,
        },
    },
    [RCAR_SCMI_SERVICE_IDX_VM2] = {
        .name = "VM2",
        .data = &(struct mod_scmi_service_config) {
            .transport_id = FWK_ID_ELEMENT_INIT(
                FWK_MODULE_IDX_SMT,
                RCAR_SCMI_SERVICE_IDX_VM2),
            .transport_api_id = FWK_ID_API_INIT(
                FWK_MODULE_IDX_SMT,
                MOD_SMT_API_IDX_SCMI_TRANSPORT),
            .transport_notification_init_id =
                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
            .scmi_agent_id = SCMI_AGENT_ID_VM2,
        },
    },
    [RCAR_SCMI_SERVICE_IDX_COUNT] = { 0 }, /* framework table terminator */
};

/* Framework element-table callback: the table is static, so the module id
 * parameter is intentionally unused. */
static const struct fwk_element *get_element_table(fwk_id_t module_id)
{
    return element_table;
}

/*
 * SCMI agent descriptors, indexed by agent id. PSCI is the secure-world
 * agent; OSPM and the VM* entries are normal-world OSPM-type agents.
 */
static const struct mod_scmi_agent agent_table[] = {
    [SCMI_AGENT_ID_OSPM] = {
        .type = SCMI_AGENT_TYPE_OSPM,
        .name = "OSPM",
    },
    [SCMI_AGENT_ID_PSCI] = {
        .type = SCMI_AGENT_TYPE_PSCI,
        .name = "PSCI",
    },
    [SCMI_AGENT_ID_VMM] = {
        .type = SCMI_AGENT_TYPE_OSPM,
        .name = "VMM",
    },
    [SCMI_AGENT_ID_VM1] = {
        .type = SCMI_AGENT_TYPE_OSPM,
        .name = "VM1",
    },
    [SCMI_AGENT_ID_VM2] = {
        .type = SCMI_AGENT_TYPE_OSPM,
        .name = "VM2",
    },
};

/* Top-level configuration consumed by the generic SCMI module. */
struct fwk_module_config config_scmi = {
    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(get_element_table),
    .data = &((struct mod_scmi_config){
        .protocol_count_max = 9,
#ifndef BUILD_HAS_MOD_RESOURCE_PERMS
        /* No protocols are disabled for PSCI agents */
        .dis_protocol_count_psci = 0,
        .dis_protocol_list_psci = NULL,
#endif
        /* NOTE(review): the -1 presumably accounts for a reserved slot in
         * agent_table (agent id 0 is typically the platform) — confirm
         * against the SCMI agent id assignments in rcar_scmi.h. */
        .agent_count = FWK_ARRAY_SIZE(agent_table) - 1,
        .agent_table = agent_table,
        .vendor_identifier = "renesas",
        .sub_vendor_identifier = "renesas",
    }),
};
54131.c
/*
    ChibiOS - Copyright (C) 2006..2015 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "ch.h"
#include "hal.h"
#include "test.h"

/*
 * Blinker thread #1: toggles LED4 with a 500 ms period (250 ms on/off).
 * 128 bytes of working area is enough because the thread only calls the
 * PAL and sleep services.
 */
static THD_WORKING_AREA(waThread1, 128);
static THD_FUNCTION(Thread1, arg) {

  (void)arg;
  chRegSetThreadName("blinker");
  while (true) {
    palSetPad(GPIOC, GPIOC_LED4);
    chThdSleepMilliseconds(250);
    palClearPad(GPIOC, GPIOC_LED4);
    chThdSleepMilliseconds(250);
  }
}

/*
 * Blinker thread #2: toggles LED3 with a 1 s period (500 ms on/off).
 */
static THD_WORKING_AREA(waThread2, 128);
static THD_FUNCTION(Thread2, arg) {

  (void)arg;
  chRegSetThreadName("blinker");
  while (true) {
    palSetPad(GPIOC, GPIOC_LED3);
    chThdSleepMilliseconds(500);
    palClearPad(GPIOC, GPIOC_LED3);
    chThdSleepMilliseconds(500);
  }
}

/*
 * Application entry point.
 */
int main(void) {

  /*
   * System initializations.
   * - HAL initialization, this also initializes the configured device drivers
   *   and performs the board-specific initializations.
   * - Kernel initialization, the main() function becomes a thread and the
   *   RTOS is active.
   */
  halInit();
  chSysInit();

  /*
   * Activates the serial driver 1 using the driver default configuration.
   * PA9(TX) and PA10(RX) are routed to USART1.
   */
  sdStart(&SD1, NULL);

  /*
   * Creates the example threads.
   * Both blinkers run at NORMALPRIO+1 so they preempt this main thread.
   */
  chThdCreateStatic(waThread1, sizeof(waThread1), NORMALPRIO+1, Thread1, NULL);
  chThdCreateStatic(waThread2, sizeof(waThread2), NORMALPRIO+1, Thread2, NULL);

  /*
   * Normal main() thread activity, in this demo it does nothing except
   * sleeping in a loop and check the button state, when the button is
   * pressed the test procedure is launched.
   */
  while (true) {
    /* Button is polled every 500 ms; the test suite reports over SD1. */
    if (palReadPad(GPIOA, GPIOA_BUTTON))
      TestThread(&SD1);
    chThdSleepMilliseconds(500);
  }
}
73677.c
#include "common.h"

/*
 * Record the failure message and park the CPU. Never returns.
 */
void halt(const char *msg)
{
    /* static + volatile keeps the message reachable from a debugger even
       though nothing in the program reads it back. */
    static volatile const char *stored_reason;

    stored_reason = msg;
    (void) stored_reason;

    asm volatile ("bkpt"); /* drop into the debugger if one is attached */

    for (;;) {
        /* spin forever should execution resume past the breakpoint */
    }
}

/*
 * Assert with a message: halts the system when the condition is false.
 * The condition is hinted as likely-true so the happy path stays cheap.
 */
__attribute__ ((always_inline))
inline void assertm(bool condition, const char *msg)
{
    if (__builtin_expect(condition, 1))
        return;
    halt(msg);
}
504607.c
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdint.h> #include <sys/param.h> #include "esp_attr.h" #include "esp32s3/clk.h" #include "soc/rtc.h" #define MHZ (1000000) // g_ticks_us defined in ROMs for PRO and APP CPU extern uint32_t g_ticks_per_us_pro; int IRAM_ATTR esp_clk_cpu_freq(void) { return g_ticks_per_us_pro * MHZ; } int IRAM_ATTR esp_clk_apb_freq(void) { return MIN(g_ticks_per_us_pro, 80) * MHZ; } int IRAM_ATTR esp_clk_xtal_freq(void) { return rtc_clk_xtal_freq_get() * MHZ; } void IRAM_ATTR ets_update_cpu_frequency(uint32_t ticks_per_us) { /* Update scale factors used by esp_rom_delay_us */ g_ticks_per_us_pro = ticks_per_us; }
744455.c
/* fips_cmactest.c */ /* Written by Dr Stephen N Henson ([email protected]) for the OpenSSL * project 2005. */ /* ==================================================================== * Copyright (c) 2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * ([email protected]). This product includes software written by Tim * Hudson ([email protected]). * */ #define OPENSSL_FIPSAPI #include <stdio.h> #include <ctype.h> #include <string.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/cmac.h> #include <openssl/err.h> #include <openssl/bn.h> #ifndef OPENSSL_FIPS int main(int argc, char *argv[]) { printf("No FIPS CMAC support\n"); return(0); } #else #include <openssl/fips.h> #include "fips_utl.h" static int cmac_test(const EVP_CIPHER *cipher, FILE *out, FILE *in, int mode, int Klen_counts_keys, int known_keylen); static int print_cmac_gen(const EVP_CIPHER *cipher, FILE *out, unsigned char *Key, int Klen, unsigned char *Msg, int Msglen, int Tlen); static int print_cmac_ver(const EVP_CIPHER *cipher, FILE *out, unsigned char *Key, int Klen, unsigned char *Msg, int Msglen, unsigned char *Mac, int Maclen, int Tlen); #ifdef FIPS_ALGVS int fips_cmactest_main(int argc, char **argv) #else int main(int argc, char **argv) #endif { FILE *in = NULL, *out = NULL; int mode = 0; /* 0 => Generate, 1 => Verify */ int Klen_counts_keys = 0; /* 0 => Klen is size of one key 1 => Klen is amount of keys */ int known_keylen = 0; /* Only set when Klen_counts_keys = 1 */ const EVP_CIPHER *cipher = 0; int ret = 1; fips_algtest_init(); while (argc > 1 && 
argv[1][0] == '-') { switch (argv[1][1]) { case 'a': { char *p = &argv[1][2]; if (*p == '\0') { if (argc <= 2) { fprintf(stderr, "Option %s needs a value\n", argv[1]); goto end; } argv++; argc--; p = &argv[1][0]; } if (!strcmp(p, "aes128")) cipher = EVP_aes_128_cbc(); else if (!strcmp(p, "aes192")) cipher = EVP_aes_192_cbc(); else if (!strcmp(p, "aes256")) cipher = EVP_aes_256_cbc(); else if (!strcmp(p, "tdea3") || !strcmp(p, "tdes3")) { cipher = EVP_des_ede3_cbc(); Klen_counts_keys = 1; known_keylen = 8; } else { fprintf(stderr, "Unknown algorithm %s\n", p); goto end; } } break; case 'g': mode = 0; break; case 'v': mode = 1; break; default: fprintf(stderr, "Unknown option %s\n", argv[1]); goto end; } argv++; argc--; } if (argc == 1) in = stdin; else in = fopen(argv[1], "r"); if (argc < 2) out = stdout; else out = fopen(argv[2], "w"); if (!in) { fprintf(stderr, "FATAL input initialization error\n"); goto end; } if (!out) { fprintf(stderr, "FATAL output initialization error\n"); goto end; } if (!cmac_test(cipher, out, in, mode, Klen_counts_keys, known_keylen)) { fprintf(stderr, "FATAL cmac file processing error\n"); goto end; } else ret = 0; end: if (in && (in != stdin)) fclose(in); if (out && (out != stdout)) fclose(out); return ret; } #define CMAC_TEST_MAXLINELEN 150000 int cmac_test(const EVP_CIPHER *cipher, FILE *out, FILE *in, int mode, int Klen_counts_keys, int known_keylen) { char *linebuf, *olinebuf, *p, *q; char *keyword, *value; unsigned char **Keys = NULL, *Msg = NULL, *Mac = NULL; unsigned char *Key = NULL; int Count, Klen, Mlen, Tlen; long Keylen, Msglen, Maclen; int ret = 0; int lnum = 0; olinebuf = OPENSSL_malloc(CMAC_TEST_MAXLINELEN); linebuf = OPENSSL_malloc(CMAC_TEST_MAXLINELEN); if (!linebuf || !olinebuf) goto error; Count = -1; Klen = -1; Mlen = -1; Tlen = -1; while (fgets(olinebuf, CMAC_TEST_MAXLINELEN, in)) { lnum++; strcpy(linebuf, olinebuf); keyword = linebuf; /* Skip leading space */ while (isspace((unsigned char)*keyword)) keyword++; /* 
Skip comments */ if (keyword[0] == '#') { if (fputs(olinebuf, out) < 0) goto error; continue; } /* Look for = sign */ p = strchr(linebuf, '='); /* If no = or starts with [ (for [L=20] line) just copy */ if (!p) { if (fputs(olinebuf, out) < 0) goto error; continue; } q = p - 1; /* Remove trailing space */ while (isspace((unsigned char)*q)) *q-- = 0; *p = 0; value = p + 1; /* Remove leading space from value */ while (isspace((unsigned char)*value)) value++; /* Remove trailing space from value */ p = value + strlen(value) - 1; while (*p == '\n' || isspace((unsigned char)*p)) *p-- = 0; if (!strcmp(keyword, "Count")) { if (Count != -1) goto parse_error; Count = atoi(value); if (Count < 0) goto parse_error; } else if (!strcmp(keyword, "Klen")) { if (Klen != -1) goto parse_error; Klen = atoi(value); if (Klen < 0) goto parse_error; if (Klen_counts_keys) { Keys = OPENSSL_malloc(sizeof(*Keys) * Klen); memset(Keys, '\0', sizeof(*Keys) * Klen); } else { Keys = OPENSSL_malloc(sizeof(*Keys)); memset(Keys, '\0', sizeof(*Keys)); } } else if (!strcmp(keyword, "Mlen")) { if (Mlen != -1) goto parse_error; Mlen = atoi(value); if (Mlen < 0) goto parse_error; } else if (!strcmp(keyword, "Tlen")) { if (Tlen != -1) goto parse_error; Tlen = atoi(value); if (Tlen < 0) goto parse_error; } else if (!strcmp(keyword, "Key") && !Klen_counts_keys) { if (Keys[0]) goto parse_error; Keys[0] = hex2bin_m(value, &Keylen); if (!Keys[0]) goto parse_error; } else if (!strncmp(keyword, "Key", 3) && Klen_counts_keys) { int keynum = atoi(keyword + 3); if (!keynum || keynum > Klen || Keys[keynum-1]) goto parse_error; Keys[keynum-1] = hex2bin_m(value, &Keylen); if (!Keys[keynum-1]) goto parse_error; } else if (!strcmp(keyword, "Msg")) { if (Msg) goto parse_error; Msg = hex2bin_m(value, &Msglen); if (!Msg) goto parse_error; } else if (!strcmp(keyword, "Mac")) { if (mode == 0) continue; if (Mac) goto parse_error; Mac = hex2bin_m(value, &Maclen); if (!Mac) goto parse_error; } else if (!strcmp(keyword, "Result")) 
{ if (mode == 1) continue; goto parse_error; } else goto parse_error; fputs(olinebuf, out); if (Keys && Msg && (!mode || Mac) && (Tlen > 0) && (Klen > 0)) { if (Klen_counts_keys) { int x; Key = OPENSSL_malloc(Klen * known_keylen); for (x = 0; x < Klen; x++) { memcpy(Key + x * known_keylen, Keys[x], known_keylen); OPENSSL_free(Keys[x]); } Klen *= known_keylen; } else { Key = OPENSSL_malloc(Klen); memcpy(Key, Keys[0], Klen); OPENSSL_free(Keys[0]); } OPENSSL_free(Keys); switch(mode) { case 0: if (!print_cmac_gen(cipher, out, Key, Klen, Msg, Mlen, Tlen)) goto error; break; case 1: if (!print_cmac_ver(cipher, out, Key, Klen, Msg, Mlen, Mac, Maclen, Tlen)) goto error; break; } OPENSSL_free(Key); Key = NULL; OPENSSL_free(Msg); Msg = NULL; OPENSSL_free(Mac); Mac = NULL; Klen = -1; Mlen = -1; Tlen = -1; Count = -1; } } ret = 1; error: if (olinebuf) OPENSSL_free(olinebuf); if (linebuf) OPENSSL_free(linebuf); if (Key) OPENSSL_free(Key); if (Msg) OPENSSL_free(Msg); if (Mac) OPENSSL_free(Mac); return ret; parse_error: fprintf(stderr, "FATAL parse error processing line %d\n", lnum); goto error; } static int print_cmac_gen(const EVP_CIPHER *cipher, FILE *out, unsigned char *Key, int Klen, unsigned char *Msg, int Mlen, int Tlen) { int rc, i; size_t reslen; unsigned char res[128]; CMAC_CTX *cmac_ctx = CMAC_CTX_new(); CMAC_Init(cmac_ctx, Key, Klen, cipher, 0); CMAC_Update(cmac_ctx, Msg, Mlen); if (!CMAC_Final(cmac_ctx, res, &reslen)) { fputs("Error calculating CMAC\n", stderr); rc = 0; } else if (Tlen > (int)reslen) { fputs("Parameter error, Tlen > CMAC length\n", stderr); rc = 0; } else { fputs("Mac = ", out); for (i = 0; i < Tlen; i++) fprintf(out, "%02x", res[i]); fputs(RESP_EOL, out); rc = 1; } CMAC_CTX_free(cmac_ctx); return rc; } static int print_cmac_ver(const EVP_CIPHER *cipher, FILE *out, unsigned char *Key, int Klen, unsigned char *Msg, int Mlen, unsigned char *Mac, int Maclen, int Tlen) { int rc = 1; size_t reslen; unsigned char res[128]; CMAC_CTX *cmac_ctx = 
CMAC_CTX_new(); CMAC_Init(cmac_ctx, Key, Klen, cipher, 0); CMAC_Update(cmac_ctx, Msg, Mlen); if (!CMAC_Final(cmac_ctx, res, &reslen)) { fputs("Error calculating CMAC\n", stderr); rc = 0; } else if (Tlen > (int)reslen) { fputs("Parameter error, Tlen > CMAC length\n", stderr); rc = 0; } else if (Tlen != Maclen) { fputs("Parameter error, Tlen != resulting Mac length\n", stderr); rc = 0; } else { if (!memcmp(Mac, res, Maclen)) fputs("Result = P" RESP_EOL, out); else fputs("Result = F" RESP_EOL, out); } CMAC_CTX_free(cmac_ctx); return rc; } #endif
763313.c
/*
 * Bitinverter.c
 *
 * Reads an integer "a" and a shift count "n", then prints a << n (a*2^n)
 * and a >> n (a*2^-n).
 *
 * Created on: 26 Oct 2017
 * Author: brauni
 */

#include <stdio.h>

int main(void)
{
    setbuf(stdout, NULL); // Console Fix: unbuffered stdout so prompts show

    // Create Variables
    int a;
    unsigned int n;
    int ergebnis_positiv;
    int ergebnis_negativ;

    // Define Variables by Userinput
    printf("Bitte geben sie eine ganze Zahl \"a\" ein: ");
    if (scanf("%i", &a) != 1) {
        fprintf(stderr, "Invalid input for \"a\"\n");
        return 1;
    }

    printf("Bitte geben sie eine natürliche Zahl \"n\" ein: ");
    /* BUGFIX: n is unsigned int, so the matching conversion is %u; the
     * original %i/&n pairing is undefined behavior. A shift count >= the
     * width of int is also UB, so reject it here. */
    if (scanf("%u", &n) != 1 || n >= 32) {
        fprintf(stderr, "Invalid input for \"n\" (expected 0..31)\n");
        return 1;
    }

    // Bitverschiebungen (bit shifts)
    ergebnis_positiv = a << n;
    ergebnis_negativ = a >> n;

    // Ausgabe der Ergebnisse (%u for the unsigned shift count)
    printf("a*2^n = %i*2^%u = %i\n", a, n, ergebnis_positiv);
    printf("a*2^-n = %i*2^-%u = %i", a, n, ergebnis_negativ);

    return 0;
}
709402.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "ant_buf.h" void ant_buf_init(ant_buf_t *buf, int length) { assert(buf != NULL); assert(length >= 0); buf->buf = (length == 0) ? NULL : malloc(length); buf->length = length; buf->ncurr = 0; /*ncurr record the current number of bytes*/ buf->nm_alloc = 0; /* The number of memory allocation */ buf->flag = 0; /* 1 is heap, 0 is stack*/ } ant_buf_t * ant_buf_new(int length) { ant_buf_t *buf = malloc(sizeof(ant_buf_t)); assert(buf != NULL); ant_buf_init(buf, length); buf->flag = 1; return buf; } int ant_buf_append(ant_buf_t *buf, void *elem, int size) { int needed; char *newp; assert(buf != NULL); assert(elem != NULL); assert(size >= 0); if (buf->buf == NULL || buf->length == 0) { buf->length = size + 500; buf->buf = malloc(buf->length); if (buf->buf == NULL) return -1; buf->nm_alloc = 1; } else { needed = buf->ncurr + size + 1; if (needed > buf->length) { if (needed < 2 * buf->length) { needed = 2 * buf->length; } newp = calloc(needed, 1); if (newp == NULL) return -1; buf->nm_alloc++; memcpy(newp, buf->buf, buf->ncurr); /* free old */ free(buf->buf); buf->buf = newp; buf->length = needed; } } memcpy(buf->buf + buf->ncurr, elem, size); buf->ncurr += size; return 0; } void ant_buf_free(ant_buf_t *buf) { assert(buf != NULL); free(buf->buf); if (buf->flag) free(buf); } void ant_buf_clear(ant_buf_t *buf) { assert(buf != NULL); memset(buf->buf, '\0', buf->ncurr); buf->ncurr = 0; }
19166.c
#include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/writeback.h> #include <linux/falloc.h> #include "super.h" #include "mds_client.h" #include "cache.h" /* * Ceph file operations * * Implement basic open/close functionality, and implement * read/write. * * We implement three modes of file I/O: * - buffered uses the generic_file_aio_{read,write} helpers * * - synchronous is used when there is multi-client read/write * sharing, avoids the page cache, and synchronously waits for an * ack from the OSD. * * - direct io takes the variant of the sync path that references * user pages directly. * * fsync() flushes and waits on dirty pages, but just queues metadata * for writeback: since the MDS can recover size and mtime there is no * need to wait for MDS acknowledgement. */ /* * Calculate the length sum of direct io vectors that can * be combined into one page vector. */ static size_t dio_get_pagev_size(const struct iov_iter *it) { const struct iovec *iov = it->iov; const struct iovec *iovend = iov + it->nr_segs; size_t size; size = iov->iov_len - it->iov_offset; /* * An iov can be page vectored when both the current tail * and the next base are page aligned. */ while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { size += iov->iov_len; } dout("dio_get_pagevlen len = %zu\n", size); return size; } /* * Allocate a page vector based on (@it, @nbytes). * The return value is the tuple describing a page vector, * that is (@pages, @page_align, @num_pages). 
*/ static struct page ** dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, size_t *page_align, int *num_pages) { struct iov_iter tmp_it = *it; size_t align; struct page **pages; int ret = 0, idx, npages; align = (unsigned long)(it->iov->iov_base + it->iov_offset) & (PAGE_SIZE - 1); npages = calc_pages_for(align, nbytes); pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); if (!pages) { pages = vmalloc(sizeof(*pages) * npages); if (!pages) return ERR_PTR(-ENOMEM); } for (idx = 0; idx < npages; ) { size_t start; ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, npages - idx, &start); if (ret < 0) goto fail; iov_iter_advance(&tmp_it, ret); nbytes -= ret; idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; } BUG_ON(nbytes != 0); *num_pages = npages; *page_align = align; dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); return pages; fail: ceph_put_page_vector(pages, idx, false); return ERR_PTR(ret); } /* * Prepare an open request. Preallocate ceph_cap to avoid an * inopportune ENOMEM later. */ static struct ceph_mds_request * prepare_open_request(struct super_block *sb, int flags, int create_mode) { struct ceph_fs_client *fsc = ceph_sb_to_client(sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; int want_auth = USE_ANY_MDS; int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) want_auth = USE_AUTH_MDS; req = ceph_mdsc_create_request(mdsc, op, want_auth); if (IS_ERR(req)) goto out; req->r_fmode = ceph_flags_to_mode(flags); req->r_args.open.flags = cpu_to_le32(flags); req->r_args.open.mode = cpu_to_le32(create_mode); out: return req; } /* * initialize private struct file data. 
* if we fail, clean up by dropping fmode reference on the ceph_inode */ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) { struct ceph_file_info *cf; int ret = 0; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; switch (inode->i_mode & S_IFMT) { case S_IFREG: /* First file open request creates the cookie, we want to keep * this cookie around for the filetime of the inode as not to * have to worry about fscache register / revoke / operation * races. * * Also, if we know the operation is going to invalidate data * (non readonly) just nuke the cache right away. */ ceph_fscache_register_inode_cookie(mdsc->fsc, ci); if ((fmode & CEPH_FILE_MODE_WR)) ceph_fscache_invalidate(inode); case S_IFDIR: dout("init_file %p %p 0%o (regular)\n", inode, file, inode->i_mode); cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO); if (cf == NULL) { ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ return -ENOMEM; } cf->fmode = fmode; cf->next_offset = 2; cf->readdir_cache_idx = -1; file->private_data = cf; BUG_ON(inode->i_fop->release != ceph_release); break; case S_IFLNK: dout("init_file %p %p 0%o (symlink)\n", inode, file, inode->i_mode); ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ break; default: dout("init_file %p %p 0%o (special)\n", inode, file, inode->i_mode); /* * we need to drop the open ref now, since we don't * have .release set to ceph_release. */ ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ BUG_ON(inode->i_fop->release == ceph_release); /* call the proper open fop */ ret = inode->i_fop->open(inode, file); } return ret; } /* * If we already have the requisite capabilities, we can satisfy * the open request locally (no need to request new caps from the * MDS). We do, however, need to inform the MDS (asynchronously) * if our wanted caps set expands. 
*/ int ceph_open(struct inode *inode, struct file *file) { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; struct ceph_file_info *cf = file->private_data; int err; int flags, fmode, wanted; if (cf) { dout("open file %p is already opened\n", file); return 0; } /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ flags = file->f_flags & ~(O_CREAT|O_EXCL); if (S_ISDIR(inode->i_mode)) flags = O_DIRECTORY; /* mds likes to know */ dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, ceph_vinop(inode), file, flags, file->f_flags); fmode = ceph_flags_to_mode(flags); wanted = ceph_caps_for_mode(fmode); /* snapped files are read-only */ if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) return -EROFS; /* trivially open snapdir */ if (ceph_snap(inode) == CEPH_SNAPDIR) { spin_lock(&ci->i_ceph_lock); __ceph_get_fmode(ci, fmode); spin_unlock(&ci->i_ceph_lock); return ceph_init_file(inode, file, fmode); } /* * No need to block if we have caps on the auth MDS (for * write) or any MDS (for read). Update wanted set * asynchronously. */ spin_lock(&ci->i_ceph_lock); if (__ceph_is_any_real_caps(ci) && (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { int mds_wanted = __ceph_caps_mds_wanted(ci); int issued = __ceph_caps_issued(ci, NULL); dout("open %p fmode %d want %s issued %s using existing\n", inode, fmode, ceph_cap_string(wanted), ceph_cap_string(issued)); __ceph_get_fmode(ci, fmode); spin_unlock(&ci->i_ceph_lock); /* adjust wanted? 
*/ if ((issued & wanted) != wanted && (mds_wanted & wanted) != wanted && ceph_snap(inode) != CEPH_SNAPDIR) ceph_check_caps(ci, 0, NULL); return ceph_init_file(inode, file, fmode); } else if (ceph_snap(inode) != CEPH_NOSNAP && (ci->i_snap_caps & wanted) == wanted) { __ceph_get_fmode(ci, fmode); spin_unlock(&ci->i_ceph_lock); return ceph_init_file(inode, file, fmode); } spin_unlock(&ci->i_ceph_lock); dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); req = prepare_open_request(inode->i_sb, flags, 0); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; } req->r_inode = inode; ihold(inode); req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); if (!err) err = ceph_init_file(inode, file, req->r_fmode); ceph_mdsc_put_request(req); dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); out: return err; } /* * Do a lookup + open with a single request. If we get a non-existent * file or symlink, return 1 so the VFS can retry. */ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned flags, umode_t mode, int *opened) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; struct dentry *dn; struct ceph_acls_info acls = {}; int err; dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", dir, dentry, dentry, d_unhashed(dentry) ? 
"unhashed" : "hashed", flags, mode); if (dentry->d_name.len > NAME_MAX) return -ENAMETOOLONG; err = ceph_init_dentry(dentry); if (err < 0) return err; if (flags & O_CREAT) { err = ceph_pre_init_acls(dir, &mode, &acls); if (err < 0) return err; } /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); if (IS_ERR(req)) { err = PTR_ERR(req); goto out_acl; } req->r_dentry = dget(dentry); req->r_num_caps = 2; if (flags & O_CREAT) { req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; if (acls.pagelist) { req->r_pagelist = acls.pagelist; acls.pagelist = NULL; } } req->r_locked_dir = dir; /* caller holds dir->i_mutex */ err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, req); err = ceph_handle_snapdir(req, dentry, err); if (err) goto out_req; if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); if (d_unhashed(dentry)) { dn = ceph_finish_lookup(req, dentry, err); if (IS_ERR(dn)) err = PTR_ERR(dn); } else { /* we were given a hashed negative dentry */ dn = NULL; } if (err) goto out_req; if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { /* make vfs retry on splice, ENOENT, or symlink */ dout("atomic_open finish_no_open on dn %p\n", dn); err = finish_no_open(file, dn); } else { dout("atomic_open finish_open on dn %p\n", dn); if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { ceph_init_inode_acls(d_inode(dentry), &acls); *opened |= FILE_CREATED; } err = finish_open(file, dentry, ceph_open, opened); } out_req: if (!req->r_err && req->r_target_inode) ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); ceph_mdsc_put_request(req); out_acl: ceph_release_acls_info(&acls); dout("atomic_open result=%d\n", err); return err; } int ceph_release(struct inode *inode, struct file *file) { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *cf = file->private_data; dout("release inode %p file %p\n", 
inode, file); ceph_put_fmode(ci, cf->fmode); if (cf->last_readdir) ceph_mdsc_put_request(cf->last_readdir); kfree(cf->last_name); kfree(cf->dir_info); kmem_cache_free(ceph_file_cachep, cf); /* wake up anyone waiting for caps on this inode */ wake_up_all(&ci->i_cap_wq); return 0; } enum { HAVE_RETRIED = 1, CHECK_EOF = 2, READ_INLINE = 3, }; /* * Read a range of bytes striped over one or more objects. Iterate over * objects we stripe over. (That's not atomic, but good enough for now.) * * If we get a short result from the OSD, check against i_size; we need to * only return a short read to the caller if we hit EOF. */ static int striped_read(struct inode *inode, u64 off, u64 len, struct page **pages, int num_pages, int *checkeof) { struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_inode_info *ci = ceph_inode(inode); u64 pos, this_len, left; loff_t i_size; int page_align, pages_left; int read, ret; struct page **page_pos; bool hit_stripe, was_short; /* * we may need to do multiple reads. not atomic, unfortunately. */ pos = off; left = len; page_pos = pages; pages_left = num_pages; read = 0; more: page_align = pos & ~PAGE_MASK; this_len = left; ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), &ci->i_layout, pos, &this_len, ci->i_truncate_seq, ci->i_truncate_size, page_pos, pages_left, page_align); if (ret == -ENOENT) ret = 0; hit_stripe = this_len < left; was_short = ret >= 0 && ret < this_len; dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read, ret, hit_stripe ? " HITSTRIPE" : "", was_short ? 
" SHORT" : ""); i_size = i_size_read(inode); if (ret >= 0) { int didpages; if (was_short && (pos + ret < i_size)) { int zlen = min(this_len - ret, i_size - pos - ret); int zoff = (off & ~PAGE_MASK) + read + ret; dout(" zero gap %llu to %llu\n", pos + ret, pos + ret + zlen); ceph_zero_page_vector_range(zoff, zlen, pages); ret += zlen; } didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; pos += ret; read = pos - off; left -= ret; page_pos += didpages; pages_left -= didpages; /* hit stripe and need continue*/ if (left && hit_stripe && pos < i_size) goto more; } if (read > 0) { ret = read; /* did we bounce off eof? */ if (pos + left > i_size) *checkeof = CHECK_EOF; } dout("striped_read returns %d\n", ret); return ret; } /* * Completely synchronous read and write methods. Direct from __user * buffer to osd, or directly to user pages (if O_DIRECT). * * If the read spans object boundary, just do multiple reads. */ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, int *checkeof) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct page **pages; u64 off = iocb->ki_pos; int num_pages, ret; size_t len = iov_iter_count(i); dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len, (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); if (!len) return 0; /* * flush any page cache pages in this range. this * will make concurrent normal and sync io slow, * but it will at least behave sensibly when they are * in sequence. 
 */
	/* flush dirty page cache over the target range before the sync read */
	ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
	if (ret < 0)
		return ret;

	num_pages = calc_pages_for(off, len);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = striped_read(inode, off, len, pages, num_pages, checkeof);
	if (ret > 0) {
		int l, k = 0;
		size_t left = ret;

		/* copy the read pages into the caller's iov_iter */
		while (left) {
			size_t page_off = off & ~PAGE_MASK;
			size_t copy = min_t(size_t, left,
					    PAGE_SIZE - page_off);
			l = copy_page_to_iter(pages[k++], page_off, copy, i);
			off += l;
			left -= l;
			/* short copy: the iterator ran out of space/pages */
			if (l < copy)
				break;
		}
	}
	ceph_release_page_vector(pages, num_pages);

	/* advance the file position by however many bytes were copied */
	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * State shared by all OSD requests belonging to one async direct I/O.
 */
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;		/* accumulated length of all sub-requests */
	int write;			/* non-zero for writes */
	int error;			/* first error seen (set via cmpxchg) */
	struct list_head osd_reqs;	/* OSD requests not yet submitted */
	unsigned num_reqs;		/* total sub-requests issued */
	atomic_t pending_reqs;		/* countdown to overall completion */
	struct timespec mtime;		/* mtime to stamp on retried writes */
	struct ceph_cap_flush *prealloc_cf;
};

/* deferred retry context for a write that raced a snapshot (-EOLDSNAPC) */
struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

/*
 * Called once per completed sub-request; performs the overall AIO
 * completion (size update, dirty caps, ki_complete) when the last
 * pending sub-request finishes.
 */
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	/* not the last sub-request yet */
	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	/* report the first recorded error, otherwise the total byte count */
	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		/* grow i_size if the write extended past EOF */
		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	/* drop the cap refs taken when the AIO was queued */
	ceph_put_cap_refs(ci, (aio_req->write ?
CEPH_CAP_FILE_WR : CEPH_CAP_FILE_RD)); aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); ceph_free_cap_flush(aio_req->prealloc_cf); kfree(aio_req); } static void ceph_aio_complete_req(struct ceph_osd_request *req, struct ceph_msg *msg) { int rc = req->r_result; struct inode *inode = req->r_inode; struct ceph_aio_request *aio_req = req->r_priv; struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); int num_pages = calc_pages_for((u64)osd_data->alignment, osd_data->length); dout("ceph_aio_complete_req %p rc %d bytes %llu\n", inode, rc, osd_data->length); if (rc == -EOLDSNAPC) { struct ceph_aio_work *aio_work; BUG_ON(!aio_req->write); aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); if (aio_work) { INIT_WORK(&aio_work->work, ceph_aio_retry_work); aio_work->req = req; queue_work(ceph_inode_to_client(inode)->wb_wq, &aio_work->work); return; } rc = -ENOMEM; } else if (!aio_req->write) { if (rc == -ENOENT) rc = 0; if (rc >= 0 && osd_data->length > rc) { int zoff = osd_data->alignment + rc; int zlen = osd_data->length - rc; /* * If read is satisfied by single OSD request, * it can pass EOF. Otherwise read is within * i_size. 
*/ if (aio_req->num_reqs == 1) { loff_t i_size = i_size_read(inode); loff_t endoff = aio_req->iocb->ki_pos + rc; if (endoff < i_size) zlen = min_t(size_t, zlen, i_size - endoff); aio_req->total_len = rc + zlen; } if (zlen > 0) ceph_zero_page_vector_range(zoff, zlen, osd_data->pages); } } ceph_put_page_vector(osd_data->pages, num_pages, false); ceph_osdc_put_request(req); if (rc < 0) cmpxchg(&aio_req->error, 0, rc); ceph_aio_complete(inode, aio_req); return; } static void ceph_aio_retry_work(struct work_struct *work) { struct ceph_aio_work *aio_work = container_of(work, struct ceph_aio_work, work); struct ceph_osd_request *orig_req = aio_work->req; struct ceph_aio_request *aio_req = orig_req->r_priv; struct inode *inode = orig_req->r_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_snap_context *snapc; struct ceph_osd_request *req; int ret; spin_lock(&ci->i_ceph_lock); if (__ceph_have_pending_cap_snap(ci)) { struct ceph_cap_snap *capsnap = list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap, ci_item); snapc = ceph_get_snap_context(capsnap->context); } else { BUG_ON(!ci->i_head_snapc); snapc = ceph_get_snap_context(ci->i_head_snapc); } spin_unlock(&ci->i_ceph_lock); req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, false, GFP_NOFS); if (!req) { ret = -ENOMEM; req = orig_req; goto out; } req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE; req->r_base_oloc = orig_req->r_base_oloc; req->r_base_oid = orig_req->r_base_oid; req->r_ops[0] = orig_req->r_ops[0]; osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); ceph_osdc_build_request(req, req->r_ops[0].extent.offset, snapc, CEPH_NOSNAP, &aio_req->mtime); ceph_osdc_put_request(orig_req); req->r_callback = ceph_aio_complete_req; req->r_inode = inode; req->r_priv = aio_req; ret = ceph_osdc_start_request(req->r_osdc, req, false); out: if (ret < 0) { BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } ceph_put_snap_context(snapc); 
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		/* request in flight: pin the Fw cap and track the request
		 * on the inode's unsafe-writes list */
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		/* committed (or aborted): untrack and release the cap ref */
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}

/*
 * O_DIRECT read/write: issue OSD requests straight from/to the user's
 * pages, bypassing the page cache.  For writes, @snapc is the snap
 * context to stamp on the requests and @pcf a preallocated cap flush
 * that may be consumed for async completion.
 */
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	/* writes are not allowed on snapshots */
	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n", (write ?
"write" : "read"), file, pos, (unsigned)count); ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); if (ret < 0) return ret; if (write) { ret = invalidate_inode_pages2_range(inode->i_mapping, pos >> PAGE_CACHE_SHIFT, (pos + count) >> PAGE_CACHE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE; } else { flags = CEPH_OSD_FLAG_READ; } while (iov_iter_count(iter) > 0) { u64 size = dio_get_pagev_size(iter); size_t start = 0; ssize_t len; vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, pos, &size, 0, /*include a 'startsync' command*/ write ? 2 : 1, write ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ, flags, snapc, ci->i_truncate_seq, ci->i_truncate_size, false); if (IS_ERR(req)) { ret = PTR_ERR(req); break; } len = size; pages = dio_get_pages_alloc(iter, len, &start, &num_pages); if (IS_ERR(pages)) { ceph_osdc_put_request(req); ret = PTR_ERR(pages); break; } /* * To simplify error handling, allow AIO when IO within i_size * or IO can be satisfied by single OSD request. */ if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && (len == count || pos + count <= i_size_read(inode))) { aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); if (aio_req) { aio_req->iocb = iocb; aio_req->write = write; INIT_LIST_HEAD(&aio_req->osd_reqs); if (write) { aio_req->mtime = mtime; swap(aio_req->prealloc_cf, *pcf); } } /* ignore error */ } if (write) { /* * throw out any page cache pages in this range. this * may block. 
*/ truncate_inode_pages_range(inode->i_mapping, pos, (pos+len) | (PAGE_CACHE_SIZE - 1)); osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); } osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, false, false); ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); if (aio_req) { aio_req->total_len += len; aio_req->num_reqs++; atomic_inc(&aio_req->pending_reqs); req->r_callback = ceph_aio_complete_req; req->r_inode = inode; req->r_priv = aio_req; list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); pos += len; iov_iter_advance(iter, len); continue; } ret = ceph_osdc_start_request(req->r_osdc, req, false); if (!ret) ret = ceph_osdc_wait_request(&fsc->client->osdc, req); size = i_size_read(inode); if (!write) { if (ret == -ENOENT) ret = 0; if (ret >= 0 && ret < len && pos + ret < size) { int zlen = min_t(size_t, len - ret, size - pos - ret); ceph_zero_page_vector_range(start + ret, zlen, pages); ret += zlen; } if (ret >= 0) len = ret; } ceph_put_page_vector(pages, num_pages, false); ceph_osdc_put_request(req); if (ret < 0) break; pos += len; iov_iter_advance(iter, len); if (!write && pos >= size) break; if (write && pos > size) { if (ceph_inode_set_size(inode, pos)) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); } } if (aio_req) { if (aio_req->num_reqs == 0) { kfree(aio_req); return ret; } ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : CEPH_CAP_FILE_RD); while (!list_empty(&aio_req->osd_reqs)) { req = list_first_entry(&aio_req->osd_reqs, struct ceph_osd_request, r_unsafe_item); list_del_init(&req->r_unsafe_item); if (ret >= 0) ret = ceph_osdc_start_request(req->r_osdc, req, false); if (ret < 0) { BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } } return -EIOCBQUEUED; } if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { ret = pos - iocb->ki_pos; iocb->ki_pos = pos; } return ret; } /* * Synchronous write, straight from __user pointer or user pages. 
* * If write spans object boundary, just do multiple writes. (For a * correct atomic write, we should e.g. take write locks on all * objects, rollback on failure, etc.) */ static ssize_t ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, struct ceph_snap_context *snapc) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_vino vino; struct ceph_osd_request *req; struct page **pages; u64 len; int num_pages; int written = 0; int flags; int check_caps = 0; int ret; struct timespec mtime = CURRENT_TIME; size_t count = iov_iter_count(from); if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) return -EROFS; dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); if (ret < 0) return ret; ret = invalidate_inode_pages2_range(inode->i_mapping, pos >> PAGE_CACHE_SHIFT, (pos + count) >> PAGE_CACHE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ACK; while ((len = iov_iter_count(from)) > 0) { size_t left; int n; vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, pos, &len, 0, 1, CEPH_OSD_OP_WRITE, flags, snapc, ci->i_truncate_seq, ci->i_truncate_size, false); if (IS_ERR(req)) { ret = PTR_ERR(req); break; } /* * write from beginning of first page, * regardless of io alignment */ num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) { ret = PTR_ERR(pages); goto out; } left = len; for (n = 0; n < num_pages; n++) { size_t plen = min_t(size_t, left, PAGE_SIZE); ret = copy_page_from_iter(pages[n], 0, plen, from); if (ret != plen) { ret = -EFAULT; break; } left -= ret; } if (ret < 0) { 
ceph_release_page_vector(pages, num_pages); goto out; } /* get a second commit callback */ req->r_unsafe_callback = ceph_sync_write_unsafe; req->r_inode = inode; osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, true); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); if (!ret) ret = ceph_osdc_wait_request(&fsc->client->osdc, req); out: ceph_osdc_put_request(req); if (ret == 0) { pos += len; written += len; if (pos > i_size_read(inode)) { check_caps = ceph_inode_set_size(inode, pos); if (check_caps) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); } } else break; } if (ret != -EOLDSNAPC && written > 0) { ret = written; iocb->ki_pos = pos; } return ret; } /* * Wrap generic_file_aio_read with checks for cap bits on the inode. * Atomically grab references, so that those bits are not released * back to the MDS mid-read. * * Hmm, the sync read case isn't actually async... should it be? 
*/ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *filp = iocb->ki_filp; struct ceph_file_info *fi = filp->private_data; size_t len = iov_iter_count(to); struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct page *pinned_page = NULL; ssize_t ret; int want, got = 0; int retry_op = 0, read = 0; again: dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); if (ret < 0) return ret; if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, ceph_cap_string(got)); if (ci->i_inline_version == CEPH_INLINE_NONE) { if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { ret = ceph_direct_read_write(iocb, to, NULL, NULL); if (ret >= 0 && ret < len) retry_op = CHECK_EOF; } else { ret = ceph_sync_read(iocb, to, &retry_op); } } else { retry_op = READ_INLINE; } } else { dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, ceph_cap_string(got)); ret = generic_file_read_iter(iocb, to); } dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); if (pinned_page) { page_cache_release(pinned_page); pinned_page = NULL; } ceph_put_cap_refs(ci, got); if (retry_op > HAVE_RETRIED && ret >= 0) { int statret; struct page *page = NULL; loff_t i_size; if (retry_op == READ_INLINE) { page = __page_cache_alloc(GFP_KERNEL); if (!page) return -ENOMEM; } statret = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA, !!page); if (statret < 0) { 
__free_page(page); if (statret == -ENODATA) { BUG_ON(retry_op != READ_INLINE); goto again; } return statret; } i_size = i_size_read(inode); if (retry_op == READ_INLINE) { BUG_ON(ret > 0 || read > 0); if (iocb->ki_pos < i_size && iocb->ki_pos < PAGE_CACHE_SIZE) { loff_t end = min_t(loff_t, i_size, iocb->ki_pos + len); end = min_t(loff_t, end, PAGE_CACHE_SIZE); if (statret < end) zero_user_segment(page, statret, end); ret = copy_page_to_iter(page, iocb->ki_pos & ~PAGE_MASK, end - iocb->ki_pos, to); iocb->ki_pos += ret; read += ret; } if (iocb->ki_pos < i_size && read < len) { size_t zlen = min_t(size_t, len - read, i_size - iocb->ki_pos); ret = iov_iter_zero(zlen, to); iocb->ki_pos += ret; read += ret; } __free_pages(page, 0); return read; } /* hit EOF or hole? */ if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && ret < len) { dout("sync_read hit hole, ppos %lld < size %lld" ", reading more\n", iocb->ki_pos, i_size); read += ret; len -= ret; retry_op = HAVE_RETRIED; goto again; } } if (ret >= 0) ret += read; return ret; } /* * Take cap references to avoid releasing caps to MDS mid-write. * * If we are synchronous, and write with an old snap context, the OSD * may return EOLDSNAPC. In that case, retry the write.. _after_ * dropping our cap refs and allowing the pending snap to logically * complete _before_ this write occurs. * * If we are near ENOSPC, write synchronously. 
*/ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct ceph_file_info *fi = file->private_data; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; struct ceph_cap_flush *prealloc_cf; ssize_t count, written = 0; int err, want, got; loff_t pos; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; prealloc_cf = ceph_alloc_cap_flush(); if (!prealloc_cf) return -ENOMEM; inode_lock(inode); /* We can write back this queue in page reclaim */ current->backing_dev_info = inode_to_bdi(inode); if (iocb->ki_flags & IOCB_APPEND) { err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); if (err < 0) goto out; } err = generic_write_checks(iocb, from); if (err <= 0) goto out; pos = iocb->ki_pos; count = iov_iter_count(from); err = file_remove_privs(file); if (err) goto out; err = file_update_time(file); if (err) goto out; if (ci->i_inline_version != CEPH_INLINE_NONE) { err = ceph_uninline_data(file, NULL); if (err < 0) goto out; } retry_snap: if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { err = -ENOSPC; goto out; } dout("aio_write %p %llx.%llx %llu~%zd getting caps. 
i_size %llu\n", inode, ceph_vinop(inode), pos, count, i_size_read(inode)); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_BUFFER; got = 0; err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, &got, NULL); if (err < 0) goto out; dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { struct ceph_snap_context *snapc; struct iov_iter data; inode_unlock(inode); spin_lock(&ci->i_ceph_lock); if (__ceph_have_pending_cap_snap(ci)) { struct ceph_cap_snap *capsnap = list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap, ci_item); snapc = ceph_get_snap_context(capsnap->context); } else { BUG_ON(!ci->i_head_snapc); snapc = ceph_get_snap_context(ci->i_head_snapc); } spin_unlock(&ci->i_ceph_lock); /* we might need to revert back to that point */ data = *from; if (iocb->ki_flags & IOCB_DIRECT) written = ceph_direct_read_write(iocb, &data, snapc, &prealloc_cf); else written = ceph_sync_write(iocb, &data, pos, snapc); if (written == -EOLDSNAPC) { dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", inode, ceph_vinop(inode), pos, (unsigned)count); inode_lock(inode); goto retry_snap; } if (written > 0) iov_iter_advance(from, written); ceph_put_snap_context(snapc); } else { loff_t old_size = i_size_read(inode); /* * No need to acquire the i_truncate_mutex. Because * the MDS revokes Fwb caps before sending truncate * message to us. We can't get Fwb cap while there * are pending vmtruncate. 
So write and vmtruncate * can not run at the same time */ written = generic_perform_write(file, from, pos); if (likely(written >= 0)) iocb->ki_pos = pos + written; if (i_size_read(inode) > old_size) ceph_fscache_update_objectsize(inode); inode_unlock(inode); } if (written >= 0) { int dirty; spin_lock(&ci->i_ceph_lock); ci->i_inline_version = CEPH_INLINE_NONE; dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); } dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", inode, ceph_vinop(inode), pos, (unsigned)count, ceph_cap_string(got)); ceph_put_cap_refs(ci, got); if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { err = vfs_fsync_range(file, pos, pos + written - 1, 1); if (err < 0) written = err; } goto out_unlocked; out: inode_unlock(inode); out_unlocked: ceph_free_cap_flush(prealloc_cf); current->backing_dev_info = NULL; return written ? written : err; } /* * llseek. be sure to verify file size on SEEK_END. */ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t i_size; int ret; inode_lock(inode); if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); if (ret < 0) { offset = ret; goto out; } } i_size = i_size_read(inode); switch (whence) { case SEEK_END: offset += i_size; break; case SEEK_CUR: /* * Here we special-case the lseek(fd, 0, SEEK_CUR) * position-querying operation. 
Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return offset;
}

/*
 * Zero @size bytes starting at @offset within a single page, if that
 * page is currently present in the page cache.  Absent pages need no
 * zeroing (they will be read back from the OSDs, which are zeroed
 * separately by the caller).
 */
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}

/*
 * Zero the page cache over [offset, offset+length): partial pages at
 * the head and tail are zeroed in place, whole pages in the middle are
 * simply truncated out of the cache.
 */
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);

	/* head: partial page up to the next page boundary */
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	/* middle: drop whole pages from the cache */
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	/* tail: partial page remainder */
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

/*
 * Zero (or remove) the portion of a single RADOS object covering
 * [offset, offset+*length).  A NULL @length means "to the end of the
 * object": delete the object entirely, or truncate it when @offset
 * is 0.
 */
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ?
CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; length = &zero; } else { op = CEPH_OSD_OP_ZERO; } req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, ceph_vino(inode), offset, length, 0, 1, op, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, NULL, 0, 0, false); if (IS_ERR(req)) { ret = PTR_ERR(req); goto out; } ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap, &inode->i_mtime); ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); if (!ret) { ret = ceph_osdc_wait_request(&fsc->client->osdc, req); if (ret == -ENOENT) ret = 0; } ceph_osdc_put_request(req); out: return ret; } static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) { int ret = 0; struct ceph_inode_info *ci = ceph_inode(inode); s32 stripe_unit = ceph_file_layout_su(ci->i_layout); s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout); s32 object_size = ceph_file_layout_object_size(ci->i_layout); u64 object_set_size = object_size * stripe_count; u64 nearly, t; /* round offset up to next period boundary */ nearly = offset + object_set_size - 1; t = nearly; nearly -= do_div(t, object_set_size); while (length && offset < nearly) { loff_t size = length; ret = ceph_zero_partial_object(inode, offset, &size); if (ret < 0) return ret; offset += size; length -= size; } while (length >= object_set_size) { int i; loff_t pos = offset; for (i = 0; i < stripe_count; ++i) { ret = ceph_zero_partial_object(inode, pos, NULL); if (ret < 0) return ret; pos += stripe_unit; } offset += object_set_size; length -= object_set_size; } while (length) { loff_t size = length; ret = ceph_zero_partial_object(inode, offset, &size); if (ret < 0) return ret; offset += size; length -= size; } return ret; } static long ceph_fallocate(struct file *file, int mode, loff_t offset, loff_t length) { struct ceph_file_info *fi = file->private_data; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = 
&ceph_inode_to_client(inode)->client->osdc; struct ceph_cap_flush *prealloc_cf; int want, got = 0; int dirty; int ret = 0; loff_t endoff = 0; loff_t size; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; prealloc_cf = ceph_alloc_cap_flush(); if (!prealloc_cf) return -ENOMEM; inode_lock(inode); if (ceph_snap(inode) != CEPH_NOSNAP) { ret = -EROFS; goto unlock; } if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && !(mode & FALLOC_FL_PUNCH_HOLE)) { ret = -ENOSPC; goto unlock; } if (ci->i_inline_version != CEPH_INLINE_NONE) { ret = ceph_uninline_data(file, NULL); if (ret < 0) goto unlock; } size = i_size_read(inode); if (!(mode & FALLOC_FL_KEEP_SIZE)) endoff = offset + length; if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_BUFFER; ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); if (ret < 0) goto unlock; if (mode & FALLOC_FL_PUNCH_HOLE) { if (offset < size) ceph_zero_pagecache_range(inode, offset, length); ret = ceph_zero_objects(inode, offset, length); } else if (endoff > size) { truncate_pagecache_range(inode, size, -1); if (ceph_inode_set_size(inode, endoff)) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); } if (!ret) { spin_lock(&ci->i_ceph_lock); ci->i_inline_version = CEPH_INLINE_NONE; dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); } ceph_put_cap_refs(ci, got); unlock: inode_unlock(inode); ceph_free_cap_flush(prealloc_cf); return ret; } const struct file_operations ceph_file_fops = { .open = ceph_open, .release = ceph_release, .llseek = ceph_llseek, .read_iter = ceph_read_iter, .write_iter = ceph_write_iter, .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .unlocked_ioctl = 
ceph_ioctl, .compat_ioctl = ceph_ioctl, .fallocate = ceph_fallocate, };
549650.c
/** @file
 *
 * Copyright (c) 2011, ARM Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-2-Clause-Patent
 *
 **/

#include <PiPei.h>

//
// The protocols, PPI and GUID definitions for this module
//
#include <Ppi/MasterBootMode.h>
#include <Ppi/BootInRecoveryMode.h>
#include <Ppi/GuidedSectionExtraction.h>
//
// The Library classes this module consumes
//
#include <Library/ArmPlatformLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>
#include <Library/PeimEntryPoint.h>
#include <Library/PeiServicesLib.h>
#include <Library/PcdLib.h>

EFI_STATUS
EFIAPI
InitializePlatformPeim (
  IN EFI_PEI_FILE_HANDLE       FileHandle,
  IN CONST EFI_PEI_SERVICES    **PeiServices
  );

//
// Platform-specific PEI hook, implemented elsewhere (presumably by the
// board/platform library -- confirm against the platform package).
//
EFI_STATUS
EFIAPI
PlatformPeim (
  VOID
  );

//
// Module globals
//

// PPI signalling that the master boot mode has been determined;
// always installed by this PEIM.
CONST EFI_PEI_PPI_DESCRIPTOR  mPpiListBootMode = {
  (EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
  &gEfiPeiMasterBootModePpiGuid,
  NULL
};

// PPI signalling recovery boot; installed only when the boot mode
// reported by the PEI core is BOOT_IN_RECOVERY_MODE.
CONST EFI_PEI_PPI_DESCRIPTOR  mPpiListRecoveryBootMode = {
  (EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
  &gEfiPeiBootInRecoveryModePpiGuid,
  NULL
};

/*++

Routine Description:

  Platform PEIM entry point: sets the boot mode from the platform
  library, runs the platform-specific PlatformPeim() hook, then
  installs the master-boot-mode PPI (and the recovery-mode PPI when
  the boot mode is recovery).

Arguments:

  FileHandle  - Handle of the file being invoked.
  PeiServices - Describes the list of possible PEI Services.

Returns:

  Status -  EFI_SUCCESS if the boot mode could be set

--*/
EFI_STATUS
EFIAPI
InitializePlatformPeim (
  IN EFI_PEI_FILE_HANDLE       FileHandle,
  IN CONST EFI_PEI_SERVICES    **PeiServices
  )
{
  EFI_STATUS                    Status;
  EFI_BOOT_MODE                 BootMode;

  DEBUG ((EFI_D_LOAD | EFI_D_INFO, "Platform PEIM Loaded\n"));

  // Let the platform library decide the boot mode (normal, recovery, ...)
  Status = PeiServicesSetBootMode (ArmPlatformGetBootMode ());
  ASSERT_EFI_ERROR (Status);

  PlatformPeim ();

  // Read the mode back so the recovery PPI can be installed conditionally
  Status = PeiServicesGetBootMode (&BootMode);
  ASSERT_EFI_ERROR (Status);

  Status = PeiServicesInstallPpi (&mPpiListBootMode);
  ASSERT_EFI_ERROR (Status);

  if (BootMode == BOOT_IN_RECOVERY_MODE) {
    Status = PeiServicesInstallPpi (&mPpiListRecoveryBootMode);
    ASSERT_EFI_ERROR (Status);
  }

  return Status;
}
467163.c
/* * librdkafka - Apache Kafka C library * * Copyright (c) 2012-2013, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef __OS400__ #pragma convert(819) #endif #define _GNU_SOURCE #include <errno.h> #include <string.h> #include <stdarg.h> #include <signal.h> #include <stdlib.h> #include <sys/stat.h> #if !_WIN32 #include <sys/types.h> #include <dirent.h> #endif #include "rdkafka_int.h" #include "rdkafka_msg.h" #include "rdkafka_broker.h" #include "rdkafka_topic.h" #include "rdkafka_partition.h" #include "rdkafka_offset.h" #include "rdkafka_transport.h" #include "rdkafka_cgrp.h" #include "rdkafka_assignor.h" #include "rdkafka_request.h" #include "rdkafka_event.h" #include "rdkafka_sasl.h" #include "rdkafka_interceptor.h" #include "rdkafka_idempotence.h" #include "rdkafka_sasl_oauthbearer.h" #if WITH_SSL #include "rdkafka_ssl.h" #endif #include "rdtime.h" #include "crc32c.h" #include "rdunittest.h" #ifdef _WIN32 #include <sys/types.h> #include <sys/timeb.h> #endif static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; static once_flag rd_kafka_global_srand_once = ONCE_FLAG_INIT; /** * @brief Global counter+lock for all active librdkafka instances */ mtx_t rd_kafka_global_lock; int rd_kafka_global_cnt; /** * Last API error code, per thread. * Shared among all rd_kafka_t instances. */ rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code; /** * Current number of threads created by rdkafka. * This is used in regression tests. */ rd_atomic32_t rd_kafka_thread_cnt_curr; int rd_kafka_thread_cnt (void) { return rd_atomic32_get(&rd_kafka_thread_cnt_curr); } /** * Current thread's log name (TLS) */ char RD_TLS rd_kafka_thread_name[64] = "app"; void rd_kafka_set_thread_name (const char *fmt, ...) { va_list ap; va_start(ap, fmt); rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), fmt, ap); va_end(ap); } /** * @brief Current thread's system name (TLS) * * Note the name must be 15 characters or less, because it is passed to * pthread_setname_np on Linux which imposes this limit. 
*/ static char RD_TLS rd_kafka_thread_sysname[16] = "app"; void rd_kafka_set_thread_sysname (const char *fmt, ...) { va_list ap; va_start(ap, fmt); rd_vsnprintf(rd_kafka_thread_sysname, sizeof(rd_kafka_thread_sysname), fmt, ap); va_end(ap); thrd_setname(rd_kafka_thread_sysname); } static void rd_kafka_global_init0 (void) { mtx_init(&rd_kafka_global_lock, mtx_plain); #if ENABLE_DEVEL rd_atomic32_init(&rd_kafka_op_cnt, 0); #endif crc32c_global_init(); #if WITH_SSL /* The configuration interface might need to use * OpenSSL to parse keys, prior to any rd_kafka_t * object has been created. */ rd_kafka_ssl_init(); #endif } /** * @brief Initialize once per process */ void rd_kafka_global_init (void) { call_once(&rd_kafka_global_init_once, rd_kafka_global_init0); } /** * @brief Seed the PRNG with current_time.milliseconds */ static void rd_kafka_global_srand (void) { struct timeval tv; rd_gettimeofday(&tv, NULL); srand((unsigned int)(tv.tv_usec / 1000)); } /** * @returns the current number of active librdkafka instances */ static int rd_kafka_global_cnt_get (void) { int r; mtx_lock(&rd_kafka_global_lock); r = rd_kafka_global_cnt; mtx_unlock(&rd_kafka_global_lock); return r; } /** * @brief Increase counter for active librdkafka instances. * If this is the first instance the global constructors will be called, if any. */ static void rd_kafka_global_cnt_incr (void) { mtx_lock(&rd_kafka_global_lock); rd_kafka_global_cnt++; if (rd_kafka_global_cnt == 1) { rd_kafka_transport_init(); #if WITH_SSL rd_kafka_ssl_init(); #endif rd_kafka_sasl_global_init(); } mtx_unlock(&rd_kafka_global_lock); } /** * @brief Decrease counter for active librdkafka instances. * If this counter reaches 0 the global destructors will be called, if any. 
*/ static void rd_kafka_global_cnt_decr (void) { mtx_lock(&rd_kafka_global_lock); rd_kafka_assert(NULL, rd_kafka_global_cnt > 0); rd_kafka_global_cnt--; if (rd_kafka_global_cnt == 0) { rd_kafka_sasl_global_term(); #if WITH_SSL rd_kafka_ssl_term(); #endif } mtx_unlock(&rd_kafka_global_lock); } /** * Wait for all rd_kafka_t objects to be destroyed. * Returns 0 if all kafka objects are now destroyed, or -1 if the * timeout was reached. */ int rd_kafka_wait_destroyed (int timeout_ms) { rd_ts_t timeout = rd_clock() + (timeout_ms * 1000); while (rd_kafka_thread_cnt() > 0 || rd_kafka_global_cnt_get() > 0) { if (rd_clock() >= timeout) { rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, ETIMEDOUT); return -1; } rd_usleep(25000, NULL); /* 25ms */ } return 0; } static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, const rd_kafka_t *rk, int level, int ctx, const char *fac, const char *buf) { if (level > conf->log_level) return; else if (rk && conf->log_queue) { rd_kafka_op_t *rko; if (!rk->rk_logq) return; /* Terminating */ rko = rd_kafka_op_new(RD_KAFKA_OP_LOG); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_MEDIUM); rko->rko_u.log.level = level; rd_strlcpy(rko->rko_u.log.fac, fac, sizeof(rko->rko_u.log.fac)); rko->rko_u.log.str = rd_strdup(buf); rko->rko_u.log.ctx = ctx; rd_kafka_q_enq(rk->rk_logq, rko); } else if (conf->log_cb) { conf->log_cb(rk, level, fac, buf); } } /** * @brief Logger * * @remark conf must be set, but rk may be NULL */ void rd_kafka_log0 (const rd_kafka_conf_t *conf, const rd_kafka_t *rk, const char *extra, int level, int ctx, const char *fac, const char *fmt, ...) 
{ char buf[2048]; va_list ap; unsigned int elen = 0; unsigned int of = 0; if (level > conf->log_level) return; if (conf->log_thread_name) { elen = rd_snprintf(buf, sizeof(buf), "[thrd:%s]: ", rd_kafka_thread_name); if (unlikely(elen >= sizeof(buf))) elen = sizeof(buf); of = elen; } if (extra) { elen = rd_snprintf(buf+of, sizeof(buf)-of, "%s: ", extra); if (unlikely(elen >= sizeof(buf)-of)) elen = sizeof(buf)-of; of += elen; } va_start(ap, fmt); rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap); va_end(ap); rd_kafka_log_buf(conf, rk, level, ctx, fac, buf); } rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token0( rk, token_value, md_lifetime_ms, md_principal_name, extensions, extension_size, errstr, errstr_size); #else rd_snprintf(errstr, errstr_size, "librdkafka not built with SASL OAUTHBEARER support"); return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; #endif } rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token_failure0(rk, errstr); #else return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; #endif } void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { int secs, msecs; struct timeval tv; rd_gettimeofday(&tv, NULL); secs = (int)tv.tv_sec; msecs = (int)(tv.tv_usec / 1000); fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", level, secs, msecs, fac, rk ? rk->rk_name : "", buf); } void rd_kafka_log_syslog (const rd_kafka_t *rk, int level, const char *fac, const char *buf) { #if WITH_SYSLOG static int initialized = 0; if (!initialized) openlog("rdkafka", LOG_PID|LOG_CONS, LOG_USER); syslog(level, "%s: %s: %s", fac, rk ? 
rk->rk_name : "", buf);
#else
        rd_assert(!*"syslog support not enabled in this build");
#endif
}

/**
 * @brief Install the application's log callback.
 */
void rd_kafka_set_logger (rd_kafka_t *rk,
                          void (*func) (const rd_kafka_t *rk, int level,
                                        const char *fac, const char *buf)) {
#if !WITH_SYSLOG
        if (func == rd_kafka_log_syslog)
                rd_assert(!*"syslog support not enabled in this build");
#endif
        rk->rk_conf.log_cb = func;
}

/**
 * @brief Set the maximum (syslog-style) log level that will be emitted.
 */
void rd_kafka_set_log_level (rd_kafka_t *rk, int level) {
        rk->rk_conf.log_level = level;
}

/* Map client instance type to its human-readable name. */
static const char *rd_kafka_type2str (rd_kafka_type_t type) {
        static const char *types[] = {
                [RD_KAFKA_PRODUCER] = "producer",
                [RD_KAFKA_CONSUMER] = "consumer",
        };
        return types[type];
}

/* Build one err-desc table entry; the name field skips the
 * "RD_KAFKA_RESP_ERR" prefix (18 characters) of the stringified enum. */
#define _ERR_DESC(ENUM,DESC) \
        [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = { ENUM, &(# ENUM)[18]/*pfx*/, DESC }

/* Error descriptions, indexed by (err - RD_KAFKA_RESP_ERR__BEGIN). */
static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
        _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL),
        _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG,
                  "Local: Bad message format"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION,
                  "Local: Invalid compressed data"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY,
                  "Local: Broker handle destroyed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__FAIL,
                  "Local: Communication failure with broker"), //FIXME: too specific
        _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT,
                  "Local: Broker transport failure"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
                  "Local: Critical system resource failure"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE,
                  "Local: Host resolution failure"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
                  "Local: Message timed out"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF,
                  "Broker: No more messages"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                  "Local: Unknown partition"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__FS,
                  "Local: File or filesystem error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC,
                  "Local: Unknown topic"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
                  "Local: All broker connections are down"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG,
                  "Local: Invalid argument or configuration"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT,
                  "Local: Timed out"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL,
                  "Local: Queue full"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF,
                  "Local: ISR count insufficient"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE,
                  "Local: Broker node update"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__SSL,
                  "Local: SSL error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD,
                  "Local: Waiting for coordinator"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
                  "Local: Unknown group"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS,
                  "Local: Operation in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
                  "Local: Previous operation in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION,
                  "Local: Existing subscription"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                  "Local: Assign partitions"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
                  "Local: Revoke partitions"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT,
                  "Local: Conflicting use"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__STATE,
                  "Local: Erroneous state"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL,
                  "Local: Unknown protocol"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
                  "Local: Not implemented"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION,
                  "Local: Authentication failure"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET,
                  "Local: No offset stored"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED,
                  "Local: Outdated"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
                  "Local: Timed out in queue"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
                  "Local: Required feature not supported by broker"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE,
                  "Local: Awaiting cache update"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__INTR,
                  "Local: Operation interrupted"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION,
                  "Local: Key serialization error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION,
                  "Local: Value serialization error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION,
                  "Local: Key deserialization error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION,
                  "Local: Value deserialization error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL,
                  "Local: Partial response"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY,
                  "Local: Read-only object"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT,
                  "Local: No such entry"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW,
                  "Local: Read underflow"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE,
                  "Local: Invalid type"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY,
                  "Local: Retry operation"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE,
                  "Local: Purged in queue"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT,
                  "Local: Purged in flight"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL,
                  "Local: Fatal error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT,
                  "Local: Inconsistent state"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE,
                  "Local: Gap-less ordering would not be guaranteed "
                  "if proceeding"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED,
                  "Local: Maximum application poll interval "
                  "(max.poll.interval.ms) exceeded"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER,
                  "Local: Unknown broker"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
                  "Local: Functionality not configured"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED,
                  "Local: This instance has been fenced by a newer instance"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION,
                  "Local: Application generated error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST,
                  "Local: Group partition assignment lost"),
        _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP,
                  "Local: No operation performed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN,
                  "Unknown broker error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR,
                  "Success"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE,
                  "Broker: Offset out of range"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG,
                  "Broker: Invalid message"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
                  "Broker: Unknown topic or partition"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
                  "Broker: Invalid message size"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
                  "Broker: Leader not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
                  "Broker: Not leader for partition"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
                  "Broker: Request timed out"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE,
                  "Broker: Broker not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
                  "Broker: Replica not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
                  "Broker: Message size too large"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH,
                  "Broker: StaleControllerEpochCode"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
                  "Broker: Offset metadata string too large"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION,
                  "Broker: Broker disconnected before response received"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
                  "Broker: Coordinator load in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
                  "Broker: Coordinator not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                  "Broker: Not coordinator"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION,
                  "Broker: Invalid topic"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE,
                  "Broker: Message batch larger than configured server "
                  "segment size"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
                  "Broker: Not enough in-sync replicas"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
                  "Broker: Message(s) written to insufficient number of "
                  "in-sync replicas"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS,
                  "Broker: Invalid required acks value"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                  "Broker: Specified group generation id is not valid"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL,
                  "Broker: Inconsistent group protocol"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID,
                  "Broker: Invalid group.id"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
                  "Broker: Unknown member"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT,
                  "Broker: Invalid session timeout"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
                  "Broker: Group rebalance in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
                  "Broker: Commit offset data size is not valid"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
                  "Broker: Topic authorization failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
                  "Broker: Group authorization failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
                  "Broker: Cluster authorization failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP,
                  "Broker: Invalid timestamp"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM,
                  "Broker: Unsupported SASL mechanism"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE,
                  "Broker: Request not valid in current SASL state"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION,
                  "Broker: API version not supported"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
                  "Broker: Topic already exists"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS,
                  "Broker: Invalid number of partitions"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR,
                  "Broker: Invalid replication factor"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT,
                  "Broker: Invalid replica assignment"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG,
                  "Broker: Configuration is invalid"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER,
                  "Broker: Not controller for cluster"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST,
                  "Broker: Invalid request"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT,
                  "Broker: Message format on broker does not support request"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION,
                  "Broker: Policy violation"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
                  "Broker: Broker received an out of order sequence number"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
                  "Broker: Broker received a duplicate sequence number"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
                  "Broker: Producer attempted an operation with an old epoch"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
                  "Broker: Producer attempted a transactional operation in "
                  "an invalid state"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
                  "Broker: Producer attempted to use a producer id which is "
                  "not currently assigned to its transactional id"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
                  "Broker: Transaction timeout is larger than the maximum "
                  "value allowed by the broker's max.transaction.timeout.ms"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
                  "Broker: Producer attempted to update a transaction while "
                  "another concurrent operation on the same transaction was "
                  "ongoing"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED,
                  "Broker: Indicates that the transaction coordinator sending "
                  "a WriteTxnMarker is no longer the current coordinator for "
                  "a given producer"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
                  "Broker: Transactional Id authorization failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED,
                  "Broker: Security features are disabled"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED,
                  "Broker: Operation not attempted"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
                  "Broker: Disk error when trying to access log file on disk"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND,
                  "Broker: The user-specified log directory is not found "
                  "in the broker config"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED,
                  "Broker: SASL Authentication failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
                  "Broker: Unknown Producer Id"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS,
                  "Broker: Partition reassignment is in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED,
                  "Broker: Delegation Token feature is not enabled"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND,
                  "Broker: Delegation Token is not found on server"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH,
                  "Broker: Specified Principal is not valid Owner/Renewer"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED,
                  "Broker: Delegation Token requests are not allowed on "
                  "this connection"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED,
                  "Broker: Delegation Token authorization failed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED,
                  "Broker: Delegation Token is expired"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE,
                  "Broker: Supplied principalType is not supported"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP,
                  "Broker: The group is not empty"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND,
                  "Broker: The group id does not exist"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND,
                  "Broker: The fetch session ID was not found"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH,
                  "Broker: The fetch session epoch is invalid"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND,
                  "Broker: No matching listener"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED,
                  "Broker: Topic deletion is disabled"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
                  "Broker: Leader epoch is older than broker epoch"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
                  "Broker: Leader epoch is newer than broker epoch"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE,
                  "Broker: Unsupported compression type"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH,
                  "Broker: Broker epoch has changed"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
                  "Broker: Leader high watermark is not caught up"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED,
                  "Broker: Group member needs a valid member ID"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE,
                  "Broker: Preferred leader was not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED,
                  "Broker: Consumer group has reached maximum size"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
                  "Broker: Static consumer fenced by other consumer with same "
                  "group.instance.id"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE,
                  "Broker: Eligible partition leaders are not available"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED,
                  "Broker: Leader election not needed for topic partition"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS,
                  "Broker: No partition reassignment is in progress"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC,
                  "Broker: Deleting offsets of a topic while the consumer group is subscribed to it"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD,
                  "Broker: Broker failed to validate record"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
                  "Broker: There are unstable offsets that need to be cleared"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED,
                  "Broker: Throttling quota has been exceeded"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED,
                  "Broker: There is a newer producer with the same "
                  "transactionalId which fences the current one"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND,
                  "Broker: Request illegally referred to resource that "
                  "does not exist"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE,
                  "Broker: Request illegally referred to the same resource "
                  "twice"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL,
                  "Broker: Requested credential would not meet criteria for "
                  "acceptability"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET,
                  "Broker: Indicates that the either the sender or recipient "
                  "of a voter-only request is not one of the expected voters"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION,
                  "Broker: Invalid update version"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED,
                  "Broker: Unable to update finalized features due to "
                  "server error"),
        _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE,
                  "Broker: Request principal deserialization failed during "
                  "forwarding"),

        _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)
};

/**
 * @brief Export the error description table to the application.
 */
void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs,
                             size_t *cntp) {
        *errdescs = rd_kafka_err_descs;
        *cntp = RD_ARRAYSIZE(rd_kafka_err_descs);
}

/**
 * @brief Translate an error code to its human readable description.
 *        Returns a thread-local fallback string for unknown codes.
 */
const char *rd_kafka_err2str (rd_kafka_resp_err_t err) {
        static RD_TLS char ret[32];
        int idx = err - RD_KAFKA_RESP_ERR__BEGIN;

        if (unlikely(err <=
RD_KAFKA_RESP_ERR__BEGIN ||
                     err >= RD_KAFKA_RESP_ERR_END_ALL ||
                     !rd_kafka_err_descs[idx].desc)) {
                rd_snprintf(ret, sizeof(ret), "Err-%i?", err);
                return ret;
        }

        return rd_kafka_err_descs[idx].desc;
}

/**
 * @brief Translate an error code to its enum constant name (sans prefix).
 *        Returns a thread-local fallback string for unknown codes.
 */
const char *rd_kafka_err2name (rd_kafka_resp_err_t err) {
        static RD_TLS char ret[32];
        int idx = err - RD_KAFKA_RESP_ERR__BEGIN;

        if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
                     err >= RD_KAFKA_RESP_ERR_END_ALL ||
                     !rd_kafka_err_descs[idx].desc)) {
                rd_snprintf(ret, sizeof(ret), "ERR_%i?", err);
                return ret;
        }

        return rd_kafka_err_descs[idx].name;
}

/**
 * @returns the last error code set for the calling thread (TLS).
 */
rd_kafka_resp_err_t rd_kafka_last_error (void) {
        return rd_kafka_last_error_code;
}

/**
 * @brief Map an errno value to the closest librdkafka error code
 *        (legacy API; falls back to __FAIL for unmapped values).
 */
rd_kafka_resp_err_t rd_kafka_errno2err (int errnox) {
        switch (errnox)
        {
        case EINVAL:
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        case EBUSY:
                return RD_KAFKA_RESP_ERR__CONFLICT;

        case ENOENT:
                return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;

        case ESRCH:
                return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

        case ETIMEDOUT:
                return RD_KAFKA_RESP_ERR__TIMED_OUT;

        case EMSGSIZE:
                return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;

        case ENOBUFS:
                return RD_KAFKA_RESP_ERR__QUEUE_FULL;

        case ECANCELED:
                return RD_KAFKA_RESP_ERR__FATAL;

        default:
                return RD_KAFKA_RESP_ERR__FAIL;
        }
}

/**
 * @brief Retrieve the fatal error (if any) and copy its reason string
 *        into \p errstr (under the instance read lock).
 */
rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk,
                                          char *errstr, size_t errstr_size) {
        rd_kafka_resp_err_t err;

        if (unlikely((err = rd_atomic32_get(&rk->rk_fatal.err)))) {
                rd_kafka_rdlock(rk);
                rd_snprintf(errstr, errstr_size, "%s", rk->rk_fatal.errstr);
                rd_kafka_rdunlock(rk);
        }

        return err;
}

/**
 * @brief Sets the fatal error for this instance.
 *
 * @param do_lock RD_DO_LOCK: rd_kafka_wrlock() will be acquired and released,
 *                RD_DONT_LOCK: caller must hold rd_kafka_wrlock().
 *
 * @returns 1 if the error was set, or 0 if a previous fatal error
 *          has already been set on this instance.
 *
 * @locality any
 * @locks none
 */
int rd_kafka_set_fatal_error0 (rd_kafka_t *rk, rd_dolock_t do_lock,
                               rd_kafka_resp_err_t err,
                               const char *fmt, ...) {
        va_list ap;
        char buf[512];

        if (do_lock)
                rd_kafka_wrlock(rk);
        rk->rk_fatal.cnt++;
        /* Only the first fatal error sticks; later ones are suppressed. */
        if (rd_atomic32_get(&rk->rk_fatal.err)) {
                if (do_lock)
                        rd_kafka_wrunlock(rk);
                rd_kafka_dbg(rk, GENERIC, "FATAL",
                             "Suppressing subsequent fatal error: %s",
                             rd_kafka_err2name(err));
                return 0;
        }

        rd_atomic32_set(&rk->rk_fatal.err, err);

        va_start(ap, fmt);
        rd_vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        rk->rk_fatal.errstr = rd_strdup(buf);

        if (do_lock)
                rd_kafka_wrunlock(rk);

        /* If there is an error callback or event handler we
         * also log the fatal error as it happens.
         * If there is no error callback the error event
         * will be automatically logged, and this check here
         * prevents us from duplicate logs. */
        if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)
                rd_kafka_log(rk, LOG_EMERG, "FATAL",
                             "Fatal error: %s: %s",
                             rd_kafka_err2str(err), rk->rk_fatal.errstr);
        else
                rd_kafka_dbg(rk, ALL, "FATAL",
                             "Fatal error: %s: %s",
                             rd_kafka_err2str(err), rk->rk_fatal.errstr);

        /* Indicate to the application that a fatal error was raised,
         * the app should use rd_kafka_fatal_error() to extract the
         * fatal error code itself.
         * For the high-level consumer we propagate the error as a
         * consumer error so it is returned from consumer_poll(),
         * while for all other client types (the producer) we propagate to
         * the standard error handler (typically error_cb). */
        if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
                rd_kafka_consumer_err(rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA,
                                      RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL,
                                      RD_KAFKA_OFFSET_INVALID,
                                      "Fatal error: %s: %s",
                                      rd_kafka_err2str(err),
                                      rk->rk_fatal.errstr);
        else
                rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL,
                                "Fatal error: %s: %s",
                                rd_kafka_err2str(err), rk->rk_fatal.errstr);

        /* Tell rdkafka main thread to purge producer queues, but not
         * in-flight since we'll want proper delivery status for transmitted
         * requests.
         * Need NON_BLOCKING to avoid dead-lock if user is
         * calling purge() at the same time, which could be
         * waiting for this broker thread to handle its
         * OP_PURGE request. */
        if (rk->rk_type == RD_KAFKA_PRODUCER) {
                rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE);
                rko->rko_u.purge.flags = RD_KAFKA_PURGE_F_QUEUE|
                        RD_KAFKA_PURGE_F_NON_BLOCKING;
                rd_kafka_q_enq(rk->rk_ops, rko);
        }

        return 1;
}

/**
 * @brief Inject a fatal error (test helper).
 *
 * @returns __PREV_IN_PROGRESS if a fatal error was already set,
 *          else NO_ERROR.
 */
rd_kafka_resp_err_t rd_kafka_test_fatal_error (rd_kafka_t *rk,
                                               rd_kafka_resp_err_t err,
                                               const char *reason) {
        if (!rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason))
                return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
        else
                return RD_KAFKA_RESP_ERR_NO_ERROR;
}



/**
 * @brief Final destructor for rd_kafka_t, must only be called with refcnt 0.
 *
 * @locality application thread
 */
void rd_kafka_destroy_final (rd_kafka_t *rk) {

        rd_kafka_assert(rk, rd_kafka_terminating(rk));

        /* Synchronize state */
        rd_kafka_wrlock(rk);
        rd_kafka_wrunlock(rk);

        /* Terminate SASL provider */
        if (rk->rk_conf.sasl.provider)
                rd_kafka_sasl_term(rk);

        rd_kafka_timers_destroy(&rk->rk_timers);

        rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying op queues");

        /* Destroy cgrp */
        if (rk->rk_cgrp) {
                rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying cgrp");
                /* Reset queue forwarding (rep -> cgrp) */
                rd_kafka_q_fwd_set(rk->rk_rep, NULL);
                rd_kafka_cgrp_destroy_final(rk->rk_cgrp);
        }

        rd_kafka_assignors_term(rk);

        if (rk->rk_type == RD_KAFKA_CONSUMER) {
                rd_kafka_assignment_destroy(rk);
                if (rk->rk_consumer.q)
                        rd_kafka_q_destroy(rk->rk_consumer.q);
        }

        /* Purge op-queues */
        rd_kafka_q_destroy_owner(rk->rk_rep);
        rd_kafka_q_destroy_owner(rk->rk_ops);

#if WITH_SSL
        if (rk->rk_conf.ssl.ctx) {
                rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX");
                rd_kafka_ssl_ctx_term(rk);
        }
#endif

        /* It is not safe to log after this point.
 */
        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                     "Termination done: freeing resources");

        if (rk->rk_logq) {
                rd_kafka_q_destroy_owner(rk->rk_logq);
                rk->rk_logq = NULL;
        }

        if (rk->rk_type == RD_KAFKA_PRODUCER) {
                cnd_destroy(&rk->rk_curr_msgs.cnd);
                mtx_destroy(&rk->rk_curr_msgs.lock);
        }

        if (rk->rk_fatal.errstr) {
                rd_free(rk->rk_fatal.errstr);
                rk->rk_fatal.errstr = NULL;
        }

        cnd_destroy(&rk->rk_broker_state_change_cnd);
        mtx_destroy(&rk->rk_broker_state_change_lock);

        mtx_destroy(&rk->rk_suppress.sparse_connect_lock);

        cnd_destroy(&rk->rk_init_cnd);
        mtx_destroy(&rk->rk_init_lock);

        if (rk->rk_full_metadata)
                rd_kafka_metadata_destroy(rk->rk_full_metadata);
        rd_kafkap_str_destroy(rk->rk_client_id);
        rd_kafkap_str_destroy(rk->rk_group_id);
        rd_kafkap_str_destroy(rk->rk_eos.transactional_id);
        rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf);
        rd_list_destroy(&rk->rk_broker_by_id);

        rwlock_destroy(&rk->rk_lock);

        rd_free(rk);
        rd_kafka_global_cnt_decr();
}

/**
 * @brief Application-facing destroy: signals termination, optionally closes
 *        the consumer, and joins the internal main thread (unless
 *        _F_IMMEDIATE is set).
 */
static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) {
        thrd_t thrd;
#ifndef _WIN32
        int term_sig = rk->rk_conf.term_sig;
#endif
#ifndef __OS400__
        int res;
#else
        void *res;
#endif
        char flags_str[256];
        static const char *rd_kafka_destroy_flags_names[] = {
                "Terminate",
                "DestroyCalled",
                "Immediate",
                "NoConsumerClose",
                NULL
        };

        /* Fatal errors and _F_IMMEDIATE also sets .._NO_CONSUMER_CLOSE */
        if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE ||
            rd_kafka_fatal_error_code(rk))
                flags |= RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE;

        rd_flags2str(flags_str, sizeof(flags_str),
                     rd_kafka_destroy_flags_names, flags);
        rd_kafka_dbg(rk, ALL, "DESTROY", "Terminating instance "
                     "(destroy flags %s (0x%x))",
                     flags ? flags_str : "none", flags);

        /* If producer still has messages in queue the application
         * is terminating the producer without first calling flush() or purge()
         * which is a common new user mistake, so hint the user of proper
         * shutdown semantics. */
        if (rk->rk_type == RD_KAFKA_PRODUCER) {
                unsigned int tot_cnt;
                size_t tot_size;

                rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);

                if (tot_cnt > 0)
                        rd_kafka_log(rk, LOG_WARNING, "TERMINATE",
                                     "Producer terminating with %u message%s "
                                     "(%"PRIusz" byte%s) still in "
                                     "queue or transit: "
                                     "use flush() to wait for "
                                     "outstanding message delivery",
                                     tot_cnt, tot_cnt > 1 ? "s" : "",
                                     tot_size, tot_size > 1 ? "s" : "");
        }

        /* Make sure destroy is not called from a librdkafka thread
         * since this will most likely cause a deadlock.
         * FIXME: include broker threads (for log_cb) */
        if (thrd_is_current(rk->rk_thread) ||
            thrd_is_current(rk->rk_background.thread)) {
                rd_kafka_log(rk, LOG_EMERG, "BGQUEUE",
                             "Application bug: "
                             "rd_kafka_destroy() called from "
                             "librdkafka owned thread");
                rd_kafka_assert(NULL,
                                !*"Application bug: "
                                "calling rd_kafka_destroy() from "
                                "librdkafka owned thread is prohibited");
        }

        /* Before signaling for general termination, set the destroy
         * flags to hint cgrp how to shut down. */
        rd_atomic32_set(&rk->rk_terminate,
                        flags|RD_KAFKA_DESTROY_F_DESTROY_CALLED);

        /* The legacy/simple consumer lacks an API to close down the consumer*/
        if (rk->rk_cgrp) {
                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                             "Terminating consumer group handler");
                rd_kafka_consumer_close(rk);
        }

        /* With the consumer closed, terminate the rest of librdkafka. */
        rd_atomic32_set(&rk->rk_terminate,
                        flags|RD_KAFKA_DESTROY_F_TERMINATE);

        rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers");
        rd_kafka_wrlock(rk);
        thrd = rk->rk_thread;
        rd_kafka_timers_interrupt(&rk->rk_timers);
        rd_kafka_wrunlock(rk);

        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                     "Sending TERMINATE to internal main thread");
        /* Send op to trigger queue/io wake-up.
         * The op itself is (likely) ignored by the receiver. */
        rd_kafka_q_enq(rk->rk_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));

#ifndef _WIN32
        /* Interrupt main kafka thread to speed up termination.
         */
        if (term_sig) {
                rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                             "Sending thread kill signal %d", term_sig);
                pthread_kill(thrd, term_sig);
        }
#endif

        if (rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_IMMEDIATE))
                return; /* FIXME: thread resource leak */

        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                     "Joining internal main thread");

        if (thrd_join(thrd, &res) != thrd_success)
                rd_kafka_log(rk, LOG_ERR, "DESTROY",
                             "Failed to join internal main thread: %s "
                             "(was process forked?)",
                             rd_strerror(errno));

        rd_kafka_destroy_final(rk);
}

/* NOTE: Must only be called by application.
 *       librdkafka itself must use rd_kafka_destroy0(). */
void rd_kafka_destroy (rd_kafka_t *rk) {
        rd_kafka_destroy_app(rk, 0);
}

void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags) {
        rd_kafka_destroy_app(rk, flags);
}


/**
 * Main destructor for rd_kafka_t
 *
 * Locality: rdkafka main thread or application thread during rd_kafka_new()
 */
static void rd_kafka_destroy_internal (rd_kafka_t *rk) {
        rd_kafka_topic_t *rkt, *rkt_tmp;
        rd_kafka_broker_t *rkb, *rkb_tmp;
        rd_list_t wait_thrds;
        thrd_t *thrd;
        int i;

        rd_kafka_dbg(rk, ALL, "DESTROY", "Destroy internal");

        /* Trigger any state-change waiters (which should check the
         * terminate flag whenever they wake up). */
        rd_kafka_brokers_broadcast_state_change(rk);

#ifndef __OS400__
        if (rk->rk_background.thread) {
                int res;
#else
        if (*(long long *)(void *)&rk->rk_background.thread_tid) {
                void *res;
#endif
                /* Send op to trigger queue/io wake-up.
                 * The op itself is (likely) ignored by the receiver. */
                rd_kafka_q_enq(rk->rk_background.q,
                               rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));

                rd_kafka_dbg(rk, ALL, "DESTROY",
                             "Waiting for background queue thread "
                             "to terminate");
                thrd_join(rk->rk_background.thread, &res);
                rd_kafka_q_destroy_owner(rk->rk_background.q);
        }

        /* Call on_destroy() interceptors */
        rd_kafka_interceptors_on_destroy(rk);

        /* Brokers pick up on rk_terminate automatically.
*/ /* List of (broker) threads to join to synchronize termination */ rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL); rd_kafka_wrlock(rk); rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics"); /* Decommission all topics */ TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) { rd_kafka_wrunlock(rk); rd_kafka_topic_partitions_remove(rkt); rd_kafka_wrlock(rk); } /* Decommission brokers. * Broker thread holds a refcount and detects when broker refcounts * reaches 1 and then decommissions itself. */ TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) { /* Add broker's thread to wait_thrds list for later joining */ thrd = malloc(sizeof(*thrd)); *thrd = rkb->rkb_thread; rd_list_add(&wait_thrds, thrd); rd_kafka_wrunlock(rk); rd_kafka_dbg(rk, BROKER, "DESTROY", "Sending TERMINATE to %s", rd_kafka_broker_name(rkb)); /* Send op to trigger queue/io wake-up. * The op itself is (likely) ignored by the broker thread. */ rd_kafka_q_enq(rkb->rkb_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); #ifndef _WIN32 /* Interrupt IO threads to speed up termination. 
*/ if (rk->rk_conf.term_sig) pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig); #endif rd_kafka_broker_destroy(rkb); rd_kafka_wrlock(rk); } if (rk->rk_clusterid) { rd_free(rk->rk_clusterid); rk->rk_clusterid = NULL; } /* Destroy coord requests */ rd_kafka_coord_reqs_term(rk); /* Destroy the coordinator cache */ rd_kafka_coord_cache_destroy(&rk->rk_coord_cache); /* Destroy metadata cache */ rd_kafka_metadata_cache_destroy(rk); rd_kafka_wrunlock(rk); mtx_lock(&rk->rk_broker_state_change_lock); /* Purge broker state change waiters */ rd_list_destroy(&rk->rk_broker_state_change_waiters); mtx_unlock(&rk->rk_broker_state_change_lock); if (rk->rk_type == RD_KAFKA_CONSUMER) { if (rk->rk_consumer.q) rd_kafka_q_disable(rk->rk_consumer.q); } rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Purging reply queue"); /* Purge op-queue */ rd_kafka_q_disable(rk->rk_rep); rd_kafka_q_purge(rk->rk_rep); /* Loose our special reference to the internal broker. */ mtx_lock(&rk->rk_internal_rkb_lock); if ((rkb = rk->rk_internal_rkb)) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Decommissioning internal broker"); /* Send op to trigger queue wake-up. 
 */
                rd_kafka_q_enq(rkb->rkb_ops,
                               rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));

                rk->rk_internal_rkb = NULL;
                thrd = malloc(sizeof(*thrd));
                *thrd = rkb->rkb_thread;
                rd_list_add(&wait_thrds, thrd);
        }
        mtx_unlock(&rk->rk_internal_rkb_lock);
        if (rkb)
                rd_kafka_broker_destroy(rkb);


        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                     "Join %d broker thread(s)", rd_list_cnt(&wait_thrds));

        /* Join broker threads.
         * Join failures are deliberately ignored: termination proceeds
         * regardless and the thrd_t copies are freed either way. */
        RD_LIST_FOREACH(thrd, &wait_thrds, i) {
#ifndef __OS400__
                int res;
#else
                void *res;
#endif
                if (thrd_join(*thrd, &res) != thrd_success)
                        ;
                free(thrd);
        }

        rd_list_destroy(&wait_thrds);

        /* Destroy mock cluster */
        if (rk->rk_mock.cluster)
                rd_kafka_mock_cluster_destroy(rk->rk_mock.cluster);

        if (rd_atomic32_get(&rk->rk_mock.cluster_cnt) > 0) {
                rd_kafka_log(rk, LOG_EMERG, "MOCK",
                             "%d mock cluster(s) still active: "
                             "must be explicitly destroyed with "
                             "rd_kafka_mock_cluster_destroy() prior to "
                             "terminating the rd_kafka_t instance",
                             (int)rd_atomic32_get(&rk->rk_mock.cluster_cnt));
                rd_assert(!*"All mock clusters must be destroyed prior to "
                          "rd_kafka_t destroy");
        }
}


/**
 * @brief Buffer state for stats emitter
 */
struct _stats_emit {
        char *buf;      /* Pointer to allocated buffer */
        size_t size;    /* Current allocated size of buf */
        size_t of;      /* Current write-offset in buf */
};


/* Stats buffer printf. Requires a (struct _stats_emit *)st variable in the
 * current scope.
 * On overflow the buffer is doubled once and the write retried.
 * NOTE(review): a single emission larger than the doubled remainder would
 * still be truncated by the second rd_snprintf() — presumed not to occur
 * in practice; confirm if very large single values are ever emitted. */
#define _st_printf(...) do {                                            \
                ssize_t _r;                                             \
                ssize_t _rem = st->size - st->of;                       \
                _r = rd_snprintf(st->buf+st->of, _rem, __VA_ARGS__);    \
                if (_r >= _rem) {                                       \
                        st->size *= 2;                                  \
                        _rem = st->size - st->of;                       \
                        st->buf = rd_realloc(st->buf, st->size);        \
                        _r = rd_snprintf(st->buf+st->of, _rem,          \
                                         __VA_ARGS__);                  \
                }                                                       \
                st->of += _r;                                           \
        } while (0)

/* Running totals accumulated across brokers and partitions while
 * emitting stats; emitted as top-level fields at the end. */
struct _stats_total {
        int64_t tx;          /**< broker.tx */
        int64_t tx_bytes;    /**< broker.tx_bytes */
        int64_t rx;          /**< broker.rx */
        int64_t rx_bytes;    /**< broker.rx_bytes */
        int64_t txmsgs;      /**< partition.txmsgs */
        int64_t txmsg_bytes; /**< partition.txbytes */
        int64_t rxmsgs;      /**< partition.rxmsgs */
        int64_t rxmsg_bytes; /**< partition.rxbytes */
};



/**
 * @brief Rollover and emit an average window.
 */
static RD_INLINE void rd_kafka_stats_emit_avg (struct _stats_emit *st,
                                               const char *name,
                                               rd_avg_t *src_avg) {
        rd_avg_t avg;

        /* Snapshot-and-reset the source window so each stats interval
         * reports a fresh average. */
        rd_avg_rollover(&avg, src_avg);
        _st_printf(
                "\"%s\": {"
                " \"min\":%"PRId64","
                " \"max\":%"PRId64","
                " \"avg\":%"PRId64","
                " \"sum\":%"PRId64","
                " \"stddev\": %"PRId64","
                " \"p50\": %"PRId64","
                " \"p75\": %"PRId64","
                " \"p90\": %"PRId64","
                " \"p95\": %"PRId64","
                " \"p99\": %"PRId64","
                " \"p99_99\": %"PRId64","
                " \"outofrange\": %"PRId64","
                " \"hdrsize\": %"PRId32","
                " \"cnt\":%i "
                "}, ",
                name,
                avg.ra_v.minv,
                avg.ra_v.maxv,
                avg.ra_v.avg,
                avg.ra_v.sum,
                (int64_t)avg.ra_hist.stddev,
                avg.ra_hist.p50,
                avg.ra_hist.p75,
                avg.ra_hist.p90,
                avg.ra_hist.p95,
                avg.ra_hist.p99,
                avg.ra_hist.p99_99,
                avg.ra_hist.oor,
                avg.ra_hist.hdrsize,
                avg.ra_v.cnt);
        rd_avg_destroy(&avg);
}

/**
 * Emit stats for toppar
 */
static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st,
                                                  struct _stats_total *total,
                                                  rd_kafka_toppar_t *rktp,
                                                  int first) {
        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
        int64_t end_offset;
        int64_t consumer_lag = -1;
        struct offset_stats offs;
        int32_t broker_id = -1;

        rd_kafka_toppar_lock(rktp);

        if (rktp->rktp_broker) {
                rd_kafka_broker_lock(rktp->rktp_broker);
                broker_id = rktp->rktp_broker->rkb_nodeid;
                rd_kafka_broker_unlock(rktp->rktp_broker);
        }

        /* Grab a copy 
of the latest finalized offset stats */ offs = rktp->rktp_offsets_fin; end_offset = (rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED) ? rktp->rktp_ls_offset : rktp->rktp_hi_offset; /* Calculate consumer_lag by using the highest offset * of app_offset (the last message passed to application + 1) * or the committed_offset (the last message committed by this or * another consumer). * Using app_offset allows consumer_lag to be up to date even if * offsets are not (yet) committed. */ if (end_offset != RD_KAFKA_OFFSET_INVALID && (rktp->rktp_app_offset >= 0 || rktp->rktp_committed_offset >= 0)) { consumer_lag = end_offset - RD_MAX(rktp->rktp_app_offset, rktp->rktp_committed_offset); if (unlikely(consumer_lag) < 0) consumer_lag = 0; } _st_printf("%s\"%"PRId32"\": { " "\"partition\":%"PRId32", " "\"broker\":%"PRId32", " "\"leader\":%"PRId32", " "\"desired\":%s, " "\"unknown\":%s, " "\"msgq_cnt\":%i, " "\"msgq_bytes\":%"PRIusz", " "\"xmit_msgq_cnt\":%i, " "\"xmit_msgq_bytes\":%"PRIusz", " "\"fetchq_cnt\":%i, " "\"fetchq_size\":%"PRIu64", " "\"fetch_state\":\"%s\", " "\"query_offset\":%"PRId64", " "\"next_offset\":%"PRId64", " "\"app_offset\":%"PRId64", " "\"stored_offset\":%"PRId64", " "\"commited_offset\":%"PRId64", " /*FIXME: issue #80 */ "\"committed_offset\":%"PRId64", " "\"eof_offset\":%"PRId64", " "\"lo_offset\":%"PRId64", " "\"hi_offset\":%"PRId64", " "\"ls_offset\":%"PRId64", " "\"consumer_lag\":%"PRId64", " "\"txmsgs\":%"PRIu64", " "\"txbytes\":%"PRIu64", " "\"rxmsgs\":%"PRIu64", " "\"rxbytes\":%"PRIu64", " "\"msgs\": %"PRIu64", " "\"rx_ver_drops\": %"PRIu64", " "\"msgs_inflight\": %"PRId32", " "\"next_ack_seq\": %"PRId32", " "\"next_err_seq\": %"PRId32", " "\"acked_msgid\": %"PRIu64 "} ", first ? 
"" : ", ", rktp->rktp_partition, rktp->rktp_partition, broker_id, rktp->rktp_leader_id, (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_DESIRED)?"true":"false", (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_UNKNOWN)?"true":"false", rd_kafka_msgq_len(&rktp->rktp_msgq), rd_kafka_msgq_size(&rktp->rktp_msgq), /* FIXME: xmit_msgq is local to the broker thread. */ 0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq), rd_kafka_q_size(rktp->rktp_fetchq), rd_kafka_fetch_states[rktp->rktp_fetch_state], rktp->rktp_query_offset, offs.fetch_offset, rktp->rktp_app_offset, rktp->rktp_stored_offset, rktp->rktp_committed_offset, /* FIXME: issue #80 */ rktp->rktp_committed_offset, offs.eof_offset, rktp->rktp_lo_offset, rktp->rktp_hi_offset, rktp->rktp_ls_offset, consumer_lag, rd_atomic64_get(&rktp->rktp_c.tx_msgs), rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes), rd_atomic64_get(&rktp->rktp_c.rx_msgs), rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes), rk->rk_type == RD_KAFKA_PRODUCER ? rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs) : rd_atomic64_get(&rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */ rd_atomic64_get(&rktp->rktp_c.rx_ver_drops), rd_atomic32_get(&rktp->rktp_msgs_inflight), rktp->rktp_eos.next_ack_seq, rktp->rktp_eos.next_err_seq, rktp->rktp_eos.acked_msgid); if (total) { total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs); total->txmsg_bytes += rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes); total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs); total->rxmsg_bytes += rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes); } rd_kafka_toppar_unlock(rktp); } /** * @brief Emit broker request type stats */ static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, rd_kafka_broker_t *rkb) { /* Filter out request types that will never be sent by the client. 
*/ static const rd_bool_t filter[4][RD_KAFKAP__NUM] = { [RD_KAFKA_PRODUCER] = { [RD_KAFKAP_Fetch] = rd_true, [RD_KAFKAP_OffsetCommit] = rd_true, [RD_KAFKAP_OffsetFetch] = rd_true, [RD_KAFKAP_JoinGroup] = rd_true, [RD_KAFKAP_Heartbeat] = rd_true, [RD_KAFKAP_LeaveGroup] = rd_true, [RD_KAFKAP_SyncGroup] = rd_true }, [RD_KAFKA_CONSUMER] = { [RD_KAFKAP_Produce] = rd_true, [RD_KAFKAP_InitProducerId] = rd_true, /* Transactional producer */ [RD_KAFKAP_AddPartitionsToTxn] = rd_true, [RD_KAFKAP_AddOffsetsToTxn] = rd_true, [RD_KAFKAP_EndTxn] = rd_true, [RD_KAFKAP_TxnOffsetCommit] = rd_true, }, [2/*any client type*/] = { [RD_KAFKAP_UpdateMetadata] = rd_true, [RD_KAFKAP_ControlledShutdown] = rd_true, [RD_KAFKAP_LeaderAndIsr] = rd_true, [RD_KAFKAP_StopReplica] = rd_true, [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true, [RD_KAFKAP_WriteTxnMarkers] = rd_true, [RD_KAFKAP_AlterReplicaLogDirs] = rd_true, [RD_KAFKAP_DescribeLogDirs] = rd_true, [RD_KAFKAP_SaslAuthenticate] = rd_false, [RD_KAFKAP_CreateDelegationToken] = rd_true, [RD_KAFKAP_RenewDelegationToken] = rd_true, [RD_KAFKAP_ExpireDelegationToken] = rd_true, [RD_KAFKAP_DescribeDelegationToken] = rd_true }, [3/*hide-unless-non-zero*/] = { /* Hide Admin requests unless they've been used */ [RD_KAFKAP_CreateTopics] = rd_true, [RD_KAFKAP_DeleteTopics] = rd_true, [RD_KAFKAP_DeleteRecords] = rd_true, [RD_KAFKAP_CreatePartitions] = rd_true, [RD_KAFKAP_DescribeAcls] = rd_true, [RD_KAFKAP_CreateAcls] = rd_true, [RD_KAFKAP_DeleteAcls] = rd_true, [RD_KAFKAP_DescribeConfigs] = rd_true, [RD_KAFKAP_AlterConfigs] = rd_true, [RD_KAFKAP_DeleteGroups] = rd_true, [RD_KAFKAP_ListGroups] = rd_true, [RD_KAFKAP_DescribeGroups] = rd_true } }; int i; int cnt = 0; _st_printf("\"req\": { "); for (i = 0 ; i < RD_KAFKAP__NUM ; i++) { int64_t v; if (filter[rkb->rkb_rk->rk_type][i] || filter[2][i]) continue; v = rd_atomic64_get(&rkb->rkb_c.reqtype[i]); if (!v && filter[3][i]) continue; /* Filter out zero values */ _st_printf("%s\"%s\": %"PRId64, cnt > 0 ? 
", " : "", rd_kafka_ApiKey2str(i), v); cnt++; } _st_printf(" }, "); } /** * Emit all statistics */ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { rd_kafka_broker_t *rkb; rd_kafka_topic_t *rkt; rd_ts_t now; rd_kafka_op_t *rko; unsigned int tot_cnt; size_t tot_size; rd_kafka_resp_err_t err; struct _stats_emit stx = { .size = 1024*10 }; struct _stats_emit *st = &stx; struct _stats_total total = {0}; st->buf = rd_malloc(st->size); rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); rd_kafka_rdlock(rk); now = rd_clock(); _st_printf("{ " "\"name\": \"%s\", " "\"client_id\": \"%s\", " "\"type\": \"%s\", " "\"ts\":%"PRId64", " "\"time\":%lli, " "\"replyq\":%i, " "\"msg_cnt\":%u, " "\"msg_size\":%"PRIusz", " "\"msg_max\":%u, " "\"msg_size_max\":%"PRIusz", " "\"simple_cnt\":%i, " "\"metadata_cache_cnt\":%i, " "\"brokers\":{ "/*open brokers*/, rk->rk_name, rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type), now, (signed long long)time(NULL), rd_kafka_q_len(rk->rk_rep), tot_cnt, tot_size, rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size, rd_atomic32_get(&rk->rk_simple_cnt), rk->rk_metadata_cache.rkmc_cnt); TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_toppar_t *rktp; rd_kafka_broker_lock(rkb); _st_printf("%s\"%s\": { "/*open broker*/ "\"name\":\"%s\", " "\"nodeid\":%"PRId32", " "\"nodename\":\"%s\", " "\"source\":\"%s\", " "\"state\":\"%s\", " "\"stateage\":%"PRId64", " "\"outbuf_cnt\":%i, " "\"outbuf_msg_cnt\":%i, " "\"waitresp_cnt\":%i, " "\"waitresp_msg_cnt\":%i, " "\"tx\":%"PRIu64", " "\"txbytes\":%"PRIu64", " "\"txerrs\":%"PRIu64", " "\"txretries\":%"PRIu64", " "\"req_timeouts\":%"PRIu64", " "\"rx\":%"PRIu64", " "\"rxbytes\":%"PRIu64", " "\"rxerrs\":%"PRIu64", " "\"rxcorriderrs\":%"PRIu64", " "\"rxpartial\":%"PRIu64", " "\"zbuf_grow\":%"PRIu64", " "\"buf_grow\":%"PRIu64", " "\"wakeups\":%"PRIu64", " "\"connects\":%"PRId32", " "\"disconnects\":%"PRId32", ", rkb == TAILQ_FIRST(&rk->rk_brokers) ? 
"" : ", ", rkb->rkb_name, rkb->rkb_name, rkb->rkb_nodeid, rkb->rkb_nodename, rd_kafka_confsource2str(rkb->rkb_source), rd_kafka_broker_state_names[rkb->rkb_state], rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0, rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt), rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt), rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt), rd_atomic64_get(&rkb->rkb_c.tx), rd_atomic64_get(&rkb->rkb_c.tx_bytes), rd_atomic64_get(&rkb->rkb_c.tx_err), rd_atomic64_get(&rkb->rkb_c.tx_retries), rd_atomic64_get(&rkb->rkb_c.req_timeouts), rd_atomic64_get(&rkb->rkb_c.rx), rd_atomic64_get(&rkb->rkb_c.rx_bytes), rd_atomic64_get(&rkb->rkb_c.rx_err), rd_atomic64_get(&rkb->rkb_c.rx_corrid_err), rd_atomic64_get(&rkb->rkb_c.rx_partial), rd_atomic64_get(&rkb->rkb_c.zbuf_grow), rd_atomic64_get(&rkb->rkb_c.buf_grow), rd_atomic64_get(&rkb->rkb_c.wakeups), rd_atomic32_get(&rkb->rkb_c.connects), rd_atomic32_get(&rkb->rkb_c.disconnects)); total.tx += rd_atomic64_get(&rkb->rkb_c.tx); total.tx_bytes += rd_atomic64_get(&rkb->rkb_c.tx_bytes); total.rx += rd_atomic64_get(&rkb->rkb_c.rx); total.rx_bytes += rd_atomic64_get(&rkb->rkb_c.rx_bytes); rd_kafka_stats_emit_avg(st, "int_latency", &rkb->rkb_avg_int_latency); rd_kafka_stats_emit_avg(st, "outbuf_latency", &rkb->rkb_avg_outbuf_latency); rd_kafka_stats_emit_avg(st, "rtt", &rkb->rkb_avg_rtt); rd_kafka_stats_emit_avg(st, "throttle", &rkb->rkb_avg_throttle); rd_kafka_stats_emit_broker_reqs(st, rkb); _st_printf("\"toppars\":{ "/*open toppars*/); TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { _st_printf("%s\"%.*s-%"PRId32"\": { " "\"topic\":\"%.*s\", " "\"partition\":%"PRId32"} ", rktp==TAILQ_FIRST(&rkb->rkb_toppars)?"":", ", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition); } rd_kafka_broker_unlock(rkb); _st_printf("} "/*close toppars*/ "} "/*close broker*/); } _st_printf("}, " /* close 
"brokers" array */ "\"topics\":{ "); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { rd_kafka_toppar_t *rktp; int i, j; rd_kafka_topic_rdlock(rkt); _st_printf("%s\"%.*s\": { " "\"topic\":\"%.*s\", " "\"age\":%"PRId64", " "\"metadata_age\":%"PRId64", ", rkt==TAILQ_FIRST(&rk->rk_topics)?"":", ", RD_KAFKAP_STR_PR(rkt->rkt_topic), RD_KAFKAP_STR_PR(rkt->rkt_topic), (now - rkt->rkt_ts_create)/1000, rkt->rkt_ts_metadata ? (now - rkt->rkt_ts_metadata)/1000 : 0); rd_kafka_stats_emit_avg(st, "batchsize", &rkt->rkt_avg_batchsize); rd_kafka_stats_emit_avg(st, "batchcnt", &rkt->rkt_avg_batchcnt); _st_printf("\"partitions\":{ " /*open partitions*/); for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) rd_kafka_stats_emit_toppar(st, &total, rkt->rkt_p[i], i == 0); RD_LIST_FOREACH(rktp, &rkt->rkt_desp, j) rd_kafka_stats_emit_toppar(st, &total, rktp, i+j == 0); i += j; if (rkt->rkt_ua) rd_kafka_stats_emit_toppar(st, NULL, rkt->rkt_ua, i++ == 0); rd_kafka_topic_rdunlock(rkt); _st_printf("} "/*close partitions*/ "} "/*close topic*/); } _st_printf("} "/*close topics*/); if (rk->rk_cgrp) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; _st_printf(", \"cgrp\": { " "\"state\": \"%s\", " "\"stateage\": %"PRId64", " "\"join_state\": \"%s\", " "\"rebalance_age\": %"PRId64", " "\"rebalance_cnt\": %d, " "\"rebalance_reason\": \"%s\", " "\"assignment_size\": %d }", rd_kafka_cgrp_state_names[rkcg->rkcg_state], rkcg->rkcg_ts_statechange ? (now - rkcg->rkcg_ts_statechange) / 1000 : 0, rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], rkcg->rkcg_c.ts_rebalance ? 
(now - rkcg->rkcg_c.ts_rebalance)/1000 : 0, rkcg->rkcg_c.rebalance_cnt, rkcg->rkcg_c.rebalance_reason, rkcg->rkcg_c.assignment_size); } if (rd_kafka_is_idempotent(rk)) { _st_printf(", \"eos\": { " "\"idemp_state\": \"%s\", " "\"idemp_stateage\": %"PRId64", " "\"txn_state\": \"%s\", " "\"txn_stateage\": %"PRId64", " "\"txn_may_enq\": %s, " "\"producer_id\": %"PRId64", " "\"producer_epoch\": %hd, " "\"epoch_cnt\": %d " "}", rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), (now - rk->rk_eos.ts_idemp_state) / 1000, rd_kafka_txn_state2str(rk->rk_eos.txn_state), (now - rk->rk_eos.ts_txn_state) / 1000, rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? "true":"false", rk->rk_eos.pid.id, rk->rk_eos.pid.epoch, rk->rk_eos.epoch_cnt); } if ((err = rd_atomic32_get(&rk->rk_fatal.err))) _st_printf(", \"fatal\": { " "\"error\": \"%s\", " "\"reason\": \"%s\", " "\"cnt\": %d " "}", rd_kafka_err2str(err), rk->rk_fatal.errstr, rk->rk_fatal.cnt); rd_kafka_rdunlock(rk); /* Total counters */ _st_printf(", " "\"tx\":%"PRId64", " "\"tx_bytes\":%"PRId64", " "\"rx\":%"PRId64", " "\"rx_bytes\":%"PRId64", " "\"txmsgs\":%"PRId64", " "\"txmsg_bytes\":%"PRId64", " "\"rxmsgs\":%"PRId64", " "\"rxmsg_bytes\":%"PRId64, total.tx, total.tx_bytes, total.rx, total.rx_bytes, total.txmsgs, total.txmsg_bytes, total.rxmsgs, total.rxmsg_bytes); _st_printf("}"/*close object*/); /* Enqueue op for application */ rko = rd_kafka_op_new(RD_KAFKA_OP_STATS); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); rko->rko_u.stats.json = st->buf; rko->rko_u.stats.json_len = st->of; rd_kafka_q_enq(rk->rk_rep, rko); } /** * @brief 1 second generic timer. * * @locality rdkafka main thread * @locks none */ static void rd_kafka_1s_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; /* Scan topic state, message timeouts, etc. */ rd_kafka_topic_scan_all(rk, rd_clock()); /* Sparse connections: * try to maintain at least one connection to the cluster. 
 */
        if (rk->rk_conf.sparse_connections &&
            rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
                rd_kafka_connect_any(rk, "no cluster connection");

        rd_kafka_coord_cache_expire(&rk->rk_coord_cache);
}

/* Stats emission timer callback: emits the full stats JSON document. */
static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) {
	rd_kafka_t *rk = rkts->rkts_rk;

	rd_kafka_stats_emit_all(rk);
}


/**
 * @brief Periodic metadata refresh callback
 *
 * @locality rdkafka main thread
 */
static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) {
        rd_kafka_t *rk = rkts->rkts_rk;
        rd_kafka_resp_err_t err;

        /* High-level consumer:
         * We need to query both locally known topics and subscribed topics
         * so that we can detect locally known topics changing partition
         * count or disappearing, as well as detect previously non-existent
         * subscribed topics now being available in the cluster. */
        if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
                err = rd_kafka_metadata_refresh_consumer_topics(
                        rk, NULL,
                        "periodic topic and broker list refresh");
        else
                err = rd_kafka_metadata_refresh_known_topics(
                        rk, NULL, rd_true/*force*/,
                        "periodic topic and broker list refresh");

        if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC &&
            rd_interval(&rk->rk_suppress.broker_metadata_refresh,
                        10*1000*1000 /*10s*/, 0) > 0) {
                /* If there are no (locally referenced) topics
                 * to query, refresh the broker list.
                 * This avoids getting idle-disconnected for clients
                 * that have not yet referenced a topic and makes
                 * sure such a client has an up to date broker list. */
                rd_kafka_metadata_refresh_brokers(
                        rk, NULL, "periodic broker list refresh");
        }
}


/**
 * @brief Wait for background threads to initialize.
 *
 * @returns the number of background threads still not initialized.
 *
 * @locality app thread calling rd_kafka_new()
 * @locks none
 */
static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) {
        struct timespec tspec;
        int ret;

        rd_timeout_init_timespec(&tspec, timeout_ms);

        /* Each created thread decrements rk_init_wait_cnt and broadcasts
         * rk_init_cnd once its startup is complete; wait until all are
         * done or the timeout expires. */
        mtx_lock(&rk->rk_init_lock);
        while (rk->rk_init_wait_cnt > 0 &&
               cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock,
                                 &tspec) == thrd_success)
                ;
        ret = rk->rk_init_wait_cnt;
        mtx_unlock(&rk->rk_init_lock);

        return ret;
}


/**
 * Main loop for Kafka handler thread.
 */
#ifndef __OS400__
static int rd_kafka_thread_main (void *arg) {
#else
static void *rd_kafka_thread_main (void *arg) {
#endif
        rd_kafka_t *rk = arg;
        rd_kafka_timer_t tmr_1s = RD_ZERO_INIT;
        rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT;
        rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT;

        rd_kafka_set_thread_name("main");
        rd_kafka_set_thread_sysname("rdk:main");

        rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_MAIN);

        (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);

        /* Acquire lock (which was held by thread creator during creation)
         * to synchronise state. */
        rd_kafka_wrlock(rk);
        rd_kafka_wrunlock(rk);

        /* 1 second timer for topic scan and connection checking.
 */
        rd_kafka_timer_start(&rk->rk_timers, &tmr_1s, 1000000,
                             rd_kafka_1s_tmr_cb, NULL);
        if (rk->rk_conf.stats_interval_ms)
                rd_kafka_timer_start(&rk->rk_timers, &tmr_stats_emit,
                                     rk->rk_conf.stats_interval_ms * 1000ll,
                                     rd_kafka_stats_emit_tmr_cb, NULL);
        if (rk->rk_conf.metadata_refresh_interval_ms > 0)
                rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh,
                                     rk->rk_conf.metadata_refresh_interval_ms
                                     * 1000ll,
                                     rd_kafka_metadata_refresh_cb, NULL);

        if (rk->rk_cgrp)
                rd_kafka_q_fwd_set(rk->rk_cgrp->rkcg_ops, rk->rk_ops);

        if (rd_kafka_is_idempotent(rk))
                rd_kafka_idemp_init(rk);

        /* Signal rd_kafka_new() that this thread is up and running. */
        mtx_lock(&rk->rk_init_lock);
        rk->rk_init_wait_cnt--;
        cnd_broadcast(&rk->rk_init_cnd);
        mtx_unlock(&rk->rk_init_lock);

        /* Serve ops and timers until terminating, the op queue is drained
         * and (for consumers) the consumer group has fully terminated. */
        while (likely(!rd_kafka_terminating(rk) ||
                      rd_kafka_q_len(rk->rk_ops) ||
                      (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state !=
                                       RD_KAFKA_CGRP_STATE_TERM)))) {
                rd_ts_t sleeptime = rd_kafka_timers_next(
                        &rk->rk_timers, 1000*1000/*1s*/, 1/*lock*/);
                rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
                                 RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
		if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
			rd_kafka_cgrp_serve(rk->rk_cgrp);
		rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT);
	}

	rd_kafka_dbg(rk, GENERIC, "TERMINATE",
		     "Internal main thread terminating");

	if (rd_kafka_is_idempotent(rk))
		rd_kafka_idemp_term(rk);

	rd_kafka_q_disable(rk->rk_ops);
	rd_kafka_q_purge(rk->rk_ops);

        rd_kafka_timer_stop(&rk->rk_timers, &tmr_1s, 1);
        if (rk->rk_conf.stats_interval_ms)
                rd_kafka_timer_stop(&rk->rk_timers, &tmr_stats_emit, 1);
        rd_kafka_timer_stop(&rk->rk_timers, &tmr_metadata_refresh, 1);

        /* Synchronise state */
        rd_kafka_wrlock(rk);
        rd_kafka_wrunlock(rk);

        rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_MAIN);

        rd_kafka_destroy_internal(rk);

        rd_kafka_dbg(rk, GENERIC, "TERMINATE",
                     "Internal main thread termination done");

	rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);

#ifndef __OS400__
	return 0;
#else
	return NULL;
#endif
}


/* Handler for conf.term_sig: intentionally does nothing, the signal's
 * only purpose is to interrupt blocking syscalls during termination. */
static void rd_kafka_term_sig_handler (int sig) {
	/* nop */
}

rd_kafka_t *rd_kafka_new 
(rd_kafka_type_t type, rd_kafka_conf_t *app_conf, char *errstr, size_t errstr_size) { rd_kafka_t *rk; static rd_atomic32_t rkid; rd_kafka_conf_t *conf; rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; int ret_errno = 0; const char *conf_err; #ifndef _WIN32 sigset_t newset, oldset; #endif char builtin_features[128]; size_t bflen; rd_kafka_global_init(); /* rd_kafka_new() takes ownership of the provided \p app_conf * object if rd_kafka_new() succeeds. * Since \p app_conf is optional we allocate a default configuration * object here if \p app_conf is NULL. * The configuration object itself is struct-copied later * leaving the default *conf pointer to be ready for freeing. * In case new() fails and app_conf was specified we will clear out * rk_conf to avoid double-freeing from destroy_internal() and the * user's eventual call to rd_kafka_conf_destroy(). * This is all a bit tricky but that's the nature of * legacy interfaces. */ if (!app_conf) conf = rd_kafka_conf_new(); else conf = app_conf; /* Verify and finalize configuration */ if ((conf_err = rd_kafka_conf_finalize(type, conf))) { /* Incompatible configuration settings */ rd_snprintf(errstr, errstr_size, "%s", conf_err); if (!app_conf) rd_kafka_conf_destroy(conf); rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); return NULL; } rd_kafka_global_cnt_incr(); /* * Set up the handle. */ rk = rd_calloc(1, sizeof(*rk)); rk->rk_type = type; /* Struct-copy the config object. */ rk->rk_conf = *conf; if (!app_conf) rd_free(conf); /* Free the base config struct only, * not its fields since they were copied to * rk_conf just above. Those fields are * freed from rd_kafka_destroy_internal() * as the rk itself is destroyed. */ /* Seed PRNG, don't bother about HAVE_RAND_R, since it is pretty cheap. 
*/ if (rk->rk_conf.enable_random_seed) call_once(&rd_kafka_global_srand_once, rd_kafka_global_srand); /* Call on_new() interceptors */ rd_kafka_interceptors_on_new(rk, &rk->rk_conf); rwlock_init(&rk->rk_lock); mtx_init(&rk->rk_internal_rkb_lock, mtx_plain); cnd_init(&rk->rk_broker_state_change_cnd); mtx_init(&rk->rk_broker_state_change_lock, mtx_plain); rd_list_init(&rk->rk_broker_state_change_waiters, 8, rd_kafka_enq_once_trigger_destroy); cnd_init(&rk->rk_init_cnd); mtx_init(&rk->rk_init_lock, mtx_plain); rd_interval_init(&rk->rk_suppress.no_idemp_brokers); rd_interval_init(&rk->rk_suppress.broker_metadata_refresh); rd_interval_init(&rk->rk_suppress.sparse_connect_random); mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain); rd_atomic64_init(&rk->rk_ts_last_poll, rd_clock()); rk->rk_rep = rd_kafka_q_new(rk); rk->rk_ops = rd_kafka_q_new(rk); rk->rk_ops->rkq_serve = rd_kafka_poll_cb; rk->rk_ops->rkq_opaque = rk; if (rk->rk_conf.log_queue) { rk->rk_logq = rd_kafka_q_new(rk); rk->rk_logq->rkq_serve = rd_kafka_poll_cb; rk->rk_logq->rkq_opaque = rk; } TAILQ_INIT(&rk->rk_brokers); TAILQ_INIT(&rk->rk_topics); rd_kafka_timers_init(&rk->rk_timers, rk, rk->rk_ops); rd_kafka_metadata_cache_init(rk); rd_kafka_coord_cache_init(&rk->rk_coord_cache, rk->rk_conf.metadata_refresh_interval_ms ? 
rk->rk_conf.metadata_refresh_interval_ms : (5 * 60 * 1000) /* 5min */); rd_kafka_coord_reqs_init(rk); if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb) rk->rk_drmode = RD_KAFKA_DR_MODE_CB; else if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) rk->rk_drmode = RD_KAFKA_DR_MODE_EVENT; else rk->rk_drmode = RD_KAFKA_DR_MODE_NONE; if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR; if (rk->rk_conf.rebalance_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE; if (rk->rk_conf.offset_commit_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT; if (rk->rk_conf.error_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR; #if WITH_SASL_OAUTHBEARER if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt && !rk->rk_conf.sasl.oauthbearer_token_refresh_cb) rd_kafka_conf_set_oauthbearer_token_refresh_cb( &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token); if (rk->rk_conf.sasl.oauthbearer_token_refresh_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; #endif rk->rk_controllerid = -1; /* Admin client defaults */ rk->rk_conf.admin.request_timeout_ms = rk->rk_conf.socket_timeout_ms; if (rk->rk_conf.debug) rk->rk_conf.log_level = LOG_DEBUG; rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i", rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type), rd_atomic32_add(&rkid, 1)); /* Construct clientid kafka string */ rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str,-1); /* Convert group.id to kafka string (may be NULL) */ rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str,-1); /* Config fixups */ rk->rk_conf.queued_max_msg_bytes = (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll; /* Enable api.version.request=true if fallback.broker.version * indicates a supporting broker. 
*/ if (rd_kafka_ApiVersion_is_queryable(rk->rk_conf.broker_version_fallback)) rk->rk_conf.api_version_request = 1; if (rk->rk_type == RD_KAFKA_PRODUCER) { mtx_init(&rk->rk_curr_msgs.lock, mtx_plain); cnd_init(&rk->rk_curr_msgs.cnd); rk->rk_curr_msgs.max_cnt = rk->rk_conf.queue_buffering_max_msgs; if ((unsigned long long)rk->rk_conf. queue_buffering_max_kbytes * 1024 > (unsigned long long)SIZE_MAX) { rk->rk_curr_msgs.max_size = SIZE_MAX; rd_kafka_log(rk, LOG_WARNING, "QUEUESIZE", "queue.buffering.max.kbytes adjusted " "to system SIZE_MAX limit %"PRIusz" bytes", rk->rk_curr_msgs.max_size); } else { rk->rk_curr_msgs.max_size = (size_t)rk->rk_conf. queue_buffering_max_kbytes * 1024; } } if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) { ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } /* Create Mock cluster */ rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0); if (rk->rk_conf.mock.broker_cnt > 0) { rk->rk_mock.cluster = rd_kafka_mock_cluster_new( rk, rk->rk_conf.mock.broker_cnt); if (!rk->rk_mock.cluster) { rd_snprintf(errstr, errstr_size, "Failed to create mock cluster, see logs"); ret_err = RD_KAFKA_RESP_ERR__FAIL; ret_errno = EINVAL; goto fail; } rd_kafka_log(rk, LOG_NOTICE, "MOCK", "Mock cluster enabled: " "original bootstrap.servers and security.protocol " "ignored and replaced"); /* Overwrite bootstrap.servers and connection settings */ if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers", rd_kafka_mock_cluster_bootstraps( rk->rk_mock.cluster), NULL, 0) != RD_KAFKA_CONF_OK) rd_assert(!"failed to replace mock bootstrap.servers"); if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol", "plaintext", NULL, 0) != RD_KAFKA_CONF_OK) rd_assert(!"failed to reset mock security.protocol"); rk->rk_conf.security_protocol = RD_KAFKA_PROTO_PLAINTEXT; } if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) { /* Select SASL provider */ if 
(rd_kafka_sasl_select_provider(rk, errstr, errstr_size) == -1) { ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } /* Initialize SASL provider */ if (rd_kafka_sasl_init(rk, errstr, errstr_size) == -1) { rk->rk_conf.sasl.provider = NULL; ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } } #if WITH_SSL if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SSL || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) { /* Create SSL context */ if (rd_kafka_ssl_ctx_init(rk, errstr, errstr_size) == -1) { ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } } #endif if (type == RD_KAFKA_CONSUMER) { rd_kafka_assignment_init(rk); if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) { /* Create consumer group handle */ rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id, rk->rk_client_id); rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_cgrp->rkcg_q); } else { /* Legacy consumer */ rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep); } } else if (type == RD_KAFKA_PRODUCER) { rk->rk_eos.transactional_id = rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1); } #ifndef _WIN32 /* Block all signals in newly created threads. * To avoid race condition we block all signals in the calling * thread, which the new thread will inherit its sigmask from, * and then restore the original sigmask of the calling thread when * we're done creating the thread. */ sigemptyset(&oldset); sigfillset(&newset); if (rk->rk_conf.term_sig) { struct sigaction sa_term = { .sa_handler = rd_kafka_term_sig_handler }; sigaction(rk->rk_conf.term_sig, &sa_term, NULL); } pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif mtx_lock(&rk->rk_init_lock); /* Create background thread and queue if background_event_cb() * has been configured. * Do this before creating the main thread since after * the main thread is created it is no longer trivial to error * out from rd_kafka_new(). 
*/ if (rk->rk_conf.background_event_cb) { /* Hold off background thread until thrd_create() is done. */ rd_kafka_wrlock(rk); rk->rk_background.q = rd_kafka_q_new(rk); rk->rk_init_wait_cnt++; if ((thrd_create(&rk->rk_background.thread, rd_kafka_background_thread_main, rk)) != thrd_success) { rk->rk_init_wait_cnt--; ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; ret_errno = errno; if (errstr) rd_snprintf(errstr, errstr_size, "Failed to create background " "thread: %s (%i)", rd_strerror(errno), errno); rd_kafka_wrunlock(rk); mtx_unlock(&rk->rk_init_lock); #ifndef _WIN32 /* Restore sigmask of caller */ pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif goto fail; } #ifdef __OS400__ pthread_getunique_np(&rk->rk_background.thread, &rk->rk_background.thread_tid); #endif rd_kafka_wrunlock(rk); } /* Lock handle here to synchronise state, i.e., hold off * the thread until we've finalized the handle. */ rd_kafka_wrlock(rk); /* Create handler thread */ rk->rk_init_wait_cnt++; if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) != thrd_success) { rk->rk_init_wait_cnt--; ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; ret_errno = errno; if (errstr) rd_snprintf(errstr, errstr_size, "Failed to create thread: %s (%i)", rd_strerror(errno), errno); rd_kafka_wrunlock(rk); mtx_unlock(&rk->rk_init_lock); #ifndef _WIN32 /* Restore sigmask of caller */ pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif goto fail; } rd_kafka_wrunlock(rk); mtx_unlock(&rk->rk_init_lock); /* * @warning `goto fail` is prohibited past this point */ mtx_lock(&rk->rk_internal_rkb_lock); rk->rk_internal_rkb = rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT, "", 0, RD_KAFKA_NODEID_UA); mtx_unlock(&rk->rk_internal_rkb_lock); /* Add initial list of brokers from configuration */ if (rk->rk_conf.brokerlist) { if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0) rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, "No brokers configured"); } #ifndef _WIN32 /* Restore sigmask of 
caller */ pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif /* Wait for background threads to fully initialize so that * the client instance is fully functional at the time it is * returned from the constructor. */ if (rd_kafka_init_wait(rk, 60*1000) != 0) { /* This should never happen unless there is a bug * or the OS is not scheduling the background threads. * Either case there is no point in handling this gracefully * in the current state since the thread joins are likely * to hang as well. */ mtx_lock(&rk->rk_init_lock); rd_kafka_log(rk, LOG_CRIT, "INIT", "Failed to initialize %s: " "%d background thread(s) did not initialize " "within 60 seconds", rk->rk_name, rk->rk_init_wait_cnt); if (errstr) rd_snprintf(errstr, errstr_size, "Timed out waiting for " "%d background thread(s) to initialize", rk->rk_init_wait_cnt); mtx_unlock(&rk->rk_init_lock); rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, EDEADLK); return NULL; } rk->rk_initialized = 1; bflen = sizeof(builtin_features); if (rd_kafka_conf_get(&rk->rk_conf, "builtin.features", builtin_features, &bflen) != RD_KAFKA_CONF_OK) rd_snprintf(builtin_features, sizeof(builtin_features), "?"); rd_kafka_dbg(rk, ALL, "INIT", "librdkafka v%s (0x%x) %s initialized " "(builtin.features %s, %s, debug 0x%x)", rd_kafka_version_str(), rd_kafka_version(), rk->rk_name, builtin_features, BUILT_WITH, rk->rk_conf.debug); /* Log warnings for deprecated configuration */ rd_kafka_conf_warn(rk); /* Debug dump configuration */ if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) { rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf, "Client configuration"); if (rk->rk_conf.topic_conf) rd_kafka_anyconf_dump_dbg( rk, _RK_TOPIC, rk->rk_conf.topic_conf, "Default topic configuration"); } /* Free user supplied conf's base pointer on success, * but not the actual allocated fields since the struct * will have been copied in its entirety above. 
*/ if (app_conf) rd_free(app_conf); rd_kafka_set_last_error(0, 0); return rk; fail: /* * Error out and clean up */ /* * Tell background thread to terminate and wait for it to return. */ rd_atomic32_set(&rk->rk_terminate, RD_KAFKA_DESTROY_F_TERMINATE); /* Terminate SASL provider */ if (rk->rk_conf.sasl.provider) rd_kafka_sasl_term(rk); #ifndef __OS400__ if (rk->rk_background.thread) { int res; #else if (*(long long *)(void *)&rk->rk_background.thread_tid) { void *res; #endif thrd_join(rk->rk_background.thread, &res); rd_kafka_q_destroy_owner(rk->rk_background.q); } /* If on_new() interceptors have been called we also need * to allow interceptor clean-up by calling on_destroy() */ rd_kafka_interceptors_on_destroy(rk); /* If rk_conf is a struct-copy of the application configuration * we need to avoid rk_conf fields from being freed from * rd_kafka_destroy_internal() since they belong to app_conf. * However, there are some internal fields, such as interceptors, * that belong to rk_conf and thus needs to be cleaned up. * Legacy APIs, sigh.. */ if (app_conf) { rd_kafka_assignors_term(rk); rd_kafka_interceptors_destroy(&rk->rk_conf); memset(&rk->rk_conf, 0, sizeof(rk->rk_conf)); } rd_kafka_destroy_internal(rk); rd_kafka_destroy_final(rk); rd_kafka_set_last_error(ret_err, ret_errno); return NULL; } /** * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() with * friends) since it does not have an API for stopping the cgrp we will need to * sort that out automatically in the background when all consumption * has stopped. * * Returns 0 if a High level consumer is already instantiated * which means a Simple consumer cannot co-operate with it, else 1. * * A rd_kafka_t handle can never migrate from simple to high-level, or * vice versa, so we dont need a ..consumer_del(). 
 */
int rd_kafka_simple_consumer_add (rd_kafka_t *rk) {
        /* A negative count means a high-level (cgrp) consumer has been
         * instantiated on this handle: simple consumer use is denied. */
        if (rd_atomic32_get(&rk->rk_simple_cnt) < 0)
                return 0;

        return (int)rd_atomic32_add(&rk->rk_simple_cnt, 1);
}



/**
 * rktp fetch is split up in these parts:
 *  * application side:
 *  * broker side (handled by current leader broker thread for rktp):
 *          - the fetch state, initial offset, etc.
 *          - fetching messages, updating fetched offset, etc.
 *          - offset commits
 *
 * Communication between the two are:
 *    app side -> rdkafka main side: rktp_ops
 *    broker thread -> app side: rktp_fetchq
 *
 * There is no shared state between these threads, instead
 * state is communicated through the two op queues, and state synchronization
 * is performed by version barriers.
 *
 */

/**
 * @brief Common implementation for the legacy consume_start() APIs:
 *        registers simple-consumer use, adds the partition to the desired
 *        list and enqueues an (async) fetch-start op for the toppar.
 *
 * @param rkq Optional queue to forward fetched messages to (may be NULL,
 *            in which case the toppar's own fetch queue is used by the
 *            fetcher side).
 *
 * @returns 0 on success, or -1 with last_error set.
 */
static RD_UNUSED
int rd_kafka_consume_start0 (rd_kafka_topic_t *rkt, int32_t partition,
                             int64_t offset, rd_kafka_q_t *rkq) {
        rd_kafka_toppar_t *rktp;

        if (partition < 0) {
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                                        ESRCH);
                return -1;
        }

        /* Fails if a high-level consumer is already active on this handle:
         * the two consumer modes cannot co-exist. */
        if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) {
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
                                        EINVAL);
                return -1;
        }

        rd_kafka_topic_wrlock(rkt);
        rktp = rd_kafka_toppar_desired_add(rkt, partition);
        rd_kafka_topic_wrunlock(rkt);

        /* Verify offset */
        if (offset == RD_KAFKA_OFFSET_BEGINNING ||
            offset == RD_KAFKA_OFFSET_END ||
            offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
                /* logical offsets */

        } else if (offset == RD_KAFKA_OFFSET_STORED) {
                /* offset manager */

                if (rkt->rkt_conf.offset_store_method ==
                    RD_KAFKA_OFFSET_METHOD_BROKER &&
                    RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) {
                        /* Broker based offsets require a group id. */
                        rd_kafka_toppar_destroy(rktp);
                        rd_kafka_set_last_error(
                                RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
                        return -1;
                }

        } else if (offset < 0) {
                rd_kafka_toppar_destroy(rktp);
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
                                        EINVAL);
                return -1;
        }

        /* Async: the fetch-start is performed by the toppar's handler
         * thread, no result is awaited here. */
        rd_kafka_toppar_op_fetch_start(rktp, offset, rkq, RD_KAFKA_NO_REPLYQ);

        rd_kafka_toppar_destroy(rktp); /* refcnt from desired_add() */

        rd_kafka_set_last_error(0, 0);
        return 0;
}


/**
 * @brief Legacy API: start consuming \p partition at \p offset, delivering
 *        messages to the partition's own fetch queue.
 */
int rd_kafka_consume_start (rd_kafka_topic_t *app_rkt, int32_t partition,
                            int64_t offset) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START",
                     "Start consuming partition %"PRId32, partition);
        return rd_kafka_consume_start0(rkt, partition, offset, NULL);
}


/**
 * @brief Legacy API: same as rd_kafka_consume_start() but forwards
 *        messages to the application-provided queue \p rkqu.
 */
int rd_kafka_consume_start_queue (rd_kafka_topic_t *app_rkt, int32_t partition,
                                  int64_t offset, rd_kafka_queue_t *rkqu) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);

        return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q);
}


/**
 * @brief Remove \p rktp from the desired list and synchronously stop
 *        its fetcher (waits for the broker thread's reply).
 *
 * @returns 0 on success, or -1 with last_error set.
 */
static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) {
        rd_kafka_q_t *tmpq = NULL;
        rd_kafka_resp_err_t err;

        rd_kafka_topic_wrlock(rktp->rktp_rkt);
        rd_kafka_toppar_lock(rktp);
        rd_kafka_toppar_desired_del(rktp);
        rd_kafka_toppar_unlock(rktp);
        rd_kafka_topic_wrunlock(rktp->rktp_rkt);

        tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk);

        rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_REPLYQ(tmpq, 0));

        /* Synchronisation: Wait for stop reply from broker thread */
        err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
        rd_kafka_q_destroy_owner(tmpq);

        rd_kafka_set_last_error(err, err ? EINVAL : 0);

        return err ? -1 : 0;
}


/**
 * @brief Legacy API: stop consuming \p partition.
 *
 * @returns 0 on success, or -1 with last_error set.
 */
int rd_kafka_consume_stop (rd_kafka_topic_t *app_rkt, int32_t partition) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp;
        int r;

        if (partition == RD_KAFKA_PARTITION_UA) {
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
                                        EINVAL);
                return -1;
        }

        rd_kafka_topic_wrlock(rkt);
        if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
            !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
                rd_kafka_topic_wrunlock(rkt);
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                                        ESRCH);
                return -1;
        }
        rd_kafka_topic_wrunlock(rkt);

        r = rd_kafka_consume_stop0(rktp);
        /* set_last_error() called by stop0() */

        rd_kafka_toppar_destroy(rktp); /* refcnt from toppar_get/desired_get */

        return r;
}


/**
 * @brief Seek a single partition to \p offset.
 *
 * If \p timeout_ms is non-zero the call waits (up to the timeout) for the
 * seek to be performed by the toppar handler, otherwise it is fire-and-forget.
 */
rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt,
                                   int32_t partition,
                                   int64_t offset,
                                   int timeout_ms) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp;
        rd_kafka_q_t *tmpq = NULL;
        rd_kafka_resp_err_t err;
        rd_kafka_replyq_t replyq = RD_KAFKA_NO_REPLYQ;

        /* FIXME: simple consumer check */

        if (partition == RD_KAFKA_PARTITION_UA)
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        rd_kafka_topic_rdlock(rkt);
        if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
            !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
                rd_kafka_topic_rdunlock(rkt);
                return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
        }
        rd_kafka_topic_rdunlock(rkt);

        if (timeout_ms) {
                tmpq = rd_kafka_q_new(rkt->rkt_rk);
                replyq = RD_KAFKA_REPLYQ(tmpq, 0);
        }

        if ((err = rd_kafka_toppar_op_seek(rktp, offset, replyq))) {
                if (tmpq)
                        rd_kafka_q_destroy_owner(tmpq);
                rd_kafka_toppar_destroy(rktp);
                return err;
        }

        rd_kafka_toppar_destroy(rktp);

        if (tmpq) {
                err = rd_kafka_q_wait_result(tmpq, timeout_ms);
                rd_kafka_q_destroy_owner(tmpq);
                return err;
        }

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}


/**
 * @brief Seek multiple partitions, collecting per-partition results in
 *        \p partitions ->err.
 *
 * With a zero \p timeout_ms the seeks are enqueued asynchronously and
 * NULL is returned immediately; otherwise the call waits for all
 * in-progress seeks (or times out).
 */
rd_kafka_error_t *
rd_kafka_seek_partitions (rd_kafka_t *rk,
                          rd_kafka_topic_partition_list_t *partitions,
                          int timeout_ms) {
        rd_kafka_q_t *tmpq = NULL;
        rd_kafka_topic_partition_t *rktpar;
        rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
        int cnt = 0;

        if (rk->rk_type != RD_KAFKA_CONSUMER)
                return rd_kafka_error_new(
                        RD_KAFKA_RESP_ERR__INVALID_ARG,
                        "Must only be used on consumer instance");

        if (!partitions || partitions->cnt == 0)
                return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
                                          "partitions must be specified");

        if (timeout_ms)
                tmpq = rd_kafka_q_new(rk);

        RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
                rd_kafka_toppar_t *rktp;
                rd_kafka_resp_err_t err;

                rktp = rd_kafka_toppar_get2(rk,
                                            rktpar->topic,
                                            rktpar->partition,
                                            rd_false/*no-ua-on-miss*/,
                                            rd_false/*no-create-on-miss*/);
                if (!rktp) {
                        rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        continue;
                }

                err = rd_kafka_toppar_op_seek(rktp, rktpar->offset,
                                              RD_KAFKA_REPLYQ(tmpq, 0));
                if (err) {
                        rktpar->err = err;
                } else {
                        rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
                        cnt++;
                }

                rd_kafka_toppar_destroy(rktp); /* refcnt from toppar_get2() */
        }

        if (!timeout_ms)
                return NULL;

        /* Reap one reply per in-progress seek, matching each reply back
         * to its entry in the caller's partition list. */
        while (cnt > 0) {
                rd_kafka_op_t *rko;

                rko = rd_kafka_q_pop(tmpq, rd_timeout_remains(abs_timeout), 0);
                if (!rko) {
                        rd_kafka_q_destroy_owner(tmpq);

                        return rd_kafka_error_new(
                                RD_KAFKA_RESP_ERR__TIMED_OUT,
                                "Timed out waiting for %d remaining partition "
                                "seek(s) to finish", cnt);
                }

                if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
                        rd_kafka_q_destroy_owner(tmpq);
                        rd_kafka_op_destroy(rko);

                        return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY,
                                                  "Instance is terminating");
                }

                rd_assert(rko->rko_rktp);

                rktpar = rd_kafka_topic_partition_list_find(
                        partitions,
                        rko->rko_rktp->rktp_rkt->rkt_topic->str,
                        rko->rko_rktp->rktp_partition);
                rd_assert(rktpar);

                rktpar->err = rko->rko_err;

                rd_kafka_op_destroy(rko);

                cnt--;
        }

        rd_kafka_q_destroy_owner(tmpq);

        return NULL;
}


/**
 * @brief Serve up to \p rkmessages_size messages from \p rkq into the
 *        application's \p rkmessages array.
 */
static ssize_t rd_kafka_consume_batch0 (rd_kafka_q_t *rkq,
                                        int timeout_ms,
                                        rd_kafka_message_t **rkmessages,
                                        size_t rkmessages_size) {
        /* Populate application's rkmessages array. */
        return rd_kafka_q_serve_rkmessages(rkq, timeout_ms,
                                           rkmessages, rkmessages_size);
}


/**
 * @brief Legacy API: batch-consume from a specific partition's fetch queue.
 *
 * @returns the number of messages written to \p rkmessages, or -1 with
 *          last_error set.
 */
ssize_t rd_kafka_consume_batch (rd_kafka_topic_t *app_rkt, int32_t partition,
                                int timeout_ms,
                                rd_kafka_message_t **rkmessages,
                                size_t rkmessages_size) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp;
        ssize_t cnt;

        /* Get toppar */
        rd_kafka_topic_rdlock(rkt);
        rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
        if (unlikely(!rktp))
                rktp = rd_kafka_toppar_desired_get(rkt, partition);
        rd_kafka_topic_rdunlock(rkt);

        if (unlikely(!rktp)) {
                /* No such toppar known */
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                                        ESRCH);
                return -1;
        }

        /* Populate application's rkmessages array. */
        cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms,
                                          rkmessages, rkmessages_size);

        rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */

        rd_kafka_set_last_error(0, 0);

        return cnt;
}


/**
 * @brief Batch-consume from an application-provided queue.
 */
ssize_t rd_kafka_consume_batch_queue (rd_kafka_queue_t *rkqu,
                                      int timeout_ms,
                                      rd_kafka_message_t **rkmessages,
                                      size_t rkmessages_size) {
        /* Populate application's rkmessages array. */
        return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms,
                                       rkmessages, rkmessages_size);
}


/* Carries the application's consume callback + opaque through
 * the queue-serving machinery. */
struct consume_ctx {
        void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque);
        void *opaque;
};


/**
 * Trampoline for application's consume_cb()
 */
static rd_kafka_op_res_t
rd_kafka_consume_cb (rd_kafka_t *rk,
                     rd_kafka_q_t *rkq,
                     rd_kafka_op_t *rko,
                     rd_kafka_q_cb_type_t cb_type, void *opaque) {
        struct consume_ctx *ctx = opaque;
        rd_kafka_message_t *rkmessage;

        if (unlikely(rd_kafka_op_version_outdated(rko, 0))) {
                rd_kafka_op_destroy(rko);
                return RD_KAFKA_OP_RES_HANDLED;
        }

        rkmessage = rd_kafka_message_get(rko);

        rd_kafka_op_offset_store(rk, rko);

        ctx->consume_cb(rkmessage, ctx->opaque);

        rd_kafka_op_destroy(rko);

        return RD_KAFKA_OP_RES_HANDLED;
}


/**
 * @brief Serve \p rkq, dispatching up to \p max_cnt messages (0 = unlimited)
 *        to \p consume_cb via the rd_kafka_consume_cb() trampoline.
 */
static rd_kafka_op_res_t
rd_kafka_consume_callback0 (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt,
                            void (*consume_cb) (rd_kafka_message_t *rkmessage,
                                                void *opaque),
                            void *opaque) {
        struct consume_ctx ctx = { .consume_cb = consume_cb,
                                   .opaque = opaque };
        rd_kafka_op_res_t res;

        if (timeout_ms)
                rd_kafka_app_poll_blocking(rkq->rkq_rk);

        res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt,
                               RD_KAFKA_Q_CB_RETURN,
                               rd_kafka_consume_cb, &ctx);

        rd_kafka_app_polled(rkq->rkq_rk);

        return res;
}


/**
 * @brief Legacy API: consume messages from a specific partition by
 *        invoking \p consume_cb for each message.
 *
 * @returns the number of messages processed, or -1 with last_error set.
 */
int rd_kafka_consume_callback (rd_kafka_topic_t *app_rkt, int32_t partition,
                               int timeout_ms,
                               void (*consume_cb) (rd_kafka_message_t
                                                   *rkmessage,
                                                   void *opaque),
                               void *opaque) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp;
        int r;

        /* Get toppar */
        rd_kafka_topic_rdlock(rkt);
        rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
        if (unlikely(!rktp))
                rktp = rd_kafka_toppar_desired_get(rkt, partition);
        rd_kafka_topic_rdunlock(rkt);

        if (unlikely(!rktp)) {
                /* No such toppar known */
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                                        ESRCH);
                return -1;
        }

        r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms,
                                       rkt->rkt_conf.consume_callback_max_msgs,
                                       consume_cb, opaque);

        rd_kafka_toppar_destroy(rktp);

        rd_kafka_set_last_error(0, 0);

        return r;
}


/**
 * @brief Consume from an application-provided queue, invoking
 *        \p consume_cb per message (no message-count limit).
 */
int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu,
                                     int timeout_ms,
                                     void (*consume_cb) (rd_kafka_message_t
                                                         *rkmessage,
                                                         void *opaque),
                                     void *opaque) {
        return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0,
                                          consume_cb, opaque);
}



/**
 * Serve queue 'rkq' and return one message.
 * By serving the queue it will also call any registered callbacks
 * registered for matching events, this includes consumer_cb()
 * in which case no message will be returned.
 */
static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk,
                                              rd_kafka_q_t *rkq,
                                              int timeout_ms) {
        rd_kafka_op_t *rko;
        rd_kafka_message_t *rkmessage = NULL;
        rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);

        if (timeout_ms)
                rd_kafka_app_poll_blocking(rk);

        rd_kafka_yield_thread = 0;
        while ((rko = rd_kafka_q_pop(rkq,
                                     rd_timeout_remains_us(abs_timeout), 0))) {
                rd_kafka_op_res_t res;

                res = rd_kafka_poll_cb(rk, rkq, rko,
                                       RD_KAFKA_Q_CB_RETURN, NULL);

                if (res == RD_KAFKA_OP_RES_PASS)
                        break;

                if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
                             rd_kafka_yield_thread)) {
                        /* Callback called rd_kafka_yield(), we must
                         * stop dispatching the queue and return. */
                        rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR,
                                                EINTR);
                        rd_kafka_app_polled(rk);
                        return NULL;
                }

                /* Message was handled by callback. */
                continue;
        }

        if (!rko) {
                /* Timeout reached with no op returned. */
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
                                        ETIMEDOUT);
                rd_kafka_app_polled(rk);
                return NULL;
        }

        rd_kafka_assert(rk,
                        rko->rko_type == RD_KAFKA_OP_FETCH ||
                        rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR);

        /* Get rkmessage from rko */
        rkmessage = rd_kafka_message_get(rko);

        /* Store offset */
        rd_kafka_op_offset_store(rk, rko);

        rd_kafka_set_last_error(0, 0);

        rd_kafka_app_polled(rk);

        return rkmessage;
}


/**
 * @brief Legacy API: consume a single message from \p partition.
 *
 * @returns a message (or error wrapped as a message event via poll_cb),
 *          or NULL with last_error set.
 */
rd_kafka_message_t *rd_kafka_consume (rd_kafka_topic_t *app_rkt,
                                      int32_t partition,
                                      int timeout_ms) {
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp;
        rd_kafka_message_t *rkmessage;

        rd_kafka_topic_rdlock(rkt);
        rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/);
        if (unlikely(!rktp))
                rktp = rd_kafka_toppar_desired_get(rkt, partition);
        rd_kafka_topic_rdunlock(rkt);

        if (unlikely(!rktp)) {
                /* No such toppar known */
                rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
                                        ESRCH);
                return NULL;
        }

        rkmessage = rd_kafka_consume0(rkt->rkt_rk,
                                      rktp->rktp_fetchq, timeout_ms);

        rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */

        return rkmessage;
}


/**
 * @brief Consume a single message from an application-provided queue.
 */
rd_kafka_message_t *rd_kafka_consume_queue (rd_kafka_queue_t *rkqu,
                                            int timeout_ms) {
        return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms);
}


/**
 * @brief Redirect the main event queue to the consumer group's queue
 *        so a single poll loop serves both.
 */
rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) {
        rd_kafka_cgrp_t *rkcg;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        rd_kafka_q_fwd_set(rk->rk_rep, rkcg->rkcg_q);
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}


/**
 * @brief High-level consumer poll: returns one message (or NULL),
 *        serving cgrp events as a side effect.
 */
rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms) {
        rd_kafka_cgrp_t *rkcg;

        if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) {
                /* No cgrp configured: synthesize an error message so the
                 * caller still gets a non-NULL diagnostic return. */
                rd_kafka_message_t *rkmessage = rd_kafka_message_new();
                rkmessage->err = RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
                return rkmessage;
        }

        return rd_kafka_consume0(rk, rkcg->rkcg_q, timeout_ms);
}


/**
 * @brief Close the high-level consumer: triggers cgrp termination and
 *        (unless no_consumer_close destroy flags are set) serves rebalance
 *        and other close events until the cgrp signals termination.
 */
rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) {
        rd_kafka_cgrp_t *rkcg;
        rd_kafka_op_t *rko;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
        rd_kafka_q_t *rkq;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        /* If a fatal error has been raised and this is an
         * explicit consumer_close() from the application we return
         * a fatal error. Otherwise let the "silent" no_consumer_close
         * logic be performed to clean up properly. */
        if (rd_kafka_fatal_error_code(rk) &&
            !rd_kafka_destroy_flags_no_consumer_close(rk))
                return RD_KAFKA_RESP_ERR__FATAL;

        rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Closing consumer");

        /* Redirect cgrp queue to our temporary queue to make sure
         * all posted ops (e.g., rebalance callbacks) are served by
         * this function. */
        rkq = rd_kafka_q_new(rk);
        rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq);

        rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */

        /* Disable the queue if termination is immediate or the user
         * does not want the blocking consumer_close() behaviour, this will
         * cause any ops posted for this queue (such as rebalance) to
         * be destroyed.
         */
        if (rd_kafka_destroy_flags_no_consumer_close(rk)) {
                rd_kafka_dbg(rk, CONSUMER, "CLOSE",
                             "Disabling and purging temporary queue to quench "
                             "close events");
                rd_kafka_q_disable(rkq);
                /* Purge ops already enqueued */
                rd_kafka_q_purge(rkq);
        } else {
                rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Waiting for close events");
                while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) {
                        rd_kafka_op_res_t res;
                        if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
                            RD_KAFKA_OP_TERMINATE) {
                                err = rko->rko_err;
                                rd_kafka_op_destroy(rko);
                                break;
                        }
                        res = rd_kafka_poll_cb(rk, rkq, rko,
                                               RD_KAFKA_Q_CB_RETURN, NULL);
                        if (res == RD_KAFKA_OP_RES_PASS)
                                rd_kafka_op_destroy(rko);
                        /* Ignore YIELD, we need to finish */
                }
        }

        rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL);

        rd_kafka_q_destroy_owner(rkq);

        rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Consumer closed");

        return err;
}


/**
 * @brief Fetch committed offsets for \p partitions from the group
 *        coordinator, retrying on coordinator/transport errors until
 *        \p timeout_ms expires.
 */
rd_kafka_resp_err_t
rd_kafka_committed (rd_kafka_t *rk,
                    rd_kafka_topic_partition_list_t *partitions,
                    int timeout_ms) {
        rd_kafka_q_t *rkq;
        rd_kafka_resp_err_t err;
        rd_kafka_cgrp_t *rkcg;
        rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);

        if (!partitions)
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        if (!(rkcg = rd_kafka_cgrp_get(rk)))
                return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;

        /* Set default offsets. */
        rd_kafka_topic_partition_list_reset_offsets(partitions,
                                                    RD_KAFKA_OFFSET_INVALID);

        rkq = rd_kafka_q_new(rk);

        do {
                rd_kafka_op_t *rko;
                int state_version = rd_kafka_brokers_get_state_version(rk);

                rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
                rd_kafka_op_set_replyq(rko, rkq, NULL);

                /* Issue #827
                 * Copy partition list to avoid use-after-free if we time out
                 * here, the app frees the list, and then cgrp starts
                 * processing the op. */
                rko->rko_u.offset_fetch.partitions =
                        rd_kafka_topic_partition_list_copy(partitions);
                rko->rko_u.offset_fetch.require_stable =
                        rk->rk_conf.isolation_level ==
                        RD_KAFKA_READ_COMMITTED;
                rko->rko_u.offset_fetch.do_free = 1;

                if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) {
                        err = RD_KAFKA_RESP_ERR__DESTROY;
                        break;
                }

                rko = rd_kafka_q_pop(rkq,
                                     rd_timeout_remains_us(abs_timeout), 0);
                if (rko) {
                        if (!(err = rko->rko_err))
                                rd_kafka_topic_partition_list_update(
                                        partitions,
                                        rko->rko_u.offset_fetch.partitions);
                        else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
                                  err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
                                 !rd_kafka_brokers_wait_state_change(
                                         rk, state_version,
                                         rd_timeout_remains(abs_timeout)))
                                err = RD_KAFKA_RESP_ERR__TIMED_OUT;

                        rd_kafka_op_destroy(rko);
                } else
                        err = RD_KAFKA_RESP_ERR__TIMED_OUT;
        } while (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
                 err == RD_KAFKA_RESP_ERR__WAIT_COORD);

        rd_kafka_q_destroy_owner(rkq);

        return err;
}


/**
 * @brief Fill in the current application consume position
 *        (rktp_app_offset) for each partition in \p partitions.
 *
 * Unknown partitions get err = __UNKNOWN_PARTITION and an invalid offset.
 */
rd_kafka_resp_err_t
rd_kafka_position (rd_kafka_t *rk,
                   rd_kafka_topic_partition_list_t *partitions) {
        int i;

        for (i = 0 ; i < partitions->cnt ; i++) {
                rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
                rd_kafka_toppar_t *rktp;

                if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic,
                                                  rktpar->partition, 0, 1))) {
                        rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        rktpar->offset = RD_KAFKA_OFFSET_INVALID;
                        continue;
                }

                rd_kafka_toppar_lock(rktp);
                rktpar->offset = rktp->rktp_app_offset;
                rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
                rd_kafka_toppar_unlock(rktp);
                rd_kafka_toppar_destroy(rktp);
        }

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}



/* Shared state for the two async watermark-offset requests
 * (one for low, one for high). Lives on the caller's stack. */
struct _query_wmark_offsets_state {
        rd_kafka_resp_err_t err;
        const char *topic;
        int32_t partition;
        int64_t offsets[2];
        int     offidx;  /* next offset to set from response */
        rd_ts_t ts_end;
        int     state_version;  /* Broker state version */
};

/**
 * @brief Response handler for the watermark-offset ListOffsets requests:
 *        stores the returned offset into state->offsets[] and signals
 *        completion (or error) via state->err.
 */
static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk,
                                                  rd_kafka_broker_t *rkb,
                                                  rd_kafka_resp_err_t err,
                                                  rd_kafka_buf_t *rkbuf,
                                                  rd_kafka_buf_t *request,
                                                  void *opaque) {
        struct _query_wmark_offsets_state *state;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;

        if (err == RD_KAFKA_RESP_ERR__DESTROY) {
                /* 'state' has gone out of scope when query_watermark..()
                 * timed out and returned to the caller. */
                return;
        }

        state = opaque;

        offsets = rd_kafka_topic_partition_list_new(1);
        err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request, offsets);
        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
                rd_kafka_topic_partition_list_destroy(offsets);
                return; /* Retrying */
        }

        /* Retry if no broker connection is available yet. */
        if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
             err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
            rkb &&
            rd_kafka_brokers_wait_state_change(
                    rkb->rkb_rk, state->state_version,
                    rd_timeout_remains(state->ts_end))) {
                /* Retry */
                state->state_version = rd_kafka_brokers_get_state_version(rk);
                request->rkbuf_retries = 0;
                if (rd_kafka_buf_retry(rkb, request)) {
                        rd_kafka_topic_partition_list_destroy(offsets);
                        return; /* Retry in progress */
                }
                /* FALLTHRU */
        }

        /* Partition not seen in response. */
        if (!(rktpar = rd_kafka_topic_partition_list_find(offsets,
                                                          state->topic,
                                                          state->partition)))
                err = RD_KAFKA_RESP_ERR__BAD_MSG;
        else if (rktpar->err)
                err = rktpar->err;
        else
                state->offsets[state->offidx] = rktpar->offset;

        state->offidx++;

        if (err || state->offidx == 2) /* Error or Done */
                state->err = err;

        rd_kafka_topic_partition_list_destroy(offsets);
}


/**
 * @brief Query the partition leader for the low and high watermark
 *        offsets of \p topic / \p partition, waiting up to \p timeout_ms.
 */
rd_kafka_resp_err_t
rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic,
                                  int32_t partition,
                                  int64_t *low, int64_t *high,
                                  int timeout_ms) {
        rd_kafka_q_t *rkq;
        struct _query_wmark_offsets_state state;
        rd_ts_t ts_end = rd_timeout_init(timeout_ms);
        rd_kafka_topic_partition_list_t *partitions;
        rd_kafka_topic_partition_t *rktpar;
        struct rd_kafka_partition_leader *leader;
        rd_list_t leaders;
        rd_kafka_resp_err_t err;

        partitions = rd_kafka_topic_partition_list_new(1);
        rktpar = rd_kafka_topic_partition_list_add(partitions,
                                                   topic, partition);

        rd_list_init(&leaders, partitions->cnt,
                     (void *)rd_kafka_partition_leader_destroy);

        err = rd_kafka_topic_partition_list_query_leaders(rk, partitions,
                                                          &leaders,
                                                          timeout_ms);
        if (err) {
                rd_list_destroy(&leaders);
                rd_kafka_topic_partition_list_destroy(partitions);
                return err;
        }

        leader = rd_list_elem(&leaders, 0);

        rkq = rd_kafka_q_new(rk);

        /* Due to KAFKA-1588 we need to send a request for each wanted offset,
         * in this case one for the low watermark and one for the high. */
        state.topic = topic;
        state.partition = partition;
        state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING;
        state.offsets[1] = RD_KAFKA_OFFSET_END;
        state.offidx = 0;
        state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
        state.ts_end = ts_end;
        state.state_version = rd_kafka_brokers_get_state_version(rk);

        rktpar->offset =  RD_KAFKA_OFFSET_BEGINNING;
        rd_kafka_OffsetRequest(leader->rkb, partitions, 0,
                               RD_KAFKA_REPLYQ(rkq, 0),
                               rd_kafka_query_wmark_offsets_resp_cb,
                               &state);

        rktpar->offset =  RD_KAFKA_OFFSET_END;
        rd_kafka_OffsetRequest(leader->rkb, partitions, 0,
                               RD_KAFKA_REPLYQ(rkq, 0),
                               rd_kafka_query_wmark_offsets_resp_cb,
                               &state);

        rd_kafka_topic_partition_list_destroy(partitions);
        rd_list_destroy(&leaders);

        /* Wait for reply (or timeout) */
        while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS &&
               rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK,
                                rd_kafka_poll_cb, NULL) !=
               RD_KAFKA_OP_RES_YIELD)
                ;

        rd_kafka_q_destroy_owner(rkq);

        if (state.err)
                return state.err;
        else if (state.offidx != 2)
                return RD_KAFKA_RESP_ERR__FAIL;

        /* We are not certain about the returned order. */
        if (state.offsets[0] < state.offsets[1]) {
                *low = state.offsets[0];
                *high  = state.offsets[1];
        } else {
                *low = state.offsets[1];
                *high = state.offsets[0];
        }

        /* If partition is empty only one offset (the last) will be returned.
         */
        if (*low < 0 && *high >= 0)
                *low = *high;

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}


/**
 * @brief Return the locally cached low/high watermark offsets for the
 *        partition, without contacting the broker.
 */
rd_kafka_resp_err_t
rd_kafka_get_watermark_offsets (rd_kafka_t *rk, const char *topic,
                                int32_t partition,
                                int64_t *low, int64_t *high) {
        rd_kafka_toppar_t *rktp;

        rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1);
        if (!rktp)
                return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

        rd_kafka_toppar_lock(rktp);
        *low = rktp->rktp_lo_offset;
        *high = rktp->rktp_hi_offset;
        rd_kafka_toppar_unlock(rktp);

        rd_kafka_toppar_destroy(rktp);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}


/**
 * @brief get_offsets_for_times() state
 */
struct _get_offsets_for_times {
        rd_kafka_topic_partition_list_t *results;
        rd_kafka_resp_err_t err;
        int wait_reply;
        int state_version;
        rd_ts_t ts_end;
};

/**
 * @brief Handle OffsetRequest responses
 */
static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk,
                                                    rd_kafka_broker_t *rkb,
                                                    rd_kafka_resp_err_t err,
                                                    rd_kafka_buf_t *rkbuf,
                                                    rd_kafka_buf_t *request,
                                                    void *opaque) {
        struct _get_offsets_for_times *state;

        if (err == RD_KAFKA_RESP_ERR__DESTROY) {
                /* 'state' has gone out of scope when offsets_for_times()
                 * timed out and returned to the caller. */
                return;
        }

        state = opaque;

        err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request,
                                     state->results);
        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
                return; /* Retrying */

        /* Retry if no broker connection is available yet. */
        if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
             err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
            rkb &&
            rd_kafka_brokers_wait_state_change(
                    rkb->rkb_rk, state->state_version,
                    rd_timeout_remains(state->ts_end))) {
                /* Retry */
                state->state_version = rd_kafka_brokers_get_state_version(rk);
                request->rkbuf_retries = 0;
                if (rd_kafka_buf_retry(rkb, request))
                        return; /* Retry in progress */
                /* FALLTHRU */
        }

        if (err && !state->err)
                state->err = err;

        state->wait_reply--;
}


/**
 * @brief Look up the offsets matching the timestamps in \p offsets by
 *        sending one ListOffsets request per partition leader and
 *        waiting (up to \p timeout_ms) for all replies.
 */
rd_kafka_resp_err_t
rd_kafka_offsets_for_times (rd_kafka_t *rk,
                            rd_kafka_topic_partition_list_t *offsets,
                            int timeout_ms) {
        rd_kafka_q_t *rkq;
        struct _get_offsets_for_times state = RD_ZERO_INIT;
        rd_ts_t ts_end = rd_timeout_init(timeout_ms);
        rd_list_t leaders;
        int i;
        rd_kafka_resp_err_t err;
        struct rd_kafka_partition_leader *leader;
        int tmout;

        if (offsets->cnt == 0)
                return RD_KAFKA_RESP_ERR__INVALID_ARG;

        rd_list_init(&leaders, offsets->cnt,
                     (void *)rd_kafka_partition_leader_destroy);

        err = rd_kafka_topic_partition_list_query_leaders(rk, offsets,
                                                          &leaders,
                                                          timeout_ms);
        if (err) {
                rd_list_destroy(&leaders);
                return err;
        }

        rkq = rd_kafka_q_new(rk);

        state.wait_reply = 0;
        state.results = rd_kafka_topic_partition_list_new(offsets->cnt);

        /* For each leader send a request for its partitions */
        RD_LIST_FOREACH(leader, &leaders, i) {
                state.wait_reply++;
                rd_kafka_OffsetRequest(leader->rkb, leader->partitions, 1,
                                       RD_KAFKA_REPLYQ(rkq, 0),
                                       rd_kafka_get_offsets_for_times_resp_cb,
                                       &state);
        }

        rd_list_destroy(&leaders);

        /* Wait for reply (or timeout) */
        while (state.wait_reply > 0 &&
               !rd_timeout_expired((tmout = rd_timeout_remains(ts_end))))
                rd_kafka_q_serve(rkq, tmout, 0, RD_KAFKA_Q_CB_CALLBACK,
                                 rd_kafka_poll_cb, NULL);

        rd_kafka_q_destroy_owner(rkq);

        if (state.wait_reply > 0 && !state.err)
                state.err = RD_KAFKA_RESP_ERR__TIMED_OUT;

        /* Then update the queried partitions. */
        if (!state.err)
                rd_kafka_topic_partition_list_update(offsets, state.results);

        rd_kafka_topic_partition_list_destroy(state.results);

        return state.err;
}


/**
 * @brief rd_kafka_poll() (and similar) op callback handler.
 *        Will either call registered callback depending on cb_type and op type
 *        or return op to application, if applicable (e.g., fetch message).
 *
 * @returns RD_KAFKA_OP_RES_HANDLED if op was handled, else one of the
 *          other res types (such as OP_RES_PASS).
 *
 * @locality any thread that serves op queues
 */
rd_kafka_op_res_t
rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko,
                  rd_kafka_q_cb_type_t cb_type, void *opaque) {
        rd_kafka_msg_t *rkm;
        rd_kafka_op_res_t res = RD_KAFKA_OP_RES_HANDLED;

        /* Special handling for events based on cb_type */
        if (cb_type == RD_KAFKA_Q_CB_EVENT &&
            rd_kafka_event_setup(rk, rko)) {
                /* Return-as-event requested. */
                return RD_KAFKA_OP_RES_PASS; /* Return as event */
        }

        switch ((int)rko->rko_type)
        {
        case RD_KAFKA_OP_FETCH:
                if (!rk->rk_conf.consume_cb ||
                    cb_type == RD_KAFKA_Q_CB_RETURN ||
                    cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
                        return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
                else {
                        struct consume_ctx ctx = {
                                .consume_cb = rk->rk_conf.consume_cb,
                                .opaque = rk->rk_conf.opaque };

                        return rd_kafka_consume_cb(rk, rkq, rko, cb_type,
                                                   &ctx);
                }
                break;

        case RD_KAFKA_OP_REBALANCE:
                if (rk->rk_conf.rebalance_cb)
                        rk->rk_conf.rebalance_cb(
                                rk, rko->rko_err,
                                rko->rko_u.rebalance.partitions,
                                rk->rk_conf.opaque);
                else {
                        /** If EVENT_REBALANCE is enabled but rebalance_cb
                         *  isn't, we need to perform a dummy assign for the
                         *  application. This might happen during termination
                         *  with consumer_close() */
                        rd_kafka_dbg(rk, CGRP, "UNASSIGN",
                                     "Forcing unassign of %d partition(s)",
                                     rko->rko_u.rebalance.partitions ?
rko->rko_u.rebalance.partitions->cnt : 0); rd_kafka_assign(rk, NULL); } break; case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY: if (!rko->rko_u.offset_commit.cb) return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ rko->rko_u.offset_commit.cb( rk, rko->rko_err, rko->rko_u.offset_commit.partitions, rko->rko_u.offset_commit.opaque); break; case RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY: /* Reply from toppar FETCH_STOP */ rd_kafka_assignment_partition_stopped(rk, rko->rko_rktp); break; case RD_KAFKA_OP_CONSUMER_ERR: /* rd_kafka_consumer_poll() (_Q_CB_CONSUMER): * Consumer errors are returned to the application * as rkmessages, not error callbacks. * * rd_kafka_poll() (_Q_CB_GLOBAL): * convert to ERR op (fallthru) */ if (cb_type == RD_KAFKA_Q_CB_RETURN || cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) { /* return as message_t to application */ return RD_KAFKA_OP_RES_PASS; } /* FALLTHRU */ case RD_KAFKA_OP_ERR: if (rk->rk_conf.error_cb) rk->rk_conf.error_cb(rk, rko->rko_err, rko->rko_u.err.errstr, rk->rk_conf.opaque); else rd_kafka_log(rk, LOG_ERR, "ERROR", "%s: %s", rk->rk_name, rko->rko_u.err.errstr); break; case RD_KAFKA_OP_DR: /* Delivery report: * call application DR callback for each message. 
*/ while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) { rd_kafka_message_t *rkmessage; TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, rkm, rkm_link); rkmessage = rd_kafka_message_get_from_rkm(rko, rkm); if (likely(rk->rk_conf.dr_msg_cb != NULL)) { rk->rk_conf.dr_msg_cb(rk, rkmessage, rk->rk_conf.opaque); } else if (rk->rk_conf.dr_cb) { rk->rk_conf.dr_cb(rk, rkmessage->payload, rkmessage->len, rkmessage->err, rk->rk_conf.opaque, rkmessage->_private); } else if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { rd_kafka_log(rk, LOG_WARNING, "DRDROP", "Dropped delivery report for " "message to " "%s [%"PRId32"] (%s) with " "opaque %p: flush() or poll() " "should not be called when " "EVENT_DR is enabled", rd_kafka_topic_name(rkmessage-> rkt), rkmessage->partition, rd_kafka_err2name(rkmessage->err), rkmessage->_private); } else { rd_assert(!*"BUG: neither a delivery report " "callback or EVENT_DR flag set"); } rd_kafka_msg_destroy(rk, rkm); if (unlikely(rd_kafka_yield_thread)) { /* Callback called yield(), * re-enqueue the op (if there are any * remaining messages). */ if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq. rkmq_msgs)) rd_kafka_q_reenq(rkq, rko); else rd_kafka_op_destroy(rko); return RD_KAFKA_OP_RES_YIELD; } } rd_kafka_msgq_init(&rko->rko_u.dr.msgq); break; case RD_KAFKA_OP_THROTTLE: if (rk->rk_conf.throttle_cb) rk->rk_conf.throttle_cb(rk, rko->rko_u.throttle.nodename, rko->rko_u.throttle.nodeid, rko->rko_u.throttle. 
throttle_time, rk->rk_conf.opaque); break; case RD_KAFKA_OP_STATS: /* Statistics */ if (rk->rk_conf.stats_cb && rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json, rko->rko_u.stats.json_len, rk->rk_conf.opaque) == 1) rko->rko_u.stats.json = NULL; /* Application wanted json ptr */ break; case RD_KAFKA_OP_LOG: if (likely(rk->rk_conf.log_cb && rk->rk_conf.log_level >= rko->rko_u.log.level)) rk->rk_conf.log_cb(rk, rko->rko_u.log.level, rko->rko_u.log.fac, rko->rko_u.log.str); break; case RD_KAFKA_OP_TERMINATE: /* nop: just a wake-up */ break; case RD_KAFKA_OP_CREATETOPICS: case RD_KAFKA_OP_DELETETOPICS: case RD_KAFKA_OP_CREATEPARTITIONS: case RD_KAFKA_OP_ALTERCONFIGS: case RD_KAFKA_OP_DESCRIBECONFIGS: case RD_KAFKA_OP_DELETERECORDS: case RD_KAFKA_OP_DELETEGROUPS: case RD_KAFKA_OP_ADMIN_FANOUT: /* Calls op_destroy() from worker callback, * when the time comes. */ res = rd_kafka_op_call(rk, rkq, rko); break; case RD_KAFKA_OP_ADMIN_RESULT: if (cb_type == RD_KAFKA_Q_CB_RETURN || cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) return RD_KAFKA_OP_RES_PASS; /* Don't handle here */ /* Op is silently destroyed below */ break; case RD_KAFKA_OP_TXN: /* Must only be handled by rdkafka main thread */ rd_assert(thrd_is_current(rk->rk_thread)); res = rd_kafka_op_call(rk, rkq, rko); break; case RD_KAFKA_OP_PURGE: rd_kafka_purge(rk, rko->rko_u.purge.flags); break; default: rd_kafka_assert(rk, !*"cant handle op type"); break; } if (res == RD_KAFKA_OP_RES_HANDLED) rd_kafka_op_destroy(rko); return res; } int rd_kafka_poll (rd_kafka_t *rk, int timeout_ms) { int r; if (timeout_ms) rd_kafka_app_poll_blocking(rk); r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); rd_kafka_app_polled(rk); return r; } rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms) { rd_kafka_op_t *rko; if (timeout_ms) rd_kafka_app_poll_blocking(rkqu->rkqu_rk); rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, rd_timeout_us(timeout_ms), 0, RD_KAFKA_Q_CB_EVENT, 
rd_kafka_poll_cb, NULL); rd_kafka_app_polled(rkqu->rkqu_rk); if (!rko) return NULL; return rko; } int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms) { int r; if (timeout_ms) rd_kafka_app_poll_blocking(rkqu->rkqu_rk); r = rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); rd_kafka_app_polled(rkqu->rkqu_rk); return r; } static void rd_kafka_toppar_dump (FILE *fp, const char *indent, rd_kafka_toppar_t *rktp) { fprintf(fp, "%s%.*s [%"PRId32"] broker %s, " "leader_id %s\n", indent, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rktp->rktp_broker ? rktp->rktp_broker->rkb_name : "none", rktp->rktp_leader ? rktp->rktp_leader->rkb_name : "none"); fprintf(fp, "%s refcnt %i\n" "%s msgq: %i messages\n" "%s xmit_msgq: %i messages\n" "%s total: %"PRIu64" messages, %"PRIu64" bytes\n", indent, rd_refcnt_get(&rktp->rktp_refcnt), indent, rktp->rktp_msgq.rkmq_msg_cnt, indent, rktp->rktp_xmit_msgq.rkmq_msg_cnt, indent, rd_atomic64_get(&rktp->rktp_c.tx_msgs), rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes)); } static void rd_kafka_broker_dump (FILE *fp, rd_kafka_broker_t *rkb, int locks) { rd_kafka_toppar_t *rktp; if (locks) rd_kafka_broker_lock(rkb); fprintf(fp, " rd_kafka_broker_t %p: %s NodeId %"PRId32 " in state %s (for %.3fs)\n", rkb, rkb->rkb_name, rkb->rkb_nodeid, rd_kafka_broker_state_names[rkb->rkb_state], rkb->rkb_ts_state ? 
(float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f : 0.0f); fprintf(fp, " refcnt %i\n", rd_refcnt_get(&rkb->rkb_refcnt)); fprintf(fp, " outbuf_cnt: %i waitresp_cnt: %i\n", rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt)); fprintf(fp, " %"PRIu64 " messages sent, %"PRIu64" bytes, " "%"PRIu64" errors, %"PRIu64" timeouts\n" " %"PRIu64 " messages received, %"PRIu64" bytes, " "%"PRIu64" errors\n" " %"PRIu64 " messageset transmissions were retried\n", rd_atomic64_get(&rkb->rkb_c.tx), rd_atomic64_get(&rkb->rkb_c.tx_bytes), rd_atomic64_get(&rkb->rkb_c.tx_err), rd_atomic64_get(&rkb->rkb_c.req_timeouts), rd_atomic64_get(&rkb->rkb_c.rx), rd_atomic64_get(&rkb->rkb_c.rx_bytes), rd_atomic64_get(&rkb->rkb_c.rx_err), rd_atomic64_get(&rkb->rkb_c.tx_retries)); fprintf(fp, " %i toppars:\n", rkb->rkb_toppar_cnt); TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) rd_kafka_toppar_dump(fp, " ", rktp); if (locks) { rd_kafka_broker_unlock(rkb); } } static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { rd_kafka_broker_t *rkb; rd_kafka_topic_t *rkt; rd_kafka_toppar_t *rktp; int i; unsigned int tot_cnt; size_t tot_size; rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); if (locks) rd_kafka_rdlock(rk); #if ENABLE_DEVEL fprintf(fp, "rd_kafka_op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt)); #endif fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name); fprintf(fp, " producer.msg_cnt %u (%"PRIusz" bytes)\n", tot_cnt, tot_size); fprintf(fp, " rk_rep reply queue: %i ops\n", rd_kafka_q_len(rk->rk_rep)); fprintf(fp, " brokers:\n"); if (locks) mtx_lock(&rk->rk_internal_rkb_lock); if (rk->rk_internal_rkb) rd_kafka_broker_dump(fp, rk->rk_internal_rkb, locks); if (locks) mtx_unlock(&rk->rk_internal_rkb_lock); TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_dump(fp, rkb, locks); } fprintf(fp, " cgrp:\n"); if (rk->rk_cgrp) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; fprintf(fp, " %.*s in state %s, flags 0x%x\n", 
RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rd_kafka_cgrp_state_names[rkcg->rkcg_state], rkcg->rkcg_flags); fprintf(fp, " coord_id %"PRId32", broker %s\n", rkcg->rkcg_coord_id, rkcg->rkcg_curr_coord ? rd_kafka_broker_name(rkcg->rkcg_curr_coord):"(none)"); fprintf(fp, " toppars:\n"); RD_LIST_FOREACH(rktp, &rkcg->rkcg_toppars, i) { fprintf(fp, " %.*s [%"PRId32"] in state %s\n", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state]); } } fprintf(fp, " topics:\n"); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { fprintf(fp, " %.*s with %"PRId32" partitions, state %s, " "refcnt %i\n", RD_KAFKAP_STR_PR(rkt->rkt_topic), rkt->rkt_partition_cnt, rd_kafka_topic_state_names[rkt->rkt_state], rd_refcnt_get(&rkt->rkt_refcnt)); if (rkt->rkt_ua) rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua); if (rd_list_empty(&rkt->rkt_desp)) { fprintf(fp, " desired partitions:"); RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) fprintf(fp, " %"PRId32, rktp->rktp_partition); fprintf(fp, "\n"); } } fprintf(fp, "\n"); rd_kafka_metadata_cache_dump(fp, rk); if (locks) rd_kafka_rdunlock(rk); } void rd_kafka_dump (FILE *fp, rd_kafka_t *rk) { if (rk) rd_kafka_dump0(fp, rk, 1/*locks*/); } const char *rd_kafka_name (const rd_kafka_t *rk) { return rk->rk_name; } rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) { return rk->rk_type; } char *rd_kafka_memberid (const rd_kafka_t *rk) { rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; char *memberid; if (!(rkcg = rd_kafka_cgrp_get(rk))) return NULL; rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME); if (!rko) return NULL; memberid = rko->rko_u.name.str; rko->rko_u.name.str = NULL; rd_kafka_op_destroy(rko); return memberid; } char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ClusterId is returned in Metadata >=V2 responses and * cached on the rk. 
If no cached value is available * it means no metadata has been received yet, or we're * using a lower protocol version * (e.g., lack of api.version.request=true). */ while (1) { int remains_ms; rd_kafka_rdlock(rk); if (rk->rk_clusterid) { /* Cached clusterid available. */ char *ret = rd_strdup(rk->rk_clusterid); rd_kafka_rdunlock(rk); return ret; } else if (rk->rk_ts_metadata > 0) { /* Metadata received but no clusterid, * this probably means the broker is too old * or api.version.request=false. */ rd_kafka_rdunlock(rk); return NULL; } rd_kafka_rdunlock(rk); /* Wait for up to timeout_ms for a metadata refresh, * if permitted by application. */ remains_ms = rd_timeout_remains(abs_timeout); if (rd_timeout_expired(remains_ms)) return NULL; rd_kafka_metadata_cache_wait_change(rk, remains_ms); } return NULL; } int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ControllerId is returned in Metadata >=V1 responses and * cached on the rk. If no cached value is available * it means no metadata has been received yet, or we're * using a lower protocol version * (e.g., lack of api.version.request=true). */ while (1) { int remains_ms; int version; version = rd_kafka_brokers_get_state_version(rk); rd_kafka_rdlock(rk); if (rk->rk_controllerid != -1) { /* Cached controllerid available. */ rd_kafka_rdunlock(rk); return rk->rk_controllerid; } else if (rk->rk_ts_metadata > 0) { /* Metadata received but no clusterid, * this probably means the broker is too old * or api.version.request=false. */ rd_kafka_rdunlock(rk); return -1; } rd_kafka_rdunlock(rk); /* Wait for up to timeout_ms for a metadata refresh, * if permitted by application. 
*/ remains_ms = rd_timeout_remains(abs_timeout); if (rd_timeout_expired(remains_ms)) return -1; rd_kafka_brokers_wait_state_change(rk, version, remains_ms); } return -1; } void *rd_kafka_opaque (const rd_kafka_t *rk) { return rk->rk_conf.opaque; } int rd_kafka_outq_len (rd_kafka_t *rk) { return rd_kafka_curr_msgs_cnt(rk) + rd_kafka_q_len(rk->rk_rep) + (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0); } rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { unsigned int msg_cnt = 0; if (rk->rk_type != RD_KAFKA_PRODUCER) return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; rd_kafka_yield_thread = 0; if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { /* Application wants delivery reports as events rather * than callbacks, we must thus not serve this queue * with rd_kafka_poll() since that would trigger non-existent * delivery report callbacks, which would result * in the delivery reports being dropped. * Instead we rely on the application to serve the event * queue in another thread, so all we do here is wait * for the current message count to reach zero. */ struct timespec tspec; rd_timeout_init_timespec(&tspec, timeout_ms); while ((msg_cnt = rd_kafka_curr_msgs_wait_zero(rk, &tspec)) > 0) { if (unlikely(rd_kafka_yield_thread)) return RD_KAFKA_RESP_ERR__TIMED_OUT; } return msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT : RD_KAFKA_RESP_ERR_NO_ERROR; } else { /* Standard poll interface. * * First poll call is non-blocking for the case * where timeout_ms==RD_POLL_NOWAIT to make sure poll is * called at least once. */ rd_ts_t ts_end = rd_timeout_init(timeout_ms); int tmout = RD_POLL_NOWAIT; int qlen = 0; do { rd_kafka_poll(rk, tmout); qlen = rd_kafka_q_len(rk->rk_rep); msg_cnt = rd_kafka_curr_msgs_cnt(rk); } while (qlen + msg_cnt > 0 && !rd_kafka_yield_thread && (tmout = rd_timeout_remains_limit(ts_end, 10)) != RD_POLL_NOWAIT); return qlen + msg_cnt > 0 ? 
RD_KAFKA_RESP_ERR__TIMED_OUT : RD_KAFKA_RESP_ERR_NO_ERROR; } } rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { rd_kafka_broker_t *rkb; rd_kafka_q_t *tmpq = NULL; int waitcnt = 0; if (rk->rk_type != RD_KAFKA_PRODUCER) return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; /* Check that future flags are not passed */ if ((purge_flags & ~RD_KAFKA_PURGE_F_MASK) != 0) return RD_KAFKA_RESP_ERR__INVALID_ARG; /* Nothing to purge */ if (!purge_flags) return RD_KAFKA_RESP_ERR_NO_ERROR; /* Set up a reply queue to wait for broker thread signalling * completion, unless non-blocking. */ if (!(purge_flags & RD_KAFKA_PURGE_F_NON_BLOCKING)) tmpq = rd_kafka_q_new(rk); /* Send purge request to all broker threads */ rd_kafka_rdlock(rk); TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_purge_queues(rkb, purge_flags, RD_KAFKA_REPLYQ(tmpq, 0)); waitcnt++; } rd_kafka_rdunlock(rk); /* The internal broker handler may hold unassigned partitions */ mtx_lock(&rk->rk_internal_rkb_lock); rd_kafka_broker_purge_queues(rk->rk_internal_rkb, purge_flags, RD_KAFKA_REPLYQ(tmpq, 0)); mtx_unlock(&rk->rk_internal_rkb_lock); waitcnt++; if (tmpq) { /* Wait for responses */ while (waitcnt-- > 0) rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE); rd_kafka_q_destroy_owner(tmpq); } /* Purge messages for the UA(-1) partitions (which are not * handled by a broker thread) */ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) rd_kafka_purge_ua_toppar_queues(rk); return RD_KAFKA_RESP_ERR_NO_ERROR; } /** * @returns a csv string of purge flags in thread-local storage */ const char *rd_kafka_purge_flags2str (int flags) { static const char *names[] = { "queue", "inflight", "non-blocking", NULL }; static RD_TLS char ret[64]; return rd_flags2str(ret, sizeof(ret), names, flags); } int rd_kafka_version (void) { return RD_KAFKA_VERSION; } const char *rd_kafka_version_str (void) { static RD_TLS char ret[128]; size_t of = 0, r; if (*ret) return ret; #ifdef LIBRDKAFKA_GIT_VERSION if (*LIBRDKAFKA_GIT_VERSION) { of 
= rd_snprintf(ret, sizeof(ret), "%s", *LIBRDKAFKA_GIT_VERSION == 'v' ? LIBRDKAFKA_GIT_VERSION+1 : LIBRDKAFKA_GIT_VERSION); if (of > sizeof(ret)) of = sizeof(ret); } #endif #define _my_sprintf(...) do { \ r = rd_snprintf(ret+of, sizeof(ret)-of, __VA_ARGS__); \ if (r > sizeof(ret)-of) \ r = sizeof(ret)-of; \ of += r; \ } while(0) if (of == 0) { int ver = rd_kafka_version(); int prel = (ver & 0xff); _my_sprintf("%i.%i.%i", (ver >> 24) & 0xff, (ver >> 16) & 0xff, (ver >> 8) & 0xff); if (prel != 0xff) { /* pre-builds below 200 are just running numbers, * above 200 are RC numbers. */ if (prel <= 200) _my_sprintf("-pre%d", prel); else _my_sprintf("-RC%d", prel - 200); } } #if ENABLE_DEVEL _my_sprintf("-devel"); #endif #if WITHOUT_OPTIMIZATION _my_sprintf("-O0"); #endif return ret; } /** * Assert trampoline to print some debugging information on crash. */ void RD_NORETURN rd_kafka_crash (const char *file, int line, const char *function, rd_kafka_t *rk, const char *reason) { fprintf(stderr, "*** %s:%i:%s: %s ***\n", file, line, function, reason); if (rk) rd_kafka_dump0(stderr, rk, 0/*no locks*/); abort(); } struct list_groups_state { rd_kafka_q_t *q; rd_kafka_resp_err_t err; int wait_cnt; const char *desired_group; struct rd_kafka_group_list *grplist; int grplist_size; }; static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, rd_kafka_buf_t *reply, rd_kafka_buf_t *request, void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int cnt; if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* 'state' has gone out of scope due to list_groups() * timing out and returning. 
*/ return; } state = opaque; state->wait_cnt--; if (err) goto err; rd_kafka_buf_read_i32(reply, &cnt); while (cnt-- > 0) { int16_t ErrorCode; rd_kafkap_str_t Group, GroupState, ProtoType, Proto; int MemberCnt; struct rd_kafka_group_info *gi; if (state->grplist->group_cnt == state->grplist_size) { /* Grow group array */ state->grplist_size *= 2; state->grplist->groups = rd_realloc(state->grplist->groups, state->grplist_size * sizeof(*state->grplist->groups)); } gi = &state->grplist->groups[state->grplist->group_cnt++]; memset(gi, 0, sizeof(*gi)); rd_kafka_buf_read_i16(reply, &ErrorCode); rd_kafka_buf_read_str(reply, &Group); rd_kafka_buf_read_str(reply, &GroupState); rd_kafka_buf_read_str(reply, &ProtoType); rd_kafka_buf_read_str(reply, &Proto); rd_kafka_buf_read_i32(reply, &MemberCnt); if (MemberCnt > 100000) { err = RD_KAFKA_RESP_ERR__BAD_MSG; goto err; } rd_kafka_broker_lock(rkb); gi->broker.id = rkb->rkb_nodeid; gi->broker.host = rd_strdup(rkb->rkb_origname); gi->broker.port = rkb->rkb_port; rd_kafka_broker_unlock(rkb); gi->err = ErrorCode; gi->group = RD_KAFKAP_STR_DUP(&Group); gi->state = RD_KAFKAP_STR_DUP(&GroupState); gi->protocol_type = RD_KAFKAP_STR_DUP(&ProtoType); gi->protocol = RD_KAFKAP_STR_DUP(&Proto); if (MemberCnt > 0) gi->members = rd_malloc(MemberCnt * sizeof(*gi->members)); while (MemberCnt-- > 0) { rd_kafkap_str_t MemberId, ClientId, ClientHost; rd_kafkap_bytes_t Meta, Assignment; struct rd_kafka_group_member_info *mi; mi = &gi->members[gi->member_cnt++]; memset(mi, 0, sizeof(*mi)); rd_kafka_buf_read_str(reply, &MemberId); rd_kafka_buf_read_str(reply, &ClientId); rd_kafka_buf_read_str(reply, &ClientHost); rd_kafka_buf_read_bytes(reply, &Meta); rd_kafka_buf_read_bytes(reply, &Assignment); mi->member_id = RD_KAFKAP_STR_DUP(&MemberId); mi->client_id = RD_KAFKAP_STR_DUP(&ClientId); mi->client_host = RD_KAFKAP_STR_DUP(&ClientHost); if (RD_KAFKAP_BYTES_LEN(&Meta) == 0) { mi->member_metadata_size = 0; mi->member_metadata = NULL; } else { 
mi->member_metadata_size = RD_KAFKAP_BYTES_LEN(&Meta); mi->member_metadata = rd_memdup(Meta.data, mi->member_metadata_size); } if (RD_KAFKAP_BYTES_LEN(&Assignment) == 0) { mi->member_assignment_size = 0; mi->member_assignment = NULL; } else { mi->member_assignment_size = RD_KAFKAP_BYTES_LEN(&Assignment); mi->member_assignment = rd_memdup(Assignment.data, mi->member_assignment_size); } } } err: state->err = err; return; err_parse: state->err = reply->rkbuf_err; } static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, rd_kafka_buf_t *reply, rd_kafka_buf_t *request, void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int16_t ErrorCode; char **grps = NULL; int cnt, grpcnt, i = 0; if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* 'state' is no longer in scope because * list_groups() timed out and returned to the caller. * We must not touch anything here but simply return. */ return; } state = opaque; state->wait_cnt--; if (err) goto err; rd_kafka_buf_read_i16(reply, &ErrorCode); if (ErrorCode) { err = ErrorCode; goto err; } rd_kafka_buf_read_i32(reply, &cnt); if (state->desired_group) grpcnt = 1; else grpcnt = cnt; if (cnt == 0 || grpcnt == 0) return; grps = rd_malloc(sizeof(*grps) * grpcnt); while (cnt-- > 0) { rd_kafkap_str_t grp, proto; rd_kafka_buf_read_str(reply, &grp); rd_kafka_buf_read_str(reply, &proto); if (state->desired_group && rd_kafkap_str_cmp_str(&grp, state->desired_group)) continue; grps[i++] = RD_KAFKAP_STR_DUP(&grp); if (i == grpcnt) break; } if (i > 0) { state->wait_cnt++; rd_kafka_DescribeGroupsRequest(rkb, (const char **)grps, i, RD_KAFKA_REPLYQ(state->q, 0), rd_kafka_DescribeGroups_resp_cb, state); while (i-- > 0) rd_free(grps[i]); } rd_free(grps); err: state->err = err; return; err_parse: if (grps) rd_free(grps); state->err = reply->rkbuf_err; } rd_kafka_resp_err_t rd_kafka_list_groups (rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, 
int timeout_ms) { rd_kafka_broker_t *rkb; int rkb_cnt = 0; struct list_groups_state state = RD_ZERO_INIT; rd_ts_t ts_end = rd_timeout_init(timeout_ms); int state_version = rd_kafka_brokers_get_state_version(rk); /* Wait until metadata has been fetched from cluster so * that we have a full broker list. * This state only happens during initial client setup, after that * there'll always be a cached metadata copy. */ rd_kafka_rdlock(rk); while (!rk->rk_ts_metadata) { rd_kafka_rdunlock(rk); if (!rd_kafka_brokers_wait_state_change( rk, state_version, rd_timeout_remains(ts_end))) return RD_KAFKA_RESP_ERR__TIMED_OUT; rd_kafka_rdlock(rk); } state.q = rd_kafka_q_new(rk); state.desired_group = group; state.grplist = rd_calloc(1, sizeof(*state.grplist)); state.grplist_size = group ? 1 : 32; state.grplist->groups = rd_malloc(state.grplist_size * sizeof(*state.grplist->groups)); /* Query each broker for its list of groups */ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_lock(rkb); if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { rd_kafka_broker_unlock(rkb); continue; } rd_kafka_broker_unlock(rkb); state.wait_cnt++; rkb_cnt++; rd_kafka_ListGroupsRequest(rkb, RD_KAFKA_REPLYQ(state.q, 0), rd_kafka_ListGroups_resp_cb, &state); } rd_kafka_rdunlock(rk); if (rkb_cnt == 0) { state.err = RD_KAFKA_RESP_ERR__TRANSPORT; } else { int remains; while (state.wait_cnt > 0 && !rd_timeout_expired((remains = rd_timeout_remains(ts_end)))) { rd_kafka_q_serve(state.q, remains, 0, RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); /* Ignore yields */ } } rd_kafka_q_destroy_owner(state.q); if (state.wait_cnt > 0 && !state.err) { if (state.grplist->group_cnt == 0) state.err = RD_KAFKA_RESP_ERR__TIMED_OUT; else { *grplistp = state.grplist; return RD_KAFKA_RESP_ERR__PARTIAL; } } if (state.err) rd_kafka_group_list_destroy(state.grplist); else *grplistp = state.grplist; return state.err; } void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist0) { struct 
rd_kafka_group_list *grplist = (struct rd_kafka_group_list *)grplist0; while (grplist->group_cnt-- > 0) { struct rd_kafka_group_info *gi; gi = &grplist->groups[grplist->group_cnt]; if (gi->broker.host) rd_free(gi->broker.host); if (gi->group) rd_free(gi->group); if (gi->state) rd_free(gi->state); if (gi->protocol_type) rd_free(gi->protocol_type); if (gi->protocol) rd_free(gi->protocol); while (gi->member_cnt-- > 0) { struct rd_kafka_group_member_info *mi; mi = &gi->members[gi->member_cnt]; if (mi->member_id) rd_free(mi->member_id); if (mi->client_id) rd_free(mi->client_id); if (mi->client_host) rd_free(mi->client_host); if (mi->member_metadata) rd_free(mi->member_metadata); if (mi->member_assignment) rd_free(mi->member_assignment); } if (gi->members) rd_free(gi->members); } if (grplist->groups) rd_free(grplist->groups); rd_free(grplist); } const char *rd_kafka_get_debug_contexts(void) { return RD_KAFKA_DEBUG_CONTEXTS; } int rd_kafka_path_is_dir (const char *path) { #ifdef _WIN32 struct _stat st; return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR); #else struct stat st; return (stat(path, &st) == 0 && S_ISDIR(st.st_mode)); #endif } /** * @returns true if directory is empty or can't be accessed, else false. */ rd_bool_t rd_kafka_dir_is_empty (const char *path) { #if _WIN32 /* FIXME: Unsupported */ return rd_true; #else DIR *dir; struct dirent *d; dir = opendir(path); if (!dir) return rd_true; while ((d = readdir(dir))) { if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) continue; #ifndef __OS400__ if (d->d_type == DT_REG || d->d_type == DT_LNK || d->d_type == DT_DIR) { closedir(dir); return rd_false; } #else /* we have something in dir - dir is not empty */ closedir(dir); return rd_false; #endif } closedir(dir); return rd_true; #endif } void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr) { free(ptr); } int rd_kafka_errno (void) { return errno; } int rd_kafka_unittest (void) { return rd_unittest(); }
368668.c
/* ----------------------------------------------------------------------------
 * --  _____       ______  _____                                              -
 * -- |_   _|     |  ____|/ ____|                                             -
 * --   | |  _ __ | |__  | (___    Institute of Embedded Systems              -
 * --   | | | '_ \|  __|  \___ \   Zuercher Hochschule Winterthur             -
 * --  _| |_| | | | |____ ____) |  (University of Applied Sciences)           -
 * -- |_____|_| |_|______|_____/   8401 Winterthur, Switzerland               -
 * ----------------------------------------------------------------------------
 */
/**
 * @file
 * @brief Test suite for the given package.
 *
 * Exercises getNextDayDate() for a plain day increment, a year rollover,
 * a leap-year 28th of February, and a non-leap-year 28th of February.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#include "CUnit/Basic.h"

#include "../../../testlib/src/test_utils.h"

/// UUT - Unit-Under-Test
struct tm getNextDayDate(struct tm date);

/// Epsilon for double comparisons.
#define EPSILON 0.01

// setup & cleanup

/// Per-suite setup hook required by CUnit; no fixtures needed here.
static int setup(void)
{
    // Do nothing.
    return 0; // success
}

/// Per-suite teardown hook required by CUnit; nothing to release.
static int teardown(void)
{
    // Do nothing.
    return 0; // success
}

// tests

/// Ordinary mid-month date: 12.12.2000 must advance to 13.12.2000.
/// NOTE(review): strptime() is POSIX, not ISO C, and its return value is
/// not checked here — on parse failure 'date' stays zeroed; assumed OK
/// for a fixed, well-formed literal.
static void test_next_date(void)
{
    struct tm date = {0};
    struct tm nextDate = {0};
    strptime("12.12.2000", "%d.%m.%Y", &date);
    nextDate = getNextDayDate(date);
    // struct tm convention: tm_mon is 0-based, tm_year counts from 1900.
    CU_ASSERT_EQUAL(nextDate.tm_mday, 13);
    CU_ASSERT_EQUAL(nextDate.tm_mon, 12 - 1);
    CU_ASSERT_EQUAL(nextDate.tm_year, 2000 - 1900);
}

/// Year boundary: 31.12.2000 must roll over to 01.01.2001.
static void test_next_date_year_change(void)
{
    struct tm date = {0};
    struct tm nextDate = {0};
    strptime("31.12.2000", "%d.%m.%Y", &date);
    nextDate = getNextDayDate(date);
    CU_ASSERT_EQUAL(nextDate.tm_mday, 1);
    CU_ASSERT_EQUAL(nextDate.tm_mon, 1 - 1);
    CU_ASSERT_EQUAL(nextDate.tm_year, 2001 - 1900);
}

/// Leap year (2000 is divisible by 400): 28.02.2000 must go to 29.02.2000.
static void test_next_date_leap_year(void)
{
    struct tm date = {0};
    struct tm nextDate = {0};
    strptime("28.02.2000", "%d.%m.%Y", &date);
    nextDate = getNextDayDate(date);
    CU_ASSERT_EQUAL(nextDate.tm_mday, 29);
    CU_ASSERT_EQUAL(nextDate.tm_mon, 2 - 1);
    CU_ASSERT_EQUAL(nextDate.tm_year, 2000 - 1900);
}

/// Non-leap year: 28.02.2001 must skip straight to 01.03.2001.
static void test_next_date_no_leap_year(void)
{
    struct tm date = {0};
    struct tm nextDate = {0};
    strptime("28.02.2001", "%d.%m.%Y", &date);
    nextDate = getNextDayDate(date);
    CU_ASSERT_EQUAL(nextDate.tm_mday, 1);
    CU_ASSERT_EQUAL(nextDate.tm_mon, 3 - 1);
    CU_ASSERT_EQUAL(nextDate.tm_year, 2001 - 1900);
}

/**
 * @brief Registers and runs the tests.
 */
int main(void)
{
    // setup, run, teardown
    TestMainBasic("Test next date calculator", setup, teardown,
                  test_next_date,
                  test_next_date_year_change,
                  test_next_date_leap_year,
                  test_next_date_no_leap_year
                  );
}
440625.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE390_Error_Without_Action__strtol_11.c
Label Definition File: CWE390_Error_Without_Action.label.xml
Template File: point-flaw-11.tmpl.c
*/
/*
 * @description
 * CWE: 390 Detection of Error Condition Without Action
 * Sinks: strtol
 *    GoodSink: Check to see if strtol() failed and handle errors properly
 *    BadSink : Check to see if strtol() failed, but fail to handle errors
 * Flow Variant: 11 Control flow: if(global_returns_t()) and if(global_returns_f())
 *
 * NOTE(review): this is an intentionally flawed SARD/Juliet benchmark file.
 * The FLAW branches are deliberate and must NOT be "fixed"; static-analysis
 * tools are graded on detecting them.
 *
 * */

#include "std_testcase.h"

#include <errno.h>

#ifndef OMITBAD

/* Bad variant: the live branch detects the strtol() range error but
 * deliberately takes no action (the CWE-390 flaw under test). */
void CWE390_Error_Without_Action__strtol_11_bad()
{
    if(global_returns_t())
    {
        {
            /* errno_t/_get_errno are the MSVC-style errno accessors
             * (presumably provided by std_testcase.h on other platforms). */
            errno_t err_code = -1;
            /* 0xfffffffff needs 36 bits, so strtol() overflows and sets
             * errno to ERANGE wherever long is 32 bits. */
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FLAW: Check errno to see if strtol() failed, but do nothing about it */
            if (err_code == ERANGE)
            {
                /* do nothing */
            }
            printf("%li\n", l);
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            errno_t err_code = -1;
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FIX: Check errno to see if strtol() failed and handle errors properly */
            if (err_code == ERANGE)
            {
                printLine("strtol() failed");
                exit(1);
            }
            printf("%li\n", l);
        }
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* good1() uses if(global_returns_f()) instead of if(global_returns_t()) */
static void good1()
{
    if(global_returns_f())
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            errno_t err_code = -1;
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FLAW: Check errno to see if strtol() failed, but do nothing about it */
            if (err_code == ERANGE)
            {
                /* do nothing */
            }
            printf("%li\n", l);
        }
    }
    else
    {
        {
            errno_t err_code = -1;
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FIX: Check errno to see if strtol() failed and handle errors properly */
            if (err_code == ERANGE)
            {
                printLine("strtol() failed");
                exit(1);
            }
            printf("%li\n", l);
        }
    }
}

/* good2() reverses the bodies in the if statement */
static void good2()
{
    if(global_returns_t())
    {
        {
            errno_t err_code = -1;
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FIX: Check errno to see if strtol() failed and handle errors properly */
            if (err_code == ERANGE)
            {
                printLine("strtol() failed");
                exit(1);
            }
            printf("%li\n", l);
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            errno_t err_code = -1;
            long l = strtol("0xfffffffff", NULL, 0);
            if (_get_errno(&err_code))
            {
                printLine("_get_errno failed");
                exit(1);
            }
            /* FLAW: Check errno to see if strtol() failed, but do nothing about it */
            if (err_code == ERANGE)
            {
                /* do nothing */
            }
            printf("%li\n", l);
        }
    }
}

/* Good variant entry point: both helpers exercise only the FIX branch. */
void CWE390_Error_Without_Action__strtol_11_good()
{
    good1();
    good2();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE390_Error_Without_Action__strtol_11_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE390_Error_Without_Action__strtol_11_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
934493.c
/*
** EPITECH PROJECT, 2020
** LibErty
** File description:
** estr_strcapitalize
*/

#include <erty/string/ecstring.h>

/*
** Returns a newly allocated copy of src with every word capitalized:
** the first alphanumeric character of a word is upper-cased and the
** following word characters are lower-cased. The non-alphanumeric
** character that ends a word is also run through eto_upper()
** (presumably a no-op for non-letters, matching historical behavior).
*/
cstr_t estr_capitalize(cstr_t src)
{
    cstr_t str = estrdup(src);
    bool in_word = false;

    for (size_t i = 0; str[i]; i++) {
        bool is_word_char = eis_alphanum(str[i]);

        if (is_word_char != in_word) {
            str[i] = eto_upper(str[i]);
            in_word = is_word_char;
        } else if (is_word_char) {
            str[i] = eto_lower(str[i]);
        }
    }
    return (str);
}
600477.c
/**
  ********************************************************************************
  * @file    stm8s_spi_ReceiveData.c
  * @author  MCD Application Team
  * @version V2.2.0
  * @date    30-September-2014
  * @brief   SPI_ReceiveData() function for the SPI peripheral.
  *          (The original header said "UART1 peripheral" — a copy-paste
  *          artifact; this file only touches SPI.)
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; COPYRIGHT 2014 STMicroelectronics</center></h2>
  *
  * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
  * You may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
  *
  *        http://www.st.com/software_license_agreement_liberty_v2
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  *
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "stm8s_spi.h"

/** @addtogroup STM8S_StdPeriph_Driver
  * @{
  */
/** @}
  * @addtogroup SPI_Public_Functions
  * @{
  */

/**
  * @brief  Returns the most recent received data by the SPI peripheral.
  * @param  None
  * @retval The value of the received data.
  * @note   NOTE(review): no RXNE/status check is done here — the caller is
  *         presumably expected to poll the flag before reading; confirm
  *         against the SPI driver's usage pattern.
  */
uint8_t SPI_ReceiveData(void)
{
  return ((uint8_t)SPI->DR); /* Return the data in the DR register*/
}

/**
  * @}
  */

/**
  * @}
  */


/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
627331.c
#include <stdio.h>
#include <math.h>

/*
 * Reads a count n followed by n integers from stdin and prints their
 * arithmetic mean.
 *
 * Fixes over the original:
 *  - scanf return values are actually checked (they were stored in an
 *    unused variable before), so malformed input no longer propagates
 *    garbage;
 *  - n <= 0 is rejected instead of dividing by zero / by a negative count;
 *  - the accumulator is initialized with a double literal (0.0, not 0.0f);
 *  - the pointless scanf("\n") call was removed (" %d" style whitespace
 *    handling is automatic for %d).
 *
 * Returns 0 on success, 1 on invalid input.
 */
int main(void)
{
    int n;
    double aver = 0.0;

    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "Invalid count\n");
        return 1;
    }
    for (int i = 0; i < n; i++) {
        int x;

        if (scanf("%d", &x) != 1) {
            fprintf(stderr, "Invalid value\n");
            return 1;
        }
        aver += x;
    }
    aver /= n;
    printf("%lf\n", aver);
    return 0;
}
746906.c
/* visorchannel_funcs.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

/*
 * This provides s-Par channel communication primitives, which are
 * independent of the mechanism used to access the channel data.
 */

#include <linux/uuid.h>
#include <linux/io.h>

#include "visorbus.h"
#include "visorbus_private.h"
#include "controlvmchannel.h"

#define MYDRVNAME "visorchannel"

#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
	UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
		0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)

static const uuid_le visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;

/* In-kernel representation of one mapped s-Par channel. */
struct visorchannel {
	u64 physaddr;			/* physical base of the channel */
	ulong nbytes;			/* mapped size in bytes */
	void *mapped;			/* kernel mapping of the channel */
	bool requested;			/* true if we own the mem region */
	struct channel_header chan_hdr;	/* local copy of the channel header */
	uuid_le guid;
	bool needs_lock;	/* channel creator knows if more than one */
				/* thread will be inserting or removing */
	spinlock_t insert_lock; /* protect head writes in chan_hdr */
	spinlock_t remove_lock;	/* protect tail writes in chan_hdr */
	uuid_le type;
	uuid_le inst;
};

/*
 * Unmap and free a channel. Safe to call with NULL; releases the mem
 * region only if this channel successfully requested it at create time.
 */
void
visorchannel_destroy(struct visorchannel *channel)
{
	if (!channel)
		return;
	if (channel->mapped) {
		memunmap(channel->mapped);
		if (channel->requested)
			release_mem_region(channel->physaddr, channel->nbytes);
	}
	kfree(channel);
}

/* Accessor: physical base address of the channel. */
u64
visorchannel_get_physaddr(struct visorchannel *channel)
{
	return channel->physaddr;
}

/* Accessor: mapped channel size in bytes. */
ulong
visorchannel_get_nbytes(struct visorchannel *channel)
{
	return channel->nbytes;
}

/*
 * Format a uuid into caller-supplied buffer s (must be large enough for
 * the "%pUL" rendering plus NUL) and return s.
 */
char *
visorchannel_uuid_id(uuid_le *guid, char *s)
{
	sprintf(s, "%pUL", guid);
	return s;
}

/* Render the channel's own guid into s. */
char *
visorchannel_id(struct visorchannel *channel, char *s)
{
	return visorchannel_uuid_id(&channel->guid, s);
}

/* Render the channel header's zone uuid into s. */
char *
visorchannel_zoneid(struct visorchannel *channel, char *s)
{
	return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}

/* Accessor: partition handle from the cached channel header. */
u64
visorchannel_get_clientpartition(struct visorchannel *channel)
{
	return channel->chan_hdr.partition_handle;
}

/*
 * Set the partition handle in the LOCAL header copy only; the on-channel
 * header is not written here. Always returns 0.
 */
int
visorchannel_set_clientpartition(struct visorchannel *channel,
				 u64 partition_handle)
{
	channel->chan_hdr.partition_handle = partition_handle;
	return 0;
}

/**
 * visorchannel_get_uuid() - queries the UUID of the designated channel
 * @channel: the channel to query
 *
 * Return: the UUID of the provided channel
 */
uuid_le
visorchannel_get_uuid(struct visorchannel *channel)
{
	return channel->guid;
}
EXPORT_SYMBOL_GPL(visorchannel_get_uuid);

/*
 * Copy nbytes from channel offset into dest. Returns -EIO if the range
 * exceeds the mapped size, 0 on success.
 */
int
visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
		  ulong nbytes)
{
	if (offset + nbytes > channel->nbytes)
		return -EIO;

	memcpy(dest, channel->mapped + offset, nbytes);
	return 0;
}

/*
 * Copy nbytes from dest (despite the name, this is the SOURCE buffer)
 * into the channel at offset. Writes that overlap the channel header
 * also update the locally cached chan_hdr so it stays coherent.
 */
int
visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
		   ulong nbytes)
{
	size_t chdr_size = sizeof(struct channel_header);
	size_t copy_size;

	if (offset + nbytes > channel->nbytes)
		return -EIO;

	if (offset < chdr_size) {
		copy_size = min(chdr_size - offset, nbytes);
		memcpy(((char *)(&channel->chan_hdr)) + offset,
		       dest, copy_size);
	}

	memcpy(channel->mapped + offset, dest, nbytes);
	return 0;
}

/* Return a pointer to the locally cached channel header copy. */
void *
visorchannel_get_header(struct visorchannel *channel)
{
	return &channel->chan_hdr;
}

/*
 * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
 * channel header
 */
#define SIG_QUEUE_OFFSET(chan_hdr, q) \
	((chan_hdr)->ch_space_offset + \
	 ((q) * sizeof(struct signal_queue_header)))

/*
 * Return offset of a specific queue entry (data) from the beginning of a
 * channel header
 */
#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
	(SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
	 ((slot) * (sig_hdr)->signal_size))

/*
 * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
 * into host memory
 */
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
	visorchannel_write(channel, \
			   SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) + \
			   offsetof(struct signal_queue_header, FIELD), \
			   &((sig_hdr)->FIELD), \
			   sizeof((sig_hdr)->FIELD))

/*
 * Read the signal-queue header for the given queue into sig_hdr.
 * Rejects channels whose ch_space_offset is impossibly small.
 */
static int
sig_read_header(struct visorchannel *channel, u32 queue,
		struct signal_queue_header *sig_hdr)
{
	if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
		return -EINVAL;

	/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
	return visorchannel_read(channel,
				 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
				 sig_hdr, sizeof(struct signal_queue_header));
}

/* Read one signal slot's payload into data. */
static int
sig_read_data(struct visorchannel *channel, u32 queue,
	      struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
	int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
						 sig_hdr, slot);

	return visorchannel_read(channel, signal_data_offset,
				 data, sig_hdr->signal_size);
}

/* Write one signal slot's payload from data. */
static int
sig_write_data(struct visorchannel *channel, u32 queue,
	       struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
	int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
						 sig_hdr, slot);

	return visorchannel_write(channel, signal_data_offset,
				  data, sig_hdr->signal_size);
}

/*
 * Dequeue one signal from the queue's tail into msg. Caller must hold
 * remove_lock if the channel is shared (handled by the public wrapper).
 * Returns -EAGAIN when the queue is empty.
 */
static int
signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
{
	struct signal_queue_header sig_hdr;
	int error;

	error = sig_read_header(channel, queue, &sig_hdr);
	if (error)
		return error;

	/* No signals to remove; have caller try again. */
	if (sig_hdr.head == sig_hdr.tail)
		return -EAGAIN;

	sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;

	error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
	if (error)
		return error;

	sig_hdr.num_received++;

	/*
	 * For each data field in SIGNAL_QUEUE_HEADER that was modified,
	 * update host memory.
	 */
	mb(); /* required for channel synch */

	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
	if (error)
		return error;
	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
	if (error)
		return error;

	return 0;
}

/**
 * visorchannel_signalremove() - removes a message from the designated
 *                               channel/queue
 * @channel: the channel the message will be removed from
 * @queue:   the queue the message will be removed from
 * @msg:     the message to remove
 *
 * Return: integer error code indicating the status of the removal
 */
int
visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
{
	int rc;
	unsigned long flags;

	if (channel->needs_lock) {
		spin_lock_irqsave(&channel->remove_lock, flags);
		rc = signalremove_inner(channel, queue, msg);
		spin_unlock_irqrestore(&channel->remove_lock, flags);
	} else {
		rc = signalremove_inner(channel, queue, msg);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);

/*
 * True when the queue has no pending signals. A failed header read is
 * reported as "empty" so callers don't spin on a broken channel.
 */
static bool
queue_empty(struct visorchannel *channel, u32 queue)
{
	struct signal_queue_header sig_hdr;

	if (sig_read_header(channel, queue, &sig_hdr))
		return true;

	return (sig_hdr.head == sig_hdr.tail);
}

/**
 * visorchannel_signalempty() - checks if the designated channel/queue
 *                              contains any messages
 * @channel: the channel to query
 * @queue:   the queue in the channel to query
 *
 * Return: boolean indicating whether any messages in the designated
 *         channel/queue are present
 */
bool
visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
	bool rc;
	unsigned long flags;

	if (!channel->needs_lock)
		return queue_empty(channel, queue);

	spin_lock_irqsave(&channel->remove_lock, flags);
	rc = queue_empty(channel, queue);
	spin_unlock_irqrestore(&channel->remove_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);

/*
 * Enqueue one signal at the queue's head. Returns -EIO (after bumping
 * num_overflows in host memory) when the queue is full. Caller must hold
 * insert_lock if the channel is shared (handled by the public wrapper).
 */
static int
signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
{
	struct signal_queue_header sig_hdr;
	int err;

	err = sig_read_header(channel, queue, &sig_hdr);
	if (err)
		return err;

	sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
	if (sig_hdr.head == sig_hdr.tail) {
		sig_hdr.num_overflows++;
		err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
		if (err)
			return err;
		return -EIO;
	}

	err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
	if (err)
		return err;

	sig_hdr.num_sent++;

	/*
	 * For each data field in SIGNAL_QUEUE_HEADER that was modified,
	 * update host memory.
	 */
	mb(); /* required for channel synch */

	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
	if (err)
		return err;
	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
	if (err)
		return err;

	return 0;
}

/*
 * visorchannel_create_guts() - creates the struct visorchannel abstraction
 *                              for a data area in memory, but does NOT modify
 *                              this data area
 * @physaddr:      physical address of start of channel
 * @channel_bytes: size of the channel in bytes; this may be 0 if the channel
 *                 has already been initialized in memory (which is true for
 *                 all channels provided to guest environments by the s-Par
 *                 back-end), in which case the actual channel size will be
 *                 read from the channel header in memory
 * @gfp:           gfp_t to use when allocating memory for the data struct
 * @guid:          uuid that identifies channel type; this may be 0 if the
 *                 channel has already been initialized in memory (which is
 *                 true for all channels provided to guest environments by
 *                 the s-Par back-end), in which case the actual channel guid
 *                 will be read from the channel header in memory
 * @needs_lock:    must specify true if you have multiple threads of execution
 *                 that will be calling visorchannel methods of this
 *                 visorchannel at the same time
 *
 * The channel is mapped twice: first just the header (to discover the real
 * size/guid), then remapped at full size.
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
static struct visorchannel *
visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
			 gfp_t gfp, uuid_le guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;

	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);

	/*
	 * Video driver contains the efi framebuffer so it will get a
	 * conflict resource when requesting its full mem region. Since
	 * we are only using the efi framebuffer for video we can ignore
	 * this. Remember that we haven't requested it so we don't try to
	 * release later on.
	 */
	channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}

	channel->physaddr = physaddr;
	channel->nbytes = size;

	err = visorchannel_read(channel, 0, &channel->chan_hdr,
				sizeof(struct channel_header));
	if (err)
		goto err_destroy_channel;

	/* we had better be a CLIENT of this channel */
	if (channel_bytes == 0)
		channel_bytes = (ulong)channel->chan_hdr.size;
	if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
		guid = channel->chan_hdr.chtype;

	/* Drop the header-only mapping and remap at the full size. */
	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr,
						channel_bytes, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(channel->physaddr, channel_bytes,
				   MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, channel_bytes);
		goto err_destroy_channel;
	}

	channel->nbytes = channel_bytes;
	channel->guid = guid;
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}

/* Create an unlocked (single-user) channel abstraction. */
struct visorchannel *
visorchannel_create(u64 physaddr, unsigned long channel_bytes,
		    gfp_t gfp, uuid_le guid)
{
	return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
					false);
}

/* Create a channel abstraction whose insert/remove paths take spinlocks. */
struct visorchannel *
visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
			      gfp_t gfp, uuid_le guid)
{
	return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
					true);
}

/**
 * visorchannel_signalinsert() - inserts a message into the designated
 *                               channel/queue
 * @channel: the channel the message will be added to
 * @queue:   the queue the message will be added to
 * @msg:     the message to insert
 *
 * Return: integer error code indicating the status of the insertion
 */
int
visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
{
	int rc;
	unsigned long flags;

	if (channel->needs_lock) {
		spin_lock_irqsave(&channel->insert_lock, flags);
		rc = signalinsert_inner(channel, queue, msg);
		spin_unlock_irqrestore(&channel->insert_lock, flags);
	} else {
		rc = signalinsert_inner(channel, queue, msg);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
260595.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Allocate an uninitialized n x n matrix of ints. Exits on allocation
 * failure instead of returning NULL rows (previously unchecked). */
int** alloc_square_matrix(int n)
{
    int **m = malloc(n * sizeof(int*));

    if (m == NULL) {
        fprintf(stderr, "Error: out of memory.\n");
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        m[i] = malloc(n * sizeof(int));
        if (m[i] == NULL) {
            fprintf(stderr, "Error: out of memory.\n");
            exit(1);
        }
    }
    return m;
}

/* Free a matrix previously returned by alloc_square_matrix(). */
void unalloc_square_matrix(int** m, int n)
{
    for (int i = 0; i < n; ++i) {
        free(m[i]);
    }
    free(m);
}

/* Read the matrix from the given file and returns it. It also updates n with
 * the order of the matrix. Exits with a message on open/format errors.
 * Fixes: fscanf results are checked, and the file is closed (it leaked
 * before). */
int** read_square_matrix(const char* file, int* n)
{
    FILE *fp = fopen(file, "r");

    if (fp == NULL) {
        fprintf(stderr, "Error while reading the matrix.\n");
        exit(1);
    }
    if (fscanf(fp, "%d\n", n) != 1 || *n <= 0) {
        fprintf(stderr, "Error while reading the matrix.\n");
        exit(1);
    }
    int **m = alloc_square_matrix(*n);
    for (int i = 0; i < *n; ++i) {
        for (int j = 0; j < *n; ++j) {
            if (fscanf(fp, "%d", &m[i][j]) != 1) {
                fprintf(stderr, "Error while reading the matrix.\n");
                exit(1);
            }
        }
    }
    fclose(fp);
    return m;
}

/* Write matrix m of order n to file, in the format read_square_matrix
 * expects. Fixes: the stream is now closed (and thus flushed) — it was
 * never fclose'd before, so output relied on process-exit flushing. */
void write_square_matrix(int **m, int n, const char* file)
{
    FILE *fp = fopen(file, "w");

    if (fp == NULL) {
        fprintf(stderr, "Error while writing the matrix.\n");
        exit(1);
    }
    fprintf(fp, "%d\n", n);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            fprintf(fp, "%d ", m[i][j]);
        }
        fprintf(fp, "\n");
    }
    if (fclose(fp) != 0) { /* fclose flushes; failure means lost output */
        fprintf(stderr, "Error while writing the matrix.\n");
        exit(1);
    }
}

/* Print the square matrix m of order n. */
void print_square_matrix(int **m, int n)
{
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            printf("%d ", m[i][j]);
        }
        puts("");
    }
}

/* Naive O(n^3) product of two n x n matrices; returns a newly allocated
 * matrix the caller must free with unalloc_square_matrix(). */
int** mult_square_matrix(int **m1, int **m2, int n)
{
    int **m = alloc_square_matrix(n);

    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            int cell = 0;
            for (int k = 0; k < n; ++k) {
                cell += m1[i][k] * m2[k][j];
            }
            m[i][j] = cell;
        }
    }
    return m;
}

/* Usage: prog <matrix1.txt> <matrix2.txt> <outfile.txt>
 * Multiplies the two input matrices and writes the product, reporting
 * elapsed CPU time (note: timing includes file I/O, as before). */
int main(int argc, char *argv[])
{
    if (argc != 4) {
        printf("Usage: %s <matrix1.txt> <matrix2.txt> <outfile.txt>\n",
               argv[0]);
        exit(0);
    }

    int n, n2;
    clock_t begin, end;

    begin = clock();
    int **m1 = read_square_matrix(argv[1], &n);
    int **m2 = read_square_matrix(argv[2], &n2);
    if (n != n2) {
        fprintf(stderr, "Error: the matrices must have the same order!\n");
        exit(1);
    }
    int **m3 = mult_square_matrix(m1, m2, n);
    write_square_matrix(m3, n, argv[3]);
    end = clock();

#ifdef DEBUG
    print_square_matrix(m1, n);
    print_square_matrix(m2, n);
    print_square_matrix(m3, n);
#endif

    printf("Execution time: %lf s\n", ((double)(end - begin)) / CLOCKS_PER_SEC);

    unalloc_square_matrix(m1, n);
    unalloc_square_matrix(m2, n);
    unalloc_square_matrix(m3, n);
    return 0;
}
487425.c
/*! \file main.c \brief RTC calendar \version 2016-01-15, V1.0.0, demo for GD32F1x0 \version 2016-05-13, V2.0.0, demo for GD32F1x0 \version 2019-11-20, V3.0.0, demo for GD32F1x0 */ /* Copyright (c) 2019, GigaDevice Semiconductor Inc. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "gd32f1x0.h"
#include "gd32f1x0_libopt.h"
#include "gd32f150r_eval.h"
#include "systick.h"
#include <stdio.h>
#include "lcd_driver.h"
#include "gui.h"
#include "tft_test.h"

/* Select the RTC clock source at compile time (IRC40K or LXTAL). */
#define RTC_CLOCK_SOURCE_IRC40K

/* RTC prescaler values chosen by rtc_pre_config() for the active clock
 * source. NOTE(review): they are computed but never copied into
 * rtc_initpara before rtc_init() — confirm whether the struct carries
 * them elsewhere or whether this demo relies on reset defaults. */
__IO uint32_t prescaler_a = 0, prescaler_s = 0;
rtc_parameter_struct rtc_initpara;

void led_flash(int times);
void rtc_pre_config(void);
void led_key_config(void);
void rcu_config(void);

/*!
    \brief      main function: shows the RTC date/time on the LCD and resets
                the calendar to a fixed date when the WAKEUP key is pressed
    \param[in]  none
    \param[out] none
    \retval     none
*/
int main(void)
{
    systick_config();
    rcu_config();
    led_key_config();
    /* flash LEDs for test */
    led_flash(1);
    /* RTC configuration */
    rtc_pre_config();

    lcd_init();
    lcd_clear(BLUE);
    gui_draw_font_gbk24(10, 30, YELLOW, BLUE, " Today is ");
    gui_draw_font_gbk16(2, 10, WHITE,BLUE, " GigaDevice Semiconductor Inc.");
    gui_draw_font_gbk16(2, 30, WHITE,BLUE, " -- GD32F1x0 Series MCU -- ");
    gui_draw_font_gbk16(2, 50, WHITE,BLUE, " GD32F150R_EAVL ");
    gui_draw_font_gbk16(2, 70, WHITE,BLUE, " RTC Test :");

    while( 1 ){
        /* get the current date & time, in BCD mode */
        rtc_current_time_get(&rtc_initpara);
        gui_draw_font_gbk24(10, 132, YELLOW, BLUE, " Today is ");

        /* year: the "20" century digits are drawn as literals, then the
         * two BCD year nibbles */
        gui_draw_font_num32(20 - 15, 160, YELLOW, BLUE, 2 );
        gui_draw_font_num32(44 - 15, 160, YELLOW, BLUE, 0 );
        gui_draw_font_num32(68 - 15, 160, YELLOW, BLUE, rtc_initpara.rtc_year >> 4);
        gui_draw_font_num32(92 - 15, 160, YELLOW, BLUE, (rtc_initpara.rtc_year & 0x0F));

        /* month (BCD nibbles) */
        gui_draw_font_num32(120, 160, YELLOW, BLUE, rtc_initpara.rtc_month >> 4);
        gui_draw_font_num32(144, 160, YELLOW, BLUE, (rtc_initpara.rtc_month & 0x0F));

        /* date (BCD nibbles) */
        gui_draw_font_num32(172, 160, YELLOW, BLUE, rtc_initpara.rtc_date >> 4);
        gui_draw_font_num32(196, 160, YELLOW, BLUE, (rtc_initpara.rtc_date & 0x0F));

        gui_draw_font_gbk24(10, 208, YELLOW, BLUE, " Now Time is ");

        if(0 == rtc_initpara.rtc_am_pm){
            gui_draw_font_gbk24(20, 244, YELLOW, BLUE, " AM ");
        }else{
            gui_draw_font_gbk24(20, 244, YELLOW, BLUE, " PM ");
        }

        /* hour (BCD nibbles) */
        gui_draw_font_num32(60, 236, YELLOW, BLUE, rtc_initpara.rtc_hour >> 4);
        gui_draw_font_num32(84, 236, YELLOW, BLUE, (rtc_initpara.rtc_hour & 0x0F));
        gui_draw_font_gbk24(112, 244, YELLOW, BLUE, ":");

        /* minute (BCD nibbles) */
        gui_draw_font_num32(116, 236, YELLOW, BLUE, rtc_initpara.rtc_minute >> 4);
        gui_draw_font_num32(140, 236, YELLOW, BLUE, (rtc_initpara.rtc_minute & 0x0F));
        gui_draw_font_gbk24(164, 244, YELLOW, BLUE, ":");

        /* second (BCD nibbles) */
        gui_draw_font_num32(168, 236, YELLOW, BLUE, rtc_initpara.rtc_second >> 4);
        gui_draw_font_num32(192, 236, YELLOW, BLUE, (rtc_initpara.rtc_second & 0x0F));

        /* triple-sampled debounce of the WAKEUP key; on a confirmed press,
         * reset the calendar to 2016-05-13 (Friday) 12:00:00, 24h format */
        if(0 == gd_eval_key_state_get(KEY_WAKEUP)){
            delay_1ms(50);
            if(0 == gd_eval_key_state_get(KEY_WAKEUP)){
                delay_1ms(50);
                if(0 == gd_eval_key_state_get(KEY_WAKEUP)){
                    rtc_initpara.rtc_year = 0x16 ;
                    rtc_initpara.rtc_month = RTC_MAY ;
                    rtc_initpara.rtc_date = 0x13 ;
                    rtc_initpara.rtc_day_of_week = RTC_FRIDAY ;
                    rtc_initpara.rtc_hour = 0x12;
                    rtc_initpara.rtc_minute = 0x00;
                    rtc_initpara.rtc_second = 0x00;
                    rtc_initpara.rtc_display_format = RTC_24HOUR;
                    rtc_init(&rtc_initpara);
                }
            }
        }
    }
}

/*!
    \brief      LEDs and KEYs configuration function
    \param[in]  none
    \param[out] none
    \retval     none
*/
void led_key_config(void)
{
    gd_eval_led_init(LED1);
    gd_eval_led_init(LED2);
    gd_eval_led_init(LED3);
    gd_eval_led_init(LED4);
    /* test key init */
    gd_eval_key_init(KEY_WAKEUP,KEY_MODE_GPIO);
}

/*!
    \brief      RCU configuration function: enables the GPIO port clocks
    \param[in]  none
    \param[out] none
    \retval     none
*/
void rcu_config(void)
{
    rcu_periph_clock_enable(RCU_GPIOA);
    rcu_periph_clock_enable(RCU_GPIOB);
    rcu_periph_clock_enable(RCU_GPIOC);
    rcu_periph_clock_enable(RCU_GPIOF);
}

/*!
    \brief      RTC configuration function: selects the RTC clock source,
                computes the matching prescalers and enables the RTC clock
    \param[in]  none
    \param[out] none
    \retval     none
*/
void rtc_pre_config(void)
{
    /* enable access to RTC registers in backup domain */
    rcu_periph_clock_enable(RCU_PMU);
    pmu_backup_write_enable();

#if defined (RTC_CLOCK_SOURCE_IRC40K)
    /* enable the IRC40K oscillator */
    rcu_osci_on(RCU_IRC40K);
    /* wait till IRC40K is ready */
    rcu_osci_stab_wait(RCU_IRC40K);
    /* select the RTC clock source */
    rcu_rtc_clock_config(RCU_RTCSRC_IRC40K);

    prescaler_s = 0x18F;
    prescaler_a = 0x63;
#elif defined (RTC_CLOCK_SOURCE_LXTAL)
    /* enable the LXTAL oscillator */
    rcu_osci_on(RCU_LXTAL);
    /* wait till LXTAL is ready */
    rcu_osci_stab_wait(RCU_LXTAL);
    /* select the RTC clock source */
    rcu_rtc_clock_config(RCU_LXTAL);

    prescaler_s = 0xFF;
    prescaler_a = 0x7F;
#else
#error RTC clock source should be defined.
#endif /* RTC_CLOCK_SOURCE_IRC40K */

    rcu_periph_clock_enable(RCU_RTC);
    rtc_register_sync_wait();
}

/*!
    \brief      test status led
    \param[in]  times: specifies the delay time length, in milliseconds
    \param[out] none
    \retval     none
*/
void led_flash(int times)
{
    int i;
    for(i = 0; i < times; i++){
        /* insert 200 ms delay */
        delay_1ms(200);
        /* turn on LEDs */
        gd_eval_led_on(LED1);
        gd_eval_led_on(LED2);
        gd_eval_led_on(LED3);
        gd_eval_led_on(LED4);
        /* insert 200 ms delay */
        delay_1ms(200);
        /* turn off LEDs */
        gd_eval_led_off(LED1);
        gd_eval_led_off(LED2);
        gd_eval_led_off(LED3);
        gd_eval_led_off(LED4);
    }
}
518990.c
//************************************************************************************************************
//
// © 2016-2019 Regents of the University of California on behalf of the University of California at Berkeley
// with rights granted for USDOT OSADP distribution with the ECL-2.0 open source license.
//
//*************************************************************************************************************
/*
 * Generated by asn1c-0.9.29 (http://lionet.info/asn1c)
 * From ASN.1 module "DSRC"
 * 	found in "../j2735_asn/J2735_201603DA.asn"
 * 	`asn1c -fcompound-names -gen-PER -gen-OER -pdu=auto`
 *
 * NOTE: this file is machine-generated; regenerate with asn1c rather than
 * editing by hand.
 */

#include "VehicleClassification.h"

/* Validates the SIZE(1..4) constraint on the `regional` extension list,
 * then delegates element validation to the generic constraint checker. */
static int
memb_regional_constraint_1(const asn_TYPE_descriptor_t *td, const void *sptr,
			asn_app_constraint_failed_f *ctfailcb, void *app_key)
{
	size_t size;

	if(!sptr) {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: value not given (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}

	/* Determine the number of elements */
	size = _A_CSEQUENCE_FROM_VOID(sptr)->count;

	if((size >= 1 && size <= 4)) {
		/* Perform validation of the inner elements */
		return td->encoding_constraints.general_constraints(td, sptr, ctfailcb, app_key);
	} else {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: constraint failed (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
}

/* OER/PER size constraints for the regional SEQUENCE OF (SIZE(1..4)). */
static asn_oer_constraints_t asn_OER_type_regional_constr_10 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..4)) */};
static asn_per_constraints_t asn_PER_type_regional_constr_10 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 2,  2,  1,  4 }	/* (SIZE(1..4)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_memb_regional_constr_10 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..4)) */};
static asn_per_constraints_t asn_PER_memb_regional_constr_10 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 2,  2,  1,  4 }	/* (SIZE(1..4)) */,
	0, 0	/* No PER value map */
};
/* Element descriptor for the regional SEQUENCE OF contents. */
static asn_TYPE_member_t asn_MBR_regional_10[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (16 << 2)),
		0,
		&asn_DEF_RegionalExtension_124P0,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
static const ber_tlv_tag_t asn_DEF_regional_tags_10[] = {
	(ASN_TAG_CLASS_CONTEXT | (8 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static asn_SET_OF_specifics_t asn_SPC_regional_specs_10 = {
	sizeof(struct VehicleClassification__regional),
	offsetof(struct VehicleClassification__regional, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_regional_10 = {
	"regional",
	"regional",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_regional_tags_10,
	sizeof(asn_DEF_regional_tags_10)
		/sizeof(asn_DEF_regional_tags_10[0]) - 1, /* 1 */
	asn_DEF_regional_tags_10,	/* Same as above */
	sizeof(asn_DEF_regional_tags_10)
		/sizeof(asn_DEF_regional_tags_10[0]), /* 2 */
	{ &asn_OER_type_regional_constr_10, &asn_PER_type_regional_constr_10, SEQUENCE_OF_constraint },
	asn_MBR_regional_10,
	1,	/* Single element */
	&asn_SPC_regional_specs_10	/* Additional specs */
};

/* Member table: all nine fields of VehicleClassification are OPTIONAL
 * (ATF_POINTER with descending "remaining optionals" counts). */
asn_TYPE_member_t asn_MBR_VehicleClassification_1[] = {
	{ ATF_POINTER, 9, offsetof(struct VehicleClassification, keyType),
		(ASN_TAG_CLASS_CONTEXT | (0 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_BasicVehicleClass,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"keyType"
		},
	{ ATF_POINTER, 8, offsetof(struct VehicleClassification, role),
		(ASN_TAG_CLASS_CONTEXT | (1 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_BasicVehicleRole,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"role"
		},
	{ ATF_POINTER, 7, offsetof(struct VehicleClassification, iso3883),
		(ASN_TAG_CLASS_CONTEXT | (2 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_Iso3833VehicleType,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"iso3883"
		},
	{ ATF_POINTER, 6, offsetof(struct VehicleClassification, hpmsType),
		(ASN_TAG_CLASS_CONTEXT | (3 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_VehicleType,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"hpmsType"
		},
	{ ATF_POINTER, 5, offsetof(struct VehicleClassification, vehicleType),
		(ASN_TAG_CLASS_CONTEXT | (4 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_VehicleGroupAffected,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"vehicleType"
		},
	{ ATF_POINTER, 4, offsetof(struct VehicleClassification, responseEquip),
		(ASN_TAG_CLASS_CONTEXT | (5 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_IncidentResponseEquipment,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"responseEquip"
		},
	{ ATF_POINTER, 3, offsetof(struct VehicleClassification, responderType),
		(ASN_TAG_CLASS_CONTEXT | (6 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_ResponderGroupAffected,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"responderType"
		},
	{ ATF_POINTER, 2, offsetof(struct VehicleClassification, fuelType),
		(ASN_TAG_CLASS_CONTEXT | (7 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_FuelType,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"fuelType"
		},
	{ ATF_POINTER, 1, offsetof(struct VehicleClassification, regional),
		(ASN_TAG_CLASS_CONTEXT | (8 << 2)),
		0,
		&asn_DEF_regional_10,
		0,
		{ &asn_OER_memb_regional_constr_10, &asn_PER_memb_regional_constr_10, memb_regional_constraint_1 },
		0, 0, /* No default value */
		"regional"
		},
};
static const int asn_MAP_VehicleClassification_oms_1[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
static const ber_tlv_tag_t asn_DEF_VehicleClassification_tags_1[] = {
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static const asn_TYPE_tag2member_t asn_MAP_VehicleClassification_tag2el_1[] = {
    { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* keyType */
    { (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 }, /* role */
    { (ASN_TAG_CLASS_CONTEXT | (2 << 2)), 2, 0, 0 }, /* iso3883 */
    { (ASN_TAG_CLASS_CONTEXT | (3 << 2)), 3, 0, 0 }, /* hpmsType */
    { (ASN_TAG_CLASS_CONTEXT | (4 << 2)), 4, 0, 0 }, /* vehicleType */
    { (ASN_TAG_CLASS_CONTEXT | (5 << 2)), 5, 0, 0 }, /* responseEquip */
    { (ASN_TAG_CLASS_CONTEXT | (6 << 2)), 6, 0, 0 }, /* responderType */
    { (ASN_TAG_CLASS_CONTEXT | (7 << 2)), 7, 0, 0 }, /* fuelType */
    { (ASN_TAG_CLASS_CONTEXT | (8 << 2)), 8, 0, 0 } /* regional */
};
asn_SEQUENCE_specifics_t asn_SPC_VehicleClassification_specs_1 = {
	sizeof(struct VehicleClassification),
	offsetof(struct VehicleClassification, _asn_ctx),
	asn_MAP_VehicleClassification_tag2el_1,
	9,	/* Count of tags in the map */
	asn_MAP_VehicleClassification_oms_1,	/* Optional members */
	9, 0,	/* Root/Additions */
	9,	/* First extension addition */
};
asn_TYPE_descriptor_t asn_DEF_VehicleClassification = {
	"VehicleClassification",
	"VehicleClassification",
	&asn_OP_SEQUENCE,
	asn_DEF_VehicleClassification_tags_1,
	sizeof(asn_DEF_VehicleClassification_tags_1)
		/sizeof(asn_DEF_VehicleClassification_tags_1[0]), /* 1 */
	asn_DEF_VehicleClassification_tags_1,	/* Same as above */
	sizeof(asn_DEF_VehicleClassification_tags_1)
		/sizeof(asn_DEF_VehicleClassification_tags_1[0]), /* 1 */
	{ 0, 0, SEQUENCE_constraint },
	asn_MBR_VehicleClassification_1,
	9,	/* Elements count */
	&asn_SPC_VehicleClassification_specs_1	/* Additional specs */
};
731208.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1995, by Sun Microsystems, Inc.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "big5p_unicode.h"	/* Big-5 Plus to Unicode mapping table */

#define MSB	0x80	/* most significant bit */
#define ONEBYTE	0xff	/* right most byte */

/* non-identified character */
#define UTF8_NON_ID_CHAR1 0xEF
#define UTF8_NON_ID_CHAR2 0xBF
#define UTF8_NON_ID_CHAR3 0xBD

/* Per-conversion state carried between iconv() calls. */
typedef struct _icv_state {
	char	keepc[2];	/* maximum # byte of Big-5 code */
	short	cstate;		/* state machine id */
	int	_errno;		/* internal errno */
} _iconv_st;

/* C0: expecting ASCII or a Big-5 lead byte; C1: expecting trail byte. */
enum _CSTATE	{ C0, C1 };

static int big5p_2nd_byte(char);
static int big5p_to_utf8(char[], char*, size_t);
static int binsearch(unsigned long, big5p_utf[], int);

/*
 * Open; called from iconv_open()
 */
void *
_icv_open()
{
	_iconv_st *st;

	if ((st = (_iconv_st *)malloc(sizeof(_iconv_st))) == NULL) {
		errno = ENOMEM;
		return ((void *) -1);
	}

	st->cstate = C0;
	st->_errno = 0;
	return ((void *) st);
}

/*
 * Close; called from iconv_close()
 */
void
_icv_close(_iconv_st *st)
{
	if (!st)
		errno = EBADF;
	else
		free(st);
}

/*
 * Actual conversion; called from iconv()
 */
/*=======================================================
 *
 *   State Machine for interpreting Big-5 code
 *
 *=======================================================
 *
 *                     1st C
 *    +--------> C0 ----------> C1
 *    |  ascii   |    2nd C     |
 *    ^          v              v
 *    +----<-----+-----<--------+
 *
 *=======================================================*/
/*
 * Big-5 Plus encoding range:
 *	High byte: 0x81 - 0xFE
 *	Low byte:  0x40 - 0xFE
 */
size_t
_icv_iconv(_iconv_st *st, char **inbuf, size_t *inbytesleft,
				char **outbuf, size_t *outbytesleft)
{
	int	n;

#ifdef DEBUG
    fprintf(stderr, "==========     iconv(): Big-5 --> UTF2     ==========\n");
#endif
	if (st == NULL) {
		errno = EBADF;
		return ((size_t) -1);
	}

	if (inbuf == NULL || *inbuf == NULL) { /* Reset request. */
		st->cstate = C0;
		st->_errno = 0;
		return ((size_t) 0);
	}

	st->_errno = 0;		/* reset internal errno */
	errno = 0;		/* reset external errno */

	/* a state machine for interpreting CNS 11643 code */
	while (*inbytesleft > 0 && *outbytesleft > 0) {
		switch (st->cstate) {
		case C0:		/* assuming ASCII in the beginning */
			if (**inbuf & MSB) {
				/* lead byte: remember it and wait for trail */
				st->keepc[0] = (**inbuf);
				st->cstate = C1;
			} else {	/* real ASCII */
				**outbuf = **inbuf;
				(*outbuf)++;
				(*outbytesleft)--;
			}
			break;
		case C1:		/* Chinese characters: 2nd byte */
			if (big5p_2nd_byte(**inbuf) == 0) {
				st->keepc[1] = (**inbuf);
				n = big5p_to_utf8(st->keepc, *outbuf,
						*outbytesleft);
				if (n > 0) {
					(*outbuf) += n;
					(*outbytesleft) -= n;

					st->cstate = C0;
				} else {	/* don't reset state */
					st->_errno = errno = E2BIG;
				}
			} else {	/* input char doesn't belong
					 * to the input code set
					 */
				st->_errno = errno = EILSEQ;
			}
			break;
		default:			/* should never come here */
			st->_errno = errno = EILSEQ;
			st->cstate = C0;	/* reset state */
			break;
		}

		if (st->_errno) {
#ifdef DEBUG
    fprintf(stderr, "!!!!!\tst->_errno = %d\tst->cstate = %d\n",
		st->_errno, st->cstate);
#endif
			break;
		}

		(*inbuf)++;
		(*inbytesleft)--;
	}

	if (errno)
		return ((size_t) -1);

	if (*inbytesleft == 0 && st->cstate != C0) {
		/* input ended in the middle of a multibyte character */
		errno = EINVAL;
		return ((size_t) -1);
	}

	if (*inbytesleft > 0 && *outbytesleft == 0) {
		errno = E2BIG;
		return((size_t) -1);
	}
	/* NOTE(review): returns remaining input count rather than the
	 * POSIX "number of non-identical conversions" — confirm this
	 * matches the surrounding iconv module convention. */
	return (*inbytesleft);
}

/*
 * Test whether inbuf is a valid character for 2nd byte Big-5 code
 * Return: = 0 - valid Big-5 2nd byte
 *         = 1 - invalid Big-5 2nd byte
 */
static int
big5p_2nd_byte(char inbuf)
{
	unsigned int	buf = (unsigned int) (inbuf & ONEBYTE);

	if ((buf >= 0x40) && (buf <= 0xFE))
		return(0);
	else
		return(1);
}

/*
 * Big-5 code --> ISO/IEC 10646 (Unicode)
 * Unicode --> UTF8 (FSS-UTF)
 *             (File System Safe Universal Character Set Transformation Format)
 * Return: > 0 - converted with enough space in output buffer
 *         = 0 - no space in outbuf
 *
 * NOTE(review): code points <= 0x0080 or == 0x0800 fall through to the
 * replacement character because the range tests are exclusive at the low
 * end; also, under DEBUG, uni_val is printed even when unidx < 0 leaves
 * it uninitialized — confirm whether the mapping table can produce such
 * values before changing the bounds.
 */
static int
big5p_to_utf8(char keepc[], char *buf, size_t buflen)
{
	unsigned long	big5p_val;	/* Big-5 value */
	int		unidx;		/* Unicode index */
	unsigned long	uni_val;	/* Unicode */

	big5p_val = ((keepc[0]&ONEBYTE) << 8) + (keepc[1]&ONEBYTE);
#ifdef DEBUG
    fprintf(stderr, "%x\t", big5p_val);
#endif

	unidx = binsearch(big5p_val, big5p_utf_tab, MAX_BIG5P_NUM);
	if (unidx >= 0)
		uni_val = big5p_utf_tab[unidx].unicode;
#ifdef DEBUG
    fprintf(stderr, "unidx = %d, unicode = %x\t", unidx, uni_val);
#endif

	if (unidx >= 0) {	/* do Unicode to UTF8 conversion */
		if (uni_val > 0x0080 && uni_val <= 0x07ff) {	/* 2 byte */
			if (buflen < 2) {
#ifdef DEBUG
    fprintf(stderr, "outbuf overflow in big5p_to_utf8()!!\n");
#endif
				errno = E2BIG;
				return(0);
			}
			*buf = (char)((uni_val >> 6) & 0x1f) | 0xc0;
			*(buf+1) = (char)(uni_val & 0x3f) | 0x80;
#ifdef DEBUG
    fprintf(stderr, "%x %x\n", *buf&ONEBYTE, *(buf+1)&ONEBYTE);
#endif
			return(2);
		}

		if (uni_val > 0x0800 && uni_val <= 0xffff) {	/* 3 byte */
			if (buflen < 3) {
#ifdef DEBUG
    fprintf(stderr, "outbuf overflow in big5p_to_utf8()!!\n");
#endif
				errno = E2BIG;
				return(0);
			}
			*buf = (char)((uni_val >> 12) & 0xf) | 0xe0;
			*(buf+1) = (char)((uni_val >>6) & 0x3f) | 0x80;
			*(buf+2) = (char)(uni_val & 0x3f) | 0x80;
#ifdef DEBUG
    fprintf(stderr, "%x %x %x\n", *buf&ONEBYTE, *(buf+1)&ONEBYTE,
		*(buf+2)&ONEBYTE);
#endif
			return(3);
		}
	}

	/* can't find a match in Big-5 --> UTF8 table or illegal UTF8 code */
	if (buflen < 3) {
#ifdef DEBUG
    fprintf(stderr, "outbuf overflow in big5p_to_utf8()!!\n");
#endif
		errno = E2BIG;
		return(0);
	}

	/* emit U+FFFD REPLACEMENT CHARACTER */
	*(unsigned char*) buf     = UTF8_NON_ID_CHAR1;
	*(unsigned char*)(buf+1) = UTF8_NON_ID_CHAR2;
	*(unsigned char*)(buf+2) = UTF8_NON_ID_CHAR3;
#ifdef DEBUG
    fprintf(stderr, "%c %c %c\n", *buf, *(buf+1), *(buf+2));
#endif
	return(3);
}

/* binsearch: find x in v[0] <= v[1] <= ... <= v[n-1] */
static int
binsearch(unsigned long x, big5p_utf v[], int n)
{
	int low, high, mid;

	low = 0;
	high = n - 1;
	while (low <= high) {
		mid = (low + high) / 2;
		if (x < v[mid].big5pcode)
			high = mid - 1;
		else if (x > v[mid].big5pcode)
			low = mid + 1;
		else	/* found match */
			return mid;
	}
	return (-1);	/* no match */
}
590694.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * pSeries NUMA support * * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM */ #define pr_fmt(fmt) "numa: " fmt #include <linux/threads.h> #include <linux/memblock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/pfn.h> #include <linux/cpuset.h> #include <linux/node.h> #include <linux/stop_machine.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/cputhreads.h> #include <asm/sparsemem.h> #include <asm/prom.h> #include <asm/smp.h> #include <asm/topology.h> #include <asm/firmware.h> #include <asm/paca.h> #include <asm/hvcall.h> #include <asm/setup.h> #include <asm/vdso.h> #include <asm/drmem.h> static int numa_enabled = 1; static char *cmdline __initdata; int numa_cpu_lookup_table[NR_CPUS]; cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(node_to_cpumask_map); EXPORT_SYMBOL(node_data); static int primary_domain_index; static int n_mem_addr_cells, n_mem_size_cells; #define FORM0_AFFINITY 0 #define FORM1_AFFINITY 1 #define FORM2_AFFINITY 2 static int affinity_form; #define MAX_DISTANCE_REF_POINTS 4 static int distance_ref_points_depth; static const __be32 *distance_ref_points; static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 } }; static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE }; /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. 
*/ static void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for_each_node(node) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init fake_numa_create_new_node(unsigned long end_pfn, unsigned int *nid) { unsigned long long mem; char *p = cmdline; static unsigned int fake_nid; static unsigned long long curr_boundary; /* * Modify node id, iff we started creating NUMA nodes * We want to continue from where we left of the last time */ if (fake_nid) *nid = fake_nid; /* * In case there are no more arguments to parse, the * node_id should be the same as the last fake node id * (we've handled this above). */ if (!p) return 0; mem = memparse(p, &p); if (!mem) return 0; if (mem < curr_boundary) return 0; curr_boundary = mem; if ((end_pfn << PAGE_SHIFT) > mem) { /* * Skip commas and spaces */ while (*p == ',' || *p == ' ' || *p == '\t') p++; cmdline = p; fake_nid++; *nid = fake_nid; pr_debug("created new fake_node with id %d\n", fake_nid); return 1; } return 0; } static void __init reset_numa_cpu_lookup_table(void) { unsigned int cpu; for_each_possible_cpu(cpu) numa_cpu_lookup_table[cpu] = -1; } void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) { pr_debug("adding cpu %d to node %d\n", cpu, node); cpumask_set_cpu(cpu, node_to_cpumask_map[node]); } } #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) void unmap_cpu_from_node(unsigned long cpu) { int node = numa_cpu_lookup_table[cpu]; if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); pr_debug("removing cpu %lu from node %d\n", cpu, node); } else { pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node); } } #endif /* 
CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ static int __associativity_to_nid(const __be32 *associativity, int max_array_sz) { int nid; /* * primary_domain_index is 1 based array index. */ int index = primary_domain_index - 1; if (!numa_enabled || index >= max_array_sz) return NUMA_NO_NODE; nid = of_read_number(&associativity[index], 1); /* POWER4 LPAR uses 0xffff as invalid node */ if (nid == 0xffff || nid >= nr_node_ids) nid = NUMA_NO_NODE; return nid; } /* * Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA * info is found. */ static int associativity_to_nid(const __be32 *associativity) { int array_sz = of_read_number(associativity, 1); /* Skip the first element in the associativity array */ return __associativity_to_nid((associativity + 1), array_sz); } static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { int dist; int node1, node2; node1 = associativity_to_nid(cpu1_assoc); node2 = associativity_to_nid(cpu2_assoc); dist = numa_distance_table[node1][node2]; if (dist <= LOCAL_DISTANCE) return 0; else if (dist <= REMOTE_DISTANCE) return 1; else return 2; } static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { int dist = 0; int i, index; for (i = 0; i < distance_ref_points_depth; i++) { index = be32_to_cpu(distance_ref_points[i]); if (cpu1_assoc[index] == cpu2_assoc[index]) break; dist++; } return dist; } int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) { /* We should not get called with FORM0 */ VM_WARN_ON(affinity_form == FORM0_AFFINITY); if (affinity_form == FORM1_AFFINITY) return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc); return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc); } /* must hold reference to node during call */ static const __be32 *of_get_associativity(struct device_node *dev) { return of_get_property(dev, "ibm,associativity", NULL); } int __node_distance(int a, int b) { int i; int distance = LOCAL_DISTANCE; if (affinity_form == 
FORM2_AFFINITY) return numa_distance_table[a][b]; else if (affinity_form == FORM0_AFFINITY) return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE); for (i = 0; i < distance_ref_points_depth; i++) { if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) break; /* Double the distance for each NUMA level */ distance *= 2; } return distance; } EXPORT_SYMBOL(__node_distance); /* Returns the nid associated with the given device tree node, * or -1 if not found. */ static int of_node_to_nid_single(struct device_node *device) { int nid = NUMA_NO_NODE; const __be32 *tmp; tmp = of_get_associativity(device); if (tmp) nid = associativity_to_nid(tmp); return nid; } /* Walk the device tree upwards, looking for an associativity id */ int of_node_to_nid(struct device_node *device) { int nid = NUMA_NO_NODE; of_node_get(device); while (device) { nid = of_node_to_nid_single(device); if (nid != -1) break; device = of_get_next_parent(device); } of_node_put(device); return nid; } EXPORT_SYMBOL(of_node_to_nid); static void __initialize_form1_numa_distance(const __be32 *associativity, int max_array_sz) { int i, nid; if (affinity_form != FORM1_AFFINITY) return; nid = __associativity_to_nid(associativity, max_array_sz); if (nid != NUMA_NO_NODE) { for (i = 0; i < distance_ref_points_depth; i++) { const __be32 *entry; int index = be32_to_cpu(distance_ref_points[i]) - 1; /* * broken hierarchy, return with broken distance table */ if (WARN(index >= max_array_sz, "Broken ibm,associativity property")) return; entry = &associativity[index]; distance_lookup_table[nid][i] = of_read_number(entry, 1); } } } static void initialize_form1_numa_distance(const __be32 *associativity) { int array_sz; array_sz = of_read_number(associativity, 1); /* Skip the first element in the associativity array */ __initialize_form1_numa_distance(associativity + 1, array_sz); } /* * Used to update distance information w.r.t newly added node. 
*/ void update_numa_distance(struct device_node *node) { int nid; if (affinity_form == FORM0_AFFINITY) return; else if (affinity_form == FORM1_AFFINITY) { const __be32 *associativity; associativity = of_get_associativity(node); if (!associativity) return; initialize_form1_numa_distance(associativity); return; } /* FORM2 affinity */ nid = of_node_to_nid_single(node); if (nid == NUMA_NO_NODE) return; /* * With FORM2 we expect NUMA distance of all possible NUMA * nodes to be provided during boot. */ WARN(numa_distance_table[nid][nid] == -1, "NUMA distance details for node %d not provided\n", nid); } /* * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN} * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements} */ static void __init initialize_form2_numa_distance_lookup_table(void) { int i, j; struct device_node *root; const __u8 *form2_distances; const __be32 *numa_lookup_index; int form2_distances_length; int max_numa_index, distance_index; if (firmware_has_feature(FW_FEATURE_OPAL)) root = of_find_node_by_path("/ibm,opal"); else root = of_find_node_by_path("/rtas"); if (!root) root = of_find_node_by_path("/"); numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL); max_numa_index = of_read_number(&numa_lookup_index[0], 1); /* first element of the array is the size and is encode-int */ form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL); form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1); /* Skip the size which is encoded int */ form2_distances += sizeof(__be32); pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n", form2_distances_length, max_numa_index); for (i = 0; i < max_numa_index; i++) /* +1 skip the max_numa_index in the property */ numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1); if (form2_distances_length != max_numa_index * max_numa_index) { WARN(1, "Wrong NUMA distance information\n"); form2_distances = NULL; // don't 
use it } distance_index = 0; for (i = 0; i < max_numa_index; i++) { for (j = 0; j < max_numa_index; j++) { int nodeA = numa_id_index_table[i]; int nodeB = numa_id_index_table[j]; int dist; if (form2_distances) dist = form2_distances[distance_index++]; else if (nodeA == nodeB) dist = LOCAL_DISTANCE; else dist = REMOTE_DISTANCE; numa_distance_table[nodeA][nodeB] = dist; pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist); } } of_node_put(root); } static int __init find_primary_domain_index(void) { int index; struct device_node *root; /* * Check for which form of affinity. */ if (firmware_has_feature(FW_FEATURE_OPAL)) { affinity_form = FORM1_AFFINITY; } else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) { pr_debug("Using form 2 affinity\n"); affinity_form = FORM2_AFFINITY; } else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) { pr_debug("Using form 1 affinity\n"); affinity_form = FORM1_AFFINITY; } else affinity_form = FORM0_AFFINITY; if (firmware_has_feature(FW_FEATURE_OPAL)) root = of_find_node_by_path("/ibm,opal"); else root = of_find_node_by_path("/rtas"); if (!root) root = of_find_node_by_path("/"); /* * This property is a set of 32-bit integers, each representing * an index into the ibm,associativity nodes. * * With form 0 affinity the first integer is for an SMP configuration * (should be all 0's) and the second is for a normal NUMA * configuration. We have only one level of NUMA. * * With form 1 affinity the first integer is the most significant * NUMA boundary and the following are progressively less significant * boundaries. There can be more than one level of NUMA. 
*/ distance_ref_points = of_get_property(root, "ibm,associativity-reference-points", &distance_ref_points_depth); if (!distance_ref_points) { pr_debug("ibm,associativity-reference-points not found.\n"); goto err; } distance_ref_points_depth /= sizeof(int); if (affinity_form == FORM0_AFFINITY) { if (distance_ref_points_depth < 2) { pr_warn("short ibm,associativity-reference-points\n"); goto err; } index = of_read_number(&distance_ref_points[1], 1); } else { /* * Both FORM1 and FORM2 affinity find the primary domain details * at the same offset. */ index = of_read_number(distance_ref_points, 1); } /* * Warn and cap if the hardware supports more than * MAX_DISTANCE_REF_POINTS domains. */ if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) { pr_warn("distance array capped at %d entries\n", MAX_DISTANCE_REF_POINTS); distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; } of_node_put(root); return index; err: of_node_put(root); return -1; } static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) { struct device_node *memory = NULL; memory = of_find_node_by_type(memory, "memory"); if (!memory) panic("numa.c: No memory nodes found!"); *n_addr_cells = of_n_addr_cells(memory); *n_size_cells = of_n_size_cells(memory); of_node_put(memory); } static unsigned long read_n_cells(int n, const __be32 **buf) { unsigned long result = 0; while (n--) { result = (result << 32) | of_read_number(*buf, 1); (*buf)++; } return result; } struct assoc_arrays { u32 n_arrays; u32 array_sz; const __be32 *arrays; }; /* * Retrieve and validate the list of associativity arrays for drconf * memory from the ibm,associativity-lookup-arrays property of the * device tree.. * * The layout of the ibm,associativity-lookup-arrays property is a number N * indicating the number of associativity arrays, followed by a number M * indicating the size of each associativity array, followed by a list * of N associativity arrays. 
*/ static int of_get_assoc_arrays(struct assoc_arrays *aa) { struct device_node *memory; const __be32 *prop; u32 len; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (!memory) return -1; prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); if (!prop || len < 2 * sizeof(unsigned int)) { of_node_put(memory); return -1; } aa->n_arrays = of_read_number(prop++, 1); aa->array_sz = of_read_number(prop++, 1); of_node_put(memory); /* Now that we know the number of arrays and size of each array, * revalidate the size of the property read in. */ if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int)) return -1; aa->arrays = prop; return 0; } static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb) { struct assoc_arrays aa = { .arrays = NULL }; int default_nid = NUMA_NO_NODE; int nid = default_nid; int rc, index; if ((primary_domain_index < 0) || !numa_enabled) return default_nid; rc = of_get_assoc_arrays(&aa); if (rc) return default_nid; if (primary_domain_index <= aa.array_sz && !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) { const __be32 *associativity; index = lmb->aa_index * aa.array_sz; associativity = &aa.arrays[index]; nid = __associativity_to_nid(associativity, aa.array_sz); if (nid > 0 && affinity_form == FORM1_AFFINITY) { /* * lookup array associativity entries have * no length of the array as the first element. */ __initialize_form1_numa_distance(associativity, aa.array_sz); } } return nid; } /* * This is like of_node_to_nid_single() for memory represented in the * ibm,dynamic-reconfiguration-memory node. 
*/ int of_drconf_to_nid_single(struct drmem_lmb *lmb) { struct assoc_arrays aa = { .arrays = NULL }; int default_nid = NUMA_NO_NODE; int nid = default_nid; int rc, index; if ((primary_domain_index < 0) || !numa_enabled) return default_nid; rc = of_get_assoc_arrays(&aa); if (rc) return default_nid; if (primary_domain_index <= aa.array_sz && !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) { const __be32 *associativity; index = lmb->aa_index * aa.array_sz; associativity = &aa.arrays[index]; nid = __associativity_to_nid(associativity, aa.array_sz); } return nid; } #ifdef CONFIG_PPC_SPLPAR static int __vphn_get_associativity(long lcpu, __be32 *associativity) { long rc, hwid; /* * On a shared lpar, device tree will not have node associativity. * At this time lppaca, or its __old_status field may not be * updated. Hence kernel cannot detect if its on a shared lpar. So * request an explicit associativity irrespective of whether the * lpar is shared or dedicated. Use the device tree property as a * fallback. cpu_to_phys_id is only valid between * smp_setup_cpu_maps() and smp_setup_pacas(). */ if (firmware_has_feature(FW_FEATURE_VPHN)) { if (cpu_to_phys_id) hwid = cpu_to_phys_id[lcpu]; else hwid = get_hard_smp_processor_id(lcpu); rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity); if (rc == H_SUCCESS) return 0; } return -1; } static int vphn_get_nid(long lcpu) { __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; if (!__vphn_get_associativity(lcpu, associativity)) return associativity_to_nid(associativity); return NUMA_NO_NODE; } #else static int __vphn_get_associativity(long lcpu, __be32 *associativity) { return -1; } static int vphn_get_nid(long unused) { return NUMA_NO_NODE; } #endif /* CONFIG_PPC_SPLPAR */ /* * Figure out to which domain a cpu belongs and stick it there. * Return the id of the domain used. 
*/ static int numa_setup_cpu(unsigned long lcpu) { struct device_node *cpu; int fcpu = cpu_first_thread_sibling(lcpu); int nid = NUMA_NO_NODE; if (!cpu_present(lcpu)) { set_cpu_numa_node(lcpu, first_online_node); return first_online_node; } /* * If a valid cpu-to-node mapping is already available, use it * directly instead of querying the firmware, since it represents * the most recent mapping notified to us by the platform (eg: VPHN). * Since cpu_to_node binding remains the same for all threads in the * core. If a valid cpu-to-node mapping is already available, for * the first thread in the core, use it. */ nid = numa_cpu_lookup_table[fcpu]; if (nid >= 0) { map_cpu_to_node(lcpu, nid); return nid; } nid = vphn_get_nid(lcpu); if (nid != NUMA_NO_NODE) goto out_present; cpu = of_get_cpu_node(lcpu, NULL); if (!cpu) { WARN_ON(1); if (cpu_present(lcpu)) goto out_present; else goto out; } nid = of_node_to_nid_single(cpu); of_node_put(cpu); out_present: if (nid < 0 || !node_possible(nid)) nid = first_online_node; /* * Update for the first thread of the core. All threads of a core * have to be part of the same node. This not only avoids querying * for every other thread in the core, but always avoids a case * where virtual node associativity change causes subsequent threads * of a core to be associated with different nid. However if first * thread is already online, expect it to have a valid mapping. 
*/ if (fcpu != lcpu) { WARN_ON(cpu_online(fcpu)); map_cpu_to_node(fcpu, nid); } map_cpu_to_node(lcpu, nid); out: return nid; } static void verify_cpu_node_mapping(int cpu, int node) { int base, sibling, i; /* Verify that all the threads in the core belong to the same node */ base = cpu_first_thread_sibling(cpu); for (i = 0; i < threads_per_core; i++) { sibling = base + i; if (sibling == cpu || cpu_is_offline(sibling)) continue; if (cpu_to_node(sibling) != node) { WARN(1, "CPU thread siblings %d and %d don't belong" " to the same node!\n", cpu, sibling); break; } } } /* Must run before sched domains notifier. */ static int ppc_numa_cpu_prepare(unsigned int cpu) { int nid; nid = numa_setup_cpu(cpu); verify_cpu_node_mapping(cpu, nid); return 0; } static int ppc_numa_cpu_dead(unsigned int cpu) { return 0; } /* * Check and possibly modify a memory region to enforce the memory limit. * * Returns the size the region should have to enforce the memory limit. * This will either be the original value of size, a truncated value, * or zero. If the returned value of size is 0 the region should be * discarded as it lies wholly above the memory limit. */ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size) { /* * We use memblock_end_of_DRAM() in here instead of memory_limit because * we've already adjusted it for the limit and it takes care of * having memory holes below the limit. Also, in the case of * iommu_is_off, memory_limit is not set but is implicitly enforced. */ if (start + size <= memblock_end_of_DRAM()) return size; if (start >= memblock_end_of_DRAM()) return 0; return memblock_end_of_DRAM() - start; } /* * Reads the counter for a given entry in * linux,drconf-usable-memory property */ static inline int __init read_usm_ranges(const __be32 **usm) { /* * For each lmb in ibm,dynamic-memory a corresponding * entry in linux,drconf-usable-memory property contains * a counter followed by that many (base, size) duple. 
* read the counter from linux,drconf-usable-memory */ return read_n_cells(n_mem_size_cells, usm); } /* * Extract NUMA information from the ibm,dynamic-reconfiguration-memory * node. This assumes n_mem_{addr,size}_cells have been set. */ static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, const __be32 **usm, void *data) { unsigned int ranges, is_kexec_kdump = 0; unsigned long base, size, sz; int nid; /* * Skip this block if the reserved bit is set in flags (0x80) * or if the block is not assigned to this partition (0x8) */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) return 0; if (*usm) is_kexec_kdump = 1; base = lmb->base_addr; size = drmem_lmb_size(); ranges = 1; if (is_kexec_kdump) { ranges = read_usm_ranges(usm); if (!ranges) /* there are no (base, size) duple */ return 0; } do { if (is_kexec_kdump) { base = read_n_cells(n_mem_addr_cells, usm); size = read_n_cells(n_mem_size_cells, usm); } nid = get_nid_and_numa_distance(lmb); fake_numa_create_new_node(((base + size) >> PAGE_SHIFT), &nid); node_set_online(nid); sz = numa_enforce_memory_limit(base, size); if (sz) memblock_set_node(base, sz, &memblock.memory, nid); } while (--ranges); return 0; } static int __init parse_numa_properties(void) { struct device_node *memory; int default_nid = 0; unsigned long i; const __be32 *associativity; if (numa_enabled == 0) { pr_warn("disabled by user\n"); return -1; } primary_domain_index = find_primary_domain_index(); if (primary_domain_index < 0) { /* * if we fail to parse primary_domain_index from device tree * mark the numa disabled, boot with numa disabled. */ numa_enabled = false; return primary_domain_index; } pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index); /* * If it is FORM2 initialize the distance table here. */ if (affinity_form == FORM2_AFFINITY) initialize_form2_numa_distance_lookup_table(); /* * Even though we connect cpus to numa domains later in SMP * init, we need to know the node ids now. 
This is because * each node to be onlined must have NODE_DATA etc backing it. */ for_each_present_cpu(i) { __be32 vphn_assoc[VPHN_ASSOC_BUFSIZE]; struct device_node *cpu; int nid = NUMA_NO_NODE; memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32)); if (__vphn_get_associativity(i, vphn_assoc) == 0) { nid = associativity_to_nid(vphn_assoc); initialize_form1_numa_distance(vphn_assoc); } else { /* * Don't fall back to default_nid yet -- we will plug * cpus into nodes once the memory scan has discovered * the topology. */ cpu = of_get_cpu_node(i, NULL); BUG_ON(!cpu); associativity = of_get_associativity(cpu); if (associativity) { nid = associativity_to_nid(associativity); initialize_form1_numa_distance(associativity); } of_node_put(cpu); } node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); for_each_node_by_type(memory, "memory") { unsigned long start; unsigned long size; int nid; int ranges; const __be32 *memcell_buf; unsigned int len; memcell_buf = of_get_property(memory, "linux,usable-memory", &len); if (!memcell_buf || len <= 0) memcell_buf = of_get_property(memory, "reg", &len); if (!memcell_buf || len <= 0) continue; /* ranges in cell */ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); new_range: /* these are order-sensitive, and modify the buffer pointer */ start = read_n_cells(n_mem_addr_cells, &memcell_buf); size = read_n_cells(n_mem_size_cells, &memcell_buf); /* * Assumption: either all memory nodes or none will * have associativity properties. If none, then * everything goes to default_nid. 
*/ associativity = of_get_associativity(memory); if (associativity) { nid = associativity_to_nid(associativity); initialize_form1_numa_distance(associativity); } else nid = default_nid; fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); node_set_online(nid); size = numa_enforce_memory_limit(start, size); if (size) memblock_set_node(start, size, &memblock.memory, nid); if (--ranges) goto new_range; } /* * Now do the same thing for each MEMBLOCK listed in the * ibm,dynamic-memory property in the * ibm,dynamic-reconfiguration-memory node. */ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb); of_node_put(memory); } return 0; } static void __init setup_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; int i; pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), &memblock.memory, nid); node_set_online(nid); } } void __init dump_numa_cpu_topology(void) { unsigned int node; unsigned int cpu, count; if (!numa_enabled) return; for_each_online_node(node) { pr_info("Node %d CPUs:", node); count = 0; /* * If we used a CPU iterator here we would miss printing * the holes in the cpumap. 
*/ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { if (count == 0) pr_cont(" %u", cpu); ++count; } else { if (count > 1) pr_cont("-%u", cpu - 1); count = 0; } } if (count > 1) pr_cont("-%u", nr_cpu_ids - 1); pr_cont("\n"); } } /* Initialize NODE_DATA for a node on the local memory */ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { u64 spanned_pages = end_pfn - start_pfn; const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); u64 nd_pa; void *nd; int tnid; nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); if (!nd_pa) panic("Cannot allocate %zu bytes for node %d data\n", nd_size, nid); nd = __va(nd_pa); /* report and initialize */ pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n", nd_pa, nd_pa + nd_size - 1); tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); if (tnid != nid) pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid); node_data[nid] = nd; memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); NODE_DATA(nid)->node_id = nid; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = spanned_pages; } static void __init find_possible_nodes(void) { struct device_node *rtas; const __be32 *domains = NULL; int prop_length, max_nodes; u32 i; if (!numa_enabled) return; rtas = of_find_node_by_path("/rtas"); if (!rtas) return; /* * ibm,current-associativity-domains is a fairly recent property. If * it doesn't exist, then fallback on ibm,max-associativity-domains. * Current denotes what the platform can support compared to max * which denotes what the Hypervisor can support. * * If the LPAR is migratable, new nodes might be activated after a LPM, * so we should consider the max number in that case. 
*/ if (!of_get_property(of_root, "ibm,migratable-partition", NULL)) domains = of_get_property(rtas, "ibm,current-associativity-domains", &prop_length); if (!domains) { domains = of_get_property(rtas, "ibm,max-associativity-domains", &prop_length); if (!domains) goto out; } max_nodes = of_read_number(&domains[primary_domain_index], 1); pr_info("Partition configured for %d NUMA nodes.\n", max_nodes); for (i = 0; i < max_nodes; i++) { if (!node_possible(i)) node_set(i, node_possible_map); } prop_length /= sizeof(int); if (prop_length > primary_domain_index + 2) coregroup_enabled = 1; out: of_node_put(rtas); } void __init mem_topology_setup(void) { int cpu; /* * Linux/mm assumes node 0 to be online at boot. However this is not * true on PowerPC, where node 0 is similar to any other node, it * could be cpuless, memoryless node. So force node 0 to be offline * for now. This will prevent cpuless, memoryless node 0 showing up * unnecessarily as online. If a node has cpus or memory that need * to be online, then node will anyway be marked online. */ node_set_offline(0); if (parse_numa_properties()) setup_nonnuma(); /* * Modify the set of possible NUMA nodes to reflect information * available about the set of online nodes, and the set of nodes * that we expect to make use of for this platform's affinity * calculations. */ nodes_and(node_possible_map, node_possible_map, node_online_map); find_possible_nodes(); setup_node_to_cpumask_map(); reset_numa_cpu_lookup_table(); for_each_possible_cpu(cpu) { /* * Powerpc with CONFIG_NUMA always used to have a node 0, * even if it was memoryless or cpuless. For all cpus that * are possible but not present, cpu_to_node() would point * to node 0. To remove a cpuless, memoryless dummy node, * powerpc need to make sure all possible but not present * cpu_to_node are set to a proper node. 
*/ numa_setup_cpu(cpu); } } void __init initmem_init(void) { int nid; max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn; memblock_dump_all(); for_each_online_node(nid) { unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); setup_node_data(nid, start_pfn, end_pfn); } sparse_init(); /* * We need the numa_cpu_lookup_table to be accurate for all CPUs, * even before we online them, so that we can use cpu_to_{node,mem} * early in boot, cf. smp_prepare_cpus(). * _nocalls() + manual invocation is used because cpuhp is not yet * initialized for the boot CPU. */ cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare", ppc_numa_cpu_prepare, ppc_numa_cpu_dead); } static int __init early_numa(char *p) { if (!p) return 0; if (strstr(p, "off")) numa_enabled = 0; p = strstr(p, "fake="); if (p) cmdline = p + strlen("fake="); return 0; } early_param("numa", early_numa); #ifdef CONFIG_MEMORY_HOTPLUG /* * Find the node associated with a hot added memory section for * memory represented in the device tree by the property * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. */ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr) { struct drmem_lmb *lmb; unsigned long lmb_size; int nid = NUMA_NO_NODE; lmb_size = drmem_lmb_size(); for_each_drmem_lmb(lmb) { /* skip this block if it is reserved or not assigned to * this partition */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) continue; if ((scn_addr < lmb->base_addr) || (scn_addr >= (lmb->base_addr + lmb_size))) continue; nid = of_drconf_to_nid_single(lmb); break; } return nid; } /* * Find the node associated with a hot added memory section for memory * represented in the device tree as a node (i.e. memory@XXXX) for * each memblock. 
*/ static int hot_add_node_scn_to_nid(unsigned long scn_addr) { struct device_node *memory; int nid = NUMA_NO_NODE; for_each_node_by_type(memory, "memory") { unsigned long start, size; int ranges; const __be32 *memcell_buf; unsigned int len; memcell_buf = of_get_property(memory, "reg", &len); if (!memcell_buf || len <= 0) continue; /* ranges in cell */ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); while (ranges--) { start = read_n_cells(n_mem_addr_cells, &memcell_buf); size = read_n_cells(n_mem_size_cells, &memcell_buf); if ((scn_addr < start) || (scn_addr >= (start + size))) continue; nid = of_node_to_nid_single(memory); break; } if (nid >= 0) break; } of_node_put(memory); return nid; } /* * Find the node associated with a hot added memory section. Section * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that * sections are fully contained within a single MEMBLOCK. */ int hot_add_scn_to_nid(unsigned long scn_addr) { struct device_node *memory = NULL; int nid; if (!numa_enabled) return first_online_node; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { nid = hot_add_drconf_scn_to_nid(scn_addr); of_node_put(memory); } else { nid = hot_add_node_scn_to_nid(scn_addr); } if (nid < 0 || !node_possible(nid)) nid = first_online_node; return nid; } static u64 hot_add_drconf_memory_max(void) { struct device_node *memory = NULL; struct device_node *dn = NULL; const __be64 *lrdr = NULL; dn = of_find_node_by_path("/rtas"); if (dn) { lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL); of_node_put(dn); if (lrdr) return be64_to_cpup(lrdr); } memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { of_node_put(memory); return drmem_lmb_memory_max(); } return 0; } /* * memory_hotplug_max - return max address of memory that may be added * * This is currently only used on systems that support drconfig memory * hotplug. 
 */
u64 memory_hotplug_max(void)
{
	/* larger of the drconf hot-add ceiling and the end of boot-time RAM */
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

/* set once topology_update_init() has run (late in boot) */
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
				"preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
				"Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n"
				, rc);
		break;
	}
out:
	return rc;
}

/*
 * Map a CPU to its (possibly changed) home node via VPHN, falling back to
 * cpu_to_node() when the hcall fails and to first_online_node when the
 * reported node is impossible or has no initialized NODE_DATA.
 */
int find_and_online_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return cpu_to_node(cpu);

	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;

	if (NODE_DATA(new_nid) == NULL) {
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Need to ensure that NODE_DATA is initialized for a node from
		 * available memory (see memblock_alloc_try_nid). If unable to
		 * init the node, then default to nearest node that has memory
		 * installed. Skip onlining a node if the subsystems are not
		 * yet initialized.
		 */
		if (!topology_inited || try_online_node(new_nid))
			new_nid = first_online_node;
#else
		/*
		 * Default to using the nearest node that has memory installed.
		 * Otherwise, it would be necessary to patch the kernel MM code
		 * to deal with more memoryless-node error conditions.
		 */
		new_nid = first_online_node;
#endif
	}

	pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__,
		cpu, new_nid);
	return new_nid;
}

/*
 * Return the coregroup id for a CPU from its VPHN associativity, or the
 * plain core id when coregroups are disabled, VPHN is unavailable, or the
 * lookup fails.
 */
int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	/*
	 * NOTE(review): this accepts cpu == nr_cpu_ids; valid ids are
	 * 0..nr_cpu_ids-1, so confirm whether this should be >=.
	 */
	if (cpu < 0 || cpu > nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	/* first cell is the number of associativity domains that follow */
	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}

/* Late-boot marker: from here on, try_online_node() may be attempted. */
static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
259017.c
/* $NetBSD: bootxx.c,v 1.23 1999/11/27 06:34:06 simonb Exp $ */ /*- * Copyright (c) 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jonathan Stone, Michael Hitch and Simon Burge. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 *	@(#)boot.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/exec_elf.h>

#include <lib/libsa/stand.h>

#include <machine/dec_prom.h>

/* entry point of the secondary bootstrap: (argc, argv, magic, callvector) */
typedef void (*entrypt) __P((int, char **, int, const void *));

int main __P((int, char **));
entrypt loadfile __P((char *path, char *name));

extern int clear_cache __P((char *addr, int len));
extern int bcmp __P((const void *, const void *, size_t));	/* XXX */

/*
 * This gets arguments from the PROM, calls other routines to open
 * and load the secondary boot loader called boot, and then transfers
 * execution to that program.
 *
 * Argv[0] should be something like "rz(0,0,0)netbsd" on a DECstation 3100.
 * Argv[0,1] should be something like "boot 5/rz0/netbsd" on a DECstation 5000.
 * The argument "-a" means netbsd should do an automatic reboot.
 */
int
main(argc, argv)
	int argc;
	char **argv;
{
	char *cp;
	entrypt entry;

	/* check for DS5000 boot */
	if (strcmp(argv[0], "boot") == 0) {
		argc--;
		argv++;
	}
	cp = *argv;

	printf("\nNetBSD/pmax " NETBSD_VERS " " BOOTXX_FS_NAME
	    " Primary Bootstrap\n");

	/* try the current name, then two historical fallbacks */
	entry = loadfile(cp, "/boot.pmax");
	if ((int)entry != -1)
		goto goodload;

	/* Give old /boot a go... */
	entry = loadfile(cp, "/boot");
	if ((int)entry != -1)
		goto goodload;

	/* Booting off an 8.3 filesystem? */
	entry = loadfile(cp, "/boot.pma");
	if ((int)entry != -1)
		goto goodload;

	goto bad;

goodload:
	/* flush I/D caches over the freshly loaded image before jumping in */
	clear_cache((char *)PRIMARY_LOAD_ADDRESS, 1024 * 1024);

	/* old PROMs get a zero magic; new ones receive the callback vector */
	if (callv == &callvec)
		entry(argc, argv, 0, 0);
	else
		entry(argc, argv, DEC_PROM_MAGIC, callv);

bad:
	/* XXX would calling prom_halt here be cleaner? */
	return (1);
}

/*
 * Open 'filename', read in program and return the entry point or -1 if error.
 */
entrypt
loadfile(path, name)
	char *path, *name;
{
	int fd, i;
	char c, *buf, bootfname[64];
	Elf32_Ehdr ehdr;
	Elf32_Phdr phdr;

	/*
	 * Build the boot file name: strip anything after a ')' device spec,
	 * or after the second '/' of a "N/rzY/..." spec, then append 'name'.
	 */
	strcpy(bootfname, path);
	buf = bootfname;
	while ((c = *buf++) != '\0') {
		if (c == ')')
			break;
		if (c != '/')
			continue;
		while ((c = *buf++) != '\0')
			if (c == '/')
				break;
		/*
		 * Make "N/rzY" with no trailing '/' valid by adding
		 * the extra '/' before appending 'bootpmax' to the path.
		 */
		if (c != '/') {
			buf--;
			*buf++ = '/';
			*buf = '\0';
		}
		break;
	}
	strcpy(buf, name);

	if ((fd = open(bootfname, 0)) < 0) {
		printf("open %s: %d\n", bootfname, errno);
		goto err;
	}

	/* read the exec header */
	i = read(fd, (char *)&ehdr, sizeof(ehdr));
	if ((i != sizeof(ehdr)) ||
	    (bcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) ||
	    (ehdr.e_ident[EI_CLASS] != ELFCLASS32)) {
		printf("%s: No ELF header\n", bootfname);
		goto cerr;
	}

	/* copy every PT_LOAD segment straight to its physical address */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (lseek(fd, (off_t) ehdr.e_phoff + i * sizeof(phdr), 0) < 0)
			goto cerr;
		if (read(fd, &phdr, sizeof(phdr)) != sizeof(phdr))
			goto cerr;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (lseek(fd, (off_t)phdr.p_offset, 0) < 0)
			goto cerr;
		if (read(fd, (char *)phdr.p_paddr, phdr.p_filesz) != phdr.p_filesz)
			goto cerr;
	}
	return ((entrypt)ehdr.e_entry);

cerr:
#ifndef LIBSA_NO_FS_CLOSE
	(void) close(fd);
#endif
err:
	printf("Can't load '%s'\n", bootfname);
	return ((entrypt)-1);
}
377724.c
/* * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana * University Research and Technology * Corporation. All rights reserved. * Copyright (c) 2004-2013 The University of Tennessee and The University * of Tennessee Research Foundation. All rights * reserved. * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * University of Stuttgart. All rights reserved. * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2006 Los Alamos National Security, LLC. All rights * reserved. * Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. * * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ #include "ompi_config.h" #include <string.h> #include "opal/class/opal_bitmap.h" #include "ompi/mca/btl/btl.h" #include "btl_tcp2.h" #include "btl_tcp2_frag.h" #include "btl_tcp2_proc.h" #include "btl_tcp2_endpoint.h" #include "opal/datatype/opal_convertor.h" #include "ompi/mca/mpool/base/base.h" #include "ompi/mca/mpool/mpool.h" #include "ompi/proc/proc.h" mca_btl_tcp2_module_t mca_btl_tcp2_module = { { &mca_btl_tcp2_component.super, 0, /* max size of first fragment */ 0, /* min send fragment size */ 0, /* max send fragment size */ 0, /* btl_rdma_pipeline_send_length */ 0, /* btl_rdma_pipeline_frag_size */ 0, /* btl_min_rdma_pipeline_size */ 0, /* exclusivity */ 0, /* latency */ 0, /* bandwidth */ 0, /* flags */ mca_btl_tcp2_add_procs, mca_btl_tcp2_del_procs, NULL, mca_btl_tcp2_finalize, mca_btl_tcp2_alloc, mca_btl_tcp2_free, mca_btl_tcp2_prepare_src, mca_btl_tcp2_prepare_dst, mca_btl_tcp2_send, NULL, /* send immediate */ mca_btl_tcp2_put, NULL, /* get */ mca_btl_base_dump, NULL, /* mpool */ NULL, /* register error */ mca_btl_tcp2_ft_event } }; /** * */ int mca_btl_tcp2_add_procs( struct mca_btl_base_module_t* btl, size_t nprocs, struct ompi_proc_t **ompi_procs, struct mca_btl_base_endpoint_t** peers, opal_bitmap_t* reachable ) { mca_btl_tcp2_module_t* tcp_btl = 
(mca_btl_tcp2_module_t*)btl; ompi_proc_t* my_proc; /* pointer to caller's proc structure */ int i, rc; /* get pointer to my proc structure */ my_proc = ompi_proc_local(); if( NULL == my_proc ) { return OMPI_ERR_OUT_OF_RESOURCE; } for(i = 0; i < (int) nprocs; i++) { struct ompi_proc_t* ompi_proc = ompi_procs[i]; mca_btl_tcp2_proc_t* tcp_proc; mca_btl_base_endpoint_t* tcp_endpoint; /* Do not create loopback TCP connections */ if( my_proc == ompi_proc ) { continue; } if(NULL == (tcp_proc = mca_btl_tcp2_proc_create(ompi_proc))) { return OMPI_ERR_OUT_OF_RESOURCE; } /* * Check to make sure that the peer has at least as many interface * addresses exported as we are trying to use. If not, then * don't bind this BTL instance to the proc. */ OPAL_THREAD_LOCK(&tcp_proc->proc_lock); /* The btl_proc datastructure is shared by all TCP BTL * instances that are trying to reach this destination. * Cache the peer instance on the btl_proc. */ tcp_endpoint = OBJ_NEW(mca_btl_tcp2_endpoint_t); if(NULL == tcp_endpoint) { OPAL_THREAD_UNLOCK(&tcp_proc->proc_lock); return OMPI_ERR_OUT_OF_RESOURCE; } tcp_endpoint->endpoint_btl = tcp_btl; rc = mca_btl_tcp2_proc_insert(tcp_proc, tcp_endpoint); if(rc != OMPI_SUCCESS) { OPAL_THREAD_UNLOCK(&tcp_proc->proc_lock); OBJ_RELEASE(tcp_endpoint); continue; } opal_bitmap_set_bit(reachable, i); OPAL_THREAD_UNLOCK(&tcp_proc->proc_lock); peers[i] = tcp_endpoint; opal_list_append(&tcp_btl->tcp_endpoints, (opal_list_item_t*)tcp_endpoint); /* we increase the count of MPI users of the event library once per peer, so that we are used until we aren't connected to a peer */ opal_progress_event_users_increment(); } return OMPI_SUCCESS; } int mca_btl_tcp2_del_procs(struct mca_btl_base_module_t* btl, size_t nprocs, struct ompi_proc_t **procs, struct mca_btl_base_endpoint_t ** endpoints) { mca_btl_tcp2_module_t* tcp_btl = (mca_btl_tcp2_module_t*)btl; size_t i; for(i=0; i<nprocs; i++) { mca_btl_tcp2_endpoint_t* tcp_endpoint = endpoints[i]; if(tcp_endpoint->endpoint_proc 
!= mca_btl_tcp2_proc_local()) { opal_list_remove_item(&tcp_btl->tcp_endpoints, (opal_list_item_t*)tcp_endpoint); OBJ_RELEASE(tcp_endpoint); } opal_progress_event_users_decrement(); } return OMPI_SUCCESS; } /** * Allocate a segment. * * @param btl (IN) BTL module * @param size (IN) Request segment size. */ mca_btl_base_descriptor_t* mca_btl_tcp2_alloc( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, uint8_t order, size_t size, uint32_t flags) { mca_btl_tcp2_frag_t* frag = NULL; if(size <= btl->btl_eager_limit) { MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag); } else if (size <= btl->btl_max_send_size) { MCA_BTL_TCP_FRAG_ALLOC_MAX(frag); } if( OPAL_UNLIKELY(NULL == frag) ) { return NULL; } frag->segments[0].seg_len = size; frag->segments[0].seg_addr.pval = frag+1; frag->base.des_src = frag->segments; frag->base.des_src_cnt = 1; frag->base.des_dst = NULL; frag->base.des_dst_cnt = 0; frag->base.des_flags = flags; frag->base.order = MCA_BTL_NO_ORDER; frag->btl = (mca_btl_tcp2_module_t*)btl; return (mca_btl_base_descriptor_t*)frag; } /** * Return a segment */ int mca_btl_tcp2_free( struct mca_btl_base_module_t* btl, mca_btl_base_descriptor_t* des) { mca_btl_tcp2_frag_t* frag = (mca_btl_tcp2_frag_t*)des; MCA_BTL_TCP_FRAG_RETURN(frag); return OMPI_SUCCESS; } /** * Pack data and return a descriptor that can be * used for send/put. 
* * @param btl (IN) BTL module * @param peer (IN) BTL peer addressing */ mca_btl_base_descriptor_t* mca_btl_tcp2_prepare_src( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, struct mca_mpool_base_registration_t* registration, struct opal_convertor_t* convertor, uint8_t order, size_t reserve, size_t* size, uint32_t flags) { mca_btl_tcp2_frag_t* frag; struct iovec iov; uint32_t iov_count = 1; size_t max_data = *size; int rc; if( OPAL_UNLIKELY(max_data > UINT32_MAX) ) { /* limit the size to what we support */ max_data = (size_t)UINT32_MAX; } /* * if we aren't pinning the data and the requested size is less * than the eager limit pack into a fragment from the eager pool */ if (max_data+reserve <= btl->btl_eager_limit) { MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag); } else { /* * otherwise pack as much data as we can into a fragment * that is the max send size. */ MCA_BTL_TCP_FRAG_ALLOC_MAX(frag); } if( OPAL_UNLIKELY(NULL == frag) ) { return NULL; } frag->segments[0].seg_addr.pval = (frag + 1); frag->segments[0].seg_len = reserve; frag->base.des_src_cnt = 1; if(opal_convertor_need_buffers(convertor)) { if (max_data + reserve > frag->size) { max_data = frag->size - reserve; } iov.iov_len = max_data; iov.iov_base = (IOVBASE_TYPE*)(((unsigned char*)(frag->segments[0].seg_addr.pval)) + reserve); rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data ); if( OPAL_UNLIKELY(rc < 0) ) { mca_btl_tcp2_free(btl, &frag->base); return NULL; } frag->segments[0].seg_len += max_data; } else { iov.iov_len = max_data; iov.iov_base = NULL; rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data ); if( OPAL_UNLIKELY(rc < 0) ) { mca_btl_tcp2_free(btl, &frag->base); return NULL; } frag->segments[1].seg_addr.pval = iov.iov_base; frag->segments[1].seg_len = max_data; frag->base.des_src_cnt = 2; } frag->base.des_src = frag->segments; frag->base.des_dst = NULL; frag->base.des_dst_cnt = 0; frag->base.des_flags = flags; frag->base.order = MCA_BTL_NO_ORDER; *size 
= max_data; return &frag->base; } /** * Prepare a descriptor for send/rdma using the supplied * convertor. If the convertor references data that is contigous, * the descriptor may simply point to the user buffer. Otherwise, * this routine is responsible for allocating buffer space and * packing if required. * * @param btl (IN) BTL module * @param endpoint (IN) BTL peer addressing * @param convertor (IN) Data type convertor * @param reserve (IN) Additional bytes requested by upper layer to precede user data * @param size (IN/OUT) Number of bytes to prepare (IN), number of bytes actually prepared (OUT) */ mca_btl_base_descriptor_t* mca_btl_tcp2_prepare_dst( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, struct mca_mpool_base_registration_t* registration, struct opal_convertor_t* convertor, uint8_t order, size_t reserve, size_t* size, uint32_t flags) { mca_btl_tcp2_frag_t* frag; if( OPAL_UNLIKELY((*size) > UINT32_MAX) ) { /* limit the size to what we support */ *size = (size_t)UINT32_MAX; } MCA_BTL_TCP_FRAG_ALLOC_USER(frag); if( OPAL_UNLIKELY(NULL == frag) ) { return NULL; } frag->segments->seg_len = *size; opal_convertor_get_current_pointer( convertor, (void**)&(frag->segments->seg_addr.pval) ); frag->base.des_src = NULL; frag->base.des_src_cnt = 0; frag->base.des_dst = frag->segments; frag->base.des_dst_cnt = 1; frag->base.des_flags = flags; frag->base.order = MCA_BTL_NO_ORDER; return &frag->base; } /** * Initiate an asynchronous send. * * @param btl (IN) BTL module * @param endpoint (IN) BTL addressing information * @param descriptor (IN) Description of the data to be transfered * @param tag (IN) The tag value used to notify the peer. 
 */
int mca_btl_tcp2_send( struct mca_btl_base_module_t* btl,
                       struct mca_btl_base_endpoint_t* endpoint,
                       struct mca_btl_base_descriptor_t* descriptor,
                       mca_btl_base_tag_t tag )
{
    mca_btl_tcp2_module_t* tcp_btl = (mca_btl_tcp2_module_t*) btl;
    mca_btl_tcp2_frag_t* frag = (mca_btl_tcp2_frag_t*)descriptor;
    int i;

    frag->btl = tcp_btl;
    frag->endpoint = endpoint;
    frag->rc = 0;
    frag->iov_idx = 0;
    frag->iov_cnt = 1;
    frag->iov_ptr = frag->iov;
    /* iov[0] always carries the fragment header on the wire */
    frag->iov[0].iov_base = (IOVBASE_TYPE*)&frag->hdr;
    frag->iov[0].iov_len = sizeof(frag->hdr);
    frag->hdr.size = 0;
    /* iov[1..n] mirror the source segments; hdr.size is the payload total */
    for( i = 0; i < (int)frag->base.des_src_cnt; i++) {
        frag->hdr.size += frag->segments[i].seg_len;
        frag->iov[i+1].iov_len = frag->segments[i].seg_len;
        frag->iov[i+1].iov_base = (IOVBASE_TYPE*)frag->segments[i].seg_addr.pval;
        frag->iov_cnt++;
    }
    frag->hdr.base.tag = tag;
    frag->hdr.type = MCA_BTL_TCP_HDR_TYPE_SEND;
    frag->hdr.count = 0;
    /* swap header to network byte order when the peer's endianness differs */
    if (endpoint->endpoint_nbo) MCA_BTL_TCP_HDR_HTON(frag->hdr);
    return mca_btl_tcp2_endpoint_send(endpoint,frag);
}


/**
 * Initiate an asynchronous put.
 *
 * @param btl (IN)         BTL module
 * @param endpoint (IN)    BTL addressing information
 * @param descriptor (IN)  Description of the data to be transferred
 */
int mca_btl_tcp2_put( mca_btl_base_module_t* btl,
                      mca_btl_base_endpoint_t* endpoint,
                      mca_btl_base_descriptor_t* descriptor )
{
    mca_btl_tcp2_module_t* tcp_btl = (mca_btl_tcp2_module_t*) btl;
    mca_btl_tcp2_frag_t* frag = (mca_btl_tcp2_frag_t*)descriptor;
    int i;

    frag->btl = tcp_btl;
    frag->endpoint = endpoint;
    frag->rc = 0;
    frag->iov_idx = 0;
    frag->hdr.size = 0;
    frag->iov_cnt = 2;
    frag->iov_ptr = frag->iov;
    /* iov[0]: header; iov[1]: destination segment descriptors for the peer */
    frag->iov[0].iov_base = (IOVBASE_TYPE*)&frag->hdr;
    frag->iov[0].iov_len = sizeof(frag->hdr);
    frag->iov[1].iov_base = (IOVBASE_TYPE*)frag->base.des_dst;
    frag->iov[1].iov_len = frag->base.des_dst_cnt * sizeof(mca_btl_base_segment_t);
    /* iov[2..]: the local source payload being pushed */
    for( i = 0; i < (int)frag->base.des_src_cnt; i++ ) {
        frag->hdr.size += frag->segments[i].seg_len;
        frag->iov[i+2].iov_len = frag->segments[i].seg_len;
        frag->iov[i+2].iov_base = (IOVBASE_TYPE*)frag->segments[i].seg_addr.pval;
        frag->iov_cnt++;
    }
    frag->hdr.base.tag = MCA_BTL_TAG_BTL;
    frag->hdr.type = MCA_BTL_TCP_HDR_TYPE_PUT;
    frag->hdr.count = frag->base.des_dst_cnt;
    if (endpoint->endpoint_nbo) MCA_BTL_TCP_HDR_HTON(frag->hdr);
    return ((i = mca_btl_tcp2_endpoint_send(endpoint,frag)) >= 0 ? OMPI_SUCCESS : i);
}


/**
 * Initiate an asynchronous get.
 *
 * @param btl (IN)         BTL module
 * @param endpoint (IN)    BTL addressing information
 * @param descriptor (IN)  Description of the data to be transferred
 *
 */
int mca_btl_tcp2_get( mca_btl_base_module_t* btl,
                      mca_btl_base_endpoint_t* endpoint,
                      mca_btl_base_descriptor_t* descriptor)
{
    mca_btl_tcp2_module_t* tcp_btl = (mca_btl_tcp2_module_t*) btl;
    mca_btl_tcp2_frag_t* frag = (mca_btl_tcp2_frag_t*)descriptor;
    int rc;

    frag->btl = tcp_btl;
    frag->endpoint = endpoint;
    frag->rc = 0;
    frag->iov_idx = 0;
    frag->hdr.size = 0;
    frag->iov_cnt = 2;
    frag->iov_ptr = frag->iov;
    /* a GET request sends only header + source segment descriptors;
     * the peer replies with the actual data */
    frag->iov[0].iov_base = (IOVBASE_TYPE*)&frag->hdr;
    frag->iov[0].iov_len = sizeof(frag->hdr);
    frag->iov[1].iov_base = (IOVBASE_TYPE*)frag->base.des_src;
    frag->iov[1].iov_len = frag->base.des_src_cnt * sizeof(mca_btl_base_segment_t);
    frag->hdr.base.tag = MCA_BTL_TAG_BTL;
    frag->hdr.type = MCA_BTL_TCP_HDR_TYPE_GET;
    frag->hdr.count = frag->base.des_src_cnt;
    if (endpoint->endpoint_nbo) MCA_BTL_TCP_HDR_HTON(frag->hdr);
    return ((rc = mca_btl_tcp2_endpoint_send(endpoint,frag)) >= 0 ? OMPI_SUCCESS : rc);
}


/*
 * Cleanup/release module resources: release every endpoint, rebalance the
 * event-library user count, and free the module itself.
 */
int mca_btl_tcp2_finalize(struct mca_btl_base_module_t* btl)
{
    mca_btl_tcp2_module_t* tcp_btl = (mca_btl_tcp2_module_t*) btl;
    opal_list_item_t* item;
    for( item = opal_list_remove_first(&tcp_btl->tcp_endpoints);
         item != NULL;
         item = opal_list_remove_first(&tcp_btl->tcp_endpoints)) {
        mca_btl_tcp2_endpoint_t *endpoint = (mca_btl_tcp2_endpoint_t*)item;
        OBJ_RELEASE(endpoint);
        /* undo the per-peer increment done in add_procs */
        opal_progress_event_users_decrement();
    }
    free(tcp_btl);
    return OMPI_SUCCESS;
}
350062.c
#include "board.h"

extern DAC_HandleTypeDef hdac1;
extern DAC_HandleTypeDef hdac2;

/*
 * Start channel 1 of both DAC peripherals and preset the requested
 * outputs to 5000 mV.  Calls Error_Handler() if either start fails.
 */
void DAC_init(void)
{
	if(HAL_DAC_Start(&hdac1, DAC_CHANNEL_1) != HAL_OK)
	{
		Error_Handler();
	}
	if(HAL_DAC_Start(&hdac2, DAC_CHANNEL_1) != HAL_OK)
	{
		Error_Handler();
	}
	sys.DAC1_out_mV = 5000;
	sys.DAC2_out_mV = 5000;
}

/*
 * Convert the requested output voltages (sys.DACx_out_mV) into DAC codes,
 * applying the per-channel calibration gain and offset from mcfg, clamp to
 * full scale, and write the codes to both converters.
 *
 * NOTE(review): the calibrated value is computed in float and assigned to a
 * uint16_t; if the offset ever exceeds gain*mV the intermediate is negative
 * and the conversion wraps rather than clamping to 0 — confirm calibration
 * data makes that impossible.
 */
void DAC_output_handller(void)
{
	uint16_t cali_dac1_ouput_digit, cali_dac2_ouput_digit;

	cali_dac1_ouput_digit = ((float)sys.DAC1_out_mV*mcfg.cali_gain_DAC[0] - mcfg.cali_offset_mV_DAC[0])/(DAC_OUTPUT_MAX_DEFAULT_mV) * DAC_RESOLUTION;
	cali_dac2_ouput_digit = ((float)sys.DAC2_out_mV*mcfg.cali_gain_DAC[1] - mcfg.cali_offset_mV_DAC[1])/(DAC_OUTPUT_MAX_DEFAULT_mV) * DAC_RESOLUTION;

	/* clamp to the converter's full-scale code */
	if(cali_dac1_ouput_digit > DAC_RESOLUTION)
	{
		cali_dac1_ouput_digit = DAC_RESOLUTION;
	}

	if(cali_dac2_ouput_digit > DAC_RESOLUTION)
	{
		cali_dac2_ouput_digit = DAC_RESOLUTION;
	}

	/* set-value failures are deliberately ignored (best effort) */
	if(HAL_DAC_SetValue(&hdac1, DAC_CHANNEL_1, DAC_ALIGN_12B_R, cali_dac1_ouput_digit) != HAL_OK)
	{
		/* Setting value Error */
	}

	if(HAL_DAC_SetValue(&hdac2, DAC_CHANNEL_1, DAC_ALIGN_12B_R, cali_dac2_ouput_digit) != HAL_OK)
	{
		/* Setting value Error */
	}
}
110230.c
#include <stdint.h> #include <stdio.h> #include <omp.h> #define ONE 0x0000000000000001 #define ZERO 0 #define COPROC_XADJ_SIZE uint32_t #define DEBUG 0 extern int __htc_get_unit_count(); #pragma rhomp max_phys_threads(9) #pragma omp declare target // These functions will be compiled for the coprocessor // ---------------------------------------------------------------------- uint8_t bufp(uint32_t vertex, uint64_t xoff[], COPROC_XADJ_SIZE xadj[], uint64_t bfs_tree[], uint64_t bmapOldAddr[], uint8_t xadj_index_shift) { uint8_t updated = 1; uint64_t xoff0 = xoff[2*vertex]; uint64_t xoff1 = xoff[2*vertex+1]; /* if ( (xoff1 - vso) <= 0 ) { // clear done ones // BMAP_UPD: bit_working = bit_working & ( (one << bitCnt) ^ one64); updated = 1; return updated; } */ // updated = (xoff1 - vso) <= 0; // each pipe does for loop for (xoff0 = xoff0; xoff0 < xoff1; ++xoff0) { COPROC_XADJ_SIZE neighbor = xadj[xoff0 << xadj_index_shift]; // XADJ_LD uint32_t bmapIdx = neighbor >> 6; uint64_t oldAddr = bmapOldAddr[bmapIdx]; // Careful ordering of bmapBitIdx after reading bmapOldAddr allows // bmapBitIdx to be a temp and not allocated as part of private // state. uint8_t bmapBitIdx = neighbor & 0x3f; // is neighbor in frontier? 
// check old bit map instead of level updated = 0; if (((oldAddr >> bmapBitIdx) & ONE) == ZERO) { updated = 1; // moved before next stmt to save a state bfs_tree[vertex] = neighbor; // have each pipe write this break; } } return updated; } #define BFS_PACKED_X(k) (bfsPackedAddr[2*k]) #define VLIST_X(k) (bfsPackedAddr[2*k+1]) #pragma omp end declare target enum CommandType { INIT, SCATTER, BFS }; void bottom_up_ctl(uint8_t function, uint64_t *bfsAddr, /* bfs_tree */ uint64_t *bfsPackedAddr, /* bfs_packed */ uint64_t *bmapOldAddr, /* bfs_tree_bit */ uint64_t *bmapNewAddr, /* bfs_tree_bit_new */ uint64_t *xoff, COPROC_XADJ_SIZE *xadj, /* CTL parameters */ uint32_t ub1, uint64_t *update_count ) { // coprocessor entry point uint8_t unitCnt = __htc_get_unit_count(); uint32_t chunk = (uint32_t)((ub1 / unitCnt) + 1); #pragma omp target teams num_teams(unitCnt) { uint8_t unit = omp_get_team_num(); uint64_t lb = (uint64_t)(unit*chunk); uint64_t ub = lb + chunk; if (ub > ub1) { ub = ub1; } uint32_t nt = ub-lb+1; uint32_t my_update_count = 0; if (nt > 512) nt = 512; #pragma omp parallel num_threads(nt) { switch (function) { case INIT: { { #pragma omp for nowait schedule(static,1) for (uint32_t k=lb; k<ub; k++) { bfsAddr[k] = 0xffffffffffffffffULL; } } } break; case SCATTER: { { #pragma omp for nowait schedule(static, 1) for (uint32_t k=lb; k<ub; k++) { bfsAddr[VLIST_X(k)] = BFS_PACKED_X(k); } } } break; case BFS: { //#pragma omp for nowait schedule(static, 1) reduction(+:my_update_count) #pragma omp for nowait schedule(static, 1) for (uint32_t index = lb; index < ub; index ++) { uint64_t mask; uint8_t bitCnt = 0; uint8_t bmapUpdCnt = 0; mask = bmapOldAddr[index]; if (mask != 0) { for (bitCnt = bitCnt; bitCnt < 64; bitCnt++) { if ( ((mask >> bitCnt) & ONE) == ONE) { // call bufp uint32_t vertex = (uint32_t)(index*64) + bitCnt; if (bufp(vertex, xoff, xadj, bfsAddr, bmapOldAddr, (uint8_t) (((uint64_t)bfsPackedAddr) & 0x1) /* xadj_index_shift */) & 0x1){ mask = mask & ~(1ULL << 
bitCnt); bmapUpdCnt++; } } else { uint16_t tmask = (uint16_t)(mask >> (bitCnt+1)); uint16_t skip = 0; if ((tmask & 0xff) == 0) { skip = 8; tmask >>= 8; } if ((tmask & 0x0f) == 0) { skip += 4; tmask >>= 4; } if ((tmask & 0x03) == 0) { skip += 2; tmask >>= 2; } skip += (1-(tmask & 0x1)); bitCnt += skip; } // // if ((bitCnt > 63) || (mask <= (1ULL < bitCnt))) { // bitCnt = 63; // no more higher // } // break state with comment } } if (bmapUpdCnt) { my_update_count += bmapUpdCnt; } bmapNewAddr[index] = mask; } *update_count = (uint64_t)my_update_count; } break; } } /* end of parallel */ if (function==BFS) { update_count[unit] = (uint64_t)my_update_count; } } /* pragma omp target */ } /* extern "C" */ void pers_init_bfs_tree (int64_t nv, uint64_t *bfs_tree) { #if DEBUG int unitCnt = __htc_get_unit_count(); fprintf(stderr, "pers_init_bfs_tree: #AUs = %d\n", unitCnt); #endif uint64_t ub1 = (uint64_t) nv; #if DEBUG printf("in pers_init_bfs_tree num_threads is %d\n", omp_get_num_threads()); #endif bottom_up_ctl(INIT, bfs_tree, /* bfsAddr */ 0, /* bfs_packed */ 0, /* bfs_tree_bit */ 0, /* bfs_tree_bit_new */ 0, /* xoff */ 0, /* xadj */ ub1, 0); #if DEBUG fprintf(stderr, "pers_init_bfs_tree: all units returned\n"); #endif } /* extern "C" */ void pers_scatter_bfs (uint64_t *k2, uint64_t *bfs_tree, uint64_t *bfs_packed) { #if DEBUG int unitCnt = __htc_get_unit_count(); fprintf(stderr, "pers_scatter_bfs: #AUs = %d\n", unitCnt); #endif // BFS_SIZE used for K2 on scatter instruction uint64_t S_bfsSize = *k2; #if DEBUG fprintf(stderr,"bfsSize is %ld\n", S_bfsSize); #endif uint64_t ub1 = S_bfsSize; #if DEBUG printf("in scatter num_threads is %d\n", omp_get_num_threads()); #endif bottom_up_ctl(SCATTER, bfs_tree, /* bfsAddr */ bfs_packed, /* bfs_packed */ 0, /* bfs_tree_bit */ 0, /* bfs_tree_bit_new */ 0, /* xoff */ 0, /* xadj */ ub1, 0); #if DEBUG fprintf(stderr, "pers_scatter_bfs: all units returned\n"); #endif } /* extern "C" */ void pers_bottom_up ( int64_t g500_ctl, int64_t nv, 
uint64_t *bfs_tree, uint64_t *bfs_packed_cp, COPROC_XADJ_SIZE *xadj, uint64_t *xoff, uint64_t **bfs_tree_bit, uint64_t **bfs_tree_bit_new, uint64_t *k1, uint64_t *k2, uint64_t *oldk2) { #if DEBUG fprintf(stderr, "pers_bottom_up: start \n"); #endif int unitCnt = __htc_get_unit_count(); uint64_t bfsSize = (uint64_t) nv; uint64_t ub1 = (bfsSize + 63) >> 6; /* bfsSize / 64 */ while (*k1 != *k2) { *oldk2 = *k2; #if DEBUG fprintf(stderr, "pers_bottom_up: #AUs = %d\n", unitCnt); #endif uint64_t updCnt = 0; int unit; uint64_t update_count[64]; //#pragma omp parallel num_threads(unitCnt) reduction(+:updCnt) //#pragma omp for nowait schedule(static , 1) private(unit) for (unit = 0; unit < unitCnt; unit++) { update_count[unit] = 0; } bottom_up_ctl(BFS, bfs_tree, /* bfsAddr */ (uint64_t *) ((g500_ctl & 0xFFFFFF) == 64), /* xadj_index_shift passed in bfs_packed slot */ *bfs_tree_bit, /* bfs_tree_bit */ *bfs_tree_bit_new, /* bfs_tree_bit_new */ xoff, /* xoff */ xadj, /* xadj */ ub1, /* ub */ &update_count[0]); for (uint8_t unit = 0; unit < unitCnt; unit++) { updCnt += update_count[unit]; } #if DEBUG fprintf(stderr, "pers_bottom_up: all units returned, updCnt = %lld\n", (long long)updCnt); #endif *k2 += updCnt; *k1 = *oldk2; // flip addresses for next iteration uint64_t *temp; temp = *bfs_tree_bit; *bfs_tree_bit = *bfs_tree_bit_new; *bfs_tree_bit_new = temp; } /* while (*k1 != *k2) */ }
137576.c
/* Copyright (c) 2019, Ameer Haj Ali (UC Berkeley), and Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "header.h" int result[64] ALIGNED16; int in1[64] ALIGNED16; int in2[64] ALIGNED16; __attribute__((noinline)) void example4b () { int i; /* feature: support for read accesses with a compile time known misalignment */ for (i=0; i<64-4; i++){ result[i] = in1[i+1] +in2[i+3]; } } int main(int argc,char* argv[]){ init_memory(&result[0], &result[64]); init_memory(&in1[0], &in1[64]); init_memory(&in2[0], &in2[64]); BENCH("Example4b", example4b(), Mi*2/64*512, digest_memory(&result[0], &result[64])); return 0; }
/* 557494.c */
/*****************************************************************************
 *
 * i8085.c
 * Portable I8085A emulator V1.2
 *
 * Copyright (c) 1999 Juergen Buchmueller, all rights reserved.
 * Partially based on information out of Z80Em by Marcel De Kogel
 *
 * changes in V1.3
 * - Added undocumented opcodes for the 8085A, based on a german
 * book about microcomputers: "Mikrocomputertechnik mit dem
 * Prozessor 8085A".
 * - This book also suggests that INX/DCX should modify the X flag bit
 * for a LSB to MSB carry and
 * - that jumps take 10 T-states only when they're executed, 7 when
 * they're skipped.
 * Thanks for the info and a copy of the tables go to Timo Sachsenberg
 * <[email protected]>
 * changes in V1.2
 * - corrected cycle counts for these classes of opcodes
 * Thanks go to Jim Battle <[email protected]>
 *
 * 808x Z80
 * DEC A 5 4 \
 * INC A 5 4 \
 * LD A,B 5 4 >-- Z80 is faster
 * JP (HL) 5 4 /
 * CALL cc,nnnn: 11/17 10/17 /
 *
 * INC HL 5 6 \
 * DEC HL 5 6 \
 * LD SP,HL 5 6 \
 * ADD HL,BC 10 11 \
 * INC (HL) 10 11 >-- 8080 is faster
 * DEC (HL) 10 11 /
 * IN A,(#) 10 11 /
 * OUT (#),A 10 11 /
 * EX (SP),HL 18 19 /
 *
 * Copyright (C) 1998,1999,2000 Juergen Buchmueller, all rights reserved.
 * You can contact me at [email protected] or [email protected]
 *
 * - This source code is released as freeware for non-commercial purposes
 * as part of the M.A.M.E. (Multiple Arcade Machine Emulator) project.
 * The licensing terms of MAME apply to this piece of code for the MAME
 * project and derivative works, as defined by the MAME license. You
 * may opt to make modifications, improvements or derivative works under
 * those same conditions, and the MAME project may opt to keep
 * modifications, improvements or derivatives under their terms exclusively.
 *
 * - Alternatively you can choose to apply the terms of the "GPL" (see
 * below) to this - and only this - piece of code or your derivative works.
 * Note that in no case your choice can have any impact on any other
 * source code of the MAME project, or binary, or executable, be it closely
 * or loosely related to this piece of code.
 *
 * - At your choice you are also free to remove either licensing terms from
 * this file and continue to use it under only one of the two licenses. Do this
 * if you think that licenses are not compatible (enough) for you, or if you
 * consider either license 'too restrictive' or 'too free'.
 *
 * - GPL (GNU General Public License)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * Revisions:
 *
 * xx-xx-2002 Acho A. Tang
 *
 * - 8085 emulation was in fact never used. It's been treated as a plain 8080.
 * - protected IRQ0 vector from being overwritten
 * - modified interrupt handler to properly process 8085-specific IRQ's
 * - corrected interrupt masking, RIM and SIM behaviors according to Intel's documentation
 *
 * 20-Jul-2002 Krzysztof Strzecha
 *
 * - SBB r instructions should affect parity flag.
 * Fixed only for non x86 asm version (#define i8080_EXACT 1).
 * There are probably more opcodes which should affect this flag, but don't.
 * - JPO nnnn and JPE nnnn opcodes in disassembler were misplaced. Fixed.
* - Undocumented i8080 opcodes added: * 08h, 10h, 18h, 20h, 28h, 30h, 38h - NOP * 0CBh - JMP * 0D9h - RET * 0DDh, 0EDh, 0FDh - CALL * Thanks for the info go to Anton V. Ignatichev. * * 08-Dec-2002 Krzysztof Strzecha * * - ADC r instructions should affect parity flag. * Fixed only for non x86 asm version (#define i8080_EXACT 1). * There are probably more opcodes which should affect this flag, but don't. * * 05-Sep-2003 Krzysztof Strzecha * * - INR r, DCR r, ADD r, SUB r, CMP r instructions should affect parity flag. * Fixed only for non x86 asm version (#define i8080_EXACT 1). * * 23-Dec-2006 Tomasz Slanina * * - SIM fixed * * 28-Jan-2007 Zsolt Vasvari * * - Removed archaic i8080_EXACT flag. * *****************************************************************************/ /*int survival_prot = 0; */ #define VERBOSE 0 #include "debugger.h" #include "i8085.h" #include "i8085cpu.h" #include "i8085daa.h" #if VERBOSE #define LOG(x) logerror x #else #define LOG(x) #endif #define I8085_INTR 0xff typedef struct { int cputype; /* 0 8080, 1 8085A */ PAIR PC,SP,AF,BC,DE,HL,XX; UINT8 HALT; UINT8 IM; /* interrupt mask */ UINT8 IREQ; /* requested interrupts */ UINT8 ISRV; /* serviced interrupt */ UINT32 INTR; /* vector for INTR */ UINT32 IRQ2; /* scheduled interrupt address */ UINT32 IRQ1; /* executed interrupt address */ INT8 irq_state[4]; int (*irq_callback)(int); void (*sod_callback)(int state); } i8085_Regs; int i8085_ICount = 0; static i8085_Regs I; static UINT8 ZS[256]; static UINT8 ZSP[256]; static UINT8 RIM_IEN = 0; //AT: IEN status latch used by the RIM instruction static UINT8 ROP(void) { return cpu_readop(I.PC.w.l++); } static UINT8 ARG(void) { return cpu_readop_arg(I.PC.w.l++); } static UINT16 ARG16(void) { UINT16 w; w = cpu_readop_arg(I.PC.d); I.PC.w.l++; w += cpu_readop_arg(I.PC.d) << 8; I.PC.w.l++; return w; } static UINT8 RM(UINT32 a) { return program_read_byte_8(a); } static void WM(UINT32 a, UINT8 v) { program_write_byte_8(a, v); } INLINE void execute_one(int 
opcode) { switch (opcode) { case 0x00: i8085_ICount -= 4; /* NOP */ /* no op */ break; case 0x01: i8085_ICount -= 10; /* LXI B,nnnn */ I.BC.w.l = ARG16(); break; case 0x02: i8085_ICount -= 7; /* STAX B */ WM(I.BC.d, I.AF.b.h); break; case 0x03: i8085_ICount -= 5; /* INX B */ I.BC.w.l++; if (I.BC.b.l == 0x00) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x04: i8085_ICount -= 5; /* INR B */ M_INR(I.BC.b.h); break; case 0x05: i8085_ICount -= 5; /* DCR B */ M_DCR(I.BC.b.h); break; case 0x06: i8085_ICount -= 7; /* MVI B,nn */ M_MVI(I.BC.b.h); break; case 0x07: i8085_ICount -= 4; /* RLC */ M_RLC; break; case 0x08: if( I.cputype ) { i8085_ICount -= 10; /* DSUB */ M_DSUB(); } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x09: i8085_ICount -= 10; /* DAD B */ M_DAD(BC); break; case 0x0a: i8085_ICount -= 7; /* LDAX B */ I.AF.b.h = RM(I.BC.d); break; case 0x0b: i8085_ICount -= 5; /* DCX B */ I.BC.w.l--; if (I.BC.b.l == 0xff) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x0c: i8085_ICount -= 5; /* INR C */ M_INR(I.BC.b.l); break; case 0x0d: i8085_ICount -= 5; /* DCR C */ M_DCR(I.BC.b.l); break; case 0x0e: i8085_ICount -= 7; /* MVI C,nn */ M_MVI(I.BC.b.l); break; case 0x0f: i8085_ICount -= 4; /* RRC */ M_RRC; break; case 0x10: if( I.cputype ) { i8085_ICount -= 7; /* ASRH */ I.AF.b.l = (I.AF.b.l & ~CF) | (I.HL.b.l & CF); I.HL.w.l = (I.HL.w.l >> 1); } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x11: i8085_ICount -= 10; /* LXI D,nnnn */ I.DE.w.l = ARG16(); break; case 0x12: i8085_ICount -= 7; /* STAX D */ WM(I.DE.d, I.AF.b.h); break; case 0x13: i8085_ICount -= 5; /* INX D */ I.DE.w.l++; if (I.DE.b.l == 0x00) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x14: i8085_ICount -= 5; /* INR D */ M_INR(I.DE.b.h); break; case 0x15: i8085_ICount -= 5; /* DCR D */ M_DCR(I.DE.b.h); break; case 0x16: i8085_ICount -= 7; /* MVI D,nn */ M_MVI(I.DE.b.h); break; case 0x17: i8085_ICount -= 4; /* RAL */ M_RAL; break; case 0x18: if( I.cputype ) { 
i8085_ICount -= 10; /* RLDE */ I.AF.b.l = (I.AF.b.l & ~(CF | VF)) | (I.DE.b.h >> 7); I.DE.w.l = (I.DE.w.l << 1) | (I.DE.w.l >> 15); if (0 != (((I.DE.w.l >> 15) ^ I.AF.b.l) & CF)) I.AF.b.l |= VF; } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x19: i8085_ICount -= 10; /* DAD D */ M_DAD(DE); break; case 0x1a: i8085_ICount -= 7; /* LDAX D */ I.AF.b.h = RM(I.DE.d); break; case 0x1b: i8085_ICount -= 5; /* DCX D */ I.DE.w.l--; if (I.DE.b.l == 0xff) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x1c: i8085_ICount -= 5; /* INR E */ M_INR(I.DE.b.l); break; case 0x1d: i8085_ICount -= 5; /* DCR E */ M_DCR(I.DE.b.l); break; case 0x1e: i8085_ICount -= 7; /* MVI E,nn */ M_MVI(I.DE.b.l); break; case 0x1f: i8085_ICount -= 4; /* RAR */ M_RAR; break; case 0x20: if( I.cputype ) { i8085_ICount -= 7; /* RIM */ I.AF.b.h = I.IM; I.AF.b.h |= RIM_IEN; RIM_IEN = 0; //AT: read and clear IEN status latch } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x21: i8085_ICount -= 10; /* LXI H,nnnn */ I.HL.w.l = ARG16(); break; case 0x22: i8085_ICount -= 16; /* SHLD nnnn */ I.XX.w.l = ARG16(); WM(I.XX.d, I.HL.b.l); I.XX.w.l++; WM(I.XX.d, I.HL.b.h); break; case 0x23: i8085_ICount -= 5; /* INX H */ I.HL.w.l++; if (I.HL.b.l == 0x00) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x24: i8085_ICount -= 5; /* INR H */ M_INR(I.HL.b.h); break; case 0x25: i8085_ICount -= 5; /* DCR H */ M_DCR(I.HL.b.h); break; case 0x26: i8085_ICount -= 7; /* MVI H,nn */ M_MVI(I.HL.b.h); break; case 0x27: i8085_ICount -= 4; /* DAA */ I.XX.d = I.AF.b.h; if (I.AF.b.l & CF) I.XX.d |= 0x100; if (I.AF.b.l & HF) I.XX.d |= 0x200; if (I.AF.b.l & NF) I.XX.d |= 0x400; I.AF.w.l = DAA[I.XX.d]; break; case 0x28: if( I.cputype ) { i8085_ICount -= 10; /* LDEH nn */ I.XX.d = ARG(); I.DE.d = (I.HL.d + I.XX.d) & 0xffff; } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x29: i8085_ICount -= 10; /* DAD H */ M_DAD(HL); break; case 0x2a: i8085_ICount -= 16; /* LHLD nnnn */ I.XX.d = ARG16(); 
I.HL.b.l = RM(I.XX.d); I.XX.w.l++; I.HL.b.h = RM(I.XX.d); break; case 0x2b: i8085_ICount -= 5; /* DCX H */ I.HL.w.l--; if (I.HL.b.l == 0xff) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x2c: i8085_ICount -= 5; /* INR L */ M_INR(I.HL.b.l); break; case 0x2d: i8085_ICount -= 5; /* DCR L */ M_DCR(I.HL.b.l); break; case 0x2e: i8085_ICount -= 7; /* MVI L,nn */ M_MVI(I.HL.b.l); break; case 0x2f: i8085_ICount -= 4; /* CMA */ if( I.cputype ) { I.AF.b.h ^= 0xff; I.AF.b.l |= HF + NF; } else { I.AF.b.h ^= 0xff; /* 8080 */ } break; case 0x30: if( I.cputype ) { i8085_ICount -= 7; /* SIM */ if (I.AF.b.h & 0x40) //SOE - only when bit 0x40 is set! { I.IM &=~IM_SOD; if (I.AF.b.h & 0x80) I.IM |= IM_SOD; //is it needed ? if (I.sod_callback) (*I.sod_callback)(I.AF.b.h >> 7); //SOD - data = bit 0x80 } //AT //I.IM &= (IM_SID + IM_IEN + IM_TRAP); //I.IM |= (I.AF.b.h & ~(IM_SID + IM_SOD + IM_IEN + IM_TRAP)); // overwrite RST5.5-7.5 interrupt masks only when bit 0x08 of the accumulator is set if (I.AF.b.h & 0x08) I.IM = (I.IM & ~(IM_RST55+IM_RST65+IM_RST75)) | (I.AF.b.h & (IM_RST55+IM_RST65+IM_RST75)); } else { i8085_ICount -= 4; /* NOP undocumented */ } break; case 0x31: i8085_ICount -= 10; /* LXI SP,nnnn */ I.SP.w.l = ARG16(); break; case 0x32: i8085_ICount -= 13; /* STAX nnnn */ I.XX.d = ARG16(); WM(I.XX.d, I.AF.b.h); break; case 0x33: i8085_ICount -= 5; /* INX SP */ I.SP.w.l++; if (I.SP.b.l == 0x00) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x34: i8085_ICount -= 10; /* INR M */ I.XX.b.l = RM(I.HL.d); M_INR(I.XX.b.l); WM(I.HL.d, I.XX.b.l); break; case 0x35: i8085_ICount -= 10; /* DCR M */ I.XX.b.l = RM(I.HL.d); M_DCR(I.XX.b.l); WM(I.HL.d, I.XX.b.l); break; case 0x36: i8085_ICount -= 10; /* MVI M,nn */ I.XX.b.l = ARG(); WM(I.HL.d, I.XX.b.l); break; case 0x37: i8085_ICount -= 4; /* STC */ I.AF.b.l = (I.AF.b.l & ~(HF + NF)) | CF; break; case 0x38: if( I.cputype ) { i8085_ICount -= 10; /* LDES nn */ I.XX.d = ARG(); I.DE.d = (I.SP.d + I.XX.d) & 0xffff; } else { i8085_ICount -= 
4; /* NOP undocumented */ } break; case 0x39: i8085_ICount -= 10; /* DAD SP */ M_DAD(SP); break; case 0x3a: i8085_ICount -= 13; /* LDAX nnnn */ I.XX.d = ARG16(); I.AF.b.h = RM(I.XX.d); break; case 0x3b: i8085_ICount -= 5; /* DCX SP */ I.SP.w.l--; if (I.SP.b.l == 0xff) I.AF.b.l |= XF; else I.AF.b.l &= ~XF; break; case 0x3c: i8085_ICount -= 5; /* INR A */ M_INR(I.AF.b.h); break; case 0x3d: i8085_ICount -= 5; /* DCR A */ M_DCR(I.AF.b.h); break; case 0x3e: i8085_ICount -= 7; /* MVI A,nn */ M_MVI(I.AF.b.h); break; case 0x3f: i8085_ICount -= 4; /* CMF */ I.AF.b.l = ((I.AF.b.l & ~(HF + NF)) | ((I.AF.b.l & CF) << 4)) ^ CF; break; case 0x40: i8085_ICount -= 5; /* MOV B,B */ /* no op */ break; case 0x41: i8085_ICount -= 5; /* MOV B,C */ I.BC.b.h = I.BC.b.l; break; case 0x42: i8085_ICount -= 5; /* MOV B,D */ I.BC.b.h = I.DE.b.h; break; case 0x43: i8085_ICount -= 5; /* MOV B,E */ I.BC.b.h = I.DE.b.l; break; case 0x44: i8085_ICount -= 5; /* MOV B,H */ I.BC.b.h = I.HL.b.h; break; case 0x45: i8085_ICount -= 5; /* MOV B,L */ I.BC.b.h = I.HL.b.l; break; case 0x46: i8085_ICount -= 7; /* MOV B,M */ I.BC.b.h = RM(I.HL.d); break; case 0x47: i8085_ICount -= 5; /* MOV B,A */ I.BC.b.h = I.AF.b.h; break; case 0x48: i8085_ICount -= 5; /* MOV C,B */ I.BC.b.l = I.BC.b.h; break; case 0x49: i8085_ICount -= 5; /* MOV C,C */ /* no op */ break; case 0x4a: i8085_ICount -= 5; /* MOV C,D */ I.BC.b.l = I.DE.b.h; break; case 0x4b: i8085_ICount -= 5; /* MOV C,E */ I.BC.b.l = I.DE.b.l; break; case 0x4c: i8085_ICount -= 5; /* MOV C,H */ I.BC.b.l = I.HL.b.h; break; case 0x4d: i8085_ICount -= 5; /* MOV C,L */ I.BC.b.l = I.HL.b.l; break; case 0x4e: i8085_ICount -= 7; /* MOV C,M */ I.BC.b.l = RM(I.HL.d); break; case 0x4f: i8085_ICount -= 5; /* MOV C,A */ I.BC.b.l = I.AF.b.h; break; case 0x50: i8085_ICount -= 5; /* MOV D,B */ I.DE.b.h = I.BC.b.h; break; case 0x51: i8085_ICount -= 5; /* MOV D,C */ I.DE.b.h = I.BC.b.l; break; case 0x52: i8085_ICount -= 5; /* MOV D,D */ /* no op */ break; case 0x53: i8085_ICount 
-= 5; /* MOV D,E */ I.DE.b.h = I.DE.b.l; break; case 0x54: i8085_ICount -= 5; /* MOV D,H */ I.DE.b.h = I.HL.b.h; break; case 0x55: i8085_ICount -= 5; /* MOV D,L */ I.DE.b.h = I.HL.b.l; break; case 0x56: i8085_ICount -= 7; /* MOV D,M */ I.DE.b.h = RM(I.HL.d); break; case 0x57: i8085_ICount -= 5; /* MOV D,A */ I.DE.b.h = I.AF.b.h; break; case 0x58: i8085_ICount -= 5; /* MOV E,B */ I.DE.b.l = I.BC.b.h; break; case 0x59: i8085_ICount -= 5; /* MOV E,C */ I.DE.b.l = I.BC.b.l; break; case 0x5a: i8085_ICount -= 5; /* MOV E,D */ I.DE.b.l = I.DE.b.h; break; case 0x5b: i8085_ICount -= 5; /* MOV E,E */ /* no op */ break; case 0x5c: i8085_ICount -= 5; /* MOV E,H */ I.DE.b.l = I.HL.b.h; break; case 0x5d: i8085_ICount -= 5; /* MOV E,L */ I.DE.b.l = I.HL.b.l; break; case 0x5e: i8085_ICount -= 7; /* MOV E,M */ I.DE.b.l = RM(I.HL.d); break; case 0x5f: i8085_ICount -= 5; /* MOV E,A */ I.DE.b.l = I.AF.b.h; break; case 0x60: i8085_ICount -= 5; /* MOV H,B */ I.HL.b.h = I.BC.b.h; break; case 0x61: i8085_ICount -= 5; /* MOV H,C */ I.HL.b.h = I.BC.b.l; break; case 0x62: i8085_ICount -= 5; /* MOV H,D */ I.HL.b.h = I.DE.b.h; break; case 0x63: i8085_ICount -= 5; /* MOV H,E */ I.HL.b.h = I.DE.b.l; break; case 0x64: i8085_ICount -= 5; /* MOV H,H */ /* no op */ break; case 0x65: i8085_ICount -= 5; /* MOV H,L */ I.HL.b.h = I.HL.b.l; break; case 0x66: i8085_ICount -= 7; /* MOV H,M */ I.HL.b.h = RM(I.HL.d); break; case 0x67: i8085_ICount -= 5; /* MOV H,A */ I.HL.b.h = I.AF.b.h; break; case 0x68: i8085_ICount -= 5; /* MOV L,B */ I.HL.b.l = I.BC.b.h; break; case 0x69: i8085_ICount -= 5; /* MOV L,C */ I.HL.b.l = I.BC.b.l; break; case 0x6a: i8085_ICount -= 5; /* MOV L,D */ I.HL.b.l = I.DE.b.h; break; case 0x6b: i8085_ICount -= 5; /* MOV L,E */ I.HL.b.l = I.DE.b.l; break; case 0x6c: i8085_ICount -= 5; /* MOV L,H */ I.HL.b.l = I.HL.b.h; break; case 0x6d: i8085_ICount -= 5; /* MOV L,L */ /* no op */ break; case 0x6e: i8085_ICount -= 7; /* MOV L,M */ I.HL.b.l = RM(I.HL.d); break; case 0x6f: i8085_ICount -= 
5; /* MOV L,A */ I.HL.b.l = I.AF.b.h; break; case 0x70: i8085_ICount -= 7; /* MOV M,B */ WM(I.HL.d, I.BC.b.h); break; case 0x71: i8085_ICount -= 7; /* MOV M,C */ WM(I.HL.d, I.BC.b.l); break; case 0x72: i8085_ICount -= 7; /* MOV M,D */ WM(I.HL.d, I.DE.b.h); break; case 0x73: i8085_ICount -= 7; /* MOV M,E */ WM(I.HL.d, I.DE.b.l); break; case 0x74: i8085_ICount -= 7; /* MOV M,H */ WM(I.HL.d, I.HL.b.h); break; case 0x75: i8085_ICount -= 7; /* MOV M,L */ WM(I.HL.d, I.HL.b.l); break; case 0x76: i8085_ICount -= 4; /* HALT */ I.PC.w.l--; I.HALT = 1; if (i8085_ICount > 0) i8085_ICount = 0; break; case 0x77: i8085_ICount -= 7; /* MOV M,A */ WM(I.HL.d, I.AF.b.h); break; case 0x78: i8085_ICount -= 5; /* MOV A,B */ I.AF.b.h = I.BC.b.h; break; case 0x79: i8085_ICount -= 5; /* MOV A,C */ I.AF.b.h = I.BC.b.l; break; case 0x7a: i8085_ICount -= 5; /* MOV A,D */ I.AF.b.h = I.DE.b.h; break; case 0x7b: i8085_ICount -= 5; /* MOV A,E */ I.AF.b.h = I.DE.b.l; break; case 0x7c: i8085_ICount -= 5; /* MOV A,H */ I.AF.b.h = I.HL.b.h; break; case 0x7d: i8085_ICount -= 5; /* MOV A,L */ I.AF.b.h = I.HL.b.l; break; case 0x7e: i8085_ICount -= 7; /* MOV A,M */ I.AF.b.h = RM(I.HL.d); break; case 0x7f: i8085_ICount -= 5; /* MOV A,A */ /* no op */ break; case 0x80: i8085_ICount -= 4; /* ADD B */ M_ADD(I.BC.b.h); break; case 0x81: i8085_ICount -= 4; /* ADD C */ M_ADD(I.BC.b.l); break; case 0x82: i8085_ICount -= 4; /* ADD D */ M_ADD(I.DE.b.h); break; case 0x83: i8085_ICount -= 4; /* ADD E */ M_ADD(I.DE.b.l); break; case 0x84: i8085_ICount -= 4; /* ADD H */ M_ADD(I.HL.b.h); break; case 0x85: i8085_ICount -= 4; /* ADD L */ M_ADD(I.HL.b.l); break; case 0x86: i8085_ICount -= 7; /* ADD M */ M_ADD(RM(I.HL.d)); break; case 0x87: i8085_ICount -= 4; /* ADD A */ M_ADD(I.AF.b.h); break; case 0x88: i8085_ICount -= 4; /* ADC B */ M_ADC(I.BC.b.h); break; case 0x89: i8085_ICount -= 4; /* ADC C */ M_ADC(I.BC.b.l); break; case 0x8a: i8085_ICount -= 4; /* ADC D */ M_ADC(I.DE.b.h); break; case 0x8b: i8085_ICount -= 4; /* 
ADC E */ M_ADC(I.DE.b.l); break; case 0x8c: i8085_ICount -= 4; /* ADC H */ M_ADC(I.HL.b.h); break; case 0x8d: i8085_ICount -= 4; /* ADC L */ M_ADC(I.HL.b.l); break; case 0x8e: i8085_ICount -= 7; /* ADC M */ M_ADC(RM(I.HL.d)); break; case 0x8f: i8085_ICount -= 4; /* ADC A */ M_ADC(I.AF.b.h); break; case 0x90: i8085_ICount -= 4; /* SUB B */ M_SUB(I.BC.b.h); break; case 0x91: i8085_ICount -= 4; /* SUB C */ M_SUB(I.BC.b.l); break; case 0x92: i8085_ICount -= 4; /* SUB D */ M_SUB(I.DE.b.h); break; case 0x93: i8085_ICount -= 4; /* SUB E */ M_SUB(I.DE.b.l); break; case 0x94: i8085_ICount -= 4; /* SUB H */ M_SUB(I.HL.b.h); break; case 0x95: i8085_ICount -= 4; /* SUB L */ M_SUB(I.HL.b.l); break; case 0x96: i8085_ICount -= 7; /* SUB M */ M_SUB(RM(I.HL.d)); break; case 0x97: i8085_ICount -= 4; /* SUB A */ M_SUB(I.AF.b.h); break; case 0x98: i8085_ICount -= 4; /* SBB B */ M_SBB(I.BC.b.h); break; case 0x99: i8085_ICount -= 4; /* SBB C */ M_SBB(I.BC.b.l); break; case 0x9a: i8085_ICount -= 4; /* SBB D */ M_SBB(I.DE.b.h); break; case 0x9b: i8085_ICount -= 4; /* SBB E */ M_SBB(I.DE.b.l); break; case 0x9c: i8085_ICount -= 4; /* SBB H */ M_SBB(I.HL.b.h); break; case 0x9d: i8085_ICount -= 4; /* SBB L */ M_SBB(I.HL.b.l); break; case 0x9e: i8085_ICount -= 7; /* SBB M */ M_SBB(RM(I.HL.d)); break; case 0x9f: i8085_ICount -= 4; /* SBB A */ M_SBB(I.AF.b.h); break; case 0xa0: i8085_ICount -= 4; /* ANA B */ M_ANA(I.BC.b.h); break; case 0xa1: i8085_ICount -= 4; /* ANA C */ M_ANA(I.BC.b.l); break; case 0xa2: i8085_ICount -= 4; /* ANA D */ M_ANA(I.DE.b.h); break; case 0xa3: i8085_ICount -= 4; /* ANA E */ M_ANA(I.DE.b.l); break; case 0xa4: i8085_ICount -= 4; /* ANA H */ M_ANA(I.HL.b.h); break; case 0xa5: i8085_ICount -= 4; /* ANA L */ M_ANA(I.HL.b.l); break; case 0xa6: i8085_ICount -= 7; /* ANA M */ M_ANA(RM(I.HL.d)); break; case 0xa7: i8085_ICount -= 4; /* ANA A */ M_ANA(I.AF.b.h); break; case 0xa8: i8085_ICount -= 4; /* XRA B */ M_XRA(I.BC.b.h); break; case 0xa9: i8085_ICount -= 4; /* XRA C */ 
M_XRA(I.BC.b.l); break; case 0xaa: i8085_ICount -= 4; /* XRA D */ M_XRA(I.DE.b.h); break; case 0xab: i8085_ICount -= 4; /* XRA E */ M_XRA(I.DE.b.l); break; case 0xac: i8085_ICount -= 4; /* XRA H */ M_XRA(I.HL.b.h); break; case 0xad: i8085_ICount -= 4; /* XRA L */ M_XRA(I.HL.b.l); break; case 0xae: i8085_ICount -= 7; /* XRA M */ M_XRA(RM(I.HL.d)); break; case 0xaf: i8085_ICount -= 4; /* XRA A */ M_XRA(I.AF.b.h); break; case 0xb0: i8085_ICount -= 4; /* ORA B */ M_ORA(I.BC.b.h); break; case 0xb1: i8085_ICount -= 4; /* ORA C */ M_ORA(I.BC.b.l); break; case 0xb2: i8085_ICount -= 4; /* ORA D */ M_ORA(I.DE.b.h); break; case 0xb3: i8085_ICount -= 4; /* ORA E */ M_ORA(I.DE.b.l); break; case 0xb4: i8085_ICount -= 4; /* ORA H */ M_ORA(I.HL.b.h); break; case 0xb5: i8085_ICount -= 4; /* ORA L */ M_ORA(I.HL.b.l); break; case 0xb6: i8085_ICount -= 7; /* ORA M */ M_ORA(RM(I.HL.d)); break; case 0xb7: i8085_ICount -= 4; /* ORA A */ M_ORA(I.AF.b.h); break; case 0xb8: i8085_ICount -= 4; /* CMP B */ M_CMP(I.BC.b.h); break; case 0xb9: i8085_ICount -= 4; /* CMP C */ M_CMP(I.BC.b.l); break; case 0xba: i8085_ICount -= 4; /* CMP D */ M_CMP(I.DE.b.h); break; case 0xbb: i8085_ICount -= 4; /* CMP E */ M_CMP(I.DE.b.l); break; case 0xbc: i8085_ICount -= 4; /* CMP H */ M_CMP(I.HL.b.h); break; case 0xbd: i8085_ICount -= 4; /* CMP L */ M_CMP(I.HL.b.l); break; case 0xbe: i8085_ICount -= 7; /* CMP M */ M_CMP(RM(I.HL.d)); break; case 0xbf: i8085_ICount -= 4; /* CMP A */ M_CMP(I.AF.b.h); break; case 0xc0: i8085_ICount -= 5; /* RNZ */ M_RET( !(I.AF.b.l & ZF) ); break; case 0xc1: i8085_ICount -= 10; /* POP B */ M_POP(BC); break; case 0xc2: i8085_ICount -= 7; /* JNZ nnnn */ M_JMP( !(I.AF.b.l & ZF) ); break; case 0xc3: i8085_ICount -= 7; /* JMP nnnn */ M_JMP(1); break; case 0xc4: i8085_ICount -= 11; /* CNZ nnnn */ M_CALL( !(I.AF.b.l & ZF) ); break; case 0xc5: i8085_ICount -= 11; /* PUSH B */ M_PUSH(BC); break; case 0xc6: i8085_ICount -= 7; /* ADI nn */ I.XX.b.l = ARG(); M_ADD(I.XX.b.l); break; case 0xc7: 
i8085_ICount -= 11; /* RST 0 */ M_RST(0); break; case 0xc8: i8085_ICount -= 5; /* RZ */ M_RET( I.AF.b.l & ZF ); break; case 0xc9: i8085_ICount -= 4; /* RET */ M_RET(1); break; case 0xca: i8085_ICount -= 7; /* JZ nnnn */ M_JMP( I.AF.b.l & ZF ); break; case 0xcb: if( I.cputype ) { if (I.AF.b.l & VF) { i8085_ICount -= 12; M_RST(8); /* call 0x40 */ } else { i8085_ICount -= 6; /* RST V */ } } else { i8085_ICount -= 7; /* JMP nnnn undocumented*/ M_JMP(1); } break; case 0xcc: i8085_ICount -= 11; /* CZ nnnn */ M_CALL( I.AF.b.l & ZF ); break; case 0xcd: i8085_ICount -= 11; /* CALL nnnn */ M_CALL(1); break; case 0xce: i8085_ICount -= 7; /* ACI nn */ I.XX.b.l = ARG(); M_ADC(I.XX.b.l); break; case 0xcf: i8085_ICount -= 11; /* RST 1 */ M_RST(1); break; case 0xd0: i8085_ICount -= 5; /* RNC */ M_RET( !(I.AF.b.l & CF) ); break; case 0xd1: i8085_ICount -= 10; /* POP D */ M_POP(DE); break; case 0xd2: i8085_ICount -= 7; /* JNC nnnn */ M_JMP( !(I.AF.b.l & CF) ); break; case 0xd3: i8085_ICount -= 10; /* OUT nn */ M_OUT; break; case 0xd4: i8085_ICount -= 11; /* CNC nnnn */ M_CALL( !(I.AF.b.l & CF) ); break; case 0xd5: i8085_ICount -= 11; /* PUSH D */ M_PUSH(DE); break; case 0xd6: i8085_ICount -= 7; /* SUI nn */ I.XX.b.l = ARG(); M_SUB(I.XX.b.l); break; case 0xd7: i8085_ICount -= 11; /* RST 2 */ M_RST(2); break; case 0xd8: i8085_ICount -= 5; /* RC */ M_RET( I.AF.b.l & CF ); break; case 0xd9: if( I.cputype ) { i8085_ICount -= 10; /* SHLX */ I.XX.w.l = I.DE.w.l; WM(I.XX.d, I.HL.b.l); I.XX.w.l++; WM(I.XX.d, I.HL.b.h); } else { i8085_ICount -= 4; /* RET undocumented */ M_RET(1); } break; case 0xda: i8085_ICount -= 7; /* JC nnnn */ M_JMP( I.AF.b.l & CF ); break; case 0xdb: i8085_ICount -= 10; /* IN nn */ M_IN; break; case 0xdc: i8085_ICount -= 11; /* CC nnnn */ M_CALL( I.AF.b.l & CF ); break; case 0xdd: if( I.cputype ) { i8085_ICount -= 7; /* JNX nnnn */ M_JMP( !(I.AF.b.l & XF) ); } else { i8085_ICount -= 11; /* CALL nnnn undocumented */ M_CALL(1); } break; case 0xde: i8085_ICount -= 7; /* 
SBI nn */ I.XX.b.l = ARG(); M_SBB(I.XX.b.l); break; case 0xdf: i8085_ICount -= 11; /* RST 3 */ M_RST(3); break; case 0xe0: i8085_ICount -= 5; /* RPO */ M_RET( !(I.AF.b.l & VF) ); break; case 0xe1: i8085_ICount -= 10; /* POP H */ M_POP(HL); break; case 0xe2: i8085_ICount -= 7; /* JPO nnnn */ M_JMP( !(I.AF.b.l & VF) ); break; case 0xe3: i8085_ICount -= 18; /* XTHL */ M_POP(XX); M_PUSH(HL); I.HL.d = I.XX.d; break; case 0xe4: i8085_ICount -= 11; /* CPO nnnn */ M_CALL( !(I.AF.b.l & VF) ); break; case 0xe5: i8085_ICount -= 11; /* PUSH H */ M_PUSH(HL); break; case 0xe6: i8085_ICount -= 7; /* ANI nn */ I.XX.b.l = ARG(); M_ANA(I.XX.b.l); break; case 0xe7: i8085_ICount -= 11; /* RST 4 */ M_RST(4); break; case 0xe8: i8085_ICount -= 5; /* RPE */ M_RET( I.AF.b.l & VF ); break; case 0xe9: i8085_ICount -= 5; /* PCHL */ I.PC.d = I.HL.w.l; change_pc(I.PC.d); break; case 0xea: i8085_ICount -= 7; /* JPE nnnn */ M_JMP( I.AF.b.l & VF ); break; case 0xeb: i8085_ICount -= 4; /* XCHG */ I.XX.d = I.DE.d; I.DE.d = I.HL.d; I.HL.d = I.XX.d; break; case 0xec: i8085_ICount -= 11; /* CPE nnnn */ M_CALL( I.AF.b.l & VF ); break; case 0xed: if( I.cputype ) { i8085_ICount -= 10; /* LHLX */ I.XX.w.l = I.DE.w.l; I.HL.b.l = RM(I.XX.d); I.XX.w.l++; I.HL.b.h = RM(I.XX.d); } else { i8085_ICount -= 11; /* CALL nnnn undocumented */ M_CALL(1); } break; case 0xee: i8085_ICount -= 7; /* XRI nn */ I.XX.b.l = ARG(); M_XRA(I.XX.b.l); break; case 0xef: i8085_ICount -= 11; /* RST 5 */ M_RST(5); break; case 0xf0: i8085_ICount -= 5; /* RP */ M_RET( !(I.AF.b.l&SF) ); break; case 0xf1: i8085_ICount -= 10; /* POP A */ M_POP(AF); break; case 0xf2: i8085_ICount -= 7; /* JP nnnn */ M_JMP( !(I.AF.b.l & SF) ); break; case 0xf3: i8085_ICount -= 4; /* DI */ /* remove interrupt enable */ I.IM &= ~IM_IEN; break; case 0xf4: i8085_ICount -= 11; /* CP nnnn */ M_CALL( !(I.AF.b.l & SF) ); break; case 0xf5: i8085_ICount -= 11; /* PUSH A */ M_PUSH(AF); break; case 0xf6: i8085_ICount -= 7; /* ORI nn */ I.XX.b.l = ARG(); M_ORA(I.XX.b.l); 
break; case 0xf7: i8085_ICount -= 11; /* RST 6 */ M_RST(6); break; case 0xf8: i8085_ICount -= 5; /* RM */ M_RET( I.AF.b.l & SF ); break; case 0xf9: i8085_ICount -= 5; /* SPHL */ I.SP.d = I.HL.d; break; case 0xfa: i8085_ICount -= 7; /* JM nnnn */ M_JMP( I.AF.b.l & SF ); break; case 0xfb: i8085_ICount -= 4; /* EI */ /* set interrupt enable */ I.IM |= IM_IEN; /* remove serviced IRQ flag */ I.IREQ &= ~I.ISRV; /* reset serviced IRQ */ I.ISRV = 0; if( I.irq_state[0] != CLEAR_LINE ) { LOG(("i8085 EI sets INTR\n")); I.IREQ |= IM_INTR; I.INTR = I8085_INTR; } if( I.cputype ) { if( I.irq_state[1] != CLEAR_LINE ) { LOG(("i8085 EI sets RST5.5\n")); I.IREQ |= IM_RST55; } if( I.irq_state[2] != CLEAR_LINE ) { LOG(("i8085 EI sets RST6.5\n")); I.IREQ |= IM_RST65; } if( I.irq_state[3] != CLEAR_LINE ) { LOG(("i8085 EI sets RST7.5\n")); I.IREQ |= IM_RST75; } /* find highest priority IREQ flag with IM enabled and schedule for execution */ if( !(I.IM & IM_RST75) && (I.IREQ & IM_RST75) ) { I.ISRV = IM_RST75; I.IRQ2 = ADDR_RST75; } else if( !(I.IM & IM_RST65) && (I.IREQ & IM_RST65) ) { I.ISRV = IM_RST65; I.IRQ2 = ADDR_RST65; } else if( !(I.IM & IM_RST55) && (I.IREQ & IM_RST55) ) { I.ISRV = IM_RST55; I.IRQ2 = ADDR_RST55; } else if( !(I.IM & IM_INTR) && (I.IREQ & IM_INTR) ) { I.ISRV = IM_INTR; I.IRQ2 = I.INTR; } } else { if( !(I.IM & IM_INTR) && (I.IREQ & IM_INTR) ) { I.ISRV = IM_INTR; I.IRQ2 = I.INTR; } } break; case 0xfc: i8085_ICount -= 11; /* CM nnnn */ M_CALL( I.AF.b.l & SF ); break; case 0xfd: if( I.cputype ) { i8085_ICount -= 7; /* JX nnnn */ M_JMP( I.AF.b.l & XF ); } else { i8085_ICount -= 11; /* CALL nnnn undocumented */ M_CALL(1); } break; case 0xfe: i8085_ICount -= 7; /* CPI nn */ I.XX.b.l = ARG(); M_CMP(I.XX.b.l); break; case 0xff: i8085_ICount -= 11; /* RST 7 */ M_RST(7); break; } } static void Interrupt(void) { if( I.HALT ) /* if the CPU was halted */ { I.PC.w.l++; /* skip HALT instr */ I.HALT = 0; } //AT I.IREQ &= ~I.ISRV; // remove serviced IRQ flag RIM_IEN = 
(I.ISRV==IM_TRAP) ? I.IM & IM_IEN : 0; // latch general interrupt enable bit on TRAP or NMI //ZT I.IM &= ~IM_IEN; /* remove general interrupt enable bit */ if( I.ISRV == IM_INTR ) { LOG(("Interrupt get INTR vector\n")); I.IRQ1 = (I.irq_callback)(0); } if( I.cputype ) { if( I.ISRV == IM_RST55 ) { LOG(("Interrupt get RST5.5 vector\n")); //I.IRQ1 = (I.irq_callback)(1); I.irq_state[I8085_RST55_LINE] = CLEAR_LINE; //AT: processing RST5.5, reset interrupt line } if( I.ISRV == IM_RST65 ) { LOG(("Interrupt get RST6.5 vector\n")); //I.IRQ1 = (I.irq_callback)(2); I.irq_state[I8085_RST65_LINE] = CLEAR_LINE; //AT: processing RST6.5, reset interrupt line } if( I.ISRV == IM_RST75 ) { LOG(("Interrupt get RST7.5 vector\n")); //I.IRQ1 = (I.irq_callback)(3); I.irq_state[I8085_RST75_LINE] = CLEAR_LINE; //AT: processing RST7.5, reset interrupt line } } switch( I.IRQ1 & 0xff0000 ) { case 0xcd0000: /* CALL nnnn */ i8085_ICount -= 7; M_PUSH(PC); case 0xc30000: /* JMP nnnn */ i8085_ICount -= 10; I.PC.d = I.IRQ1 & 0xffff; change_pc(I.PC.d); break; default: switch( I.ISRV ) { case IM_TRAP: case IM_RST75: case IM_RST65: case IM_RST55: M_PUSH(PC); if (I.IRQ1 != (1 << I8085_RST75_LINE)) I.PC.d = I.IRQ1; else I.PC.d = 0x3c; change_pc(I.PC.d); break; default: LOG(("i8085 take int $%02x\n", I.IRQ1)); execute_one(I.IRQ1 & 0xff); } } } int i8085_execute(int cycles) { i8085_ICount = cycles; do { CALL_MAME_DEBUG; /* interrupts enabled or TRAP pending ? */ if ( (I.IM & IM_IEN) || (I.IREQ & IM_TRAP) ) { /* copy scheduled to executed interrupt request */ I.IRQ1 = I.IRQ2; /* reset scheduled interrupt request */ I.IRQ2 = 0; /* interrupt now ? */ if (I.IRQ1) Interrupt(); } /* here we go... 
*/ execute_one(ROP()); } while (i8085_ICount > 0); return cycles - i8085_ICount; } /**************************************************************************** * Initialise the various lookup tables used by the emulation code ****************************************************************************/ static void init_tables (void) { UINT8 zs; int i, p; for (i = 0; i < 256; i++) { zs = 0; if (i==0) zs |= ZF; if (i&128) zs |= SF; p = 0; if (i&1) ++p; if (i&2) ++p; if (i&4) ++p; if (i&8) ++p; if (i&16) ++p; if (i&32) ++p; if (i&64) ++p; if (i&128) ++p; ZS[i] = zs; ZSP[i] = zs | ((p&1) ? 0 : VF); } } /**************************************************************************** * Init the 8085 emulation ****************************************************************************/ static void i8085_init(int index, int clock, const void *config, int (*irqcallback)(int)) { init_tables(); I.cputype = 1; I.irq_callback = irqcallback; state_save_register_item("i8085", index, I.AF.w.l); state_save_register_item("i8085", index, I.BC.w.l); state_save_register_item("i8085", index, I.DE.w.l); state_save_register_item("i8085", index, I.HL.w.l); state_save_register_item("i8085", index, I.SP.w.l); state_save_register_item("i8085", index, I.PC.w.l); state_save_register_item("i8085", index, I.HALT); state_save_register_item("i8085", index, I.IM); state_save_register_item("i8085", index, I.IREQ); state_save_register_item("i8085", index, I.ISRV); state_save_register_item("i8085", index, I.INTR); state_save_register_item("i8085", index, I.IRQ2); state_save_register_item("i8085", index, I.IRQ1); state_save_register_item_array("i8085", index, I.irq_state); } /**************************************************************************** * Reset the 8085 emulation ****************************************************************************/ static void i8085_reset(void) { int (*save_irqcallback)(int); int cputype_bak = I.cputype; init_tables(); save_irqcallback = I.irq_callback; memset(&I, 
0, sizeof(i8085_Regs)); I.irq_callback = save_irqcallback; change_pc(I.PC.d); I.cputype = cputype_bak; } /**************************************************************************** * Shut down the CPU emulation ****************************************************************************/ static void i8085_exit(void) { /* nothing to do */ } /**************************************************************************** * Get the current 8085 context ****************************************************************************/ static void i8085_get_context(void *dst) { if( dst ) *(i8085_Regs*)dst = I; } /**************************************************************************** * Set the current 8085 context ****************************************************************************/ static void i8085_set_context(void *src) { if( src ) { I = *(i8085_Regs*)src; change_pc(I.PC.d); } } /****************************************************************************/ /* Set TRAP signal state */ /****************************************************************************/ static void i8085_set_TRAP(int state) { LOG(("i8085: TRAP %d\n", state)); if (state) { I.IREQ |= IM_TRAP; if( I.ISRV & IM_TRAP ) return; /* already servicing TRAP ? */ I.ISRV = IM_TRAP; /* service TRAP */ I.IRQ2 = ADDR_TRAP; } else { I.IREQ &= ~IM_TRAP; /* remove request for TRAP */ } } /****************************************************************************/ /* Set RST7.5 signal state */ /****************************************************************************/ static void i8085_set_RST75(int state) { LOG(("i8085: RST7.5 %d\n", state)); if( state ) { I.IREQ |= IM_RST75; /* request RST7.5 */ if( I.IM & IM_RST75 ) return; /* if masked, ignore it for now */ if( !I.ISRV ) /* if no higher priority IREQ is serviced */ { I.ISRV = IM_RST75; /* service RST7.5 */ I.IRQ2 = ADDR_RST75; } } /* RST7.5 is reset only by SIM or end of service routine ! 
*/ } /****************************************************************************/ /* Set RST6.5 signal state */ /****************************************************************************/ static void i8085_set_RST65(int state) { LOG(("i8085: RST6.5 %d\n", state)); if( state ) { I.IREQ |= IM_RST65; /* request RST6.5 */ if( I.IM & IM_RST65 ) return; /* if masked, ignore it for now */ if( !I.ISRV ) /* if no higher priority IREQ is serviced */ { I.ISRV = IM_RST65; /* service RST6.5 */ I.IRQ2 = ADDR_RST65; } } else { I.IREQ &= ~IM_RST65; /* remove request for RST6.5 */ } } /****************************************************************************/ /* Set RST5.5 signal state */ /****************************************************************************/ static void i8085_set_RST55(int state) { LOG(("i8085: RST5.5 %d\n", state)); if( state ) { I.IREQ |= IM_RST55; /* request RST5.5 */ if( I.IM & IM_RST55 ) return; /* if masked, ignore it for now */ if( !I.ISRV ) /* if no higher priority IREQ is serviced */ { I.ISRV = IM_RST55; /* service RST5.5 */ I.IRQ2 = ADDR_RST55; } } else { I.IREQ &= ~IM_RST55; /* remove request for RST5.5 */ } } /****************************************************************************/ /* Set INTR signal */ /****************************************************************************/ static void i8085_set_INTR(int state) { LOG(("i8085: INTR %d\n", state)); if( state ) { I.IREQ |= IM_INTR; /* request INTR */ //I.INTR = state; I.INTR = I8085_INTR; //AT: I.INTR is supposed to hold IRQ0 vector(0x38) (0xff in this implementation) if( I.IM & IM_INTR ) return; /* if masked, ignore it for now */ if( !I.ISRV ) /* if no higher priority IREQ is serviced */ { I.ISRV = IM_INTR; /* service INTR */ I.IRQ2 = I.INTR; } } else { I.IREQ &= ~IM_INTR; /* remove request for INTR */ } } static void i8085_set_irq_line(int irqline, int state) { if (irqline == INPUT_LINE_NMI) { if( state != CLEAR_LINE ) i8085_set_TRAP(1); } else if (irqline < 4) { 
I.irq_state[irqline] = state; if (state == CLEAR_LINE) { if( !(I.IM & IM_IEN) ) { switch (irqline) { case I8085_INTR_LINE: i8085_set_INTR(0); break; case I8085_RST55_LINE: i8085_set_RST55(0); break; case I8085_RST65_LINE: i8085_set_RST65(0); break; case I8085_RST75_LINE: i8085_set_RST75(0); break; } } } else { if( I.IM & IM_IEN ) { switch( irqline ) { case I8085_INTR_LINE: i8085_set_INTR(1); break; case I8085_RST55_LINE: i8085_set_RST55(1); break; case I8085_RST65_LINE: i8085_set_RST65(1); break; case I8085_RST75_LINE: i8085_set_RST75(1); break; } } } } } /************************************************************************** * 8080 section **************************************************************************/ #if (HAS_8080) void i8080_init(int index, int clock, const void *config, int (*irqcallback)(int)) { init_tables(); I.cputype = 0; I.irq_callback = irqcallback; state_save_register_item("i8080", index, I.AF.w.l); state_save_register_item("i8080", index, I.BC.w.l); state_save_register_item("i8080", index, I.DE.w.l); state_save_register_item("i8080", index, I.HL.w.l); state_save_register_item("i8080", index, I.SP.w.l); state_save_register_item("i8080", index, I.PC.w.l); state_save_register_item("i8080", index, I.HALT); state_save_register_item("i8085", index, I.IM); state_save_register_item("i8080", index, I.IREQ); state_save_register_item("i8080", index, I.ISRV); state_save_register_item("i8080", index, I.INTR); state_save_register_item("i8080", index, I.IRQ2); state_save_register_item("i8080", index, I.IRQ1); state_save_register_item_array("i8080", index, I.irq_state); } void i8080_set_irq_line(int irqline, int state) { if (irqline == INPUT_LINE_NMI) { if( state != CLEAR_LINE ) i8085_set_TRAP(1); } else { I.irq_state[irqline] = state; if (state == CLEAR_LINE) { if (!(I.IM & IM_IEN)) i8085_set_INTR(0); } else { if (I.IM & IM_IEN) i8085_set_INTR(1); } } } #endif /************************************************************************** * Generic 
set_info **************************************************************************/ static void i8085_set_info(UINT32 state, cpuinfo *info) { switch (state) { /* --- the following bits of info are set as 64-bit signed integers --- */ case CPUINFO_INT_INPUT_STATE + I8085_INTR_LINE: i8085_set_irq_line(I8085_INTR_LINE, info->i); break; case CPUINFO_INT_INPUT_STATE + I8085_RST55_LINE:i8085_set_irq_line(I8085_RST55_LINE, info->i); break; case CPUINFO_INT_INPUT_STATE + I8085_RST65_LINE:i8085_set_irq_line(I8085_RST65_LINE, info->i); break; case CPUINFO_INT_INPUT_STATE + I8085_RST75_LINE:i8085_set_irq_line(I8085_RST75_LINE, info->i); break; case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI: i8085_set_irq_line(INPUT_LINE_NMI, info->i); break; case CPUINFO_INT_PC: I.PC.w.l = info->i; change_pc(I.PC.d); break; case CPUINFO_INT_REGISTER + I8085_PC: I.PC.w.l = info->i; break; case CPUINFO_INT_SP: I.SP.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_SP: I.SP.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_AF: I.AF.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_BC: I.BC.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_DE: I.DE.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_HL: I.HL.w.l = info->i; break; case CPUINFO_INT_REGISTER + I8085_IM: I.IM = info->i; break; case CPUINFO_INT_REGISTER + I8085_HALT: I.HALT = info->i; break; case CPUINFO_INT_REGISTER + I8085_IREQ: I.IREQ = info->i; break; case CPUINFO_INT_REGISTER + I8085_ISRV: I.ISRV = info->i; break; case CPUINFO_INT_REGISTER + I8085_VECTOR: I.INTR = info->i; break; case CPUINFO_INT_I8085_SID: if (info->i) I.IM |= IM_SID; else I.IM &= ~IM_SID; break; /* --- the following bits of info are set as pointers to data or functions --- */ case CPUINFO_PTR_I8085_SOD_CALLBACK: I.sod_callback = (void (*)(int))info->f; break; } } /************************************************************************** * Generic get_info **************************************************************************/ 
/* MAME get_info entry point: reports static CPU properties (bus widths,
 * cycle bounds), live state (registers, IRQ line levels), the interface
 * function pointers, and human-readable register/flag strings. */
void i8085_get_info(UINT32 state, cpuinfo *info)
{
	switch (state)
	{
		/* --- the following bits of info are returned as 64-bit signed integers --- */
		case CPUINFO_INT_CONTEXT_SIZE:					info->i = sizeof(I);					break;
		case CPUINFO_INT_INPUT_LINES:					info->i = 4;							break;	/* INTR + RST5.5/6.5/7.5 */
		case CPUINFO_INT_DEFAULT_IRQ_VECTOR:			info->i = 0xff;							break;	/* RST 7 opcode */
		case CPUINFO_INT_ENDIANNESS:					info->i = CPU_IS_LE;					break;
		case CPUINFO_INT_CLOCK_DIVIDER:					info->i = 1;							break;
		case CPUINFO_INT_MIN_INSTRUCTION_BYTES:			info->i = 1;							break;
		case CPUINFO_INT_MAX_INSTRUCTION_BYTES:			info->i = 3;							break;
		case CPUINFO_INT_MIN_CYCLES:					info->i = 4;							break;
		case CPUINFO_INT_MAX_CYCLES:					info->i = 16;							break;

		case CPUINFO_INT_DATABUS_WIDTH + ADDRESS_SPACE_PROGRAM:	info->i = 8;	break;
		case CPUINFO_INT_ADDRBUS_WIDTH + ADDRESS_SPACE_PROGRAM: info->i = 16;	break;
		case CPUINFO_INT_ADDRBUS_SHIFT + ADDRESS_SPACE_PROGRAM: info->i = 0;	break;
		case CPUINFO_INT_DATABUS_WIDTH + ADDRESS_SPACE_DATA:	info->i = 0;	break;	/* no separate data space */
		case CPUINFO_INT_ADDRBUS_WIDTH + ADDRESS_SPACE_DATA:	info->i = 0;	break;
		case CPUINFO_INT_ADDRBUS_SHIFT + ADDRESS_SPACE_DATA:	info->i = 0;	break;
		case CPUINFO_INT_DATABUS_WIDTH + ADDRESS_SPACE_IO:		info->i = 8;	break;
		case CPUINFO_INT_ADDRBUS_WIDTH + ADDRESS_SPACE_IO:		info->i = 8;	break;	/* 256 I/O ports */
		case CPUINFO_INT_ADDRBUS_SHIFT + ADDRESS_SPACE_IO:		info->i = 0;	break;

		/* Line levels are derived from the pending-request mask IREQ. */
		case CPUINFO_INT_INPUT_STATE + I8085_INTR_LINE:	info->i = (I.IREQ & IM_INTR) ? ASSERT_LINE : CLEAR_LINE;	break;
		case CPUINFO_INT_INPUT_STATE + I8085_RST55_LINE:info->i = (I.IREQ & IM_RST55) ? ASSERT_LINE : CLEAR_LINE;	break;
		case CPUINFO_INT_INPUT_STATE + I8085_RST65_LINE:info->i = (I.IREQ & IM_RST65) ? ASSERT_LINE : CLEAR_LINE;	break;
		case CPUINFO_INT_INPUT_STATE + I8085_RST75_LINE:info->i = (I.IREQ & IM_RST75) ? ASSERT_LINE : CLEAR_LINE;	break;
		case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI:	info->i = (I.IREQ & IM_TRAP) ? ASSERT_LINE : CLEAR_LINE;	break;

		case CPUINFO_INT_PREVIOUSPC:					/* not supported */						break;
		case CPUINFO_INT_PC:							info->i = I.PC.d;						break;
		case CPUINFO_INT_REGISTER + I8085_PC:			info->i = I.PC.w.l;						break;
		case CPUINFO_INT_SP:							info->i = I.SP.d;						break;
		case CPUINFO_INT_REGISTER + I8085_SP:			info->i = I.SP.w.l;						break;
		case CPUINFO_INT_REGISTER + I8085_AF:			info->i = I.AF.w.l;						break;
		case CPUINFO_INT_REGISTER + I8085_BC:			info->i = I.BC.w.l;						break;
		case CPUINFO_INT_REGISTER + I8085_DE:			info->i = I.DE.w.l;						break;
		case CPUINFO_INT_REGISTER + I8085_HL:			info->i = I.HL.w.l;						break;
		case CPUINFO_INT_REGISTER + I8085_IM:			info->i = I.IM;							break;
		case CPUINFO_INT_REGISTER + I8085_HALT:			info->i = I.HALT;						break;
		case CPUINFO_INT_REGISTER + I8085_IREQ:			info->i = I.IREQ;						break;
		case CPUINFO_INT_REGISTER + I8085_ISRV:			info->i = I.ISRV;						break;
		case CPUINFO_INT_REGISTER + I8085_VECTOR:		info->i = I.INTR;						break;

		/* --- the following bits of info are returned as pointers to data or functions --- */
		case CPUINFO_PTR_SET_INFO:						info->setinfo = i8085_set_info;			break;
		case CPUINFO_PTR_GET_CONTEXT:					info->getcontext = i8085_get_context;	break;
		case CPUINFO_PTR_SET_CONTEXT:					info->setcontext = i8085_set_context;	break;
		case CPUINFO_PTR_INIT:							info->init = i8085_init;				break;
		case CPUINFO_PTR_RESET:							info->reset = i8085_reset;				break;
		case CPUINFO_PTR_EXIT:							info->exit = i8085_exit;				break;
		case CPUINFO_PTR_EXECUTE:						info->execute = i8085_execute;			break;
		case CPUINFO_PTR_BURN:							info->burn = NULL;						break;
#ifdef MAME_DEBUG
		case CPUINFO_PTR_DISASSEMBLE:					info->disassemble = i8085_dasm;			break;
#endif /* MAME_DEBUG */
		case CPUINFO_PTR_INSTRUCTION_COUNTER:			info->icount = &i8085_ICount;			break;

		/* --- the following bits of info are returned as NULL-terminated strings --- */
		case CPUINFO_STR_NAME:							strcpy(info->s, "8085A");				break;
		case CPUINFO_STR_CORE_FAMILY:					strcpy(info->s, "Intel 8080");			break;
		case CPUINFO_STR_CORE_VERSION:					strcpy(info->s, "1.1");					break;
		case CPUINFO_STR_CORE_FILE:						strcpy(info->s, __FILE__);				break;
		case CPUINFO_STR_CORE_CREDITS:					strcpy(info->s, "Copyright (c) 1999 Juergen Buchmueller, all rights reserved."); break;

		/* Flag string: one character per PSW bit, '.' when clear.
		 * NOTE(review): bits 0x20/0x08 are printed as '?' and 0x02 as 'N';
		 * on a real 8080/8085 these are undocumented/aux bits — the labels
		 * are display conventions only. */
		case CPUINFO_STR_FLAGS:
			sprintf(info->s, "%c%c%c%c%c%c%c%c",
				I.AF.b.l & 0x80 ? 'S':'.',
				I.AF.b.l & 0x40 ? 'Z':'.',
				I.AF.b.l & 0x20 ? '?':'.',
				I.AF.b.l & 0x10 ? 'H':'.',
				I.AF.b.l & 0x08 ? '?':'.',
				I.AF.b.l & 0x04 ? 'P':'.',
				I.AF.b.l & 0x02 ? 'N':'.',
				I.AF.b.l & 0x01 ? 'C':'.');
			break;

		case CPUINFO_STR_REGISTER + I8085_AF:			sprintf(info->s, "AF:%04X", I.AF.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_BC:			sprintf(info->s, "BC:%04X", I.BC.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_DE:			sprintf(info->s, "DE:%04X", I.DE.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_HL:			sprintf(info->s, "HL:%04X", I.HL.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_SP:			sprintf(info->s, "SP:%04X", I.SP.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_PC:			sprintf(info->s, "PC:%04X", I.PC.w.l);	break;
		case CPUINFO_STR_REGISTER + I8085_IM:			sprintf(info->s, "IM:%02X", I.IM);		break;
		case CPUINFO_STR_REGISTER + I8085_HALT:			sprintf(info->s, "HALT:%d", I.HALT);	break;
		case CPUINFO_STR_REGISTER + I8085_IREQ:			sprintf(info->s, "IREQ:%02X", I.IREQ);	break;
		case CPUINFO_STR_REGISTER + I8085_ISRV:			sprintf(info->s, "ISRV:%02X", I.ISRV);	break;
		case CPUINFO_STR_REGISTER + I8085_VECTOR:		sprintf(info->s, "VEC:%02X", I.INTR);	break;
	}
}

#if (HAS_8080)
/**************************************************************************
 * CPU-specific get_info/set_info
 **************************************************************************/
/* 8080 variant: handles its single INTR line and NMI itself, delegates
 * everything else to the 8085 implementation. */
static void i8080_set_info(UINT32 state, cpuinfo *info)
{
	switch (state)
	{
		/* --- the following bits of info are set as 64-bit signed integers --- */
		case CPUINFO_INT_INPUT_STATE + I8080_INTR_LINE:	i8080_set_irq_line(I8080_INTR_LINE, info->i);	break;
		case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI:	i8080_set_irq_line(INPUT_LINE_NMI, info->i);	break;

		default:										i8085_set_info(state, info);			break;
	}
}

/* 8080 variant of get_info: overrides line count, line states, the
 * set_info/init hooks and the display name; all other queries fall
 * through to i8085_get_info. */
void i8080_get_info(UINT32 state, cpuinfo *info)
{
	switch (state)
	{
		/* --- the following bits of info are returned as 64-bit signed integers --- */
		case CPUINFO_INT_INPUT_LINES:					info->i = 1;							break;	/* only INTR */
		case CPUINFO_INT_INPUT_STATE + I8085_INTR_LINE:	info->i = (I.IREQ & IM_INTR) ? ASSERT_LINE : CLEAR_LINE;	break;
		case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI:	info->i = (I.IREQ & IM_TRAP) ? ASSERT_LINE : CLEAR_LINE;	break;

		/* --- the following bits of info are returned as pointers to data or functions --- */
		case CPUINFO_PTR_SET_INFO:						info->setinfo = i8080_set_info;			break;
		case CPUINFO_PTR_INIT:							info->init = i8080_init;				break;

		/* --- the following bits of info are returned as NULL-terminated strings --- */
		case CPUINFO_STR_NAME:							strcpy(info->s, "8080");				break;

		default:										i8085_get_info(state, info);			break;
	}
}
#endif
/* ===== 804164.c — beginning of second concatenated source (librist udp.c) ===== */
/* librist. Copyright 2019-2020 SipRadius LLC. All right reserved.
 * Author: Daniele Lacamera <[email protected]>
 * Author: Kuldeep Singh Dhaka <[email protected]>
 * Author: Sergio Ammirata, Ph.D. <[email protected]>
 */

#include "udp-private.h"
#include "rist-private.h"
#include "aes.h"
#include "fastpbkdf2.h"
#include "crypto-private.h"
#include "log-private.h"
#include "socket-shim.h"
#include "endian-shim.h"
#include "lz4.h"
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>
#ifdef LINUX_CRYPTO
#include <linux-crypto.h>
#endif

/* Current time as a 64-bit NTP timestamp on the monotonic clock. */
uint64_t timestampNTP_u64(void)
{
	// We use clock_gettime instead of gettimeofday even though we only need microseconds
	// because gettimeofday implementation under linux is dependent on the kernel clock
	// and can produce duplicate times (too close to kernel timer)

	// We use the NTP time standard: rfc5905 (https://tools.ietf.org/html/rfc5905#section-6)
	// The 64-bit timestamps used by NTP consist of a 32-bit part for seconds
	// and a 32-bit part for fractional second, giving a time scale that rolls
	// over every 2^32 seconds (136 years) and a theoretical resolution of
	// 2^-32 seconds (~233 picoseconds). NTP uses an epoch of January 1, 1900.
	// Therefore, the first rollover occurs on February 7, 2036.
	timespec_t ts;
#ifdef __APPLE__
	clock_gettime_osx(&ts);
#else
	clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
	// Convert nanoseconds to 32-bit fraction (2^-32 second units, ~233 ps)
	uint64_t t = (uint64_t)(ts.tv_nsec) << 32;
	t /= 1000000000;
	// There is 70 years (incl. 17 leap ones) offset to the Unix Epoch.
	// No leap seconds during that period since they were not invented yet.
	t |= ((70LL * 365 + 17) * 24 * 60 * 60 + ts.tv_sec) << 32;
	return t; // nanoseconds (technically, 232.831 picosecond units)
}

/* Wall-clock (real-time) variant of the NTP timestamp. */
uint64_t timestampNTP_RTC_u64(void)
{
	timespec_t ts;
#ifdef __APPLE__
	clock_gettime_osx(&ts);
#elif defined _WIN32
	clock_gettime(CLOCK_MONOTONIC, &ts);
#else
	clock_gettime(CLOCK_REALTIME, &ts);
#endif
	// Convert nanoseconds to 32-bit fraction (2^-32 second units)
	uint64_t t = (uint64_t)(ts.tv_nsec) << 32;
	t /= 1000000000;
	// There is 70 years (incl. 17 leap ones) offset to the Unix Epoch.
	// No leap seconds during that period since they were not invented yet.
	// NOTE(review): unlike timestampNTP_u64() the seconds term is NOT shifted
	// into the upper 32 bits here — looks like a missing '<< 32'; confirm
	// against callers before changing.
	t |= (70LL * 365 + 17) * 24 * 60 * 60 + ts.tv_sec;
	return t;
}

/* Derive a 32-bit RTP timestamp from a 64-bit NTP time.
 * advanced == 0: scale to the 90 kHz MPEG-TS clock.
 * advanced != 0: take the middle 32 bits (a 65536 Hz clock). */
uint32_t timestampRTP_u32( int advanced, uint64_t i_ntp )
{
	if (!advanced) {
		i_ntp *= RTP_PTYPE_MPEGTS_CLOCKHZ;
		i_ntp = i_ntp >> 32;
		return (uint32_t)i_ntp;
	}
	else {
		// We just need the middle 32 bits, i.e. 65536Hz clock
		i_ntp = i_ntp >> 16;
		return (uint32_t)i_ntp;
	}
}

/* Reconstruct a 64-bit NTP time from an RTP timestamp; for the RIST payload
 * type the missing upper/lower 16 bits come from 'time_extension'. */
uint64_t convertRTPtoNTP(uint8_t ptype, uint32_t time_extension, uint32_t i_rtp)
{
	uint64_t i_ntp;
	if (ptype == RTP_PTYPE_RIST) {
		// Convert rtp to 64 bit and shift it 16 bits
		uint64_t part2 = (uint64_t)i_rtp;
		part2 = part2 << 16;
		// rebuild source_time (lower and upper 16 bits)
		uint64_t part3 = (uint64_t)(time_extension & 0xffff);
		uint64_t part1 = ((uint64_t)(time_extension & 0xffff0000)) << 32;
		i_ntp = part1 | part2 | part3;
		//fprintf(stderr,"source time %"PRIu64", rtp time %"PRIu32"\n", source_time, rtp_time);
	}
	else {
		int32_t clock = get_rtp_ts_clock(ptype);
		if (RIST_UNLIKELY(!clock)){
			clock = RTP_PTYPE_MPEGTS_CLOCKHZ;
			// Insert a new timestamp (not ideal but better than failing)
			// NOTE(review): htobe32() on a locally generated value here is
			// suspicious — verify the intended byte order of i_rtp.
			i_rtp = htobe32(timestampRTP_u32(0, timestampNTP_u64()));
		}
		i_ntp = (uint64_t)i_rtp << 32;
		i_ntp /= clock;
	}
	return i_ntp;
}

/* RTT in NTP units; 'delay' (microseconds of responder processing time)
 * is converted to NTP fraction and subtracted. */
uint64_t calculate_rtt_delay(uint64_t request, uint64_t response, uint32_t delay) {
	/* both request and response are NTP timestamps, delay is in microseconds */
	uint64_t rtt = response - request;
	if (RIST_UNLIKELY(delay))
		rtt -= (((uint64_t)delay) << 32)/1000000;
	return rtt;
}

/* Reclaim up to 10 expired entries from the sender retransmit queue.
 * An entry is deleted once it is older than sender_recover_min_time. */
void rist_clean_sender_enqueue(struct rist_sender *ctx)
{
	int delete_count = 1;
	// Delete old packets (max 10 entries per function call)
	while (delete_count++ < 10) {
		struct rist_buffer *b = ctx->sender_queue[ctx->sender_queue_delete_index];
		/* our buffer size is zero, it must be just building up */
		if ((size_t)atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire) == ctx->sender_queue_delete_index) {
			break;
		}
		size_t safety_counter = 0;
		while (!b && ((ctx->sender_queue_delete_index + 1)& (ctx->sender_queue_max -1)) != atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire)) {
			ctx->sender_queue_delete_index = (ctx->sender_queue_delete_index + 1)& (ctx->sender_queue_max -1);
			// This should never happen!
			rist_log_priv(&ctx->common, RIST_LOG_ERROR,
				"Moving delete index to %zu\n",
				ctx->sender_queue_delete_index);
			b = ctx->sender_queue[ctx->sender_queue_delete_index];
			if (safety_counter++ > 1000)
				return;
		}

		/* FIX: the scan above can terminate with b still NULL (when the
		 * delete index catches up to the write index); dereferencing
		 * b->time below would then crash. */
		if (!b)
			break;

		/* perform the deletion based on the buffer size plus twice the configured/measured avg_rtt */
		uint64_t delay = (timestampNTP_u64() - b->time) / RIST_CLOCK;
		if (delay < ctx->sender_recover_min_time) {
			break;
		}

		//rist_log_priv(&ctx->common, RIST_LOG_WARN,
		//		"\tDeleting %"PRIu32" (%zu bytes) after %"PRIu64" (%zu) ms\n",
		//		b->seq, b->size, delay, ctx->sender_recover_min_time);

		/* now delete it */
		ctx->sender_queue_bytesize -= b->size;
		free_rist_buffer(&ctx->common, b);
		ctx->sender_queue[ctx->sender_queue_delete_index] = NULL;
		ctx->sender_queue_delete_index = (ctx->sender_queue_delete_index + 1)& (ctx->sender_queue_max -1);
	}
}

/* Build a 32-bit value from four rand() bytes (not cryptographically strong). */
static uint32_t rand_u32(void)
{
	uint32_t u32;
	uint8_t *u8 = (void *) &u32;

	for (size_t i = 0; i < sizeof(u32); i++) {
		u8[i] = rand() % 256;
	}

	return u32;
}

/* Rotate the GRE nonce / derived AES key when missing, over-used, or past
 * the configured rotation interval.  (Definition continues past this chunk.) */
static void _ensure_key_is_valid(struct rist_key *key, struct rist_peer *peer)
{
	RIST_MARK_UNUSED(peer);
	bool new_nonce = false;

	if (!key->gre_nonce) {
		// Generate new nonce as we do not have any
		new_nonce = true;
	} else if
(key->used_times > RIST_AES_KEY_REUSE_TIMES) { // Key can only be used upto certain times new_nonce = true; } else if (key->key_rotation > 0 && key->used_times >= key->key_rotation) { // custom rotation new_nonce = true; } if (new_nonce) { do { key->gre_nonce = rand_u32(); } while (!key->gre_nonce); key->used_times = 0; // The nonce MUST be fed to the function in network byte order uint32_t nonce_be = be32toh(key->gre_nonce); uint8_t aes_key[256 / 8]; fastpbkdf2_hmac_sha256( (const void *) key->password, strlen(key->password), (const void *) &nonce_be, sizeof(nonce_be), RIST_PBKDF2_HMAC_SHA256_ITERATIONS, aes_key, key->key_size / 8); /* int i=0; fprintf(stderr, "KEY: nonce %"PRIu32", size %d, pwd=%s : ", key->gre_nonce, key->key_size, key->password); while (i < key->key_size/8) { fprintf(stderr, "%02X ",(int)aes_key[i]); i++; } fprintf(stderr, "\n"); */ #ifndef LINUX_CRYPTO aes_key_setup(aes_key, key->aes_key_sched, key->key_size); #else if (peer->cryptoctx) linux_crypto_set_key(aes_key, key->key_size/8, peer->cryptoctx); else aes_key_setup(aes_key, key->aes_key_sched, key->key_size); #endif } } size_t rist_send_seq_rtcp(struct rist_peer *p, uint32_t seq, uint16_t seq_rtp, uint8_t payload_type, uint8_t *payload, size_t payload_len, uint64_t source_time, uint16_t src_port, uint16_t dst_port) { struct rist_common_ctx *ctx = get_cctx(p); struct rist_key *k = &p->key_secret; uint8_t *data; size_t len, gre_len; size_t hdr_len = 0; ssize_t ret = 0; /* Our encryption and compression operations directly modify the payload buffer we receive as a pointer so we create a local pointer that points to the payload pointer, if we would either encrypt or compress we instead malloc and mempcy, to ensure our source stays clean. 
We only do this with RAW data as these buffers are the only assumed to be reused by retransmits */ uint8_t *_payload = NULL; bool compressed = false; bool retry = false; bool modifyingbuffer = (ctx->profile > RIST_PROFILE_SIMPLE && payload_type == RIST_PAYLOAD_TYPE_DATA_RAW && (k->key_size || p->compression)); assert(payload != NULL); if (modifyingbuffer) { _payload = malloc(payload_len + RIST_MAX_PAYLOAD_OFFSET); _payload = _payload + RIST_MAX_PAYLOAD_OFFSET; memcpy(_payload, payload, payload_len); } else { _payload = payload; } //if (p->receiver_mode) // rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Sending seq %"PRIu32" and rtp_seq %"PRIu16" payload is %d\n", // seq, seq_rtp, payload_type); //else // rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Sending seq %"PRIu32" and idx is %zu/%zu/%zu (read/write/delete) and payload is %d\n", // seq, p->sender_ctx->sender_queue_read_index, // p->sender_ctx->sender_queue_write_index, // p->sender_ctx->sender_queue_delete_index, // payload_type); // TODO: write directly on the payload to make it faster uint8_t header_buf[RIST_MAX_HEADER_SIZE] = {0}; if (k->key_size) { gre_len = sizeof(struct rist_gre_key_seq); } else { gre_len = sizeof(struct rist_gre_seq); } uint16_t proto_type; if (RIST_UNLIKELY(payload_type == RIST_PAYLOAD_TYPE_DATA_OOB)) { proto_type = RIST_GRE_PROTOCOL_TYPE_FULL; } else { proto_type = RIST_GRE_PROTOCOL_TYPE_REDUCED; struct rist_protocol_hdr *hdr = (void *) (header_buf + gre_len); hdr->src_port = htobe16(src_port); hdr->dst_port = htobe16(dst_port); if (payload_type == RIST_PAYLOAD_TYPE_RTCP || payload_type == RIST_PAYLOAD_TYPE_RTCP_NACK) { hdr_len = RIST_GRE_PROTOCOL_REDUCED_SIZE; } else { hdr_len = sizeof(*hdr); // RTP header for data packets hdr->rtp.flags = RTP_MPEGTS_FLAGS; hdr->rtp.ssrc = htobe32(p->adv_flow_id); hdr->rtp.seq = htobe16(seq_rtp); if ((seq + 1) != ctx->seq) { // This is a retransmission //rist_log_priv(&ctx->common, RIST_LOG_ERROR, "\tResending: %"PRIu32"/%"PRIu16"/%"PRIu32"\n", seq, 
seq_rtp, ctx->seq); /* Mark SSID for retransmission (change the last bit of the ssrc to 1) */ //hdr->rtp.ssrc |= (1 << 31); hdr->rtp.ssrc = htobe32(p->adv_flow_id | 0x01); retry = true; } if (ctx->profile == RIST_PROFILE_ADVANCED) { hdr->rtp.payload_type = RTP_PTYPE_RIST; hdr->rtp.ts = htobe32(timestampRTP_u32(1, source_time)); } else { hdr->rtp.payload_type = RTP_PTYPE_MPEGTS; if (!ctx->birthtime_rtp_offset) { // Force a 32bit timestamp wrap-around 60 seconds after startup. It will break // crappy implementations and/or will guarantee 13 hours of clean stream. ctx->birthtime_rtp_offset = UINT32_MAX - timestampRTP_u32(0, source_time) - (90000*60); } hdr->rtp.ts = htobe32(ctx->birthtime_rtp_offset + timestampRTP_u32(0, source_time)); } } // copy the rtp header data (needed for encryption) memcpy(_payload - hdr_len, hdr, hdr_len); } if (ctx->profile > RIST_PROFILE_SIMPLE) { /* Compress the data packets */ if (p->compression) { int clen; void *cbuf = ctx->buf.dec; clen = LZ4_compress_default((const char *)_payload, cbuf, (int)payload_len, RIST_MAX_PACKET_SIZE); if (clen < 0) { rist_log_priv(ctx, RIST_LOG_ERROR, "Compression failed (%d), not sending\n", clen); } else { if ((size_t)clen < payload_len) { payload_len = clen; _payload = cbuf; compressed = true; } else { //msg(receiver_id, ctx->id, DEBUG, // "compressed %d to %lu\n", len, compressed_len); // Use origin data AS IS becauce compression bloated it } } } /* Encrypt everything except GRE */ if (k->key_size) { _ensure_key_is_valid(k, p); // Prepare GRE header struct rist_gre_key_seq *gre_key_seq = (void *) header_buf; SET_BIT(gre_key_seq->flags1, 7); // set checksum bit SET_BIT(gre_key_seq->flags1, 5); // set key flag SET_BIT(gre_key_seq->flags1, 4); // set seq bit if (ctx->profile == RIST_PROFILE_ADVANCED) { SET_BIT(gre_key_seq->flags2, 0); // set advanced protocol identifier if (compressed) SET_BIT(gre_key_seq->flags1, 3); // set compression bit if (retry) SET_BIT(gre_key_seq->flags1, 2); // set retry bit // 
TODO: implement fragmentation and fill in this data // (fragmentation to be done at API data entry point) uint8_t fragment_final = 0; uint8_t fragment_number = 0; if (CHECK_BIT(fragment_final, 0)) SET_BIT(gre_key_seq->flags1, 1); // fragment_number (max is 64) if (CHECK_BIT(fragment_number, 0)) SET_BIT(gre_key_seq->flags1, 0); if (CHECK_BIT(fragment_number, 1)) SET_BIT(gre_key_seq->flags2, 7); if (CHECK_BIT(fragment_number, 2)) SET_BIT(gre_key_seq->flags2, 6); if (CHECK_BIT(fragment_number, 3)) SET_BIT(gre_key_seq->flags2, 5); if (CHECK_BIT(fragment_number, 4)) SET_BIT(gre_key_seq->flags2, 4); if (CHECK_BIT(fragment_number, 5)) SET_BIT(gre_key_seq->flags2, 3); //SET_BIT(gre_key_seq->flags2, 2) is free for future use (version) //SET_BIT(gre_key_seq->flags2, 1) is free for future use (version) } gre_key_seq->prot_type = htobe16(proto_type); gre_key_seq->checksum_reserved1 = htobe32((uint32_t)(source_time >> 32)); gre_key_seq->nonce = htobe32(k->gre_nonce); gre_key_seq->seq = htobe32(seq); /* Prepare AES IV */ uint8_t IV[AES_BLOCK_SIZE]; // The byte array needs to be zeroes and then the seq in network byte order uint32_t seq_be = gre_key_seq->seq; memset(IV, 0, 12); memcpy(IV + 12, &seq_be, sizeof(seq_be)); // Encrypt everything other than GRE k->used_times++; /* int i=0; fprintf(stderr, "IV: seq %"PRIu32"(%d): ", seq, k->key_size); while (i < sizeof(IV)) { fprintf(stderr, "%02X ",(int)IV[i]); i++; } fprintf(stderr, "\n"); */ #ifndef LINUX_CRYPTO aes_encrypt_ctr((const void *) (_payload - hdr_len), hdr_len + payload_len, (void *) (_payload - hdr_len), k->aes_key_sched, k->key_size, IV); #else if (p->cryptoctx) linux_crypto_encrypt((void *) (_payload - hdr_len), (int)(hdr_len + payload_len), IV, p->cryptoctx); else aes_encrypt_ctr((const void *) (_payload - hdr_len), hdr_len + payload_len, (void *) (_payload - hdr_len), k->aes_key_sched, k->key_size, IV); #endif } else { struct rist_gre_seq *gre_seq = (struct rist_gre_seq *) header_buf; SET_BIT(gre_seq->flags1, 7); // 
set checksum bit SET_BIT(gre_seq->flags1, 4); // set seq bit if (ctx->profile == RIST_PROFILE_ADVANCED) { SET_BIT(gre_seq->flags2, 0); // set advanced protocol identifier if (compressed) SET_BIT(gre_seq->flags1, 3); // set compression bit if (retry) SET_BIT(gre_seq->flags1, 2); // set retry bit uint8_t fragment_final = 0; uint8_t fragment_number = 0; if (CHECK_BIT(fragment_final, 0)) SET_BIT(gre_seq->flags1, 1); if (CHECK_BIT(fragment_number, 0)) SET_BIT(gre_seq->flags1, 0); if (CHECK_BIT(fragment_number, 1)) SET_BIT(gre_seq->flags2, 7); if (CHECK_BIT(fragment_number, 2)) SET_BIT(gre_seq->flags2, 6); if (CHECK_BIT(fragment_number, 3)) SET_BIT(gre_seq->flags2, 5); if (CHECK_BIT(fragment_number, 4)) SET_BIT(gre_seq->flags2, 4); if (CHECK_BIT(fragment_number, 5)) SET_BIT(gre_seq->flags2, 3); } gre_seq->prot_type = htobe16(proto_type); gre_seq->checksum_reserved1 = htobe32((uint32_t)(source_time >> 32)); gre_seq->seq = htobe32(seq); } // now copy the GRE header data len = gre_len + hdr_len + payload_len; data = _payload - gre_len - hdr_len; memcpy(data, header_buf, gre_len); } else { len = hdr_len + payload_len - RIST_GRE_PROTOCOL_REDUCED_SIZE; data = _payload - hdr_len + RIST_GRE_PROTOCOL_REDUCED_SIZE; } // TODO: compare p->sender_ctx->sender_queue_read_index and p->sender_ctx->sender_queue_write_index // and warn when the difference is a multiple of 10 (slow CPU or overtaxed algortihm) // The difference should always stay very low < 10 if (RIST_UNLIKELY((p->sender_ctx && p->sender_ctx->simulate_loss) || (p->receiver_ctx && p->receiver_ctx->simulate_loss))) { uint16_t loss_percentage = p->sender_ctx? 
p->sender_ctx->loss_percentage : p->receiver_ctx->loss_percentage; /* very crude calculation to see if we "randomly" drop packets, good enough for testing */ uint16_t compare = rand() % 1001; if (compare <= loss_percentage) { ret = len; goto out; } } ret = sendto(p->sd,(const char*)data, len, MSG_DONTWAIT, &(p->u.address), p->address_len); out: if (RIST_UNLIKELY(ret <= 0)) { rist_log_priv(ctx, RIST_LOG_ERROR, "\tSend failed: errno=%d, ret=%d, socket=%d\n", errno, ret, p->sd); } else { rist_calculate_bitrate_sender(len, &p->bw); p->stats_sender_instant.sent++; p->stats_receiver_instant.sent++; } if (modifyingbuffer) { free(_payload - RIST_MAX_PAYLOAD_OFFSET); } return ret; } /* This function is used by receiver for all and by sender only for rist-data and oob-data */ int rist_send_common_rtcp(struct rist_peer *p, uint8_t payload_type, uint8_t *payload, size_t payload_len, uint64_t source_time, uint16_t src_port, uint16_t dst_port, uint32_t seq_gre, uint32_t seq_rtp) { // This can only and will most likely be zero for data packets. RTCP should always have a value. assert(payload_type != RIST_PAYLOAD_TYPE_DATA_RAW && payload_type != RIST_PAYLOAD_TYPE_DATA_OOB ? dst_port != 0 : 1); if (dst_port == 0) dst_port = p->config.virt_dst_port; if (src_port == 0) src_port = 32768 + p->adv_peer_id; if (p->sd < 0 || !p->address_len) { rist_log_priv(get_cctx(p), RIST_LOG_ERROR, "rist_send_common_rtcp failed\n"); return -1; } if (RIST_UNLIKELY(p->config.timing_mode == RIST_TIMING_MODE_ARRIVAL) && !p->receiver_mode) source_time = timestampNTP_u64(); size_t ret = rist_send_seq_rtcp(p, seq_gre, (uint16_t)seq_rtp, payload_type, payload, payload_len, source_time, src_port, dst_port); if ((!p->compression && ret < payload_len) || ret <= 0) { if (p->address_family == AF_INET6) { // TODO: print IP and port (and error number?) 
rist_log_priv(get_cctx(p), RIST_LOG_ERROR, "\tError on transmission sendto for seq #%"PRIu32"\n", seq_gre); } else { struct sockaddr_in *sin4 = (struct sockaddr_in *)&p->u.address; unsigned char *ip = (unsigned char *)&sin4->sin_addr.s_addr; rist_log_priv(get_cctx(p), RIST_LOG_ERROR, "\tError on transmission sendto, ret=%d to %d.%d.%d.%d:%d/%d, seq #%"PRIu32", %d bytes\n", ret, ip[0], ip[1], ip[2], ip[3], htons(sin4->sin_port), p->local_port, seq_gre, payload_len); } } // TODO: // This should return something meaningful, however ret is always >= 0 by virtue of being unsigned. /*if (ret >= 0) * return 0; * else * return -1; */ return 0; } int rist_set_url(struct rist_peer *peer) { char host[512]; uint16_t port; int local; if (!peer->url) { if (peer->local_port > 0) { /* Put sender in IPv4 learning mode */ peer->address_family = AF_INET; peer->address_len = sizeof(struct sockaddr_in); memset(&peer->u.address, 0, sizeof(struct sockaddr_in)); rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Sender: in learning mode\n"); } return 1; } if (udpsocket_parse_url(peer->url, host, 512, &port, &local) != 0) { rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "%s / %s\n", strerror(errno), peer->url); return -1; } else { rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "URL parsed successfully: Host %s, Port %hu\n", (char *) host, port); } if (udpsocket_resolve_host(host, port, &peer->u.address) < 0) { rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "Host %s cannot be resolved\n", (char *) host); return -1; } if (peer->u.inaddr6.sin6_family == AF_INET6) { peer->address_family = AF_INET6; peer->address_len = sizeof(struct sockaddr_in6); } else { peer->address_family = AF_INET; peer->address_len = sizeof(struct sockaddr_in); } if (local) { peer->listening = 1; peer->local_port = port; } else { peer->listening = 0; peer->remote_port = port; } if (peer->address_family == AF_INET) { peer->u.inaddr.sin_port = htons(port); } else { peer->u.inaddr6.sin6_port = htons(port); } return 0; } void 
rist_populate_cname(struct rist_peer *peer) { int fd = peer->sd; char *identifier = peer->cname; struct rist_common_ctx *ctx = get_cctx(peer); if (strlen((char *)ctx->cname) != 0) { strncpy(identifier, (char * )ctx->cname, RIST_MAX_HOSTNAME); return; } /* Set the CNAME Identifier as host@ip:port and fallback to hostname if needed */ char hostname[RIST_MAX_HOSTNAME]; struct sockaddr_storage peer_sockaddr; peer_sockaddr.ss_family = AF_UNSPEC; int name_length = 0; socklen_t peer_socklen = sizeof(peer_sockaddr); int ret_hostname = gethostname(hostname, RIST_MAX_HOSTNAME); if (ret_hostname == -1) { snprintf(hostname, RIST_MAX_HOSTNAME, "UnknownHost"); } int ret_sockname = getsockname(fd, (struct sockaddr *)&peer_sockaddr, &peer_socklen); if (ret_sockname == 0) { struct sockaddr *xsa = (struct sockaddr *)&peer_sockaddr; // TODO: why is this returning non-sense? if (xsa->sa_family == AF_INET) { struct sockaddr_in *xin = (struct sockaddr_in*)&peer_sockaddr; char *addr = inet_ntoa(xin->sin_addr); if (strcmp(addr, "0.0.0.0") != 0) { name_length = snprintf(identifier, RIST_MAX_HOSTNAME, "%s@%s:%u", hostname, addr, ntohs(xin->sin_port)); if (name_length >= RIST_MAX_HOSTNAME) identifier[RIST_MAX_HOSTNAME-1] = 0; } }/* else if (xsa->sa_family == AF_INET6) { struct sockaddr_in6 *xin6 = (void*)peer; char str[INET6_ADDRSTRLEN]; inet_ntop(xin6->sin6_family, &xin6->sin6_addr, str, sizeof(struct in6_addr)); name_length = snprintf(identifier, RIST_MAX_HOSTNAME, "%s@%s:%u", hostname, str, ntohs(xin6->sin6_port)); if (name_length >= RIST_MAX_HOSTNAME) identifier[RIST_MAX_HOSTNAME-1] = 0; }*/ } if (name_length == 0) { name_length = snprintf(identifier, RIST_MAX_HOSTNAME, "%s", hostname); if (name_length >= RIST_MAX_HOSTNAME) identifier[RIST_MAX_HOSTNAME-1] = 0; } } void rist_create_socket(struct rist_peer *peer) { if(!peer->address_family && rist_set_url(peer)) { return; } if (peer->local_port) { const char* host; uint16_t port; char buffer[256]; if (peer->u.address.sa_family == AF_INET) 
{ struct sockaddr_in *addrv4 = (struct sockaddr_in *)&(peer->u); host = inet_ntop(AF_INET, &(addrv4->sin_addr), buffer, sizeof(buffer)); port = htons(addrv4->sin_port); } else { struct sockaddr_in6 *addrv6 = (struct sockaddr_in6 *)&(peer->u); host = inet_ntop(AF_INET6, &(addrv6->sin6_addr), buffer, sizeof(buffer)); port = htons(addrv6->sin6_port); } if (!host) { rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "failed to convert address to string (errno=%d)", errno); return; } peer->sd = udpsocket_open_bind(host, port, &peer->miface[0]); if (peer->sd > 0) { rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Starting in URL listening mode (socket# %d)\n", peer->sd); } else { rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "Could not start in URL listening mode. %s\n", strerror(errno)); } } else { // We use sendto ... so, no need to connect directly here peer->sd = udpsocket_open(peer->address_family); // TODO : set max hops if (peer->sd > 0) rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Starting in URL connect mode (%d)\n", peer->sd); else { rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "Could not start in URL connect mode. %s\n", strerror(errno)); } peer->local_port = 32768 + (get_cctx(peer)->peer_counter % 28232); } udpsocket_set_nonblocking(peer->sd); // Increase default OS udp receive buffer size if (udpsocket_set_optimal_buffer_size(peer->sd)) { rist_log_priv(get_cctx(peer), RIST_LOG_WARN, "Unable to set the socket receive buffer size to %d Bytes. %s\n", UDPSOCKET_SOCK_BUFSIZE, strerror(errno)); } else { uint32_t current_recvbuf = udpsocket_get_buffer_size(peer->sd); rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Configured the starting socket receive buffer size to %d Bytes.\n", current_recvbuf); } // Increase default OS udp send buffer size if (udpsocket_set_optimal_buffer_send_size(peer->sd)) { rist_log_priv(get_cctx(peer), RIST_LOG_WARN, "Unable to set the socket send buffer size to %d Bytes. 
%s\n", UDPSOCKET_SOCK_BUFSIZE, strerror(errno)); } else { uint32_t current_sendbuf = udpsocket_get_buffer_send_size(peer->sd); rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Configured the starting socket send buffer size to %d Bytes.\n", current_sendbuf); } if (peer->cname[0] == 0) rist_populate_cname(peer); rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "Peer cname is %s\n", peer->cname); } static inline void rist_rtcp_write_empty_rr(uint8_t *buf, int *offset, const uint32_t flow_id) { struct rist_rtcp_rr_empty_pkt *rr = (struct rist_rtcp_rr_empty_pkt *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset); *offset += sizeof(struct rist_rtcp_rr_empty_pkt); rr->rtcp.flags = RTCP_SR_FLAGS; rr->rtcp.ptype = PTYPE_RR; rr->rtcp.ssrc = htobe32(flow_id); rr->rtcp.len = htons(1); } static inline void rist_rtcp_write_rr(uint8_t *buf, int *offset, const struct rist_peer *peer) { struct rist_rtcp_rr_pkt *rr = (struct rist_rtcp_rr_pkt *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset); *offset += sizeof(struct rist_rtcp_rr_pkt); rr->rtcp.flags = RTCP_RR_FULL_FLAGS; rr->rtcp.ptype = PTYPE_RR; rr->rtcp.ssrc = htobe32(peer->adv_flow_id); rr->rtcp.len = htons(7); /* TODO fix these variables */ rr->fraction_lost = 0; rr->cumulative_pkt_loss_msb = 0; rr->cumulative_pkt_loss_lshw = 0; rr->highest_seq = 0; rr->jitter = 0; rr->lsr = htobe32((uint32_t)(peer->last_sender_report_time >> 16)); /* expressed in units of 1/65536 == middle 16 bits?!? 
 */
	/* DLSR: delay since last SR, also in 1/65536-second units (drop low 16 bits of NTP delta) */
	rr->dlsr = htobe32((uint32_t)((timestampNTP_u64() - peer->last_sender_report_ts) >> 16));
}

/* Append an RTCP Sender Report (SR) at buf + RIST_MAX_PAYLOAD_OFFSET + *offset
 * and advance *offset past it. Also records the send time on the peer
 * (last_sender_report_time/_ts) so the receiver's RR LSR/DLSR math can use it. */
static inline void rist_rtcp_write_sr(uint8_t *buf, int *offset, struct rist_peer *peer)
{
	struct rist_rtcp_sr_pkt *sr = (struct rist_rtcp_sr_pkt *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset);
	*offset += sizeof(struct rist_rtcp_sr_pkt);
	/* Populate SR for sender */
	sr->rtcp.flags = RTCP_SR_FLAGS;
	sr->rtcp.ptype = PTYPE_SR;
	sr->rtcp.ssrc = htobe32(peer->adv_flow_id);
	sr->rtcp.len = htons(6); /* length in 32-bit words minus one, per RTCP convention */
	uint64_t now = timestampNTP_u64();
	uint64_t now_rtc = timestampNTP_RTC_u64();
	/* Remember when (and with which wall-clock NTP stamp) this SR went out */
	peer->last_sender_report_time = now_rtc;
	peer->last_sender_report_ts = now;
	uint32_t ntp_lsw = (uint32_t)now_rtc;
	// There is 70 years (incl. 17 leap ones) offset to the Unix Epoch.
	// No leap seconds during that period since they were not invented yet.
	uint32_t ntp_msw = now_rtc >> 32;
	sr->ntp_msw = htobe32(ntp_msw);
	sr->ntp_lsw = htobe32(ntp_lsw);
	struct rist_common_ctx *ctx = get_cctx(peer);
	int advanced = ctx->profile == RIST_PROFILE_ADVANCED ?
		1 : 0;
	sr->rtp_ts = htobe32(timestampRTP_u32(advanced, now));
	/* Sender packet/byte counters are not tracked here (TODO upstream) */
	sr->sender_pkts = 0;  //htonl(f->packets_count);
	sr->sender_bytes = 0; //htonl(f->bytes_count);
}

/* Append an RTCP SDES packet carrying a single CNAME item ("name") for flow_id,
 * advancing *offset by the 32-bit-aligned packet size. */
static inline void rist_rtcp_write_sdes(uint8_t *buf, int *offset, const char *name, const uint32_t flow_id)
{
	size_t namelen = strlen(name);
	/* 10 fixed header/item bytes + name + NUL terminator, rounded up to a 4-byte boundary */
	size_t sdes_size = ((10 + namelen + 1) + 3) & ~3;
	size_t padding = sdes_size - namelen - 10;
	struct rist_rtcp_sdes_pkt *sdes = (struct rist_rtcp_sdes_pkt *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset);
	*offset += sdes_size;
	/* Populate SDES for sender description */
	sdes->rtcp.flags = RTCP_SDES_FLAGS;
	sdes->rtcp.ptype = PTYPE_SDES;
	sdes->rtcp.len = htons((uint16_t)((sdes_size - 1) >> 2));
	sdes->rtcp.ssrc = htobe32(flow_id);
	sdes->cname = 1; /* SDES item type 1 == CNAME */
	sdes->name_len = (uint8_t)namelen;
	// We copy the extra padding bytes from the source because it is a preallocated buffer
	// of size 128 with all zeroes
	memcpy(sdes->udn, name, namelen + padding);
}

/* Append a RIST-extension echo request (app-defined RTCP, name "RIST")
 * stamped with the current NTP time; the peer echoes it back for RTT measurement. */
static inline void rist_rtcp_write_echoreq(uint8_t *buf, int *offset, const uint32_t flow_id)
{
	struct rist_rtcp_echoext *echo = (struct rist_rtcp_echoext *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset);
	*offset += sizeof(struct rist_rtcp_echoext);
	echo->flags = RTCP_ECHOEXT_REQ_FLAGS;
	echo->ptype = PTYPE_NACK_CUSTOM;
	echo->ssrc = htobe32(flow_id);
	echo->len = htons(5);
	memcpy(echo->name, "RIST", 4);
	uint64_t now = timestampNTP_u64();
	echo->ntp_msw = htobe32((uint32_t)(now >> 32));
	/* NOTE(review): mask literal has 9 leading zero digits but is just 0xFFFFFFFF;
	 * the (uint32_t) cast already truncates, so the mask is redundant but harmless */
	echo->ntp_lsw = htobe32((uint32_t)(now & 0x000000000FFFFFFFF));
}

/* Append a RIST-extension echo response, copying the originator's request
 * timestamp back so the requester can compute RTT. */
static inline void rist_rtcp_write_echoresp(uint8_t *buf,int *offset, const uint64_t request_time, const uint32_t flow_id)
{
	struct rist_rtcp_echoext *echo = (struct rist_rtcp_echoext *)(buf + RIST_MAX_PAYLOAD_OFFSET + *offset);
	*offset += sizeof(struct rist_rtcp_echoext);
	echo->flags = RTCP_ECHOEXT_RESP_FLAGS;
	echo->ptype = PTYPE_NACK_CUSTOM;
	echo->len = htons(5);
	echo->ssrc = htobe32(flow_id);
	memcpy(echo->name, "RIST", 4);
	echo->ntp_msw = htobe32((uint32_t)(request_time >> 32));
	echo->ntp_lsw =
htobe32((uint32_t)(request_time & 0x000000000FFFFFFFF));
	echo->delay = 0;
}

/* Build and send the receiver's periodic RTCP compound packet:
 * full Receiver Report + SDES(CNAME) + echo request. Returns the result
 * of rist_send_common_rtcp (0 on success per callers' usage). */
int rist_receiver_periodic_rtcp(struct rist_peer *peer) {
	uint8_t payload_type = RIST_PAYLOAD_TYPE_RTCP;
	uint8_t *rtcp_buf = get_cctx(peer)->buf.rtcp;
	int payload_len = 0;
	rist_rtcp_write_rr(rtcp_buf, &payload_len, peer);
	rist_rtcp_write_sdes(rtcp_buf, &payload_len, peer->cname, peer->adv_flow_id);
	rist_rtcp_write_echoreq(rtcp_buf, &payload_len, peer->adv_flow_id);
	struct rist_common_ctx *cctx = get_cctx(peer);
	return rist_send_common_rtcp(peer, payload_type, &rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, 0, peer->local_port, peer->remote_port, cctx->seq++, 0);
}

/* Send retransmission requests for the sequence numbers in seq_array
 * (ascending, caller-maintained). The compound packet is:
 * empty RR + SDES + seq-extension (upper 16 bits of first seq) + one NACK
 * packet in either bitmask or range format depending on the receiver config. */
int rist_receiver_send_nacks(struct rist_peer *peer, uint32_t seq_array[], size_t array_len)
{
	uint8_t payload_type = RIST_PAYLOAD_TYPE_RTCP;
	uint8_t *rtcp_buf = get_cctx(peer)->buf.rtcp;
	int payload_len = 0;
	rist_rtcp_write_empty_rr(rtcp_buf, &payload_len, peer->adv_flow_id);
	rist_rtcp_write_sdes(rtcp_buf, &payload_len, peer->cname, peer->adv_flow_id);
	if (RIST_LIKELY(array_len > 0)) {
		// Add nack requests (if any)
		struct rist_rtp_nack_record *rec;
		// First the sequence extension message (to transmit the upper 16 bits of the seq)
		struct rist_rtcp_seqext *seqext_buf = (struct rist_rtcp_seqext *)(rtcp_buf + RIST_MAX_PAYLOAD_OFFSET + payload_len);
		seqext_buf->flags = RTCP_NACK_SEQEXT_FLAGS;
		seqext_buf->ptype = PTYPE_NACK_CUSTOM;
		seqext_buf->ssrc = htobe32(peer->adv_flow_id);
		seqext_buf->len = htons(3);
		uint32_t seq = seq_array[0];
		seqext_buf->seq_msb = htobe16(seq >> 16);
		/* Number of 32-bit FCI records written; we always emit at least one */
		uint32_t fci_count = 1;
		// Now the NACK message
		if (peer->receiver_ctx->nack_type == RIST_NACK_BITMASK) {
			/* Bitmask format: each record = 16-bit start seq + 16-bit bitmask
			 * marking which of the following 16 seqs are also missing */
			struct rist_rtcp_nack_bitmask *rtcp = (struct rist_rtcp_nack_bitmask *)(rtcp_buf + RIST_MAX_PAYLOAD_OFFSET + payload_len + sizeof(struct rist_rtcp_seqext));
			rtcp->flags = RTCP_NACK_BITMASK_FLAGS;
			rtcp->ptype = PTYPE_NACK_BITMASK;
			rtcp->ssrc_source = 0; // TODO
			rtcp->ssrc = htobe32(peer->adv_flow_id);
			rec = (struct rist_rtp_nack_record *)(rtcp_buf +
				RIST_MAX_PAYLOAD_OFFSET + payload_len + sizeof(struct rist_rtcp_seqext) + RTCP_FB_HEADER_SIZE);
			uint32_t last_seq, tmp_seq;
			tmp_seq = last_seq = seq_array[0];
			/* seqs within (last_seq, last_seq+16] fit in the current record's bitmask */
			uint32_t boundary = tmp_seq +16;
			rec->start = htons((uint16_t)tmp_seq);
			uint16_t extra = 0;
			for (size_t i = 1; i < array_len; i++) {
				tmp_seq = seq_array[i];
				if (last_seq < tmp_seq && tmp_seq <= boundary) {
					uint32_t bitnum = tmp_seq - last_seq;
					SET_BIT(extra, (bitnum -1));
				} else {
					/* Out of bitmask range: close this record, open a new one */
					rec->extra = htons(extra);
					rec++;
					fci_count++;
					extra = 0;
					rec->start = htons((uint16_t)tmp_seq);
					last_seq = tmp_seq;
					boundary = tmp_seq + 16;
				}
			}
			rec->extra = htons(extra);
			rtcp->len = htons((uint16_t)(2 + fci_count));
		}
		else // PTYPE_NACK_CUSTOM
		{
			/* Range format: each record = 16-bit start seq + count of
			 * consecutive missing seqs that follow it */
			struct rist_rtcp_nack_range *rtcp = (struct rist_rtcp_nack_range *)(rtcp_buf + RIST_MAX_PAYLOAD_OFFSET + payload_len + sizeof(struct rist_rtcp_seqext));
			rtcp->flags = RTCP_NACK_RANGE_FLAGS;
			rtcp->ptype = PTYPE_NACK_CUSTOM;
			rtcp->ssrc_source = htobe32(peer->adv_flow_id);
			memcpy(rtcp->name, "RIST", 4);
			rec = (struct rist_rtp_nack_record *)(rtcp_buf + RIST_MAX_PAYLOAD_OFFSET + payload_len + sizeof(struct rist_rtcp_seqext) + RTCP_FB_HEADER_SIZE);
			uint16_t tmp_seq = (uint16_t)seq_array[0];
			uint16_t last_seq = tmp_seq;
			rec->start = htons(tmp_seq);
			uint16_t extra = 0;
			for (size_t i = 1; i < array_len; i++) {
				tmp_seq = (uint16_t)seq_array[i];
				if (RIST_UNLIKELY(extra == UINT16_MAX)) {
					/* Range counter saturated: start a new record */
					rec->extra = htons(extra);
					rec++;
					fci_count++;
					rec->start = htons(tmp_seq);
					extra = 0;
				} else if (tmp_seq == last_seq +1) {
					/* Consecutive miss: extend the current range */
					extra++;
				} else {
					/* Gap in the missing-seq list: close record, open a new one */
					rec->extra = htons(extra);
					rec++;
					fci_count++;
					rec->start = htons(tmp_seq);
					extra = 0;
				}
				last_seq = tmp_seq;
			}
			rec->extra = htons(extra);
			rtcp->len = htons((uint16_t)(2 + fci_count));
		}
		int nack_bufsize = sizeof(struct rist_rtcp_seqext) + RTCP_FB_HEADER_SIZE + RTCP_FB_FCI_GENERIC_NACK_SIZE * fci_count;
		payload_len += nack_bufsize;
		payload_type = RIST_PAYLOAD_TYPE_RTCP_NACK;
	}
	// We use direct send from receiver to sender (no fifo to keep track of seq/idx)
	struct rist_common_ctx
*cctx = get_cctx(peer);
	return rist_send_common_rtcp(peer, payload_type, &rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, 0, peer->local_port, peer->remote_port, cctx->seq++, 0);
}

/* Dispatch a prepared RTCP payload from the sender side.
 * Advanced profile: enqueue it into the sender queue under queue_lock so it
 * shares the seq space with data packets and lost RTCP can be retransmitted.
 * Other profiles: send immediately. rtcp_buf points at the payload itself
 * (already offset by RIST_MAX_PAYLOAD_OFFSET at the call sites). */
static void rist_sender_send_rtcp(uint8_t *rtcp_buf, int payload_len, struct rist_peer *peer)
{
	struct rist_common_ctx *cctx = get_cctx(peer);
	if (cctx->profile == RIST_PROFILE_ADVANCED) {
		struct rist_sender *ctx = peer->sender_ctx;
		pthread_mutex_lock(&ctx->queue_lock);
		size_t sender_write_index = atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire);
		ctx->sender_queue[sender_write_index] = rist_new_buffer(cctx, rtcp_buf, payload_len, RIST_PAYLOAD_TYPE_RTCP, 0, 0, peer->local_port, peer->remote_port);
		if (RIST_UNLIKELY(!ctx->sender_queue[sender_write_index])) {
			rist_log_priv(&ctx->common, RIST_LOG_ERROR, "\t Could not create packet buffer inside sender buffer, OOM, decrease max bitrate or buffer time length\n");
			pthread_mutex_unlock(&ctx->queue_lock);
			return;
		}
		ctx->sender_queue[sender_write_index]->peer = peer;
		ctx->sender_queue_bytesize += payload_len;
		/* Publish the new write index only after the slot is fully populated */
		atomic_store_explicit(&ctx->sender_queue_write_index, (sender_write_index + 1) & (ctx->sender_queue_max - 1), memory_order_release);
		pthread_mutex_unlock(&ctx->queue_lock);
		return;
	}
	rist_send_common_rtcp(peer, RIST_PAYLOAD_TYPE_RTCP, rtcp_buf, payload_len, 0, peer->local_port, peer->remote_port, cctx->seq++, 0);
}

/* Build the sender's periodic RTCP compound packet (SR + SDES) and hand it
 * to rist_sender_send_rtcp for queueing/transmission. */
void rist_sender_periodic_rtcp(struct rist_peer *peer)
{
	uint8_t *rtcp_buf = get_cctx(peer)->buf.rtcp;
	int payload_len = 0;
	rist_rtcp_write_sr(rtcp_buf, &payload_len, peer);
	rist_rtcp_write_sdes(rtcp_buf, &payload_len, peer->cname, peer->adv_flow_id);
	// Push it to the FIFO buffer to be sent ASAP (even in the simple profile case)
	// Enqueue it to not misalign the buffer and to resend lost handshakes in the case of advanced mode
	rist_sender_send_rtcp(&rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, peer);
	return;
}

/* Answer a peer's echo request by sending back its request timestamp. */
int rist_respond_echoreq(struct rist_peer *peer, const uint64_t echo_request_time)
{
	uint8_t
*rtcp_buf = get_cctx(peer)->buf.rtcp; int payload_len = 0; rist_rtcp_write_empty_rr(rtcp_buf, &payload_len, peer->adv_flow_id); rist_rtcp_write_sdes(rtcp_buf, &payload_len, peer->cname, peer->adv_flow_id); rist_rtcp_write_echoresp(rtcp_buf, &payload_len, echo_request_time, peer->adv_flow_id); if (peer->receiver_mode) { uint8_t payload_type = RIST_PAYLOAD_TYPE_RTCP; struct rist_common_ctx *cctx = get_cctx(peer); return rist_send_common_rtcp(peer, payload_type, &rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, 0, peer->local_port, peer->remote_port, cctx->seq++, 0); } else { /* I do this to not break advanced mode, however echo responses should really NOT be resend when lost ymmv */ rist_sender_send_rtcp(&rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, peer); return 0; } } int rist_request_echo(struct rist_peer *peer) { uint8_t *rtcp_buf = get_cctx(peer)->buf.rtcp; int payload_len = 0; rist_rtcp_write_empty_rr(rtcp_buf, &payload_len, peer->adv_flow_id); rist_rtcp_write_sdes(rtcp_buf, &payload_len, peer->cname, peer->adv_flow_id); rist_rtcp_write_echoreq(rtcp_buf, &payload_len, peer->adv_flow_id); if (peer->receiver_mode) { uint8_t payload_type = RIST_PAYLOAD_TYPE_RTCP; struct rist_common_ctx *cctx = get_cctx(peer); return rist_send_common_rtcp(peer, payload_type, &rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, 0, peer->local_port, peer->remote_port, cctx->seq++, 0); } else { /* I do this to not break advanced mode, however echo responses should really NOT be resend when lost ymmv */ rist_sender_send_rtcp(&rtcp_buf[RIST_MAX_PAYLOAD_OFFSET], payload_len, peer); return 0; } } static void rist_send_peer_nacks(struct rist_flow *f, struct rist_peer *peer) { struct rist_peer *outputpeer = peer; if (outputpeer->dead) { // original peer source is dead, use with the peer with the best rtt within this flow instead outputpeer = f->peer_lst[rist_best_rtt_index(f)]; } if (outputpeer) { if (get_cctx(peer)->debug) rist_log_priv(get_cctx(peer), RIST_LOG_DEBUG, "Sending %d nacks 
starting with %"PRIu32", %"PRIu32", %"PRIu32", %"PRIu32"\n", peer->nacks.counter, peer->nacks.array[0],peer->nacks.array[1],peer->nacks.array[2],peer->nacks.array[3]); if (rist_receiver_send_nacks(outputpeer->peer_rtcp, peer->nacks.array, peer->nacks.counter) == 0) peer->nacks.counter = 0; else rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "\tCould not send nacks, will try again\n"); } else { rist_log_priv(get_cctx(peer), RIST_LOG_ERROR, "\tCannot send nack, all peers are dead\n"); } } void rist_send_nacks(struct rist_flow *f, struct rist_peer *peer) { if (peer) { // Only a single peer was requested rist_send_peer_nacks(f, peer); return; } // Loop through all peers for the flow and empty the queues for (size_t j = 0; j < f->peer_lst_len; j++) { struct rist_peer *outputpeer = f->peer_lst[j]; if (!outputpeer->is_data) outputpeer = outputpeer->peer_data; if (outputpeer->nacks.counter > 0) { rist_send_peer_nacks(f, outputpeer); } } } int rist_sender_enqueue(struct rist_sender *ctx, const void *data, size_t len, uint64_t datagram_time, uint16_t src_port, uint16_t dst_port, uint32_t seq_rtp) { uint8_t payload_type = RIST_PAYLOAD_TYPE_DATA_RAW; if (ctx->common.PEERS == NULL) { // Do not cache data if the lib user has not added peers return -1; } ctx->last_datagram_time = datagram_time; /* insert into sender fifo queue */ pthread_mutex_lock(&ctx->queue_lock); size_t sender_write_index = atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire); ctx->sender_queue[sender_write_index] = rist_new_buffer(&ctx->common, data, len, payload_type, 0, datagram_time, src_port, dst_port); if (RIST_UNLIKELY(!ctx->sender_queue[sender_write_index])) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "\t Could not create packet buffer inside sender buffer, OOM, decrease max bitrate or buffer time length\n"); pthread_mutex_unlock(&ctx->queue_lock); return -1; } ctx->sender_queue[sender_write_index]->seq_rtp = (uint16_t)seq_rtp; ctx->sender_queue_bytesize += len; 
atomic_store_explicit(&ctx->sender_queue_write_index, (sender_write_index + 1) & (ctx->sender_queue_max - 1), memory_order_release); pthread_mutex_unlock(&ctx->queue_lock); return 0; } void rist_sender_send_data_balanced(struct rist_sender *ctx, struct rist_buffer *buffer) { struct rist_peer *peer; struct rist_peer *selected_peer_by_weight = NULL; uint32_t max_remainder = 0; int peercnt; bool looped = false; //We can do it safely here, since this function is only to be called once per packet buffer->seq = ctx->common.seq++; peer_select: peercnt = 0; for (peer = ctx->common.PEERS; peer; peer = peer->next) { if (!peer->is_data || peer->parent) continue; if ((!peer->listening && !peer->authenticated) || peer->dead || (peer->listening && !peer->child_alive_count)) { ctx->weight_counter -= peer->config.weight; if (ctx->weight_counter <= 0) { ctx->weight_counter = ctx->total_weight; } peer->w_count = peer->config.weight; continue; } peercnt++; /*************************************/ /* * * * * * * * * * * * * * * * * * */ /** Heuristics for sender goes here **/ /* * * * * * * * * * * * * * * * * * */ /*************************************/ if (peer->config.weight == 0 && !looped) { if (peer->listening) { struct rist_peer *child = peer->child; while (child) { if (child->is_data && !child->dead) { uint8_t *payload = buffer->data; rist_send_common_rtcp(child, buffer->type, &payload[RIST_MAX_PAYLOAD_OFFSET], buffer->size, buffer->source_time, buffer->src_port, buffer->dst_port, buffer->seq, buffer->seq_rtp); } child = child->sibling_next; } } else { uint8_t *payload = buffer->data; rist_send_common_rtcp(peer, buffer->type, &payload[RIST_MAX_PAYLOAD_OFFSET], buffer->size, buffer->source_time, buffer->src_port, buffer->dst_port, buffer->seq, buffer->seq_rtp); } } else { /* Election of next peer */ // printf("peer election: considering %p, count=%d (wc: %d)\n", // peer, peer->w_count, ctx->weight_counter); if (peer->w_count > max_remainder) { max_remainder = peer->w_count; 
selected_peer_by_weight = peer; } } } looped = true; if (selected_peer_by_weight) { peer = selected_peer_by_weight; if (peer->listening) { struct rist_peer *child = peer->child; while (child) { if (child->is_data && !child->dead) { uint8_t *payload = buffer->data; rist_send_common_rtcp(child, buffer->type, &payload[RIST_MAX_PAYLOAD_OFFSET], buffer->size, buffer->source_time, buffer->src_port, buffer->dst_port, buffer->seq, buffer->seq_rtp); } child = child->sibling_next; } } else { uint8_t *payload = buffer->data; rist_send_common_rtcp(peer, buffer->type, &payload[RIST_MAX_PAYLOAD_OFFSET], buffer->size, buffer->source_time, buffer->src_port, buffer->dst_port, buffer->seq, buffer->seq_rtp); ctx->weight_counter--; peer->w_count--; } } if (ctx->total_weight > 0 && (ctx->weight_counter == 0 || !selected_peer_by_weight)) { peer = ctx->common.PEERS; ctx->weight_counter = ctx->total_weight; for (; peer; peer = peer->next) { if (peer->listening || !peer->is_data) continue; peer->w_count = peer->config.weight; } if (!selected_peer_by_weight && peercnt > 0) goto peer_select; } } static size_t rist_sender_index_get(struct rist_sender *ctx, uint32_t seq) { // This is by design in advanced mode, that is why we push all output data and handshakes // through the sender_queue, so we can keep the seq and idx in sync size_t idx = (seq + 1)& (ctx->sender_queue_max -1); if (ctx->common.profile < RIST_PROFILE_ADVANCED) { // For simple profile and main profile without extended seq numbers, we use a conversion table idx = ctx->seq_index[(uint16_t)seq]; } return idx; } size_t rist_get_sender_retry_queue_size(struct rist_sender *ctx) { size_t retry_queue_size = (ctx->sender_retry_queue_write_index - ctx->sender_retry_queue_read_index) & (ctx->sender_retry_queue_size - 1); return retry_queue_size; } /* This function must return, 0 when there is nothing to send, < 0 on error and > 0 for bytes sent */ ssize_t rist_retry_dequeue(struct rist_sender *ctx) { // rist_log_priv(&ctx->common, 
RIST_LOG_ERROR, // "\tCurrent read/write index are %zu/%zu \n", ctx->sender_retry_queue_read_index, // ctx->sender_retry_queue_write_index); // TODO: Is this logic flawed and we are always one unit behind (look at oob_dequee) size_t sender_retry_queue_read_index = (ctx->sender_retry_queue_read_index + 1)& (ctx->sender_retry_queue_size -1); if (sender_retry_queue_read_index == ctx->sender_retry_queue_write_index) { //rist_log_priv(&ctx->common, RIST_LOG_ERROR, // "\t[GOOD] We are all up to date, index is %" PRIu64 "\n", // ctx->sender_retry_queue_read_index); return 0; } ctx->sender_retry_queue_read_index = sender_retry_queue_read_index; struct rist_retry *retry = &ctx->sender_retry_queue[ctx->sender_retry_queue_read_index]; // If they request a non-sense seq number, we will catch it when we check the seq number against // the one on that buffer position and it does not match size_t idx = rist_sender_index_get(ctx, retry->seq); if (ctx->sender_queue[idx] == NULL) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, " Couldn't find block %" PRIu32 " (i=%zu/r=%zu/w=%zu/d=%zu/rs=%zu), consider increasing the buffer size\n", retry->seq, idx, atomic_load_explicit(&ctx->sender_queue_read_index, memory_order_acquire), atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire), ctx->sender_queue_delete_index, rist_get_sender_retry_queue_size(ctx)); retry->peer->stats_sender_instant.retrans_skip++; return -1; } else if (ctx->common.profile == RIST_PROFILE_ADVANCED && ctx->sender_queue[idx]->seq != retry->seq) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, " Couldn't find block %" PRIu32 " (i=%zu/r=%zu/w=%zu/d=%zu/rs=%zu), found an old one instead %" PRIu32 " (%" PRIu64 "), something is very wrong!\n", retry->seq, idx, atomic_load_explicit(&ctx->sender_queue_read_index, memory_order_acquire), atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire), ctx->sender_queue_delete_index, rist_get_sender_retry_queue_size(ctx), ctx->sender_queue[idx]->seq, 
ctx->sender_queue_max); retry->peer->stats_sender_instant.retrans_skip++; return -1; } else if (ctx->common.profile < RIST_PROFILE_ADVANCED && (uint16_t)retry->seq != ctx->sender_queue[idx]->seq_rtp) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, " Couldn't find block %" PRIu16 " (i=%zu/r=%zu/w=%zu/d=%zu/rs=%zu), found an old one instead %" PRIu32 " (%" PRIu64 "), bitrate is too high, use advanced profile instead\n", (uint16_t)retry->seq, idx, atomic_load_explicit(&ctx->sender_queue_read_index, memory_order_acquire), atomic_load_explicit(&ctx->sender_queue_write_index, memory_order_acquire), ctx->sender_queue_delete_index, rist_get_sender_retry_queue_size(ctx), ctx->sender_queue[idx]->seq_rtp, ctx->sender_queue_max); retry->peer->stats_sender_instant.retrans_skip++; return -1; } // TODO: re-enable rist_send_data_allowed (cooldown feature) // Make sure we do not flood the network with retries struct rist_bandwidth_estimation *retry_bw = &retry->peer->retry_bw; struct rist_bandwidth_estimation *cli_bw = &retry->peer->bw; size_t current_bitrate = cli_bw->bitrate + retry_bw->bitrate; size_t max_bitrate = retry->peer->config.recovery_maxbitrate * 1000; if (current_bitrate > max_bitrate) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Bandwidth exceeded: (%zu + %zu) > %d, not resending packet %"PRIu64".\n", cli_bw->bitrate, retry_bw->bitrate, max_bitrate, idx); retry->peer->stats_sender_instant.retrans_skip++; return -1; } // For timing debugging uint64_t now = timestampNTP_u64(); uint64_t data_age = (now - ctx->sender_queue[idx]->time) / RIST_CLOCK; uint64_t retry_age = (now - retry->insert_time) / RIST_CLOCK; if (retry_age > retry->peer->config.recovery_length_max) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Retry-request of element %" PRIu32 " (idx %zu) that was sent %" PRIu64 "ms ago has been in the queue too long to matter: %"PRIu64"ms > %ums\n", retry->seq, idx, data_age, retry_age, retry->peer->config.recovery_length_max); return -1; } struct rist_buffer *buffer 
= ctx->sender_queue[idx]; /* queue_time holds the original insertion time for this seq */ if (ctx->common.debug) rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "Resending %"PRIu32"/%"PRIu32"/%"PRIu16" (idx %zu) after %" PRIu64 "ms of first transmission and %"PRIu64"ms in queue, bitrate is %zu + %zu, %zu\n", retry->seq, buffer->seq, buffer->seq_rtp, idx, data_age, retry_age, retry->peer->bw.bitrate, retry_bw->bitrate, retry->peer->bw.bitrate + retry_bw->bitrate); uint8_t *payload = buffer->data; // TODO: I do not think this check is needed anymore ... we fixed the bug that was causing // this scenario ... and we have thread-locking to prevent this if (!payload) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Someone deleted my buffer when resending %" PRIu32 " (idx %zu) after %" PRIu64 "ms of first transmission and %"PRIu64"ms in queue, bitrate is %zu + %zu, %zu\n", retry->seq, idx, data_age, retry_age, retry->peer->bw.bitrate, retry_bw->bitrate, retry->peer->bw.bitrate + retry_bw->bitrate); } buffer->transmit_count++; size_t ret = 0; if (buffer->transmit_count >= retry->peer->config.max_retries) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Datagram %"PRIu32 " is missing, but nack count is too large (%u), age is %"PRIu64"ms, retry #%lu\n", buffer->seq, buffer->transmit_count, data_age, buffer->transmit_count); } else { ret = (size_t)rist_send_seq_rtcp(retry->peer->peer_data, buffer->seq, buffer->seq_rtp, buffer->type, &payload[RIST_MAX_PAYLOAD_OFFSET], buffer->size, buffer->source_time, buffer->src_port, buffer->dst_port); } // update bandwidh value rist_calculate_bitrate_sender(ret, retry_bw); if (ret < buffer->size) { rist_log_priv(&ctx->common, RIST_LOG_ERROR, "Resending of packet failed %zu != %zu for seq %"PRIu32"\n", ret, buffer->size, buffer->seq); retry->peer->stats_sender_instant.retrans_skip++; } else { retry->peer->stats_sender_instant.retrans++; } return ret; } void rist_retry_enqueue(struct rist_sender *ctx, uint32_t seq, struct rist_peer *peer) { 
uint64_t now = timestampNTP_u64(); size_t idx = rist_sender_index_get(ctx, seq); struct rist_buffer *buffer = ctx->sender_queue[idx]; struct rist_retry *retry; // Even though all the checks are on the dequeue function, we leave one here // to prevent the flooding of our fifo .. It is based on the date of the // last queued item with the same seq for this peer. // The policy of whether to allow or not allow duplicate seq entries in the retry queue // is dependent on the bloat_mode. // bloat_mode disabled mode = unlimited duplicates // bloat_mode normal mode = we enforce spacing but allow duplicates // bloat_mode aggresive mode = no duplicates allowed // This is a safety check to protect against buggy or non compliant receivers that request the // same seq number without waiting one RTT. if (!buffer) { rist_log_priv(&ctx->common, RIST_LOG_WARN, "Nack request for seq %"PRIu32" but we do not have it in the buffer (%zu ms)\n", seq, ctx->sender_recover_min_time); return; } else { uint64_t age_ticks = (now - buffer->time); if (peer->config.congestion_control_mode == RIST_CONGESTION_CONTROL_MODE_OFF) { // All duplicates allowed, just report it if (ctx->common.debug) rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "Nack request for seq %" PRIu32 " with age %" PRIu64 "ms and rtt_min %" PRIu32 " for peer #%d\n", buffer->seq, age_ticks / RIST_CLOCK, peer->config.recovery_rtt_min, peer->adv_peer_id); } else if (ctx->peer_lst_len == 1) { // Only one peer (faster algorithm with no lookups) if (buffer->last_retry_request != 0) { // This is a safety check to protect against buggy or non compliant receivers that request the // same seq number without waiting one RTT. 
uint64_t delta = (now - buffer->last_retry_request) / RIST_CLOCK; if (ctx->common.debug) rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "Nack request for seq %" PRIu32 " with delta %" PRIu64 "ms, age %" PRIu64 "ms and rtt_min %" PRIu32 "\n", buffer->seq, delta, age_ticks / RIST_CLOCK, peer->config.recovery_rtt_min); if (peer->config.congestion_control_mode == RIST_CONGESTION_CONTROL_MODE_NORMAL) { if (delta < peer->config.recovery_rtt_min) { rist_log_priv(&ctx->common, RIST_LOG_WARN, "Nack request for seq %" PRIu32 ", age %"PRIu64"ms, is already queued (too soon to add another one), skipped, %" PRIu64 " < %" PRIu32 " ms\n", buffer->seq, age_ticks / RIST_CLOCK, delta, peer->config.recovery_rtt_min); peer->stats_sender_instant.bloat_skip++; return; } } else { rist_log_priv(&ctx->common, RIST_LOG_WARN, "Nack request for seq %" PRIu32 ", delta/age %"PRIu64"ms/%"PRIu64"ms is already queued, skipped\n", buffer->seq, delta, age_ticks / RIST_CLOCK, peer->config.recovery_rtt_min); peer->stats_sender_instant.bloat_skip++; return; } } else { if (ctx->common.debug) rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "First nack request for seq %"PRIu32", age %"PRIu64"ms\n", buffer->seq, age_ticks / RIST_CLOCK); } } else { // Multiple peers, we need to search for other retries in the queue for comparison uint64_t delta = 0; size_t index_end = 0; size_t index = 0; // We search forward for aggressive mode and backwards for normal if (peer->config.congestion_control_mode == RIST_CONGESTION_CONTROL_MODE_AGGRESSIVE) { index = ctx->sender_retry_queue_read_index; index_end = ctx->sender_retry_queue_write_index; } else { index = ctx->sender_retry_queue_write_index; index_end = ctx->sender_retry_queue_read_index; } uint64_t rist_max_jitter_ticks = (uint64_t)ctx->common.rist_max_jitter; while (index != index_end) { if ((index % 10) == 0) { // We will completely bypass this check if/when it takes too long as we are // blocking the protocol thread (it could happen when the queue gets too big) 
uint64_t loop_time = timestampNTP_u64() - now; if (loop_time > rist_max_jitter_ticks) { size_t retry_queue_size = rist_get_sender_retry_queue_size(ctx); rist_log_priv(&ctx->common, RIST_LOG_WARN, "Bypassing duplicate nack request check for seq %"PRIu32" after %"PRIu64"us, age %"PRIu64"ms, q_size = %zu (taking too long)\n", buffer->seq, 1000 * loop_time / RIST_CLOCK, age_ticks / RIST_CLOCK, retry_queue_size); break; } } retry = &ctx->sender_retry_queue[index]; if (retry->seq == seq && retry->peer == peer) { delta = (now - retry->insert_time) / RIST_CLOCK; if (peer->config.congestion_control_mode == RIST_CONGESTION_CONTROL_MODE_NORMAL) { if (delta < peer->config.recovery_rtt_min) { rist_log_priv(&ctx->common, RIST_LOG_WARN, "Nack request for seq %" PRIu32 " with delta %" PRIu64 "ms (age %"PRIu64"ms) is already queued (too soon to add another one), skipped, peer #%d '%s'\n", buffer->seq, delta, age_ticks / RIST_CLOCK, peer->adv_peer_id, peer->receiver_name); peer->stats_sender_instant.bloat_skip++; return; } else { // There is only one in the queue for bloat_mode agresive // and we area only interested on the last one queued for bloat_mode normal break; } } else { rist_log_priv(&ctx->common, RIST_LOG_WARN, "Nack request for seq %" PRIu32 " with delta %" PRIu64 "ms (age %"PRIu64"ms) is already queued, skipped, peer #%d '%s'\n", buffer->seq, delta, age_ticks / RIST_CLOCK, peer->adv_peer_id, peer->receiver_name); peer->stats_sender_instant.bloat_skip++; return; } } // We search forward for aggressive mode and backwards for normal if (peer->config.congestion_control_mode == RIST_CONGESTION_CONTROL_MODE_AGGRESSIVE) { if (++index >= ctx->sender_retry_queue_size) index= 0; } else { if (index == 0) index = ctx->sender_retry_queue_size; index--; } } if (ctx->common.debug) { if (delta) { rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "Nack request for seq %" PRIu32 " with delta %" PRIu64 "ms (age %"PRIu64"ms) and rtt_min %" PRIu32 " for peer #%d '%s'\n", buffer->seq, delta, 
age_ticks / RIST_CLOCK, peer->config.recovery_rtt_min, peer->adv_peer_id, peer->receiver_name); } else { rist_log_priv(&ctx->common, RIST_LOG_DEBUG, "First nack request for seq %"PRIu32", age %"PRIu64"ms, peer #%d '%s'\n", buffer->seq, age_ticks / RIST_CLOCK, peer->adv_peer_id, peer->receiver_name); } } } } // Now insert into the missing queue buffer->last_retry_request = now; retry = &ctx->sender_retry_queue[ctx->sender_retry_queue_write_index]; retry->seq = seq; retry->peer = peer; retry->insert_time = now; if (++ctx->sender_retry_queue_write_index >= ctx->sender_retry_queue_size) { ctx->sender_retry_queue_write_index = 0; } } void rist_print_inet_info(char *prefix, struct rist_peer *peer) { char ipstr[INET6_ADDRSTRLEN]; uint32_t port; // deal with both IPv4 and IPv6: if (peer->address_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *) &peer->u.address; port = ntohs(s->sin6_port); inet_ntop(AF_INET6, &s->sin6_addr, ipstr, sizeof ipstr); } else { struct sockaddr_in *addr = (void *) &peer->u.address; port = ntohs(addr->sin_port); snprintf(ipstr, INET6_ADDRSTRLEN, "%s", inet_ntoa(addr->sin_addr)); } struct rist_common_ctx *ctx = get_cctx(peer); if (ctx->profile == RIST_PROFILE_SIMPLE) { rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "%sPeer Information, IP:Port => %s:%u (%d), id: %"PRIu32", simple profile\n", prefix, ipstr, port, peer->listening, peer->adv_peer_id); } else { rist_log_priv(get_cctx(peer), RIST_LOG_INFO, "%sPeer Information, IP:Port => %s:%u (%d), id: %"PRIu32", ports: %u->%u\n", prefix, ipstr, port, peer->listening, peer->adv_peer_id, peer->local_port, peer->remote_port); } }
730180.c
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) Nginx, Inc.
 */


#include <ngx_config.h>
#include <ngx_core.h>


#if (NGX_HAVE_ATOMIC_OPS)


static void ngx_shmtx_wakeup(ngx_shmtx_t *mtx);


/*
 * Atomic-op based shared-memory mutex.  The lock word lives in shared memory
 * (addr->lock) and holds the owner's pid, or 0 when free.  A preset
 * mtx->spin of (ngx_uint_t) -1 marks a pure spinlock (never sleeps, no
 * semaphore); otherwise the spin count is forced to 2048 and, when POSIX
 * semaphores are available, a process-shared semaphore is used to sleep
 * instead of burning CPU.
 */
ngx_int_t
ngx_shmtx_create(ngx_shmtx_t *mtx, ngx_shmtx_sh_t *addr, u_char *name)
{
    mtx->lock = &addr->lock;

    if (mtx->spin == (ngx_uint_t) -1) {
        return NGX_OK;
    }

    mtx->spin = 2048;

#if (NGX_HAVE_POSIX_SEM)

    mtx->wait = &addr->wait;

    /* pshared=1: the semaphore is shared between processes */
    if (sem_init(&mtx->sem, 1, 0) == -1) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_errno,
                      "sem_init() failed");
    } else {
        mtx->semaphore = 1;
    }

#endif

    return NGX_OK;
}


/* Destroy the mutex; only the optional POSIX semaphore needs cleanup. */
void
ngx_shmtx_destroy(ngx_shmtx_t *mtx)
{
#if (NGX_HAVE_POSIX_SEM)

    if (mtx->semaphore) {
        if (sem_destroy(&mtx->sem) == -1) {
            ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_errno,
                          "sem_destroy() failed");
        }
    }

#endif
}


/* Try to take the lock once; returns 1 on success, 0 if it is held. */
ngx_uint_t
ngx_shmtx_trylock(ngx_shmtx_t *mtx)
{
    /* cheap read first, then the expensive atomic CAS of 0 -> our pid */
    return (*mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid));
}


/*
 * Acquire the lock, spinning with exponentially growing pause runs on SMP,
 * then (if available) sleeping on the semaphore, else yielding the CPU.
 */
void
ngx_shmtx_lock(ngx_shmtx_t *mtx)
{
    ngx_uint_t         i, n;

    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0, "shmtx lock");

    for ( ;; ) {

        if (*mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid)) {
            return;
        }

        if (ngx_ncpu > 1) {

            /* spin with 1, 2, 4, ... pause instructions between CAS retries */
            for (n = 1; n < mtx->spin; n <<= 1) {

                for (i = 0; i < n; i++) {
                    ngx_cpu_pause();
                }

                if (*mtx->lock == 0
                    && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid))
                {
                    return;
                }
            }
        }

#if (NGX_HAVE_POSIX_SEM)

        if (mtx->semaphore) {
            /* register as a waiter before the final lock attempt, so the
             * holder knows to sem_post() on unlock */
            (void) ngx_atomic_fetch_add(mtx->wait, 1);

            if (*mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid)) {
                (void) ngx_atomic_fetch_add(mtx->wait, -1);
                return;
            }

            ngx_log_debug1(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
                           "shmtx wait %uA", *mtx->wait);

            /* sleep until the holder posts; restart on EINTR */
            while (sem_wait(&mtx->sem) == -1) {
                ngx_err_t  err;

                err = ngx_errno;

                if (err != NGX_EINTR) {
                    ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, err,
                                  "sem_wait() failed while waiting on shmtx");
                    break;
                }
            }

            ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
                           "shmtx awoke");

            continue;
        }

#endif

        ngx_sched_yield();
    }
}


/* Release the lock (only if we own it) and wake one waiter. */
void
ngx_shmtx_unlock(ngx_shmtx_t *mtx)
{
    if (mtx->spin !=
(ngx_uint_t) -1) {
        ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0, "shmtx unlock");
    }

    /* only the owner (our pid in the lock word) may clear it */
    if (ngx_atomic_cmp_set(mtx->lock, ngx_pid, 0)) {
        ngx_shmtx_wakeup(mtx);
    }
}


/*
 * Forcibly release a lock left behind by a dead process identified by pid.
 * Returns 1 if the lock word actually held that pid and was cleared.
 */
ngx_uint_t
ngx_shmtx_force_unlock(ngx_shmtx_t *mtx, ngx_pid_t pid)
{
    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
                   "shmtx forced unlock");

    if (ngx_atomic_cmp_set(mtx->lock, pid, 0)) {
        ngx_shmtx_wakeup(mtx);
        return 1;
    }

    return 0;
}


/*
 * Wake one sleeping waiter: atomically decrement the waiter count (bail out
 * if it is already zero or a concurrent waker got there first), then post
 * the semaphore.
 */
static void
ngx_shmtx_wakeup(ngx_shmtx_t *mtx)
{
#if (NGX_HAVE_POSIX_SEM)
    ngx_atomic_uint_t  wait;

    if (!mtx->semaphore) {
        return;
    }

    for ( ;; ) {

        wait = *mtx->wait;

        if ((ngx_atomic_int_t) wait <= 0) {
            return;
        }

        if (ngx_atomic_cmp_set(mtx->wait, wait, wait - 1)) {
            break;
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
                   "shmtx wake %uA", wait);

    if (sem_post(&mtx->sem) == -1) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_errno,
                      "sem_post() failed while wake shmtx");
    }

#endif
}


#else


/*
 * Fallback implementation without atomic ops: the mutex is an advisory
 * fcntl() lock on an unlinked file.  The file is deleted right after
 * creation so it vanishes automatically when all descriptors are closed.
 */
ngx_int_t
ngx_shmtx_create(ngx_shmtx_t *mtx, ngx_shmtx_sh_t *addr, u_char *name)
{
    if (mtx->name) {

        /* same name: the mutex is already created, just refresh the pointer */
        if (ngx_strcmp(name, mtx->name) == 0) {
            mtx->name = name;
            return NGX_OK;
        }

        ngx_shmtx_destroy(mtx);
    }

    mtx->fd = ngx_open_file(name, NGX_FILE_RDWR, NGX_FILE_CREATE_OR_OPEN,
                            NGX_FILE_DEFAULT_ACCESS);

    if (mtx->fd == NGX_INVALID_FILE) {
        ngx_log_error(NGX_LOG_EMERG, ngx_cycle->log, ngx_errno,
                      ngx_open_file_n " \"%s\" failed", name);
        return NGX_ERROR;
    }

    /* unlink immediately: the fd keeps the inode alive */
    if (ngx_delete_file(name) == NGX_FILE_ERROR) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_errno,
                      ngx_delete_file_n " \"%s\" failed", name);
    }

    mtx->name = name;

    return NGX_OK;
}


/* Close the lock file descriptor. */
void
ngx_shmtx_destroy(ngx_shmtx_t *mtx)
{
    if (ngx_close_file(mtx->fd) == NGX_FILE_ERROR) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_errno,
                      ngx_close_file_n " \"%s\" failed", mtx->name);
    }
}


/* Non-blocking lock attempt; returns 1 on success, 0 if already locked. */
ngx_uint_t
ngx_shmtx_trylock(ngx_shmtx_t *mtx)
{
    ngx_err_t  err;

    /* ngx_trylock_fd() is implemented in os/unix/ngx_files.c */
    err = ngx_trylock_fd(mtx->fd);

    if (err == 0) {
        return 1;
    }

    if (err == NGX_EAGAIN) {
        return 0;
    }

#if __osf__ /* Tru64 UNIX */

    if (err == NGX_EACCES) {
        return 0;
    }

#endif
ngx_log_abort(err, ngx_trylock_fd_n " %s failed", mtx->name); return 0; } void ngx_shmtx_lock(ngx_shmtx_t *mtx) { ngx_err_t err; err = ngx_lock_fd(mtx->fd); if (err == 0) { return; } ngx_log_abort(err, ngx_lock_fd_n " %s failed", mtx->name); } void ngx_shmtx_unlock(ngx_shmtx_t *mtx) { ngx_err_t err; err = ngx_unlock_fd(mtx->fd); if (err == 0) { return; } ngx_log_abort(err, ngx_unlock_fd_n " %s failed", mtx->name); } ngx_uint_t ngx_shmtx_force_unlock(ngx_shmtx_t *mtx, ngx_pid_t pid) { return 0; } #endif
823622.c
/** ****************************************************************************** * @file stm32f4xx_hal.c * @author MCD Application Team * @brief HAL module driver. * This is the common part of the HAL initialization * @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] The common HAL driver contains a set of generic and common APIs that can be used by the PPP peripheral drivers and the user to start using the HAL. [..] The HAL contains two APIs' categories: (+) Common HAL APIs (+) Services HAL APIs @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2017 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" /** @addtogroup STM32F4xx_HAL_Driver * @{ */ /** @defgroup HAL HAL * @brief HAL module driver. * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /** @addtogroup HAL_Private_Constants * @{ */ /** * @brief STM32F4xx HAL Driver version number V1.7.4 */ #define __STM32F4xx_HAL_VERSION_MAIN (0x01U) /*!< [31:24] main version */ #define __STM32F4xx_HAL_VERSION_SUB1 (0x07U) /*!< [23:16] sub1 version */ #define __STM32F4xx_HAL_VERSION_SUB2 (0x04U) /*!< [15:8] sub2 version */ #define __STM32F4xx_HAL_VERSION_RC (0x00U) /*!< [7:0] release candidate */ #define __STM32F4xx_HAL_VERSION ((__STM32F4xx_HAL_VERSION_MAIN << 24U)\ |(__STM32F4xx_HAL_VERSION_SUB1 << 16U)\ |(__STM32F4xx_HAL_VERSION_SUB2 << 8U )\ |(__STM32F4xx_HAL_VERSION_RC)) #define IDCODE_DEVID_MASK 0x00000FFFU /* ------------ RCC registers bit address in the alias region ----------- */ #define SYSCFG_OFFSET (SYSCFG_BASE - PERIPH_BASE) /* --- MEMRMP Register ---*/ /* Alias word address of UFB_MODE bit */ #define MEMRMP_OFFSET SYSCFG_OFFSET #define UFB_MODE_BIT_NUMBER SYSCFG_MEMRMP_UFB_MODE_Pos #define UFB_MODE_BB (uint32_t)(PERIPH_BB_BASE + (MEMRMP_OFFSET * 32U) + (UFB_MODE_BIT_NUMBER * 4U)) /* 
--- CMPCR Register ---*/
/* Alias word address of CMP_PD bit */
#define CMPCR_OFFSET               (SYSCFG_OFFSET + 0x20U)
#define CMP_PD_BIT_NUMBER          SYSCFG_CMPCR_CMP_PD_Pos
#define CMPCR_CMP_PD_BB            (uint32_t)(PERIPH_BB_BASE + (CMPCR_OFFSET * 32U) + (CMP_PD_BIT_NUMBER * 4U))

/* --- MCHDLYCR Register ---*/
/* Alias word address of BSCKSEL bit */
#define MCHDLYCR_OFFSET            (SYSCFG_OFFSET + 0x30U)
#define BSCKSEL_BIT_NUMBER         SYSCFG_MCHDLYCR_BSCKSEL_Pos
#define MCHDLYCR_BSCKSEL_BB        (uint32_t)(PERIPH_BB_BASE + (MCHDLYCR_OFFSET * 32U) + (BSCKSEL_BIT_NUMBER * 4U))
/**
  * @}
  */

/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/** @addtogroup HAL_Private_Variables
  * @{
  */
/* Millisecond tick counter, incremented from the SysTick ISR via HAL_IncTick() */
__IO uint32_t uwTick;
/* Tick interrupt priority; initialized out of range until HAL_InitTick()
   stores a validated value */
uint32_t uwTickPrio = (1UL << __NVIC_PRIO_BITS); /* Invalid PRIO */
/* Tick frequency selector (period in ms between tick interrupts) */
HAL_TickFreqTypeDef uwTickFreq = HAL_TICK_FREQ_DEFAULT;  /* 1KHz */
/**
  * @}
  */

/* Private function prototypes -----------------------------------------------*/
/* Private functions ---------------------------------------------------------*/

/** @defgroup HAL_Exported_Functions HAL Exported Functions
  * @{
  */

/** @defgroup HAL_Exported_Functions_Group1 Initialization and de-initialization Functions
 *  @brief    Initialization and de-initialization functions
 *
@verbatim
 ===============================================================================
              ##### Initialization and Configuration functions #####
 ===============================================================================
    [..]  This section provides functions allowing to:
      (+) Initializes the Flash interface the NVIC allocation and initial clock
          configuration. It initializes the systick also when timeout is needed
          and the backup domain when enabled.
      (+) De-Initializes common part of the HAL.
      (+) Configure the time base source to have 1ms time base with a dedicated
          Tick interrupt priority.
(++) SysTick timer is used by default as source of time base, but user can eventually implement his proper time base source (a general purpose timer for example or other time source), keeping in mind that Time base duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and handled in milliseconds basis. (++) Time base configuration function (HAL_InitTick ()) is called automatically at the beginning of the program after reset by HAL_Init() or at any time when clock is configured, by HAL_RCC_ClockConfig(). (++) Source of time base is configured to generate interrupts at regular time intervals. Care must be taken if HAL_Delay() is called from a peripheral ISR process, the Tick interrupt line must have higher priority (numerically lower) than the peripheral interrupt. Otherwise the caller ISR process will be blocked. (++) functions affecting time base configurations are declared as __weak to make override possible in case of other implementations in user file. @endverbatim * @{ */ /** * @brief This function is used to initialize the HAL Library; it must be the first * instruction to be executed in the main program (before to call any other * HAL function), it performs the following: * Configure the Flash prefetch, instruction and Data caches. * Configures the SysTick to generate an interrupt each 1 millisecond, * which is clocked by the HSI (at this stage, the clock is not yet * configured and thus the system is running from the internal HSI at 16 MHz). * Set NVIC Group Priority to 4. * Calls the HAL_MspInit() callback function defined in user file * "stm32f4xx_hal_msp.c" to do the global low level hardware initialization * * @note SysTick is used as time base for the HAL_Delay() function, the application * need to ensure that the SysTick time base is always set to 1 millisecond * to have correct HAL operation. 
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_Init(void)
{
  /* Configure Flash prefetch, Instruction cache, Data cache */
#if (INSTRUCTION_CACHE_ENABLE != 0U)
  __HAL_FLASH_INSTRUCTION_CACHE_ENABLE();
#endif /* INSTRUCTION_CACHE_ENABLE */

#if (DATA_CACHE_ENABLE != 0U)
  __HAL_FLASH_DATA_CACHE_ENABLE();
#endif /* DATA_CACHE_ENABLE */

#if (PREFETCH_ENABLE != 0U)
  __HAL_FLASH_PREFETCH_BUFFER_ENABLE();
#endif /* PREFETCH_ENABLE */

  /* Set Interrupt Group Priority */
  HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4);

  /* Use systick as time base source and configure 1ms tick (default clock after Reset is HSI) */
  HAL_InitTick(TICK_INT_PRIORITY);

  /* Init the low level hardware */
  HAL_MspInit();

  /* Return function status */
  return HAL_OK;
}

/**
  * @brief  This function de-Initializes common part of the HAL and stops the systick.
  *         This function is optional.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_DeInit(void)
{
  /* Reset of all peripherals: each bus is force-reset then immediately released */
  __HAL_RCC_APB1_FORCE_RESET();
  __HAL_RCC_APB1_RELEASE_RESET();

  __HAL_RCC_APB2_FORCE_RESET();
  __HAL_RCC_APB2_RELEASE_RESET();

  __HAL_RCC_AHB1_FORCE_RESET();
  __HAL_RCC_AHB1_RELEASE_RESET();

  __HAL_RCC_AHB2_FORCE_RESET();
  __HAL_RCC_AHB2_RELEASE_RESET();

  __HAL_RCC_AHB3_FORCE_RESET();
  __HAL_RCC_AHB3_RELEASE_RESET();

  /* De-Init the low level hardware */
  HAL_MspDeInit();

  /* Return function status */
  return HAL_OK;
}

/**
  * @brief  Initialize the MSP (weak stub; override in user code).
  * @retval None
  */
__weak void HAL_MspInit(void)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_MspInit could be implemented in the user file
   */
}

/**
  * @brief  DeInitializes the MSP (weak stub; override in user code).
  * @retval None
  */
__weak void HAL_MspDeInit(void)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_MspDeInit could be implemented in the user file
   */
}

/**
  * @brief This function configures the source of the time base.
  *        The time source is configured  to have 1ms time base with a dedicated
  *        Tick interrupt priority.
* @note This function is called automatically at the beginning of program after * reset by HAL_Init() or at any time when clock is reconfigured by HAL_RCC_ClockConfig(). * @note In the default implementation, SysTick timer is the source of time base. * It is used to generate interrupts at regular time intervals. * Care must be taken if HAL_Delay() is called from a peripheral ISR process, * The SysTick interrupt must have higher priority (numerically lower) * than the peripheral interrupt. Otherwise the caller ISR process will be blocked. * The function is declared as __weak to be overwritten in case of other * implementation in user file. * @param TickPriority Tick interrupt priority. * @retval HAL status */ __weak HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) { /* Configure the SysTick to have interrupt in 1ms time basis*/ if (HAL_SYSTICK_Config(SystemCoreClock / (1000U / uwTickFreq)) > 0U) { return HAL_ERROR; } /* Configure the SysTick IRQ priority */ if (TickPriority < (1UL << __NVIC_PRIO_BITS)) { HAL_NVIC_SetPriority(SysTick_IRQn, TickPriority, 0U); uwTickPrio = TickPriority; } else { return HAL_ERROR; } /* Return function status */ return HAL_OK; } /** * @} */ /** @defgroup HAL_Exported_Functions_Group2 HAL Control functions * @brief HAL Control functions * @verbatim =============================================================================== ##### HAL Control functions ##### =============================================================================== [..] 
This section provides functions allowing to:
      (+) Provide a tick value in millisecond
      (+) Provide a blocking delay in millisecond
      (+) Suspend the time base source interrupt
      (+) Resume the time base source interrupt
      (+) Get the HAL API driver version
      (+) Get the device identifier
      (+) Get the device revision identifier
      (+) Enable/Disable Debug module during SLEEP mode
      (+) Enable/Disable Debug module during STOP mode
      (+) Enable/Disable Debug module during STANDBY mode

@endverbatim
  * @{
  */

/**
  * @brief This function is called to increment a global variable "uwTick"
  *        used as application time base.
  * @note In the default implementation, this variable is incremented each 1ms
  *       in SysTick ISR.
  * @note This function is declared as __weak to be overwritten in case of other
  *       implementations in user file.
  * @retval None
  */
__weak void HAL_IncTick(void)
{
  /* advance by the configured tick period (1, 10 or 100 ms) */
  uwTick += uwTickFreq;
}

/**
  * @brief Provides a tick value in millisecond.
  * @note This function is declared as __weak to be overwritten in case of other
  *       implementations in user file.
  * @retval tick value
  */
__weak uint32_t HAL_GetTick(void)
{
  return uwTick;
}

/**
  * @brief This function returns a tick priority.
  * @retval tick priority
  */
uint32_t HAL_GetTickPrio(void)
{
  return uwTickPrio;
}

/**
  * @brief Set new tick Freq.
  * @note  Re-programs SysTick via HAL_InitTick() with the stored priority.
  * @retval Status
  */
HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq)
{
  HAL_StatusTypeDef status  = HAL_OK;

  assert_param(IS_TICKFREQ(Freq));

  if (uwTickFreq != Freq)
  {
    uwTickFreq = Freq;

    /* Apply the new tick Freq */
    status = HAL_InitTick(uwTickPrio);
  }

  return status;
}

/**
  * @brief Return tick frequency.
  * @retval tick period in Hz
  */
HAL_TickFreqTypeDef HAL_GetTickFreq(void)
{
  return uwTickFreq;
}

/**
  * @brief This function provides minimum delay (in milliseconds) based
  *        on variable incremented.
  * @note In the default implementation , SysTick timer is the source of time base.
  *       It is used to generate interrupts at regular time intervals where uwTick
  *       is incremented.
  * @note This function is declared as __weak to be overwritten in case of other
  *       implementations in user file.
  * @param Delay specifies the delay time length, in milliseconds.
  * @retval None
  */
__weak void HAL_Delay(uint32_t Delay)
{
  uint32_t tickstart = HAL_GetTick();
  uint32_t wait = Delay;

  /* Add a freq to guarantee minimum wait */
  if (wait < HAL_MAX_DELAY)
  {
    wait += (uint32_t)(uwTickFreq);
  }

  /* unsigned subtraction makes the comparison robust to uwTick wrap-around */
  while((HAL_GetTick() - tickstart) < wait)
  {
  }
}

/**
  * @brief Suspend Tick increment.
  * @note In the default implementation , SysTick timer is the source of time base. It is
  *       used to generate interrupts at regular time intervals. Once HAL_SuspendTick()
  *       is called, the SysTick interrupt will be disabled and so Tick increment
  *       is suspended.
  * @note This function is declared as __weak to be overwritten in case of other
  *       implementations in user file.
  * @retval None
  */
__weak void HAL_SuspendTick(void)
{
  /* Disable SysTick Interrupt */
  SysTick->CTRL &= ~SysTick_CTRL_TICKINT_Msk;
}

/**
  * @brief Resume Tick increment.
  * @note In the default implementation , SysTick timer is the source of time base. It is
  *       used to generate interrupts at regular time intervals. Once HAL_ResumeTick()
  *       is called, the SysTick interrupt will be enabled and so Tick increment
  *       is resumed.
  * @note This function is declared as __weak to be overwritten in case of other
  *       implementations in user file.
  * @retval None
  */
__weak void HAL_ResumeTick(void)
{
  /* Enable SysTick Interrupt */
  SysTick->CTRL  |= SysTick_CTRL_TICKINT_Msk;
}

/**
  * @brief  Returns the HAL revision
  * @retval version : 0xXYZR (8bits for each decimal, R for RC)
  */
uint32_t HAL_GetHalVersion(void)
{
  return __STM32F4xx_HAL_VERSION;
}

/**
  * @brief  Returns the device revision identifier.
  * @note   Revision ID lives in the upper 16 bits of DBGMCU->IDCODE.
  * @retval Device revision identifier
  */
uint32_t HAL_GetREVID(void)
{
  return((DBGMCU->IDCODE) >> 16U);
}

/**
  * @brief  Returns the device identifier.
  * @retval Device identifier
  */
uint32_t HAL_GetDEVID(void)
{
  /* Device ID is the low 12 bits of DBGMCU->IDCODE */
  return((DBGMCU->IDCODE) & IDCODE_DEVID_MASK);
}

/**
  * @brief  Enable the Debug Module during SLEEP mode
  * @retval None
  */
void HAL_DBGMCU_EnableDBGSleepMode(void)
{
  SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP);
}

/**
  * @brief  Disable the Debug Module during SLEEP mode
  * @retval None
  */
void HAL_DBGMCU_DisableDBGSleepMode(void)
{
  CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP);
}

/**
  * @brief  Enable the Debug Module during STOP mode
  * @retval None
  */
void HAL_DBGMCU_EnableDBGStopMode(void)
{
  SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP);
}

/**
  * @brief  Disable the Debug Module during STOP mode
  * @retval None
  */
void HAL_DBGMCU_DisableDBGStopMode(void)
{
  CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP);
}

/**
  * @brief  Enable the Debug Module during STANDBY mode
  * @retval None
  */
void HAL_DBGMCU_EnableDBGStandbyMode(void)
{
  SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY);
}

/**
  * @brief  Disable the Debug Module during STANDBY mode
  * @retval None
  */
void HAL_DBGMCU_DisableDBGStandbyMode(void)
{
  CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY);
}

/**
  * @brief  Enables the I/O Compensation Cell.
  * @note   The I/O compensation cell can be used only when the device supply
  *         voltage ranges from 2.4 to 3.6 V.
  * @retval None
  */
void HAL_EnableCompensationCell(void)
{
  /* single-bit write through the bit-band alias of SYSCFG->CMPCR CMP_PD */
  *(__IO uint32_t *)CMPCR_CMP_PD_BB = (uint32_t)ENABLE;
}

/**
  * @brief  Power-down the I/O Compensation Cell.
  * @note   The I/O compensation cell can be used only when the device supply
  *         voltage ranges from 2.4 to 3.6 V.
  * @retval None
  */
void HAL_DisableCompensationCell(void)
{
  *(__IO uint32_t *)CMPCR_CMP_PD_BB = (uint32_t)DISABLE;
}

/**
  * @brief  Return the unique device identifier (UID based on 96 bits)
  * @param  UID pointer to 3 words array.
  * @retval None
  */
void HAL_GetUID(uint32_t *UID)
{
  /* copy the three 32-bit words of the factory-programmed 96-bit UID */
  UID[0] = (uint32_t)(READ_REG(*((uint32_t *)UID_BASE)));
  UID[1] = (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE + 4U))));
  UID[2] = (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE + 8U))));
}

#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\
    defined(STM32F469xx) || defined(STM32F479xx)
/**
  * @brief  Enables the Internal FLASH Bank Swapping.
  *
  * @note   This function can be used only for STM32F42xxx/43xxx devices.
  *
  * @note   Flash Bank2 mapped at 0x08000000 (and aliased @0x00000000)
  *         and Flash Bank1 mapped at 0x08100000 (and aliased at 0x00100000)
  *
  * @retval None
  */
void HAL_EnableMemorySwappingBank(void)
{
  /* single-bit write through the bit-band alias of SYSCFG->MEMRMP UFB_MODE */
  *(__IO uint32_t *)UFB_MODE_BB = (uint32_t)ENABLE;
}

/**
  * @brief  Disables the Internal FLASH Bank Swapping.
  *
  * @note   This function can be used only for STM32F42xxx/43xxx devices.
  *
  * @note   The default state : Flash Bank1 mapped at 0x08000000 (and aliased @0x00000000)
  *         and Flash Bank2 mapped at 0x08100000 (and aliased at 0x00100000)
  *
  * @retval None
  */
void HAL_DisableMemorySwappingBank(void)
{
  *(__IO uint32_t *)UFB_MODE_BB = (uint32_t)DISABLE;
}
#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */

/**
  * @}
  */

/**
  * @}
  */

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
498569.c
#include <stdio.h>
#include <stdlib.h>

/*
 * Print an integer from 0 to 99 in English words.
 *
 * Fixes over the original:
 *  - `no` was read before checking scanf()'s result, so a non-numeric input
 *    left it uninitialized (undefined behavior); input is now validated.
 *  - only the digits 1-3, the teens 10-13 and the tens 20/30 had words;
 *    everything else silently printed nothing.  Lookup tables now cover the
 *    whole 0-99 range.
 *  - "Elevan" corrected to "Eleven".
 */

/* Words for 0-19 (index = value). */
static const char *small_words[] = {
	"Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven",
	"Eight", "Nine", "Ten", "Eleven", "Twelve", "Thirteen", "Fourteen",
	"Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"
};

/* Words for the tens 20, 30, ..., 90 (index = tens digit - 2). */
static const char *tens_words[] = {
	"Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy",
	"Eighty", "Ninety"
};

int main(void)
{
	int no;

	printf("\n Enter No :: ");
	if (scanf("%d", &no) != 1 || no < 0 || no > 99) {
		/* reject non-numeric or out-of-range input instead of reading
		 * an uninitialized variable */
		printf("\n Please enter a number between 0 and 99\n");
		return EXIT_FAILURE;
	}

	if (no == 0) {
		printf("\n zero");
	} else if (no < 20) {
		/* 1-19 come straight from the table (covers the teens) */
		printf(" %s ", small_words[no]);
	} else {
		printf(" %s ", tens_words[no / 10 - 2]);
		if (no % 10 != 0) {
			printf(" %s ", small_words[no % 10]);
		}
	}

	return 0;
}
397262.c
/** ****************************************************************************** * @file stm32l4xx_hal_smartcard.c * @author MCD Application Team * @version V1.0.0 * @date 26-June-2015 * @brief SMARTCARD HAL module driver. * This file provides firmware functions to manage the following * functionalities of the SMARTCARD peripheral: * + Initialization and de-initialization functions * + IO operation functions * + Peripheral Control functions * + Peripheral State and Error functions * @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] The SMARTCARD HAL driver can be used as follows: (#) Declare a SMARTCARD_HandleTypeDef handle structure (eg. SMARTCARD_HandleTypeDef hsmartcard). (#) Associate a USART to the SMARTCARD handle hsmartcard. (#) Initialize the SMARTCARD low level resources by implementing the HAL_SMARTCARD_MspInit() API: (++) Enable the USARTx interface clock. (++) USART pins configuration: (+++) Enable the clock for the USART GPIOs. (+++) Configure the USART pins (TX as alternate function pull-up, RX as alternate function Input). (++) NVIC configuration if you need to use interrupt process (HAL_SMARTCARD_Transmit_IT() and HAL_SMARTCARD_Receive_IT() APIs): (++) Configure the USARTx interrupt priority. (++) Enable the NVIC USART IRQ handle. (++) DMA Configuration if you need to use DMA process (HAL_SMARTCARD_Transmit_DMA() and HAL_SMARTCARD_Receive_DMA() APIs): (+++) Declare a DMA handle structure for the Tx/Rx channel. (+++) Enable the DMAx interface clock. (+++) Configure the declared DMA handle structure with the required Tx/Rx parameters. (+++) Configure the DMA Tx/Rx channel. (+++) Associate the initialized DMA handle to the SMARTCARD DMA Tx/Rx handle. (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx/Rx channel. 
(#) Program the Baud Rate, Parity, Mode(Receiver/Transmitter), clock enabling/disabling and accordingly, the clock parameters (parity, phase, last bit), prescaler value, guard time and NACK on transmission error enabling or disabling in the hsmartcard handle Init structure. (#) If required, program SMARTCARD advanced features (TX/RX pins swap, TimeOut, auto-retry counter,...) in the hsmartcard handle AdvancedInit structure. (#) Initialize the SMARTCARD registers by calling the HAL_SMARTCARD_Init() API: (++) This API configures also the low level Hardware GPIO, CLOCK, CORTEX...etc) by calling the customized HAL_SMARTCARD_MspInit() API. [..] (@) The specific SMARTCARD interrupts (Transmission complete interrupt, RXNE interrupt and Error Interrupts) will be managed using the macros __HAL_SMARTCARD_ENABLE_IT() and __HAL_SMARTCARD_DISABLE_IT() inside the transmit and receive process. [..] [..] Three operation modes are available within this driver : *** Polling mode IO operation *** ================================= [..] (+) Send an amount of data in blocking mode using HAL_SMARTCARD_Transmit() (+) Receive an amount of data in blocking mode using HAL_SMARTCARD_Receive() *** Interrupt mode IO operation *** =================================== [..] 
(+) Send an amount of data in non-blocking mode using HAL_SMARTCARD_Transmit_IT() (+) At transmission end of transfer HAL_SMARTCARD_TxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_TxCpltCallback() (+) Receive an amount of data in non-blocking mode using HAL_SMARTCARD_Receive_IT() (+) At reception end of transfer HAL_SMARTCARD_RxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_RxCpltCallback() (+) In case of transfer Error, HAL_SMARTCARD_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_ErrorCallback() *** DMA mode IO operation *** ============================== [..] (+) Send an amount of data in non-blocking mode (DMA) using HAL_SMARTCARD_Transmit_DMA() (+) At transmission end of transfer HAL_SMARTCARD_TxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_TxCpltCallback() (+) Receive an amount of data in non-blocking mode (DMA) using HAL_SMARTCARD_Receive_DMA() (+) At reception end of transfer HAL_SMARTCARD_RxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_RxCpltCallback() (+) In case of transfer Error, HAL_SMARTCARD_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_SMARTCARD_ErrorCallback() *** SMARTCARD HAL driver macros list *** ======================================== [..] Below the list of most used macros in SMARTCARD HAL driver. 
(+) __HAL_SMARTCARD_GET_FLAG : Check whether or not the specified SMARTCARD flag is set (+) __HAL_SMARTCARD_CLEAR_FLAG : Clear the specified SMARTCARD pending flag (+) __HAL_SMARTCARD_ENABLE_IT: Enable the specified SMARTCARD interrupt (+) __HAL_SMARTCARD_DISABLE_IT: Disable the specified SMARTCARD interrupt (+) __HAL_SMARTCARD_GET_IT_SOURCE: Check whether or not the specified SMARTCARD interrupt is enabled [..] (@) You can refer to the SMARTCARD HAL driver header file for more useful macros @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2015 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32l4xx_hal.h" /** @addtogroup STM32L4xx_HAL_Driver * @{ */ /** @defgroup SMARTCARD SMARTCARD * @brief HAL SMARTCARD module driver * @{ */ #ifdef HAL_SMARTCARD_MODULE_ENABLED /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /** @defgroup SMARTCARD_Private_Constants SMARTCARD Private Constants * @{ */ #define SMARTCARD_TEACK_REACK_TIMEOUT 1000 /*!< SMARTCARD TX or RX enable acknowledge time-out value */ #define USART_CR1_FIELDS ((uint32_t)(USART_CR1_M | USART_CR1_PCE | USART_CR1_PS | \ USART_CR1_TE | USART_CR1_RE | USART_CR1_OVER8)) /*!< USART CR1 fields of parameters set by SMARTCARD_SetConfig API */ #define USART_CR2_CLK_FIELDS ((uint32_t)(USART_CR2_CLKEN|USART_CR2_CPOL|USART_CR2_CPHA|USART_CR2_LBCL)) /*!< SMARTCARD clock-related USART CR2 fields of parameters */ #define USART_CR2_FIELDS ((uint32_t)(USART_CR2_RTOEN|USART_CR2_CLK_FIELDS|USART_CR2_STOP)) /*!< USART CR2 fields of parameters set by SMARTCARD_SetConfig API */ #define USART_CR3_FIELDS ((uint32_t)(USART_CR3_ONEBIT|USART_CR3_NACK|USART_CR3_SCARCNT)) /*!< USART CR3 fields of parameters set by SMARTCARD_SetConfig API */ /** * @} */ /* Private macros 
------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /** @addtogroup SMARTCARD_Private_Functions * @{ */ static void SMARTCARD_DMATransmitCplt(DMA_HandleTypeDef *hdma); static void SMARTCARD_DMAReceiveCplt(DMA_HandleTypeDef *hdma); static void SMARTCARD_DMAError(DMA_HandleTypeDef *hdma); static HAL_StatusTypeDef SMARTCARD_SetConfig(SMARTCARD_HandleTypeDef *hsmartcard); static void SMARTCARD_AdvFeatureConfig(SMARTCARD_HandleTypeDef *hsmartcard); static HAL_StatusTypeDef SMARTCARD_WaitOnFlagUntilTimeout(SMARTCARD_HandleTypeDef *hsmartcard, uint32_t Flag, FlagStatus Status, uint32_t Timeout); static HAL_StatusTypeDef SMARTCARD_CheckIdleState(SMARTCARD_HandleTypeDef *hsmartcard); static HAL_StatusTypeDef SMARTCARD_Transmit_IT(SMARTCARD_HandleTypeDef *hsmartcard); static HAL_StatusTypeDef SMARTCARD_EndTransmit_IT(SMARTCARD_HandleTypeDef *hsmartcard); static HAL_StatusTypeDef SMARTCARD_Receive_IT(SMARTCARD_HandleTypeDef *hsmartcard); /** * @} */ /* Exported functions --------------------------------------------------------*/ /** @defgroup SMARTCARD_Exported_Functions SMARTCARD Exported Functions * @{ */ /** @defgroup SMARTCARD_Exported_Functions_Group1 Initialization and de-initialization functions * @brief Initialization and Configuration functions * @verbatim =============================================================================== ##### Initialization and Configuration functions ##### =============================================================================== [..] This subsection provides a set of functions allowing to initialize the USARTx associated to the SmartCard. (+) These parameters can be configured: (++) Baud Rate (++) Parity: parity should be enabled, Frame Length is fixed to 8 bits plus parity: the USART frame format is given in the following table: (+++) Table 1. 
USART frame format. (+++) +---------------------------------------------------------------+ (+++) | M1M0 bits | PCE bit | USART frame | (+++) |-----------------------|---------------------------------------| (+++) | 01 | 1 | | SB | 8 bit data | PB | STB | | (+++) +---------------------------------------------------------------+ (++) Receiver/transmitter modes (++) Synchronous mode (and if enabled, phase, polarity and last bit parameters) (++) Prescaler value (++) Guard bit time (++) NACK enabling or disabling on transmission error (+) The following advanced features can be configured as well: (++) TX and/or RX pin level inversion (++) data logical level inversion (++) RX and TX pins swap (++) RX overrun detection disabling (++) DMA disabling on RX error (++) MSB first on communication line (++) Time out enabling (and if activated, timeout value) (++) Block length (++) Auto-retry counter [..] The HAL_SMARTCARD_Init() API follows the USART synchronous configuration procedures (details for the procedures are available in reference manual). @endverbatim * @{ */ /** * @brief Initialize the SMARTCARD mode according to the specified * parameters in the SMARTCARD_HandleTypeDef and initialize the associated handle. * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains * the configuration information for the specified SMARTCARD module. 
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_Init(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Check the SMARTCARD handle allocation */
  if(hsmartcard == NULL)
  {
    return HAL_ERROR;
  }

  /* Check the USART associated to the SmartCard */
  assert_param(IS_SMARTCARD_INSTANCE(hsmartcard->Instance));

  if(hsmartcard->State == HAL_SMARTCARD_STATE_RESET)
  {
    /* Allocate lock resource and initialize it */
    hsmartcard->Lock = HAL_UNLOCKED;

    /* Init the low level hardware : GPIO, CLOCK.
       Note: MspInit is invoked only when the handle is still in RESET state,
       i.e. once per handle until HAL_SMARTCARD_DeInit() puts it back to RESET. */
    HAL_SMARTCARD_MspInit(hsmartcard);
  }

  hsmartcard->State = HAL_SMARTCARD_STATE_BUSY;

  /* Disable the Peripheral to set smartcard mode.
     UE must be cleared before SCEN/LINEN/HDSEL/IREN may be changed. */
  CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

  /* In SmartCard mode, the following bits must be kept cleared:
  - LINEN in the USART_CR2 register,
  - HDSEL and IREN bits in the USART_CR3 register.*/
  CLEAR_BIT(hsmartcard->Instance->CR2, USART_CR2_LINEN);
  CLEAR_BIT(hsmartcard->Instance->CR3, (USART_CR3_HDSEL | USART_CR3_IREN));

  /* set the USART in SMARTCARD mode */
  SET_BIT(hsmartcard->Instance->CR3, USART_CR3_SCEN);

  /* Set the SMARTCARD Communication parameters */
  if (SMARTCARD_SetConfig(hsmartcard) == HAL_ERROR)
  {
    return HAL_ERROR;
  }

  /* Apply optional advanced features (pin inversion, swap, MSB first, ...) */
  if (hsmartcard->AdvancedInit.AdvFeatureInit != SMARTCARD_ADVFEATURE_NO_INIT)
  {
    SMARTCARD_AdvFeatureConfig(hsmartcard);
  }

  /* Enable the Peripheral */
  SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

  /* TEACK and/or REACK to check before moving hsmartcard->State to Ready */
  return (SMARTCARD_CheckIdleState(hsmartcard));
}

/**
  * @brief  DeInitialize the SMARTCARD peripheral.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_DeInit(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Check the SMARTCARD handle allocation */
  if(hsmartcard == NULL)
  {
    return HAL_ERROR;
  }

  /* Check the parameters */
  assert_param(IS_SMARTCARD_INSTANCE(hsmartcard->Instance));

  hsmartcard->State = HAL_SMARTCARD_STATE_BUSY;

  /* Disable the Peripheral */
  CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

  /* Restore all configuration registers to their reset value
     before releasing the low-level resources */
  WRITE_REG(hsmartcard->Instance->CR1, 0x0);
  WRITE_REG(hsmartcard->Instance->CR2, 0x0);
  WRITE_REG(hsmartcard->Instance->CR3, 0x0);
  WRITE_REG(hsmartcard->Instance->RTOR, 0x0);
  WRITE_REG(hsmartcard->Instance->GTPR, 0x0);

  /* DeInit the low level hardware */
  HAL_SMARTCARD_MspDeInit(hsmartcard);

  /* Back to RESET state: a subsequent HAL_SMARTCARD_Init() will call MspInit again */
  hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;
  hsmartcard->State = HAL_SMARTCARD_STATE_RESET;

  /* Process Unlock */
  __HAL_UNLOCK(hsmartcard);

  return HAL_OK;
}

/**
  * @brief  Initialize the SMARTCARD MSP.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval None
  */
__weak void HAL_SMARTCARD_MspInit(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_SMARTCARD_MspInit can be implemented in the user file
   */
}

/**
  * @brief  DeInitialize the SMARTCARD MSP.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval None
  */
__weak void HAL_SMARTCARD_MspDeInit(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_SMARTCARD_MspDeInit can be implemented in the user file
   */
}

/**
  * @}
  */

/** @defgroup SMARTCARD_Exported_Functions_Group2 IO operation functions
  * @brief    SMARTCARD Transmit and Receive functions
  *
@verbatim
 ==============================================================================
                         ##### IO operation functions #####
 ==============================================================================
  [..]
    This subsection provides a set of functions allowing to manage the SMARTCARD data transfers.

  [..]
    Smartcard is a single wire half duplex communication protocol.
    The Smartcard interface is designed to support asynchronous protocol Smartcards as
    defined in the ISO 7816-3 standard. The USART should be configured as:
    (+) 8 bits plus parity: where M=1 and PCE=1 in the USART_CR1 register
    (+) 1.5 stop bits when transmitting and receiving: where STOP=11 in the USART_CR2 register.

  [..]
    (+) There are two modes of transfer:
        (++) Blocking mode: The communication is performed in polling mode.
             The HAL status of all data processing is returned by the same function
             after finishing transfer.
        (++) No-Blocking mode: The communication is performed using Interrupts
             or DMA, the relevant API's return the HAL status.
             The end of the data processing will be indicated through the
             dedicated SMARTCARD IRQ when using Interrupt mode or the DMA IRQ when
             using DMA mode.
        (++) The HAL_SMARTCARD_TxCpltCallback(), HAL_SMARTCARD_RxCpltCallback() user callbacks
             will be executed respectively at the end of the Transmit or Receive process
             The HAL_SMARTCARD_ErrorCallback() user callback will be executed when a communication
             error is detected.
    (+) Blocking mode APIs are :
        (++) HAL_SMARTCARD_Transmit()
        (++) HAL_SMARTCARD_Receive()

    (+) Non Blocking mode APIs with Interrupt are :
        (++) HAL_SMARTCARD_Transmit_IT()
        (++) HAL_SMARTCARD_Receive_IT()
        (++) HAL_SMARTCARD_IRQHandler()

    (+) Non Blocking mode functions with DMA are :
        (++) HAL_SMARTCARD_Transmit_DMA()
        (++) HAL_SMARTCARD_Receive_DMA()

    (+) A set of Transfer Complete Callbacks are provided in non Blocking mode:
        (++) HAL_SMARTCARD_TxCpltCallback()
        (++) HAL_SMARTCARD_RxCpltCallback()
        (++) HAL_SMARTCARD_ErrorCallback()

@endverbatim
  * @{
  */

/**
  * @brief  Send an amount of data in blocking mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  pData: pointer to data buffer.
  * @param  Size: amount of data to be sent.
  * @param  Timeout : Timeout duration.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_Transmit(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size, uint32_t Timeout)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX))
  {
    if((pData == NULL) || (Size == 0))
    {
      return  HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hsmartcard);

    /* Check if a receive process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX;
    }

    /* Disable the Peripheral first to update mode for TX master */
    CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

    /* Disable Rx, enable Tx.
       The receiver is switched off while transmitting because smartcard is a
       single-wire half-duplex line; pending RX data is flushed first. */
    CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_RE);
    SET_BIT(hsmartcard->Instance->RQR, SMARTCARD_RXDATA_FLUSH_REQUEST);
    SET_BIT(hsmartcard->Instance->CR1, USART_CR1_TE);

    /* Enable the Peripheral */
    SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

    hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;
    hsmartcard->TxXferSize = Size;
    hsmartcard->TxXferCount = Size;

    while(hsmartcard->TxXferCount > 0)
    {
      hsmartcard->TxXferCount--;
      /* On timeout, SMARTCARD_WaitOnFlagUntilTimeout() has already reset the
         state to READY and released the lock before returning */
      if(SMARTCARD_WaitOnFlagUntilTimeout(hsmartcard, SMARTCARD_FLAG_TXE, RESET, Timeout) != HAL_OK)
      {
        return HAL_TIMEOUT;
      }
      hsmartcard->Instance->TDR = (*pData++ & (uint8_t)0xFF);
    }

    /* Wait for the last frame to fully leave the shift register */
    if(SMARTCARD_WaitOnFlagUntilTimeout(hsmartcard, SMARTCARD_FLAG_TC, RESET, Timeout) != HAL_OK)
    {
      return HAL_TIMEOUT;
    }

    /* Re-enable Rx at end of transmission if initial mode is Rx/Tx */
    if(hsmartcard->Init.Mode == SMARTCARD_MODE_TX_RX)
    {
      /* Disable the Peripheral first to update modes */
      CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);
      SET_BIT(hsmartcard->Instance->CR1, USART_CR1_RE);
      /* Enable the Peripheral */
      SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);
    }

    /* Check if a receive process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_RX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_READY;
    }

    /* Process Unlocked */
    __HAL_UNLOCK(hsmartcard);

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Receive an amount of data in blocking mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  pData: pointer to data buffer.
  * @param  Size: amount of data to be received.
  * @param  Timeout : Timeout duration.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_Receive(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size, uint32_t Timeout)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX))
  {
    if((pData == NULL) || (Size == 0))
    {
      return  HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hsmartcard);

    /* Check if a non-blocking transmit process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_RX;
    }

    hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;
    hsmartcard->RxXferSize = Size;
    hsmartcard->RxXferCount = Size;

    /* Check the remain data to be received */
    while(hsmartcard->RxXferCount > 0)
    {
      hsmartcard->RxXferCount--;
      /* On timeout, SMARTCARD_WaitOnFlagUntilTimeout() has already reset the
         state to READY and released the lock before returning */
      if(SMARTCARD_WaitOnFlagUntilTimeout(hsmartcard, SMARTCARD_FLAG_RXNE, RESET, Timeout) != HAL_OK)
      {
        return HAL_TIMEOUT;
      }
      /* Reading RDR also clears the RXNE flag */
      *pData++ = (uint8_t)(hsmartcard->Instance->RDR & (uint8_t)0x00FF);
    }

    /* Check if a non-blocking transmit process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_READY;
    }

    /* Process Unlocked */
    __HAL_UNLOCK(hsmartcard);

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Send an amount of data in interrupt mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  pData: pointer to data buffer.
  * @param  Size: amount of data to be sent.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_Transmit_IT(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX))
  {
    if((pData == NULL) || (Size == 0))
    {
      return HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hsmartcard);

    /* Check if a receive process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX;
    }

    hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;

    /* Record the transfer context consumed byte-by-byte by SMARTCARD_Transmit_IT()
       from the IRQ handler */
    hsmartcard->pTxBuffPtr = pData;
    hsmartcard->TxXferSize = Size;
    hsmartcard->TxXferCount = Size;

    /* Disable the Peripheral first to update mode for TX master */
    CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

    /* Disable Rx, enable Tx */
    CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_RE);
    SET_BIT(hsmartcard->Instance->RQR, SMARTCARD_RXDATA_FLUSH_REQUEST);
    SET_BIT(hsmartcard->Instance->CR1, USART_CR1_TE);

    /* Enable the Peripheral */
    SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);

    /* Enable the SMARTCARD Error Interrupt: (Frame error, noise error, overrun error) */
    __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

    /* Process Unlocked: done before enabling TXE so the interrupt service
       routine never runs against a locked handle */
    __HAL_UNLOCK(hsmartcard);

    /* Enable the SMARTCARD Transmit Data Register Empty Interrupt */
    __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_TXE);

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Receive an amount of data in interrupt mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  pData: pointer to data buffer.
  * @param  Size: amount of data to be received.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_SMARTCARD_Receive_IT(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX))
  {
    if((pData == NULL) || (Size == 0))
    {
      return HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hsmartcard);

    /* Check if a transmit process is ongoing or not */
    if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX)
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX;
    }
    else
    {
      hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_RX;
    }

    hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;

    /* Record the transfer context consumed byte-by-byte by SMARTCARD_Receive_IT()
       from the IRQ handler */
    hsmartcard->pRxBuffPtr = pData;
    hsmartcard->RxXferSize = Size;
    hsmartcard->RxXferCount = Size;

    /* Enable the SMARTCARD Parity Error Interrupt */
    __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_PE);

    /* Enable the SMARTCARD Error Interrupt: (Frame error, noise error, overrun error) */
    __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

    /* Process Unlocked: done before enabling RXNE so the interrupt service
       routine never runs against a locked handle */
    __HAL_UNLOCK(hsmartcard);

    /* Enable the SMARTCARD Data Register not empty Interrupt */
    __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_RXNE);

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Send an amount of data in DMA mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  pData: pointer to data buffer.
  * @param  Size: amount of data to be sent.
* @retval HAL status */ HAL_StatusTypeDef HAL_SMARTCARD_Transmit_DMA(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size) { uint32_t *tmp; if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX)) { if((pData == NULL) || (Size == 0)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(hsmartcard); /* Check if a receive process is ongoing or not */ if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX) { hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX; } else { hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX; } hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE; hsmartcard->pTxBuffPtr = pData; hsmartcard->TxXferSize = Size; hsmartcard->TxXferCount = Size; /* Disable the Peripheral first to update mode for TX master */ CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE); /* Disable Rx, enable Tx */ CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_RE); SET_BIT(hsmartcard->Instance->RQR, SMARTCARD_RXDATA_FLUSH_REQUEST); SET_BIT(hsmartcard->Instance->CR1, USART_CR1_TE); /* Enable the Peripheral */ SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE); /* Set the SMARTCARD DMA transfer complete callback */ hsmartcard->hdmatx->XferCpltCallback = SMARTCARD_DMATransmitCplt; /* Set the SMARTCARD error callback */ hsmartcard->hdmatx->XferErrorCallback = SMARTCARD_DMAError; /* Enable the SMARTCARD transmit DMA channel */ tmp = (uint32_t*)&pData; HAL_DMA_Start_IT(hsmartcard->hdmatx, *(uint32_t*)tmp, (uint32_t)&hsmartcard->Instance->TDR, Size); /* Enable the DMA transfer for transmit request by setting the DMAT bit in the SMARTCARD associated USART CR3 register */ SET_BIT(hsmartcard->Instance->CR3, USART_CR3_DMAT); /* Process Unlocked */ __HAL_UNLOCK(hsmartcard); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receive an amount of data in DMA mode. * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains * the configuration information for the specified SMARTCARD module. 
* @param pData: pointer to data buffer. * @param Size: amount of data to be received. * @note The SMARTCARD-associated USART parity is enabled (PCE = 1), * the received data contain the parity bit (MSB position). * @retval HAL status */ HAL_StatusTypeDef HAL_SMARTCARD_Receive_DMA(SMARTCARD_HandleTypeDef *hsmartcard, uint8_t *pData, uint16_t Size) { uint32_t *tmp; if ((hsmartcard->State == HAL_SMARTCARD_STATE_READY) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX)) { if((pData == NULL) || (Size == 0)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(hsmartcard); /* Check if a transmit process is ongoing or not */ if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX) { hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX_RX; } else { hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_RX; } hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE; hsmartcard->pRxBuffPtr = pData; hsmartcard->RxXferSize = Size; /* Set the SMARTCARD DMA transfer complete callback */ hsmartcard->hdmarx->XferCpltCallback = SMARTCARD_DMAReceiveCplt; /* Set the SMARTCARD DMA error callback */ hsmartcard->hdmarx->XferErrorCallback = SMARTCARD_DMAError; /* Enable the DMA channel */ tmp = (uint32_t*)&pData; HAL_DMA_Start_IT(hsmartcard->hdmarx, (uint32_t)&hsmartcard->Instance->RDR, *(uint32_t*)tmp, Size); /* Enable the DMA transfer for the receiver request by setting the DMAR bit in the SMARTCARD associated USART CR3 register */ SET_BIT(hsmartcard->Instance->CR3, USART_CR3_DMAR); /* Process Unlocked */ __HAL_UNLOCK(hsmartcard); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Handle SMARTCARD interrupt requests. * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains * the configuration information for the specified SMARTCARD module. 
  * @retval None
  */
void HAL_SMARTCARD_IRQHandler(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Error flags are checked first; each detected error is cleared, accumulated
     into ErrorCode, and the state is reset to READY. A single call to
     HAL_SMARTCARD_ErrorCallback() then reports all errors accumulated in
     this invocation. */

  /* SMARTCARD parity error interrupt occurred -------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_PE) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_PE) != RESET))
  {
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_PEF);
    hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_PE;
    /* Set the SMARTCARD state ready to be able to start again the process */
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* SMARTCARD frame error interrupt occurred --------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_FE) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_ERR) != RESET))
  {
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_FEF);
    hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_FE;
    /* Set the SMARTCARD state ready to be able to start again the process */
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* SMARTCARD noise error interrupt occurred --------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_NE) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_ERR) != RESET))
  {
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_NEF);
    hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_NE;
    /* Set the SMARTCARD state ready to be able to start again the process */
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* SMARTCARD Over-Run interrupt occurred -----------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_ORE) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_ERR) != RESET))
  {
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_OREF);
    hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_ORE;
    /* Set the SMARTCARD state ready to be able to start again the process */
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* SMARTCARD receiver timeout interrupt occurred -----------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_RTO) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_RTO) != RESET))
  {
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_RTOF);
    hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_RTO;
    /* Set the SMARTCARD state ready to be able to start again the process */
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* Call SMARTCARD Error Call back function if need be --------------------------*/
  if(hsmartcard->ErrorCode != HAL_SMARTCARD_ERROR_NONE)
  {
    HAL_SMARTCARD_ErrorCallback(hsmartcard);
  }

  /* SMARTCARD in mode Receiver ---------------------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_RXNE) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_RXNE) != RESET))
  {
    SMARTCARD_Receive_IT(hsmartcard);
    /* Clear RXNE interrupt flag done by reading RDR in SMARTCARD_Receive_IT() */
  }

  /* SMARTCARD in mode Receiver, end of block interruption ------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_EOB) != RESET) && (__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_EOB) != RESET))
  {
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
    __HAL_UNLOCK(hsmartcard);
    HAL_SMARTCARD_RxCpltCallback(hsmartcard);
    /* Clear EOBF interrupt after HAL_SMARTCARD_RxCpltCallback() call for the End of Block information
     * to be available during HAL_SMARTCARD_RxCpltCallback() processing */
    __HAL_SMARTCARD_CLEAR_IT(hsmartcard, SMARTCARD_CLEAR_EOBF);
  }

  /* SMARTCARD in mode Transmitter ------------------------------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_TXE) != RESET) &&(__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_TXE) != RESET))
  {
    SMARTCARD_Transmit_IT(hsmartcard);
  }

  /* SMARTCARD in mode Transmitter (transmission end) ------------------------*/
  if((__HAL_SMARTCARD_GET_IT(hsmartcard, SMARTCARD_IT_TC) != RESET) &&(__HAL_SMARTCARD_GET_IT_SOURCE(hsmartcard, SMARTCARD_IT_TC) != RESET))
  {
    SMARTCARD_EndTransmit_IT(hsmartcard);
  }
}

/**
  * @brief  Tx Transfer completed callback.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval None
  */
__weak void HAL_SMARTCARD_TxCpltCallback(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_SMARTCARD_TxCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief  Rx Transfer completed callback.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval None
  */
__weak void HAL_SMARTCARD_RxCpltCallback(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_SMARTCARD_RxCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief  SMARTCARD error callback.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval None
  */
__weak void HAL_SMARTCARD_ErrorCallback(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_SMARTCARD_ErrorCallback can be implemented in the user file.
   */
}

/**
  * @}
  */

/** @defgroup SMARTCARD_Exported_Functions_Group4 Peripheral State and Errors functions
  * @brief    SMARTCARD State and Errors functions
  *
@verbatim
  ==============================================================================
                  ##### Peripheral State and Errors functions #####
  ==============================================================================
  [..]
    This subsection provides a set of functions allowing to return the State of SmartCard
    handle and also return Peripheral Errors occurred during communication process
     (+) HAL_SMARTCARD_GetState() API can be helpful to check in run-time the state
         of the SMARTCARD peripheral.
     (+) HAL_SMARTCARD_GetError() checks in run-time errors that could occur during
         communication.

@endverbatim
  * @{
  */

/**
  * @brief  Return the SMARTCARD handle state.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval SMARTCARD handle state
  */
HAL_SMARTCARD_StateTypeDef HAL_SMARTCARD_GetState(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Simple accessor: no locking, reads the state field as-is */
  return hsmartcard->State;
}

/**
  * @brief  Return the SMARTCARD handle error code.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval SMARTCARD handle Error Code
  */
uint32_t HAL_SMARTCARD_GetError(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Simple accessor: error bits accumulated since the last transfer start */
  return hsmartcard->ErrorCode;
}

/**
  * @}
  */

/**
  * @}
  */

/** @defgroup SMARTCARD_Private_Functions SMARTCARD Private Functions
  * @{
  */

/**
  * @brief  Send an amount of data in non-blocking mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  *         Function called under interruption only, once
  *         interruptions have been enabled by HAL_SMARTCARD_Transmit_IT()
  * @retval HAL status
  */
static HAL_StatusTypeDef SMARTCARD_Transmit_IT(SMARTCARD_HandleTypeDef *hsmartcard)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX))
  {
    if(hsmartcard->TxXferCount == 0)
    {
      /* All bytes written: stop TXE interrupts and wait for the Transmission
         Complete interrupt, which triggers SMARTCARD_EndTransmit_IT() */

      /* Disable the SMARTCARD Transmit Data Register Empty Interrupt */
      __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_TXE);

      /* Enable the SMARTCARD Transmit Complete Interrupt */
      __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_TC);

      return HAL_OK;
    }
    else
    {
      /* Push the next byte; writing TDR clears the TXE flag */
      hsmartcard->Instance->TDR = (*hsmartcard->pTxBuffPtr++ & (uint8_t)0xFF);
      hsmartcard->TxXferCount--;

      return HAL_OK;
    }
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Wrap up transmission in non-blocking mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @retval HAL status
  */
static HAL_StatusTypeDef SMARTCARD_EndTransmit_IT(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Disable the SMARTCARD Transmit Complete Interrupt */
  __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_TC);

  /* Check if a receive process is ongoing or not */
  if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX)
  {
    hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_RX;

    /* Re-enable Rx at end of transmission if initial mode is Rx/Tx */
    if(hsmartcard->Init.Mode == SMARTCARD_MODE_TX_RX)
    {
      /* Disable the Peripheral first to update modes */
      CLEAR_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);
      SET_BIT(hsmartcard->Instance->CR1, USART_CR1_RE);
      /* Enable the Peripheral */
      SET_BIT(hsmartcard->Instance->CR1, USART_CR1_UE);
    }
  }
  else
  {
    /* Disable the SMARTCARD Error Interrupt: (Frame error, noise error, overrun error) */
    __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  /* Transfer is complete from the caller's point of view */
  HAL_SMARTCARD_TxCpltCallback(hsmartcard);

  return HAL_OK;
}

/**
  * @brief
Receive an amount of data in non-blocking mode.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  *         Function called under interruption only, once
  *         interruptions have been enabled by HAL_SMARTCARD_Receive_IT().
  * @retval HAL status
  */
static HAL_StatusTypeDef SMARTCARD_Receive_IT(SMARTCARD_HandleTypeDef *hsmartcard)
{
  if ((hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_RX) || (hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX))
  {
    /* Reading RDR clears the RXNE flag */
    *hsmartcard->pRxBuffPtr++ = (uint8_t)(hsmartcard->Instance->RDR & (uint8_t)0xFF);

    if(--hsmartcard->RxXferCount == 0)
    {
      /* Last expected byte received: stop RX-related interrupts */
      __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_RXNE);

      /* Check if a transmit process is ongoing or not */
      if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX)
      {
        hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX;
      }
      else
      {
        /* Disable the SMARTCARD Parity Error Interrupt */
        __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_PE);

        /* Disable the SMARTCARD Error Interrupt: (Frame error, noise error, overrun error) */
        __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

        hsmartcard->State = HAL_SMARTCARD_STATE_READY;
      }

      HAL_SMARTCARD_RxCpltCallback(hsmartcard);

      return HAL_OK;
    }

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief  Handle SMARTCARD Communication Timeout.
  * @param  hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                     the configuration information for the specified SMARTCARD module.
  * @param  Flag: specifies the SMARTCARD flag to check.
  * @param  Status: The new Flag status (SET or RESET).
  * @param  Timeout: Timeout duration.
  * @retval HAL status
  */
static HAL_StatusTypeDef SMARTCARD_WaitOnFlagUntilTimeout(SMARTCARD_HandleTypeDef *hsmartcard, uint32_t Flag, FlagStatus Status, uint32_t Timeout)
{
  uint32_t tickstart = HAL_GetTick();

  /* Busy-polls the flag until it differs from Status.
     Timeout semantics: Timeout == 0 fails on the first poll iteration;
     Timeout == HAL_MAX_DELAY never times out.
     On timeout, all transfer interrupts are disabled, the state is reset to
     READY and the handle lock is released before returning HAL_TIMEOUT. */

  /* Wait until flag is set */
  if(Status == RESET)
  {
    while(__HAL_SMARTCARD_GET_FLAG(hsmartcard, Flag) == RESET)
    {
      /* Check for the Timeout */
      if(Timeout != HAL_MAX_DELAY)
      {
        if((Timeout == 0) || ((HAL_GetTick()-tickstart) > Timeout))
        {
          /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error)
             interrupts for the interrupt process */
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_TXE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_RXNE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_PE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

          hsmartcard->State= HAL_SMARTCARD_STATE_READY;

          /* Process Unlocked */
          __HAL_UNLOCK(hsmartcard);

          return HAL_TIMEOUT;
        }
      }
    }
  }
  else
  {
    while(__HAL_SMARTCARD_GET_FLAG(hsmartcard, Flag) != RESET)
    {
      /* Check for the Timeout */
      if(Timeout != HAL_MAX_DELAY)
      {
        if((Timeout == 0) || ((HAL_GetTick()-tickstart) > Timeout))
        {
          /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error)
             interrupts for the interrupt process */
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_TXE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_RXNE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_PE);
          __HAL_SMARTCARD_DISABLE_IT(hsmartcard, SMARTCARD_IT_ERR);

          hsmartcard->State= HAL_SMARTCARD_STATE_READY;

          /* Process Unlocked */
          __HAL_UNLOCK(hsmartcard);

          return HAL_TIMEOUT;
        }
      }
    }
  }
  return HAL_OK;
}

/**
  * @brief  DMA SMARTCARD transmit process complete callback.
  * @param  hdma: Pointer to a DMA_HandleTypeDef structure that contains
  *               the configuration information for the specified DMA module.
  * @retval None
  */
static void SMARTCARD_DMATransmitCplt(DMA_HandleTypeDef *hdma)
{
  /* Recover the SMARTCARD handle registered as the DMA channel's parent */
  SMARTCARD_HandleTypeDef* hsmartcard = ( SMARTCARD_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;
  hsmartcard->TxXferCount = 0;

  /* Disable the DMA transfer for transmit request by resetting the DMAT bit
  in the SMARTCARD associated USART CR3 register */
  CLEAR_BIT(hsmartcard->Instance->CR3, USART_CR3_DMAT);

  /* Enable the SMARTCARD Transmit Complete Interrupt:
     the TxCpltCallback is fired later from SMARTCARD_EndTransmit_IT(),
     once the last frame has fully left the shift register */
  __HAL_SMARTCARD_ENABLE_IT(hsmartcard, SMARTCARD_IT_TC);
}

/**
  * @brief  DMA SMARTCARD receive process complete callback.
  * @param  hdma: Pointer to a DMA_HandleTypeDef structure that contains
  *               the configuration information for the specified DMA module.
  * @retval None
  */
static void SMARTCARD_DMAReceiveCplt(DMA_HandleTypeDef *hdma)
{
  /* Recover the SMARTCARD handle registered as the DMA channel's parent */
  SMARTCARD_HandleTypeDef* hsmartcard = ( SMARTCARD_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;
  hsmartcard->RxXferCount = 0;

  /* Disable the DMA transfer for the receiver request by resetting the DMAR bit
  in the SMARTCARD associated USART CR3 register */
  CLEAR_BIT(hsmartcard->Instance->CR3, USART_CR3_DMAR);

  /* Check if a transmit process is ongoing or not */
  if(hsmartcard->State == HAL_SMARTCARD_STATE_BUSY_TX_RX)
  {
    hsmartcard->State = HAL_SMARTCARD_STATE_BUSY_TX;
  }
  else
  {
    hsmartcard->State = HAL_SMARTCARD_STATE_READY;
  }

  HAL_SMARTCARD_RxCpltCallback(hsmartcard);
}

/**
  * @brief  DMA SMARTCARD communication error callback.
  * @param  hdma: Pointer to a DMA_HandleTypeDef structure that contains
  *               the configuration information for the specified DMA module.
  * @retval None
  */
static void SMARTCARD_DMAError(DMA_HandleTypeDef *hdma)
{
  /* Recover the SMARTCARD handle registered as the DMA channel's parent */
  SMARTCARD_HandleTypeDef* hsmartcard = ( SMARTCARD_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;

  /* Abort both directions: counters reset, state back to READY, DMA error
     flagged before notifying the user */
  hsmartcard->RxXferCount = 0;
  hsmartcard->TxXferCount = 0;
  hsmartcard->State= HAL_SMARTCARD_STATE_READY;
  hsmartcard->ErrorCode |= HAL_SMARTCARD_ERROR_DMA;
  HAL_SMARTCARD_ErrorCallback(hsmartcard);
}

/**
  * @brief  Configure the SMARTCARD associated USART peripheral.
 * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
 *                    the configuration information for the specified SMARTCARD module.
 * @retval HAL status (HAL_OK, or HAL_ERROR if the clock source is undefined)
 */
static HAL_StatusTypeDef SMARTCARD_SetConfig(SMARTCARD_HandleTypeDef *hsmartcard)
{
  uint32_t tmpreg = 0x00000000;
  SMARTCARD_ClockSourceTypeDef clocksource = SMARTCARD_CLOCKSOURCE_UNDEFINED;
  HAL_StatusTypeDef ret = HAL_OK;

  /* Check the parameters */
  assert_param(IS_SMARTCARD_INSTANCE(hsmartcard->Instance));
  assert_param(IS_SMARTCARD_BAUDRATE(hsmartcard->Init.BaudRate));
  assert_param(IS_SMARTCARD_WORD_LENGTH(hsmartcard->Init.WordLength));
  assert_param(IS_SMARTCARD_STOPBITS(hsmartcard->Init.StopBits));
  assert_param(IS_SMARTCARD_PARITY(hsmartcard->Init.Parity));
  assert_param(IS_SMARTCARD_MODE(hsmartcard->Init.Mode));
  assert_param(IS_SMARTCARD_POLARITY(hsmartcard->Init.CLKPolarity));
  assert_param(IS_SMARTCARD_PHASE(hsmartcard->Init.CLKPhase));
  assert_param(IS_SMARTCARD_LASTBIT(hsmartcard->Init.CLKLastBit));
  assert_param(IS_SMARTCARD_ONE_BIT_SAMPLE(hsmartcard->Init.OneBitSampling));
  assert_param(IS_SMARTCARD_NACK(hsmartcard->Init.NACKEnable));
  assert_param(IS_SMARTCARD_TIMEOUT(hsmartcard->Init.TimeOutEnable));
  assert_param(IS_SMARTCARD_AUTORETRY_COUNT(hsmartcard->Init.AutoRetryCount));

  /*-------------------------- USART CR1 Configuration -----------------------*/
  /* In SmartCard mode, M and PCE are forced to 1 (8 bits + parity).
   * Oversampling is forced to 16 (OVER8 = 0).
   * Configure the Parity and Mode:
   *  set PS bit according to hsmartcard->Init.Parity value
   *  set TE and RE bits according to hsmartcard->Init.Mode value */
  tmpreg = (uint32_t) hsmartcard->Init.Parity | hsmartcard->Init.Mode;
  tmpreg |= (uint32_t) hsmartcard->Init.WordLength;
  MODIFY_REG(hsmartcard->Instance->CR1, USART_CR1_FIELDS, tmpreg);

  /*-------------------------- USART CR2 Configuration -----------------------*/
  /* Stop bits are forced to 1.5 (STOP = 11) */
  tmpreg = hsmartcard->Init.StopBits;
  /* Synchronous mode is activated by default (CLKEN set) */
  tmpreg |= (uint32_t) USART_CR2_CLKEN | hsmartcard->Init.CLKPolarity;
  tmpreg |= (uint32_t) hsmartcard->Init.CLKPhase | hsmartcard->Init.CLKLastBit;
  tmpreg |= (uint32_t) hsmartcard->Init.TimeOutEnable;
  MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_FIELDS, tmpreg);

  /*-------------------------- USART CR3 Configuration -----------------------*/
  /* Configure
   * - one-bit sampling method versus three samples' majority rule
   *   according to hsmartcard->Init.OneBitSampling
   * - NACK transmission in case of parity error according
   *   to hsmartcard->Init.NACKEnable
   * - autoretry counter according to hsmartcard->Init.AutoRetryCount */
  tmpreg = (uint32_t) hsmartcard->Init.OneBitSampling | hsmartcard->Init.NACKEnable;
  tmpreg |= ((uint32_t)hsmartcard->Init.AutoRetryCount << SMARTCARD_CR3_SCARCNT_LSB_POS);
  MODIFY_REG(hsmartcard->Instance->CR3, USART_CR3_FIELDS, tmpreg);

  /*-------------------------- USART GTPR Configuration ----------------------*/
  /* NOTE(review): GuardTime-12 — presumably 12 is the implicit hardware guard
     time baseline so only the extra etu count is programmed; confirm against
     the device reference manual. */
  tmpreg = (hsmartcard->Init.Prescaler | (((uint32_t)hsmartcard->Init.GuardTime-12) << SMARTCARD_GTPR_GT_LSB_POS));
  MODIFY_REG(hsmartcard->Instance->GTPR, (USART_GTPR_GT|USART_GTPR_PSC), tmpreg);

  /*-------------------------- USART RTOR Configuration ----------------------*/
  /* Block length always programmed; receiver timeout value only when the
     timeout feature is enabled (its validity is asserted first). */
  tmpreg = ((uint32_t)hsmartcard->Init.BlockLength << SMARTCARD_RTOR_BLEN_LSB_POS);
  if (hsmartcard->Init.TimeOutEnable == SMARTCARD_TIMEOUT_ENABLE)
  {
    assert_param(IS_SMARTCARD_TIMEOUT_VALUE(hsmartcard->Init.TimeOutValue));
    tmpreg |= (uint32_t) hsmartcard->Init.TimeOutValue;
  }
  MODIFY_REG(hsmartcard->Instance->RTOR, (USART_RTOR_RTO|USART_RTOR_BLEN), tmpreg);

  /*-------------------------- USART BRR Configuration -----------------------*/
  /* Baud rate divider derived from whichever kernel clock feeds the USART. */
  SMARTCARD_GETCLOCKSOURCE(hsmartcard, clocksource);
  switch (clocksource)
  {
  case SMARTCARD_CLOCKSOURCE_PCLK1:
    hsmartcard->Instance->BRR = (uint16_t)(HAL_RCC_GetPCLK1Freq() / hsmartcard->Init.BaudRate);
    break;
  case SMARTCARD_CLOCKSOURCE_PCLK2:
    hsmartcard->Instance->BRR = (uint16_t)(HAL_RCC_GetPCLK2Freq() / hsmartcard->Init.BaudRate);
    break;
  case SMARTCARD_CLOCKSOURCE_HSI:
    hsmartcard->Instance->BRR = (uint16_t)(HSI_VALUE / hsmartcard->Init.BaudRate);
    break;
  case SMARTCARD_CLOCKSOURCE_SYSCLK:
    hsmartcard->Instance->BRR = (uint16_t)(HAL_RCC_GetSysClockFreq() / hsmartcard->Init.BaudRate);
    break;
  case SMARTCARD_CLOCKSOURCE_LSE:
    hsmartcard->Instance->BRR = (uint16_t)(LSE_VALUE / hsmartcard->Init.BaudRate);
    break;
  case SMARTCARD_CLOCKSOURCE_UNDEFINED:
  default:
    ret = HAL_ERROR;
    break;
  }

  return ret;
}

/**
  * @brief Configure the SMARTCARD associated USART peripheral advanced features.
  * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                    the configuration information for the specified SMARTCARD module.
 * @retval None
 */
static void SMARTCARD_AdvFeatureConfig(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Check whether the set of advanced features to configure is properly set */
  assert_param(IS_SMARTCARD_ADVFEATURE_INIT(hsmartcard->AdvancedInit.AdvFeatureInit));

  /* Each feature below is applied only when its corresponding bit is set in
     AdvFeatureInit; unlisted features keep their reset/previous values. */

  /* if required, configure TX pin active level inversion */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_TXINVERT_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_TXINV(hsmartcard->AdvancedInit.TxPinLevelInvert));
    MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_TXINV, hsmartcard->AdvancedInit.TxPinLevelInvert);
  }

  /* if required, configure RX pin active level inversion */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_RXINVERT_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_RXINV(hsmartcard->AdvancedInit.RxPinLevelInvert));
    MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_RXINV, hsmartcard->AdvancedInit.RxPinLevelInvert);
  }

  /* if required, configure data inversion */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_DATAINVERT_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_DATAINV(hsmartcard->AdvancedInit.DataInvert));
    MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_DATAINV, hsmartcard->AdvancedInit.DataInvert);
  }

  /* if required, configure RX/TX pins swap */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_SWAP_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_SWAP(hsmartcard->AdvancedInit.Swap));
    MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_SWAP, hsmartcard->AdvancedInit.Swap);
  }

  /* if required, configure RX overrun detection disabling */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_RXOVERRUNDISABLE_INIT))
  {
    assert_param(IS_SMARTCARD_OVERRUN(hsmartcard->AdvancedInit.OverrunDisable));
    MODIFY_REG(hsmartcard->Instance->CR3, USART_CR3_OVRDIS, hsmartcard->AdvancedInit.OverrunDisable);
  }

  /* if required, configure DMA disabling on reception error */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_DMADISABLEONERROR_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_DMAONRXERROR(hsmartcard->AdvancedInit.DMADisableonRxError));
    MODIFY_REG(hsmartcard->Instance->CR3, USART_CR3_DDRE, hsmartcard->AdvancedInit.DMADisableonRxError);
  }

  /* if required, configure MSB first on communication line */
  if (HAL_IS_BIT_SET(hsmartcard->AdvancedInit.AdvFeatureInit, SMARTCARD_ADVFEATURE_MSBFIRST_INIT))
  {
    assert_param(IS_SMARTCARD_ADVFEATURE_MSBFIRST(hsmartcard->AdvancedInit.MSBFirst));
    MODIFY_REG(hsmartcard->Instance->CR2, USART_CR2_MSBFIRST, hsmartcard->AdvancedInit.MSBFirst);
  }
}

/**
  * @brief Check the SMARTCARD Idle State.
  *        Waits for the transmitter/receiver enable acknowledge flags, then
  *        marks the handle READY and releases the lock.
  * @param hsmartcard: Pointer to a SMARTCARD_HandleTypeDef structure that contains
  *                    the configuration information for the specified SMARTCARD module.
  * @retval HAL status (HAL_OK, or HAL_TIMEOUT if an acknowledge never arrives)
  */
static HAL_StatusTypeDef SMARTCARD_CheckIdleState(SMARTCARD_HandleTypeDef *hsmartcard)
{
  /* Initialize the SMARTCARD ErrorCode */
  hsmartcard->ErrorCode = HAL_SMARTCARD_ERROR_NONE;

  /* Check if the Transmitter is enabled */
  if((hsmartcard->Instance->CR1 & USART_CR1_TE) == USART_CR1_TE)
  {
    /* Wait until TEACK flag is set */
    if(SMARTCARD_WaitOnFlagUntilTimeout(hsmartcard, USART_ISR_TEACK, RESET, SMARTCARD_TEACK_REACK_TIMEOUT) != HAL_OK)
    {
      return HAL_TIMEOUT;
    }
  }
  /* Check if the Receiver is enabled */
  if((hsmartcard->Instance->CR1 & USART_CR1_RE) == USART_CR1_RE)
  {
    /* Wait until REACK flag is set */
    if(SMARTCARD_WaitOnFlagUntilTimeout(hsmartcard, USART_ISR_REACK, RESET, SMARTCARD_TEACK_REACK_TIMEOUT) != HAL_OK)
    {
      return HAL_TIMEOUT;
    }
  }

  /* Initialize the SMARTCARD state */
  hsmartcard->State= HAL_SMARTCARD_STATE_READY;

  /* Process Unlocked */
  __HAL_UNLOCK(hsmartcard);

  return HAL_OK;
}

/**
  * @}
  */

#endif /* HAL_SMARTCARD_MODULE_ENABLED */

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
178872.c
#include "C:\Factory\Common\all.h"
#include "C:\Factory\Common\Options\CRRandom.h"

/*
 * Compute a - b using only bitwise operations, tracing every iteration.
 * Per step: t = ~a & b is the borrow mask, a ^ b is the borrowless
 * difference, and the borrows are re-applied one position higher (t << 1)
 * until none remain. Caller must ensure a >= b.
 */
static uint64 TestSub(uint64 a, uint64 b)
{
	for(; ; )
	{
		uint64 t;

		cout("> %016I64x\n", a);
		cout("< %016I64x\n", b);

		if(b == 0)
			break;

		t = ~a & b;
		a = a ^ b;
		b = t << 1;
	}
	return a;
}

/*
 * Check TestSub(a, b) against the native subtraction a - b.
 * Requires a >= b; aborts (errorCase) on precondition or mismatch.
 */
static void DoTest(uint64 a, uint64 b)
{
	uint64 ans1;
	uint64 ans2;

	errorCase(a < b);

	ans1 = a - b;
	ans2 = TestSub(a, b);

	cout("%I64u\n", ans1);
	cout("%I64u\n", ans2);

	errorCase(ans1 != ans2);
}

/*
 * With two arguments: test that single (a, b) pair and exit.
 * Otherwise: fuzz with random pairs (larger value first) until a key press.
 */
int main(int argc, char **argv)
{
	mt19937_initCRnd();

	if(hasArgs(2))
	{
		uint64 a = toValue64(getArg(0));
		uint64 b = toValue64(getArg(1));

		skipArg(2);
		DoTest(a, b);
		/* BUGFIX: was a bare `return;` — a constraint violation in a
		   function returning int (C99 6.8.6.4). */
		return 0;
	}

	while(!waitKey(0))
	{
		uint64 a = mt19937_rnd64();
		uint64 b = mt19937_rnd64();

		if(a < b)
			m_swap(a, b, uint64);

		DoTest(a, b);
		cout("\n");
	}

	/* Explicit success status instead of falling off the end of main. */
	return 0;
}
291201.c
// // bubble_sort.c // SortAlgorithm // // Created by pmst on 2019/3/12. // Copyright © 2019 pmst. All rights reserved. // #include "bubble_sort.h" void bubble_swap(int array[],int lhs,int rhs) { int tmp = array[lhs]; array[lhs] = array[rhs]; array[rhs] = tmp; } void bubble_sort(int array[],int count){ for (int i = 0; i < count; i++) { for (int j = 0; j < count - i -1; j++) { if (array[j] > array[j+1]) { bubble_swap(array, j, j+1); } } } }
558482.c
/* FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd. All rights reserved VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION. This file is part of the FreeRTOS distribution. FreeRTOS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License (version 2) as published by the Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception. *************************************************************************** >>! NOTE: The modification to the GPL is included to allow you to !<< >>! distribute a combined work that includes FreeRTOS without being !<< >>! obliged to provide the source code for proprietary components !<< >>! outside of the FreeRTOS kernel. !<< *************************************************************************** FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Full license text is available on the following link: http://www.freertos.org/a00114.html *************************************************************************** * * * FreeRTOS provides completely free yet professionally developed, * * robust, strictly quality controlled, supported, and cross * * platform software that is more than just the market leader, it * * is the industry's de facto standard. * * * * Help yourself get started quickly while simultaneously helping * * to support the FreeRTOS project by purchasing a FreeRTOS * * tutorial book, reference manual, or both: * * http://www.FreeRTOS.org/Documentation * * * *************************************************************************** http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading the FAQ page "My application does not run, what could be wrong?". Have you defined configASSERT()? 
http://www.FreeRTOS.org/support - In return for receiving this top quality embedded software for free we request you assist our global community by participating in the support forum. http://www.FreeRTOS.org/training - Investing in training allows your team to be as productive as possible as early as possible. Now you can receive FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers Ltd, and the world's leading authority on the world's leading RTOS. http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products, including FreeRTOS+Trace - an indispensable productivity tool, a DOS compatible FAT file system, and our tiny thread aware UDP/IP stack. http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate. Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS. http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS licenses offer ticketed support, indemnification and commercial middleware. http://www.SafeRTOS.com - High Integrity Systems also provide a safety engineered and independently SIL3 certified version for use in safety and mission critical applications that require provable dependability. 1 tab == 4 spaces! */ /*----------------------------------------------------------- * Implementation of functions defined in portable.h for the Tern EE 186 * port. *----------------------------------------------------------*/ /* Library includes. */ #include <embedded.h> #include <ae.h> /* Scheduler includes. */ #include "FreeRTOS.h" #include "task.h" #include "portasm.h" /* The timer increments every four clocks, hence the divide by 4. */ #define portTIMER_COMPARE ( uint16_t ) ( ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ) / ( uint32_t ) 4 ) /* From the RDC data sheet. */ #define portENABLE_TIMER_AND_INTERRUPT ( uint16_t ) 0xe001 /* Interrupt control. 
*/
/* EIO register used to acknowledge/clear the timer interrupt. */
#define portEIO_REGISTER 0xff22
#define portCLEAR_INTERRUPT 0x0008

/* Setup the hardware to generate the required tick frequency. */
static void prvSetupTimerInterrupt( void );

/* The ISR used depends on whether the preemptive or cooperative scheduler
is being used. */
#if( configUSE_PREEMPTION == 1 )
	/* Tick service routine used by the scheduler when preemptive scheduling is
	being used. */
	static void __interrupt __far prvPreemptiveTick( void );
#else
	/* Tick service routine used by the scheduler when cooperative scheduling is
	being used. */
	static void __interrupt __far prvNonPreemptiveTick( void );
#endif

/* Trap routine used by taskYIELD() to manually cause a context switch. */
static void __interrupt __far prvYieldProcessor( void );

/* The timer initialisation functions leave interrupts enabled, which is not
what we want.  This ISR is installed temporarily in case the timer fires before
we get a change to disable interrupts again. */
static void __interrupt __far prvDummyISR( void );

/*-----------------------------------------------------------*/

/* See header file for description.  Builds a fake interrupt-return stack frame
so that the first context restore "returns" into the task function with its
parameter in place. */
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
StackType_t DS_Reg = 0;

	/* Place a few bytes of known values on the bottom of the stack.
	This is just useful for debugging. */
	*pxTopOfStack = 0x1111;
	pxTopOfStack--;
	*pxTopOfStack = 0x2222;
	pxTopOfStack--;
	*pxTopOfStack = 0x3333;
	pxTopOfStack--;

	/* We are going to start the scheduler using a return from interrupt
	instruction to load the program counter, so first there would be the
	function call with parameters preamble. */
	*pxTopOfStack = FP_SEG( pvParameters );
	pxTopOfStack--;
	*pxTopOfStack = FP_OFF( pvParameters );
	pxTopOfStack--;
	*pxTopOfStack = FP_SEG( pxCode );
	pxTopOfStack--;
	*pxTopOfStack = FP_OFF( pxCode );
	pxTopOfStack--;

	/* Next the status register and interrupt return address. */
	*pxTopOfStack = portINITIAL_SW;
	pxTopOfStack--;
	*pxTopOfStack = FP_SEG( pxCode );
	pxTopOfStack--;
	*pxTopOfStack = FP_OFF( pxCode );
	pxTopOfStack--;

	/* The remaining registers would be pushed on the stack by our context
	switch function.  These are loaded with values simply to make debugging
	easier. */
	*pxTopOfStack = ( StackType_t ) 0xAAAA;	/* AX */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xBBBB;	/* BX */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xCCCC;	/* CX */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xDDDD;	/* DX */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xEEEE;	/* ES */
	pxTopOfStack--;

	/* We need the true data segment. */
	__asm{	MOV DS_Reg, DS };

	*pxTopOfStack = DS_Reg;						/* DS */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0x0123;	/* SI */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xDDDD;	/* DI */
	pxTopOfStack--;
	*pxTopOfStack = ( StackType_t ) 0xBBBB;	/* BP */

	return pxTopOfStack;
}
/*-----------------------------------------------------------*/

BaseType_t xPortStartScheduler( void )
{
	/* This is called with interrupts already disabled. */

	/* Put our manual switch (yield) function on a known vector. */
	setvect( portSWITCH_INT_NUMBER, prvYieldProcessor );

	/* Setup the tick interrupt. */
	prvSetupTimerInterrupt();

	/* Kick off the scheduler by setting up the context of the first task. */
	portFIRST_CONTEXT();

	/* Should not get here! */
	return pdFALSE;
}
/*-----------------------------------------------------------*/

static void __interrupt __far prvDummyISR( void )
{
	/* The timer initialisation functions leave interrupts enabled,
	which is not what we want.  This ISR is installed temporarily in
	case the timer fires before we get a change to disable interrupts
	again.  It only acknowledges the interrupt and does no scheduling. */
	outport( portEIO_REGISTER, portCLEAR_INTERRUPT );
}
/*-----------------------------------------------------------*/

/* The ISR used depends on whether the preemptive or cooperative scheduler
is being used.
*/
#if( configUSE_PREEMPTION == 1 )
	/* Preemptive tick ISR: advances the tick count and, if a context switch
	is due, swaps in the next task before acknowledging the interrupt. */
	static void __interrupt __far prvPreemptiveTick( void )
	{
		/* Get the scheduler to update the task states following the tick. */
		if( xTaskIncrementTick() != pdFALSE )
		{
			/* Switch in the context of the next task to be run. */
			portSWITCH_CONTEXT();
		}

		/* Reset interrupt. */
		outport( portEIO_REGISTER, portCLEAR_INTERRUPT );
	}
#else
	static void __interrupt __far prvNonPreemptiveTick( void )
	{
		/* Same as preemptive tick, but the cooperative scheduler is being used
		so we don't have to switch in the context of the next task. */
		xTaskIncrementTick();

		/* Reset interrupt. */
		outport( portEIO_REGISTER, portCLEAR_INTERRUPT );
	}
#endif
/*-----------------------------------------------------------*/

/* Software-interrupt handler backing taskYIELD(): an unconditional switch. */
static void __interrupt __far prvYieldProcessor( void )
{
	/* Switch in the context of the next task to be run. */
	portSWITCH_CONTEXT();
}
/*-----------------------------------------------------------*/

void vPortEndScheduler( void )
{
	/* Not implemented. */
}
/*-----------------------------------------------------------*/

/* Program timer 2 to fire at the tick rate and hook the tick ISR on its
interrupt vector (0x13). */
static void prvSetupTimerInterrupt( void )
{
const uint16_t usTimerACompare = portTIMER_COMPARE, usTimerAMode = portENABLE_TIMER_AND_INTERRUPT;
const uint16_t usT2_IRQ = 0x13;

	/* Configure the timer, the dummy handler is used here as the init
	function leaves interrupts enabled. */
	t2_init( usTimerAMode, usTimerACompare, prvDummyISR );

	/* Disable interrupts again before installing the real handlers. */
	portDISABLE_INTERRUPTS();

	#if( configUSE_PREEMPTION == 1 )
		/* Tick service routine used by the scheduler when preemptive scheduling is
		being used. */
		setvect( usT2_IRQ, prvPreemptiveTick );
	#else
		/* Tick service routine used by the scheduler when cooperative scheduling is
		being used. */
		setvect( usT2_IRQ, prvNonPreemptiveTick );
	#endif
}
257389.c
/* * File: clh_glk_in.c * Author: Vasileios Trigonakis <[email protected]> * * Description: * CLH lock implementation for GLS. * * The MIT License (MIT) * * Copyright (c) 2016 Vasileios Trigonakis * Distributed Programming Lab (LPD), EPFL * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#include "clh_glk_impl.h"

/* Select the spin-wait "pause" instruction at compile time via PAUSE_IN. */
#if defined(PAUSE_IN)
#  if PAUSE_IN == 0
#    undef PAUSE_IN
#    define PAUSE_IN()
#  elif PAUSE_IN == 1
#    undef PAUSE_IN
#    define PAUSE_IN() asm volatile("nop");
#  elif PAUSE_IN == 2
#    undef PAUSE_IN
#    define PAUSE_IN() asm volatile("pause");
#  elif PAUSE_IN == 3
#    undef PAUSE_IN
#    define PAUSE_IN() asm volatile("mfence");
#  elif PAUSE_IN == 4
#    undef PAUSE_IN
#    define PAUSE_IN() asm volatile("nop"); asm volatile("nop"); asm volatile("nop");
#  else
#    undef PAUSE_IN
#    define PAUSE_IN()
#    warning unknown code for pause_in mcs
#  endif
#endif

/* Per-thread slot index into lock->local[]; -1 until first use. */
static __thread int __clh_id = -1;
/* Global counter handing out unique slot indices to threads. */
static volatile int __clh_ids = 0;

/* Return (lazily allocating) the calling thread's queue node for this lock.
   First call per thread claims a slot id; first call per (thread, lock)
   mallocs the node. */
static inline volatile clh_lock_node_t*
clh_get_local(clh_lock_t* lock)
{
  if (__builtin_expect(__clh_id < 0, 0))
    {
      __clh_id = __sync_fetch_and_add(&__clh_ids, 1);
      assert(__clh_id < CLH_MAX_THR);
    }
  if (__builtin_expect(lock->local[__clh_id] == NULL, 0))
    {
      lock->local[__clh_id] = (clh_lock_node_t*) malloc(sizeof(clh_lock_node_t));
      assert(lock->local[__clh_id] != NULL);
    }
  return lock->local[__clh_id];
}

/* Replace the calling thread's node pointer (CLH recycles the predecessor's
   node on unlock). */
static inline void
clh_set_local(clh_lock_t* lock, clh_lock_node_t* new)
{
  lock->local[__clh_id] = new;
}

/* NOT IMPLEMENTED: always reports failure (returns non-zero).
   The commented-out draft below is kept as documentation of the intended
   approach; it suffers from the ABA problem as noted. */
inline int
clh_lock_trylock(clh_lock_t* lock)
{
  /* volatile clh_lock_node_t* local = clh_get_local(lock); */
  /* volatile clh_lock_node_t* pred = lock->head; */
  /* if (pred == NULL || pred->locked == 0) */
  /*   { */
  /*     local->locked = 1; */
  /*     volatile int i = 1230; while (i--); */
  /*     /\* has the ABA problem :-( *\/ */
  /*     if (__sync_val_compare_and_swap(&lock->head, pred, local) == pred) */
  /* 	{ */
  /* 	  if (__builtin_expect(pred == NULL, 0)) */
  /* 	    { */
  /* 	      pred = (clh_lock_node_t*) malloc(sizeof(clh_lock_node_t)); */
  /* 	      assert(pred != NULL); */
  /* 	    } */
  /* 	  local->pred = pred; */
  /* 	  return 0; */
  /* 	} */
  /*   } */
  fprintf(stderr, "clh_lock_trylock() is not yet implemented\n");
  return 1;
}

/* Acquire: mark own node locked, atomically swap it into the queue tail
   (lock->head), then spin on the predecessor's `locked` flag.  A NULL
   predecessor means we are the very first acquirer, so a fresh (unlocked)
   dummy predecessor is allocated and no spinning is needed. */
inline int
clh_lock_lock(clh_lock_t* lock)
{
  volatile clh_lock_node_t* local = clh_get_local(lock);
  local->locked = 1;

  clh_lock_node_t* pred = swap_ptr((void*) &lock->head, (void*) local);
  if (__builtin_expect(pred == NULL, 0))
    {
      pred = (clh_lock_node_t*) malloc(sizeof(clh_lock_node_t));
      assert(pred != NULL);
      local->pred = pred;
      return 0;
    }

  local->pred = pred;
  while (pred->locked == 1)
    {
      PAUSE_IN();
    }
  return 0;
}

/* Release: clear own `locked` flag (wakes the successor) and recycle the
   predecessor's node as this thread's node for the next acquisition. */
inline int
clh_lock_unlock(clh_lock_t* lock)
{
  volatile clh_lock_node_t* local = clh_get_local(lock);
  clh_lock_node_t* pred = (clh_lock_node_t*) local->pred;
  local->locked = 0;
  clh_set_local(lock, pred);
  return 0;
}

/* Initialize: empty queue, no per-thread nodes yet; eagerly creates the
   initializing thread's node. The mutexattr argument is ignored. */
int
clh_lock_init(clh_lock_t* lock, pthread_mutexattr_t* a)
{
  lock->head = NULL;
  int i;
  for (i = 0; i < CLH_MAX_THR; i++)
    {
      lock->local[i] = NULL;
    }
  clh_get_local(lock);
  asm volatile ("mfence");
  return 0;
}

/* NOTE(review): queue nodes allocated in clh_get_local/clh_lock_lock are
   never freed here — apparent leak by design (nodes migrate between
   threads); confirm intended. */
int
clh_lock_destroy(clh_lock_t* the_lock)
{
  return 0;
}
177864.c
#include <stdint.h> #include <stdio.h> #include "mtest.h" static struct f_f t[] = { #include "sanity/logbf.h" #include "special/logbf.h" }; int main(void) { #pragma STDC FENV_ACCESS ON float y; float d; int e, i, err = 0; struct f_f *p; for (i = 0; i < sizeof t/sizeof *t; i++) { p = t + i; if (p->r < 0) continue; fesetround(p->r); feclearexcept(FE_ALL_EXCEPT); y = logbf(p->x); e = fetestexcept(INEXACT|INVALID|DIVBYZERO|UNDERFLOW|OVERFLOW); if (!checkexceptall(e, p->e, p->r)) { printf("%s:%d: bad fp exception: %s logbf(%a)=%a, want %s", p->file, p->line, rstr(p->r), p->x, p->y, estr(p->e)); printf(" got %s\n", estr(e)); err++; } d = ulperrf(y, p->y, p->dy); if (!checkcr(y, p->y, p->r)) { printf("%s:%d: %s logbf(%a) want %a got %a ulperr %.3f = %a + %a\n", p->file, p->line, rstr(p->r), p->x, p->y, y, d, d-p->dy, p->dy); err++; } } return !!err; }
925641.c
/** @file
  Copyright (C) 2013, dmazar. All rights reserved.
  Copyright (C) 2019, vit9696. All rights reserved.

  All rights reserved.

  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD License
  which accompanies this distribution.  The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/

#include "BootCompatInternal.h"

#include <Guid/OcVariable.h>
#include <IndustryStandard/AppleHibernate.h>

#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/OcBootManagementLib.h>
#include <Library/DeviceTreeLib.h>
#include <Library/MachoLib.h>
#include <Library/OcMemoryLib.h>
#include <Library/OcMiscLib.h>
#include <Library/OcStringLib.h>
#include <Library/PrintLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Library/UefiLib.h>
#include <Library/UefiRuntimeServicesTableLib.h>

/**
  Protect RT data from boot.efi relocation by marking them MemMapIO.
  See more details in the function definition.

  @param[in,out]  RtReloc        Relocation entry list to store entry types.
  @param[in]      MemoryMapSize  Memory map size.
  @param[in]      DescriptorSize Memory map descriptor size.
  @param[in,out]  MemoryMap      MemoryMap to protect entries in.
  @param[in]      SysTableArea   Special address that should not be protected.
  @param[in]      SysTableAreaSize Size of the area that should not be protected.
**/
STATIC
VOID
ProtectRtMemoryFromRelocation (
  IN OUT RT_RELOC_PROTECT_DATA   *RtReloc,
  IN     UINTN                   MemoryMapSize,
  IN     UINTN                   DescriptorSize,
  IN OUT EFI_MEMORY_DESCRIPTOR   *MemoryMap,
  IN     EFI_PHYSICAL_ADDRESS    SysTableArea,
  IN     UINTN                   SysTableAreaSize
  )
{
  //
  // We protect RT data & code from relocation by marking them MemMapIO except EFI_SYSTEM_TABLE area.
  //
  // This fixes NVRAM issues on some boards where access to NVRAM after boot services is possible
  // only in SMM mode. RT driver passes data to SMM handler through previously negotiated buffer
  // and this buffer must not be relocated.
  // Explained and examined in detail by CodeRush and night199uk:
  // https://web.archive.org/web/20141025080709/http://www.projectosx.com/forum/lofiversion/index.php/t3298.html
  //
  // Starting with APTIO V for NVRAM to work not only RT data but RT code too can no longer be moved
  // due to the use of commbuffers. This, however, creates a memory protection issue, because
  // XNU maps RT data as RW and code as RX, and AMI appears use global variables in some RT drivers.
  // For this reason we shim (most?) affected RT services via wrapers that unset the WP bit during
  // the UEFI call and set it back on return in a separate driver.
  // Explained in detail by Download-Fritz and vit9696:
  // http://www.insanelymac.com/forum/topic/331381-aptiomemoryfix (first 2 links in particular).
  //
  // EFI_SYSTEM_TABLE is passed directly through kernel boot arguments, and thus goes through static
  // mapping (ml_static_ptovirt) in efi_set_tables_64 call. This mapping works as PHYS | CONST = VIRT.
  // To avoid kernel accessing unmapped virtual address we let boot.efi relocate the page with
  // EFI_SYSTEM_TABLE area. While technically it is possible to let the original page to be relocated,
  // we pick a safer root by using a private copy.
  //
  // The primary downside of this approach is that boot.efi will still reserve the contiguous memory
  // for runtime services after the kernel: efiRuntimeServicesPageCount pages starting from
  // efiRuntimeServicesPageStart within kaddr ~ ksize range. However, unlike Macs, which have reserved
  // gaps only for ACPI NVS, MemMapIO and similar regions, with this approach almost no physical memory
  // in efiRuntimeServicesPageStart area is used at all. This memory is never reclaimed by XNU, which
  // marks it as allocated in i386_vm_init. Expirements show that at least 85 MBs (Z170) are used for
  // this process. On server systems the issue is much worse due to many devices in place.
  // Ideally boot.efi should only count RT code and RT data pages, but it is not easy to change.
  //

  UINTN                   NumEntries;
  UINTN                   Index;
  EFI_MEMORY_DESCRIPTOR   *Desc;
  RT_RELOC_PROTECT_INFO   *RelocInfo;

  Desc                = MemoryMap;
  RtReloc->NumEntries = 0;
  RelocInfo           = &RtReloc->RelocInfo[0];
  NumEntries          = MemoryMapSize / DescriptorSize;

  // Walk every descriptor; remember and retype qualifying RT code/data
  // entries, leaving the system table area untouched.
  for (Index = 0; Index < NumEntries; ++Index) {
    if ((Desc->Attribute & EFI_MEMORY_RUNTIME) != 0
      && Desc->NumberOfPages > 0
      && (Desc->Type == EfiRuntimeServicesCode || Desc->Type == EfiRuntimeServicesData)
      && !AREA_WITHIN_DESCRIPTOR (Desc, SysTableArea, SysTableAreaSize)) {

      // Fixed-size bookkeeping array: bail out (leaving remaining entries
      // unprotected) rather than overflow it.
      if (RtReloc->NumEntries == ARRAY_SIZE (RtReloc->RelocInfo)) {
        RUNTIME_DEBUG ((
          DEBUG_ERROR,
          "OCABC: Cannot save mem type for entry: %Lx (type 0x%x)\n",
          (UINT64) Desc->PhysicalStart,
          (UINT32) Desc->Type
          ));
        return;
      }

      RelocInfo->PhysicalStart = Desc->PhysicalStart;
      RelocInfo->PhysicalEnd   = LAST_DESCRIPTOR_ADDR (Desc);
      RelocInfo->Type          = Desc->Type;
      Desc->Type               = EfiMemoryMappedIO;
      ++RelocInfo;
      ++RtReloc->NumEntries;
    }

    Desc = NEXT_MEMORY_DESCRIPTOR (Desc, DescriptorSize);
  }
}

/**
  Copy RT flagged areas to separate memmap, define virtual to physical
  address mapping, and call SetVirtualAddressMap() only with that partial
  memmap.

  @param[in,out]  KernelState        Kernel support state.
  @param[in]      MemoryMapSize      Memory map size.
  @param[in]      DescriptorSize     Memory map descriptor size.
  @param[in]      DescriptorVersion  Memor map descriptor version.
  @param[in,out]  MemoryMap          Complete memory map with all entries.

  @retval EFI_SUCCESS on success.
**/
STATIC
EFI_STATUS
PerformRtMemoryVirtualMapping (
  IN OUT KERNEL_SUPPORT_STATE   *KernelState,
  IN     UINTN                  MemoryMapSize,
  IN     UINTN                  DescriptorSize,
  IN     UINT32                 DescriptorVersion,
  IN     EFI_MEMORY_DESCRIPTOR  *MemoryMap
  )
{
  //
  // About partial memmap:
  // Some UEFIs are converting pointers to virtual addresses even if they do not
  // point to regions with RT flag. This means that those UEFIs are using
  // Desc->VirtualStart even for non-RT regions. Linux had issues with this:
  // http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7cb00b72876ea2451eb79d468da0e8fb9134aa8a
  // They are doing it Windows way now - copying RT descriptors to separate
  // mem map and passing that stripped map to SetVirtualAddressMap().
  // We'll do the same, although it seems that just assigning
  // VirtualStart = PhysicalStart for non-RT areas also does the job.
  //
  // About virtual to physical mappings:
  // Also adds virtual to physical address mappings for RT areas. This is needed since
  // SetVirtualAddressMap() does not work on my Aptio without that. Probably because some driver
  // has a bug and is trying to access new virtual addresses during the change.
  // Linux and Windows are doing the same thing and problem is
  // not visible there.
  //

  UINTN                           NumEntries;
  UINTN                           Index;
  EFI_MEMORY_DESCRIPTOR           *Desc;
  EFI_MEMORY_DESCRIPTOR           *VirtualDesc;
  EFI_STATUS                      Status;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageTable;

  Desc                      = MemoryMap;
  NumEntries                = MemoryMapSize / DescriptorSize;
  VirtualDesc               = KernelState->VmMap;
  KernelState->VmMapSize    = 0;
  KernelState->VmMapDescSize = DescriptorSize;

  //
  // Get current VM page table.
  //
  PageTable = OcGetCurrentPageTable (NULL);

  for (Index = 0; Index < NumEntries; ++Index) {
    //
    // Legacy note. Some UEFIs end up with "reserved" area with EFI_MEMORY_RUNTIME flag set when
    // Intel HD3000 or HD4000 is used. For example, on GA-H81N-D2H there is a single 1 GB descriptor:
    // 000000009F800000-00000000DF9FFFFF 0000000000040200 8000000000000000
    //
    // All known boot.efi starting from at least 10.5.8 properly handle this flag and do not assign
    // virtual addresses to reserved descriptors. However, our legacy code had a bug, and did not
    // check for EfiReservedMemoryType. Therefore it replaced such entries by EfiMemoryMappedIO
    // to "prevent" boot.efi relocations.
    //
    // The relevant discussion and the original fix can be found here:
    // http://web.archive.org/web/20141111124211/http://www.projectosx.com:80/forum/lofiversion/index.php/t2428-450.html
    // https://sourceforge.net/p/cloverefiboot/code/605/
    //
    // The correct approach is to properly handle EfiReservedMemoryType with EFI_MEMORY_RUNTIME
    // attribute set, and not mess with the memory map passed to boot.efi. As done here.
    //
    if (Desc->Type != EfiReservedMemoryType && (Desc->Attribute & EFI_MEMORY_RUNTIME) != 0) {
      //
      // Check if there is enough space in virtual map.
      //
      if (KernelState->VmMapSize + DescriptorSize > sizeof (KernelState->VmMap)) {
        RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Too many RT entries to memory map\n"));
        return EFI_OUT_OF_RESOURCES;
      }

      //
      // Copy region with EFI_MEMORY_RUNTIME flag to virtual map.
      //
      CopyMem (VirtualDesc, Desc, DescriptorSize);

      //
      // Define virtual to physical mapping.
      //
      Status = VmMapVirtualPages (
        &KernelState->VmContext,
        PageTable,
        Desc->VirtualStart,
        Desc->NumberOfPages,
        Desc->PhysicalStart
        );
      if (EFI_ERROR(Status)) {
        RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: RT mapping failure - %r\n", Status));
        return EFI_OUT_OF_RESOURCES;
      }

      //
      // Proceed to next virtual map slot.
      //
      VirtualDesc = NEXT_MEMORY_DESCRIPTOR (VirtualDesc, DescriptorSize);
      KernelState->VmMapSize += DescriptorSize;
    }

    //
    // Proceed to next original map slot.
    //
    Desc = NEXT_MEMORY_DESCRIPTOR (Desc, DescriptorSize);
  }

  // Ensure the new page-table entries are visible before the firmware
  // transitions to the virtual address map.
  VmFlushCaches ();

  Status = gRT->SetVirtualAddressMap (
    KernelState->VmMapSize,
    DescriptorSize,
    DescriptorVersion,
    KernelState->VmMap
    );

  return Status;
}

/**
  Revert RT data protected types to let XNU kernel kernel properly map data.

  @param[in]      RtReloc        Relocated entry list with entry types.
  @param[in]      MemoryMapSize  Memory map size.
  @param[in]      DescriptorSize Memory map descriptor size.
  @param[in,out]  MemoryMap      MemoryMap to restore protected entries in.
**/ STATIC VOID RestoreProtectedRtMemoryTypes ( IN RT_RELOC_PROTECT_DATA *RtReloc, IN UINTN MemoryMapSize, IN UINTN DescriptorSize, IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap ) { UINTN Index; UINTN Index2; UINTN NumEntriesLeft; UINTN NumEntries; EFI_PHYSICAL_ADDRESS PhysicalStart; EFI_PHYSICAL_ADDRESS PhysicalEnd; EFI_MEMORY_DESCRIPTOR *Desc; NumEntriesLeft = RtReloc->NumEntries; NumEntries = MemoryMapSize / DescriptorSize; Desc = MemoryMap; for (Index = 0; Index < NumEntries && NumEntriesLeft > 0; ++Index) { PhysicalStart = Desc->PhysicalStart; PhysicalEnd = LAST_DESCRIPTOR_ADDR (Desc); for (Index2 = 0; Index2 < RtReloc->NumEntries; ++Index2) { // // PhysicalStart match is enough, but just in case. // Select firmwares, like Lenovo ThinkPad X240, have insane reserved areas. // For example 0000000000000000-FFFFFFFFFFFFFFFF 0000000000000000 0000000000000000. // Any fuzzy matching is prone to errors, so just do exact comparison. // if (PhysicalStart == RtReloc->RelocInfo[Index2].PhysicalStart && PhysicalEnd == RtReloc->RelocInfo[Index2].PhysicalEnd) { Desc->Type = RtReloc->RelocInfo[Index2].Type; --NumEntriesLeft; break; } } Desc = NEXT_MEMORY_DESCRIPTOR (Desc, DescriptorSize); } if (NumEntriesLeft > 0) { RUNTIME_DEBUG (( DEBUG_ERROR, "OCABC: Failed to restore %u entries out of %u\n", (UINT32) NumEntriesLeft, (UINT32) RtReloc->NumEntries )); } } /** Prepare environment for normal booting. Called when boot.efi jumps to kernel. @param[in,out] BootCompat Boot compatibility context. @param[in,out] BootArgs Apple kernel boot arguments. **/ STATIC VOID AppleMapPrepareForBooting ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat, IN OUT VOID *BootArgs ) { EFI_STATUS Status; DTEntry Chosen; CHAR8 *ArgsStr; UINT32 ArgsSize; OC_BOOT_ARGUMENTS BA; UINTN MemoryMapSize; EFI_MEMORY_DESCRIPTOR *MemoryMap; UINTN DescriptorSize; OcParseBootArgs (&BA, BootArgs); if (BootCompat->Settings.ProvideCustomSlide) { // // Restore the variables we tampered with to support custom slides. 
// AppleSlideRestore (BootCompat, &BA); } if (BootCompat->Settings.DisableSingleUser) { // // First, there is a BootArgs entry for XNU. // OcRemoveArgumentFromCmd (BA.CommandLine, "-s"); // // Second, there is a DT entry. // DTInit ((VOID *)(UINTN) *BA.DeviceTreeP, BA.DeviceTreeLength); Status = DTLookupEntry (NULL, "/chosen", &Chosen); if (!EFI_ERROR(Status)) { Status = DTGetProperty (Chosen, "boot-args", (VOID **) &ArgsStr, &ArgsSize); if (!EFI_ERROR(Status) && ArgsSize > 0) { OcRemoveArgumentFromCmd (ArgsStr, "-s"); } } } if (BootCompat->Settings.AvoidRuntimeDefrag) { MemoryMapSize = *BA.MemoryMapSize; MemoryMap = (EFI_MEMORY_DESCRIPTOR *)(UINTN) (*BA.MemoryMap); DescriptorSize = *BA.MemoryMapDescriptorSize; // // We must restore EfiRuntimeServicesCode memory area types, because otherwise // RuntimeServices won't be mapped. // RestoreProtectedRtMemoryTypes ( &BootCompat->RtReloc, MemoryMapSize, DescriptorSize, MemoryMap ); // // On native Macs due to EfiBoot defragmentation it is guaranteed that // VADDR % BASE_1GB == PADDR. macOS 11 started to rely on this in // acpi_count_enabled_logical_processors, which needs to access MADT (APIC) // ACPI table, and does that through ConfigurationTables. // // The simplest approach is to just copy the table, so that it is accessible // at both actual mapping and 1:1 defragmented mapping. This should be safe, // as the memory for 1:1 defragmented mapping is reserved by EfiBoot in the // first place and is otherwise stolen anyway. // if (BootCompat->KernelState.ConfigurationTable != NULL) { CopyMem ( (VOID*) ((UINTN) BA.SystemTable->ConfigurationTable & (BASE_1GB - 1)), BootCompat->KernelState.ConfigurationTable, sizeof (*BootCompat->KernelState.ConfigurationTable) * BA.SystemTable->NumberOfTableEntries ); } } } /** Prepare environment for hibernate wake. Called when boot.efi jumps to kernel. @param[in,out] BootCompat Boot compatibility context. @param[in,out] ImageHeaderPage Apple hibernate image page number. 
**/ STATIC VOID AppleMapPrepareForHibernateWake ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat, IN UINTN ImageHeaderPage ) { IOHibernateImageHeader *ImageHeader; IOHibernateHandoff *Handoff; ImageHeader = (IOHibernateImageHeader *) EFI_PAGES_TO_SIZE (ImageHeaderPage); // // Legacy note. In legacy implementations systemTableOffset was unconditionally overwritten // with a wrong address due to ImageHeader->runtimePages not being converted from pages to bytes. // Fortunately systemTableOffset was unused when kIOHibernateHandoffTypeMemoryMap is unspecified. // systemTableOffset is calculated properly by boot.efi itself starting from 10.6.8 at least, // and thus this assignment was useless in the first place. // // // At this step we have two routes. // // 1. Remove newly generated memory map from hibernate image to let XNU use the original mapping. // This is known to work well on most systems primarily because Windows requires UEFI firmwares // to preserve physical memory consistency at S4 wake. "On a UEFI platform, firmware runtime memory // must be consistent across S4 sleep state transitions, in both size and location.", see: // https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/oem-uefi#hibernation-state-s4-transition-requirements // 2. Recover memory map just as we do for normal booting. This was causing issues on some firmwares, // which provided very strange memory maps after S4 wake. In other cases this should not immediately // break things. XNU will entirely remove efiRuntimeServicesPageStart/efiRuntimeServicesPageSize // mapping, and our new memory map entries will unconditionally overwrite previous ones. In case // no physical memory changes happened this should work fine. // Handoff = (IOHibernateHandoff *) EFI_PAGES_TO_SIZE ((UINTN) ImageHeader->handoffPages); while (Handoff->type != kIOHibernateHandoffTypeEnd) { if (Handoff->type == kIOHibernateHandoffTypeMemoryMap) { if (BootCompat->Settings.DiscardHibernateMap) { // // Route 1. 
Discard the new memory map here, and let XNU use what it had. // It is unknown whether there still are any firmwares that need this. // Handoff->type = kIOHibernateHandoffType; } else { // // Route 2. Recovery memory protection types just as normal boot. // if (BootCompat->KernelState.VmMapDescSize == 0) { RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Saved descriptor size cannot be 0\n")); return; } if (BootCompat->Settings.AvoidRuntimeDefrag) { // // I think we should not be there, but ideally all quirks are relatively independent. // RestoreProtectedRtMemoryTypes ( &BootCompat->RtReloc, Handoff->bytecount, BootCompat->KernelState.VmMapDescSize, (EFI_MEMORY_DESCRIPTOR *)(UINTN) Handoff->data ); } } break; } Handoff = (IOHibernateHandoff *) ((UINTN) Handoff + sizeof(Handoff) + Handoff->bytecount); } } VOID AppleMapPrepareMemoryPool ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat ) { EFI_STATUS Status; if (!BootCompat->Settings.SetupVirtualMap || BootCompat->KernelState.VmContext.MemoryPool != NULL) { return; } Status = VmAllocateMemoryPool ( &BootCompat->KernelState.VmContext, OC_DEFAULT_VMEM_PAGE_COUNT, BootCompat->ServicePtrs.GetMemoryMap ); if (EFI_ERROR(Status)) { DEBUG ((DEBUG_ERROR, "OCABC: Memory pool allocation failure - %r\n", Status)); } } VOID AppleMapPrepareBooterState ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat, IN OUT EFI_LOADED_IMAGE *LoadedImage, IN EFI_GET_MEMORY_MAP GetMemoryMap OPTIONAL ) { EFI_STATUS Status; // // Allocate memory pool if needed. // AppleMapPrepareMemoryPool ( BootCompat ); // // This function may be called twice, do not redo in this case. // AppleMapPlatformSaveState ( &BootCompat->KernelState.AsmState, &BootCompat->KernelState.KernelJump ); if (BootCompat->Settings.AvoidRuntimeDefrag) { if (BootCompat->KernelState.SysTableRtArea == 0) { // // Allocate RT data pages for copy of UEFI system table for kernel. // This one also has to be 32-bit due to XNU BootArgs structure. 
// The reason for this allocation to be required is because XNU uses static // mapping for directly passed pointers (see ProtectRtMemoryFromRelocation). // BootCompat->KernelState.SysTableRtArea = BASE_4GB; BootCompat->KernelState.SysTableRtAreaSize = gST->Hdr.HeaderSize; Status = OcAllocatePagesFromTop ( EfiRuntimeServicesData, EFI_SIZE_TO_PAGES (gST->Hdr.HeaderSize), &BootCompat->KernelState.SysTableRtArea, GetMemoryMap, NULL ); if (EFI_ERROR(Status)) { DEBUG (( DEBUG_ERROR, "OCABC: Failed to allocate system table memory - %r\n", Status )); BootCompat->KernelState.SysTableRtArea = 0; return; } // // Copy UEFI system table to the new location. // CopyMem ( (VOID *)(UINTN) BootCompat->KernelState.SysTableRtArea, gST, gST->Hdr.HeaderSize ); // // Remember physical configuration table location. // BootCompat->KernelState.ConfigurationTable = gST->ConfigurationTable; } // // Assign loaded image with custom system table. // LoadedImage->SystemTable = (EFI_SYSTEM_TABLE *)(UINTN) BootCompat->KernelState.SysTableRtArea; } } VOID AppleMapPrepareKernelJump ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat, IN UINTN ImageAddress, IN BOOLEAN AppleHibernateWake ) { UINT64 KernelEntryVaddr; UINT32 KernelEntry; IOHibernateImageHeader *ImageHeader; // // There is no reason to patch the kernel when we do not need it. // if (!BootCompat->Settings.AvoidRuntimeDefrag && !BootCompat->Settings.DiscardHibernateMap) { return; } // // Check whether we have image address and abort if not. // if (ImageAddress == 0) { RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Failed to find image address, hibernate %d\n", AppleHibernateWake)); return; } if (!AppleHibernateWake) { // // ImageAddress points to the first kernel segment, __HIB. // Kernel image header is located in __TEXT, which follows __HIB. // ImageAddress += KERNEL_BASE_PADDR; // // Cut higher virtual address bits. 
// KernelEntryVaddr = MachoRuntimeGetEntryAddress ( (VOID*) ImageAddress ); if (KernelEntryVaddr == 0) { RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Kernel entry point was not found!")); return; } // // Perform virtual to physical address conversion by subtracting __TEXT base // and adding current physical kernel location. // KernelEntry = (UINT32) (KernelEntryVaddr - KERNEL_TEXT_VADDR + ImageAddress); } else { // // Read kernel entry from hibernation image and patch it with jump. // At this stage HIB section is not yet copied from sleep image to it's // proper memory destination. so we'll patch entry point in sleep image. // Note the virtual -> physical conversion through truncation. // ImageHeader = (IOHibernateImageHeader *) ImageAddress; KernelEntry = ((UINT32)(UINTN) &ImageHeader->fileExtentMap[0]) + ImageHeader->fileExtentMapSize + ImageHeader->restore1CodeOffset; } // // Save original kernel entry code. // CopyMem ( &BootCompat->KernelState.KernelOrg[0], (VOID *)(UINTN) KernelEntry, sizeof (BootCompat->KernelState.KernelOrg) ); // // Copy kernel jump code to kernel entry address. // CopyMem ( (VOID *)(UINTN) KernelEntry, &BootCompat->KernelState.KernelJump, sizeof (BootCompat->KernelState.KernelJump) ); } EFI_STATUS AppleMapPrepareMemState ( IN OUT BOOT_COMPAT_CONTEXT *BootCompat, IN UINTN MemoryMapSize, IN UINTN DescriptorSize, IN UINT32 DescriptorVersion, IN EFI_MEMORY_DESCRIPTOR *MemoryMap ) { EFI_STATUS Status; // // Protect RT areas from relocation by marking then MemMapIO. // if (BootCompat->Settings.AvoidRuntimeDefrag) { ProtectRtMemoryFromRelocation ( &BootCompat->RtReloc, MemoryMapSize, DescriptorSize, MemoryMap, BootCompat->KernelState.SysTableRtArea, BootCompat->KernelState.SysTableRtAreaSize ); } // // Virtualize RT services with all needed fixes. 
// if (BootCompat->Settings.SetupVirtualMap) { Status = PerformRtMemoryVirtualMapping ( &BootCompat->KernelState, MemoryMapSize, DescriptorSize, DescriptorVersion, MemoryMap ); } else { Status = gRT->SetVirtualAddressMap ( MemoryMapSize, DescriptorSize, DescriptorVersion, MemoryMap ); } // // Copy now virtualized UEFI system table for boot.efi to hand it to the kernel. // if (BootCompat->Settings.AvoidRuntimeDefrag) { CopyMem ( (VOID *)(UINTN) BootCompat->KernelState.SysTableRtArea, gST, gST->Hdr.HeaderSize ); } return Status; } UINTN EFIAPI AppleMapPrepareKernelState ( IN UINTN Args, IN BOOLEAN ModeX64 ) { BOOT_COMPAT_CONTEXT *BootCompatContext; BootCompatContext = GetBootCompatContext (); if (BootCompatContext->ServiceState.AppleHibernateWake) { AppleMapPrepareForHibernateWake ( BootCompatContext, Args ); } else { AppleMapPrepareForBooting ( BootCompatContext, (VOID *) Args ); } // // Restore original kernel entry code. // CopyMem ( BootCompatContext->KernelState.AsmState.KernelEntry, &BootCompatContext->KernelState.KernelOrg[0], sizeof (BootCompatContext->KernelState.KernelOrg) ); return Args; }
398274.c
//***************************************************************************** // // startup_gcc.c - Startup code for use with GNU tools. // // Copyright (c) 2013-2014 Texas Instruments Incorporated. All rights reserved. // Software License Agreement // // Texas Instruments (TI) is supplying this software for use solely and // exclusively on TI's microcontroller products. The software is owned by // TI and/or its suppliers, and is protected under applicable copyright // laws. You may not combine this software with "viral" open-source // software in order to form a larger program. // // THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS. // NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT // NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY // CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL // DAMAGES, FOR ANY REASON WHATSOEVER. // // This is part of revision 2.1.0.12573 of the EK-TM4C1294XL Firmware Package. // //***************************************************************************** #include <stdint.h> #include "inc/hw_nvic.h" #include "inc/hw_types.h" //***************************************************************************** // // Forward declaration of the default fault handlers. // //***************************************************************************** void ResetISR(void); static void NmiSR(void); static void FaultISR(void); static void IntDefaultHandler(void); //***************************************************************************** // // External declarations for the interrupt handlers used by the application. 
// //***************************************************************************** extern void GPIOPortMIntHandler(void); extern void MPU9150I2CIntHandler(void); extern void UARTStdioIntHandler(void); //***************************************************************************** // // The entry point for the application. // //***************************************************************************** extern int main(void); //***************************************************************************** // // Reserve space for the system stack. // //***************************************************************************** static uint32_t pui32Stack[256]; //***************************************************************************** // // The vector table. Note that the proper constructs must be placed on this to // ensure that it ends up at physical address 0x0000.0000. // //***************************************************************************** __attribute__ ((section(".isr_vector"))) void (* const g_pfnVectors[])(void) = { (void (*)(void))((uint32_t)pui32Stack + sizeof(pui32Stack)), // The initial stack pointer ResetISR, // The reset handler NmiSR, // The NMI handler FaultISR, // The hard fault handler IntDefaultHandler, // The MPU fault handler IntDefaultHandler, // The bus fault handler IntDefaultHandler, // The usage fault handler 0, // Reserved 0, // Reserved 0, // Reserved 0, // Reserved IntDefaultHandler, // SVCall handler IntDefaultHandler, // Debug monitor handler 0, // Reserved IntDefaultHandler, // The PendSV handler IntDefaultHandler, // The SysTick handler IntDefaultHandler, // GPIO Port A IntDefaultHandler, // GPIO Port B IntDefaultHandler, // GPIO Port C IntDefaultHandler, // GPIO Port D IntDefaultHandler, // GPIO Port E UARTStdioIntHandler, // UART0 Rx and Tx IntDefaultHandler, // UART1 Rx and Tx IntDefaultHandler, // SSI0 Rx and Tx IntDefaultHandler, // I2C0 Master and Slave IntDefaultHandler, // PWM Fault IntDefaultHandler, // PWM 
Generator 0 IntDefaultHandler, // PWM Generator 1 IntDefaultHandler, // PWM Generator 2 IntDefaultHandler, // Quadrature Encoder 0 IntDefaultHandler, // ADC Sequence 0 IntDefaultHandler, // ADC Sequence 1 IntDefaultHandler, // ADC Sequence 2 IntDefaultHandler, // ADC Sequence 3 IntDefaultHandler, // Watchdog timer IntDefaultHandler, // Timer 0 subtimer A IntDefaultHandler, // Timer 0 subtimer B IntDefaultHandler, // Timer 1 subtimer A IntDefaultHandler, // Timer 1 subtimer B IntDefaultHandler, // Timer 2 subtimer A IntDefaultHandler, // Timer 2 subtimer B IntDefaultHandler, // Analog Comparator 0 IntDefaultHandler, // Analog Comparator 1 IntDefaultHandler, // Analog Comparator 2 IntDefaultHandler, // System Control (PLL, OSC, BO) IntDefaultHandler, // FLASH Control IntDefaultHandler, // GPIO Port F IntDefaultHandler, // GPIO Port G IntDefaultHandler, // GPIO Port H IntDefaultHandler, // UART2 Rx and Tx IntDefaultHandler, // SSI1 Rx and Tx IntDefaultHandler, // Timer 3 subtimer A IntDefaultHandler, // Timer 3 subtimer B IntDefaultHandler, // I2C1 Master and Slave IntDefaultHandler, // CAN0 IntDefaultHandler, // CAN1 IntDefaultHandler, // Ethernet IntDefaultHandler, // Hibernate IntDefaultHandler, // USB0 IntDefaultHandler, // PWM Generator 3 IntDefaultHandler, // uDMA Software Transfer IntDefaultHandler, // uDMA Error IntDefaultHandler, // ADC1 Sequence 0 IntDefaultHandler, // ADC1 Sequence 1 IntDefaultHandler, // ADC1 Sequence 2 IntDefaultHandler, // ADC1 Sequence 3 IntDefaultHandler, // External Bus Interface 0 IntDefaultHandler, // GPIO Port J IntDefaultHandler, // GPIO Port K IntDefaultHandler, // GPIO Port L IntDefaultHandler, // SSI2 Rx and Tx IntDefaultHandler, // SSI3 Rx and Tx IntDefaultHandler, // UART3 Rx and Tx IntDefaultHandler, // UART4 Rx and Tx IntDefaultHandler, // UART5 Rx and Tx IntDefaultHandler, // UART6 Rx and Tx IntDefaultHandler, // UART7 Rx and Tx IntDefaultHandler, // I2C2 Master and Slave IntDefaultHandler, // I2C3 Master and Slave 
IntDefaultHandler, // Timer 4 subtimer A IntDefaultHandler, // Timer 4 subtimer B IntDefaultHandler, // Timer 5 subtimer A IntDefaultHandler, // Timer 5 subtimer B IntDefaultHandler, // FPU 0, // Reserved 0, // Reserved IntDefaultHandler, // I2C4 Master and Slave IntDefaultHandler, // I2C5 Master and Slave GPIOPortMIntHandler, // GPIO Port M IntDefaultHandler, // GPIO Port N 0, // Reserved IntDefaultHandler, // Tamper IntDefaultHandler, // GPIO Port P (Summary or P0) IntDefaultHandler, // GPIO Port P1 IntDefaultHandler, // GPIO Port P2 IntDefaultHandler, // GPIO Port P3 IntDefaultHandler, // GPIO Port P4 IntDefaultHandler, // GPIO Port P5 IntDefaultHandler, // GPIO Port P6 IntDefaultHandler, // GPIO Port P7 IntDefaultHandler, // GPIO Port Q (Summary or Q0) IntDefaultHandler, // GPIO Port Q1 IntDefaultHandler, // GPIO Port Q2 IntDefaultHandler, // GPIO Port Q3 IntDefaultHandler, // GPIO Port Q4 IntDefaultHandler, // GPIO Port Q5 IntDefaultHandler, // GPIO Port Q6 IntDefaultHandler, // GPIO Port Q7 IntDefaultHandler, // GPIO Port R IntDefaultHandler, // GPIO Port S IntDefaultHandler, // SHA/MD5 0 IntDefaultHandler, // AES 0 IntDefaultHandler, // DES3DES 0 IntDefaultHandler, // LCD Controller 0 IntDefaultHandler, // Timer 6 subtimer A IntDefaultHandler, // Timer 6 subtimer B IntDefaultHandler, // Timer 7 subtimer A IntDefaultHandler, // Timer 7 subtimer B IntDefaultHandler, // I2C6 Master and Slave MPU9150I2CIntHandler, // I2C7 Master and Slave IntDefaultHandler, // HIM Scan Matrix Keyboard 0 IntDefaultHandler, // One Wire 0 IntDefaultHandler, // HIM PS/2 0 IntDefaultHandler, // HIM LED Sequencer 0 IntDefaultHandler, // HIM Consumer IR 0 IntDefaultHandler, // I2C8 Master and Slave IntDefaultHandler, // I2C9 Master and Slave IntDefaultHandler // GPIO Port T }; //***************************************************************************** // // The following are constructs created by the linker, indicating where the // the "data" and "bss" segments reside in memory. 
The initializers for the // for the "data" segment resides immediately following the "text" segment. // //***************************************************************************** extern uint32_t _etext; extern uint32_t _data; extern uint32_t _edata; extern uint32_t _bss; extern uint32_t _ebss; //***************************************************************************** // // This is the code that gets called when the processor first starts execution // following a reset event. Only the absolutely necessary set is performed, // after which the application supplied entry() routine is called. Any fancy // actions (such as making decisions based on the reset cause register, and // resetting the bits in that register) are left solely in the hands of the // application. // //***************************************************************************** void ResetISR(void) { uint32_t *pui32Src, *pui32Dest; // // Copy the data segment initializers from flash to SRAM. // pui32Src = &_etext; for(pui32Dest = &_data; pui32Dest < &_edata; ) { *pui32Dest++ = *pui32Src++; } // // Zero fill the bss segment. // __asm(" ldr r0, =_bss\n" " ldr r1, =_ebss\n" " mov r2, #0\n" " .thumb_func\n" "zero_loop:\n" " cmp r0, r1\n" " it lt\n" " strlt r2, [r0], #4\n" " blt zero_loop"); // // Enable the floating-point unit. This must be done here to handle the // case where main() uses floating-point and the function prologue saves // floating-point registers (which will fault if floating-point is not // enabled). Any configuration of the floating-point unit using DriverLib // APIs must be done here prior to the floating-point unit being enabled. // // Note that this does not use DriverLib since it might not be included in // this project. // HWREG(NVIC_CPAC) = ((HWREG(NVIC_CPAC) & ~(NVIC_CPAC_CP10_M | NVIC_CPAC_CP11_M)) | NVIC_CPAC_CP10_FULL | NVIC_CPAC_CP11_FULL); // // Call the application's entry point. 
// main(); } //***************************************************************************** // // This is the code that gets called when the processor receives a NMI. This // simply enters an infinite loop, preserving the system state for examination // by a debugger. // //***************************************************************************** static void NmiSR(void) { // // Enter an infinite loop. // while(1) { } } //***************************************************************************** // // This is the code that gets called when the processor receives a fault // interrupt. This simply enters an infinite loop, preserving the system state // for examination by a debugger. // //***************************************************************************** static void FaultISR(void) { // // Enter an infinite loop. // while(1) { } } //***************************************************************************** // // This is the code that gets called when the processor receives an unexpected // interrupt. This simply enters an infinite loop, preserving the system state // for examination by a debugger. // //***************************************************************************** static void IntDefaultHandler(void) { // // Go into an infinite loop. // while(1) { } }
902107.c
/*
 * Copyright (c) 2002 Michael Niedermayer <[email protected]>
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * (de)interleave fields filter
 */

#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"

enum FilterMode {
    MODE_NONE,
    MODE_INTERLEAVE,
    MODE_DEINTERLEAVE
};

typedef struct {
    const AVClass *class;
    int luma_mode, chroma_mode, alpha_mode; ///<FilterMode
    int luma_swap, chroma_swap, alpha_swap;
    int nb_planes;                          // plane count of the input pixel format
    int linesize[4], chroma_height;         // per-plane byte widths; chroma plane height
    int has_alpha;                          // non-zero if the format carries an alpha plane
} IlContext;

#define OFFSET(x) offsetof(IlContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption il_options[] = {
    {"luma_mode", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
    {"l",         "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
    {"none",         NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE},         0, 0, FLAGS, "luma_mode"},
    {"interleave",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "luma_mode"},
    {"i",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "luma_mode"},
    {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
    {"d",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
    {"chroma_mode", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
    {"c",           "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
    {"none",         NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE},         0, 0, FLAGS, "chroma_mode"},
    {"interleave",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "chroma_mode"},
    {"i",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "chroma_mode"},
    {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
    {"d",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
    {"alpha_mode", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
    {"a",          "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
    {"none",         NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE},         0, 0, FLAGS, "alpha_mode"},
    {"interleave",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "alpha_mode"},
    {"i",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE},   0, 0, FLAGS, "alpha_mode"},
    {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
    {"d",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
    {"luma_swap",   "swap luma fields",   OFFSET(luma_swap),   AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ls",          "swap luma fields",   OFFSET(luma_swap),   AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"chroma_swap", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"cs",          "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"alpha_swap",  "swap alpha fields",  OFFSET(alpha_swap),  AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"as",          "swap alpha fields",  OFFSET(alpha_swap),  AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(il);

/**
 * Accept every non-paletted, non-hwaccel pixel format.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    int fmt, ret;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_PAL) &&
            !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL) &&
            (ret = ff_add_format(&formats, fmt)) < 0)
            return ret;
    }

    return ff_set_common_formats(ctx, formats);
}

/**
 * Cache per-plane geometry (linesizes, chroma height, alpha presence)
 * for the negotiated input format.
 */
static int config_input(AVFilterLink *inlink)
{
    IlContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->has_alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->chroma_height = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);

    return 0;
}

/**
 * Copy one plane from src to dst, rearranging field lines per mode.
 *
 * MODE_DEINTERLEAVE packs the top field into the upper half of dst and
 * the bottom field into the lower half; MODE_INTERLEAVE does the inverse;
 * MODE_NONE copies rows through (optionally swapping line pairs).
 * @param swap non-zero to exchange the two fields.
 *
 * Fix vs. the previous version: with an odd plane height h, row h-1 was
 * never written in any mode, leaving uninitialized bytes in the output
 * (reachable, since query_formats() does not restrict heights).  The last
 * row is now copied straight across.  src is also const-qualified as it
 * is read-only.
 */
static void interleave(uint8_t *dst, const uint8_t *src, int w, int h,
                       int dst_linesize, int src_linesize,
                       enum FilterMode mode, int swap)
{
    const int a = swap;
    const int b = 1 - a;
    const int m = h >> 1;
    int y;

    switch (mode) {
    case MODE_DEINTERLEAVE:
        for (y = 0; y < m; y++) {
            memcpy(dst + dst_linesize *  y     , src + src_linesize * (y * 2 + a), w);
            memcpy(dst + dst_linesize * (y + m), src + src_linesize * (y * 2 + b), w);
        }
        break;
    case MODE_NONE:
        for (y = 0; y < m; y++) {
            memcpy(dst + dst_linesize *  y * 2     , src + src_linesize * (y * 2 + a), w);
            memcpy(dst + dst_linesize * (y * 2 + 1), src + src_linesize * (y * 2 + b), w);
        }
        break;
    case MODE_INTERLEAVE:
        for (y = 0; y < m; y++) {
            memcpy(dst + dst_linesize * (y * 2 + a), src + src_linesize *  y     , w);
            memcpy(dst + dst_linesize * (y * 2 + b), src + src_linesize * (y + m), w);
        }
        break;
    }

    //
    // Odd height: the loops above cover rows 0..2*m-1 only; copy the
    // remaining last row unchanged so dst is fully initialized.
    //
    if (h & 1)
        memcpy(dst + dst_linesize * (h - 1), src + src_linesize * (h - 1), w);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    IlContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int comp;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&inpicref);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, inpicref);

    // Plane 0 is luma; middle planes are chroma; a trailing alpha plane
    // (when present) is handled with its own mode/swap settings.
    interleave(out->data[0], inpicref->data[0],
               s->linesize[0], inlink->h,
               out->linesize[0], inpicref->linesize[0],
               s->luma_mode, s->luma_swap);

    for (comp = 1; comp < (s->nb_planes - s->has_alpha); comp++) {
        interleave(out->data[comp], inpicref->data[comp],
                   s->linesize[comp], s->chroma_height,
                   out->linesize[comp], inpicref->linesize[comp],
                   s->chroma_mode, s->chroma_swap);
    }

    if (s->has_alpha) {
        comp = s->nb_planes - 1;
        interleave(out->data[comp], inpicref->data[comp],
                   s->linesize[comp], inlink->h,
                   out->linesize[comp], inpicref->linesize[comp],
                   s->alpha_mode, s->alpha_swap);
    }

    av_frame_free(&inpicref);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_il = {
    .name          = "il",
    .description   = NULL_IF_CONFIG_SMALL("Deinterleave or interleave fields."),
    .priv_size     = sizeof(IlContext),
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .priv_class    = &il_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
256001.c
/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * JVMTI test agent for the GetObjectMonitorUsage/objmonusage005 test.
 *
 * The agent requests all potential JVMTI capabilities at load time, then
 * the Java side calls check() on an object; check() verifies that
 * GetObjectMonitorUsage either succeeds or fails with
 * JVMTI_ERROR_MUST_POSSESS_CAPABILITY exactly when can_get_monitor_info
 * was not granted.  getRes() reports the accumulated test status.
 */

#include <stdio.h>
#include <string.h>
#include "jvmti.h"
#include "agent_common.h"
#include "JVMTITools.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Compatibility shims: JNI invocation syntax differs between C and C++. */
#ifndef JNI_ENV_ARG

#ifdef __cplusplus
#define JNI_ENV_ARG(x, y) y
#define JNI_ENV_PTR(x) x
#else
#define JNI_ENV_ARG(x,y) x, y
#define JNI_ENV_PTR(x) (*x)
#endif

#endif

/* Test status codes reported back to the Java harness via getRes(). */
#define PASSED 0
#define STATUS_FAILED 2

static jvmtiEnv *jvmti = NULL;      /* JVMTI environment, set in Agent_Initialize */
static jvmtiCapabilities caps;      /* capabilities actually granted to the agent */
static jint result = PASSED;        /* sticky test verdict; set to STATUS_FAILED on error */

#ifdef STATIC_BUILD
/* Statically-linked builds export per-test entry points that forward to
 * the common initializer. */
JNIEXPORT jint JNICALL Agent_OnLoad_objmonusage005(JavaVM *jvm, char *options, void *reserved) {
    return Agent_Initialize(jvm, options, reserved);
}
JNIEXPORT jint JNICALL Agent_OnAttach_objmonusage005(JavaVM *jvm, char *options, void *reserved) {
    return Agent_Initialize(jvm, options, reserved);
}
JNIEXPORT jint JNI_OnLoad_objmonusage005(JavaVM *jvm, char *options, void *reserved) {
    return JNI_VERSION_1_8;
}
#endif

/*
 * Obtain the JVMTI environment and request every potential capability.
 * Leaves the granted capability set in `caps` so check() can tell whether
 * can_get_monitor_info is available.  Returns JNI_OK on success, JNI_ERR
 * on any JVMTI failure.
 */
jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
    jint res;
    jvmtiError err;

    res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
        JVMTI_VERSION_1_1);
    if (res != JNI_OK || jvmti == NULL) {
        printf("Wrong result of a valid call to GetEnv !\n");
        return JNI_ERR;
    }

    err = (*jvmti)->GetPotentialCapabilities(jvmti, &caps);
    if (err != JVMTI_ERROR_NONE) {
        printf("(GetPotentialCapabilities) unexpected error: %s (%d)\n",
               TranslateError(err), err);
        return JNI_ERR;
    }

    /* Ask for everything the VM can offer ... */
    err = (*jvmti)->AddCapabilities(jvmti, &caps);
    if (err != JVMTI_ERROR_NONE) {
        printf("(AddCapabilities) unexpected error: %s (%d)\n",
               TranslateError(err), err);
        return JNI_ERR;
    }

    /* ... then re-read what was actually granted. */
    err = (*jvmti)->GetCapabilities(jvmti, &caps);
    if (err != JVMTI_ERROR_NONE) {
        printf("(GetCapabilities) unexpected error: %s (%d)\n",
               TranslateError(err), err);
        return JNI_ERR;
    }

    if (!caps.can_get_monitor_info) {
        printf("Warning: GetObjectMonitorUsage is not implemented\n");
    }

    return JNI_OK;
}

/*
 * Java entry point: call GetObjectMonitorUsage on `obj` and verify the
 * error code is consistent with the granted capabilities.
 * MUST_POSSESS_CAPABILITY is the expected outcome when
 * can_get_monitor_info was not granted; any other non-NONE error fails
 * the test.
 */
JNIEXPORT void JNICALL
Java_nsk_jvmti_GetObjectMonitorUsage_objmonusage005_check(JNIEnv *env, jclass cls, jobject obj) {
    jvmtiError err;
    jvmtiMonitorUsage inf;

    err = (*jvmti)->GetObjectMonitorUsage(jvmti, obj, &inf);
    if (err == JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
            !caps.can_get_monitor_info) {
        /* Ok, it's expected: the capability was never granted. */
    } else if (err != JVMTI_ERROR_NONE) {
        printf("(GetObjectMonitorUsage) unexpected error: %s (%d)\n",
               TranslateError(err), err);
        result = STATUS_FAILED;
    }
}

/* Java entry point: report the accumulated verdict (PASSED or STATUS_FAILED). */
JNIEXPORT jint JNICALL
Java_nsk_jvmti_GetObjectMonitorUsage_objmonusage005_getRes(JNIEnv *env, jclass cls) {
    return result;
}

#ifdef __cplusplus
}
#endif
271747.c
/* lapack/double/dlarft.f -- translated by f2c (version 20050501). You must link the resulting object file with libf2c: on Microsoft Windows system, link with libf2c.lib; on Linux or Unix systems, link with .../path/to/libf2c.a -lm or, if you install libf2c.a in a standard place, with -lf2c -lm -- in that order, at the end of the command line, as in cc *.o -lf2c -lm Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., http://www.netlib.org/f2c/libf2c.zip */ #ifdef __cplusplus extern "C" { #endif #include "v3p_netlib.h" /* Table of constant values */ static integer c__1 = 1; static doublereal c_b8 = 0.; /*< SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) >*/ /* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer * k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t, integer *ldt, ftnlen direct_len, ftnlen storev_len) { /* System generated locals */ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ integer i__, j; doublereal vii; extern logical lsame_(const char *, const char *, ftnlen, ftnlen); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, ftnlen), dtrmv_(char *, char *, char *, integer *, doublereal *, integer *, doublereal *, integer *, ftnlen, ftnlen, ftnlen); (void)direct_len; (void)storev_len; /* -- LAPACK auxiliary routine (version 3.0) -- */ /* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */ /* Courant Institute, Argonne National Lab, and Rice University */ /* February 29, 1992 */ /* .. Scalar Arguments .. */ /*< CHARACTER DIRECT, STOREV >*/ /*< INTEGER K, LDT, LDV, N >*/ /* .. */ /* .. Array Arguments .. */ /*< DOUBLE PRECISION T( LDT, * ), TAU( * ), V( LDV, * ) >*/ /* .. 
*/ /* Purpose */ /* ======= */ /* DLARFT forms the triangular factor T of a real block reflector H */ /* of order n, which is defined as a product of k elementary reflectors. */ /* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */ /* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */ /* If STOREV = 'C', the vector which defines the elementary reflector */ /* H(i) is stored in the i-th column of the array V, and */ /* H = I - V * T * V' */ /* If STOREV = 'R', the vector which defines the elementary reflector */ /* H(i) is stored in the i-th row of the array V, and */ /* H = I - V' * T * V */ /* Arguments */ /* ========= */ /* DIRECT (input) CHARACTER*1 */ /* Specifies the order in which the elementary reflectors are */ /* multiplied to form the block reflector: */ /* = 'F': H = H(1) H(2) . . . H(k) (Forward) */ /* = 'B': H = H(k) . . . H(2) H(1) (Backward) */ /* STOREV (input) CHARACTER*1 */ /* Specifies how the vectors which define the elementary */ /* reflectors are stored (see also Further Details): */ /* = 'C': columnwise */ /* = 'R': rowwise */ /* N (input) INTEGER */ /* The order of the block reflector H. N >= 0. */ /* K (input) INTEGER */ /* The order of the triangular factor T (= the number of */ /* elementary reflectors). K >= 1. */ /* V (input/output) DOUBLE PRECISION array, dimension */ /* (LDV,K) if STOREV = 'C' */ /* (LDV,N) if STOREV = 'R' */ /* The matrix V. See further details. */ /* LDV (input) INTEGER */ /* The leading dimension of the array V. */ /* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */ /* TAU (input) DOUBLE PRECISION array, dimension (K) */ /* TAU(i) must contain the scalar factor of the elementary */ /* reflector H(i). */ /* T (output) DOUBLE PRECISION array, dimension (LDT,K) */ /* The k by k triangular factor T of the block reflector. */ /* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */ /* lower triangular. The rest of the array is not used. 
*/ /* LDT (input) INTEGER */ /* The leading dimension of the array T. LDT >= K. */ /* Further Details */ /* =============== */ /* The shape of the matrix V and the storage of the vectors which define */ /* the H(i) is best illustrated by the following example with n = 5 and */ /* k = 3. The elements equal to 1 are not stored; the corresponding */ /* array elements are modified but restored on exit. The rest of the */ /* array is not used. */ /* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */ /* V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) */ /* ( v1 1 ) ( 1 v2 v2 v2 ) */ /* ( v1 v2 1 ) ( 1 v3 v3 ) */ /* ( v1 v2 v3 ) */ /* ( v1 v2 v3 ) */ /* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */ /* V = ( v1 v2 v3 ) V = ( v1 v1 1 ) */ /* ( v1 v2 v3 ) ( v2 v2 v2 1 ) */ /* ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) */ /* ( 1 v3 ) */ /* ( 1 ) */ /* ===================================================================== */ /* .. Parameters .. */ /*< DOUBLE PRECISION ONE, ZERO >*/ /*< PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) >*/ /* .. */ /* .. Local Scalars .. */ /*< INTEGER I, J >*/ /*< DOUBLE PRECISION VII >*/ /* .. */ /* .. External Subroutines .. */ /*< EXTERNAL DGEMV, DTRMV >*/ /* .. */ /* .. External Functions .. */ /*< LOGICAL LSAME >*/ /*< EXTERNAL LSAME >*/ /* .. */ /* .. Executable Statements .. */ /* Quick return if possible */ /*< >*/ /* Parameter adjustments */ v_dim1 = *ldv; v_offset = 1 + v_dim1; v -= v_offset; --tau; t_dim1 = *ldt; t_offset = 1 + t_dim1; t -= t_offset; /* Function Body */ if (*n == 0) { return 0; } /*< IF( LSAME( DIRECT, 'F' ) ) THEN >*/ if (lsame_(direct, "F", (ftnlen)1, (ftnlen)1)) { /*< DO 20 I = 1, K >*/ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { /*< IF( TAU( I ).EQ.ZERO ) THEN >*/ if (tau[i__] == 0.) 
{ /* H(i) = I */ /*< DO 10 J = 1, I >*/ i__2 = i__; for (j = 1; j <= i__2; ++j) { /*< T( J, I ) = ZERO >*/ t[j + i__ * t_dim1] = 0.; /*< 10 CONTINUE >*/ /* L10: */ } /*< ELSE >*/ } else { /* general case */ /*< VII = V( I, I ) >*/ vii = v[i__ + i__ * v_dim1]; /*< V( I, I ) = ONE >*/ v[i__ + i__ * v_dim1] = 1.; /*< IF( LSAME( STOREV, 'C' ) ) THEN >*/ if (lsame_(storev, "C", (ftnlen)1, (ftnlen)1)) { /* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */ /*< >*/ i__2 = *n - i__ + 1; i__3 = i__ - 1; d__1 = -tau[i__]; dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b8, &t[ i__ * t_dim1 + 1], &c__1, (ftnlen)9); /*< ELSE >*/ } else { /* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */ /*< >*/ i__2 = i__ - 1; i__3 = *n - i__ + 1; d__1 = -tau[i__]; dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ * v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, & c_b8, &t[i__ * t_dim1 + 1], &c__1, (ftnlen)12); /*< END IF >*/ } /*< V( I, I ) = VII >*/ v[i__ + i__ * v_dim1] = vii; /* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */ /*< >*/ i__2 = i__ - 1; dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1, (ftnlen) 5, (ftnlen)12, (ftnlen)8); /*< T( I, I ) = TAU( I ) >*/ t[i__ + i__ * t_dim1] = tau[i__]; /*< END IF >*/ } /*< 20 CONTINUE >*/ /* L20: */ } /*< ELSE >*/ } else { /*< DO 40 I = K, 1, -1 >*/ for (i__ = *k; i__ >= 1; --i__) { /*< IF( TAU( I ).EQ.ZERO ) THEN >*/ if (tau[i__] == 0.) 
{ /* H(i) = I */ /*< DO 30 J = I, K >*/ i__1 = *k; for (j = i__; j <= i__1; ++j) { /*< T( J, I ) = ZERO >*/ t[j + i__ * t_dim1] = 0.; /*< 30 CONTINUE >*/ /* L30: */ } /*< ELSE >*/ } else { /* general case */ /*< IF( I.LT.K ) THEN >*/ if (i__ < *k) { /*< IF( LSAME( STOREV, 'C' ) ) THEN >*/ if (lsame_(storev, "C", (ftnlen)1, (ftnlen)1)) { /*< VII = V( N-K+I, I ) >*/ vii = v[*n - *k + i__ + i__ * v_dim1]; /*< V( N-K+I, I ) = ONE >*/ v[*n - *k + i__ + i__ * v_dim1] = 1.; /* T(i+1:k,i) := */ /* - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) */ /*< >*/ i__1 = *n - *k + i__; i__2 = *k - i__; d__1 = -tau[i__]; dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], & c__1, &c_b8, &t[i__ + 1 + i__ * t_dim1], & c__1, (ftnlen)9); /*< V( N-K+I, I ) = VII >*/ v[*n - *k + i__ + i__ * v_dim1] = vii; /*< ELSE >*/ } else { /*< VII = V( I, N-K+I ) >*/ vii = v[i__ + (*n - *k + i__) * v_dim1]; /*< V( I, N-K+I ) = ONE >*/ v[i__ + (*n - *k + i__) * v_dim1] = 1.; /* T(i+1:k,i) := */ /* - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' */ /*< >*/ i__1 = *k - i__; i__2 = *n - *k + i__; d__1 = -tau[i__]; dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ + 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, & c_b8, &t[i__ + 1 + i__ * t_dim1], &c__1, ( ftnlen)12); /*< V( I, N-K+I ) = VII >*/ v[i__ + (*n - *k + i__) * v_dim1] = vii; /*< END IF >*/ } /* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */ /*< >*/ i__1 = *k - i__; dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1], &c__1, (ftnlen)5, (ftnlen)12, (ftnlen)8) ; /*< END IF >*/ } /*< T( I, I ) = TAU( I ) >*/ t[i__ + i__ * t_dim1] = tau[i__]; /*< END IF >*/ } /*< 40 CONTINUE >*/ /* L40: */ } /*< END IF >*/ } /*< RETURN >*/ return 0; /* End of DLARFT */ /*< END >*/ } /* dlarft_ */ #ifdef __cplusplus } #endif
290189.c
#include "func.h"
#include "main.h"

/* Return the QuickJS JS_UNDEFINED sentinel value (useful for callers that
 * cannot expand the JS_UNDEFINED macro themselves, e.g. FFI bindings). */
JSValue getJS_UNDEFINED()
{
    return JS_UNDEFINED;
}

/*
 * Install a global object `foo` whose `myPrint` property is bound to the
 * native myPrint C function, so scripts can call foo.myPrint(...).
 *
 * Fix: removed the unused local `console` that was declared but never
 * used (dead variable, compiler warning).
 *
 * Reference counting: per the QuickJS API, JS_SetPropertyStr consumes the
 * reference of the value argument, so neither the function object nor
 * `foo` needs an explicit JS_FreeValue; only the global object reference
 * obtained from JS_GetGlobalObject must be released.
 */
void addMyPrint(JSContext *ctx)
{
    JSValue global_obj, foo;

    foo = JS_NewObject(ctx);
    /* JS_SetPropertyStr takes ownership of the new function value. */
    JS_SetPropertyStr(ctx, foo, "myPrint",
                      JS_NewCFunction(ctx, myPrint, "myPrint", 0));

    global_obj = JS_GetGlobalObject(ctx);
    /* ... and likewise takes ownership of `foo` here. */
    JS_SetPropertyStr(ctx, global_obj, "foo", foo);
    JS_FreeValue(ctx, global_obj);
}
719558.c
/* Fontname: -Adobe-Times-Medium-R-Normal--20-140-100-100-P-96-ISO10646-1 Copyright: Copyright (c) 1984, 1987 Adobe Systems Incorporated. All Rights Reserved. Copyright (c) 1988, 1991 Digital Equipment Corporation. All Rights Reserved. Capital A Height: 13, '1' Height: 13 Calculated Max Values w=18 h=18 x= 2 y=10 dx=18 dy= 0 ascent=18 len=39 Font Bounding box w=22 h=29 x=-3 y=-7 Calculated Min Values x=-2 y=-4 dx= 0 dy= 0 Pure Font ascent =13 descent=-4 X Font ascent =13 descent=-4 Max Font ascent =18 descent=-4 */ #include "u8g.h" const u8g_fntpgm_uint8_t u8g_font_timR14[4577] U8G_FONT_SECTION("u8g_font_timR14") = { 0,22,29,253,249,13,2,131,6,16,32,255,252,18,252,13, 252,0,0,0,5,0,1,2,13,13,4,1,0,192,192,192, 192,192,192,64,64,64,0,0,192,192,6,4,4,8,1,9, 204,204,204,136,12,11,22,10,255,1,12,192,12,192,12,192, 127,240,25,128,25,128,25,128,255,224,51,0,51,0,51,0, 7,16,16,9,1,254,16,124,214,210,208,240,120,60,28,22, 22,150,214,124,16,16,13,13,26,15,1,0,56,48,111,224, 196,64,196,128,205,128,251,0,114,112,6,216,13,136,9,136, 25,152,49,240,32,224,12,13,26,14,1,0,28,0,50,0, 50,0,50,0,52,0,25,224,56,192,109,128,207,0,199,0, 199,128,237,240,120,224,2,4,4,4,1,9,192,192,192,128, 5,17,17,7,1,252,24,48,96,96,64,192,192,192,192,192, 192,192,64,96,96,48,24,5,17,17,7,1,252,192,96,48, 48,16,24,24,24,24,24,24,24,16,48,48,96,192,7,7, 7,9,1,6,16,214,84,56,214,146,16,8,8,8,10,1, 1,24,24,24,255,255,24,24,24,3,5,5,4,0,253,96, 96,32,96,192,5,2,2,6,0,3,248,248,2,2,2,4, 1,0,192,192,7,17,17,5,255,252,2,2,6,4,12,12, 8,24,24,16,48,48,32,96,96,64,192,8,13,13,9,1, 0,60,102,102,195,195,195,195,195,195,195,102,102,60,6,13, 13,9,2,0,48,112,240,48,48,48,48,48,48,48,48,120, 252,7,13,13,9,1,0,60,126,206,134,6,6,12,12,24, 48,98,254,254,7,13,13,9,1,0,124,206,134,6,12,56, 60,14,6,6,6,204,248,8,13,13,9,0,0,2,6,14, 14,22,38,70,70,255,255,6,6,6,7,13,13,9,1,0, 126,124,192,192,240,60,12,14,6,6,12,220,240,8,13,13, 9,1,0,7,28,48,96,96,252,198,195,195,195,227,118,60, 8,13,13,9,1,0,127,255,130,6,4,12,12,8,24,24, 
16,48,48,8,13,13,9,1,0,60,102,194,230,124,56,60, 110,199,195,195,102,60,8,13,13,9,1,0,60,110,198,195, 195,195,227,127,54,6,12,56,224,2,9,9,5,1,0,192, 192,0,0,0,0,0,192,192,3,12,12,5,0,253,96,96, 0,0,0,0,0,96,96,32,96,192,9,9,18,11,1,0, 1,128,7,0,28,0,112,0,192,0,112,0,28,0,7,0, 1,128,9,5,10,11,1,2,255,128,255,128,0,0,255,128, 255,128,9,9,18,11,1,0,192,0,112,0,28,0,7,0, 1,128,7,0,28,0,112,0,192,0,6,13,13,8,1,0, 120,140,196,204,12,24,24,48,32,32,0,48,48,14,16,32, 17,1,253,7,224,14,48,56,24,48,8,99,236,103,228,198, 100,204,100,204,204,204,200,205,216,103,112,96,0,48,0,28, 48,7,224,13,13,26,14,1,0,2,0,7,0,7,0,5, 0,13,128,9,128,25,192,16,192,31,192,48,224,32,96,96, 112,240,248,10,13,26,13,2,0,255,0,99,128,97,128,97, 128,97,128,99,0,127,128,97,192,96,192,96,192,96,192,97, 128,255,0,11,13,26,13,1,0,15,32,48,224,96,96,96, 32,192,0,192,0,192,0,192,0,192,0,96,0,96,96,56, 192,15,0,11,13,26,14,2,0,255,0,99,128,96,192,96, 192,96,96,96,96,96,96,96,96,96,96,96,192,96,192,99, 128,255,0,9,13,26,12,1,0,255,128,97,128,96,128,96, 0,96,0,97,0,127,0,97,0,96,0,96,0,96,128,97, 128,255,128,9,13,26,11,1,0,255,128,97,128,96,128,96, 0,96,0,97,0,127,0,97,0,96,0,96,0,96,0,96, 0,240,0,12,13,26,14,1,0,15,32,48,224,96,96,96, 32,192,0,192,0,192,240,192,96,192,96,96,96,96,96,57, 192,15,0,12,13,26,14,1,0,240,240,96,96,96,96,96, 96,96,96,96,96,127,224,96,96,96,96,96,96,96,96,96, 96,240,240,4,13,13,6,1,0,240,96,96,96,96,96,96, 96,96,96,96,96,240,6,13,13,7,0,0,60,24,24,24, 24,24,24,24,24,24,24,216,240,12,13,26,14,1,0,243, 224,97,128,99,0,102,0,108,0,120,0,120,0,124,0,110, 0,103,0,99,128,97,192,240,240,10,13,26,12,1,0,240, 0,96,0,96,0,96,0,96,0,96,0,96,0,96,0,96, 0,96,0,96,64,96,192,255,192,14,13,26,17,1,0,224, 28,112,56,112,56,120,120,88,88,88,216,92,216,76,152,77, 152,71,24,71,24,66,24,226,60,12,13,26,14,1,0,224, 112,96,32,112,32,120,32,92,32,76,32,78,32,71,32,67, 160,65,224,64,224,64,96,224,32,12,13,26,14,1,0,15, 0,48,192,96,96,96,96,192,48,192,48,192,48,192,48,192, 
48,96,96,96,96,48,192,15,0,9,13,26,11,1,0,255, 0,99,128,97,128,97,128,97,128,99,0,126,0,96,0,96, 0,96,0,96,0,96,0,240,0,12,17,34,14,1,252,15, 0,48,192,96,96,96,96,192,48,192,48,192,48,192,48,192, 48,96,96,96,96,48,192,15,0,6,0,3,0,1,192,0, 240,11,13,26,13,1,0,255,0,99,128,97,128,97,128,97, 128,99,0,126,0,110,0,102,0,99,0,97,128,96,192,240, 224,8,13,13,11,2,0,58,70,194,192,224,120,60,14,7, 3,131,198,184,10,13,26,12,1,0,255,192,204,192,140,64, 12,0,12,0,12,0,12,0,12,0,12,0,12,0,12,0, 12,0,30,0,11,13,26,14,2,0,240,224,96,64,96,64, 96,64,96,64,96,64,96,64,96,64,96,64,96,64,112,192, 57,128,31,0,13,13,26,14,0,0,248,120,112,48,48,32, 56,32,24,96,24,64,28,64,12,192,14,128,6,128,7,128, 3,0,3,0,18,13,39,18,0,0,249,227,192,112,193,128, 48,193,0,56,225,0,24,99,0,24,226,0,24,226,0,29, 166,0,13,52,0,15,60,0,14,56,0,6,24,0,6,24, 0,12,13,26,14,1,0,240,112,112,96,56,192,25,128,13, 0,14,0,6,0,15,0,27,0,17,128,49,192,96,224,240, 240,12,13,26,14,1,0,240,240,112,96,48,192,24,128,25, 0,15,0,6,0,6,0,6,0,6,0,6,0,6,0,15, 0,10,13,26,12,1,0,127,192,97,192,65,128,3,128,7, 0,6,0,14,0,28,0,56,0,48,0,112,64,224,192,255, 192,4,16,16,6,2,253,240,192,192,192,192,192,192,192,192, 192,192,192,192,192,192,240,5,13,13,5,0,0,128,128,192, 64,96,96,32,48,48,16,24,8,8,4,16,16,6,0,253, 240,48,48,48,48,48,48,48,48,48,48,48,48,48,48,240, 7,7,7,9,1,6,16,56,40,108,68,198,130,9,2,4, 9,0,252,255,128,255,128,4,3,3,4,0,10,192,96,16, 7,9,9,9,1,0,120,200,204,28,108,204,204,252,102,8, 13,13,9,0,0,96,224,96,96,110,119,99,99,99,99,99, 102,92,7,9,9,8,0,0,60,102,192,192,192,192,192,102, 60,8,13,13,9,0,0,6,14,6,6,62,102,198,198,198, 198,198,102,63,7,9,9,8,0,0,60,102,194,254,192,192, 192,102,60,6,13,13,6,0,0,28,52,32,96,248,96,96, 96,96,96,96,96,240,8,13,13,9,0,252,62,204,196,196, 204,120,64,124,127,131,193,226,124,9,13,26,10,0,0,96, 0,224,0,96,0,96,0,102,0,111,0,115,0,99,0,99, 0,99,0,99,0,99,0,243,128,4,13,13,5,0,0,96, 96,0,0,96,224,96,96,96,96,96,96,240,4,17,17,5, 255,252,48,48,0,0,48,112,48,48,48,48,48,48,48,48, 
48,224,192,10,13,26,9,0,0,96,0,224,0,96,0,96, 0,103,0,98,0,100,0,104,0,120,0,108,0,110,0,103, 0,227,192,4,13,13,5,0,0,96,224,96,96,96,96,96, 96,96,96,96,96,240,14,9,18,15,0,0,102,48,239,120, 115,152,99,24,99,24,99,24,99,24,99,24,247,188,9,9, 18,10,0,0,102,0,239,0,115,0,99,0,99,0,99,0, 99,0,99,0,243,128,8,9,9,9,0,0,60,102,195,195, 195,195,195,102,60,8,13,13,9,0,252,110,247,99,99,99, 99,99,118,124,96,96,96,240,8,13,13,9,0,252,62,102, 198,198,198,198,198,102,62,6,6,6,15,6,9,9,7,0, 0,108,236,112,96,96,96,96,96,240,5,9,9,7,1,0, 104,152,200,224,112,56,152,200,176,6,11,11,6,0,0,32, 96,248,96,96,96,96,96,96,116,56,9,9,18,10,0,0, 231,0,99,0,99,0,99,0,99,0,99,0,99,0,119,0, 59,128,9,9,18,9,255,0,243,128,99,0,98,0,50,0, 54,0,20,0,28,0,8,0,8,0,13,9,18,14,0,0, 231,56,102,48,98,96,54,96,55,96,29,64,29,192,8,128, 8,128,9,9,18,9,0,0,225,128,99,0,54,0,28,0, 28,0,28,0,54,0,99,0,195,128,8,13,13,9,0,252, 243,99,114,50,54,28,28,12,8,24,16,240,224,7,9,9, 8,0,0,254,206,140,24,48,48,98,230,254,7,17,17,9, 1,252,14,24,48,48,48,48,48,96,192,96,48,48,48,48, 48,24,14,1,13,13,3,1,0,128,128,128,128,128,128,128, 128,128,128,128,128,128,7,17,17,9,1,252,224,48,24,24, 24,24,24,12,6,12,24,24,24,24,24,48,224,9,4,8, 11,1,3,48,0,121,128,207,0,6,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,5, 0,1,2,13,13,4,1,252,192,192,0,0,128,128,128,192, 192,192,192,192,192,7,14,14,9,1,253,6,4,60,110,200, 216,208,208,240,114,124,64,192,128,10,13,26,11,0,0,15, 0,25,128,25,128,24,0,24,0,24,0,126,0,24,0,24, 0,16,0,120,64,191,192,231,128,9,7,14,11,1,3,221, 128,247,128,99,0,65,0,99,0,247,128,221,128,8,13,13, 9,0,0,247,98,98,118,52,52,126,24,126,24,24,24,126, 1,13,13,3,1,0,128,128,128,128,128,0,0,0,128,128, 128,128,128,8,16,16,10,1,253,60,102,102,112,56,124,142, 199,227,113,62,28,14,102,102,60,5,2,2,5,0,10,216, 216,13,13,26,15,1,0,15,128,48,96,64,16,71,144,136, 136,144,8,144,8,144,8,136,136,71,16,64,16,48,96,15, 
128,5,8,8,5,0,5,96,144,16,112,144,232,0,248,8, 7,7,10,1,1,17,51,102,204,102,51,17,9,5,10,11, 1,2,255,128,255,128,1,128,1,128,1,128,5,2,2,6, 0,3,248,248,13,13,26,15,1,0,15,128,48,96,64,16, 95,16,136,136,136,136,143,8,137,8,136,136,92,208,64,16, 48,96,15,128,5,2,2,5,0,10,248,248,5,5,5,7, 1,8,112,136,136,136,112,8,11,11,10,1,0,24,24,24, 255,255,24,24,24,0,255,255,5,8,8,6,0,5,112,152, 24,16,32,32,64,248,5,8,8,6,0,5,112,136,24,112, 24,8,136,112,4,3,3,4,0,10,48,96,128,9,13,26, 9,255,252,231,0,99,0,99,0,99,0,99,0,99,0,99, 0,119,0,123,128,64,0,64,0,96,0,96,0,7,17,17, 8,1,252,62,116,244,244,244,244,244,116,20,20,20,20,20, 20,20,20,20,2,2,2,4,1,4,192,192,4,5,5,6, 1,252,32,32,16,176,112,3,8,8,6,1,5,64,192,64, 64,64,64,64,224,5,8,8,6,0,5,112,216,136,136,216, 112,0,248,8,7,7,10,1,1,136,204,102,51,102,204,136, 11,13,26,13,1,0,64,128,193,128,65,0,67,0,70,0, 68,64,76,192,233,64,27,64,50,64,39,224,96,64,64,64, 11,13,26,13,1,0,64,128,193,128,65,0,67,0,70,0, 69,192,78,96,232,96,24,64,48,128,32,128,97,0,67,224, 13,13,26,13,255,0,112,32,136,96,24,64,112,192,25,128, 9,16,139,48,114,80,6,208,12,144,9,248,24,16,16,16, 6,13,13,8,1,252,48,48,0,16,16,48,96,96,192,204, 140,196,120,13,17,34,14,1,0,24,0,12,0,2,0,0, 0,2,0,7,0,7,0,5,0,13,128,9,128,25,192,16, 192,31,192,48,224,32,96,96,112,240,248,13,17,34,14,1, 0,0,192,1,128,2,0,0,0,2,0,7,0,7,0,5, 0,13,128,9,128,25,192,16,192,31,192,48,224,32,96,96, 112,240,248,13,17,34,14,1,0,2,0,7,0,13,128,0, 0,2,0,7,0,7,0,5,0,13,128,9,128,25,192,16, 192,31,192,48,224,32,96,96,112,240,248,13,17,34,14,1, 0,12,128,31,128,19,0,0,0,2,0,7,0,7,0,5, 0,13,128,9,128,25,192,16,192,31,192,48,224,32,96,96, 112,240,248,13,16,32,14,1,0,13,128,13,128,0,0,2, 0,7,0,7,0,5,0,13,128,9,128,25,192,16,192,31, 192,48,224,32,96,96,112,240,248,13,18,36,14,1,0,6, 0,9,0,9,0,6,0,0,0,2,0,7,0,7,0,5, 0,13,128,9,128,25,192,16,192,31,192,48,224,32,96,96, 112,240,248,15,13,26,17,1,0,15,252,7,140,5,132,5, 128,13,128,9,136,25,248,31,136,17,128,49,128,33,130,97, 
134,247,254,11,17,34,13,1,252,15,32,48,224,96,96,96, 32,192,0,192,0,192,0,192,0,192,0,96,0,96,96,56, 192,15,0,4,0,2,0,22,0,14,0,9,17,34,12,1, 0,48,0,24,0,4,0,0,0,255,128,97,128,96,128,96, 0,96,0,97,0,127,0,97,0,96,0,96,0,96,128,97, 128,255,128,9,17,34,12,1,0,3,0,6,0,8,0,0, 0,255,128,97,128,96,128,96,0,96,0,97,0,127,0,97, 0,96,0,96,0,96,128,97,128,255,128,9,17,34,12,1, 0,8,0,28,0,54,0,0,0,255,128,97,128,96,128,96, 0,96,0,97,0,127,0,97,0,96,0,96,0,96,128,97, 128,255,128,9,16,32,12,1,0,54,0,54,0,0,0,255, 128,97,128,96,128,96,0,96,0,97,0,127,0,97,0,96, 0,96,0,96,128,97,128,255,128,6,17,17,6,255,0,192, 96,16,0,60,24,24,24,24,24,24,24,24,24,24,24,60, 6,17,17,6,1,0,12,24,32,0,240,96,96,96,96,96, 96,96,96,96,96,96,240,5,17,17,6,1,0,32,112,216, 0,240,96,96,96,96,96,96,96,96,96,96,96,240,5,16, 16,6,1,0,216,216,0,240,96,96,96,96,96,96,96,96, 96,96,96,240,12,13,26,13,0,0,127,128,49,192,48,96, 48,96,48,48,48,48,252,48,48,48,48,48,48,96,48,96, 49,192,127,128,12,17,34,14,1,0,12,128,31,128,19,0, 0,0,224,112,96,32,112,32,120,32,92,32,76,32,78,32, 71,32,67,160,65,224,64,224,64,96,224,32,12,17,34,14, 1,0,48,0,24,0,4,0,0,0,15,0,48,192,96,96, 96,96,192,48,192,48,192,48,192,48,192,48,96,96,96,96, 48,192,15,0,12,17,34,14,1,0,0,192,1,128,2,0, 0,0,15,0,48,192,96,96,96,96,192,48,192,48,192,48, 192,48,192,48,96,96,96,96,48,192,15,0,12,17,34,14, 1,0,4,0,14,0,27,0,0,0,15,0,48,192,96,96, 96,96,192,48,192,48,192,48,192,48,192,48,96,96,96,96, 48,192,15,0,12,17,34,14,1,0,12,128,31,128,19,0, 0,0,15,0,48,192,96,96,96,96,192,48,192,48,192,48, 192,48,192,48,96,96,96,96,48,192,15,0,12,16,32,14, 1,0,27,0,27,0,0,0,15,0,48,192,96,96,96,96, 192,48,192,48,192,48,192,48,192,48,96,96,96,96,48,192, 15,0,8,7,7,10,1,1,195,102,60,24,60,102,195,12, 15,30,14,1,255,0,48,15,96,48,192,96,224,97,160,195, 48,195,48,198,48,204,48,204,48,88,96,112,96,48,192,111, 0,192,0,11,17,34,14,2,0,48,0,24,0,4,0,0, 0,240,224,96,64,96,64,96,64,96,64,96,64,96,64,96, 64,96,64,96,64,112,192,57,128,31,0,11,17,34,14,2, 
0,0,192,1,128,2,0,0,0,240,224,96,64,96,64,96, 64,96,64,96,64,96,64,96,64,96,64,96,64,112,192,57, 128,31,0,11,17,34,14,2,0,4,0,14,0,27,0,0, 0,240,224,96,64,96,64,96,64,96,64,96,64,96,64,96, 64,96,64,96,64,112,192,57,128,31,0,11,16,32,14,2, 0,27,0,27,0,0,0,240,224,96,64,96,64,96,64,96, 64,96,64,96,64,96,64,96,64,96,64,112,192,57,128,31, 0,12,17,34,14,1,0,0,192,1,128,2,0,0,0,240, 240,112,96,48,192,24,128,25,0,15,0,6,0,6,0,6, 0,6,0,6,0,6,0,15,0,9,13,26,10,1,0,240, 0,96,0,96,0,127,0,99,128,97,128,97,128,97,128,99, 0,126,0,96,0,96,0,240,0,8,13,13,9,0,0,28, 50,99,99,102,110,124,102,99,99,107,111,238,7,13,13,9, 1,0,192,96,16,0,120,200,204,28,108,204,204,252,102,7, 13,13,9,1,0,12,24,32,0,120,200,204,28,108,204,204, 252,102,7,13,13,9,1,0,16,56,108,0,120,200,204,28, 108,204,204,252,102,7,13,13,9,1,0,100,252,152,0,120, 200,204,28,108,204,204,252,102,7,12,12,9,1,0,108,108, 0,120,200,204,28,108,204,204,252,102,7,14,14,9,1,0, 48,72,72,48,0,120,200,204,28,108,204,204,252,102,11,9, 18,12,0,0,123,192,206,96,204,32,31,224,108,0,204,0, 204,0,254,96,99,192,7,13,13,8,0,252,60,102,192,192, 192,192,192,102,60,16,8,88,56,7,13,13,8,0,0,192, 96,16,0,60,102,194,254,192,192,192,102,60,7,13,13,8, 0,0,6,12,16,0,60,102,194,254,192,192,192,102,60,7, 13,13,8,0,0,16,56,108,0,60,102,194,254,192,192,192, 102,60,7,12,12,8,0,0,108,108,0,60,102,194,254,192, 192,192,102,60,6,13,13,5,254,0,192,96,16,0,24,56, 24,24,24,24,24,24,60,6,13,13,5,0,0,12,24,32, 0,96,224,96,96,96,96,96,96,240,5,13,13,5,0,0, 32,112,216,0,96,224,96,96,96,96,96,96,240,5,12,12, 5,0,0,216,216,0,96,224,96,96,96,96,96,96,240,8, 13,13,9,0,0,96,54,56,76,62,102,195,195,195,195,195, 102,60,9,13,26,10,0,0,50,0,126,0,76,0,0,0, 102,0,239,0,115,0,99,0,99,0,99,0,99,0,99,0, 243,128,8,13,13,9,0,0,48,24,4,0,60,102,195,195, 195,195,195,102,60,8,13,13,9,0,0,6,12,16,0,60, 102,195,195,195,195,195,102,60,8,13,13,9,0,0,16,56, 108,0,60,102,195,195,195,195,195,102,60,8,13,13,9,0, 0,50,126,76,0,60,102,195,195,195,195,195,102,60,8,12, 
12,9,0,0,108,108,0,60,102,195,195,195,195,195,102,60, 8,8,8,10,1,1,24,24,0,255,255,0,24,24,8,11, 11,9,0,255,1,63,102,207,203,219,211,243,102,124,192,9, 13,26,10,0,0,96,0,48,0,8,0,0,0,231,0,99, 0,99,0,99,0,99,0,99,0,99,0,119,0,59,128,9, 13,26,10,0,0,6,0,12,0,16,0,0,0,231,0,99, 0,99,0,99,0,99,0,99,0,99,0,119,0,59,128,9, 13,26,10,0,0,8,0,28,0,54,0,0,0,231,0,99, 0,99,0,99,0,99,0,99,0,99,0,119,0,59,128,9, 12,24,10,0,0,54,0,54,0,0,0,231,0,99,0,99, 0,99,0,99,0,99,0,99,0,119,0,59,128,8,17,17, 8,255,252,3,6,8,0,243,99,114,50,54,28,28,12,8, 24,16,240,224,8,17,17,9,0,252,96,224,96,96,110,119, 99,99,99,99,99,118,124,96,96,96,240,8,16,16,9,0, 252,54,54,0,243,99,114,50,54,28,28,12,8,24,16,240, 224};
517986.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE369_Divide_by_Zero__int_rand_divide_53b.c
Label Definition File: CWE369_Divide_by_Zero__int.label.xml
Template File: sources-sinks-53b.tmpl.c
*/
/*
 * @description
 * CWE: 369 Divide by Zero
 * BadSource: rand Set data to result of rand(), which may be zero
 * GoodSource: Non-zero
 * Sinks: divide
 *    GoodSink: Check for zero before dividing
 *    BadSink : Divide a constant by data
 * Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files
 *
 * NOTE(review): this is the second ("b") hop of the four-file data flow.
 * Every function here is a deliberately transparent pass-through to the
 * corresponding "c" function; static-analysis tools are expected to track
 * the tainted (possibly zero) value across these translation units.
 * Do not alter the forwarding — it is the point of the test case.
 */

#include "std_testcase.h"

#ifndef OMITBAD

/* bad function declaration */
void CWE369_Divide_by_Zero__int_rand_divide_53c_badSink(int data);

/* Forward the rand()-derived (possibly zero) value one hop closer to the
 * unchecked divide in file "c". */
void CWE369_Divide_by_Zero__int_rand_divide_53b_badSink(int data)
{
    CWE369_Divide_by_Zero__int_rand_divide_53c_badSink(data);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
void CWE369_Divide_by_Zero__int_rand_divide_53c_goodG2BSink(int data);

/* Forward the guaranteed non-zero value to the (bad) unchecked divide. */
void CWE369_Divide_by_Zero__int_rand_divide_53b_goodG2BSink(int data)
{
    CWE369_Divide_by_Zero__int_rand_divide_53c_goodG2BSink(data);
}

/* goodB2G uses the BadSource with the GoodSink */
void CWE369_Divide_by_Zero__int_rand_divide_53c_goodB2GSink(int data);

/* Forward the possibly-zero value to the sink that checks before dividing. */
void CWE369_Divide_by_Zero__int_rand_divide_53b_goodB2GSink(int data)
{
    CWE369_Divide_by_Zero__int_rand_divide_53c_goodB2GSink(data);
}

#endif /* OMITGOOD */
138560.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>
#include <stdatomic.h>

#include "attributes.h"
#include "cpu.h"
#include "cpu_internal.h"
#include "config.h"
#include "opt.h"
#include "common.h"

#if HAVE_SCHED_GETAFFINITY
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <sched.h>
#endif
#if HAVE_GETPROCESSAFFINITYMASK || HAVE_WINRT
#include <windows.h>
#endif
#if HAVE_SYSCTL
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Cached CPU feature flags; -1 means "not probed yet".  Accessed with
 * relaxed atomics: any thread may probe/store, the value is idempotent. */
static atomic_int cpu_flags = ATOMIC_VAR_INIT(-1);

/* Probe the CPU feature flags for the architecture this build targets.
 * Exactly one of the ARCH_* macros is nonzero; the rest compile away. */
static int get_cpu_flags(void)
{
    if (ARCH_AARCH64)
        return ff_get_cpu_flags_aarch64();
    if (ARCH_ARM)
        return ff_get_cpu_flags_arm();
    if (ARCH_PPC)
        return ff_get_cpu_flags_ppc();
    if (ARCH_X86)
        return ff_get_cpu_flags_x86();
    return 0;
}

/* Override the cached CPU flags with a user-supplied set.
 * On x86, any SIMD flag above MMX implies MMX support, so MMX is
 * force-enabled (with a warning) if the caller omitted it. */
void av_force_cpu_flags(int arg){
    if (ARCH_X86 &&
           (arg & ( AV_CPU_FLAG_3DNOW    |
                    AV_CPU_FLAG_3DNOWEXT |
                    AV_CPU_FLAG_MMXEXT   |
                    AV_CPU_FLAG_SSE      |
                    AV_CPU_FLAG_SSE2     |
                    AV_CPU_FLAG_SSE2SLOW |
                    AV_CPU_FLAG_SSE3     |
                    AV_CPU_FLAG_SSE3SLOW |
                    AV_CPU_FLAG_SSSE3    |
                    AV_CPU_FLAG_SSE4     |
                    AV_CPU_FLAG_SSE42    |
                    AV_CPU_FLAG_AVX      |
                    AV_CPU_FLAG_AVXSLOW  |
                    AV_CPU_FLAG_XOP      |
                    AV_CPU_FLAG_FMA3     |
                    AV_CPU_FLAG_FMA4     |
                    AV_CPU_FLAG_AVX2     ))
        && !(arg & AV_CPU_FLAG_MMX)) {
        av_log(NULL, AV_LOG_WARNING, "MMX implied by specified flags\n");
        arg |= AV_CPU_FLAG_MMX;
    }

    atomic_store_explicit(&cpu_flags, arg, memory_order_relaxed);
}

/* Return the CPU flags, probing the hardware on first call and caching
 * the result.  A concurrent first call may probe twice; both stores
 * write the same value, so the race is benign. */
int av_get_cpu_flags(void)
{
    int flags = atomic_load_explicit(&cpu_flags, memory_order_relaxed);
    if (flags == -1) {
        flags = get_cpu_flags();
        atomic_store_explicit(&cpu_flags, flags, memory_order_relaxed);
    }
    return flags;
}

/* Restrict the cached flags to the intersection of the freshly probed
 * hardware flags and the given mask. */
void av_set_cpu_flags_mask(int mask)
{
    atomic_store_explicit(&cpu_flags, get_cpu_flags() & mask,
                          memory_order_relaxed);
}

/* Parse a "+flag-flag" style string into AV_CPU_FLAG_* bits.
 * The CPUFLAG_* helper macros fold in every flag a given feature
 * implies (e.g. requesting SSE also enables MMXEXT/MMX/CMOV). */
int av_parse_cpu_flags(const char *s)
{
#define CPUFLAG_MMXEXT   (AV_CPU_FLAG_MMX      | AV_CPU_FLAG_MMXEXT | AV_CPU_FLAG_CMOV)
#define CPUFLAG_3DNOW    (AV_CPU_FLAG_3DNOW    | AV_CPU_FLAG_MMX)
#define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW)
#define CPUFLAG_SSE      (AV_CPU_FLAG_SSE      | CPUFLAG_MMXEXT)
#define CPUFLAG_SSE2     (AV_CPU_FLAG_SSE2     | CPUFLAG_SSE)
#define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2)
#define CPUFLAG_SSE3     (AV_CPU_FLAG_SSE3     | CPUFLAG_SSE2)
#define CPUFLAG_SSE3SLOW (AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3)
#define CPUFLAG_SSSE3    (AV_CPU_FLAG_SSSE3    | CPUFLAG_SSE3)
#define CPUFLAG_SSE4     (AV_CPU_FLAG_SSE4     | CPUFLAG_SSSE3)
#define CPUFLAG_SSE42    (AV_CPU_FLAG_SSE42    | CPUFLAG_SSE4)
#define CPUFLAG_AVX      (AV_CPU_FLAG_AVX      | CPUFLAG_SSE42)
#define CPUFLAG_AVXSLOW  (AV_CPU_FLAG_AVXSLOW  | CPUFLAG_AVX)
#define CPUFLAG_XOP      (AV_CPU_FLAG_XOP      | CPUFLAG_AVX)
#define CPUFLAG_FMA3     (AV_CPU_FLAG_FMA3     | CPUFLAG_AVX)
#define CPUFLAG_FMA4     (AV_CPU_FLAG_FMA4     | CPUFLAG_AVX)
#define CPUFLAG_AVX2     (AV_CPU_FLAG_AVX2     | CPUFLAG_AVX)
#define CPUFLAG_BMI2     (AV_CPU_FLAG_BMI2     | AV_CPU_FLAG_BMI1)
#define CPUFLAG_AESNI    (AV_CPU_FLAG_AESNI    | CPUFLAG_SSE42)
    static const AVOption cpuflags_opts[] = {
        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
#if   ARCH_PPC
        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
#elif ARCH_X86
        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
        { "mmxext"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_MMXEXT       },    .unit = "flags" },
        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE          },    .unit = "flags" },
        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2         },    .unit = "flags" },
        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2SLOW     },    .unit = "flags" },
        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3         },    .unit = "flags" },
        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3SLOW     },    .unit = "flags" },
        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSSE3        },    .unit = "flags" },
        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM     },    .unit = "flags" },
        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE4         },    .unit = "flags" },
        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE42        },    .unit = "flags" },
        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX          },    .unit = "flags" },
        { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVXSLOW      },    .unit = "flags" },
        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_XOP          },    .unit = "flags" },
        { "fma3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA3         },    .unit = "flags" },
        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA4         },    .unit = "flags" },
        { "avx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX2         },    .unit = "flags" },
        { "bmi1"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1     },    .unit = "flags" },
        { "bmi2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_BMI2         },    .unit = "flags" },
        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOW        },    .unit = "flags" },
        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOWEXT     },    .unit = "flags" },
        { "cmov",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV     },    .unit = "flags" },
        { "aesni"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AESNI        },    .unit = "flags" },
#elif ARCH_ARM
        { "armv5te",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE  },    .unit = "flags" },
        { "armv6",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6    },    .unit = "flags" },
        { "armv6t2",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2  },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
        { "vfp_vm",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM   },    .unit = "flags" },
        { "vfpv3",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
#elif ARCH_AARCH64
        { "armv8",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
#endif
        { NULL },
    };
    static const AVClass class = {
        .class_name = "cpuflags",
        .item_name  = av_default_item_name,
        .option     = cpuflags_opts,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    int flags = 0, ret;
    const AVClass *pclass = &class;

    if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, &flags)) < 0)
        return ret;

    return flags & INT_MAX;
}

/* Parse a CPU capability string into *flags.  Unlike av_parse_cpu_flags(),
 * each option maps to exactly one raw flag (no implied-feature folding),
 * plus convenience aliases for whole CPU models (pentium2..k8). */
int av_parse_cpu_caps(unsigned *flags, const char *s)
{
    static const AVOption cpuflags_opts[] = {
        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
#if   ARCH_PPC
        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
#elif ARCH_X86
        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
        { "mmx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2     },    .unit = "flags" },
        { "mmxext"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2     },    .unit = "flags" },
        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE      },    .unit = "flags" },
        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2     },    .unit = "flags" },
        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2SLOW },    .unit = "flags" },
        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3     },    .unit = "flags" },
        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3SLOW },    .unit = "flags" },
        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSSE3    },    .unit = "flags" },
        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM     },    .unit = "flags" },
        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE4     },    .unit = "flags" },
        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE42    },    .unit = "flags" },
        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX      },    .unit = "flags" },
        { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVXSLOW  },    .unit = "flags" },
        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_XOP      },    .unit = "flags" },
        { "fma3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA3     },    .unit = "flags" },
        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA4     },    .unit = "flags" },
        { "avx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX2     },    .unit = "flags" },
        { "bmi1"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1     },    .unit = "flags" },
        { "bmi2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI2     },    .unit = "flags" },
        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOW    },    .unit = "flags" },
        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOWEXT },    .unit = "flags" },
        { "cmov",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV     },    .unit = "flags" },
        { "aesni",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AESNI    },    .unit = "flags" },

/* CPU-model aliases: the union of the flags those processors support. */
#define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX
#define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE
#define CPU_FLAG_P4 CPU_FLAG_P3| AV_CPU_FLAG_SSE2
        { "pentium2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P2          },    .unit = "flags" },
        { "pentium3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P3          },    .unit = "flags" },
        { "pentium4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P4          },    .unit = "flags" },

#define CPU_FLAG_K62 AV_CPU_FLAG_MMX | AV_CPU_FLAG_3DNOW
#define CPU_FLAG_ATHLON   CPU_FLAG_K62 | AV_CPU_FLAG_CMOV | AV_CPU_FLAG_3DNOWEXT | AV_CPU_FLAG_MMX2
#define CPU_FLAG_ATHLONXP CPU_FLAG_ATHLON | AV_CPU_FLAG_SSE
#define CPU_FLAG_K8       CPU_FLAG_ATHLONXP | AV_CPU_FLAG_SSE2
        { "k6",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
        { "k62",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K62         },    .unit = "flags" },
        { "athlon",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLON      },    .unit = "flags" },
        { "athlonxp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLONXP    },    .unit = "flags" },
        { "k8",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K8          },    .unit = "flags" },
#elif ARCH_ARM
        { "armv5te",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE  },    .unit = "flags" },
        { "armv6",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6    },    .unit = "flags" },
        { "armv6t2",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2  },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
        { "vfp_vm",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM   },    .unit = "flags" },
        { "vfpv3",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
        { "setend",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SETEND   },    .unit = "flags" },
#elif ARCH_AARCH64
        { "armv8",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
#endif
        { NULL },
    };
    static const AVClass class = {
        .class_name = "cpuflags",
        .item_name  = av_default_item_name,
        .option     = cpuflags_opts,
        .version    = LIBAVUTIL_VERSION_INT,
    };
    const AVClass *pclass = &class;

    return av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, flags);
}

/* Return the number of logical CPUs available to this process, using
 * whichever platform API configure detected.  Falls back to 1 if no
 * API is available.  The "detected" message is logged only once
 * (best-effort check-then-set; not atomic, duplicates are harmless). */
int av_cpu_count(void)
{
    static volatile int printed;

    int nb_cpus = 1;
#if HAVE_WINRT
    SYSTEM_INFO sysinfo;
#endif
#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);

    if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
        nb_cpus = CPU_COUNT(&cpuset);
#elif HAVE_GETPROCESSAFFINITYMASK
    DWORD_PTR proc_aff, sys_aff;

    if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
        nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
    int mib[2] = { CTL_HW, HW_NCPU };
    size_t len = sizeof(nb_cpus);

    if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
        nb_cpus = 0;
#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
    nb_cpus = sysconf(_SC_NPROC_ONLN);
#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
    nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
#elif HAVE_WINRT
    GetNativeSystemInfo(&sysinfo);
    nb_cpus = sysinfo.dwNumberOfProcessors;
#endif

    if (!printed) {
        av_log(NULL, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
        printed = 1;
    }

    return nb_cpus;
}

/* Return the maximum data alignment (in bytes) any SIMD code on this
 * architecture may require; 8 for architectures without a probe. */
size_t av_cpu_max_align(void)
{
    if (ARCH_AARCH64)
        return ff_get_cpu_max_align_aarch64();
    if (ARCH_ARM)
        return ff_get_cpu_max_align_arm();
    if (ARCH_PPC)
        return ff_get_cpu_max_align_ppc();
    if (ARCH_X86)
        return ff_get_cpu_max_align_x86();

    return 8;
}
378732.c
// Thwomp

// 0x0500B7D0 - 0x0500B92C
// Thwomp collision mesh: 20 vertices, 36 triangles, all tagged
// SURFACE_NO_CAM_COLLISION (the camera passes through the object).
const Collision thwomp_seg5_collision_0500B7D0[] = {
    COL_INIT(),
    COL_VERTEX_INIT(0x14),
    COL_VERTEX(-156, 3, 73),
    COL_VERTEX(98, 3, 157),
    COL_VERTEX(-97, 3, 157),
    COL_VERTEX(157, 3, 73),
    COL_VERTEX(106, 252, 157),
    COL_VERTEX(-105, 252, 157),
    COL_VERTEX(157, 252, 79),
    COL_VERTEX(0, 302, 102),
    COL_VERTEX(93, 330, 0),
    COL_VERTEX(-156, 252, 79),
    COL_VERTEX(-92, 330, 0),
    COL_VERTEX(-156, 3, -72),
    COL_VERTEX(-97, 3, -156),
    COL_VERTEX(98, 3, -156),
    COL_VERTEX(-105, 252, -156),
    COL_VERTEX(106, 252, -156),
    COL_VERTEX(-156, 252, -78),
    COL_VERTEX(0, 302, -101),
    COL_VERTEX(157, 3, -72),
    COL_VERTEX(157, 252, -78),
    COL_TRI_INIT(SURFACE_NO_CAM_COLLISION, 36),
    COL_TRI(0, 1, 2),
    COL_TRI(0, 3, 1),
    COL_TRI(1, 4, 5),
    COL_TRI(1, 5, 2),
    COL_TRI(1, 3, 6),
    COL_TRI(4, 1, 6),
    COL_TRI(7, 4, 8),
    COL_TRI(6, 8, 4),
    COL_TRI(5, 4, 7),
    COL_TRI(9, 5, 10),
    COL_TRI(2, 5, 9),
    COL_TRI(10, 5, 7),
    COL_TRI(2, 9, 0),
    COL_TRI(0, 11, 12),
    COL_TRI(0, 12, 13),
    COL_TRI(8, 10, 7),
    COL_TRI(12, 14, 15),
    COL_TRI(12, 15, 13),
    COL_TRI(17, 14, 10),
    COL_TRI(16, 10, 14),
    COL_TRI(14, 12, 16),
    COL_TRI(15, 14, 17),
    COL_TRI(17, 8, 15),
    COL_TRI(15, 8, 19),
    COL_TRI(13, 15, 19),
    COL_TRI(0, 13, 18),
    COL_TRI(13, 19, 18),
    COL_TRI(19, 8, 6),
    COL_TRI(17, 10, 8),
    COL_TRI(9, 10, 16),
    COL_TRI(18, 19, 6),
    COL_TRI(0, 9, 16),
    COL_TRI(0, 18, 3),
    COL_TRI(18, 6, 3),
    COL_TRI(0, 16, 11),
    COL_TRI(12, 11, 16),
    COL_TRI_STOP(),
    COL_END(),
};

// 0x0500B92C - 0x0500BA88
// NOTE(review): vertex and triangle data are byte-for-byte identical to
// thwomp_seg5_collision_0500B7D0 above; the two tables are kept separate
// to match the original ROM layout (two distinct segment addresses).
const Collision thwomp_seg5_collision_0500B92C[] = {
    COL_INIT(),
    COL_VERTEX_INIT(0x14),
    COL_VERTEX(-156, 3, 73),
    COL_VERTEX(98, 3, 157),
    COL_VERTEX(-97, 3, 157),
    COL_VERTEX(157, 3, 73),
    COL_VERTEX(106, 252, 157),
    COL_VERTEX(-105, 252, 157),
    COL_VERTEX(157, 252, 79),
    COL_VERTEX(0, 302, 102),
    COL_VERTEX(93, 330, 0),
    COL_VERTEX(-156, 252, 79),
    COL_VERTEX(-92, 330, 0),
    COL_VERTEX(-156, 3, -72),
    COL_VERTEX(-97, 3, -156),
    COL_VERTEX(98, 3, -156),
    COL_VERTEX(-105, 252, -156),
    COL_VERTEX(106, 252, -156),
    COL_VERTEX(-156, 252, -78),
    COL_VERTEX(0, 302, -101),
    COL_VERTEX(157, 3, -72),
    COL_VERTEX(157, 252, -78),
    COL_TRI_INIT(SURFACE_NO_CAM_COLLISION, 36),
    COL_TRI(0, 1, 2),
    COL_TRI(0, 3, 1),
    COL_TRI(1, 4, 5),
    COL_TRI(1, 5, 2),
    COL_TRI(1, 3, 6),
    COL_TRI(4, 1, 6),
    COL_TRI(7, 4, 8),
    COL_TRI(6, 8, 4),
    COL_TRI(5, 4, 7),
    COL_TRI(9, 5, 10),
    COL_TRI(2, 5, 9),
    COL_TRI(10, 5, 7),
    COL_TRI(2, 9, 0),
    COL_TRI(0, 11, 12),
    COL_TRI(0, 12, 13),
    COL_TRI(8, 10, 7),
    COL_TRI(12, 14, 15),
    COL_TRI(12, 15, 13),
    COL_TRI(17, 14, 10),
    COL_TRI(16, 10, 14),
    COL_TRI(14, 12, 16),
    COL_TRI(15, 14, 17),
    COL_TRI(17, 8, 15),
    COL_TRI(15, 8, 19),
    COL_TRI(13, 15, 19),
    COL_TRI(0, 13, 18),
    COL_TRI(13, 19, 18),
    COL_TRI(19, 8, 6),
    COL_TRI(17, 10, 8),
    COL_TRI(9, 10, 16),
    COL_TRI(18, 19, 6),
    COL_TRI(0, 9, 16),
    COL_TRI(0, 18, 3),
    COL_TRI(18, 6, 3),
    COL_TRI(0, 16, 11),
    COL_TRI(12, 11, 16),
    COL_TRI_STOP(),
    COL_END(),
};
477039.c
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

/* Filter state: per-component byte offsets plus the user options that
 * control spill detection and removal. */
typedef struct DespillContext {
    const AVClass *class;

    int co[4]; /* color offsets rgba */

    int alpha;         /* if set, also write 1-spillmap into the alpha channel */
    int type;          /* 0 = green screen, 1 = blue screen */
    float spillmix;
    float spillexpand;
    float redscale;
    float greenscale;
    float bluescale;
    float brightness;
} DespillContext;

/* Slice worker: for each pixel, estimate how much of the key color
 * (green or blue) has "spilled" relative to the other two channels,
 * then add scaled corrections to each channel in proportion to that
 * spill estimate.  Operates in-place on a packed 8-bit RGBA frame. */
static int do_despill_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    DespillContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int ro = s->co[0], go = s->co[1], bo = s->co[2], ao = s->co[3];
    const int slice_start = (frame->height * jobnr) / nb_jobs;
    const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
    const float brightness = s->brightness;
    const float redscale = s->redscale;
    const float greenscale = s->greenscale;
    const float bluescale = s->bluescale;
    const float spillmix = s->spillmix;
    /* weight of the non-key channel in the spill reference; shrinks as
     * either mix or expand grows, which widens the detected spill */
    const float factor = (1.f - spillmix) * (1.f - s->spillexpand);
    float red, green, blue;
    int x, y;

    for (y = slice_start; y < slice_end; y++) {
        uint8_t *dst = frame->data[0] + y * frame->linesize[0];

        for (x = 0; x < frame->width; x++) {
            float spillmap;

            /* normalize to [0,1] */
            red   = dst[x * 4 + ro] / 255.f;
            green = dst[x * 4 + go] / 255.f;
            blue  = dst[x * 4 + bo] / 255.f;

            /* spill = how far the key channel exceeds a mix of the others */
            if (s->type) {
                spillmap = FFMAX(blue  - (red * spillmix + green * factor), 0.f);
            } else {
                spillmap = FFMAX(green - (red * spillmix + blue  * factor), 0.f);
            }

            red   = FFMAX(red   + spillmap * redscale   + brightness * spillmap, 0.f);
            green = FFMAX(green + spillmap * greenscale + brightness * spillmap, 0.f);
            blue  = FFMAX(blue  + spillmap * bluescale  + brightness * spillmap, 0.f);

            dst[x * 4 + ro] = av_clip_uint8(red   * 255);
            dst[x * 4 + go] = av_clip_uint8(green * 255);
            dst[x * 4 + bo] = av_clip_uint8(blue  * 255);
            if (s->alpha) {
                spillmap = 1.f - spillmap;
                dst[x * 4 + ao] = av_clip_uint8(spillmap * 255);
            }
        }
    }

    return 0;
}

/* Run the despill worker across slices, then pass the (in-place
 * modified) frame downstream.  The input pad requires writable frames. */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    int ret;

    if (ret = ff_filter_execute(ctx, do_despill_slice, frame, NULL,
                                FFMIN(frame->height, ff_filter_get_nb_threads(ctx))))
        return ret;

    return ff_filter_frame(ctx->outputs[0], frame);
}

/* Cache the per-component byte offsets of the negotiated RGBA layout. */
static av_cold int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DespillContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    int i;

    for (i = 0; i < 4; ++i)
        s->co[i] = desc->comp[i].offset;

    return 0;
}

/* Only packed 8-bit formats with alpha are supported. */
static av_cold int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
        AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats_from_list(ctx, pixel_fmts);
}

static const AVFilterPad despill_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .flags          = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .filter_frame   = filter_frame,
    },
};

static const AVFilterPad despill_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

#define OFFSET(x) offsetof(DespillContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* Note: "green"/"blue" appear twice — once as named constants for the
 * "type" unit and once as the per-channel scale options. */
static const AVOption despill_options[] = {
    { "type",       "set the screen type",     OFFSET(type),        AV_OPT_TYPE_INT,   {.i64=0},     0,   1, FLAGS, "type" },
    {   "green",    "greenscreen",             0,                   AV_OPT_TYPE_CONST, {.i64=0},     0,   0, FLAGS, "type" },
    {   "blue",     "bluescreen",              0,                   AV_OPT_TYPE_CONST, {.i64=1},     0,   0, FLAGS, "type" },
    { "mix",        "set the spillmap mix",    OFFSET(spillmix),    AV_OPT_TYPE_FLOAT, {.dbl=0.5},   0,   1, FLAGS },
    { "expand",     "set the spillmap expand", OFFSET(spillexpand), AV_OPT_TYPE_FLOAT, {.dbl=0},     0,   1, FLAGS },
    { "red",        "set red scale",           OFFSET(redscale),    AV_OPT_TYPE_FLOAT, {.dbl=0},  -100, 100, FLAGS },
    { "green",      "set green scale",         OFFSET(greenscale),  AV_OPT_TYPE_FLOAT, {.dbl=-1}, -100, 100, FLAGS },
    { "blue",       "set blue scale",          OFFSET(bluescale),   AV_OPT_TYPE_FLOAT, {.dbl=0},  -100, 100, FLAGS },
    { "brightness", "set brightness",          OFFSET(brightness),  AV_OPT_TYPE_FLOAT, {.dbl=0},   -10,  10, FLAGS },
    { "alpha",      "change alpha component",  OFFSET(alpha),       AV_OPT_TYPE_BOOL,  {.i64=0},     0,   1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(despill);

const AVFilter ff_vf_despill = {
    .name          = "despill",
    .description   = NULL_IF_CONFIG_SMALL("Despill video."),
    .priv_size     = sizeof(DespillContext),
    .priv_class    = &despill_class,
    .query_formats = query_formats,
    FILTER_INPUTS(despill_inputs),
    FILTER_OUTPUTS(despill_outputs),
    .process_command = ff_filter_process_command,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
833778.c
/*
 ************************************************************************* *
 *  File:       UTIL.C
 *
 *  Module:     USBCCGP.SYS
 *              USB Common Class Generic Parent driver.
 *
 *  Copyright (c) 1998  Microsoft Corporation
 *
 *
 *  Author:     ervinp
 *
 *************************************************************************
 */

#include <wdm.h>
#include <stdio.h>
#include <usb.h>
#include <usbdlib.h>
#include <usbioctl.h>

#include "usbccgp.h"
#include "security.h"
#include "debug.h"

#ifdef ALLOC_PRAGMA
        #pragma alloc_text(PAGE, AppendInterfaceNumber)
        #pragma alloc_text(PAGE, CopyDeviceRelations)
        #pragma alloc_text(PAGE, GetFunctionInterfaceListBase)
        #pragma alloc_text(PAGE, CallDriverSync)
        #pragma alloc_text(PAGE, CallNextDriverSync)
        #pragma alloc_text(PAGE, SetPdoRegistryParameter)
        #pragma alloc_text(PAGE, GetPdoRegistryParameter)
        #pragma alloc_text(PAGE, GetMsOsFeatureDescriptor)
#endif

#define USB_REQUEST_TIMEOUT 5000  // Timeout in ms (5 sec)

NTSTATUS CallNextDriverSync(PPARENT_FDO_EXT parentFdoExt, PIRP irp)
/*++

Routine Description:

        Pass the IRP down to the next device object in the stack
        synchronously, and bump the pendingActionCount around
        the call to prevent the current device object from getting
        removed before the IRP completes.

Arguments:

    parentFdoExt - device extension of one of our device objects
    irp - Io Request Packet

Return Value:

    NT status code, indicates result returned by lower driver for this IRP.

--*/
{
    NTSTATUS status;

    PAGED_CODE();

    IncrementPendingActionCount(parentFdoExt);
    status = CallDriverSync(parentFdoExt->topDevObj, irp);
    DecrementPendingActionCount(parentFdoExt);

    return status;
}

VOID IncrementPendingActionCount(PPARENT_FDO_EXT parentFdoExt)
/*++

Routine Description:

    Increment the pendingActionCount for a device object.
    This keeps the device object from getting freed before
    the action is completed.

Arguments:

    devExt - device extension of device object

Return Value:

    VOID

--*/
{
    ASSERT(parentFdoExt->pendingActionCount >= 0);
    InterlockedIncrement(&parentFdoExt->pendingActionCount);
}

VOID DecrementPendingActionCount(PPARENT_FDO_EXT parentFdoExt)
/*++

Routine Description:

    Decrement the pendingActionCount for a device object.
    This is called when an asynchronous action is completed
    AND ALSO when we get the REMOVE_DEVICE IRP.
    If the pendingActionCount goes to -1, that means that all
    actions are completed and we've gotten the REMOVE_DEVICE IRP;
    in this case, set the removeEvent event so we can finish
    unloading.

Arguments:

    devExt - device extension of device object

Return Value:

    VOID

--*/
{
    ASSERT(parentFdoExt->pendingActionCount >= 0);
    InterlockedDecrement(&parentFdoExt->pendingActionCount);

    if (parentFdoExt->pendingActionCount < 0){
        /*
         *  All pending actions have completed and we've gotten
         *  the REMOVE_DEVICE IRP.
         *  Set the removeEvent so we'll stop waiting on REMOVE_DEVICE.
         */
        ASSERT((parentFdoExt->state == STATE_REMOVING) ||
               (parentFdoExt->state == STATE_REMOVED));
        KeSetEvent(&parentFdoExt->removeEvent, 0, FALSE);
    }
}

/*
 ********************************************************************************
 *  CallDriverSyncCompletion
 ********************************************************************************
 *
 *
 */
NTSTATUS CallDriverSyncCompletion(IN PDEVICE_OBJECT devObjOrNULL, IN PIRP irp, IN PVOID Context)
/*++

Routine Description:

      Completion routine for CallDriverSync.

Arguments:

    devObjOrNULL -
            Usually, this is this driver's device object.
             However, if this driver created the IRP,
            there is no stack location in the IRP for this driver;
            so the kernel has no place to store the device object;
            ** so devObj will be NULL in this case **.

    irp - completed Io Request Packet
    context - context passed to IoSetCompletionRoutine by CallDriverSync.

Return Value:

    NT status code, indicates result returned by lower driver for this IRP.

--*/
{
    PUSB_REQUEST_TIMEOUT_CONTEXT timeoutContext = Context;
    PKEVENT event = timeoutContext->event;
    PLONG lock = timeoutContext->lock;

    ASSERT(irp->IoStatus.Status != STATUS_IO_TIMEOUT);

    // Hand-off protocol with CallDriverSync's timeout path: 3 in the
    // lock means "completion ran"; the waiter decides who completes
    // the IRP.  See the InterlockedExchange pairs in CallDriverSync.
    InterlockedExchange(lock, 3);

    KeSetEvent(event, 0, FALSE);

    return STATUS_MORE_PROCESSING_REQUIRED;
}

NTSTATUS CallDriverSync(IN PDEVICE_OBJECT devObj, IN OUT PIRP irp)
/*++

Routine Description:

      Call IoCallDriver to send the irp to the device object;
      then, synchronize with the completion routine.
      When CallDriverSync returns, the action has completed
      and the irp again belongs to the current driver.
      NOTE:  In order to keep the device object from getting freed
             while this IRP is pending, you should call
             IncrementPendingActionCount() and
             DecrementPendingActionCount()
             around the CallDriverSync call.

Arguments:

    devObj - targetted device object
    irp - Io Request Packet

Return Value:

    NT status code, indicates result returned by lower driver for this IRP.

--*/
{
    PUSB_REQUEST_TIMEOUT_CONTEXT timeoutContext;
    KEVENT event;
    LONG lock;
    LARGE_INTEGER dueTime;
    PIO_STACK_LOCATION irpStack;
    ULONG majorFunction;
    ULONG minorFunction;
    NTSTATUS status;

    PAGED_CODE();

    irpStack = IoGetNextIrpStackLocation(irp);
    majorFunction = irpStack->MajorFunction;
    minorFunction = irpStack->MinorFunction;

    KeInitializeEvent(&event, NotificationEvent, FALSE);
    lock = 0;

    timeoutContext = ALLOCPOOL(NonPagedPool, sizeof(USB_REQUEST_TIMEOUT_CONTEXT));
    if (timeoutContext) {

        timeoutContext->event = &event;
        timeoutContext->lock = &lock;

        IoSetCompletionRoutine( irp,
                                CallDriverSyncCompletion,
                                // context
                                timeoutContext,
                                TRUE, TRUE, TRUE);

        status = IoCallDriver(devObj, irp);

        if (status == STATUS_PENDING) {
            // Bounded wait: give the lower driver USB_REQUEST_TIMEOUT ms
            // before attempting to cancel the request.
            dueTime.QuadPart = -10000 * USB_REQUEST_TIMEOUT;

            status = KeWaitForSingleObject( &event,
                                            Executive,      // wait reason
                                            KernelMode,
                                            FALSE,          // not alertable
                                            &dueTime);

            if (status == STATUS_TIMEOUT) {

                DBGWARN(("CallDriverSync timed out!\n"));

                if (InterlockedExchange(&lock, 1) == 0) {
                    //
                    // We got to the IRP before it was completed. We can cancel
                    // the IRP without fear of losing it, as the completion routine
                    // won't let go of the IRP until we say so.
                    //
                    IoCancelIrp(irp);

                    //
                    // Release the completion routine. If it already got there,
                    // then we need to complete it ourselves. Otherwise we got
                    // through IoCancelIrp before the IRP completed entirely.
                    //
                    if (InterlockedExchange(&lock, 2) == 3) {
                        //
                        // Mark it pending because we switched threads.
                        //
                        IoMarkIrpPending(irp);
                        IoCompleteRequest(irp, IO_NO_INCREMENT);
                    }
                }

                // Completion is now guaranteed to signal; wait unbounded.
                KeWaitForSingleObject(&event,
                                      Executive,
                                      KernelMode,
                                      FALSE,
                                      NULL);

                // Return an error code because STATUS_TIMEOUT is a successful
                // code.
                irp->IoStatus.Status = STATUS_DEVICE_DATA_ERROR;
            }
        }

        FREEPOOL(timeoutContext);

        status = irp->IoStatus.Status;

    } else {
        status = STATUS_INSUFFICIENT_RESOURCES;
    }

    if (!NT_SUCCESS(status)){
        DBGVERBOSE(("IRP 0x%02X/0x%02X failed in CallDriverSync w/ status %xh.",
                    majorFunction, minorFunction, status));
    }

    return status;
}

/*
 ********************************************************************************
 *  AppendInterfaceNumber
 ********************************************************************************
 *
 *  oldIDs is a multi-String of hardware IDs.
 *  Return a new string with '&MI_xx' appended to each id,
 *  where 'xx' is the interface number of the first interface
 *  in that function.
 *
 *  Caller owns (and must free) the returned buffer; returns NULL on
 *  allocation failure.
 */
PWCHAR AppendInterfaceNumber(PWCHAR oldIDs, ULONG interfaceNum)
{
    ULONG newIdLen;
    PWCHAR id, newIDs;
    WCHAR suffix[] = L"&MI_xx";

    PAGED_CODE();

    /*
     *  Calculate the length of the final multi-string.
     *  NOTE(review): sizeof(suffix) counts bytes, not WCHARs, so this
     *  over-reserves; harmless but slightly wasteful.
     */
    for (id = oldIDs, newIdLen = 0; *id; ){
        ULONG thisIdLen = WStrLen(id);
        newIdLen += thisIdLen + 1 + sizeof(suffix);
        id += thisIdLen + 1;
    }

    /*
     *  Add one for the extra NULL at the end of the multi-string.
     */
    newIdLen++;

    newIDs = ALLOCPOOL(NonPagedPool, newIdLen*sizeof(WCHAR));
    if (newIDs){
        ULONG oldIdOff, newIdOff;

        /*
         *  Copy each string in the multi-string, replacing the bus name.
         */
        for (oldIdOff = newIdOff = 0; oldIDs[oldIdOff]; ){
            ULONG thisIdLen = WStrLen(oldIDs+oldIdOff);

            swprintf(suffix, L"&MI_%02x", interfaceNum);

            /*
             *  Copy the new bus name to the new string.
             */
            newIdOff += WStrCpy(newIDs+newIdOff, oldIDs+oldIdOff);
            newIdOff += WStrCpy(newIDs+newIdOff, (PWSTR)suffix) + 1;
            oldIdOff += thisIdLen + 1;
        }

        /*
         *  Add extra NULL to terminate multi-string.
         */
        newIDs[newIdOff] = UNICODE_NULL;
    }

    return newIDs;
}

/*
 ********************************************************************************
 *  CopyDeviceRelations
 ********************************************************************************
 *
 *  Shallow-copy a DEVICE_RELATIONS structure (header + PDO pointer
 *  array).  Returns NULL if the input is NULL or allocation fails.
 */
PDEVICE_RELATIONS CopyDeviceRelations(PDEVICE_RELATIONS deviceRelations)
{
    PDEVICE_RELATIONS newDeviceRelations;

    PAGED_CODE();

    if (deviceRelations){
        ULONG size = sizeof(DEVICE_RELATIONS) +
                     (deviceRelations->Count*sizeof(PDEVICE_OBJECT));
        newDeviceRelations = MemDup(deviceRelations, size);
    }
    else {
        newDeviceRelations = NULL;
    }

    return newDeviceRelations;
}

/*
 *  GetFunctionInterfaceListBase
 *
 *  Map a function index to its first interface-list entry and the
 *  number of interfaces that belong to that function.  Content-security
 *  interfaces are never exposed as functions; audio interfaces are
 *  grouped (see the long comment below).  Returns NULL and sets
 *  *numFunctionInterfaces = 0 when functionIndex is out of range.
 */
PUSBD_INTERFACE_LIST_ENTRY GetFunctionInterfaceListBase(
                                            PPARENT_FDO_EXT parentFdoExt,
                                            ULONG functionIndex,
                                            PULONG numFunctionInterfaces)
{
    PUSBD_INTERFACE_LIST_ENTRY iface = NULL;
    PUSB_CONFIGURATION_DESCRIPTOR configDesc;
    ULONG i, func;
    UCHAR ifaceClass;
    ULONG audFuncBaseIndex = -1;

    PAGED_CODE();

    configDesc = parentFdoExt->selectedConfigDesc;

    ASSERT(configDesc->bNumInterfaces);
    for (func = 0, i = 0; i < (ULONG)configDesc->bNumInterfaces-1; i++){

        ifaceClass = parentFdoExt->interfaceList[i].InterfaceDescriptor->bInterfaceClass;

        if (ifaceClass == USB_DEVICE_CLASS_CONTENT_SECURITY){
            /*
             *  We don't expose the CS interface(s).
             */
            continue;
        }

        if (func == functionIndex){
            break;
        }

        switch (ifaceClass){

            case USB_DEVICE_CLASS_AUDIO:
                /*
                 *  For USB_DEVICE_CLASS_AUDIO, we return groups of interfaces
                 *  with common class as functions.
                 *
                 *  BUT, only while the interface subclass is different than the
                 *  first one in this grouping.  If the subclass is the same,
                 *  then this is a different function.
                 *  Note that it is conceivable that a device could be created
                 *  where a second audio function starts with an interface with
                 *  a different subclass than the previous audio interface, but
                 *  this is how USBHUB's generic parent driver works and thus we
                 *  are bug-compatible with the older driver.
                 */
                if (audFuncBaseIndex == -1){
                    audFuncBaseIndex = i;
                }

                if ((parentFdoExt->interfaceList[i+1].InterfaceDescriptor->bInterfaceClass !=
                     USB_DEVICE_CLASS_AUDIO) ||
                    (parentFdoExt->interfaceList[audFuncBaseIndex].InterfaceDescriptor->bInterfaceSubClass ==
                     parentFdoExt->interfaceList[i+1].InterfaceDescriptor->bInterfaceSubClass)) {

                    func++;
                    audFuncBaseIndex = -1;  // Reset base index for next audio function.
                }
                break;

            default:
                audFuncBaseIndex = -1;  // Reset base index for next audio function.
                /*
                 *  For other classes, each interface is a function.
                 *  Count alternate interfaces as part of the same function.
                 */
                ASSERT(parentFdoExt->interfaceList[i+1].InterfaceDescriptor->bAlternateSetting == 0);
                if (parentFdoExt->interfaceList[i+1].InterfaceDescriptor->bAlternateSetting == 0){
                    func++;
                }
                break;
        }
    }

    // note: need this redundant check outside in case bNumInterfaces == 1
    if (func == functionIndex){
        iface = &parentFdoExt->interfaceList[i];
        ifaceClass = iface->InterfaceDescriptor->bInterfaceClass;
        *numFunctionInterfaces = 1;

        if (ifaceClass == USB_DEVICE_CLASS_CONTENT_SECURITY){
            /*
             *  The CS interface was the last interface on the device.
             *  Don't return it as a function.
             */
            iface = NULL;
        }
        else if (ifaceClass == USB_DEVICE_CLASS_AUDIO){
            for (i = i + 1; i < (ULONG)configDesc->bNumInterfaces; i++){
                if ((parentFdoExt->interfaceList[i].InterfaceDescriptor->bInterfaceClass ==
                     iface->InterfaceDescriptor->bInterfaceClass) &&
                    (parentFdoExt->interfaceList[i].InterfaceDescriptor->bInterfaceSubClass !=
                     iface->InterfaceDescriptor->bInterfaceSubClass)){
                    (*numFunctionInterfaces)++;
                }
                else {
                    break;
                }
            }
        }
    }
    else {
        *numFunctionInterfaces = 0;
    }

    return iface;
}

/*
 ********************************************************************************
 *  GetStringDescriptor
 ********************************************************************************
 *
 *  Synchronously fetch a USB string descriptor (stringIndex, langId)
 *  into the caller-supplied buffer via SubmitUrb.
 */
NTSTATUS GetStringDescriptor(   PPARENT_FDO_EXT parentFdoExt,
                                UCHAR stringIndex,
                                LANGID langId,
                                PUSB_STRING_DESCRIPTOR stringDesc,
                                ULONG bufferLen)
{
    NTSTATUS status;
    URB urb;

    UsbBuildGetDescriptorRequest(&urb,
                                 (USHORT)sizeof(struct _URB_CONTROL_DESCRIPTOR_REQUEST),
                                 USB_STRING_DESCRIPTOR_TYPE,
                                 stringIndex,
                                 langId,
                                 stringDesc,
                                 NULL,
                                 bufferLen,
                                 NULL);

    status = SubmitUrb(parentFdoExt, &urb, TRUE, NULL, NULL);

    return status;
}

/*
 ********************************************************************************
 *  SetPdoRegistryParameter
 ********************************************************************************
 *
 *  Write a value (KeyName/Data, of registry type KeyType) under the
 *  device-instance registry key selected by DevInstKeyType.
 */
NTSTATUS SetPdoRegistryParameter (
    IN PDEVICE_OBJECT   PhysicalDeviceObject,
    IN PWCHAR           KeyName,
    IN PVOID            Data,
    IN ULONG            DataLength,
    IN ULONG            KeyType,
    IN ULONG            DevInstKeyType
    )
{
    UNICODE_STRING keyNameUnicodeString;
    HANDLE handle;
    NTSTATUS ntStatus;

    PAGED_CODE();

    RtlInitUnicodeString(&keyNameUnicodeString, KeyName);

    ntStatus = IoOpenDeviceRegistryKey(PhysicalDeviceObject,
                                       DevInstKeyType,
                                       STANDARD_RIGHTS_ALL,
                                       &handle);

    if (NT_SUCCESS(ntStatus)) {
        ntStatus = ZwSetValueKey(handle,
                                 &keyNameUnicodeString,
                                 0,
                                 KeyType,
                                 Data,
                                 DataLength);
        ZwClose(handle);
    }

    DBGVERBOSE(("SetPdoRegistryParameter status 0x%x\n", ntStatus));

    return ntStatus;
}

/*
 ********************************************************************************
 *  GetPdoRegistryParameter
 ********************************************************************************
 *
 *
 */
NTSTATUS GetPdoRegistryParameter (
    IN PDEVICE_OBJECT   PhysicalDeviceObject,
    IN PWCHAR           ValueName,
    OUT PVOID           Data,
    IN ULONG            DataLength,
    OUT PULONG          Type,
    OUT PULONG          ActualDataLength
    )
/*++

Routine Description:

    This routines queries the data for a registry value entry associated
    with the device instance specific registry key for the PDO.

    The registry value entry would be found under this registry key:
    HKLM\System\CCS\Enum\<DeviceID>\<InstanceID>\Device Parameters

Arguments:

    PhysicalDeviceObject - Yep, the PDO

    ValueName - Name of the registry value entry for which the data is requested

    Data - Buffer in which the requested data is returned

    DataLength - Length of the data buffer

    Type - (optional) The data type (e.g. REG_SZ, REG_DWORD) is returned here

    ActualDataLength - (optional) The actual length of the data is returned here
    If this is larger than DataLength then not all of the value data has
    been returned.

Return Value:

--*/
{
    HANDLE handle;
    NTSTATUS ntStatus;

    PAGED_CODE();

    ntStatus = IoOpenDeviceRegistryKey(PhysicalDeviceObject,
                                       PLUGPLAY_REGKEY_DEVICE,
                                       STANDARD_RIGHTS_ALL,
                                       &handle);

    if (NT_SUCCESS(ntStatus)) {

        PKEY_VALUE_PARTIAL_INFORMATION partialInfo;
        UNICODE_STRING valueName;
        ULONG length;
        ULONG resultLength;

        RtlInitUnicodeString(&valueName, ValueName);

        // Size and allocate a KEY_VALUE_PARTIAL_INFORMATION structure,
        // including room for the returned value data.
        //
        length = FIELD_OFFSET(KEY_VALUE_PARTIAL_INFORMATION, Data) +
                 DataLength;

        partialInfo = ALLOCPOOL(PagedPool, length);

        if (partialInfo) {

            // Query the value data.
            //
            ntStatus = ZwQueryValueKey(handle,
                                       &valueName,
                                       KeyValuePartialInformation,
                                       partialInfo,
                                       length,
                                       &resultLength);

            // If we got any data that is good enough
            //
            if (ntStatus == STATUS_BUFFER_OVERFLOW) {
                ntStatus = STATUS_SUCCESS;
            }

            if (NT_SUCCESS(ntStatus)) {

                // Only copy the smaller of the the requested data length or
                // the actual data length.
                //
                RtlCopyMemory(Data,
                              partialInfo->Data,
                              DataLength < partialInfo->DataLength ?
                              DataLength :
                              partialInfo->DataLength);

                // Return the value data type and actual length, if requested.
                //
                if (Type) {
                    *Type = partialInfo->Type;
                }

                if (ActualDataLength) {
                    *ActualDataLength = partialInfo->DataLength;
                }
            }

            FREEPOOL(partialInfo);

        } else {
            ntStatus = STATUS_INSUFFICIENT_RESOURCES;
        }

        ZwClose(handle);
    }

    return ntStatus;
}

/*
 ********************************************************************************
 *  GetMsOsFeatureDescriptor
 ********************************************************************************
 *
 *  Issue a URB_FUNCTION_GET_MS_FEATURE_DESCRIPTOR request for the given
 *  recipient/interface/index and report how many bytes came back.
 */
NTSTATUS GetMsOsFeatureDescriptor (
    PPARENT_FDO_EXT ParentFdoExt,
    UCHAR           Recipient,
    UCHAR           InterfaceNumber,
    USHORT          Index,
    PVOID           DataBuffer,
    ULONG           DataBufferLength,
    PULONG          BytesReturned
    )
{
    struct _URB_OS_FEATURE_DESCRIPTOR_REQUEST *urb;
    NTSTATUS ntStatus;

    PAGED_CODE();

    if (BytesReturned) {
        *BytesReturned = 0;
    }

    urb = ALLOCPOOL(NonPagedPool, sizeof(struct _URB_OS_FEATURE_DESCRIPTOR_REQUEST));

    if (urb != NULL) {

        // Initialize the URB_FUNCTION_GET_MS_FEATURE_DESCRIPTOR request
        //
        RtlZeroMemory(urb, sizeof(struct _URB_OS_FEATURE_DESCRIPTOR_REQUEST));

        urb->Hdr.Function = URB_FUNCTION_GET_MS_FEATURE_DESCRIPTOR;
        urb->Hdr.Length = sizeof(struct _URB_OS_FEATURE_DESCRIPTOR_REQUEST);

        urb->TransferBufferLength = DataBufferLength;
        urb->TransferBuffer = DataBuffer;

        urb->Recipient = Recipient;
        urb->InterfaceNumber = InterfaceNumber;
        urb->MS_FeatureDescriptorIndex = Index;

        // Submit the URB_FUNCTION_GET_MS_FEATURE_DESCRIPTOR request
        //
        ntStatus = SubmitUrb(ParentFdoExt, (PURB)urb, TRUE, NULL, NULL);

        if (NT_SUCCESS(ntStatus) && BytesReturned) {
            *BytesReturned = urb->TransferBufferLength;
        }

        FREEPOOL(urb);

    } else {

        ntStatus = STATUS_INSUFFICIENT_RESOURCES;
    }

    return ntStatus;
}

/*
 ********************************************************************************
 *  GetMsExtendedConfigDescriptor
 ********************************************************************************
 *
 *
 */
NTSTATUS GetMsExtendedConfigDescriptor (
    IN PPARENT_FDO_EXT  ParentFdoExt
    )
/*++

Routine Description:

    This routines queries a device for an Extended Configuration Descriptor.

Arguments:

    ParentFdoExt - The device extension of the parent FDO

Return Value:

    If successful, a pointer to the Extended Configuration Descriptor,
    which the caller must free, else NULL.

--*/
{
    MS_EXT_CONFIG_DESC_HEADER   msExtConfigDescHeader;
    PMS_EXT_CONFIG_DESC         pMsExtConfigDesc;
    ULONG                       bytesReturned;
    NTSTATUS                    ntStatus;

    PAGED_CODE();

    ntStatus = STATUS_NOT_SUPPORTED;

    pMsExtConfigDesc = NULL;

    RtlZeroMemory(&msExtConfigDescHeader, sizeof(MS_EXT_CONFIG_DESC_HEADER));

    // Request just the header of the MS Extended Configuration Descriptor
    //
    ntStatus = GetMsOsFeatureDescriptor(
                   ParentFdoExt,
                   0,           // Recipient Device
                   0,           // Interface
                   MS_EXT_CONFIG_DESCRIPTOR_INDEX,
                   &msExtConfigDescHeader,
                   sizeof(MS_EXT_CONFIG_DESC_HEADER),
                   &bytesReturned);

    // Make sure the MS Extended Configuration Descriptor header looks ok
    //
    if (NT_SUCCESS(ntStatus) &&
        bytesReturned == sizeof(MS_EXT_CONFIG_DESC_HEADER) &&
        msExtConfigDescHeader.bcdVersion == MS_EXT_CONFIG_DESC_VER &&
        msExtConfigDescHeader.wIndex == MS_EXT_CONFIG_DESCRIPTOR_INDEX &&
        msExtConfigDescHeader.bCount > 0 &&
        msExtConfigDescHeader.dwLength == sizeof(MS_EXT_CONFIG_DESC_HEADER) +
        msExtConfigDescHeader.bCount * sizeof(MS_EXT_CONFIG_DESC_FUNCTION)) {

        // Allocate a buffer large enough for the entire descriptor
        //
        pMsExtConfigDesc = ALLOCPOOL(NonPagedPool, msExtConfigDescHeader.dwLength);

        if (pMsExtConfigDesc) {

            RtlZeroMemory(pMsExtConfigDesc, msExtConfigDescHeader.dwLength);

            // Request the entire MS Extended
Configuration Descriptor // ntStatus = GetMsOsFeatureDescriptor( ParentFdoExt, 0, // Recipient Device 0, // Interface MS_EXT_CONFIG_DESCRIPTOR_INDEX, pMsExtConfigDesc, msExtConfigDescHeader.dwLength, &bytesReturned); if (!( NT_SUCCESS(ntStatus) && bytesReturned == msExtConfigDescHeader.dwLength && RtlCompareMemory(&msExtConfigDescHeader, pMsExtConfigDesc, sizeof(MS_EXT_CONFIG_DESC_HEADER)) == sizeof(MS_EXT_CONFIG_DESC_HEADER) && ValidateMsExtendedConfigDescriptor( pMsExtConfigDesc, ParentFdoExt->selectedConfigDesc) )) { // Something went wrong retrieving the MS Extended Configuration // Descriptor, or it doesn't look valid. Free the buffer. // FREEPOOL(pMsExtConfigDesc); pMsExtConfigDesc = NULL; } else { ntStatus = STATUS_SUCCESS; } } else { ntStatus = STATUS_INSUFFICIENT_RESOURCES; } } ASSERT(!ISPTR(ParentFdoExt->msExtConfigDesc)); ParentFdoExt->msExtConfigDesc = pMsExtConfigDesc; return ntStatus; } /* ******************************************************************************** * ValidateMsExtendedConfigDescriptor ******************************************************************************** * * * */ BOOLEAN ValidateMsExtendedConfigDescriptor ( IN PMS_EXT_CONFIG_DESC MsExtConfigDesc, IN PUSB_CONFIGURATION_DESCRIPTOR ConfigurationDescriptor ) /*++ Routine Description: This routines validates an Extended Configuration Descriptor. Arguments: MsExtConfigDesc - The Extended Configuration Descriptor to be validated. It is assumed that the header of this descriptor has already been validated. ConfigurationDescriptor - Configuration Descriptor, assumed to already validated. Return Value: TRUE if the Extended Configuration Descriptor appears to be valid, else FALSE. --*/ { UCHAR interfacesRemaining; ULONG i; ULONG j; UCHAR c; BOOLEAN gotNull; PAGED_CODE(); interfacesRemaining = ConfigurationDescriptor->bNumInterfaces; for (i = 0; i < MsExtConfigDesc->Header.bCount; i++) { // Make sure that there is at least one interface in this function. 
// if (MsExtConfigDesc->Function[i].bInterfaceCount == 0) { return FALSE; } // Make sure that there are not too many interfaces in this function. // if (MsExtConfigDesc->Function[i].bInterfaceCount > interfacesRemaining) { return FALSE; } interfacesRemaining -= MsExtConfigDesc->Function[i].bInterfaceCount; // Make sure the no interfaces were skipped between the interfaces // of the previous function and the interfaces of this function. // if (i && MsExtConfigDesc->Function[i-1].bFirstInterfaceNumber + MsExtConfigDesc->Function[i-1].bInterfaceCount != MsExtConfigDesc->Function[i].bFirstInterfaceNumber) { return FALSE; } // Make sure that the CompatibleID is valid. // Valid characters are 'A' through 'Z', '0' through '9', and '_" // and null padded to the the right end of the array, but not // necessarily null terminated. // for (j = 0, gotNull = FALSE; j < sizeof(MsExtConfigDesc->Function[i].CompatibleID); j++) { c = MsExtConfigDesc->Function[i].CompatibleID[j]; if (c == 0) { gotNull = TRUE; } else { if (gotNull || !((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_'))) { return FALSE; } } } // Make sure that the SubCompatibleID is valid. // Valid characters are 'A' through 'Z', '0' through '9', and '_" // and null padded to the the right end of the array, but not // necessarily null terminated. // for (j = 0, gotNull = FALSE; j < sizeof(MsExtConfigDesc->Function[i].SubCompatibleID); j++) { c = MsExtConfigDesc->Function[i].SubCompatibleID[j]; if (c == 0) { gotNull = TRUE; } else { if (gotNull || !((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_'))) { return FALSE; } } } // Make sure that if the SubCompatibleID is non-null then the // CompatibleID is also non-null. // if (MsExtConfigDesc->Function[i].SubCompatibleID[0] != 0 && MsExtConfigDesc->Function[i].CompatibleID[0] == 0) { return FALSE; } } // Make sure that all of the interfaces were consumed by functions. 
// if (interfacesRemaining > 0) { return FALSE; } return TRUE; } /* ******************************************************************************** * MemDup ******************************************************************************** * * Return a fresh copy of the argument. * */ PVOID MemDup(PVOID dataPtr, ULONG length) { PVOID newPtr; newPtr = (PVOID)ALLOCPOOL(NonPagedPool, length); if (newPtr){ RtlCopyMemory(newPtr, dataPtr, length); } else { DBGWARN(("MemDup: Memory allocation (size %xh) failed -- not a bug if verifier pool failures enabled.", length)); } return newPtr; } /* ******************************************************************************** * WStrLen ******************************************************************************** * */ ULONG WStrLen(PWCHAR str) { ULONG result = 0; while (*str++ != UNICODE_NULL){ result++; } return result; } /* ******************************************************************************** * WStrCpy ******************************************************************************** * */ ULONG WStrCpy(PWCHAR dest, PWCHAR src) { ULONG result = 0; while (*dest++ = *src++){ result++; } return result; } BOOLEAN WStrCompareN(PWCHAR str1, PWCHAR str2, ULONG maxChars) { while ((maxChars > 0) && *str1 && (*str1 == *str2)){ maxChars--; str1++; str2++; } return (BOOLEAN)((maxChars == 0) || (!*str1 && !*str2)); }
581446.c
/*
Copyright (c) 2013, Dust Networks.  All rights reserved.
*/

// UART demo application for the Dust Networks SmartMesh IP on-chip SDK
// (uC/OS-II based).  A CLI-driven TX task sends fixed-pattern packets out
// the UART; an RX task prints every received UART packet in hex.

#include "dn_common.h"
#include <string.h>
#include "stdio.h"
#include "cli_task.h"
#include "loc_task.h"
#include "dn_system.h"
#include "dn_uart.h"
#include "dn_exe_hdr.h"
#include "app_task_cfg.h"
#include "Ver.h"

//=========================== definitions =====================================

#define DLFT_LEN               10       // default TX packet length (bytes)
#define DLFT_DELAY             100      // default inter-packet delay (ms)
#define TX_BUFFER_PATTERN      0x0a     // byte value the TX buffer is filled with
#define MAX_UART_PACKET_SIZE   (128u)
#define MAX_UART_TRX_CHNL_SIZE (sizeof(dn_chan_msg_hdr_t) + MAX_UART_PACKET_SIZE)

//=========================== prototypes ======================================

//===== CLI handlers
dn_error_t cli_lenCmdHandler(INT8U* arg, INT32U len);
dn_error_t cli_delayCmdHandler(INT8U* arg, INT32U len);
dn_error_t cli_txCmdHandler(INT8U* arg, INT32U len);

//===== tasks
static void uartTxTask(void* unused);
static void uartRxTask(void* unused);

//=========================== const ===========================================

// CLI command table: "len" sets packet length, "delay" sets inter-packet
// delay, "tx" triggers a burst of packets.
const dnm_ucli_cmdDef_t cliCmdDefs[] = {
   {&cli_lenCmdHandler,   "len",   "length",      DN_CLI_ACCESS_LOGIN},
   {&cli_delayCmdHandler, "delay", "num ms",      DN_CLI_ACCESS_LOGIN},
   {&cli_txCmdHandler,    "tx",    "num packets", DN_CLI_ACCESS_LOGIN},
   {NULL,                 NULL,    NULL,          0},
};

//=========================== variables =======================================

typedef struct {
   // uartTxTask
   OS_STK    uartTxTaskStack[TASK_APP_UART_TX_STK_SIZE];
   INT8U     uartTxBuffer[MAX_UART_PACKET_SIZE];   // pattern-filled TX payload
   INT16U    uartTxLen;                            // bytes per TX packet
   INT16U    uartTxDelay;                          // ms between TX packets
   OS_EVENT* uartTxSem;                            // posted by "tx" CLI command
   INT16U    uartTxNumLeft;                        // packets remaining in burst
   // uartRxTask
   OS_STK    uartRxTaskStack[TASK_APP_UART_RX_STK_SIZE];
   INT32U    uartRxChannelMemBuf[1+MAX_UART_TRX_CHNL_SIZE/sizeof(INT32U)];
   OS_MEM*   uartRxChannelMem;
   CH_DESC   uartRxChannel;
   INT8U     uartRxBuffer[MAX_UART_PACKET_SIZE];
} uart_app_vars_t;

uart_app_vars_t uart_app_v;

//=========================== initialization ==================================

/**
\brief This is the entry point in the application code.

Zeroes the application state, applies default TX parameters, starts the
CLI and location helper tasks, then creates the UART TX and RX tasks.
Returns 0 on success (all failures are trapped by ASSERT).
*/
int p2_init(void) {
   INT8U    osErr;

   //==== initialize local variables
   memset(&uart_app_v,0x00,sizeof(uart_app_v));
   uart_app_v.uartTxLen   = DLFT_LEN;
   uart_app_v.uartTxDelay = DLFT_DELAY;

   //==== initialize helper tasks

   cli_task_init(
      "uart",                               // appName
      &cliCmdDefs                           // cliCmds
   );
   loc_task_init(
      JOIN_NO,                              // fJoin
      NETID_NONE,                           // netId
      UDPPORT_NONE,                         // udpPort
      NULL,                                 // joinedSem
      BANDWIDTH_NONE,                       // bandwidth
      NULL                                  // serviceSem
   );

   //===== create tasks

   // uartTxTask task
   osErr = OSTaskCreateExt(
      uartTxTask,
      (void *)0,
      (OS_STK*)(&uart_app_v.uartTxTaskStack[TASK_APP_UART_TX_STK_SIZE-1]),
      TASK_APP_UART_TX_PRIORITY,
      TASK_APP_UART_TX_PRIORITY,
      (OS_STK*)uart_app_v.uartTxTaskStack,
      TASK_APP_UART_TX_STK_SIZE,
      (void *)0,
      OS_TASK_OPT_STK_CHK | OS_TASK_OPT_STK_CLR
   );
   ASSERT(osErr == OS_ERR_NONE);
   OSTaskNameSet(TASK_APP_UART_TX_PRIORITY, (INT8U*)TASK_APP_UART_TX_NAME, &osErr);
   ASSERT(osErr == OS_ERR_NONE);

   // uartRxTask task
   osErr = OSTaskCreateExt(
      uartRxTask,
      (void *)0,
      (OS_STK*)(&uart_app_v.uartRxTaskStack[TASK_APP_UART_RX_STK_SIZE-1]),
      TASK_APP_UART_RX_PRIORITY,
      TASK_APP_UART_RX_PRIORITY,
      (OS_STK*)uart_app_v.uartRxTaskStack,
      TASK_APP_UART_RX_STK_SIZE,
      (void *)0,
      OS_TASK_OPT_STK_CHK | OS_TASK_OPT_STK_CLR
   );
   ASSERT(osErr == OS_ERR_NONE);
   OSTaskNameSet(TASK_APP_UART_RX_PRIORITY, (INT8U*)TASK_APP_UART_RX_NAME, &osErr);
   ASSERT(osErr == OS_ERR_NONE);

   return 0;
}

//=========================== CLI handlers ====================================

// "len <n>": set the TX packet length (bytes).
// NOTE(review): sscanf() is given an INT8U* where const char* is expected --
// relies on an implicit pointer conversion; also no range check against
// MAX_UART_PACKET_SIZE -- confirm against dn_sendSyncMsgByType limits.
dn_error_t cli_lenCmdHandler(INT8U* arg, INT32U len) {
   int uartTxLen, l;

   //--- param 0: len
   l = sscanf(arg, "%d", &uartTxLen);
   if (l < 1) {
      return DN_ERR_INVALID;
   }

   //---- store
   uart_app_v.uartTxLen = (INT16U)uartTxLen;

   return DN_ERR_NONE;
}

// "delay <ms>": set the delay between consecutive TX packets.
dn_error_t cli_delayCmdHandler(INT8U* arg, INT32U len) {
   int delay, l;

   //--- param 0: len
   l = sscanf(arg, "%d", &delay);
   if (l < 1) {
      return DN_ERR_INVALID;
   }

   //---- store
   uart_app_v.uartTxDelay = (INT16U)delay;

   return DN_ERR_NONE;
}

// "tx <n>": request a burst of n packets and wake the TX task.
// NOTE(review): uartTxSem is created inside uartTxTask; if this command runs
// before that task has executed its first statements, OSSemPost() is called
// with a NULL event pointer and the ASSERT fires -- consider creating the
// semaphore in p2_init instead.  TODO confirm task start ordering.
dn_error_t cli_txCmdHandler(INT8U* arg, INT32U len) {
   int   numLeft, l;
   INT8U osErr;

   //--- param 0: len
   l = sscanf(arg, "%d", &numLeft);
   if (l < 1) {
      return DN_ERR_INVALID;
   }

   //---- store
   uart_app_v.uartTxNumLeft = (INT16U)numLeft;

   //---- post semaphore
   osErr = OSSemPost(uart_app_v.uartTxSem);
   ASSERT(osErr == OS_ERR_NONE);

   return DN_ERR_NONE;
}

//=========================== tasks ===========================================

// TX task: blocks on uartTxSem, then sends uartTxNumLeft packets of
// uartTxLen bytes with uartTxDelay ms between them.
static void uartTxTask(void* unused) {
   INT8U      osErr;
   dn_error_t dnErr;
   INT8U      reply;
   INT32U     replyLen;

   // create a semaphore
   uart_app_v.uartTxSem = OSSemCreate(0);
   ASSERT (uart_app_v.uartTxSem!=NULL);

   // prepare TX buffer
   memset(uart_app_v.uartTxBuffer,TX_BUFFER_PATTERN,sizeof(uart_app_v.uartTxBuffer));

   while(1) { // this is a task, it executes forever

      // wait for the semaphore to be posted
      OSSemPend(
         uart_app_v.uartTxSem,   // pevent
         0,                      // timeout
         &osErr                  // perr
      );
      ASSERT (osErr == OS_ERR_NONE);

      // print
      dnm_ucli_printf("Sending %d UART packets, %d bytes, delay %d ms\r\n",
         uart_app_v.uartTxNumLeft,
         uart_app_v.uartTxLen,
         uart_app_v.uartTxDelay
      );

      while(uart_app_v.uartTxNumLeft>0) {

         // send packet
         // NOTE(review): dnErr itself is never checked, only the in-band
         // reply byte -- confirm dn_sendSyncMsgByType cannot fail without
         // also writing 'reply'.
         dnErr = dn_sendSyncMsgByType(
            uart_app_v.uartTxBuffer,
            uart_app_v.uartTxLen,
            DN_MSG_TYPE_UART_TX_CTRL,
            (void*)&reply,
            sizeof(reply),
            &replyLen
         );
         ASSERT(replyLen==sizeof(INT8U));
         ASSERT(reply==DN_ERR_NONE);

         // decrement
         uart_app_v.uartTxNumLeft--;

         // wait a bit
         if (uart_app_v.uartTxDelay) {
            OSTimeDly(uart_app_v.uartTxDelay);
         }
      }

      // print
      dnm_ucli_printf("done.\r\n");
   }
}

// RX task: registers an async notification channel for UART traffic, opens
// the UART device, then loops forever printing each received packet in hex.
static void uartRxTask(void* unused) {
   dn_error_t           dnErr;
   INT8U                osErr;
   dn_uart_open_args_t  uartOpenArgs;
   INT32U               rxLen;
   INT32U               msgType;
   INT8U                i;
   INT32S               err;

   // create the memory block for the UART channel
   uart_app_v.uartRxChannelMem = OSMemCreate(
      uart_app_v.uartRxChannelMemBuf,
      1,
      sizeof(uart_app_v.uartRxChannelMemBuf),
      &osErr
   );
   ASSERT(osErr==OS_ERR_NONE);

   // create an asynchronous notification channel
   dnErr = dn_createAsyncChannel(uart_app_v.uartRxChannelMem, &uart_app_v.uartRxChannel);
   ASSERT(dnErr==DN_ERR_NONE);

   // associate the channel descriptor with UART notifications
   dnErr = dn_registerChannel(uart_app_v.uartRxChannel, DN_MSG_TYPE_UART_NOTIF);
   ASSERT(dnErr==DN_ERR_NONE);

   // open the UART device
   uartOpenArgs.rxChId    = uart_app_v.uartRxChannel;
   uartOpenArgs.eventChId = 0;
   uartOpenArgs.rate      = 115200u;
   uartOpenArgs.mode      = DN_UART_MODE_M4;
   uartOpenArgs.ctsOutVal = 0;
   uartOpenArgs.fNoSleep  = 0;
   err = dn_open(
      DN_UART_DEV_ID,
      &uartOpenArgs,
      sizeof(uartOpenArgs)
   );
   ASSERT(err>=0);

   while(1) { // this is a task, it executes forever

      // wait for UART messages
      dnErr = dn_readAsyncMsg(
         uart_app_v.uartRxChannel,   // chDesc
         uart_app_v.uartRxBuffer,    // msg
         &rxLen,                     // rxLen
         &msgType,                   // msgType
         MAX_UART_PACKET_SIZE,       // maxLen
         0                           // timeout (0==never)
      );
      ASSERT(dnErr==DN_ERR_NONE);
      ASSERT(msgType==DN_MSG_TYPE_UART_NOTIF);

      // print message received
      dnm_ucli_printf("uart RX (%d bytes)",rxLen);
      for (i=0;i<rxLen;i++) {
         dnm_ucli_printf(" %02x",uart_app_v.uartRxBuffer[i]);
      }
      dnm_ucli_printf("\r\n");
   }
}

//=============================================================================
//=========================== install a kernel header =========================
//=============================================================================

/**
A kernel header is a set of bytes prepended to the actual binary image
of this application. Thus header is needed for your application to start
running.
*/
DN_CREATE_EXE_HDR(DN_VENDOR_ID_NOT_SET,
                  DN_APP_ID_NOT_SET,
                  VER_MAJOR,
                  VER_MINOR,
                  VER_PATCH,
                  VER_BUILD);
560288.c
/* * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "webrtc/common_audio/signal_processing/complex_fft_tables.h" #include "signal_processing_library.h" #define CFFTSFT 14 #define CFFTRND 1 #define CFFTRND2 16384 #define CIFFTSFT 14 #define CIFFTRND 1 int WebRtcSpl_ComplexFFT(int16_t frfi[], int stages, int mode) { int i = 0; int l = 0; int k = 0; int istep = 0; int n = 0; int m = 0; int32_t wr = 0, wi = 0; int32_t tmp1 = 0; int32_t tmp2 = 0; int32_t tmp3 = 0; int32_t tmp4 = 0; int32_t tmp5 = 0; int32_t tmp6 = 0; int32_t tmp = 0; int16_t* ptr_j = NULL; int16_t* ptr_i = NULL; n = 1 << stages; if (n > 1024) { return -1; } __asm __volatile ( ".set push \n\t" ".set noreorder \n\t" "addiu %[k], $zero, 10 \n\t" "addiu %[l], $zero, 1 \n\t" "3: \n\t" "sll %[istep], %[l], 1 \n\t" "move %[m], $zero \n\t" "sll %[tmp], %[l], 2 \n\t" "move %[i], $zero \n\t" "2: \n\t" #if defined(MIPS_DSP_R1_LE) "sllv %[tmp3], %[m], %[k] \n\t" "addiu %[tmp2], %[tmp3], 512 \n\t" "addiu %[m], %[m], 1 \n\t" "lhx %[wi], %[tmp3](%[kSinTable1024]) \n\t" "lhx %[wr], %[tmp2](%[kSinTable1024]) \n\t" #else // #if defined(MIPS_DSP_R1_LE) "sllv %[tmp3], %[m], %[k] \n\t" "addu %[ptr_j], %[tmp3], %[kSinTable1024] \n\t" "addiu %[ptr_i], %[ptr_j], 512 \n\t" "addiu %[m], %[m], 1 \n\t" "lh %[wi], 0(%[ptr_j]) \n\t" "lh %[wr], 0(%[ptr_i]) \n\t" #endif // #if defined(MIPS_DSP_R1_LE) "1: \n\t" "sll %[tmp1], %[i], 2 \n\t" "addu %[ptr_i], %[frfi], %[tmp1] \n\t" "addu %[ptr_j], %[ptr_i], %[tmp] \n\t" "lh %[tmp6], 0(%[ptr_i]) \n\t" "lh %[tmp5], 2(%[ptr_i]) \n\t" "lh %[tmp3], 0(%[ptr_j]) \n\t" "lh %[tmp4], 2(%[ptr_j]) \n\t" "addu %[i], %[i], %[istep] \n\t" #if 
defined(MIPS_DSP_R2_LE) "mult %[wr], %[tmp3] \n\t" "madd %[wi], %[tmp4] \n\t" "mult $ac1, %[wr], %[tmp4] \n\t" "msub $ac1, %[wi], %[tmp3] \n\t" "mflo %[tmp1] \n\t" "mflo %[tmp2], $ac1 \n\t" "sll %[tmp6], %[tmp6], 14 \n\t" "sll %[tmp5], %[tmp5], 14 \n\t" "shra_r.w %[tmp1], %[tmp1], 1 \n\t" "shra_r.w %[tmp2], %[tmp2], 1 \n\t" "subu %[tmp4], %[tmp6], %[tmp1] \n\t" "addu %[tmp1], %[tmp6], %[tmp1] \n\t" "addu %[tmp6], %[tmp5], %[tmp2] \n\t" "subu %[tmp5], %[tmp5], %[tmp2] \n\t" "shra_r.w %[tmp1], %[tmp1], 15 \n\t" "shra_r.w %[tmp6], %[tmp6], 15 \n\t" "shra_r.w %[tmp4], %[tmp4], 15 \n\t" "shra_r.w %[tmp5], %[tmp5], 15 \n\t" #else // #if defined(MIPS_DSP_R2_LE) "mul %[tmp2], %[wr], %[tmp4] \n\t" "mul %[tmp1], %[wr], %[tmp3] \n\t" "mul %[tmp4], %[wi], %[tmp4] \n\t" "mul %[tmp3], %[wi], %[tmp3] \n\t" "sll %[tmp6], %[tmp6], 14 \n\t" "sll %[tmp5], %[tmp5], 14 \n\t" "addiu %[tmp6], %[tmp6], 16384 \n\t" "addiu %[tmp5], %[tmp5], 16384 \n\t" "addu %[tmp1], %[tmp1], %[tmp4] \n\t" "subu %[tmp2], %[tmp2], %[tmp3] \n\t" "addiu %[tmp1], %[tmp1], 1 \n\t" "addiu %[tmp2], %[tmp2], 1 \n\t" "sra %[tmp1], %[tmp1], 1 \n\t" "sra %[tmp2], %[tmp2], 1 \n\t" "subu %[tmp4], %[tmp6], %[tmp1] \n\t" "addu %[tmp1], %[tmp6], %[tmp1] \n\t" "addu %[tmp6], %[tmp5], %[tmp2] \n\t" "subu %[tmp5], %[tmp5], %[tmp2] \n\t" "sra %[tmp4], %[tmp4], 15 \n\t" "sra %[tmp1], %[tmp1], 15 \n\t" "sra %[tmp6], %[tmp6], 15 \n\t" "sra %[tmp5], %[tmp5], 15 \n\t" #endif // #if defined(MIPS_DSP_R2_LE) "sh %[tmp1], 0(%[ptr_i]) \n\t" "sh %[tmp6], 2(%[ptr_i]) \n\t" "sh %[tmp4], 0(%[ptr_j]) \n\t" "blt %[i], %[n], 1b \n\t" " sh %[tmp5], 2(%[ptr_j]) \n\t" "blt %[m], %[l], 2b \n\t" " addu %[i], $zero, %[m] \n\t" "move %[l], %[istep] \n\t" "blt %[l], %[n], 3b \n\t" " addiu %[k], %[k], -1 \n\t" ".set pop \n\t" : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6), [ptr_i] "=&r" (ptr_i), [i] "=&r" (i), [wi] "=&r" (wi), [wr] "=&r" (wr), [m] "=&r" (m), [istep] "=&r" 
(istep), [l] "=&r" (l), [k] "=&r" (k), [ptr_j] "=&r" (ptr_j), [tmp] "=&r" (tmp) : [n] "r" (n), [frfi] "r" (frfi), [kSinTable1024] "r" (kSinTable1024) : "hi", "lo", "$ac1hi", "$ac1lo", "memory" ); return 0; } int WebRtcSpl_ComplexIFFT(int16_t frfi[], int stages, int mode) { int i = 0, l = 0, k = 0; int istep = 0, n = 0, m = 0; int scale = 0, shift = 0; int32_t wr = 0, wi = 0; int32_t tmp1 = 0, tmp2 = 0, tmp3 = 0, tmp4 = 0; int32_t tmp5 = 0, tmp6 = 0, tmp = 0, tempMax = 0, round2 = 0; int16_t* ptr_j = NULL; int16_t* ptr_i = NULL; n = 1 << stages; if (n > 1024) { return -1; } __asm __volatile ( ".set push \n\t" ".set noreorder \n\t" "addiu %[k], $zero, 10 \n\t" "addiu %[l], $zero, 1 \n\t" "move %[scale], $zero \n\t" "3: \n\t" "addiu %[shift], $zero, 14 \n\t" "addiu %[round2], $zero, 8192 \n\t" "move %[ptr_i], %[frfi] \n\t" "move %[tempMax], $zero \n\t" "addu %[i], %[n], %[n] \n\t" "5: \n\t" "lh %[tmp1], 0(%[ptr_i]) \n\t" "lh %[tmp2], 2(%[ptr_i]) \n\t" "lh %[tmp3], 4(%[ptr_i]) \n\t" "lh %[tmp4], 6(%[ptr_i]) \n\t" #if defined(MIPS_DSP_R1_LE) "absq_s.w %[tmp1], %[tmp1] \n\t" "absq_s.w %[tmp2], %[tmp2] \n\t" "absq_s.w %[tmp3], %[tmp3] \n\t" "absq_s.w %[tmp4], %[tmp4] \n\t" #else // #if defined(MIPS_DSP_R1_LE) "slt %[tmp5], %[tmp1], $zero \n\t" "subu %[tmp6], $zero, %[tmp1] \n\t" "movn %[tmp1], %[tmp6], %[tmp5] \n\t" "slt %[tmp5], %[tmp2], $zero \n\t" "subu %[tmp6], $zero, %[tmp2] \n\t" "movn %[tmp2], %[tmp6], %[tmp5] \n\t" "slt %[tmp5], %[tmp3], $zero \n\t" "subu %[tmp6], $zero, %[tmp3] \n\t" "movn %[tmp3], %[tmp6], %[tmp5] \n\t" "slt %[tmp5], %[tmp4], $zero \n\t" "subu %[tmp6], $zero, %[tmp4] \n\t" "movn %[tmp4], %[tmp6], %[tmp5] \n\t" #endif // #if defined(MIPS_DSP_R1_LE) "slt %[tmp5], %[tempMax], %[tmp1] \n\t" "movn %[tempMax], %[tmp1], %[tmp5] \n\t" "addiu %[i], %[i], -4 \n\t" "slt %[tmp5], %[tempMax], %[tmp2] \n\t" "movn %[tempMax], %[tmp2], %[tmp5] \n\t" "slt %[tmp5], %[tempMax], %[tmp3] \n\t" "movn %[tempMax], %[tmp3], %[tmp5] \n\t" "slt %[tmp5], %[tempMax], 
%[tmp4] \n\t" "movn %[tempMax], %[tmp4], %[tmp5] \n\t" "bgtz %[i], 5b \n\t" " addiu %[ptr_i], %[ptr_i], 8 \n\t" "addiu %[tmp1], $zero, 13573 \n\t" "addiu %[tmp2], $zero, 27146 \n\t" #if !defined(MIPS32_R2_LE) "sll %[tempMax], %[tempMax], 16 \n\t" "sra %[tempMax], %[tempMax], 16 \n\t" #else // #if !defined(MIPS32_R2_LE) "seh %[tempMax] \n\t" #endif // #if !defined(MIPS32_R2_LE) "slt %[tmp1], %[tmp1], %[tempMax] \n\t" "slt %[tmp2], %[tmp2], %[tempMax] \n\t" "addu %[tmp1], %[tmp1], %[tmp2] \n\t" "addu %[shift], %[shift], %[tmp1] \n\t" "addu %[scale], %[scale], %[tmp1] \n\t" "sllv %[round2], %[round2], %[tmp1] \n\t" "sll %[istep], %[l], 1 \n\t" "move %[m], $zero \n\t" "sll %[tmp], %[l], 2 \n\t" "2: \n\t" #if defined(MIPS_DSP_R1_LE) "sllv %[tmp3], %[m], %[k] \n\t" "addiu %[tmp2], %[tmp3], 512 \n\t" "addiu %[m], %[m], 1 \n\t" "lhx %[wi], %[tmp3](%[kSinTable1024]) \n\t" "lhx %[wr], %[tmp2](%[kSinTable1024]) \n\t" #else // #if defined(MIPS_DSP_R1_LE) "sllv %[tmp3], %[m], %[k] \n\t" "addu %[ptr_j], %[tmp3], %[kSinTable1024] \n\t" "addiu %[ptr_i], %[ptr_j], 512 \n\t" "addiu %[m], %[m], 1 \n\t" "lh %[wi], 0(%[ptr_j]) \n\t" "lh %[wr], 0(%[ptr_i]) \n\t" #endif // #if defined(MIPS_DSP_R1_LE) "1: \n\t" "sll %[tmp1], %[i], 2 \n\t" "addu %[ptr_i], %[frfi], %[tmp1] \n\t" "addu %[ptr_j], %[ptr_i], %[tmp] \n\t" "lh %[tmp3], 0(%[ptr_j]) \n\t" "lh %[tmp4], 2(%[ptr_j]) \n\t" "lh %[tmp6], 0(%[ptr_i]) \n\t" "lh %[tmp5], 2(%[ptr_i]) \n\t" "addu %[i], %[i], %[istep] \n\t" #if defined(MIPS_DSP_R2_LE) "mult %[wr], %[tmp3] \n\t" "msub %[wi], %[tmp4] \n\t" "mult $ac1, %[wr], %[tmp4] \n\t" "madd $ac1, %[wi], %[tmp3] \n\t" "mflo %[tmp1] \n\t" "mflo %[tmp2], $ac1 \n\t" "sll %[tmp6], %[tmp6], 14 \n\t" "sll %[tmp5], %[tmp5], 14 \n\t" "shra_r.w %[tmp1], %[tmp1], 1 \n\t" "shra_r.w %[tmp2], %[tmp2], 1 \n\t" "addu %[tmp6], %[tmp6], %[round2] \n\t" "addu %[tmp5], %[tmp5], %[round2] \n\t" "subu %[tmp4], %[tmp6], %[tmp1] \n\t" "addu %[tmp1], %[tmp6], %[tmp1] \n\t" "addu %[tmp6], %[tmp5], %[tmp2] \n\t" "subu 
%[tmp5], %[tmp5], %[tmp2] \n\t" "srav %[tmp4], %[tmp4], %[shift] \n\t" "srav %[tmp1], %[tmp1], %[shift] \n\t" "srav %[tmp6], %[tmp6], %[shift] \n\t" "srav %[tmp5], %[tmp5], %[shift] \n\t" #else // #if defined(MIPS_DSP_R2_LE) "mul %[tmp1], %[wr], %[tmp3] \n\t" "mul %[tmp2], %[wr], %[tmp4] \n\t" "mul %[tmp4], %[wi], %[tmp4] \n\t" "mul %[tmp3], %[wi], %[tmp3] \n\t" "sll %[tmp6], %[tmp6], 14 \n\t" "sll %[tmp5], %[tmp5], 14 \n\t" "sub %[tmp1], %[tmp1], %[tmp4] \n\t" "addu %[tmp2], %[tmp2], %[tmp3] \n\t" "addiu %[tmp1], %[tmp1], 1 \n\t" "addiu %[tmp2], %[tmp2], 1 \n\t" "sra %[tmp2], %[tmp2], 1 \n\t" "sra %[tmp1], %[tmp1], 1 \n\t" "addu %[tmp6], %[tmp6], %[round2] \n\t" "addu %[tmp5], %[tmp5], %[round2] \n\t" "subu %[tmp4], %[tmp6], %[tmp1] \n\t" "addu %[tmp1], %[tmp6], %[tmp1] \n\t" "addu %[tmp6], %[tmp5], %[tmp2] \n\t" "subu %[tmp5], %[tmp5], %[tmp2] \n\t" "sra %[tmp4], %[tmp4], %[shift] \n\t" "sra %[tmp1], %[tmp1], %[shift] \n\t" "sra %[tmp6], %[tmp6], %[shift] \n\t" "sra %[tmp5], %[tmp5], %[shift] \n\t" #endif // #if defined(MIPS_DSP_R2_LE) "sh %[tmp1], 0(%[ptr_i]) \n\t" "sh %[tmp6], 2(%[ptr_i]) \n\t" "sh %[tmp4], 0(%[ptr_j]) \n\t" "blt %[i], %[n], 1b \n\t" " sh %[tmp5], 2(%[ptr_j]) \n\t" "blt %[m], %[l], 2b \n\t" " addu %[i], $zero, %[m] \n\t" "move %[l], %[istep] \n\t" "blt %[l], %[n], 3b \n\t" " addiu %[k], %[k], -1 \n\t" ".set pop \n\t" : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6), [ptr_i] "=&r" (ptr_i), [i] "=&r" (i), [m] "=&r" (m), [tmp] "=&r" (tmp), [istep] "=&r" (istep), [wi] "=&r" (wi), [wr] "=&r" (wr), [l] "=&r" (l), [k] "=&r" (k), [round2] "=&r" (round2), [ptr_j] "=&r" (ptr_j), [shift] "=&r" (shift), [scale] "=&r" (scale), [tempMax] "=&r" (tempMax) : [n] "r" (n), [frfi] "r" (frfi), [kSinTable1024] "r" (kSinTable1024) : "hi", "lo", "$ac1hi", "$ac1lo", "memory" ); return scale; }
749910.c
/****************************************************************************** * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * $Revision: 1.1.1.2 $ * *****************************************************************************/ /* * Copyright (C) 2000, 2001 R. Byron Moore * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "acpi.h" #include "achware.h" #define _COMPONENT ACPI_HARDWARE MODULE_NAME ("hwacpi") /****************************************************************************** * * FUNCTION: Acpi_hw_initialize * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Initialize and validate various ACPI registers * ******************************************************************************/ acpi_status acpi_hw_initialize ( void) { acpi_status status = AE_OK; u32 index; FUNCTION_TRACE ("Hw_initialize"); /* We must have the ACPI tables by the time we get here */ if (!acpi_gbl_FADT) { acpi_gbl_restore_acpi_chipset = FALSE; ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No FADT!\n")); return_ACPI_STATUS (AE_NO_ACPI_TABLES); } /* Identify current ACPI/legacy mode */ switch (acpi_gbl_system_flags & SYS_MODES_MASK) { case (SYS_MODE_ACPI): acpi_gbl_original_mode = SYS_MODE_ACPI; ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "System supports ACPI mode only.\n")); break; case (SYS_MODE_LEGACY): acpi_gbl_original_mode 
= SYS_MODE_LEGACY; ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Tables loaded from buffer, hardware assumed to support LEGACY mode only.\n")); break; case (SYS_MODE_ACPI | SYS_MODE_LEGACY): if (acpi_hw_get_mode () == SYS_MODE_ACPI) { acpi_gbl_original_mode = SYS_MODE_ACPI; } else { acpi_gbl_original_mode = SYS_MODE_LEGACY; } ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "System supports both ACPI and LEGACY modes.\n")); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "System is currently in %s mode.\n", (acpi_gbl_original_mode == SYS_MODE_ACPI) ? "ACPI" : "LEGACY")); break; } if (acpi_gbl_system_flags & SYS_MODE_ACPI) { /* Target system supports ACPI mode */ /* * The purpose of this code is to save the initial state * of the ACPI event enable registers. An exit function will be * registered which will restore this state when the application * exits. The exit function will also clear all of the ACPI event * status bits prior to restoring the original mode. * * The location of the PM1a_evt_blk enable registers is defined as the * base of PM1a_evt_blk + DIV_2(PM1a_evt_blk_length). Since the spec further * fully defines the PM1a_evt_blk to be a total of 4 bytes, the offset * for the enable registers is always 2 from the base. It is hard * coded here. If this changes in the spec, this code will need to * be modified. The PM1b_evt_blk behaves as expected. 
*/ acpi_gbl_pm1_enable_register_save = (u16) acpi_hw_register_read ( ACPI_MTX_LOCK, PM1_EN); /* * The GPEs behave similarly, except that the length of the register * block is not fixed, so the buffer must be allocated with malloc */ if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) && acpi_gbl_FADT->gpe0blk_len) { /* GPE0 specified in FADT */ acpi_gbl_gpe0enable_register_save = ACPI_MEM_ALLOCATE ( DIV_2 (acpi_gbl_FADT->gpe0blk_len)); if (!acpi_gbl_gpe0enable_register_save) { return_ACPI_STATUS (AE_NO_MEMORY); } /* Save state of GPE0 enable bits */ for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe0blk_len); index++) { acpi_gbl_gpe0enable_register_save[index] = (u8) acpi_hw_register_read (ACPI_MTX_LOCK, GPE0_EN_BLOCK | index); } } else { acpi_gbl_gpe0enable_register_save = NULL; } if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) && acpi_gbl_FADT->gpe1_blk_len) { /* GPE1 defined */ acpi_gbl_gpe1_enable_register_save = ACPI_MEM_ALLOCATE ( DIV_2 (acpi_gbl_FADT->gpe1_blk_len)); if (!acpi_gbl_gpe1_enable_register_save) { return_ACPI_STATUS (AE_NO_MEMORY); } /* save state of GPE1 enable bits */ for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe1_blk_len); index++) { acpi_gbl_gpe1_enable_register_save[index] = (u8) acpi_hw_register_read (ACPI_MTX_LOCK, GPE1_EN_BLOCK | index); } } else { acpi_gbl_gpe1_enable_register_save = NULL; } } return_ACPI_STATUS (status); } /****************************************************************************** * * FUNCTION: Acpi_hw_set_mode * * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY * * RETURN: Status * * DESCRIPTION: Transitions the system into the requested mode or does nothing * if the system is already in that mode. 
* ******************************************************************************/ acpi_status acpi_hw_set_mode ( u32 mode) { acpi_status status = AE_NO_HARDWARE_RESPONSE; FUNCTION_TRACE ("Hw_set_mode"); if (mode == SYS_MODE_ACPI) { /* BIOS should have disabled ALL fixed and GP events */ acpi_os_write_port (acpi_gbl_FADT->smi_cmd, acpi_gbl_FADT->acpi_enable, 8); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Attempting to enable ACPI mode\n")); } else if (mode == SYS_MODE_LEGACY) { /* * BIOS should clear all fixed status bits and restore fixed event * enable bits to default */ acpi_os_write_port (acpi_gbl_FADT->smi_cmd, acpi_gbl_FADT->acpi_disable, 8); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Attempting to enable Legacy (non-ACPI) mode\n")); } /* Give the platform some time to react */ acpi_os_stall (20000); if (acpi_hw_get_mode () == mode) { ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Mode %X successfully enabled\n", mode)); status = AE_OK; } return_ACPI_STATUS (status); } /****************************************************************************** * * FUNCTION: Acpi_hw_get_mode * * PARAMETERS: none * * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY * * DESCRIPTION: Return current operating state of system. Determined by * querying the SCI_EN bit. * ******************************************************************************/ u32 acpi_hw_get_mode (void) { FUNCTION_TRACE ("Hw_get_mode"); if (acpi_hw_register_bit_access (ACPI_READ, ACPI_MTX_LOCK, SCI_EN)) { return_VALUE (SYS_MODE_ACPI); } else { return_VALUE (SYS_MODE_LEGACY); } } /****************************************************************************** * * FUNCTION: Acpi_hw_get_mode_capabilities * * PARAMETERS: none * * RETURN: logical OR of SYS_MODE_ACPI and SYS_MODE_LEGACY determined at initial * system state. 
 *
 * DESCRIPTION: Returns capabilities of system
 *
 ******************************************************************************/

u32
acpi_hw_get_mode_capabilities (void)
{

	FUNCTION_TRACE ("Hw_get_mode_capabilities");


	/* Result is cached in acpi_gbl_system_flags; probe the hardware only once */
	if (!(acpi_gbl_system_flags & SYS_MODES_MASK)) {
		if (acpi_hw_get_mode () == SYS_MODE_LEGACY) {
			/*
			 * Assume that if this call is being made, Acpi_init has been called
			 * and ACPI support has been established by the presence of the
			 * tables.  Therefore since we're in SYS_MODE_LEGACY, the system
			 * must support both modes
			 */
			acpi_gbl_system_flags |= (SYS_MODE_ACPI | SYS_MODE_LEGACY);
		}

		else {
			/* TBD: [Investigate] !!! this may be unsafe... */
			/*
			 * System is in ACPI mode, so try to switch back to LEGACY to see if
			 * it is supported
			 */
			acpi_hw_set_mode (SYS_MODE_LEGACY);

			if (acpi_hw_get_mode () == SYS_MODE_LEGACY) {
				/* Now in SYS_MODE_LEGACY, so both are supported */

				acpi_gbl_system_flags |= (SYS_MODE_ACPI | SYS_MODE_LEGACY);

				/* Restore ACPI mode after the probe */
				acpi_hw_set_mode (SYS_MODE_ACPI);
			}

			else {
				/* Still in SYS_MODE_ACPI so this must be an ACPI only system */

				acpi_gbl_system_flags |= SYS_MODE_ACPI;
			}
		}
	}

	return_VALUE (acpi_gbl_system_flags & SYS_MODES_MASK);
}
168928.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <math.h> #include <openssl/sha.h> #include "types.h" #include "sysenv.h" #include "sys/mem.h" #include "wrappers/base64.h" #include "wrappers/pbc_ext.h" #include "misc/misc.h" #include "exim.h" #include "cpy06.h" #include "groupsig/cpy06/proof.h" /* Private functions */ /** * @fn static int _is_supported_format(groupsig_proof_format_t format) * @brief Returns 1 if the specified format is supported by this scheme. 0 if not. * * @param[in] format The format to be "tested" * * @return 1 if the specified format is supported, 0 if not. */ static int _is_supported_format(groupsig_proof_format_t format) { int i; for(i=0; i<CPY06_SUPPORTED_PROOF_FORMATS_N; i++) { if(CPY06_SUPPORTED_PROOF_FORMATS[i] == format) { return 1; } } return 0; } /** * @fn static int _get_size_bytearray_null(exim_t *obj) * @brief Returns the size in bytes of the exim wrapped object. The size will be * equal to the size of bytearray output by _export_fd() or created by * _import_fd(). * * @param[in] obj The object to be sized. 
* * @return The size in bytes of the object contained in obj. */ static int _get_size_bytearray_null(exim_t* obj) { int size; if(!obj || !obj->eximable) { LOG_EINVAL(&logger, __FILE__, "_get_size_bytearray_null", __LINE__, LOGERROR); return -1; } cpy06_proof_t *proof = obj->eximable; // size = sizeof(code) + 2*size(element size data) + elements size = sizeof(byte_t) + 2*sizeof(int) + element_length_in_bytes(proof->c) + element_length_in_bytes(proof->s); return size; } /** * @fn static int _export_fd(exim_t* obj, FILE *fd) * @brief Exports a CPY06 proof to a bytearray, * * The format of the produced bytearray will be will be: * * | CPY06_CODE | sizeof(c) | c | sizeof(s) | s| * * Where the first field is a byte and the sizeof fields are ints indicating * the number of bytes of the following field. * * @param[in] obj The exim wrapped proof to export. * @param[in] fd The destination file descriptor. Must be big enough to store the result. * * @return IOK or IERROR: */ static int _export_fd(exim_t* obj, FILE *fd) { byte_t *bytes; uint64_t size, offset, written; cpy06_proof_t *proof; if(!obj || !obj->eximable || !fd) { LOG_EINVAL(&logger, __FILE__, "_export_fd", __LINE__, LOGERROR); return IERROR; } proof = obj->eximable; size = _get_size_bytearray_null(obj);//element_length_in_bytes(proof->c)+element_length_in_bytes(proof->s)+ //sizeof(uint64_t)*2+1; if(!(bytes = (byte_t *) mem_malloc(sizeof(byte_t *)*size))) { return IERROR; } /* Dump GROUPSIG_CPY06_CODE */ bytes[0] = GROUPSIG_CPY06_CODE; offset = 1; /* Dump T1 */ if(pbcext_dump_element_bytes(&bytes[offset], &written, proof->c) == IERROR) { mem_free(bytes); bytes = NULL; return IERROR; } offset += written; /* Dump T2 */ if(pbcext_dump_element_bytes(&bytes[offset], &written, proof->s) == IERROR) { mem_free(bytes); bytes = NULL; return IERROR; } offset += written; if(fwrite(bytes, offset, 1, fd) != 1){ return IERROR; } mem_free(bytes); bytes = NULL; return IOK; } /** * @fn static int _import_fd(FILE *fd, exim_t* obj) * 
@brief Import a representation of the given key from a file descriptor. * Expects the same format as the output from _export_fd(). * * @return IOK or IERROR */ static int _import_fd(FILE *fd, exim_t* obj) { groupsig_proof_t *proof; cpy06_proof_t *cpy06_proof; struct pairing_s *pairing; uint64_t size, offset, rd; int scheme; byte_t* buffer; if(!fd || !obj) { LOG_EINVAL(&logger, __FILE__, "_import_fd", __LINE__, LOGERROR); return IERROR; } if(!(proof = cpy06_proof_init())) { return IERROR; } size = misc_get_fd_size(fd); if(!(buffer = (byte_t*)mem_malloc(size))){ return IERROR; } fread(buffer, size, 1, fd); cpy06_proof = proof->proof; pairing = ((cpy06_sysenv_t *) sysenv->data)->pairing; /* First byte: scheme */ scheme = buffer[0]; offset = 1; if(scheme != proof->scheme) { LOG_ERRORCODE_MSG(&logger, __FILE__, "_import_fd", __LINE__, EDQUOT, "Unexpected proof scheme.", LOGERROR); cpy06_proof_free(proof); proof = NULL; return IERROR; } /* Get Zr */ element_init_Zr(cpy06_proof->c, pairing); if(pbcext_get_element_bytes(cpy06_proof->c, &rd, &buffer[offset]) == IERROR) { cpy06_proof_free(proof); proof = NULL; return IERROR; } offset += rd; /* Get s */ element_init_Zr(cpy06_proof->s, pairing); if(pbcext_get_element_bytes(cpy06_proof->s, &rd, &buffer[offset]) == IERROR) { cpy06_proof_free(proof); proof = NULL; return IERROR; } offset += rd; obj->eximable = proof; return IOK; } /* Export/import handle definition */ static exim_handle_t _exim_h = { &_get_size_bytearray_null, &_export_fd, &_import_fd, }; groupsig_proof_t* cpy06_proof_init() { groupsig_proof_t *proof; if(!(proof = (groupsig_proof_t *) mem_malloc(sizeof(groupsig_proof_t)))) { return NULL; } proof->scheme = GROUPSIG_CPY06_CODE; if(!(proof->proof = (cpy06_proof_t *) mem_malloc(sizeof(cpy06_proof_t)))) { mem_free(proof); proof = NULL; return NULL; } return proof; } int cpy06_proof_free(groupsig_proof_t *proof) { if(!proof) { LOG_EINVAL_MSG(&logger, __FILE__, "cpy06_proof_free", __LINE__, "Nothing to free.", LOGWARN); 
return IERROR; } if(proof->proof) { element_clear(((cpy06_proof_t *) proof->proof)->c); element_clear(((cpy06_proof_t *) proof->proof)->s); mem_free(proof->proof); proof->proof = NULL; } mem_free(proof); return IOK; } /* int cpy06_proof_init_set_c(cpy06_proof_t *proof, bigz_t c) { */ /* if(!proof || !c) { */ /* LOG_EINVAL(&logger, __FILE__, "cpy06_proof_init_set_c", __LINE__, LOGERROR); */ /* return IERROR; */ /* } */ /* if(!(proof->c = bigz_init_set(c))) { */ /* return IERROR; */ /* } */ /* return IOK; */ /* } */ /* int cpy06_proof_init_set_s(cpy06_proof_t *proof, bigz_t s) { */ /* if(!proof || !s) { */ /* LOG_EINVAL(&logger, __FILE__, "cpy06_proof_init_set_s", __LINE__, LOGERROR); */ /* return IERROR; */ /* } */ /* if(!(proof->s = bigz_init_set(s))) { */ /* return IERROR; */ /* } */ /* return IOK; */ /* } */ char* cpy06_proof_to_string(groupsig_proof_t *proof) { if(!proof || proof->scheme != GROUPSIG_CPY06_CODE) { LOG_EINVAL(&logger, __FILE__, "cpy06_proof_to_string", __LINE__, LOGERROR); return NULL; } return NULL; } int cpy06_proof_export(groupsig_proof_t *proof, groupsig_proof_format_t format, void *dst) { cpy06_proof_t *cpy06_proof; if(!proof || proof->scheme != GROUPSIG_CPY06_CODE) { LOG_EINVAL(&logger, __FILE__, "cpy06_proof_export", __LINE__, LOGERROR); return IERROR; } cpy06_proof = (cpy06_proof_t *) proof->proof; /* See if the current scheme supports the given format */ if(!_is_supported_format(format)) { LOG_EINVAL_MSG(&logger, __FILE__, "cpy06_proof_export", __LINE__, "The specified format is not supported.", LOGERROR); return IERROR; } exim_t wrap = {cpy06_proof, &_exim_h }; return exim_export(&wrap, format, dst); } groupsig_proof_t* cpy06_proof_import(groupsig_proof_format_t format, void *source) { if(!source) { LOG_EINVAL(&logger, __FILE__, "cpy06_proof_import", __LINE__, LOGERROR); return NULL; } /* See if the current scheme supports the given format */ if(!_is_supported_format(format)) { LOG_EINVAL_MSG(&logger, __FILE__, "cpy06_proof_import", 
__LINE__, "The specified format is not supported.", LOGERROR); return NULL; } exim_t wrap = {NULL, &_exim_h }; if(exim_import(format, source, &wrap) != IOK){ return NULL; } return wrap.eximable; } int cpy06_proof_get_size_in_format(groupsig_proof_t *proof, groupsig_proof_format_t format) { if(!proof || proof->scheme != GROUPSIG_CPY06_CODE) { LOG_EINVAL(&logger, __FILE__, "cpy06_proof_get_size_in_format", __LINE__, LOGERROR); return -1; } /* See if the current scheme supports the given format */ if(!_is_supported_format(format)) { LOG_EINVAL_MSG(&logger, __FILE__, "cpy06_proof_get_size_in_format", __LINE__, "The specified format is not supported.", LOGERROR); return -1; } exim_t wrap = {proof->proof, &_exim_h }; return exim_get_size_in_format(&wrap, format); } /* proof.c ends here */
974981.c
/*
 * hmap_del: remove the given key from the hash table.
 * => Sets *ptr_is_found to true and frees the bucket if the key was present;
 *    returns 0 on success (including "not found"), negative on error.
 */
#include "hmap_common.h"
#include "hmap_aux.h"
#include "hmap_resize.h"
#include "hmap_del.h"
int
hmap_del(
    hmap_t *H,
    const void *key,
    bool *ptr_is_found,
    dbg_t *ptr_dbg
    )
{
  int status = 0;
  if ( key == NULL ) { go_BYE(-1); } // not a valid key

  uint16_t len_to_hash; char *str_to_hash = NULL; bool free_to_hash;
  /* Fix: the key_hash status was previously ignored. */
  status = H->key_hash(key, &str_to_hash, &len_to_hash, &free_to_hash);
  cBYE(status);
  register uint32_t hash = set_hash(str_to_hash, len_to_hash, H, ptr_dbg);
  /* Fix: release the hash scratch string when the hasher asked us to
     (free_to_hash was previously obtained but never honored — leak).
     NOTE(review): assumes key_hash allocates with malloc; confirm. */
  if ( free_to_hash ) { free(str_to_hash); str_to_hash = NULL; }
  register uint32_t probe_loc = set_probe_loc(hash, H, ptr_dbg);
  register bkt_t *bkts = H->bkts;
  register uint32_t my_psl = 0;
  register uint32_t num_probes = 0;
  *ptr_is_found = false;
  register bkt_t *this_bkt;
  for ( ; ; ) {
    /* Safety valve: never probe more slots than the table has. */
    if ( num_probes >= H->size ) { go_BYE(-1); }
    this_bkt = bkts + probe_loc;
    // same probing logic as in lookup function.
    if ( ( this_bkt->key == NULL ) || ( my_psl > this_bkt->psl) ) {
      // key does not exist
      goto BYE;
    }
    if (( this_bkt->hash == hash ) && ( H->key_cmp(this_bkt->key, key) )) {
      *ptr_is_found = true;
      break;
    }
    my_psl++;
    /* Continue to the next bucket (with wrap-around). */
    probe_loc++; if ( probe_loc == H->size ) { probe_loc = 0; }
    num_probes++;
  }
  if ( *ptr_is_found == false ) { go_BYE(-1); } // defensive; loop guarantees found here
  // Free the bucket.
  H->key_free(this_bkt->key);
  H->val_free(this_bkt->val);
  H->nitems--;
  /*
   * The probe sequence must be preserved in the deletion case.
   * Use the backwards-shifting method to maintain low variance.
   */
  for ( ; ; ) {
    memset(this_bkt, 0, sizeof(bkt_t));
    bkt_t *next_bkt;
    probe_loc++; if ( probe_loc == H->size ) { probe_loc = 0; }
    next_bkt = bkts + probe_loc;
    /*
     * Stop if we reach an empty bucket or hit a key which
     * is in its base (original) location.
     */
    if ( ( next_bkt->key == NULL ) || ( next_bkt->psl == 0 ) ) {
      break;
    }
    next_bkt->psl--;
    *this_bkt = *next_bkt;
    this_bkt = next_bkt;
  }
  /*
   * If the load factor is less than threshold, then shrink the hash table
   * However, don't go below minimum size
   */
  size_t threshold = LOW_WATER_MARK * H->size;
  if ( ( H->nitems > H->config.min_size ) && ( H->nitems < threshold ) ) {
    size_t new_size = ((LOW_WATER_MARK+HIGH_WATER_MARK)/2.0)*H->nitems;
    if ( new_size < H->config.min_size ) { new_size = H->config.min_size; }
    status = hmap_resize(H, new_size);
    cBYE(status);
  }
BYE:
  return status;
}
640965.c
#include <stdio.h>

/*
 * Reads two integers from stdin and reports whether they are equal.
 * Returns 0 on success, 1 if either read fails (fix: the original used
 * the variables uninitialized when scanf failed — undefined behavior).
 */
int main () {
    int a, b;

    printf("Informe o 1o valor: ");
    if (scanf("%d", &a) != 1) {
        fprintf(stderr, "Entrada invalida\n");
        return 1;
    }

    printf("Informe o 2o valor: ");
    if (scanf("%d", &b) != 1) {
        fprintf(stderr, "Entrada invalida\n");
        return 1;
    }

    if (a == b)
        printf("Numeros iguais=%d\n", a);
    else
        printf("Numeros diferentes %d e %d\n", a, b);

    return(0);
}
387338.c
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2020, Daniel Stenberg, <[email protected]>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ***************************************************************************/

#include "curl_setup.h"

#include <curl/curl.h>
#include "curl_memory.h"

#include "memdebug.h"

/*
 * Returns a heap-allocated copy of the named environment variable, or NULL
 * if it is unset, empty, or on allocation failure. Caller frees the result.
 */
static char* GetEnv(const char* variable)
{
#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
  /* No environment on these platforms. */
  (void)variable;
  return NULL;
#elif defined(WIN32)
  /* This uses Windows API instead of C runtime getenv() to get the
     environment variable since some changes aren't always visible to the
     latter. #4774 */
  char* buf = NULL;
  char* tmp;
  DWORD bufsize;
  DWORD rc = 1;                 /* initial allocation: 1 byte probe */
  const DWORD max = 32768;    /* max env var size from MSCRT source */

  for (;;) {
    /* Grow the buffer to the size the previous call said it needs. */
    tmp = realloc(buf, rc);
    if (!tmp) {
      free(buf);
      return NULL;
    }

    buf = tmp;
    bufsize = rc;

    /* It's possible for rc to be 0 if the variable was found but empty.
       Since getenv doesn't make that distinction we ignore it as well. */
    rc = GetEnvironmentVariableA(variable, buf, bufsize);
    if (!rc || rc == bufsize || rc > max) {
      free(buf);
      return NULL;
    }

    /* if rc < bufsize then rc is bytes written not including null */
    if (rc < bufsize)
      return buf;

    /* else rc is bytes needed, try again */
  }
#else
  char* env = getenv(variable);
  /* Duplicate so the caller owns the string; empty counts as unset. */
  return (env && env[0]) ?
    strdup(env) : NULL;
#endif
}

/*
 * Public entry point: identical contract to GetEnv() above.
 */
char* curl_getenv(const char* v)
{
  return GetEnv(v);
}
488433.c
/* * coral.c * * Created on: Mar 17, 2019 * Author: AQaut */ #include <coralI2C.h> #include "gio.h" #include "system.h" #include "i2c.h" #include "freeRTOS.h" #include "os_semphr.h" #include "os_queue.h" #include "pinmux.h" #include "sys_core.h" uint8 coral__parity(uint8 x); int coral__sendMDMessage(uint8 addr, MDmessage_t* message) { #if CONFIG_I2C_USE_INTERRUPTS #if CONFIG_I2C_FAIL_ON_TX_UNAVAILABLE if(i2cInUse == true) return -1; #else while(i2cInUse){}; #endif #endif uint8 raw_message[3]; //Generate message. raw_message[0] = message->checkSum; raw_message[1] = message->speedData; raw_message[2] = (0U | ((message->CounterClockwise) ? 64U : 0U) | ((message->RequestMotorStartup) ? 16U : 0U) | ((message->RequestMotorShutdown) ? 8U : 0U)); //Set Parity Bit if(coral__parity(raw_message[0]) ^ coral__parity(raw_message[1]) ^ coral__parity(raw_message[2])) { raw_message[2] |= 1U; } //Configure options. i2cSetSlaveAdd(i2cREG1, (uint32)addr); i2cSetDirection(i2cREG1, I2C_TRANSMITTER); i2cSetMode(i2cREG1, I2C_MASTER); i2cSetCount(i2cREG1, MD_MESSAGE_LEN); i2cSetStop(i2cREG1); //Send a stop after the count reaches 0 i2cSetStart(i2cREG1); #if CONFIG_I2C_USE_INTERRUPTS i2cInUse = true; #endif i2cSend(i2cREG1, MD_MESSAGE_LEN, raw_message); #if !CONFIG_I2C_USE_INTERRUPTS // Wait until Bus Busy is cleared while(i2cIsBusBusy(i2cREG1) == true); // Wait until Stop is detected while(i2cIsStopDetected(i2cREG1) == 0); // Clear the Stop condition i2cClearSCD(i2cREG1); #endif return 0; } int coral__receiveMDStatus(uint8 addr, MDmessage_t* status) { //Make sure the master is ready before continuing. 
while(i2cIsMasterReady(i2cREG1) != true); uint8 raw_message[3]; /* Configure address of Slave to talk to */ i2cSetSlaveAdd(i2cREG1, (uint32)addr); /* Set direction to receiver */ i2cSetDirection(i2cREG1, I2C_RECEIVER); i2cSetCount(i2cREG1, MD_MESSAGE_LEN); /* Set mode as Master */ i2cSetMode(i2cREG1, I2C_MASTER); /* Set Stop after programmed Count */ i2cSetStop(i2cREG1); /* Transmit Start Condition */ i2cSetStart(i2cREG1); i2cReceive(i2cREG1, MD_MESSAGE_LEN, raw_message); /* Wait until Bus Busy is cleared */ while(i2cIsBusBusy(i2cREG1) == true); /* Wait until Stop is detected */ while(i2cIsStopDetected(i2cREG1) == 0); /* Clear the Stop condition */ i2cClearSCD(i2cREG1); if((raw_message[0] == 'A') && ((coral__parity(raw_message[0]) ^ coral__parity(raw_message[1]) ^ coral__parity(raw_message[2])) == 0U)) { status->checkSum = raw_message[0]; status->speedData = raw_message[1]; status->CounterClockwise = (raw_message[2] & 64U) ? true : false; status->Error = (raw_message[2] & 32U) ? true : false; status->MotorDriverNotOK = (raw_message[2] & 4U) ? true : false; status->MotorDriverOK = (raw_message[2] & 2U) ? true : false; status->RequestMotorShutdown = false; status->RequestMotorStartup = false; return 0; } else { //Invalid status received. return -1; } } void coral__i2cSetup(void) { //Make sure I2C pins act like I2C pins. muxInit(); /* I2C Init as per GUI * Mode = Master - Transmitter * baud rate = 100KHz * Bit Count = 8bit */ i2cInit(); _enable_interrupt_(); #if CONFIG_I2C_USE_INTERRUPTS i2cEnableNotification(i2cREG1, I2C_TX_INT | I2C_SCD_INT); i2cInUse = false; #endif } void coral__ledOn() { gioInit(); gioSetDirection(gioPORTB, 0x2); gioSetBit(gioPORTB,1,1); for(;;); return; } /* * Returns the parity of an individual char. 
*/ uint8 coral__parity(uint8 x) { uint8 par = 0; int i; for(i = 0; i < 8; i++) { par ^= x; x >>= 1; } return par & 0x01; } void i2cNotification(i2cBASE_t *i2c, uint32 flags) { #if CONFIG_I2C_USE_INTERRUPTS //When we reach the stop condition after a successful transmission... if(flags == (uint32)I2C_SCD_INT) { //We are allowed to send new messages again. i2cInUse = false; } #endif }
31636.c
/** * libpsd - Photoshop file formats (*.psd) decode library * Copyright (C) 2004-2007 Graphest Software. * * libpsd is the legal property of its developers, whose names are too numerous * to list here. Please refer to the COPYRIGHT file distributed with this * source distribution. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Library General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * $Id: effects.c, created by Patrick in 2006.05.23, [email protected] Exp $ */ #include "libpsd.h" #include "psd_config.h" #include "psd_system.h" #include "psd_stream.h" #include "psd_color.h" #include "psd_rect.h" #include "psd_bitmap.h" #include "psd_math.h" #include "psd_descriptor.h" extern psd_status psd_get_layer_drop_shadow(psd_context * context, psd_layer_effects_drop_shadow * drop_shadow); extern psd_status psd_get_layer_drop_shadow2(psd_context * context, psd_layer_effects_drop_shadow * drop_shadow); extern psd_status psd_get_layer_inner_shadow(psd_context * context, psd_layer_effects_inner_shadow * inner_shadow); extern psd_status psd_get_layer_inner_shadow2(psd_context * context, psd_layer_effects_inner_shadow * inner_shadow); extern psd_status psd_get_layer_outer_glow(psd_context * context, psd_layer_effects_outer_glow * outer_glow); extern psd_status psd_get_layer_outer_glow2(psd_context * context, psd_layer_effects_outer_glow * outer_glow); extern psd_status 
psd_get_layer_inner_glow(psd_context * context, psd_layer_effects_inner_glow * inner_glow); extern psd_status psd_get_layer_inner_glow2(psd_context * context, psd_layer_effects_inner_glow * inner_glow); extern psd_status psd_get_layer_bevel_emboss(psd_context * context, psd_layer_effects_bevel_emboss * bevel_emboss); extern psd_status psd_get_layer_bevel_emboss2(psd_context * context, psd_layer_effects_bevel_emboss * bevel_emboss); extern psd_status psd_get_layer_color_overlay(psd_context * context, psd_layer_effects_color_overlay * color_overlay); extern psd_status psd_get_layer_color_overlay2(psd_context * context, psd_layer_effects_color_overlay * color_overlay); extern psd_status psd_get_layer_gradient_overlay2(psd_context * context, psd_layer_effects_gradient_overlay * gradient_overlay); extern psd_status psd_get_layer_pattern_overlay2(psd_context * context, psd_layer_effects_pattern_overlay * pattern_overlay); extern psd_status psd_get_layer_stroke2(psd_context * context, psd_layer_effects_stroke * stroke); extern psd_status psd_get_layer_satin2(psd_context * context, psd_layer_effects_satin * satin); extern psd_status psd_layer_effects_blend_drop_shadow(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_inner_shadow(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_outer_glow(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_inner_glow(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_satin(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_color_overlay(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_gradient_overlay(psd_context * context, psd_layer_record 
* layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_pattern_overlay(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_stroke(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_status psd_layer_effects_blend_bevel_emboss(psd_context * context, psd_layer_record * layer, psd_layer_effects * data); extern psd_bool psd_layer_check_restricted(psd_context * context, psd_layer_record * layer); extern void psd_layer_blend_normal(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect); extern void psd_layer_blend(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect); extern void psd_layer_blend_normal_restricted(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect); extern void psd_layer_blend_restricted(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect); // Effects Layer (Photoshop 5.0) psd_status psd_get_layer_effects(psd_context * context, psd_layer_record * layer) { psd_layer_effects * data; psd_int i, size; psd_uint tag; layer->layer_info_type[layer->layer_info_count] = psd_layer_info_type_effects; data = (psd_layer_effects *)psd_malloc(sizeof(psd_layer_effects)); if(data == NULL) return psd_status_malloc_failed; memset(data, 0, sizeof(psd_layer_effects)); layer->layer_info_data[layer->layer_info_count] = (psd_uint)(void *)data; layer->layer_info_count ++; // Version: 0 if(psd_stream_get_short(context) != 0) return psd_status_effects_unsupport_version; // Effects count: may be 6 (for the 6 effects in Photoshop 5 and 6) or 7 (for // Photoshop 7.0) data->effects_count = psd_stream_get_short(context); for(i = 0; i < data->effects_count; i ++) { // Signature: '8BIM' tag = psd_stream_get_int(context); if(tag != '8BIM') return psd_status_effects_signature_error; // Effects signatures tag = psd_stream_get_int(context); switch(tag) { case 'cmnS': // common state // Effects layer, common 
state info // Size of next three items: 7 size = psd_stream_get_int(context); psd_assert(size == 7); // Version: 0 if(psd_stream_get_int(context) != 0) return psd_status_common_state_unsupport_version; // Visible: always psd_true data->visible = psd_stream_get_bool(context); // Unused: always 0 psd_stream_get_short(context); break; case 'dsdw': // drop shadow psd_get_layer_drop_shadow(context, &data->drop_shadow); data->fill[psd_layer_effects_type_drop_shadow] = psd_true; data->valid[psd_layer_effects_type_drop_shadow] = psd_true; break; case 'isdw': // inner shadow psd_get_layer_inner_shadow(context, &data->inner_shadow); data->fill[psd_layer_effects_type_inner_shadow] = psd_true; data->valid[psd_layer_effects_type_inner_shadow] = psd_true; break; case 'oglw': // outer glow psd_get_layer_outer_glow(context, &data->outer_glow); data->fill[psd_layer_effects_type_outer_glow] = psd_true; data->valid[psd_layer_effects_type_outer_glow] = psd_true; break; case 'iglw': // inner glow psd_get_layer_inner_glow(context, &data->inner_glow); data->fill[psd_layer_effects_type_inner_glow] = psd_true; data->valid[psd_layer_effects_type_inner_glow] = psd_true; break; case 'bevl': // bevel psd_get_layer_bevel_emboss(context, &data->bevel_emboss); data->fill[psd_layer_effects_type_bevel_emboss] = psd_true; data->valid[psd_layer_effects_type_bevel_emboss] = psd_true; break; case 'sofi': // solid fill (Photoshop 7.0) psd_get_layer_color_overlay(context, &data->color_overlay); data->fill[psd_layer_effects_type_color_overlay] = psd_true; data->valid[psd_layer_effects_type_color_overlay] = psd_true; break; default: psd_assert(0); return psd_status_unsupport_effects_type; } } return psd_status_done; } // Object-based effects layer info (Photoshop 6.0) psd_status psd_get_layer_effects2(psd_context * context, psd_layer_record * layer) { psd_layer_effects * data; psd_int length, number_items; psd_uint rootkey, type, key; psd_uchar keychar[256]; layer->layer_info_type[layer->layer_info_count] 
= psd_layer_info_type_effects2; data = (psd_layer_effects *)psd_malloc(sizeof(psd_layer_effects)); if(data == NULL) return psd_status_malloc_failed; memset(data, 0, sizeof(psd_layer_effects)); layer->layer_info_data[layer->layer_info_count] = (psd_uint)(void *)data; layer->layer_info_count ++; // Object effects version: 0 if(psd_stream_get_int(context) != 0) return psd_status_effects_unsupport_version; // Descriptor version ( = 16 for Photoshop 6.0). if(psd_stream_get_int(context) != 16) return psd_status_effects_unsupport_version; // Unicode string: name from classID length = psd_stream_get_int(context) * 2; psd_stream_get_null(context, length); // classID: 4 bytes (length), followed either by string or (if length is zero) 4- // byte classID length = psd_stream_get_int(context); if(length == 0) psd_stream_get_int(context); else psd_stream_get_null(context, length); // Number of items in descriptor number_items = psd_stream_get_int(context); while(number_items--) { length = psd_stream_get_int(context); if(length == 0) rootkey = psd_stream_get_int(context); else { rootkey = 0; psd_stream_get(context, keychar, length); keychar[length] = 0; } // Type: OSType key type = psd_stream_get_int(context); switch(rootkey) { case 0: // pattern overlay if(strcmp(keychar, "patternFill") == 0) { // Descriptor psd_assert(type == 'Objc'); psd_get_layer_pattern_overlay2(context, &data->pattern_overlay); data->fill[psd_layer_effects_type_pattern_overlay] = psd_true; data->valid[psd_layer_effects_type_pattern_overlay] = psd_true; data->effects_count ++; } else { psd_assert(0); psd_stream_get_object_null(type, context); } break; // scale, do not used case 'Scl ': // Unit psd_float psd_assert(type == 'UntF'); // percent key = psd_stream_get_int(context); psd_assert(key == '#Prc'); // Actual value (double) psd_stream_get_double(context); number_items --; length = psd_stream_get_int(context); if(length == 0) psd_stream_get_int(context); else psd_stream_get_null(context, length); // Type: 
OSType key type = psd_stream_get_int(context); psd_assert(type == 'bool'); // Boolean value data->visible = psd_stream_get_bool(context); break; // drop shadow case 'DrSh': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_drop_shadow2(context, &data->drop_shadow); data->fill[psd_layer_effects_type_drop_shadow] = psd_true; data->valid[psd_layer_effects_type_drop_shadow] = psd_true; data->effects_count ++; break; // inner shadow case 'IrSh': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_inner_shadow2(context, &data->inner_shadow); data->fill[psd_layer_effects_type_inner_shadow] = psd_true; data->valid[psd_layer_effects_type_inner_shadow] = psd_true; data->effects_count ++; break; // outer glow case 'OrGl': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_outer_glow2(context, &data->outer_glow); data->fill[psd_layer_effects_type_outer_glow] = psd_true; data->valid[psd_layer_effects_type_outer_glow] = psd_true; data->effects_count ++; break; // inner glow case 'IrGl': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_inner_glow2(context, &data->inner_glow); data->fill[psd_layer_effects_type_inner_glow] = psd_true; data->valid[psd_layer_effects_type_inner_glow] = psd_true; data->effects_count ++; break; // bevel and emboss case 'ebbl': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_bevel_emboss2(context, &data->bevel_emboss); data->fill[psd_layer_effects_type_bevel_emboss] = psd_true; data->valid[psd_layer_effects_type_bevel_emboss] = psd_true; data->effects_count ++; break; // satin case 'ChFX': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_satin2(context, &data->satin); data->fill[psd_layer_effects_type_satin] = psd_true; data->valid[psd_layer_effects_type_satin] = psd_true; data->effects_count ++; break; // color overlay case 'SoFi': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_color_overlay2(context, &data->color_overlay); data->fill[psd_layer_effects_type_color_overlay] = psd_true; 
data->valid[psd_layer_effects_type_color_overlay] = psd_true; data->effects_count ++; break; // gradient overlay case 'GrFl': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_gradient_overlay2(context, &data->gradient_overlay); data->fill[psd_layer_effects_type_gradient_overlay] = psd_true; data->valid[psd_layer_effects_type_gradient_overlay] = psd_true; data->effects_count ++; break; // stroke case 'FrFX': // Descriptor psd_assert(type == 'Objc'); psd_get_layer_stroke2(context, &data->stroke); data->fill[psd_layer_effects_type_stroke] = psd_true; data->valid[psd_layer_effects_type_stroke] = psd_true; data->effects_count ++; break; default: psd_assert(0); psd_stream_get_object_null(type, context); break; } } return psd_status_done; } psd_status psd_layer_effects_update(psd_layer_record * layer, psd_layer_effects_type type) { psd_layer_effects * data = NULL; psd_int i; if(layer == NULL) return psd_status_invalid_layer; if(type < 0 || type >= psd_layer_effects_type_count) return psd_status_invalid_layer_effects; for(i = 0; i < layer->layer_info_count; i ++) { if(layer->layer_info_type[i] == psd_layer_info_type_effects) { data = (psd_layer_effects *)layer->layer_info_data[i]; break; } } if(data == NULL) return psd_status_invalid_layer_effects; data->valid[type] = psd_true; return psd_status_done; } #ifdef PSD_SUPPORT_EFFECTS_BLEND static void psd_layer_blend_effects_image(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect, psd_layer_effects * data, psd_int index) { psd_rect layer_rect, mask_rect; psd_layer_record effects_layer; if(data->image_data[index] == NULL || data->opacity[index] == 0) return; memcpy(&effects_layer, layer, sizeof(psd_layer_record)); effects_layer.left = data->left[index] + layer->left; effects_layer.top = data->top[index] + layer->top; effects_layer.right = data->right[index] + layer->left; effects_layer.bottom = data->bottom[index] + layer->top; effects_layer.width = data->width[index]; effects_layer.height = 
data->height[index]; effects_layer.fill_opacity = data->opacity[index]; effects_layer.blend_mode = data->blend_mode[index]; effects_layer.image_data = data->image_data[index]; effects_layer.layer_mask_info.disabled = psd_true; psd_make_rect(&layer_rect, effects_layer.left, effects_layer.top, effects_layer.right, effects_layer.bottom); if(psd_incept_rect(dst_rect, &layer_rect, &layer_rect) == psd_true) { if(effects_layer.layer_mask_info.disabled == psd_false && effects_layer.layer_mask_info.mask_data != NULL && effects_layer.layer_mask_info.default_color == 0) { psd_make_rect(&mask_rect, effects_layer.layer_mask_info.left, effects_layer.layer_mask_info.top, effects_layer.layer_mask_info.right, effects_layer.layer_mask_info.bottom); if(psd_incept_rect(&mask_rect, &layer_rect, &layer_rect) == psd_false) return; } if(psd_layer_check_restricted(context, &effects_layer) == psd_true) { if(effects_layer.blend_mode != psd_blend_mode_normal || (effects_layer.group_layer != NULL && effects_layer.group_layer->divider_blend_mode != psd_blend_mode_pass_through)) psd_layer_blend_restricted(context, &effects_layer, &layer_rect); else psd_layer_blend_normal_restricted(context, &effects_layer, &layer_rect); } else { if(effects_layer.blend_mode != psd_blend_mode_normal || (effects_layer.group_layer != NULL && effects_layer.group_layer->divider_blend_mode != psd_blend_mode_pass_through)) psd_layer_blend(context, &effects_layer, &layer_rect); else psd_layer_blend_normal(context, &effects_layer, &layer_rect); } } } psd_bool psd_layer_effects_blend_background(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect) { psd_layer_effects * data = NULL; psd_int i; if(layer->image_data == NULL || layer->width <= 0 || layer->height <= 0) return psd_false; for(i = 0; i < layer->layer_info_count; i ++) { if(layer->layer_info_type[i] == psd_layer_info_type_effects2) { data = (psd_layer_effects *)layer->layer_info_data[i]; break; } } if(data == NULL) { for(i = 0; i < 
layer->layer_info_count; i ++) { if(layer->layer_info_type[i] == psd_layer_info_type_effects) { data = (psd_layer_effects *)layer->layer_info_data[i]; break; } } } if(data == NULL) return psd_false; if(data->effects_count == 0 || data->visible == psd_false) return psd_false; for(i = psd_layer_effects_type_count - 1; i >= 0; i --) { if(data->fill[i] == psd_false) continue; switch(i) { case psd_layer_effects_type_drop_shadow: if(data->drop_shadow.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_drop_shadow(context, layer, data); break; case psd_layer_effects_type_bevel_emboss: if(data->bevel_emboss.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) NULL; break; default: continue; } if(i == psd_layer_effects_type_bevel_emboss) { psd_layer_blend_effects_image(context, layer, dst_rect, data, psd_layer_effects_bevel_emboss_outer_shadow); psd_layer_blend_effects_image(context, layer, dst_rect, data, psd_layer_effects_bevel_emboss_inner_shadow); } else { psd_layer_blend_effects_image(context, layer, dst_rect, data, i); } } return psd_true; } psd_bool psd_layer_effects_blend_foreground(psd_context * context, psd_layer_record * layer, psd_rect * dst_rect) { psd_layer_effects * data = NULL; psd_int i; if(layer->image_data == NULL || layer->width <= 0 || layer->height <= 0) return psd_false; for(i = 0; i < layer->layer_info_count; i ++) { if(layer->layer_info_type[i] == psd_layer_info_type_effects2) { data = (psd_layer_effects *)layer->layer_info_data[i]; break; } } if(data == NULL) { for(i = 0; i < layer->layer_info_count; i ++) { if(layer->layer_info_type[i] == psd_layer_info_type_effects) { data = (psd_layer_effects *)layer->layer_info_data[i]; break; } } } if(data == NULL) return psd_false; if(data->effects_count == 0 || data->visible == psd_false) return psd_false; for(i = psd_layer_effects_type_count - 1; i >= 0; i --) { if(data->fill[i] == psd_false) continue; switch(i) { case 
psd_layer_effects_type_inner_shadow: if(data->inner_shadow.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_inner_shadow(context, layer, data); break; case psd_layer_effects_type_outer_glow: if(data->outer_glow.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_outer_glow(context, layer, data); break; case psd_layer_effects_type_inner_glow: if(data->inner_glow.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_inner_glow(context, layer, data); break; case psd_layer_effects_type_bevel_emboss: if(data->bevel_emboss.effect_enable == psd_false) continue; // do nothing break; case psd_layer_effects_type_satin: if(data->satin.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_satin(context, layer, data); break; case psd_layer_effects_type_color_overlay: if(data->color_overlay.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_color_overlay(context, layer, data); break; case psd_layer_effects_type_gradient_overlay: if(data->gradient_overlay.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_gradient_overlay(context, layer, data); break; case psd_layer_effects_type_pattern_overlay: if(data->pattern_overlay.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_pattern_overlay(context, layer, data); break; case psd_layer_effects_type_stroke: if(data->stroke.effect_enable == psd_false) continue; if(data->valid[i] == psd_true) psd_layer_effects_blend_stroke(context, layer, data); break; default: continue; } if(data->image_data[i] == NULL) continue; if(i == psd_layer_effects_type_bevel_emboss) { psd_layer_blend_effects_image(context, layer, dst_rect, data, psd_layer_effects_bevel_emboss_outer_light); psd_layer_blend_effects_image(context, layer, dst_rect, data, 
psd_layer_effects_bevel_emboss_inner_light); psd_layer_blend_effects_image(context, layer, dst_rect, data, psd_layer_effects_bevel_emboss_texture); } else { psd_layer_blend_effects_image(context, layer, dst_rect, data, i); } } return psd_true; } void psd_effects_add_noise(psd_bitmap * bitmap, psd_int noise, psd_int left, psd_int top, psd_context * context) { psd_int i, j, x, y, width, height; psd_int src_alpha, dst_alpha, next_alpha; psd_argb_color * src_data, * dst_data; psd_uchar * rand_data; psd_bitmap noise_bmp; width = bitmap->width; height = bitmap->height; psd_get_bitmap(&noise_bmp, width, height, context); psd_fill_bitmap(&noise_bmp, psd_color_clear); for(i = PSD_MAX(-top, 0); i < height; i ++) { if(i + top >= context->height) break; src_data = bitmap->image_data + i * bitmap->width + PSD_MAX(-left, 0); rand_data = context->rand_data + (i + top) * context->width + left; for(j = PSD_MAX(-left, 0); j < width; j ++, src_data ++, rand_data ++) { if(j + left >= context->width) break; x = j + (*rand_data >> 4) - 8; y = i + (*rand_data & 0x0F) - 8; x = (x + j) >> 1; y = (y + i) >> 1; if(x < 0 || x >= width || y < 0 || y >= height) continue; src_alpha = PSD_GET_ALPHA_COMPONENT(*src_data); dst_data = noise_bmp.image_data + y * width + x; dst_alpha = PSD_GET_ALPHA_COMPONENT(*dst_data); if(dst_alpha + src_alpha <= 255) { dst_alpha = dst_alpha + src_alpha; *dst_data = (*src_data & 0x00FFFFFF) | (dst_alpha << 24); } else { *dst_data = (*src_data & 0x00FFFFFF) | 0xFF000000; if(x + 1 < width) { next_alpha = dst_alpha + src_alpha - 255; *(dst_data + 1) = (*src_data & 0x00FFFFFF) | (next_alpha << 24); } } } } noise = noise * 256 / 100; if(noise == 256) { psd_copy_bitmap(bitmap, &noise_bmp); } else { src_data = noise_bmp.image_data; dst_data = bitmap->image_data; for(i = width * height; i --; src_data ++, dst_data ++) { src_alpha = PSD_GET_ALPHA_COMPONENT(*src_data); dst_alpha = PSD_GET_ALPHA_COMPONENT(*dst_data); dst_alpha = ((dst_alpha << 8) + (src_alpha - dst_alpha) * 
noise) >> 8; *dst_data = (*dst_data & 0x00FFFFFF) | (dst_alpha << 24); } } } void psd_effects_apply_gradient(psd_bitmap * bitmap, psd_argb_color * gradient_table, psd_bool edge_hidden, psd_int jitter, psd_int left, psd_int top, psd_context * context) { psd_int i, j, alpha, gradient_index; psd_argb_color * dst_data; psd_uchar * rand_data; if(jitter > 0) { jitter = jitter * 256 / 100; for(i = PSD_MAX(-top, 0); i < bitmap->height; i ++) { if(i + top >= context->height) break; dst_data = bitmap->image_data + i * bitmap->width + PSD_MAX(-left, 0); rand_data = context->rand_data + (i + top) * context->width + left; for(j = PSD_MAX(-left, 0); j < bitmap->width; j ++, dst_data ++, rand_data ++) { if(j + left >= context->width) break; alpha = PSD_GET_ALPHA_COMPONENT(*dst_data); gradient_index = 255 - alpha; gradient_index += jitter * *rand_data >> 8; gradient_index &= 0xFF; if(edge_hidden == psd_true && alpha < 24) { if(PSD_GET_ALPHA_COMPONENT(gradient_table[gradient_index]) != 255) { *dst_data = gradient_table[gradient_index]; } else { *dst_data = (gradient_table[gradient_index] & 0x00FFFFFF) | (alpha * 10 * PSD_GET_ALPHA_COMPONENT(gradient_table[gradient_index]) >> 8 << 24); } } else { *dst_data = gradient_table[gradient_index]; } } } } else { dst_data = bitmap->image_data; for(i = bitmap->width * bitmap->height; i --; ) { alpha = PSD_GET_ALPHA_COMPONENT(*dst_data); if(edge_hidden == psd_true && alpha < 24) { if(PSD_GET_ALPHA_COMPONENT(gradient_table[255 - alpha]) != 255) { *dst_data = gradient_table[255 - alpha]; } else { *dst_data = (gradient_table[255 - alpha] & 0x00FFFFFF) | (alpha * 10 * PSD_GET_ALPHA_COMPONENT(gradient_table[255 - alpha]) >> 8 << 24); } } else { *dst_data = gradient_table[255 - alpha]; } dst_data ++; } } } #endif // ifdef PSD_SUPPORT_EFFECTS_BLEND static void psd_gradient_color_free(psd_gradient_color * gradient_color) { psd_freeif(gradient_color->name); psd_freeif(gradient_color->color_stop); psd_freeif(gradient_color->transparency_stop); } static 
void psd_pattern_info_free(psd_pattern_info * pattern_info)
{
	/* Release the heap-allocated pattern name (psd_freeif is a NULL-safe free). */
	psd_freeif(pattern_info->name);
}

/* Destroy a layer-effects info record: drop every cached effect image,
 * release the owned pattern / gradient resources, then free the record
 * itself.  A NULL handle is tolerated and ignored. */
void psd_layer_effects_free(psd_uint layer_info)
{
	psd_layer_effects * data = (psd_layer_effects *)layer_info;
	psd_int idx;

	if(data == NULL)
		return;

	for(idx = 0; idx < psd_layer_effects_image_count; idx ++)
	{
		psd_freeif(data->image_data[idx]);
		data->image_data[idx] = NULL;
	}

	psd_pattern_info_free(&data->bevel_emboss.texture_pattern_info);
	psd_gradient_color_free(&data->gradient_overlay.gradient_color);
	psd_pattern_info_free(&data->pattern_overlay.pattern_info);
	psd_gradient_color_free(&data->stroke.gradient_color);
	psd_pattern_info_free(&data->stroke.pattern_info);
	psd_free(data);
}
546646.c
/**
 * @license Apache-2.0
 *
 * Copyright (c) 2020 The Stdlib Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
* Benchmark `smeanors`.
*/
#include "stdlib/stats/base/smeanors.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>

#define NAME "smeanors"
#define ITERATIONS 1000000
#define REPEATS 3
#define MIN 1
#define MAX 6

/**
* Prints the TAP version.
*/
void print_version( void ) {
	printf( "TAP version 13\n" );
}

/**
* Prints the TAP summary.
*
* @param total    total number of tests
* @param passing  total number of passing tests
*/
void print_summary( int total, int passing ) {
	printf( "#\n" );
	printf( "1..%d\n", total ); // TAP plan
	printf( "# total %d\n", total );
	printf( "# pass  %d\n", passing );
	printf( "#\n" );
	printf( "# ok\n" );
}

/**
* Prints benchmarks results.
*
* @param iterations  number of iterations
* @param elapsed     elapsed time in seconds
*/
void print_results( int iterations, double elapsed ) {
	double rate = (double)iterations / elapsed;
	printf( "  ---\n" );
	printf( "  iterations: %d\n", iterations );
	printf( "  elapsed: %0.9f\n", elapsed );
	printf( "  rate: %0.9f\n", rate );
	printf( "  ...\n" );
}

/**
* Returns a clock time.
*
* @return clock time (seconds since the Epoch, with microsecond resolution)
*/
double tic( void ) {
	struct timeval now;
	gettimeofday( &now, NULL );
	return (double)now.tv_sec + (double)now.tv_usec/1.0e6;
}

/**
* Generates a random number on the interval [0,1).
*
* @return random number
*/
float rand_float( void ) {
	int r = rand();
	return (float)r / ( (float)RAND_MAX + 1.0f );
}

/**
* Runs a benchmark.
*
* @param iterations  number of iterations
* @param len         array length
* @return elapsed time in seconds
*/
double benchmark( int iterations, int len ) {
	double elapsed;
	float *x;
	float v;
	double t;
	int i;

	// Allocate on the heap: `len` can reach 10^6 (~4 MB), which would
	// risk a stack overflow as a variable-length array...
	x = malloc( (size_t)len * sizeof( float ) );
	if ( x == NULL ) {
		fprintf( stderr, "Error allocating memory.\n" );
		exit( 1 );
	}
	for ( i = 0; i < len; i++ ) {
		x[ i ] = ( rand_float()*20000.0f ) - 10000.0f;
	}
	t = tic();
	for ( i = 0; i < iterations; i++ ) {
		v = stdlib_strided_smeanors( len, x, 1 );
		if ( v != v ) { // NaN check (NaN is the only value unequal to itself)
			printf( "should not return NaN\n" );
			break;
		}
	}
	elapsed = tic() - t;
	if ( v != v ) {
		printf( "should not return NaN\n" );
	}
	free( x );
	return elapsed;
}

/**
* Main execution sequence.
*/
int main( void ) {
	double elapsed;
	int count;
	int iter;
	int len;
	int i;
	int j;

	// Use the current time to seed the random number generator:
	srand( time( NULL ) );

	print_version();
	count = 0;
	for ( i = MIN; i <= MAX; i++ ) {
		// Grow the array by a decade while shrinking the iteration count,
		// keeping total work roughly constant across sizes:
		len = pow( 10, i );
		iter = ITERATIONS / pow( 10, i-1 );
		for ( j = 0; j < REPEATS; j++ ) {
			count += 1;
			printf( "# c::%s:len=%d\n", NAME, len );
			elapsed = benchmark( iter, len );
			print_results( iter, elapsed );
			printf( "ok %d benchmark finished\n", count );
		}
	}
	print_summary( count, count );
	return 0;
}
561100.c
/* clagtm.c -- C translation (f2c style) of the LAPACK auxiliary routine
	CLAGTM.  You must link the resulting object file with libf2c:
	on Microsoft Windows system, link with libf2c.lib;
	on Linux or Unix systems, link with .../path/to/libf2c.a -lm
	or, if you install libf2c.a in a standard place, with -lf2c -lm
	-- in that order, at the end of the command line, as in
		cc *.o -lf2c -lm
	Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
		http://www.netlib.org/f2c/libf2c.zip
*/

#include "f2c.h"
#include "blaswrap.h"

/* CLAGTM performs a tridiagonal matrix-matrix product of the form

       B := alpha * op(A) * X + beta * B

   where A is a tridiagonal matrix of order N with sub-diagonal DL(1..N-1),
   diagonal D(1..N) and super-diagonal DU(1..N-1); X and B are N-by-NRHS
   matrices with leading dimensions LDX and LDB (both >= max(N,1)); and
   ALPHA and BETA are real scalars.

   TRANS selects op(A):
       'N'  op(A) = A         (no transpose)
       'T'  op(A) = A**T      (transpose)
       'C'  op(A) = A**H      (conjugate transpose)

   ALPHA must be 0., 1., or -1.; any other value is treated as 0.
   BETA  must be 0., 1., or -1.; any other value is treated as 1.
   On exit B is overwritten with the result. */

/* Subroutine */ int clagtm_(char *trans, integer *n, integer *nrhs, real *
	alpha, complex *dl, complex *d__, complex *du, complex *x, integer *
	ldx, real *beta, complex *b, integer *ldb)
{
    /* Local variables */
    integer i__, j;
    real sgn;
    logical notran, conjop;
    complex *xcol, *bcol;
    complex lo, md, hi;
    extern logical lsame_(char *, char *);

    /* Parameter adjustments: shift the argument arrays so Fortran-style
       1-based indexing can be used throughout (matches the f2c layout). */
    --dl;
    --d__;
    --du;
    x -= 1 + *ldx;
    b -= 1 + *ldb;

    /* Function Body */
    if (*n == 0) {
	return 0;
    }

    /* B := beta * B.  Only 0 and -1 require any work; every other value
       of BETA behaves like 1 and leaves B untouched. */
    if (*beta == 0.f) {
	for (j = 1; j <= *nrhs; ++j) {
	    bcol = &b[j * *ldb];
	    for (i__ = 1; i__ <= *n; ++i__) {
		bcol[i__].r = 0.f, bcol[i__].i = 0.f;
	    }
	}
    } else if (*beta == -1.f) {
	for (j = 1; j <= *nrhs; ++j) {
	    bcol = &b[j * *ldb];
	    for (i__ = 1; i__ <= *n; ++i__) {
		bcol[i__].r = -bcol[i__].r, bcol[i__].i = -bcol[i__].i;
	    }
	}
    }

    /* Only ALPHA = 1 or ALPHA = -1 contribute; anything else acts as 0. */
    if (*alpha == 1.f) {
	sgn = 1.f;
    } else if (*alpha == -1.f) {
	sgn = -1.f;
    } else {
	return 0;
    }

    notran = lsame_(trans, "N");
    conjop = lsame_(trans, "C");
    if (! notran && ! conjop && ! lsame_(trans, "T")) {
	/* Unrecognized TRANS: no multiply-add is performed. */
	return 0;
    }

    /* Row i of op(A) touches at most three band entries:

	   op(A) = A     :  DL(i-1), D(i), DU(i)
	   op(A) = A**T  :  DU(i-1), D(i), DL(i)
	   op(A) = A**H  :  conj(DU(i-1)), conj(D(i)), conj(DL(i))

       Accumulate B(i,j) +/-= row_i(op(A)) . X(:,j), adding the terms in
       the same order as the reference code (sub-diagonal, diagonal,
       super-diagonal) so the floating-point result is unchanged. */
    for (j = 1; j <= *nrhs; ++j) {
	xcol = &x[j * *ldx];
	bcol = &b[j * *ldb];
	for (i__ = 1; i__ <= *n; ++i__) {
	    complex acc = bcol[i__];
	    if (i__ > 1) {
		lo = notran ? dl[i__ - 1] : du[i__ - 1];
		if (conjop) {
		    lo.i = -lo.i;
		}
		acc.r += sgn * (lo.r * xcol[i__ - 1].r - lo.i * xcol[i__ - 1].i);
		acc.i += sgn * (lo.r * xcol[i__ - 1].i + lo.i * xcol[i__ - 1].r);
	    }
	    md = d__[i__];
	    if (conjop) {
		md.i = -md.i;
	    }
	    acc.r += sgn * (md.r * xcol[i__].r - md.i * xcol[i__].i);
	    acc.i += sgn * (md.r * xcol[i__].i + md.i * xcol[i__].r);
	    if (i__ < *n) {
		hi = notran ? du[i__] : dl[i__];
		if (conjop) {
		    hi.i = -hi.i;
		}
		acc.r += sgn * (hi.r * xcol[i__ + 1].r - hi.i * xcol[i__ + 1].i);
		acc.i += sgn * (hi.r * xcol[i__ + 1].i + hi.i * xcol[i__ + 1].r);
	    }
	    bcol[i__] = acc;
	}
    }

    return 0;

/* End of CLAGTM */

} /* clagtm_ */
16170.c
#include "../../include/stringptr.h"

//TODO use read() API because fgets pulls in a whole lot of bloat
#include <stdio.h>

/* Read a single line from stdin into the caller-supplied buffer and wrap it
 * in a stringptr.  `buf` must be large enough to hold the longest expected
 * input line.  When `chomp` is non-zero, stringptr_shiftleft() is applied to
 * drop the trailing newline.  On EOF or read error a {NULL, 0} stringptr is
 * returned. */
stringptr read_stdin_line(char* buf, size_t bufsize, int chomp) {
	static const stringptr empty = {NULL, 0};
	stringptr line;
	char* got = fgets(buf, bufsize, stdin);

	if(!got)
		return empty;
	stringptr_fromchar(got, &line);
	if(chomp)
		stringptr_shiftleft(&line, 1); // remove trailing \n
	return line;
}
220887.c
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*  Fluent Bit
 *  ==========
 *  Copyright (C) 2019      The Fluent Bit Authors
 *  Copyright (C) 2015-2018 Treasure Data Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <fluent-bit/flb_compat.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_log.h>

#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

/* Return non-zero if 'c' is an octal digit (0-7). */
static int octal_digit(char c)
{
    return (c >= '0' && c <= '7');
}

/* Return non-zero if 'c' is a hexadecimal digit. */
static int hex_digit(char c)
{
    return ((c >= '0' && c <= '9') ||
            (c >= 'A' && c <= 'F') ||
            (c >= 'a' && c <= 'f'));
}

/*
 * Encode code point 'ch' as UTF-8 into 'dest' (1 to 4 bytes, not
 * NUL-terminated).  Returns the number of bytes written, or 0 if 'ch'
 * is beyond the Unicode range (>= 0x110000).
 */
static int u8_wc_toutf8(char *dest, uint32_t ch)
{
    if (ch < 0x80) {
        dest[0] = (char)ch;
        return 1;
    }
    if (ch < 0x800) {
        dest[0] = (ch>>6) | 0xC0;
        dest[1] = (ch & 0x3F) | 0x80;
        return 2;
    }
    if (ch < 0x10000) {
        dest[0] = (ch>>12) | 0xE0;
        dest[1] = ((ch>>6) & 0x3F) | 0x80;
        dest[2] = (ch & 0x3F) | 0x80;
        return 3;
    }
    if (ch < 0x110000) {
        dest[0] = (ch>>18) | 0xF0;
        dest[1] = ((ch>>12) & 0x3F) | 0x80;
        dest[2] = ((ch>>6) & 0x3F) | 0x80;
        dest[3] = (ch & 0x3F) | 0x80;
        return 4;
    }
    return 0;
}

/* assumes that src points to the character after a backslash
   returns number of input characters processed.
   Decoded code point is stored in *dest; handles the C-style single
   character escapes plus octal, \xHH, \uHHHH and \UHHHHHHHH forms. */
static int u8_read_escape_sequence(const char *str, uint32_t *dest)
{
    uint32_t ch;
    char digs[9]="\0\0\0\0\0\0\0\0";
    int dno=0, i=1;

    ch = (uint32_t)str[0];    /* take literal character */
    if (str[0] == 'n')
        ch = L'\n';
    else if (str[0] == 't')
        ch = L'\t';
    else if (str[0] == 'r')
        ch = L'\r';
    else if (str[0] == 'b')
        ch = L'\b';
    else if (str[0] == 'f')
        ch = L'\f';
    else if (str[0] == 'v')
        ch = L'\v';
    else if (str[0] == 'a')
        ch = L'\a';
    else if (octal_digit(str[0])) {
        i = 0;
        do {
            digs[dno++] = str[i++];
        } while (octal_digit(str[i]) && dno < 3);
        ch = strtol(digs, NULL, 8);
    }
    else if (str[0] == 'x') {
        while (hex_digit(str[i]) && dno < 2) {
            digs[dno++] = str[i++];
        }
        if (dno > 0)
            ch = strtol(digs, NULL, 16);
    }
    else if (str[0] == 'u') {
        while (hex_digit(str[i]) && dno < 4) {
            digs[dno++] = str[i++];
        }
        if (dno > 0)
            ch = strtol(digs, NULL, 16);
    }
    else if (str[0] == 'U') {
        while (hex_digit(str[i]) && dno < 8) {
            digs[dno++] = str[i++];
        }
        if (dno > 0)
            ch = strtol(digs, NULL, 16);
    }
    *dest = ch;

    return i;
}

/* Return non-zero if 'c' is a character that may follow a backslash in a
 * JSON-style escape sequence.  Every character accepted here MUST have a
 * matching case in the switch in flb_unescape_string_utf8() below. */
static inline int is_json_escape(char c)
{
    return (
            (c == '\"') || /* double-quote  */
            (c == '\'') || /* single-quote  */
            (c == '\\') || /* solidus       */
            (c == 'n' ) || /* new-line      */
            (c == 'r' ) || /* carriage return */
            (c == 't' ) || /* horizontal tab  */
            (c == 'b' ) || /* backspace     */
            (c == 'f' ) || /* form feed     */
            (c == '/' )    /* reverse-solidus */
            );
}

/*
 * Unescape at most 'sz' bytes from 'in_buf' into 'out_buf', decoding
 * JSON-style escapes and \x/\u/\U sequences, and re-encoding the resulting
 * code points as UTF-8.  'out_buf' must be at least 'sz'+1 bytes; it is
 * always NUL-terminated.  Returns the number of bytes written (excluding
 * the terminator).
 */
int flb_unescape_string_utf8(const char *in_buf, int sz, char *out_buf)
{
    uint32_t ch;
    char temp[4];
    const char *next;

    int count_out = 0;
    int count_in = 0;
    int esc_in = 0;   /* input bytes consumed by the current sequence  */
    int esc_out = 0;  /* output bytes produced by the current sequence */

    while (*in_buf && count_in < sz) {
        next = in_buf + 1;

        if (*in_buf == '\\') {
            if (is_json_escape(*next)) {
                switch (*next) {
                case '"':
                    ch = '"';
                    break;
                case '\'':
                    /* FIX: '\'' is accepted by is_json_escape() but had no
                     * case here, so "\'" left 'ch' holding a stale (or, on
                     * the first iteration, uninitialized) value. */
                    ch = '\'';
                    break;
                case '\\':
                    ch = '\\';
                    break;
                case '/':
                    ch = '/';
                    break;
                case 'n':
                    ch = '\n';
                    break;
                case 'a':
                    ch = '\a';
                    break;
                case 'b':
                    ch = '\b';
                    break;
                case 't':
                    ch = '\t';
                    break;
                case 'v':
                    ch = '\v';
                    break;
                case 'f':
                    ch = '\f';
                    break;
                case 'r':
                    ch = '\r';
                    break;
                default:
                    /* Unreachable while the switch mirrors is_json_escape();
                     * keep the escaped character literally if they drift. */
                    ch = (uint32_t) *next;
                    break;
                }
                esc_in = 2;
            }
            else {
                /* Octal, \xHH, \uHHHH or \UHHHHHHHH sequence (+1 for the
                 * backslash itself). */
                esc_in = u8_read_escape_sequence((in_buf + 1), &ch) + 1;
            }
        }
        else {
            ch = (uint32_t) *in_buf;
            esc_in = 1;
        }

        in_buf += esc_in;
        count_in += esc_in;

        esc_out = u8_wc_toutf8(temp, ch);

        if (esc_out > sz-count_out) {
            flb_error("Crossing over string boundary");
            break;
        }

        if (esc_out == 0) {
            /* Code point out of Unicode range: emit the low byte as-is. */
            out_buf[count_out] = ch;
            esc_out = 1;
        }
        else if (esc_out == 1) {
            out_buf[count_out] = temp[0];
        }
        else {
            memcpy(&out_buf[count_out], temp, esc_out);
        }
        count_out += esc_out;
    }

    if (count_in < sz) {
        flb_error("Not at boundary but still NULL terminating : %d - '%s'",
                  sz, in_buf);
    }

    out_buf[count_out] = '\0';
    return count_out;
}

/*
 * Unescape the simple C escapes (\n \a \b \t \v \f \r \\) from 'buf' into
 * the caller-provided buffer *unesc_buf (must hold at least buf_len+1
 * bytes).  Unknown escapes keep the backslash.  Returns the number of bytes
 * written (excluding the NUL terminator).
 */
int flb_unescape_string(const char *buf, int buf_len, char **unesc_buf)
{
    int i = 0;
    int j = 0;
    char *p;
    char n;

    p = *unesc_buf;
    while (i < buf_len) {
        if (buf[i] == '\\') {
            if (i + 1 < buf_len) {
                n = buf[i + 1];
                if (n == 'n') {
                    p[j++] = '\n';
                    i++;
                }
                else if (n == 'a') {
                    p[j++] = '\a';
                    i++;
                }
                else if (n == 'b') {
                    p[j++] = '\b';
                    i++;
                }
                else if (n == 't') {
                    p[j++] = '\t';
                    i++;
                }
                else if (n == 'v') {
                    p[j++] = '\v';
                    i++;
                }
                else if (n == 'f') {
                    p[j++] = '\f';
                    i++;
                }
                else if (n == 'r') {
                    p[j++] = '\r';
                    i++;
                }
                else if (n == '\\') {
                    p[j++] = '\\';
                    i++;
                }
                i++;
                continue;
            }
            else {
                i++;
            }
        }
        p[j++] = buf[i++];
    }
    p[j] = '\0';
    return j;
}
721352.c
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include <limits.h>
#include <string.h>

#include <grpc/load_reporting.h>
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

#include "src/core/ext/filters/load_reporting/load_reporting.h"
#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"

/* True when the channel args carry GRPC_ARG_ENABLE_LOAD_REPORTING set to a
 * truthy value; defaults to false when the arg is absent. */
static bool is_load_reporting_enabled(const grpc_channel_args *channel_args) {
  return grpc_channel_arg_get_bool(
      grpc_channel_args_find(channel_args, GRPC_ARG_ENABLE_LOAD_REPORTING),
      false);
}

/* Channel-init stage: prepend the load-reporting filter (passed via 'arg')
 * when the channel opted in; otherwise leave the stack untouched. */
static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx,
                                            grpc_channel_stack_builder *builder,
                                            void *arg) {
  const grpc_channel_args *args =
      grpc_channel_stack_builder_get_channel_arguments(builder);
  if (!is_load_reporting_enabled(args)) {
    return true;
  }
  return grpc_channel_stack_builder_prepend_filter(
      builder, (const grpc_channel_filter *)arg, NULL, NULL);
}

/* Convenience constructor for the channel arg that turns the feature on. */
grpc_arg grpc_load_reporting_enable_arg() {
  return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
}

/* Plugin registration */

void grpc_load_reporting_plugin_init(void) {
  grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
                                   maybe_add_load_reporting_filter,
                                   (void *)&grpc_load_reporting_filter);
}

void grpc_load_reporting_plugin_shutdown() {}
706414.c
/* ************************************************************************** */
/*                                                                            */
/*                                                        :::      ::::::::   */
/*   ft_strlen.c                                        :+:      :+:    :+:   */
/*                                                    +:+ +:+         +:+     */
/*   By: shovsepy <[email protected]>          +#+  +:+       +#+        */
/*                                                +#+#+#+#+#+   +#+           */
/*   Created: 2021/01/28 20:55:22 by shovsepy          #+#    #+#             */
/*   Updated: 2021/02/02 19:14:59 by shovsepy         ###   ########.fr       */
/*                                                                            */
/* ************************************************************************** */

#include "libft.h"

/*
** Count the characters of 'str' up to (not including) the terminating '\0'.
** A NULL pointer is tolerated and reported as length 0.
*/

size_t	ft_strlen(const char *str)
{
	const char	*end;

	if (!str)
		return (0);
	end = str;
	while (*end != '\0')
		end++;
	return ((size_t)(end - str));
}
628327.c
// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// ESP32 brownout detector setup: configures the RTC brownout-detection
// hardware and installs an ISR that resets the chip when supply voltage
// drops below the configured threshold.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "soc/cpu.h"
#include "soc/rtc_cntl_reg.h"
#include "esp32/rom/ets_sys.h"
#include "esp_private/system_internal.h"
#include "driver/rtc_cntl.h"
#include "freertos/FreeRTOS.h"

/* Detection threshold level comes from Kconfig; 0 when the option is unset. */
#ifdef CONFIG_ESP32_BROWNOUT_DET_LVL
#define BROWNOUT_DET_LVL CONFIG_ESP32_BROWNOUT_DET_LVL
#else
#define BROWNOUT_DET_LVL 0
#endif //CONFIG_ESP32_BROWNOUT_DET_LVL

/* ISR invoked by the RTC interrupt dispatcher on a brownout event.
 * It never returns normally: it records the reset reason and restarts. */
static void rtc_brownout_isr_handler()
{
    /* Normally RTC ISR clears the interrupt flag after the application-supplied
     * handler returns. Since restart is called here, the flag needs to be
     * cleared manually.
     */
    REG_WRITE(RTC_CNTL_INT_CLR_REG, RTC_CNTL_BROWN_OUT_INT_CLR);
    /* Stall the other CPU to make sure the code running there doesn't use UART
     * at the same time as the following ets_printf.
     */
    esp_cpu_stall(!xPortGetCoreID());
    /* Make the next boot report ESP_RST_BROWNOUT as its reset cause. */
    esp_reset_reason_set_hint(ESP_RST_BROWNOUT);
    ets_printf("\r\nBrownout detector was triggered\r\n\r\n");
    /* Restart without running the normal shutdown path (we are in an ISR
     * with the other core stalled). */
    esp_restart_noos();
}

/* Enable the brownout detector: program the BOD hardware, register the ISR
 * above with the RTC interrupt dispatcher, then unmask the interrupt.
 * The register write must happen before the interrupt is enabled. */
void esp_brownout_init()
{
    REG_WRITE(RTC_CNTL_BROWN_OUT_REG, RTC_CNTL_BROWN_OUT_ENA /* Enable BOD */
            | RTC_CNTL_BROWN_OUT_PD_RF_ENA /* Automatically power down RF */
            /* Reset timeout must be set to >1 even if BOR feature is not used */
            | (2 << RTC_CNTL_BROWN_OUT_RST_WAIT_S)
            | (BROWNOUT_DET_LVL << RTC_CNTL_DBROWN_OUT_THRES_S));

    ESP_ERROR_CHECK( rtc_isr_register(rtc_brownout_isr_handler, NULL,
                                      RTC_CNTL_BROWN_OUT_INT_ENA_M) );

    REG_SET_BIT(RTC_CNTL_INT_ENA_REG, RTC_CNTL_BROWN_OUT_INT_ENA_M);
}
340138.c
/*
 * Academic License - for use in teaching, academic research, and meeting
 * course requirements at degree granting institutions only.  Not for
 * government, commercial, or other organizational use.
 *
 * _coder_sprdmpF10_info.c
 *
 * Code generation for function '_coder_sprdmpF10_info'
 *
 */

/* NOTE: This file is auto-generated by MATLAB Coder.  Do not hand-edit;
 * regenerate it from the MATLAB sources instead. */

/* Include files */
#include "rt_nonfinite.h"
#include "sprdmpF10.h"
#include "_coder_sprdmpF10_info.h"

/* Function Definitions */

/* Build the mxArray of MEX function properties (entry points, version,
 * source path, timestamp) that MATLAB queries from the generated MEX. */
mxArray *emlrtMexFcnProperties(void)
{
  mxArray *xResult;
  mxArray *xEntryPoints;
  /* Field names of the per-entry-point struct. */
  const char * fldNames[6] = { "Name", "NumberOfInputs", "NumberOfOutputs",
    "ConstantInputs", "FullPath", "TimeStamp" };

  mxArray *xInputs;
  /* Field names of the top-level result struct. */
  const char * b_fldNames[4] = { "Version", "ResolvedFunctions", "EntryPoints",
    "CoverageInfo" };

  xEntryPoints = emlrtCreateStructMatrix(1, 1, 6, fldNames);
  /* One logical per input; generated as all-false (no constant inputs). */
  xInputs = emlrtCreateLogicalMatrix(1, 3);
  emlrtSetField(xEntryPoints, 0, "Name", emlrtMxCreateString("sprdmpF10"));
  emlrtSetField(xEntryPoints, 0, "NumberOfInputs", emlrtMxCreateDoubleScalar(3.0));
  emlrtSetField(xEntryPoints, 0, "NumberOfOutputs", emlrtMxCreateDoubleScalar
                (8.0));
  emlrtSetField(xEntryPoints, 0, "ConstantInputs", xInputs);
  emlrtSetField(xEntryPoints, 0, "FullPath", emlrtMxCreateString(
    "/home/hadi/MEGAsync/Hadi/AutoTMTDyn/Code/AutoTMTDyn/Beta/v1.0/code/sprdmpF10.m"));
  /* Serial date number of the generating MATLAB source. */
  emlrtSetField(xEntryPoints, 0, "TimeStamp", emlrtMxCreateDoubleScalar
                (737429.62541666662));
  xResult = emlrtCreateStructMatrix(1, 1, 4, b_fldNames);
  emlrtSetField(xResult, 0, "Version", emlrtMxCreateString(
    "9.5.0.944444 (R2018b)"));
  emlrtSetField(xResult, 0, "ResolvedFunctions", (mxArray *)
                emlrtMexFcnResolvedFunctionsInfo());
  emlrtSetField(xResult, 0, "EntryPoints", xEntryPoints);
  return xResult;
}

/* Return the (empty, 0x1) resolved-functions info array. */
const mxArray *emlrtMexFcnResolvedFunctionsInfo(void)
{
  const mxArray *nameCaptureInfo;
  const mxArray *m0;
  static const int32_T iv1[2] = { 0, 1 };

  nameCaptureInfo = NULL;
  m0 = emlrtCreateNumericArray(2, iv1, mxDOUBLE_CLASS, mxREAL);
  emlrtAssign(&nameCaptureInfo, m0);
  return nameCaptureInfo;
}

/* End of code generation (_coder_sprdmpF10_info.c) */
468175.c
/*
 * Copyright (c) Edward Thomson.  All rights reserved.
 *
 * This file is part of ntlmclient, distributed under the MIT license.
 * For full terms and copyright information, and for third-party
 * copyright information, see the included LICENSE.txt file.
 */

/* UTF-8 <-> UTF-16LE conversion helpers built on iconv, used by the NTLM
 * protocol code.  Converters are opened once per client in
 * ntlm_unicode_init() and closed in ntlm_unicode_shutdown(). */

#include <locale.h>
#include <iconv.h>
#include <string.h>
#include <errno.h>

#include "ntlmclient.h"
#include "unicode.h"
#include "ntlm.h"
#include "compat.h"

/* Selects which of the two iconv converters a conversion uses. */
typedef enum {
	unicode_iconv_utf8_to_16,
	unicode_iconv_utf16_to_8
} unicode_iconv_encoding_direction;

/* Open both iconv converters on the client.  Returns false (with an error
 * message set on 'ntlm') if iconv lacks UTF-8 <-> UTF-16LE support. */
bool ntlm_unicode_init(ntlm_client *ntlm)
{
	ntlm->unicode_ctx.utf8_to_16 = iconv_open("UTF-16LE", "UTF-8");
	ntlm->unicode_ctx.utf16_to_8 = iconv_open("UTF-8", "UTF-16LE");

	if (ntlm->unicode_ctx.utf8_to_16 == (iconv_t)-1 ||
	    ntlm->unicode_ctx.utf16_to_8 == (iconv_t)-1) {
		if (errno == EINVAL)
			ntlm_client_set_errmsg(ntlm,
				"iconv does not support UTF8 <-> UTF16 conversion");
		else
			ntlm_client_set_errmsg(ntlm, strerror(errno));

		return false;
	}

	return true;
}

/* Convert 'string' (length 'string_len' bytes) in the given direction,
 * allocating *converted (NUL-terminated; caller frees) and storing its
 * byte length in *converted_len.  The output buffer starts at an estimated
 * size and is grown by ~1.5x on E2BIG until iconv completes, capped at
 * NTLM_UNICODE_MAX_LEN.  Returns false with an error set on failure. */
static inline bool unicode_iconv_encoding_convert(
	char **converted,
	size_t *converted_len,
	ntlm_client *ntlm,
	const char *string,
	size_t string_len,
	unicode_iconv_encoding_direction direction)
{
	char *in_start, *out_start, *out, *new_out;
	size_t in_start_len, out_start_len, out_size, nul_size, ret, written = 0;
	iconv_t converter;

	*converted = NULL;
	*converted_len = 0;

	/*
	 * When translating UTF8 to UTF16, these strings are only used
	 * internally, and we obey the given length, so we can simply
	 * use a buffer that is 2x the size.  When translating from UTF16
	 * to UTF8, we may need to return to callers, so we need to NUL
	 * terminate and expect an extra byte for UTF8, two for UTF16.
	 */
	if (direction == unicode_iconv_utf8_to_16) {
		converter = ntlm->unicode_ctx.utf8_to_16;
		out_size = (string_len * 2) + 2;
		nul_size = 2;
	} else {
		converter = ntlm->unicode_ctx.utf16_to_8;
		out_size = (string_len / 2) + 1;
		nul_size = 1;
	}

	/* Round to the nearest multiple of 8 */
	out_size = (out_size + 7) & ~7;

	if ((out = malloc(out_size)) == NULL) {
		ntlm_client_set_errmsg(ntlm, "out of memory");
		return false;
	}

	in_start = (char *)string;
	in_start_len = string_len;

	while (true) {
		/* Resume writing after any bytes produced by a prior pass. */
		out_start = out + written;
		out_start_len = (out_size - nul_size) - written;

		ret = iconv(converter, &in_start, &in_start_len, &out_start, &out_start_len);
		written = (out_size - nul_size) - out_start_len;

		if (ret == 0)
			break;

		/* E2BIG just means "output buffer full": grow and retry.
		 * Anything else is a genuine conversion error. */
		if (ret == (size_t)-1 && errno != E2BIG) {
			ntlm_client_set_errmsg(ntlm, strerror(errno));
			goto on_error;
		}

		/* Grow buffer size by 1.5 (rounded up to a multiple of 8) */
		out_size = ((((out_size << 1) - (out_size >> 1)) + 7) & ~7);

		if (out_size > NTLM_UNICODE_MAX_LEN) {
			ntlm_client_set_errmsg(ntlm, "unicode conversion too large");
			goto on_error;
		}

		if ((new_out = realloc(out, out_size)) == NULL) {
			ntlm_client_set_errmsg(ntlm, "out of memory");
			goto on_error;
		}

		out = new_out;
	}

	/* iconv returned success but did not consume everything: the input
	 * contained an incomplete/invalid sequence at the tail. */
	if (in_start_len != 0) {
		ntlm_client_set_errmsg(ntlm,
			"invalid unicode string; trailing data remains");
		goto on_error;
	}

	/* NUL terminate (two bytes for UTF-16 output). */
	out[written] = '\0';
	if (direction == unicode_iconv_utf8_to_16)
		out[written + 1] = '\0';

	*converted = out;

	if (converted_len)
		*converted_len = written;

	return true;

on_error:
	free(out);
	return false;
}

/* Public wrapper: UTF-8 -> UTF-16LE.  See unicode_iconv_encoding_convert. */
bool ntlm_unicode_utf8_to_16(
	char **converted,
	size_t *converted_len,
	ntlm_client *ntlm,
	const char *string,
	size_t string_len)
{
	return unicode_iconv_encoding_convert(
		converted, converted_len, ntlm, string, string_len,
		unicode_iconv_utf8_to_16);
}

/* Public wrapper: UTF-16LE -> UTF-8.  See unicode_iconv_encoding_convert. */
bool ntlm_unicode_utf16_to_8(
	char **converted,
	size_t *converted_len,
	ntlm_client *ntlm,
	const char *string,
	size_t string_len)
{
	return unicode_iconv_encoding_convert(
		converted, converted_len, ntlm, string, string_len,
		unicode_iconv_utf16_to_8);
}

/* Close both converters (if open) and mark them invalid.  Safe to call
 * even when ntlm_unicode_init() failed part-way. */
void ntlm_unicode_shutdown(ntlm_client *ntlm)
{
	if (ntlm->unicode_ctx.utf16_to_8 != (iconv_t)0 &&
	    ntlm->unicode_ctx.utf16_to_8 != (iconv_t)-1)
		iconv_close(ntlm->unicode_ctx.utf16_to_8);

	if (ntlm->unicode_ctx.utf8_to_16 != (iconv_t)0 &&
	    ntlm->unicode_ctx.utf8_to_16 != (iconv_t)-1)
		iconv_close(ntlm->unicode_ctx.utf8_to_16);

	ntlm->unicode_ctx.utf8_to_16 = (iconv_t)-1;
	ntlm->unicode_ctx.utf16_to_8 = (iconv_t)-1;
}
402664.c
#include <stdio.h> #include <stdlib.h> #include "inputlib.h" /* assume filename is no longer than 256 characters */ #define NAMESIZE 257 int main(void) { int ch; FILE *fp; unsigned long count = 0; char filename[NAMESIZE]; puts("Please enter the name of the file to be opened:"); if (get_string(filename, NAMESIZE, stdin) == NULL) { if (feof(stdin)) fputs("EOF encountered on stdin.\n", stderr); if (ferror(stdout)) fputs("Read error occurred on stdin.\n", stderr); exit(EXIT_FAILURE); } if ((fp = fopen(filename, "r")) == NULL) { fprintf(stderr, "Can't open %s\n", filename); exit(EXIT_FAILURE); } while ((ch = getc(fp)) != EOF) { putc(ch, stdout); count++; } printf("\nFile %s has %lu characters\n", filename, count); if (fclose(fp) == EOF) { fprintf(stderr, "Error closing file %s\n", filename); exit(EXIT_FAILURE); } return 0; }
425122.c
/**
\brief Applications running on top of the OpenWSN stack.

\author Thomas Watteyne <[email protected]>, September 2014.
*/

#include "opendefs.h"

// CoAP
#include "c6t.h"
#include "c6top.h"
#include "cinfo.h"
#include "csensors.h"
#include "cleds.h"
#include "cexample.h"
#include "cstorm.h"
#include "cwellknown.h"
#include "lwm2m.h"
#include "lwm2m_dev.h"
#include "coap_rd.h"
#include "rrt.h"
// TCP
#include "techo.h"
// UDP
#include "uecho.h"
#include "uinject.h"
#include "ufirealarm.h"
#include "upiano.h"
#include "umonitor.h"

//=========================== variables =======================================

//=========================== prototypes ======================================

//=========================== public ==========================================

//=========================== private =========================================

/* Initialize the application layer.  Only the *_init() calls left
 * uncommented below are enabled in this build; the commented-out lines are
 * available applications that were deliberately disabled to save footprint
 * or avoid conflicts.  NOTE(review): initialization order presumably does
 * not matter between these apps — confirm before reordering. */
void openapps_init(void) {
   // CoAP
   // c6t_init();
   // cinfo_init();
   // cexample_init();
   // cleds__init();
   //cstorm_init();
   //csensors_init();
   cwellknown_init();
   // ufirealarm_init();
   // umonitor_init();
   upiano_init();
   // rrt_init();
   // TCP
   // techo_init();
   c6top_init();
   coap_rd_init();
   // lwm2m_init();
   // lwm2m_dev_init();
}
665370.c
/* $Id: tabid.c,v 1.2 2013/01/22 17:52:56 prs Exp $ */
/*
 * Scoped identifier (symbol) table.
 *
 * The table is a single linked list of 'struct id' nodes.  A node with
 * name == 0 is a scope marker ("bucket" boundary); named nodes belong to
 * the scope opened after the most recent marker.  'level' counts the
 * currently open scopes.
 */
#include "tabid.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern int yyerror(char*);
static char buf[80]; /* for error messages */
int IDdebug;

static struct id {
	int type;
	long attrib;
	char *name;
	struct id *next;
} *root = 0;
/*
   define a new ID => 'name' != 0 (if 'next' is 0 then EOF)
   define a new BUCKET => 'name' == 0 && 'next' points to previous BUCKET
*/

/* Swap the whole table for 'swap' (e.g. to save/restore state);
 * returns the previous root so it can be restored later. */
void *IDroot(void *swap)
{
	struct id *old = root;

	root = (struct id *)swap;
	return old;
}

static int level;

/* Prepend a node to the list: a named symbol, or a scope marker when
 * 's' is 0. */
static void IDadd(int typ, char *s, long attrib)
{
	struct id *aux = (struct id*)malloc(sizeof(struct id));

	if (aux == 0) {
		yyerror("No memory.\n");
		return;
	}
	aux->name = s;
	aux->next = root;
	aux->type = typ;
	aux->attrib = attrib;
	root = aux;
	if (IDdebug != 0) {
		if (s == 0)
			printf("#>>PUSH=%d\n", level);
		else
			printf("#>>ADD=%s\n", s);
	}
}

/* Open a new scope (push a marker). */
void IDpush()
{
	level++;
	IDadd(0,0,0);
}

/* Close the innermost scope: free every symbol up to and including its
 * marker. */
void IDpop()
{
	struct id *aux;

	while ((aux = root) != 0) {
		root = aux->next;
		if (aux->name == 0) {
			free(aux);
			break;
		}
		free(aux);
	}
	if (IDdebug != 0)
		printf("#>>POP=%d\n", level);
	level--;
}

/* Close all open scopes. */
void IDclear()
{
	while (level > 0)
		IDpop();
}

/*
   insert a new ID into the current bucket, return:
	1 - if is new ID (might have been defined in an upper bucket)
	0 - there is an ID with the same name in the bucket
*/
int IDnew(int typ, char *s, long attrib)
{
	struct id *aux;

	/* Search only the current bucket (stop at the first marker). */
	for (aux = root; aux != 0 && aux->name != 0; aux = aux->next)
		if (strcmp(aux->name, s) == 0) {
			if (attrib != IDtest) {
				sprintf(buf, "\t%s: already defined.\n", s);
				yyerror(buf);
			}
			return 0;
		}
	IDadd(typ, s, attrib);
	return 1;
}

/* Insert 's' into the scope at depth 'lev' (0 = outermost).  Falls back
 * to IDnew() for the current level.  NOTE: no duplicate check is done
 * when inserting into an outer level. */
int IDinsert(int lev, int typ, char *s, long attrib)
{
	struct id *aux, *scout = root, **base;

	if (lev > level) {
		yyerror("Invalid scope level");
		return 0;
	}
	if (lev == level)
		return IDnew(typ, s, attrib);
	/* Walk past (level - lev) scope markers; 'base' ends up pointing at
	 * the link just after the marker of the target scope. */
	for (lev = level - lev; lev > 0; lev--) {
		while (scout->name != 0)
			scout = scout->next;
		base = &scout->next;
		scout = scout->next;
	}
	aux = (struct id*)malloc(sizeof(struct id));
	if (aux == 0) {
		yyerror("No memory.\n");
		return 0;
	}
	aux->name = s;
	aux->next = *base;
	aux->type = typ;
	aux->attrib = attrib;
	*base = aux;
	if (IDdebug != 0)
		printf("#>>INSERT(%d)=%s\n", lev, s);
	return 1;
}

/* Replace type/attrib of the first visible definition of 's'.
 * Returns 1 on success, -1 if 's' is not defined anywhere. */
int IDreplace(int typ, char *s, long attrib)
{
	struct id *aux;

	for (aux = root; aux != 0; aux = aux->next)
		if (aux->name != 0 && strcmp(aux->name, s) == 0) {
			aux->type = typ;
			aux->attrib = attrib;
			return 1;
		}
	return -1;
}

/*
   find an ID, return:
	-1 - if no ID can be found in any visible bucket up to the root
	type - there is an accessible ID previously defined
*/
int IDfind(char *s, long *attrib)
{
	struct id *aux;

	for (aux = root; aux != 0; aux = aux->next)
		if (aux->name != 0 && strcmp(aux->name, s) == 0) {
			if (attrib != 0 && attrib != ((long*)IDtest))
				*attrib = aux->attrib;
			return aux->type;
		}
	/* else if (aux->name == 0 && lev > 0 && --lev == 0) break; */
	if (attrib != ((long*)IDtest)) {
		sprintf(buf, "%s: undefined.", s);
		yyerror(buf);
	}
	return -1;
}

/* Like IDfind(), but skip the first 'skip' scopes and search at most
 * 'lev' scopes (0 = unlimited). */
int IDsearch(char *s, long *attrib, int skip, int lev)
{
	struct id *aux = root;

	if (skip > level)
		skip = level;
	while (skip-- > 0) { /* skip the first 'skip' levels */
		while (aux->name != 0)
			aux = aux->next;
		aux = aux->next;
	}
	for (; aux != 0; aux = aux->next)
		if (aux->name != 0 && strcmp(aux->name, s) == 0) {
			if (attrib != 0 && attrib != ((long*)IDtest))
				*attrib = aux->attrib;
			return aux->type;
		}
		else if (aux->name == 0 && lev > 0 && --lev == 0)
			break; /* stop after 'lev' levels */
	if (attrib != ((long*)IDtest)) {
		sprintf(buf, "%s: undefined.", s);
		yyerror(buf);
	}
	return -1;
}

/* Current scope depth. */
int IDlevel()
{
	return level;
}

/* Apply 'f' to every entry after skipping 'skip' scopes, visiting at most
 * 'lev' scopes (0 = unlimited); stops early when 'f' returns < 0.
 * Returns the sum of the callback's return values. */
int IDforall(IDfunc f, long user, int skip, int lev)
{
	/* FIX: 'aux' was uninitialized while the skip loop dereferenced it,
	 * and the for-loop then restarted at 'root', discarding the skip.
	 * Initialize from root and continue from where the skip ended,
	 * mirroring IDsearch(). */
	struct id *aux = root;
	int cnt = 0, ret;

	if (skip > level)
		skip = level;
	while (skip-- > 0) { /* skip the first 'skip' levels */
		while (aux->name != 0)
			aux = aux->next;
		aux = aux->next;
	}
	for (; aux != 0; aux = aux->next, cnt += ret)
		if (aux->name == 0) {
			if ((ret = (*f)(0,"",0,user)) < 0)
				break;
			if (lev > 0 && --lev == 0)
				break; /* stop after 'lev' levels */
		}
		else if ((ret = (*f)(aux->type, aux->name, aux->attrib, user)) < 0)
			break;
	return cnt;
}

/* Debug dump of the table after skipping 'skip' scopes, printing at most
 * 'lev' scopes (0 = unlimited).  Markers print as " :". */
void IDprint(int skip, int lev)
{
	/* FIX: same defect as IDforall(): 'aux' was used uninitialized in
	 * the skip loop and then reset to 'root'.  Initialize from root and
	 * continue from where the skip ended. */
	struct id *aux = root;

	if (skip > level)
		skip = level;
	while (skip-- > 0) { /* skip the first 'skip' levels */
		while (aux->name != 0)
			aux = aux->next;
		aux = aux->next;
	}
	for (; aux != 0; aux = aux->next)
		if (aux->name == 0) {
			printf(" :");
			if (lev > 0 && --lev == 0)
				break; /* stop after 'lev' levels */
		}
		else
			printf(" %s:%d#%ld", aux->name, aux->type, aux->attrib);
	printf("\n");
}
951394.c
/*
 * msvcrt.dll ctype functions
 *
 * Copyright 2000 Jon Griffiths
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "msvcrt.h"
#include "winnls.h"

/* Some abbreviations to make the following table readable */
#define _C_ MSVCRT__CONTROL
#define _S_ MSVCRT__SPACE
#define _P_ MSVCRT__PUNCT
#define _D_ MSVCRT__DIGIT
#define _H_ MSVCRT__HEX
#define _U_ MSVCRT__UPPER
#define _L_ MSVCRT__LOWER

/* Classification table for the "C" locale: 257 entries, one extra leading
 * slot (presumably so that index -1, i.e. EOF, is valid when callers index
 * with the character value directly — TODO confirm against _pctype usage). */
WORD MSVCRT__ctype [257] = {
  /* extra leading entry */
  0,
  /* 0x00..0x08: control */
  _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_,
  /* 0x09..0x0D: whitespace controls (\t \n \v \f \r) */
  _S_|_C_, _S_|_C_, _S_|_C_, _S_|_C_, _S_|_C_,
  /* 0x0E..0x1F: control */
  _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_,
  _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_, _C_,
  /* 0x20: space */
  _S_|MSVCRT__BLANK,
  /* 0x21..0x2F: punctuation */
  _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_, _P_,
  /* 0x30..0x39: digits (also hex) */
  _D_|_H_, _D_|_H_, _D_|_H_, _D_|_H_, _D_|_H_,
  _D_|_H_, _D_|_H_, _D_|_H_, _D_|_H_, _D_|_H_,
  /* 0x3A..0x40: punctuation */
  _P_, _P_, _P_, _P_, _P_, _P_, _P_,
  /* 0x41..0x46: 'A'-'F' (upper, hex) */
  _U_|_H_, _U_|_H_, _U_|_H_, _U_|_H_, _U_|_H_, _U_|_H_,
  /* 0x47..0x5A: 'G'-'Z' */
  _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_,
  _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_, _U_,
  /* 0x5B..0x60: punctuation */
  _P_, _P_, _P_, _P_, _P_, _P_,
  /* 0x61..0x66: 'a'-'f' (lower, hex) */
  _L_|_H_, _L_|_H_, _L_|_H_, _L_|_H_, _L_|_H_, _L_|_H_,
  /* 0x67..0x7A: 'g'-'z' */
  _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_,
  _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_, _L_,
  /* 0x7B..0x7E: punctuation */
  _P_, _P_, _P_, _P_,
  /* 0x7F: DEL */
  _C_,
  /* 0x80..0xFF: no class bits in the "C" locale */
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* pctype is used by macros in the Win32 headers. It must point
 * To a table of flags exactly like ctype. To allow locale
 * changes to affect ctypes (i.e. isleadbyte), we use a second table
 * and update its flags whenever the current locale changes. */
WORD* MSVCRT__pctype = NULL;

/*********************************************************************
 *		__pctype_func (MSVCRT.@)
 */
/* Return the address of the current locale's classification table. */
WORD** CDECL MSVCRT___pctype_func(void)
{
  return &get_locale()->locinfo->pctype;
}

/*********************************************************************
 *		_isctype (MSVCRT.@)
 */
/* Core classification test: non-zero when 'c' has any of the class bits in
 * 'type'.  For multi-byte locales, characters above 255 are classified via
 * GetStringTypeExA on the (lead byte, trail byte) pair. */
int CDECL _isctype(int c, int type)
{
  MSVCRT__locale_t locale = get_locale();

  if (c >= -1 && c <= 255)
    return locale->locinfo->pctype[c] & type;

  if (locale->locinfo->mb_cur_max != 1 && c > 0)
  {
    /* FIXME: Is there a faster way to do this? */
    WORD typeInfo;
    char convert[3], *pconv = convert;

    if (locale->locinfo->pctype[(UINT)c >> 8] & MSVCRT__LEADBYTE)
      *pconv++ = (UINT)c >> 8;
    *pconv++ = c & 0xff;
    *pconv = 0;
    /* FIXME: Use ctype LCID, not lc_all */
    if (GetStringTypeExA(get_locale()->locinfo->lc_handle[MSVCRT_LC_CTYPE],
                         CT_CTYPE1, convert, convert[1] ? 2 : 1, &typeInfo))
      return typeInfo & type;
  }
  return 0;
}

/*********************************************************************
 *		isalnum (MSVCRT.@)
 */
int CDECL MSVCRT_isalnum(int c)
{
  return _isctype( c, MSVCRT__ALPHA | MSVCRT__DIGIT );
}

/*********************************************************************
 *		isalpha (MSVCRT.@)
 */
int CDECL MSVCRT_isalpha(int c)
{
  return _isctype( c, MSVCRT__ALPHA );
}

/*********************************************************************
 *		iscntrl (MSVCRT.@)
 */
int CDECL MSVCRT_iscntrl(int c)
{
  return _isctype( c, MSVCRT__CONTROL );
}

/*********************************************************************
 *		isdigit (MSVCRT.@)
 */
int CDECL MSVCRT_isdigit(int c)
{
  return _isctype( c, MSVCRT__DIGIT );
}

/*********************************************************************
 *		isgraph (MSVCRT.@)
 */
int CDECL MSVCRT_isgraph(int c)
{
  return _isctype( c, MSVCRT__ALPHA | MSVCRT__DIGIT | MSVCRT__PUNCT );
}

/*********************************************************************
 *		isleadbyte (MSVCRT.@)
 */
int CDECL MSVCRT_isleadbyte(int c)
{
  return _isctype( c, MSVCRT__LEADBYTE );
}

/*********************************************************************
 *		islower (MSVCRT.@)
 */
int CDECL MSVCRT_islower(int c)
{
  return _isctype( c, MSVCRT__LOWER );
}

/*********************************************************************
 *		isprint (MSVCRT.@)
 */
int CDECL MSVCRT_isprint(int c)
{
  return _isctype( c, MSVCRT__ALPHA | MSVCRT__DIGIT | MSVCRT__BLANK | MSVCRT__PUNCT );
}

/*********************************************************************
 *		ispunct (MSVCRT.@)
 */
int CDECL MSVCRT_ispunct(int c)
{
  return _isctype( c, MSVCRT__PUNCT );
}

/*********************************************************************
 *		isspace (MSVCRT.@)
 */
int CDECL MSVCRT_isspace(int c)
{
  return _isctype( c, MSVCRT__SPACE );
}

/*********************************************************************
 *		isupper (MSVCRT.@)
 */
int CDECL MSVCRT_isupper(int c)
{
  return _isctype( c, MSVCRT__UPPER );
}

/*********************************************************************
 *		isxdigit (MSVCRT.@)
 */
int CDECL MSVCRT_isxdigit(int c)
{
  return _isctype( c, MSVCRT__HEX );
}

/*********************************************************************
 *		__isascii (MSVCRT.@)
 */
int CDECL MSVCRT___isascii(int c)
{
  return isascii((unsigned)c);
}

/*********************************************************************
 *		__toascii (MSVCRT.@)
 */
/* Strip to 7 bits. */
int CDECL MSVCRT___toascii(int c)
{
  return (unsigned)c & 0x7f;
}

/*********************************************************************
 *		iswascii (MSVCRT.@)
 *
 */
int CDECL MSVCRT_iswascii(MSVCRT_wchar_t c)
{
  return ((unsigned)c < 0x80);
}

/*********************************************************************
 *		__iscsym (MSVCRT.@)
 */
/* Non-zero for characters valid inside a C identifier. */
int CDECL MSVCRT___iscsym(int c)
{
  return (c < 127 && (isalnum(c) || c == '_'));
}

/*********************************************************************
 *		__iscsymf (MSVCRT.@)
 */
/* Non-zero for characters valid as the first character of a C identifier. */
int CDECL MSVCRT___iscsymf(int c)
{
  return (c < 127 && (isalpha(c) || c == '_'));
}

/*********************************************************************
 *		_toupper (MSVCRT.@)
 */
/* Unconditional case shift — no range check, matching native msvcrt. */
int CDECL MSVCRT__toupper(int c)
{
  return c - 0x20;  /* sic */
}

/*********************************************************************
 *		_tolower (MSVCRT.@)
 */
/* Unconditional case shift — no range check, matching native msvcrt. */
int CDECL MSVCRT__tolower(int c)
{
  return c + 0x20;  /* sic */
}
642436.c
/* Socket module */ /* This module provides an interface to Berkeley socket IPC. Limitations: - Only AF_INET, AF_INET6 and AF_UNIX address families are supported in a portable manner, though AF_PACKET, AF_NETLINK and AF_TIPC are supported under Linux. - No read/write operations (use sendall/recv or makefile instead). - Additional restrictions apply on some non-Unix platforms (compensated for by socket.py). Module interface: - socket.error: exception raised for socket specific errors - socket.gaierror: exception raised for getaddrinfo/getnameinfo errors, a subclass of socket.error - socket.herror: exception raised for gethostby* errors, a subclass of socket.error - socket.fromfd(fd, family, type[, proto]) --> new socket object (created from an existing file descriptor) - socket.gethostbyname(hostname) --> host IP address (string: 'dd.dd.dd.dd') - socket.gethostbyaddr(IP address) --> (hostname, [alias, ...], [IP addr, ...]) - socket.gethostname() --> host name (string: 'spam' or 'spam.domain.com') - socket.getprotobyname(protocolname) --> protocol number - socket.getservbyname(servicename[, protocolname]) --> port number - socket.getservbyport(portnumber[, protocolname]) --> service name - socket.socket([family[, type [, proto]]]) --> new socket object - socket.socketpair([family[, type [, proto]]]) --> (socket, socket) - socket.ntohs(16 bit value) --> new int object - socket.ntohl(32 bit value) --> new int object - socket.htons(16 bit value) --> new int object - socket.htonl(32 bit value) --> new int object - socket.getaddrinfo(host, port [, family, socktype, proto, flags]) --> List of (family, socktype, proto, canonname, sockaddr) - socket.getnameinfo(sockaddr, flags) --> (host, port) - socket.AF_INET, socket.SOCK_STREAM, etc.: constants from <socket.h> - socket.has_ipv6: boolean value indicating if IPv6 is supported - socket.inet_aton(IP address) -> 32-bit packed IP representation - socket.inet_ntoa(packed IP) -> IP address string - socket.getdefaulttimeout() -> 
None | float - socket.setdefaulttimeout(None | float) - an Internet socket address is a pair (hostname, port) where hostname can be anything recognized by gethostbyname() (including the dd.dd.dd.dd notation) and port is in host byte order - where a hostname is returned, the dd.dd.dd.dd notation is used - a UNIX domain socket address is a string specifying the pathname - an AF_PACKET socket address is a tuple containing a string specifying the ethernet interface and an integer specifying the Ethernet protocol number to be received. For example: ("eth0",0x1234). Optional 3rd,4th,5th elements in the tuple specify packet-type and ha-type/addr. - an AF_TIPC socket address is expressed as (addr_type, v1, v2, v3 [, scope]); where addr_type can be one of: TIPC_ADDR_NAMESEQ, TIPC_ADDR_NAME, and TIPC_ADDR_ID; and scope can be one of: TIPC_ZONE_SCOPE, TIPC_CLUSTER_SCOPE, and TIPC_NODE_SCOPE. The meaning of v1, v2 and v3 depends on the value of addr_type: if addr_type is TIPC_ADDR_NAME: v1 is the server type v2 is the port identifier v3 is ignored if addr_type is TIPC_ADDR_NAMESEQ: v1 is the server type v2 is the lower port number v3 is the upper port number if addr_type is TIPC_ADDR_ID: v1 is the node v2 is the ref v3 is ignored Local naming conventions: - names starting with sock_ are socket object methods - names starting with socket_ are module-level functions - names starting with PySocket are exported through socketmodule.h */ #ifdef __APPLE__ #include <AvailabilityMacros.h> /* for getaddrinfo thread safety test on old versions of OS X */ #ifndef MAC_OS_X_VERSION_10_5 #define MAC_OS_X_VERSION_10_5 1050 #endif /* * inet_aton is not available on OSX 10.3, yet we want to use a binary * that was build on 10.4 or later to work on that release, weak linking * comes to the rescue. 
*/ # pragma weak inet_aton #endif #include "Python.h" #include "structmember.h" #include "timefuncs.h" #ifndef INVALID_SOCKET /* MS defines this */ #define INVALID_SOCKET (-1) #endif #undef MAX #define MAX(x, y) ((x) < (y) ? (y) : (x)) /* Socket object documentation */ PyDoc_STRVAR(sock_doc, "socket([family[, type[, proto]]]) -> socket object\n\ \n\ Open a socket of the given type. The family argument specifies the\n\ address family; it defaults to AF_INET. The type argument specifies\n\ whether this is a stream (SOCK_STREAM, this is the default)\n\ or datagram (SOCK_DGRAM) socket. The protocol argument defaults to 0,\n\ specifying the default protocol. Keyword arguments are accepted.\n\ \n\ A socket object represents one endpoint of a network connection.\n\ \n\ Methods of socket objects (keyword arguments not allowed):\n\ \n\ accept() -- accept a connection, returning new socket and client address\n\ bind(addr) -- bind the socket to a local address\n\ close() -- close the socket\n\ connect(addr) -- connect the socket to a remote address\n\ connect_ex(addr) -- connect, return an error code instead of an exception\n\ dup() -- return a new socket object identical to the current one [*]\n\ fileno() -- return underlying file descriptor\n\ getpeername() -- return remote address [*]\n\ getsockname() -- return local address\n\ getsockopt(level, optname[, buflen]) -- get socket options\n\ gettimeout() -- return timeout or None\n\ listen(n) -- start listening for incoming connections\n\ makefile([mode, [bufsize]]) -- return a file object for the socket [*]\n\ recv(buflen[, flags]) -- receive data\n\ recv_into(buffer[, nbytes[, flags]]) -- receive data (into a buffer)\n\ recvfrom(buflen[, flags]) -- receive data and sender\'s address\n\ recvfrom_into(buffer[, nbytes, [, flags])\n\ -- receive data and sender\'s address (into a buffer)\n\ sendall(data[, flags]) -- send all data\n\ send(data[, flags]) -- send data, may not send all of it\n\ sendto(data[, flags], addr) -- send 
data to a given address\n\ setblocking(0 | 1) -- set or clear the blocking I/O flag\n\ setsockopt(level, optname, value) -- set socket options\n\ settimeout(None | float) -- set or clear the timeout\n\ shutdown(how) -- shut down traffic in one or both directions\n\ \n\ [*] not available on all platforms!"); /* XXX This is a terrible mess of platform-dependent preprocessor hacks. I hope some day someone can clean this up please... */ /* Hacks for gethostbyname_r(). On some non-Linux platforms, the configure script doesn't get this right, so we hardcode some platform checks below. On the other hand, not all Linux versions agree, so there the settings computed by the configure script are needed! */ #ifndef linux # undef HAVE_GETHOSTBYNAME_R_3_ARG # undef HAVE_GETHOSTBYNAME_R_5_ARG # undef HAVE_GETHOSTBYNAME_R_6_ARG #endif #ifndef WITH_THREAD # undef HAVE_GETHOSTBYNAME_R #endif #ifdef HAVE_GETHOSTBYNAME_R # if defined(_AIX) && !defined(_LINUX_SOURCE_COMPAT) || defined(__osf__) # define HAVE_GETHOSTBYNAME_R_3_ARG # elif defined(__sun) || defined(__sgi) # define HAVE_GETHOSTBYNAME_R_5_ARG # elif defined(linux) /* Rely on the configure script */ # elif defined(_LINUX_SOURCE_COMPAT) /* Linux compatibility on AIX */ # define HAVE_GETHOSTBYNAME_R_6_ARG # else # undef HAVE_GETHOSTBYNAME_R # endif #endif #if !defined(HAVE_GETHOSTBYNAME_R) && defined(WITH_THREAD) && \ !defined(MS_WINDOWS) # define USE_GETHOSTBYNAME_LOCK #endif /* To use __FreeBSD_version, __OpenBSD__, and __NetBSD_Version__ */ #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif /* On systems on which getaddrinfo() is believed to not be thread-safe, (this includes the getaddrinfo emulation) protect access with a lock. getaddrinfo is thread-safe on Mac OS X 10.5 and later. Originally it was a mix of code including an unsafe implementation from an old BSD's libresolv. In 10.5 Apple reimplemented it as a safe IPC call to the mDNSResponder process. 
10.5 is the first be UNIX '03 certified, which includes the requirement that getaddrinfo be thread-safe. See issue #25924. It's thread-safe in OpenBSD starting with 5.4, released Nov 2013: http://www.openbsd.org/plus54.html It's thread-safe in NetBSD starting with 4.0, released Dec 2007: http://cvsweb.netbsd.org/bsdweb.cgi/src/lib/libc/net/getaddrinfo.c.diff?r1=1.82&r2=1.83 */ #if defined(WITH_THREAD) && ( \ (defined(__APPLE__) && \ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_5) || \ (defined(__FreeBSD__) && __FreeBSD_version+0 < 503000) || \ (defined(__OpenBSD__) && OpenBSD+0 < 201311) || \ (defined(__NetBSD__) && __NetBSD_Version__+0 < 400000000) || \ defined(__VMS) || !defined(HAVE_GETADDRINFO)) #define USE_GETADDRINFO_LOCK #endif #ifdef USE_GETADDRINFO_LOCK #define ACQUIRE_GETADDRINFO_LOCK PyThread_acquire_lock(netdb_lock, 1); #define RELEASE_GETADDRINFO_LOCK PyThread_release_lock(netdb_lock); #else #define ACQUIRE_GETADDRINFO_LOCK #define RELEASE_GETADDRINFO_LOCK #endif #if defined(USE_GETHOSTBYNAME_LOCK) || defined(USE_GETADDRINFO_LOCK) # include "pythread.h" #endif #if defined(PYCC_VACPP) # include <types.h> # include <io.h> # include <sys/ioctl.h> # include <utils.h> # include <ctype.h> #endif #if defined(__VMS) # include <ioctl.h> #endif #if defined(PYOS_OS2) # define INCL_DOS # define INCL_DOSERRORS # define INCL_NOPMAPI # include <os2.h> #endif #if defined(__sgi) && _COMPILER_VERSION>700 && !_SGIAPI /* make sure that the reentrant (gethostbyaddr_r etc) functions are declared correctly if compiling with MIPSPro 7.x in ANSI C mode (default) */ /* XXX Using _SGIAPI is the wrong thing, but I don't know what the right thing is. 
*/ #undef _SGIAPI /* to avoid warning */ #define _SGIAPI 1 #undef _XOPEN_SOURCE #include <sys/socket.h> #include <sys/types.h> #include <netinet/in.h> #ifdef _SS_ALIGNSIZE #define HAVE_GETADDRINFO 1 #define HAVE_GETNAMEINFO 1 #endif #define HAVE_INET_PTON #include <netdb.h> #endif /* Irix 6.5 fails to define this variable at all. This is needed for both GCC and SGI's compiler. I'd say that the SGI headers are just busted. Same thing for Solaris. */ #if (defined(__sgi) || defined(sun)) && !defined(INET_ADDRSTRLEN) #define INET_ADDRSTRLEN 16 #endif /* Generic includes */ #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif /* Generic socket object definitions and includes */ #define PySocket_BUILDING_SOCKET #include "socketmodule.h" /* Addressing includes */ #ifndef MS_WINDOWS /* Non-MS WINDOWS includes */ # include <netdb.h> /* Headers needed for inet_ntoa() and inet_addr() */ # ifdef __BEOS__ # include <net/netdb.h> # elif defined(PYOS_OS2) && defined(PYCC_VACPP) # include <netdb.h> typedef size_t socklen_t; # else # include <arpa/inet.h> # endif # ifndef RISCOS # include <fcntl.h> # else # include <sys/ioctl.h> # include <socklib.h> # define NO_DUP int h_errno; /* not used */ # define INET_ADDRSTRLEN 16 # endif #else /* MS_WINDOWS includes */ # ifdef HAVE_FCNTL_H # include <fcntl.h> # endif #endif #include <stddef.h> #ifndef offsetof # define offsetof(type, member) ((size_t)(&((type *)0)->member)) #endif #ifndef O_NONBLOCK # define O_NONBLOCK O_NDELAY #endif /* include Python's addrinfo.h unless it causes trouble */ #if defined(__sgi) && _COMPILER_VERSION>700 && defined(_SS_ALIGNSIZE) /* Do not include addinfo.h on some newer IRIX versions. * _SS_ALIGNSIZE is defined in sys/socket.h by 6.5.21, * for example, but not by 6.5.10. */ #elif defined(_MSC_VER) && _MSC_VER>1201 /* Do not include addrinfo.h for MSVC7 or greater. 'addrinfo' and * EAI_* constants are defined in (the already included) ws2tcpip.h. 
*/ #else # include "addrinfo.h" #endif #ifndef HAVE_INET_PTON #if !defined(NTDDI_VERSION) || (NTDDI_VERSION < NTDDI_LONGHORN) int inet_pton(int af, const char *src, void *dst); const char *inet_ntop(int af, const void *src, char *dst, socklen_t size); #endif #endif #ifdef __APPLE__ /* On OS X, getaddrinfo returns no error indication of lookup failure, so we must use the emulation instead of the libinfo implementation. Unfortunately, performing an autoconf test for this bug would require DNS access for the machine performing the configuration, which is not acceptable. Therefore, we determine the bug just by checking for __APPLE__. If this bug gets ever fixed, perhaps checking for sys/version.h would be appropriate, which is 10/0 on the system with the bug. */ #ifndef HAVE_GETNAMEINFO /* This bug seems to be fixed in Jaguar. Ths easiest way I could Find to check for Jaguar is that it has getnameinfo(), which older releases don't have */ #undef HAVE_GETADDRINFO #endif #ifdef HAVE_INET_ATON #define USE_INET_ATON_WEAKLINK #endif #endif /* I know this is a bad practice, but it is the easiest... */ #if !defined(HAVE_GETADDRINFO) /* avoid clashes with the C library definition of the symbol. */ #define getaddrinfo fake_getaddrinfo #define gai_strerror fake_gai_strerror #define freeaddrinfo fake_freeaddrinfo #include "getaddrinfo.c" #endif #if !defined(HAVE_GETNAMEINFO) #define getnameinfo fake_getnameinfo #include "getnameinfo.c" #endif #if defined(MS_WINDOWS) || defined(__BEOS__) /* BeOS suffers from the same socket dichotomy as Win32... - [cjh] */ /* seem to be a few differences in the API */ #define SOCKETCLOSE closesocket #define NO_DUP /* Actually it exists on NT 3.5, but what the heck... 
*/ #endif #ifdef MS_WIN32 #define EAFNOSUPPORT WSAEAFNOSUPPORT #define snprintf _snprintf #endif #if defined(PYOS_OS2) && !defined(PYCC_GCC) #define SOCKETCLOSE soclose #define NO_DUP /* Sockets are Not Actual File Handles under OS/2 */ #endif #ifndef SOCKETCLOSE #define SOCKETCLOSE close #endif #if (defined(HAVE_BLUETOOTH_H) || defined(HAVE_BLUETOOTH_BLUETOOTH_H)) && !defined(__NetBSD__) && !defined(__DragonFly__) #define USE_BLUETOOTH 1 #if defined(__FreeBSD__) #define BTPROTO_L2CAP BLUETOOTH_PROTO_L2CAP #define BTPROTO_RFCOMM BLUETOOTH_PROTO_RFCOMM #define BTPROTO_HCI BLUETOOTH_PROTO_HCI #define SOL_HCI SOL_HCI_RAW #define HCI_FILTER SO_HCI_RAW_FILTER #define sockaddr_l2 sockaddr_l2cap #define sockaddr_rc sockaddr_rfcomm #define hci_dev hci_node #define _BT_L2_MEMB(sa, memb) ((sa)->l2cap_##memb) #define _BT_RC_MEMB(sa, memb) ((sa)->rfcomm_##memb) #define _BT_HCI_MEMB(sa, memb) ((sa)->hci_##memb) #elif defined(__NetBSD__) || defined(__DragonFly__) #define sockaddr_l2 sockaddr_bt #define sockaddr_rc sockaddr_bt #define sockaddr_hci sockaddr_bt #define sockaddr_sco sockaddr_bt #define SOL_HCI BTPROTO_HCI #define HCI_DATA_DIR SO_HCI_DIRECTION #define _BT_L2_MEMB(sa, memb) ((sa)->bt_##memb) #define _BT_RC_MEMB(sa, memb) ((sa)->bt_##memb) #define _BT_HCI_MEMB(sa, memb) ((sa)->bt_##memb) #define _BT_SCO_MEMB(sa, memb) ((sa)->bt_##memb) #else #define _BT_L2_MEMB(sa, memb) ((sa)->l2_##memb) #define _BT_RC_MEMB(sa, memb) ((sa)->rc_##memb) #define _BT_HCI_MEMB(sa, memb) ((sa)->hci_##memb) #define _BT_SCO_MEMB(sa, memb) ((sa)->sco_##memb) #endif #endif #ifdef __VMS /* TCP/IP Services for VMS uses a maximum send/recv buffer length */ #define SEGMENT_SIZE (32 * 1024 -1) #endif #define SAS2SA(x) ((struct sockaddr *)(x)) /* * Constants for getnameinfo() */ #if !defined(NI_MAXHOST) #define NI_MAXHOST 1025 #endif #if !defined(NI_MAXSERV) #define NI_MAXSERV 32 #endif /* XXX There's a problem here: *static* functions are not supposed to have a Py prefix (or use CapitalizedWords). 
   Later... */

/* Global variables holding the exception types for errors detected
   by this module (but not argument type or memory errors, etc.). */

static PyObject *socket_error;      /* base exception: socket.error */
static PyObject *socket_herror;     /* gethostby* failures (subclass of socket.error) */
static PyObject *socket_gaierror;   /* getaddrinfo/getnameinfo failures (subclass) */
static PyObject *socket_timeout;    /* raised when a timed socket operation expires */

#ifdef RISCOS
/* Global variable which is !=0 if Python is running in a RISC OS taskwindow */
static int taskwindow;
#endif

/* A forward reference to the socket type object.
   The sock_type variable contains pointers to various functions,
   some of which call new_sockobject(), which uses sock_type, so
   there has to be a circular reference. */
static PyTypeObject sock_type;

#if defined(HAVE_POLL_H)
#include <poll.h>
#elif defined(HAVE_SYS_POLL_H)
#include <sys/poll.h>
#endif

#ifdef HAVE_POLL
/* Instead of select(), we'll use poll() since poll() works on any fd. */
#define IS_SELECTABLE(s) 1
/* Can we call select() with this socket without a buffer overrun? */
#else
/* If there's no timeout left, we don't have to call select, so it's a safe,
 * little white lie. */
#define IS_SELECTABLE(s) (_PyIsSelectable_fd((s)->sock_fd) || (s)->sock_timeout <= 0.0)
#endif

/* Set socket.error for a failed select()/poll() readiness check and
   return NULL so callers can `return select_error();` directly. */
static PyObject*
select_error(void)
{
    PyErr_SetString(socket_error, "unable to select on socket");
    return NULL;
}

#ifdef MS_WINDOWS
#ifndef WSAEAGAIN
#define WSAEAGAIN WSAEWOULDBLOCK
#endif
/* On Windows the last socket error comes from WSAGetLastError() and the
   error constants carry a WSA prefix; CHECK_ERRNO hides that difference
   so shared code can test e.g. CHECK_ERRNO(EWOULDBLOCK). */
#define CHECK_ERRNO(expected) \
    (WSAGetLastError() == WSA ## expected)
#else
#define CHECK_ERRNO(expected) \
    (errno == expected)
#endif

/* Convenience function to raise an error according to errno
   and return a NULL pointer from a function.
 */

static PyObject *
set_error(void)
{
#ifdef MS_WINDOWS
    int err_no = WSAGetLastError();
    /* PyErr_SetExcFromWindowsErr() invokes FormatMessage() which
       recognizes the error codes used by both GetLastError() and
       WSAGetLastError */
    if (err_no)
        return PyErr_SetExcFromWindowsErr(socket_error, err_no);
#endif

#if defined(PYOS_OS2) && !defined(PYCC_GCC)
    if (sock_errno() != NO_ERROR) {
        APIRET rc;
        ULONG msglen;
        char outbuf[100];
        int myerrorcode = sock_errno();

        /* Retrieve socket-related error message from MPTN.MSG file */
        rc = DosGetMessage(NULL, 0, outbuf, sizeof(outbuf),
                           myerrorcode - SOCBASEERR + 26,
                           "mptn.msg",
                           &msglen);
        if (rc == NO_ERROR) {
            PyObject *v;

            /* OS/2 doesn't guarantee a terminator */
            /* NOTE(review): if msglen can equal sizeof(outbuf), this
               writes one byte past the end of outbuf -- confirm that
               DosGetMessage bounds msglen below the buffer size. */
            outbuf[msglen] = '\0';
            if (strlen(outbuf) > 0) {
                /* If non-empty msg, trim CRLF */
                char *lastc = &outbuf[ strlen(outbuf)-1 ];
                while (lastc > outbuf &&
                       isspace(Py_CHARMASK(*lastc))) {
                    /* Trim trailing whitespace (CRLF) */
                    *lastc-- = '\0';
                }
            }
            v = Py_BuildValue("(is)", myerrorcode, outbuf);
            if (v != NULL) {
                PyErr_SetObject(socket_error, v);
                Py_DECREF(v);
            }
            return NULL;
        }
    }
#endif

#if defined(RISCOS)
    if (_inet_error.errnum != NULL) {
        PyObject *v;
        v = Py_BuildValue("(is)", errno, _inet_err());
        if (v != NULL) {
            PyErr_SetObject(socket_error, v);
            Py_DECREF(v);
        }
        return NULL;
    }
#endif

    /* Generic path: raise socket.error from the current errno. */
    return PyErr_SetFromErrno(socket_error);
}


/* Set socket.herror to the tuple (h_error, description) and return NULL
   so callers can `return set_herror(...);` directly. */
static PyObject *
set_herror(int h_error)
{
    PyObject *v;

#ifdef HAVE_HSTRERROR
    v = Py_BuildValue("(is)", h_error, (char *)hstrerror(h_error));
#else
    v = Py_BuildValue("(is)", h_error, "host not found");
#endif
    if (v != NULL) {
        PyErr_SetObject(socket_herror, v);
        Py_DECREF(v);
    }

    return NULL;
}


/* Set socket.gaierror to the tuple (error, description) and return NULL.
   EAI_SYSTEM means "see errno", so it is delegated to set_error(). */
static PyObject *
set_gaierror(int error)
{
    PyObject *v;

#ifdef EAI_SYSTEM
    /* EAI_SYSTEM is not available on Windows XP.
*/
    if (error == EAI_SYSTEM)
        return set_error();
#endif

#ifdef HAVE_GAI_STRERROR
    v = Py_BuildValue("(is)", error, gai_strerror(error));
#else
    v = Py_BuildValue("(is)", error, "getaddrinfo failed");
#endif
    if (v != NULL) {
        PyErr_SetObject(socket_gaierror, v);
        Py_DECREF(v);
    }

    return NULL;
}

#ifdef __VMS
/* Function to send in segments.  TCP/IP Services for VMS caps a single
   send() at SEGMENT_SIZE bytes, so loop over the buffer.  On failure the
   (negative) send() result is returned; on success the full original
   length is reported. */
static Py_ssize_t
sendsegmented(int sock_fd, char *buf, Py_ssize_t len, int flags)
{
    int n = 0;
    Py_ssize_t remaining = len;

    while (remaining > 0) {
        unsigned int segment;

        segment = ((size_t)remaining >= SEGMENT_SIZE ?
                   SEGMENT_SIZE : (unsigned int) remaining);
        n = send(sock_fd, buf, segment, flags);
        if (n < 0) {
            return n;
        }
        remaining -= segment;
        buf += segment;
    } /* end while */

    return len;
}
#endif

/* Function to perform the setting of socket blocking mode
   internally. block = (1 | 0).
   Always returns 1; the results of the underlying calls are not
   checked.  Most platform APIs take the inverted flag ("non-blocking"),
   hence the repeated `block = !block`. */
static int
internal_setblocking(PySocketSockObject *s, int block)
{
#ifndef RISCOS
#ifndef MS_WINDOWS
    int delay_flag;
#endif
#endif

    Py_BEGIN_ALLOW_THREADS
#ifdef __BEOS__
    block = !block;
    setsockopt(s->sock_fd, SOL_SOCKET, SO_NONBLOCK,
               (void *)(&block), sizeof(int));
#else
#ifndef RISCOS
#ifndef MS_WINDOWS
#if defined(PYOS_OS2) && !defined(PYCC_GCC)
    block = !block;
    ioctl(s->sock_fd, FIONBIO, (caddr_t)&block, sizeof(block));
#elif defined(__VMS)
    block = !block;
    ioctl(s->sock_fd, FIONBIO, (unsigned int *)&block);
#else  /* !PYOS_OS2 && !__VMS */
    delay_flag = fcntl(s->sock_fd, F_GETFL, 0);
    if (block)
        delay_flag &= (~O_NONBLOCK);
    else
        delay_flag |= O_NONBLOCK;
    fcntl(s->sock_fd, F_SETFL, delay_flag);
#endif /* !PYOS_OS2 */
#else /* MS_WINDOWS */
    block = !block;
    ioctlsocket(s->sock_fd, FIONBIO, (u_long*)&block);
#endif /* MS_WINDOWS */
#else /* RISCOS */
    block = !block;
    socketioctl(s->sock_fd, FIONBIO, (u_long*)&block);
#endif /* RISCOS */
#endif /* __BEOS__ */
    Py_END_ALLOW_THREADS

    /* Since these don't return anything */
    return 1;
}

/* Do a select()/poll() on the socket, if necessary (sock_timeout > 0).
   The argument writing indicates the direction.
   This does not raise an exception; we'll let our caller do that
   after they've reacquired the interpreter lock.
   Returns 1 on timeout, -1 on error, 0 otherwise. */
static int
internal_select_ex(PySocketSockObject *s, int writing, double interval)
{
    int n;

    /* Nothing to do unless we're in timeout mode (not non-blocking) */
    if (s->sock_timeout <= 0.0)
        return 0;

    /* Guard against closed socket */
    if (s->sock_fd < 0)
        return 0;

    /* Handling this condition here simplifies the select loops */
    /* A negative interval means the deadline already passed: timeout. */
    if (interval < 0.0)
        return 1;

    /* Prefer poll, if available, since you can poll() any fd
     * which can't be done with select(). */
#ifdef HAVE_POLL
    {
        struct pollfd pollfd;
        int timeout;

        pollfd.fd = s->sock_fd;
        pollfd.events = writing ? POLLOUT : POLLIN;

        /* s->sock_timeout is in seconds, timeout in ms */
        /* +0.5 rounds to the nearest millisecond instead of truncating. */
        timeout = (int)(interval * 1000 + 0.5);
        n = poll(&pollfd, 1, timeout);
    }
#else
    {
        /* Construct the arguments to select */
        fd_set fds;
        struct timeval tv;
        tv.tv_sec = (int)interval;
        tv.tv_usec = (int)((interval - tv.tv_sec) * 1e6);
        FD_ZERO(&fds);
        FD_SET(s->sock_fd, &fds);

        /* See if the socket is ready */
        if (writing)
            n = select(s->sock_fd+1, NULL, &fds, NULL, &tv);
        else
            n = select(s->sock_fd+1, &fds, NULL, NULL, &tv);
    }
#endif

    /* Map the poll()/select() result onto the documented convention:
       <0 -> -1 (error), 0 -> 1 (timed out), >0 -> 0 (ready). */
    if (n < 0)
        return -1;
    if (n == 0)
        return 1;
    return 0;
}

/* Wait using the socket's full configured timeout as the interval. */
static int
internal_select(PySocketSockObject *s, int writing)
{
    return internal_select_ex(s, writing, s->sock_timeout);
}

/*
   Two macros for automatic retry of select() in case of false positives
   (for example, select() could indicate a socket is ready for reading
    but the data then discarded by the OS because of a wrong checksum.
   Here is an example of use:

   BEGIN_SELECT_LOOP(s)
   Py_BEGIN_ALLOW_THREADS
   timeout = internal_select_ex(s, 0, interval);
   if (!timeout)
       outlen = recv(s->sock_fd, cbuf, len, flags);
   Py_END_ALLOW_THREADS
   if (timeout == 1) {
       PyErr_SetString(socket_timeout, "timed out");
       return -1;
   }
   END_SELECT_LOOP(s)
*/

/* Opens a retry loop: computes the absolute deadline once, then clears
   errno before each attempt so END_SELECT_LOOP can test it reliably. */
#define BEGIN_SELECT_LOOP(s) \
    { \
        double deadline = 0, interval = s->sock_timeout; \
        int has_timeout = s->sock_timeout > 0.0; \
        if (has_timeout) { \
            deadline = _PyTime_FloatTime() + s->sock_timeout; \
        } \
        while (1) { \
            errno = 0;

/* Retries only spurious-wakeup errors (EWOULDBLOCK/EAGAIN) while a
   timeout is configured; shrinks the interval toward the deadline. */
#define END_SELECT_LOOP(s) \
            if (!has_timeout || \
                (!CHECK_ERRNO(EWOULDBLOCK) && !CHECK_ERRNO(EAGAIN))) \
                break; \
            interval = deadline - _PyTime_FloatTime(); \
        } \
    }

/* Initialize a new socket object. */

static double defaulttimeout = -1.0; /* Default timeout for new sockets */

/* Fill in a freshly created socket object.  New sockets inherit the
   module-wide default timeout; a non-negative default puts the fd into
   non-blocking mode immediately.
   NOTE(review): PyMODINIT_FUNC is an odd return type for this helper
   (it is not a module init function) -- kept as-is from upstream. */
PyMODINIT_FUNC
init_sockobject(PySocketSockObject *s,
                SOCKET_T fd, int family, int type, int proto)
{
#ifdef RISCOS
    int block = 1;
#endif
    s->sock_fd = fd;
    s->sock_family = family;
    s->sock_type = type;
    s->sock_proto = proto;
    s->sock_timeout = defaulttimeout;

    s->errorhandler = &set_error;

    if (defaulttimeout >= 0.0)
        internal_setblocking(s, 0);

#ifdef RISCOS
    if (taskwindow)
        socketioctl(s->sock_fd, 0x80046679, (u_long*)&block);
#endif
}


/* Create a new socket object.
   This just creates the object and initializes it.
   If the creation fails, return NULL and set an exception
   (implicit in NEWOBJ()). */

static PySocketSockObject *
new_sockobject(SOCKET_T fd, int family, int type, int proto)
{
    PySocketSockObject *s;
    s = (PySocketSockObject *)
        PyType_GenericNew(&sock_type, NULL, NULL);
    if (s != NULL)
        init_sockobject(s, fd, family, type, proto);
    return s;
}


/* Lock to allow python interpreter to continue, but only allow one
   thread to be in gethostbyname or getaddrinfo */
#if defined(USE_GETHOSTBYNAME_LOCK) || defined(USE_GETADDRINFO_LOCK)
static PyThread_type_lock netdb_lock;
#endif


/* Convert a string specifying a host name or one of a few symbolic
   names to a numeric IP address.
   This usually calls gethostbyname() to do the work; the names "" and
   "<broadcast>" are special.
   Return the length (IPv4 should be 4 bytes), or negative if
   an error occurred; then an exception is raised. */

static int
setipaddr(char *name, struct sockaddr *addr_ret, size_t addr_ret_size, int af)
{
    struct addrinfo hints, *res;
    int error;
    int d1, d2, d3, d4;
    char ch;

    memset((void *) addr_ret, '\0', sizeof(*addr_ret));
    if (name[0] == '\0') {
        /* Empty host name: resolve the wildcard address (AI_PASSIVE),
           as used by bind(). */
        int siz;
        memset(&hints, 0, sizeof(hints));
        hints.ai_family = af;
        hints.ai_socktype = SOCK_DGRAM;         /*dummy*/
        hints.ai_flags = AI_PASSIVE;
        Py_BEGIN_ALLOW_THREADS
        ACQUIRE_GETADDRINFO_LOCK
        error = getaddrinfo(NULL, "0", &hints, &res);
        Py_END_ALLOW_THREADS
        /* We assume that those thread-unsafe getaddrinfo() versions
           *are* safe regarding their return value, ie. that a
           subsequent call to getaddrinfo() does not destroy the
           outcome of the first call. */
        RELEASE_GETADDRINFO_LOCK
        if (error) {
            set_gaierror(error);
            return -1;
        }
        switch (res->ai_family) {
        case AF_INET:
            siz = 4;
            break;
#ifdef ENABLE_IPV6
        case AF_INET6:
            siz = 16;
            break;
#endif
        default:
            freeaddrinfo(res);
            PyErr_SetString(socket_error,
                "unsupported address family");
            return -1;
        }
        if (res->ai_next) {
            freeaddrinfo(res);
            PyErr_SetString(socket_error,
                "wildcard resolved to multiple address");
            return -1;
        }
        if (res->ai_addrlen < addr_ret_size)
            addr_ret_size = res->ai_addrlen;
        memcpy(addr_ret, res->ai_addr, addr_ret_size);
        freeaddrinfo(res);
        return siz;
    }
    if (name[0] == '<' && strcmp(name, "<broadcast>") == 0) {
        /* Symbolic broadcast address: only meaningful for IPv4. */
        struct sockaddr_in *sin;
        if (af != AF_INET && af != AF_UNSPEC) {
            PyErr_SetString(socket_error,
                "address family mismatched");
            return -1;
        }
        sin = (struct sockaddr_in *)addr_ret;
        memset((void *) sin, '\0', sizeof(*sin));
        sin->sin_family = AF_INET;
#ifdef HAVE_SOCKADDR_SA_LEN
        sin->sin_len = sizeof(*sin);
#endif
        sin->sin_addr.s_addr = INADDR_BROADCAST;
        return sizeof(sin->sin_addr);
    }
    /* Fast path: a dotted-quad IPv4 literal, parsed without touching
       the resolver.  sscanf() must return exactly 4 -- a match for the
       trailing %c would mean garbage after the last octet. */
    if (sscanf(name, "%d.%d.%d.%d%c", &d1, &d2, &d3, &d4, &ch) == 4 &&
        0 <= d1 && d1 <= 255 && 0 <= d2 && d2 <= 255 &&
        0 <= d3 && d3 <= 255 && 0 <= d4 && d4 <= 255) {
        struct sockaddr_in *sin;
        sin = (struct sockaddr_in *)addr_ret;
        sin->sin_addr.s_addr = htonl(
            ((long) d1 << 24) | ((long) d2 << 16) |
            ((long) d3 << 8) | ((long) d4 << 0));
        sin->sin_family = AF_INET;
#ifdef HAVE_SOCKADDR_SA_LEN
        sin->sin_len = sizeof(*sin);
#endif
        return 4;
    }
    /* General case: hand the name to getaddrinfo() with the GIL
       released (and the compatibility lock held where needed). */
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = af;
    Py_BEGIN_ALLOW_THREADS
    ACQUIRE_GETADDRINFO_LOCK
    error = getaddrinfo(name, NULL, &hints, &res);
#if defined(__digital__) && defined(__unix__)
    if (error == EAI_NONAME && af == AF_UNSPEC) {
        /* On Tru64 V5.1, numeric-to-addr conversion fails
           if no address family is given. Assume IPv4 for now.*/
        hints.ai_family = AF_INET;
        error = getaddrinfo(name, NULL, &hints, &res);
    }
#endif
    Py_END_ALLOW_THREADS
    RELEASE_GETADDRINFO_LOCK  /* see comment in setipaddr() */
    if (error) {
        set_gaierror(error);
        return -1;
    }
    if (res->ai_addrlen < addr_ret_size)
        addr_ret_size = res->ai_addrlen;
    memcpy((char *) addr_ret, res->ai_addr, addr_ret_size);
    freeaddrinfo(res);
    switch (addr_ret->sa_family) {
    case AF_INET:
        return 4;
#ifdef ENABLE_IPV6
    case AF_INET6:
        return 16;
#endif
    default:
        PyErr_SetString(socket_error, "unknown address family");
        return -1;
    }
}


/* Create a string object representing an IP address.
   This is always a string of the form 'dd.dd.dd.dd'
   (with variable size numbers). */

static PyObject *
makeipaddr(struct sockaddr *addr, int addrlen)
{
    char buf[NI_MAXHOST];
    int error;

    /* NI_NUMERICHOST: format the address numerically, never do a
       reverse DNS lookup. */
    error = getnameinfo(addr, addrlen, buf, sizeof(buf), NULL, 0,
        NI_NUMERICHOST);
    if (error) {
        set_gaierror(error);
        return NULL;
    }
    return PyString_FromString(buf);
}


#ifdef USE_BLUETOOTH
/* Convert a string representation of a Bluetooth address into
   a numeric address.  Returns the length (6), or raises an
   exception and returns -1 if an error occurred.
 */
static int
setbdaddr(const char *name, bdaddr_t *bdaddr)
{
    unsigned int b0, b1, b2, b3, b4, b5;
    char ch;
    int n;

    /* The first byte written in the string is stored at b[5] and the
       last at b[0]; makebdaddr() below prints in the mirrored order.
       The trailing %c must NOT match (n == 6), or there is trailing
       garbage after the address. */
    n = sscanf(name, "%X:%X:%X:%X:%X:%X%c",
               &b5, &b4, &b3, &b2, &b1, &b0, &ch);
    /* OR-ing all six fields and comparing once against 256 checks that
       every byte fits in 8 bits with a single test. */
    if (n == 6 && (b0 | b1 | b2 | b3 | b4 | b5) < 256) {
        bdaddr->b[0] = b0;
        bdaddr->b[1] = b1;
        bdaddr->b[2] = b2;
        bdaddr->b[3] = b3;
        bdaddr->b[4] = b4;
        bdaddr->b[5] = b5;
        return 6;
    } else {
        PyErr_SetString(socket_error, "bad bluetooth address");
        return -1;
    }
}

/* Create a string representation of the Bluetooth address.  This is always a
   string of the form 'XX:XX:XX:XX:XX:XX' where XX is a two digit hexadecimal
   value (zero padded if necessary). */
static PyObject *
makebdaddr(bdaddr_t *bdaddr)
{
    char buf[(6 * 2) + 5 + 1];

    /* Print b[5] first so the output mirrors the order accepted by
       setbdaddr() above. */
    sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X",
        bdaddr->b[5], bdaddr->b[4], bdaddr->b[3],
        bdaddr->b[2], bdaddr->b[1], bdaddr->b[0]);
    return PyString_FromString(buf);
}
#endif


/* Create an object representing the given socket address,
   suitable for passing it back to bind(), connect() etc.
   The family field of the sockaddr structure is inspected
   to determine what kind of address it really is.
*/ /*ARGSUSED*/ static PyObject * makesockaddr(int sockfd, struct sockaddr *addr, int addrlen, int proto) { if (addrlen == 0) { /* No address -- may be recvfrom() from known socket */ Py_INCREF(Py_None); return Py_None; } #ifdef __BEOS__ /* XXX: BeOS version of accept() doesn't set family correctly */ addr->sa_family = AF_INET; #endif /* __BEOS__ */ switch (addr->sa_family) { case AF_INET: { struct sockaddr_in *a; PyObject *addrobj = makeipaddr(addr, sizeof(*a)); PyObject *ret = NULL; if (addrobj) { a = (struct sockaddr_in *)addr; ret = Py_BuildValue("Oi", addrobj, ntohs(a->sin_port)); Py_DECREF(addrobj); } return ret; } #if defined(AF_UNIX) case AF_UNIX: { struct sockaddr_un *a = (struct sockaddr_un *) addr; #ifdef linux if (a->sun_path[0] == 0) { /* Linux abstract namespace */ addrlen -= offsetof(struct sockaddr_un, sun_path); return PyString_FromStringAndSize(a->sun_path, addrlen); } else #endif /* linux */ { /* regular NULL-terminated string */ return PyString_FromString(a->sun_path); } } #endif /* AF_UNIX */ #if defined(AF_NETLINK) case AF_NETLINK: { struct sockaddr_nl *a = (struct sockaddr_nl *) addr; return Py_BuildValue("II", a->nl_pid, a->nl_groups); } #endif /* AF_NETLINK */ #ifdef ENABLE_IPV6 case AF_INET6: { struct sockaddr_in6 *a; PyObject *addrobj = makeipaddr(addr, sizeof(*a)); PyObject *ret = NULL; if (addrobj) { a = (struct sockaddr_in6 *)addr; ret = Py_BuildValue("OiII", addrobj, ntohs(a->sin6_port), ntohl(a->sin6_flowinfo), a->sin6_scope_id); Py_DECREF(addrobj); } return ret; } #endif /* ENABLE_IPV6 */ #ifdef USE_BLUETOOTH case AF_BLUETOOTH: switch (proto) { case BTPROTO_L2CAP: { struct sockaddr_l2 *a = (struct sockaddr_l2 *) addr; PyObject *addrobj = makebdaddr(&_BT_L2_MEMB(a, bdaddr)); PyObject *ret = NULL; if (addrobj) { ret = Py_BuildValue("Oi", addrobj, _BT_L2_MEMB(a, psm)); Py_DECREF(addrobj); } return ret; } case BTPROTO_RFCOMM: { struct sockaddr_rc *a = (struct sockaddr_rc *) addr; PyObject *addrobj = makebdaddr(&_BT_RC_MEMB(a, bdaddr)); 
PyObject *ret = NULL; if (addrobj) { ret = Py_BuildValue("Oi", addrobj, _BT_RC_MEMB(a, channel)); Py_DECREF(addrobj); } return ret; } case BTPROTO_HCI: { struct sockaddr_hci *a = (struct sockaddr_hci *) addr; #if defined(__NetBSD__) || defined(__DragonFly__) return makebdaddr(&_BT_HCI_MEMB(a, bdaddr)); #else /* __NetBSD__ || __DragonFly__ */ PyObject *ret = NULL; ret = Py_BuildValue("i", _BT_HCI_MEMB(a, dev)); return ret; #endif /* !(__NetBSD__ || __DragonFly__) */ } #if !defined(__FreeBSD__) case BTPROTO_SCO: { struct sockaddr_sco *a = (struct sockaddr_sco *) addr; return makebdaddr(&_BT_SCO_MEMB(a, bdaddr)); } #endif /* !__FreeBSD__ */ default: PyErr_SetString(PyExc_ValueError, "Unknown Bluetooth protocol"); return NULL; } #endif /* USE_BLUETOOTH */ #if defined(HAVE_NETPACKET_PACKET_H) && defined(SIOCGIFNAME) case AF_PACKET: { struct sockaddr_ll *a = (struct sockaddr_ll *)addr; const char *ifname = ""; struct ifreq ifr; /* need to look up interface name give index */ if (a->sll_ifindex) { ifr.ifr_ifindex = a->sll_ifindex; if (ioctl(sockfd, SIOCGIFNAME, &ifr) == 0) ifname = ifr.ifr_name; } return Py_BuildValue("shbhs#", ifname, ntohs(a->sll_protocol), a->sll_pkttype, a->sll_hatype, a->sll_addr, a->sll_halen); } #endif /* HAVE_NETPACKET_PACKET_H && SIOCGIFNAME */ #ifdef HAVE_LINUX_TIPC_H case AF_TIPC: { struct sockaddr_tipc *a = (struct sockaddr_tipc *) addr; if (a->addrtype == TIPC_ADDR_NAMESEQ) { return Py_BuildValue("IIIII", a->addrtype, a->addr.nameseq.type, a->addr.nameseq.lower, a->addr.nameseq.upper, a->scope); } else if (a->addrtype == TIPC_ADDR_NAME) { return Py_BuildValue("IIIII", a->addrtype, a->addr.name.name.type, a->addr.name.name.instance, a->addr.name.name.instance, a->scope); } else if (a->addrtype == TIPC_ADDR_ID) { return Py_BuildValue("IIIII", a->addrtype, a->addr.id.node, a->addr.id.ref, 0, a->scope); } else { PyErr_SetString(PyExc_ValueError, "Invalid address type"); return NULL; } } #endif /* HAVE_LINUX_TIPC_H */ /* More cases here... 
*/ default: /* If we don't know the address family, don't raise an exception -- return it as a tuple. */ return Py_BuildValue("is#", addr->sa_family, addr->sa_data, sizeof(addr->sa_data)); } } /* Parse a socket address argument according to the socket object's address family. Return 1 if the address was in the proper format, 0 of not. The address is returned through addr_ret, its length through len_ret. */ static int getsockaddrarg(PySocketSockObject *s, PyObject *args, struct sockaddr *addr_ret, int *len_ret) { switch (s->sock_family) { #if defined(AF_UNIX) case AF_UNIX: { struct sockaddr_un* addr; char *path; int len; if (!PyArg_Parse(args, "t#", &path, &len)) return 0; addr = (struct sockaddr_un*)addr_ret; #ifdef linux if (len > 0 && path[0] == 0) { /* Linux abstract namespace extension */ if (len > sizeof addr->sun_path) { PyErr_SetString(socket_error, "AF_UNIX path too long"); return 0; } } else #endif /* linux */ { /* regular NULL-terminated string */ if (len >= sizeof addr->sun_path) { PyErr_SetString(socket_error, "AF_UNIX path too long"); return 0; } addr->sun_path[len] = 0; } addr->sun_family = s->sock_family; memcpy(addr->sun_path, path, len); #if defined(PYOS_OS2) *len_ret = sizeof(*addr); #else /* PYOS_OS2 */ *len_ret = len + offsetof(struct sockaddr_un, sun_path); #endif /* !PYOS_OS2 */ return 1; } #endif /* AF_UNIX */ #if defined(AF_NETLINK) case AF_NETLINK: { struct sockaddr_nl* addr; int pid, groups; addr = (struct sockaddr_nl *)addr_ret; if (!PyTuple_Check(args)) { PyErr_Format( PyExc_TypeError, "getsockaddrarg: " "AF_NETLINK address must be tuple, not %.500s", Py_TYPE(args)->tp_name); return 0; } if (!PyArg_ParseTuple(args, "II:getsockaddrarg", &pid, &groups)) return 0; addr->nl_family = AF_NETLINK; addr->nl_pid = pid; addr->nl_groups = groups; *len_ret = sizeof(*addr); return 1; } #endif /* AF_NETLINK */ case AF_INET: { struct sockaddr_in* addr; char *host; int port, result; if (!PyTuple_Check(args)) { PyErr_Format( PyExc_TypeError, 
"getsockaddrarg: " "AF_INET address must be tuple, not %.500s", Py_TYPE(args)->tp_name); return 0; } if (!PyArg_ParseTuple(args, "eti:getsockaddrarg", "idna", &host, &port)) return 0; addr=(struct sockaddr_in*)addr_ret; result = setipaddr(host, (struct sockaddr *)addr, sizeof(*addr), AF_INET); PyMem_Free(host); if (result < 0) return 0; if (port < 0 || port > 0xffff) { PyErr_SetString( PyExc_OverflowError, "getsockaddrarg: port must be 0-65535."); return 0; } addr->sin_family = AF_INET; addr->sin_port = htons((short)port); *len_ret = sizeof *addr; return 1; } #ifdef ENABLE_IPV6 case AF_INET6: { struct sockaddr_in6* addr; char *host; int port, result; unsigned int flowinfo, scope_id; flowinfo = scope_id = 0; if (!PyTuple_Check(args)) { PyErr_Format( PyExc_TypeError, "getsockaddrarg: " "AF_INET6 address must be tuple, not %.500s", Py_TYPE(args)->tp_name); return 0; } if (!PyArg_ParseTuple(args, "eti|II", "idna", &host, &port, &flowinfo, &scope_id)) { return 0; } addr = (struct sockaddr_in6*)addr_ret; result = setipaddr(host, (struct sockaddr *)addr, sizeof(*addr), AF_INET6); PyMem_Free(host); if (result < 0) return 0; if (port < 0 || port > 0xffff) { PyErr_SetString( PyExc_OverflowError, "getsockaddrarg: port must be 0-65535."); return 0; } if (flowinfo > 0xfffff) { PyErr_SetString( PyExc_OverflowError, "getsockaddrarg: flowinfo must be 0-1048575."); return 0; } addr->sin6_family = s->sock_family; addr->sin6_port = htons((short)port); addr->sin6_flowinfo = htonl(flowinfo); addr->sin6_scope_id = scope_id; *len_ret = sizeof *addr; return 1; } #endif /* ENABLE_IPV6 */ #ifdef USE_BLUETOOTH case AF_BLUETOOTH: { switch (s->sock_proto) { case BTPROTO_L2CAP: { struct sockaddr_l2 *addr; const char *straddr; addr = (struct sockaddr_l2 *)addr_ret; memset(addr, 0, sizeof(struct sockaddr_l2)); _BT_L2_MEMB(addr, family) = AF_BLUETOOTH; if (!PyArg_ParseTuple(args, "si", &straddr, &_BT_L2_MEMB(addr, psm))) { PyErr_SetString(socket_error, "getsockaddrarg: " "wrong format"); return 0; 
} if (setbdaddr(straddr, &_BT_L2_MEMB(addr, bdaddr)) < 0) return 0; *len_ret = sizeof *addr; return 1; } case BTPROTO_RFCOMM: { struct sockaddr_rc *addr; const char *straddr; addr = (struct sockaddr_rc *)addr_ret; _BT_RC_MEMB(addr, family) = AF_BLUETOOTH; if (!PyArg_ParseTuple(args, "si", &straddr, &_BT_RC_MEMB(addr, channel))) { PyErr_SetString(socket_error, "getsockaddrarg: " "wrong format"); return 0; } if (setbdaddr(straddr, &_BT_RC_MEMB(addr, bdaddr)) < 0) return 0; *len_ret = sizeof *addr; return 1; } case BTPROTO_HCI: { struct sockaddr_hci *addr = (struct sockaddr_hci *)addr_ret; #if defined(__NetBSD__) || defined(__DragonFly__) const char *straddr; _BT_HCI_MEMB(addr, family) = AF_BLUETOOTH; if (!PyBytes_Check(args)) { PyErr_SetString(PyExc_OSError, "getsockaddrarg: " "wrong format"); return 0; } straddr = PyBytes_AS_STRING(args); if (setbdaddr(straddr, &_BT_HCI_MEMB(addr, bdaddr)) < 0) return 0; #else /* __NetBSD__ || __DragonFly__ */ _BT_HCI_MEMB(addr, family) = AF_BLUETOOTH; if (!PyArg_ParseTuple(args, "i", &_BT_HCI_MEMB(addr, dev))) { PyErr_SetString(socket_error, "getsockaddrarg: " "wrong format"); return 0; } #endif /* !(__NetBSD__ || __DragonFly__) */ *len_ret = sizeof *addr; return 1; } #if !defined(__FreeBSD__) case BTPROTO_SCO: { struct sockaddr_sco *addr; const char *straddr; addr = (struct sockaddr_sco *)addr_ret; _BT_SCO_MEMB(addr, family) = AF_BLUETOOTH; straddr = PyString_AsString(args); if (straddr == NULL) { PyErr_SetString(socket_error, "getsockaddrarg: " "wrong format"); return 0; } if (setbdaddr(straddr, &_BT_SCO_MEMB(addr, bdaddr)) < 0) return 0; *len_ret = sizeof *addr; return 1; } #endif /* !__FreeBSD__ */ default: PyErr_SetString(socket_error, "getsockaddrarg: unknown Bluetooth protocol"); return 0; } } #endif /* USE_BLUETOOTH */ #if defined(HAVE_NETPACKET_PACKET_H) && defined(SIOCGIFINDEX) case AF_PACKET: { struct sockaddr_ll* addr; struct ifreq ifr; const char *interfaceName; int protoNumber; int hatype = 0; int pkttype = 0; char 
*haddr = NULL; unsigned int halen = 0; if (!PyTuple_Check(args)) { PyErr_Format( PyExc_TypeError, "getsockaddrarg: " "AF_PACKET address must be tuple, not %.500s", Py_TYPE(args)->tp_name); return 0; } if (!PyArg_ParseTuple(args, "si|iis#", &interfaceName, &protoNumber, &pkttype, &hatype, &haddr, &halen)) return 0; strncpy(ifr.ifr_name, interfaceName, sizeof(ifr.ifr_name)); ifr.ifr_name[(sizeof(ifr.ifr_name))-1] = '\0'; if (ioctl(s->sock_fd, SIOCGIFINDEX, &ifr) < 0) { s->errorhandler(); return 0; } if (halen > 8) { PyErr_SetString(PyExc_ValueError, "Hardware address must be 8 bytes or less"); return 0; } if (protoNumber < 0 || protoNumber > 0xffff) { PyErr_SetString( PyExc_OverflowError, "getsockaddrarg: protoNumber must be 0-65535."); return 0; } addr = (struct sockaddr_ll*)addr_ret; addr->sll_family = AF_PACKET; addr->sll_protocol = htons((short)protoNumber); addr->sll_ifindex = ifr.ifr_ifindex; addr->sll_pkttype = pkttype; addr->sll_hatype = hatype; if (halen != 0) { memcpy(&addr->sll_addr, haddr, halen); } addr->sll_halen = halen; *len_ret = sizeof *addr; return 1; } #endif /* HAVE_NETPACKET_PACKET_H && SIOCGIFINDEX */ #ifdef HAVE_LINUX_TIPC_H case AF_TIPC: { unsigned int atype, v1, v2, v3; unsigned int scope = TIPC_CLUSTER_SCOPE; struct sockaddr_tipc *addr; if (!PyTuple_Check(args)) { PyErr_Format( PyExc_TypeError, "getsockaddrarg: " "AF_TIPC address must be tuple, not %.500s", Py_TYPE(args)->tp_name); return 0; } if (!PyArg_ParseTuple(args, "IIII|I;Invalid TIPC address format", &atype, &v1, &v2, &v3, &scope)) return 0; addr = (struct sockaddr_tipc *) addr_ret; memset(addr, 0, sizeof(struct sockaddr_tipc)); addr->family = AF_TIPC; addr->scope = scope; addr->addrtype = atype; if (atype == TIPC_ADDR_NAMESEQ) { addr->addr.nameseq.type = v1; addr->addr.nameseq.lower = v2; addr->addr.nameseq.upper = v3; } else if (atype == TIPC_ADDR_NAME) { addr->addr.name.name.type = v1; addr->addr.name.name.instance = v2; } else if (atype == TIPC_ADDR_ID) { addr->addr.id.node = 
v1;
            addr->addr.id.ref = v2;
        } else { /* Shouldn't happen */
            PyErr_SetString(PyExc_TypeError, "Invalid address type");
            return 0;
        }
        *len_ret = sizeof(*addr);
        return 1;
    }
#endif /* HAVE_LINUX_TIPC_H */

    /* More cases here... */

    default:
        PyErr_SetString(socket_error, "getsockaddrarg: bad family");
        return 0;

    }
}


/* Get the address length according to the socket object's address family.
   Return 1 if the family is known, 0 otherwise.  The length is returned
   through len_ret.
   NOTE(review): callers use this to size a sock_addr_t before accept()/
   getsockname()/recvfrom(); it never inspects the socket itself beyond
   sock_family/sock_proto. */

static int
getsockaddrlen(PySocketSockObject *s, socklen_t *len_ret)
{
    switch (s->sock_family) {

#if defined(AF_UNIX)
    case AF_UNIX:
    {
        *len_ret = sizeof (struct sockaddr_un);
        return 1;
    }
#endif /* AF_UNIX */
#if defined(AF_NETLINK)
    case AF_NETLINK:
    {
        *len_ret = sizeof (struct sockaddr_nl);
        return 1;
    }
#endif /* AF_NETLINK */

    case AF_INET:
    {
        *len_ret = sizeof (struct sockaddr_in);
        return 1;
    }

#ifdef ENABLE_IPV6
    case AF_INET6:
    {
        *len_ret = sizeof (struct sockaddr_in6);
        return 1;
    }
#endif /* ENABLE_IPV6 */

#ifdef USE_BLUETOOTH
    case AF_BLUETOOTH:
    {
        /* Bluetooth address size depends on the transport protocol,
           not just the family. */
        switch(s->sock_proto) {

        case BTPROTO_L2CAP:
            *len_ret = sizeof (struct sockaddr_l2);
            return 1;
        case BTPROTO_RFCOMM:
            *len_ret = sizeof (struct sockaddr_rc);
            return 1;
        case BTPROTO_HCI:
            *len_ret = sizeof (struct sockaddr_hci);
            return 1;
#if !defined(__FreeBSD__)
        case BTPROTO_SCO:
            *len_ret = sizeof (struct sockaddr_sco);
            return 1;
#endif /* !__FreeBSD__ */
        default:
            PyErr_SetString(socket_error, "getsockaddrlen: "
                            "unknown BT protocol");
            return 0;

        }
    }
#endif /* USE_BLUETOOTH */

#ifdef HAVE_NETPACKET_PACKET_H
    case AF_PACKET:
    {
        *len_ret = sizeof (struct sockaddr_ll);
        return 1;
    }
#endif /* HAVE_NETPACKET_PACKET_H */

#ifdef HAVE_LINUX_TIPC_H
    case AF_TIPC:
    {
        *len_ret = sizeof (struct sockaddr_tipc);
        return 1;
    }
#endif /* HAVE_LINUX_TIPC_H */

    /* More cases here... */

    default:
        PyErr_SetString(socket_error, "getsockaddrlen: bad family");
        return 0;

    }
}


/* s.accept() method */

static PyObject *
sock_accept(PySocketSockObject *s)
{
    sock_addr_t addrbuf;
    SOCKET_T newfd;
    socklen_t addrlen;          /* value-result: accept() writes the actual
                                   peer address length back into it */
    PyObject *sock = NULL;
    PyObject *addr = NULL;
    PyObject *res = NULL;
    int timeout;

    if (!getsockaddrlen(s, &addrlen))
        return NULL;
    memset(&addrbuf, 0, addrlen);

    newfd = INVALID_SOCKET;

    if (!IS_SELECTABLE(s))
        return select_error();

    /* BEGIN_SELECT_LOOP retries the select/accept pair; the GIL is
       released around the blocking call. */
    BEGIN_SELECT_LOOP(s)
    Py_BEGIN_ALLOW_THREADS
    timeout = internal_select_ex(s, 0, interval);
    if (!timeout)
        newfd = accept(s->sock_fd, SAS2SA(&addrbuf), &addrlen);
    Py_END_ALLOW_THREADS

    if (timeout == 1) {
        PyErr_SetString(socket_timeout, "timed out");
        return NULL;
    }
    END_SELECT_LOOP(s)

    if (newfd == INVALID_SOCKET)
        return s->errorhandler();

    /* Create the new object with unspecified family,
       to avoid calls to bind() etc. on it. */
    sock = (PyObject *) new_sockobject(newfd,
                                       s->sock_family,
                                       s->sock_type,
                                       s->sock_proto);

    if (sock == NULL) {
        SOCKETCLOSE(newfd);
        goto finally;
    }
    addr = makesockaddr(s->sock_fd, SAS2SA(&addrbuf),
                        addrlen, s->sock_proto);
    if (addr == NULL)
        goto finally;

    res = PyTuple_Pack(2, sock, addr);

finally:
    /* PyTuple_Pack took its own references (or failed); drop ours. */
    Py_XDECREF(sock);
    Py_XDECREF(addr);
    return res;
}

PyDoc_STRVAR(accept_doc,
"accept() -> (socket object, address info)\n\
\n\
Wait for an incoming connection. Return a new socket representing the\n\
connection, and the address of the client. For IP sockets, the address\n\
info is a pair (hostaddr, port).");

/* s.setblocking(flag) method.  Argument:
   False -- non-blocking mode; same as settimeout(0)
   True -- blocking mode; same as settimeout(None)
*/

static PyObject *
sock_setblocking(PySocketSockObject *s, PyObject *arg)
{
    long block;

    block = PyInt_AsLong(arg);
    if (block == -1 && PyErr_Occurred())
        return NULL;

    /* sock_timeout sentinel: negative means "blocking, no timeout",
       0.0 means non-blocking (see sock_settimeout below). */
    s->sock_timeout = block ?
-1.0 : 0.0;
    internal_setblocking(s, block);

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(setblocking_doc,
"setblocking(flag)\n\
\n\
Set the socket to blocking (flag is true) or non-blocking (false).\n\
setblocking(True) is equivalent to settimeout(None);\n\
setblocking(False) is equivalent to settimeout(0.0).");

/* s.settimeout(timeout) method.  Argument:
   None -- no timeout, blocking mode; same as setblocking(True)
   0.0 -- non-blocking mode; same as setblocking(False)
   > 0 -- timeout mode; operations time out after timeout seconds
   < 0 -- illegal; raises an exception
*/

static PyObject *
sock_settimeout(PySocketSockObject *s, PyObject *arg)
{
    double timeout;

    if (arg == Py_None)
        timeout = -1.0;         /* internal sentinel for "no timeout" */
    else {
        timeout = PyFloat_AsDouble(arg);
        /* A negative result is either a conversion error (exception
           already set) or an out-of-range user value. */
        if (timeout < 0.0) {
            if (!PyErr_Occurred())
                PyErr_SetString(PyExc_ValueError,
                                "Timeout value out of range");
            return NULL;
        }
    }

    s->sock_timeout = timeout;
    /* The fd itself stays blocking only in the "no timeout" case. */
    internal_setblocking(s, timeout < 0.0);

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(settimeout_doc,
"settimeout(timeout)\n\
\n\
Set a timeout on socket operations. 'timeout' can be a float,\n\
giving in seconds, or None. Setting a timeout of None disables\n\
the timeout feature and is equivalent to setblocking(1).\n\
Setting a timeout of zero is the same as setblocking(0).");

/* s.gettimeout() method.
   Returns the timeout associated with a socket. */

static PyObject *
sock_gettimeout(PySocketSockObject *s)
{
    /* Negative sentinel maps back to None for the caller. */
    if (s->sock_timeout < 0.0) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    else
        return PyFloat_FromDouble(s->sock_timeout);
}

PyDoc_STRVAR(gettimeout_doc,
"gettimeout() -> timeout\n\
\n\
Returns the timeout in seconds (float) associated with socket \n\
operations. A timeout of None indicates that timeouts on socket \n\
operations are disabled.");

#ifdef RISCOS
/* s.sleeptaskw(1 | 0) method */

static PyObject *
sock_sleeptaskw(PySocketSockObject *s,PyObject *arg)
{
    int block;
    block = PyInt_AsLong(arg);
    if (block == -1 && PyErr_Occurred())
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    /* RISC OS-specific ioctl; 0x80046679 presumably toggles taskwindow
       sleeping -- TODO confirm against RISC OS socket library docs. */
    socketioctl(s->sock_fd, 0x80046679, (u_long*)&block);
    Py_END_ALLOW_THREADS

    Py_INCREF(Py_None);
    return Py_None;
}
PyDoc_STRVAR(sleeptaskw_doc,
"sleeptaskw(flag)\n\
\n\
Allow sleeps in taskwindows.");
#endif


/* s.setsockopt() method.
   With an integer third argument, sets an integer option.
   With a string third argument, sets an option from a buffer;
   use optional built-in module 'struct' to encode the string. */

static PyObject *
sock_setsockopt(PySocketSockObject *s, PyObject *args)
{
    int level;
    int optname;
    int res;
    char *buf;
    int buflen;
    int flag;

    /* First try the integer-value form; on failure clear the error and
       retry with the string-buffer form. */
    if (PyArg_ParseTuple(args, "iii:setsockopt",
                         &level, &optname, &flag)) {
        buf = (char *) &flag;
        buflen = sizeof flag;
    }
    else {
        PyErr_Clear();
        if (!PyArg_ParseTuple(args, "iis#:setsockopt",
                              &level, &optname, &buf, &buflen))
            return NULL;
    }
    res = setsockopt(s->sock_fd, level, optname, (void *)buf, buflen);
    if (res < 0)
        return s->errorhandler();
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(setsockopt_doc,
"setsockopt(level, option, value)\n\
\n\
Set a socket option. See the Unix manual for level and option.\n\
The value argument can either be an integer or a string.");


/* s.getsockopt() method.
   With two arguments, retrieves an integer option.
   With a third integer argument, retrieves a string buffer of that size;
   use optional built-in module 'struct' to decode the string. */

static PyObject *
sock_getsockopt(PySocketSockObject *s, PyObject *args)
{
    int level;
    int optname;
    int res;
    PyObject *buf;
    socklen_t buflen = 0;

#ifdef __BEOS__
    /* We have incomplete socket support.
*/
    PyErr_SetString(socket_error, "getsockopt not supported");
    return NULL;
#else

    if (!PyArg_ParseTuple(args, "ii|i:getsockopt",
                          &level, &optname, &buflen))
        return NULL;

    /* buflen == 0: integer option requested. */
    if (buflen == 0) {
        int flag = 0;
        socklen_t flagsize = sizeof flag;
        res = getsockopt(s->sock_fd, level, optname,
                         (void *)&flag, &flagsize);
        if (res < 0)
            return s->errorhandler();
        return PyInt_FromLong(flag);
    }
#ifdef __VMS
    /* socklen_t is unsigned so no negative test is needed,
       test buflen == 0 is previously done */
    if (buflen > 1024) {
#else
    if (buflen <= 0 || buflen > 1024) {
#endif
        PyErr_SetString(socket_error,
                        "getsockopt buflen out of range");
        return NULL;
    }
    buf = PyString_FromStringAndSize((char *)NULL, buflen);
    if (buf == NULL)
        return NULL;
    res = getsockopt(s->sock_fd, level, optname,
                     (void *)PyString_AS_STRING(buf), &buflen);
    if (res < 0) {
        Py_DECREF(buf);
        return s->errorhandler();
    }
    /* On failure _PyString_Resize sets buf to NULL with an exception
       set, so returning buf still propagates the error. */
    _PyString_Resize(&buf, buflen);
    return buf;
#endif /* __BEOS__ */
}

PyDoc_STRVAR(getsockopt_doc,
"getsockopt(level, option[, buffersize]) -> value\n\
\n\
Get a socket option. See the Unix manual for level and option.\n\
If a nonzero buffersize argument is given, the return value is a\n\
string of that length; otherwise it is an integer.");


/* s.bind(sockaddr) method */

static PyObject *
sock_bind(PySocketSockObject *s, PyObject *addro)
{
    sock_addr_t addrbuf;
    int addrlen;
    int res;

    if (!getsockaddrarg(s, addro, SAS2SA(&addrbuf), &addrlen))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    res = bind(s->sock_fd, SAS2SA(&addrbuf), addrlen);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return s->errorhandler();
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(bind_doc,
"bind(address)\n\
\n\
Bind the socket to a local address. For IP sockets, the address is a\n\
pair (host, port); the host must refer to the local host. For raw packet\n\
sockets the address is a tuple (ifname, proto [,pkttype [,hatype]])");


/* s.close() method.
   Set the file descriptor to -1 so operations tried subsequently
   will surely fail. */

static PyObject *
sock_close(PySocketSockObject *s)
{
    SOCKET_T fd;

    /* Invalidate sock_fd before releasing the GIL so no other thread
       can operate on the dying descriptor. */
    if ((fd = s->sock_fd) != -1) {
        s->sock_fd = -1;
        Py_BEGIN_ALLOW_THREADS
        (void) SOCKETCLOSE(fd);
        Py_END_ALLOW_THREADS
    }
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(close_doc,
"close()\n\
\n\
Close the socket. It cannot be used after this call.");

/* Shared connect() helper.  Returns 0 on success, otherwise an
   errno/WSA error code; *timeoutp is set to 1 when the operation
   timed out under a socket timeout. */
static int
internal_connect(PySocketSockObject *s, struct sockaddr *addr, int addrlen,
                 int *timeoutp)
{
    int res, timeout;

    timeout = 0;
    res = connect(s->sock_fd, addr, addrlen);

#ifdef MS_WINDOWS

    if (s->sock_timeout > 0.0) {
        if (res < 0 && WSAGetLastError() == WSAEWOULDBLOCK &&
            IS_SELECTABLE(s)) {
            /* This is a mess. Best solution: trust select */
            fd_set fds;
            fd_set fds_exc;
            struct timeval tv;
            tv.tv_sec = (int)s->sock_timeout;
            tv.tv_usec = (int)((s->sock_timeout - tv.tv_sec) * 1e6);
            FD_ZERO(&fds);
            FD_SET(s->sock_fd, &fds);
            FD_ZERO(&fds_exc);
            FD_SET(s->sock_fd, &fds_exc);
            res = select(s->sock_fd+1, NULL, &fds, &fds_exc, &tv);
            if (res == 0) {
                res = WSAEWOULDBLOCK;
                timeout = 1;
            } else if (res > 0) {
                if (FD_ISSET(s->sock_fd, &fds))
                    /* The socket is in the writeable set - this
                       means connected */
                    res = 0;
                else {
                    /* As per MS docs, we need to call getsockopt()
                       to get the underlying error */
                    int res_size = sizeof res;
                    /* It must be in the exception set */
                    assert(FD_ISSET(s->sock_fd, &fds_exc));
                    if (0 == getsockopt(s->sock_fd, SOL_SOCKET, SO_ERROR,
                                        (char *)&res, &res_size))
                        /* getsockopt also clears WSAGetLastError,
                           so reset it back. */
                        WSASetLastError(res);
                    else
                        res = WSAGetLastError();
                }
            }
            /* else if (res < 0) an error occurred */
        }
    }

    if (res < 0)
        res = WSAGetLastError();

#else

    if (s->sock_timeout > 0.0) {
        if (res < 0 && errno == EINPROGRESS && IS_SELECTABLE(s)) {
            timeout = internal_select(s, 1);
            if (timeout == 0) {
                /* Bug #1019808: in case of an EINPROGRESS,
                   use getsockopt(SO_ERROR) to get the real
                   error.
*/
                socklen_t res_size = sizeof res;
                (void)getsockopt(s->sock_fd, SOL_SOCKET,
                                 SO_ERROR, &res, &res_size);
                /* EISCONN here means the connect completed while we
                   were waiting; treat as success. */
                if (res == EISCONN)
                    res = 0;
                errno = res;
            }
            else if (timeout == -1) {
                res = errno;            /* had error */
            }
            else
                res = EWOULDBLOCK;      /* timed out */
        }
    }

    if (res < 0)
        res = errno;

#endif
    *timeoutp = timeout;

    return res;
}

/* s.connect(sockaddr) method */

static PyObject *
sock_connect(PySocketSockObject *s, PyObject *addro)
{
    sock_addr_t addrbuf;
    int addrlen;
    int res;
    int timeout;

    if (!getsockaddrarg(s, addro, SAS2SA(&addrbuf), &addrlen))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    res = internal_connect(s, SAS2SA(&addrbuf), addrlen, &timeout);
    Py_END_ALLOW_THREADS

    if (timeout == 1) {
        PyErr_SetString(socket_timeout, "timed out");
        return NULL;
    }
    if (res != 0)
        return s->errorhandler();
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(connect_doc,
"connect(address)\n\
\n\
Connect the socket to a remote address. For IP sockets, the address\n\
is a pair (host, port).");


/* s.connect_ex(sockaddr) method */

static PyObject *
sock_connect_ex(PySocketSockObject *s, PyObject *addro)
{
    sock_addr_t addrbuf;
    int addrlen;
    int res;
    int timeout;

    if (!getsockaddrarg(s, addro, SAS2SA(&addrbuf), &addrlen))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    res = internal_connect(s, SAS2SA(&addrbuf), addrlen, &timeout);
    Py_END_ALLOW_THREADS

    /* Signals are not errors (though they may raise exceptions). Adapted
       from PyErr_SetFromErrnoWithFilenameObject().
*/
#ifdef EINTR
    if (res == EINTR && PyErr_CheckSignals())
        return NULL;
#endif

    /* Unlike connect(), report the error code instead of raising. */
    return PyInt_FromLong((long) res);
}

PyDoc_STRVAR(connect_ex_doc,
"connect_ex(address) -> errno\n\
\n\
This is like connect(address), but returns an error code (the errno value)\n\
instead of raising an exception when an error occurs.");


/* s.fileno() method */

static PyObject *
sock_fileno(PySocketSockObject *s)
{
    /* SOCKET_T may be wider than long on 64-bit Windows. */
#if SIZEOF_SOCKET_T <= SIZEOF_LONG
    return PyInt_FromLong((long) s->sock_fd);
#else
    return PyLong_FromLongLong((PY_LONG_LONG)s->sock_fd);
#endif
}

PyDoc_STRVAR(fileno_doc,
"fileno() -> integer\n\
\n\
Return the integer file descriptor of the socket.");


#ifndef NO_DUP
/* s.dup() method */

static PyObject *
sock_dup(PySocketSockObject *s)
{
    SOCKET_T newfd;
    PyObject *sock;

    newfd = dup(s->sock_fd);
    if (newfd < 0)
        return s->errorhandler();
    sock = (PyObject *) new_sockobject(newfd,
                                       s->sock_family,
                                       s->sock_type,
                                       s->sock_proto);
    /* On wrapper-allocation failure, don't leak the dupped fd. */
    if (sock == NULL)
        SOCKETCLOSE(newfd);
    return sock;
}

PyDoc_STRVAR(dup_doc,
"dup() -> socket object\n\
\n\
Return a new socket object connected to the same system resource.");

#endif


/* s.getsockname() method */

static PyObject *
sock_getsockname(PySocketSockObject *s)
{
    sock_addr_t addrbuf;
    int res;
    socklen_t addrlen;

    if (!getsockaddrlen(s, &addrlen))
        return NULL;
    memset(&addrbuf, 0, addrlen);
    Py_BEGIN_ALLOW_THREADS
    res = getsockname(s->sock_fd, SAS2SA(&addrbuf), &addrlen);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return s->errorhandler();
    return makesockaddr(s->sock_fd, SAS2SA(&addrbuf), addrlen,
                        s->sock_proto);
}

PyDoc_STRVAR(getsockname_doc,
"getsockname() -> address info\n\
\n\
Return the address of the local endpoint.
For IP sockets, the address\n\
info is a pair (hostaddr, port).");


#ifdef HAVE_GETPEERNAME         /* Cray APP doesn't have this :-( */
/* s.getpeername() method */

static PyObject *
sock_getpeername(PySocketSockObject *s)
{
    sock_addr_t addrbuf;
    int res;
    socklen_t addrlen;

    if (!getsockaddrlen(s, &addrlen))
        return NULL;
    memset(&addrbuf, 0, addrlen);
    Py_BEGIN_ALLOW_THREADS
    res = getpeername(s->sock_fd, SAS2SA(&addrbuf), &addrlen);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return s->errorhandler();
    return makesockaddr(s->sock_fd, SAS2SA(&addrbuf), addrlen,
                        s->sock_proto);
}

PyDoc_STRVAR(getpeername_doc,
"getpeername() -> address info\n\
\n\
Return the address of the remote endpoint. For IP sockets, the address\n\
info is a pair (hostaddr, port).");

#endif /* HAVE_GETPEERNAME */


/* s.listen(n) method */

static PyObject *
sock_listen(PySocketSockObject *s, PyObject *arg)
{
    int backlog;
    int res;

    backlog = _PyInt_AsInt(arg);
    if (backlog == -1 && PyErr_Occurred())
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    /* To avoid problems on systems that don't allow a negative backlog
     * (which doesn't make sense anyway) we force a minimum value of 0. */
    if (backlog < 0)
        backlog = 0;
    res = listen(s->sock_fd, backlog);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return s->errorhandler();
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(listen_doc,
"listen(backlog)\n\
\n\
Enable a server to accept connections. The backlog argument must be at\n\
least 0 (if it is lower, it is set to 0); it specifies the number of\n\
unaccepted connections that the system will allow before refusing new\n\
connections.");


#ifndef NO_DUP
/* s.makefile(mode) method.
   Create a new open file object referring to a dupped version of
   the socket's file descriptor. (The dup() call is necessary so
   that the open file and socket objects may be closed independent
   of each other.)
   The mode argument specifies 'r' or 'w' passed to fdopen().
*/

static PyObject *
sock_makefile(PySocketSockObject *s, PyObject *args)
{
    extern int fclose(FILE *);
    char *mode = "r";
    int bufsize = -1;
#ifdef MS_WIN32
    Py_intptr_t fd;
#else
    int fd;
#endif
    FILE *fp;
    PyObject *f;
#ifdef __VMS
    char *mode_r = "r";
    char *mode_w = "w";
#endif

    if (!PyArg_ParseTuple(args, "|si:makefile", &mode, &bufsize))
        return NULL;
#ifdef __VMS
    /* VMS fdopen() rejects the 'b' qualifier; fold "rb"/"wb" to
       plain "r"/"w". */
    if (strcmp(mode,"rb") == 0) {
        mode = mode_r;
    }
    else {
        if (strcmp(mode,"wb") == 0) {
            mode = mode_w;
        }
    }
#endif
#ifdef MS_WIN32
    /* On Windows a SOCKET is not a C runtime fd; convert first,
       then dup so the FILE owns an independent descriptor. */
    if (((fd = _open_osfhandle(s->sock_fd, _O_BINARY)) < 0) ||
        ((fd = dup(fd)) < 0) || ((fp = fdopen(fd, mode)) == NULL))
#else
    if ((fd = dup(s->sock_fd)) < 0 || (fp = fdopen(fd, mode)) == NULL)
#endif
    {
        if (fd >= 0)
            SOCKETCLOSE(fd);
        return s->errorhandler();
    }
    f = PyFile_FromFile(fp, "<socket>", mode, fclose);
    if (f != NULL)
        PyFile_SetBufSize(f, bufsize);
    return f;
}

PyDoc_STRVAR(makefile_doc,
"makefile([mode[, buffersize]]) -> file object\n\
\n\
Return a regular file object corresponding to the socket.\n\
The mode and buffersize arguments are as for the built-in open() function.");

#endif /* NO_DUP */

/*
 * This is the guts of the recv() and recv_into() methods, which reads into a
 * char buffer. If you have any inc/dec ref to do to the objects that contain
 * the buffer, do it in the caller. This function returns the number of bytes
 * successfully read. If there was an error, it returns -1. Note that it is
 * also possible that we return a number of bytes smaller than the request
 * bytes.
*/
static ssize_t
sock_recv_guts(PySocketSockObject *s, char* cbuf, int len, int flags)
{
    ssize_t outlen = -1;
    int timeout;
#ifdef __VMS
    int remaining;
    char *read_buf;
#endif

    if (!IS_SELECTABLE(s)) {
        /* select_error() sets the Python exception; we just signal
           failure with -1 (caller owns the buffer). */
        select_error();
        return -1;
    }

#ifndef __VMS
    BEGIN_SELECT_LOOP(s)
    Py_BEGIN_ALLOW_THREADS
    timeout = internal_select_ex(s, 0, interval);
    if (!timeout)
        outlen = recv(s->sock_fd, cbuf, len, flags);
    Py_END_ALLOW_THREADS

    if (timeout == 1) {
        PyErr_SetString(socket_timeout, "timed out");
        return -1;
    }
    END_SELECT_LOOP(s)
    if (outlen < 0) {
        /* Note: the call to errorhandler() ALWAYS indirectly returned
           NULL, so ignore its return value */
        s->errorhandler();
        return -1;
    }
#else
    /* VMS: read in SEGMENT_SIZE chunks until the request is satisfied
       or a short read ends the stream of available data. */
    read_buf = cbuf;
    remaining = len;
    while (remaining != 0) {
        unsigned int segment;
        int nread = -1;

        segment = remaining /SEGMENT_SIZE;
        if (segment != 0) {
            segment = SEGMENT_SIZE;
        }
        else {
            segment = remaining;
        }

        BEGIN_SELECT_LOOP(s)
        Py_BEGIN_ALLOW_THREADS
        timeout = internal_select_ex(s, 0, interval);
        if (!timeout)
            nread = recv(s->sock_fd, read_buf, segment, flags);
        Py_END_ALLOW_THREADS

        if (timeout == 1) {
            PyErr_SetString(socket_timeout, "timed out");
            return -1;
        }
        END_SELECT_LOOP(s)
        if (nread < 0) {
            s->errorhandler();
            return -1;
        }
        if (nread != remaining) {
            /* Short read: account for what arrived and stop. */
            read_buf += nread;
            break;
        }
        remaining -= segment;
        read_buf += segment;
    }

    outlen = read_buf - cbuf;
#endif /* !__VMS */

    return outlen;
}


/* s.recv(nbytes [,flags]) method */

static PyObject *
sock_recv(PySocketSockObject *s, PyObject *args)
{
    int recvlen, flags = 0;
    ssize_t outlen;
    PyObject *buf;

    if (!PyArg_ParseTuple(args, "i|i:recv", &recvlen, &flags))
        return NULL;

    if (recvlen < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "negative buffersize in recv");
        return NULL;
    }

    /* Allocate a new string. */
    buf = PyString_FromStringAndSize((char *) 0, recvlen);
    if (buf == NULL)
        return NULL;

    /* Call the guts */
    outlen = sock_recv_guts(s, PyString_AS_STRING(buf), recvlen, flags);
    if (outlen < 0) {
        /* An error occurred, release the string and return an
           error. */
        Py_DECREF(buf);
        return NULL;
    }
    if (outlen != recvlen) {
        /* We did not read as many bytes as we anticipated, resize the
           string if possible and be successful. */
        if (_PyString_Resize(&buf, outlen) < 0)
            /* Oopsy, not so successful after all. */
            return NULL;
    }

    return buf;
}

PyDoc_STRVAR(recv_doc,
"recv(buffersize[, flags]) -> data\n\
\n\
Receive up to buffersize bytes from the socket. For the optional flags\n\
argument, see the Unix manual. When no data is available, block until\n\
at least one byte is available or until the remote end is closed. When\n\
the remote end is closed and all data is read, return the empty string.");


/* s.recv_into(buffer, [nbytes [,flags]]) method */

static PyObject*
sock_recv_into(PySocketSockObject *s, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"buffer", "nbytes", "flags", 0};

    int recvlen = 0, flags = 0;
    ssize_t readlen;
    Py_buffer buf;
    Py_ssize_t buflen;

    /* Get the buffer's memory */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "w*|ii:recv_into", kwlist,
                                     &buf, &recvlen, &flags))
        return NULL;
    buflen = buf.len;
    /* NOTE(review): this assert rejects zero-length buffers outright;
       a release build would pass buflen==0 through -- confirm intended. */
    assert(buf.buf != 0 && buflen > 0);

    if (recvlen < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "negative buffersize in recv_into");
        goto error;
    }
    if (recvlen == 0) {
        /* If nbytes was not specified, use the buffer's length */
        recvlen = buflen;
    }

    /* Check if the buffer is large enough */
    if (buflen < recvlen) {
        PyErr_SetString(PyExc_ValueError,
                        "buffer too small for requested bytes");
        goto error;
    }

    /* Call the guts */
    readlen = sock_recv_guts(s, buf.buf, recvlen, flags);
    if (readlen < 0) {
        /* Return an error. */
        goto error;
    }

    PyBuffer_Release(&buf);
    /* Return the number of bytes read. Note that we do not do anything
       special here in the case that readlen < recvlen.
*/
    return PyInt_FromSsize_t(readlen);

error:
    PyBuffer_Release(&buf);
    return NULL;
}

PyDoc_STRVAR(recv_into_doc,
"recv_into(buffer, [nbytes[, flags]]) -> nbytes_read\n\
\n\
A version of recv() that stores its data into a buffer rather than creating \n\
a new string. Receive up to buffersize bytes from the socket. If buffersize \n\
is not specified (or 0), receive up to the size available in the given buffer.\n\
\n\
See recv() for documentation about the flags.");


/*
 * This is the guts of the recvfrom() and recvfrom_into() methods, which reads
 * into a char buffer. If you have any inc/def ref to do to the objects that
 * contain the buffer, do it in the caller. This function returns the number
 * of bytes successfully read. If there was an error, it returns -1. Note
 * that it is also possible that we return a number of bytes smaller than the
 * request bytes.
 *
 * 'addr' is a return value for the address object. Note that you must decref
 * it yourself.
 */
static ssize_t
sock_recvfrom_guts(PySocketSockObject *s, char* cbuf, int len, int flags,
                   PyObject** addr)
{
    sock_addr_t addrbuf;
    int timeout;
    ssize_t n = -1;
    socklen_t addrlen;

    *addr = NULL;

    if (!getsockaddrlen(s, &addrlen))
        return -1;

    if (!IS_SELECTABLE(s)) {
        select_error();
        return -1;
    }

    BEGIN_SELECT_LOOP(s)
    Py_BEGIN_ALLOW_THREADS
    /* Reset the address buffer on each retry of the select loop. */
    memset(&addrbuf, 0, addrlen);
    timeout = internal_select_ex(s, 0, interval);
    if (!timeout) {
#ifndef MS_WINDOWS
#if defined(PYOS_OS2) && !defined(PYCC_GCC)
        n = recvfrom(s->sock_fd, cbuf, len, flags,
                     SAS2SA(&addrbuf), &addrlen);
#else
        n = recvfrom(s->sock_fd, cbuf, len, flags,
                     (void *) &addrbuf, &addrlen);
#endif
#else
        n = recvfrom(s->sock_fd, cbuf, len, flags,
                     SAS2SA(&addrbuf), &addrlen);
#endif
    }
    Py_END_ALLOW_THREADS

    if (timeout == 1) {
        PyErr_SetString(socket_timeout, "timed out");
        return -1;
    }
    END_SELECT_LOOP(s)
    if (n < 0) {
        s->errorhandler();
        return -1;
    }

    if (!(*addr = makesockaddr(s->sock_fd, SAS2SA(&addrbuf),
                               addrlen, s->sock_proto)))
        return -1;

    return n;
}

/* s.recvfrom(nbytes [,flags])
   method */

static PyObject *
sock_recvfrom(PySocketSockObject *s, PyObject *args)
{
    PyObject *buf = NULL;
    PyObject *addr = NULL;
    PyObject *ret = NULL;
    int recvlen, flags = 0;
    ssize_t outlen;

    if (!PyArg_ParseTuple(args, "i|i:recvfrom", &recvlen, &flags))
        return NULL;

    if (recvlen < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "negative buffersize in recvfrom");
        return NULL;
    }

    buf = PyString_FromStringAndSize((char *) 0, recvlen);
    if (buf == NULL)
        return NULL;

    outlen = sock_recvfrom_guts(s, PyString_AS_STRING(buf),
                                recvlen, flags, &addr);
    if (outlen < 0) {
        goto finally;
    }

    if (outlen != recvlen) {
        /* We did not read as many bytes as we anticipated, resize the
           string if possible and be successful. */
        if (_PyString_Resize(&buf, outlen) < 0)
            /* Oopsy, not so successful after all. */
            goto finally;
    }

    ret = PyTuple_Pack(2, buf, addr);

finally:
    /* ret holds its own references (or is NULL on error). */
    Py_XDECREF(buf);
    Py_XDECREF(addr);
    return ret;
}

PyDoc_STRVAR(recvfrom_doc,
"recvfrom(buffersize[, flags]) -> (data, address info)\n\
\n\
Like recv(buffersize, flags) but also return the sender's address info.");


/* s.recvfrom_into(buffer[, nbytes [,flags]]) method */

static PyObject *
sock_recvfrom_into(PySocketSockObject *s, PyObject *args, PyObject* kwds)
{
    static char *kwlist[] = {"buffer", "nbytes", "flags", 0};

    int recvlen = 0, flags = 0;
    ssize_t readlen;
    Py_buffer buf;
    int buflen;

    PyObject *addr = NULL;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "w*|ii:recvfrom_into",
                                     kwlist, &buf, &recvlen, &flags))
        return NULL;
    buflen = buf.len;

    if (recvlen < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "negative buffersize in recvfrom_into");
        goto error;
    }
    if (recvlen == 0) {
        /* If nbytes was not specified, use the buffer's length */
        recvlen = buflen;
    } else if (recvlen > buflen) {
        PyErr_SetString(PyExc_ValueError,
                        "nbytes is greater than the length of the buffer");
        goto error;
    }

    readlen = sock_recvfrom_guts(s, buf.buf, recvlen, flags, &addr);
    if (readlen < 0) {
        /* Return an error */
        goto error;
    }

    PyBuffer_Release(&buf);
    /* Return the number of bytes
read and the address. Note that we do not do anything
       special here in the case that readlen < recvlen. */
    /* "N" steals the reference to addr. */
    return Py_BuildValue("lN", readlen, addr);

error:
    Py_XDECREF(addr);
    PyBuffer_Release(&buf);
    return NULL;
}

PyDoc_STRVAR(recvfrom_into_doc,
"recvfrom_into(buffer[, nbytes[, flags]]) -> (nbytes, address info)\n\
\n\
Like recv_into(buffer[, nbytes[, flags]]) but also return the sender's address info.");


/* s.send(data [,flags]) method */

static PyObject *
sock_send(PySocketSockObject *s, PyObject *args)
{
    char *buf;
    int flags = 0, timeout;
    Py_ssize_t len, n = -1;
    Py_buffer pbuf;

    if (!PyArg_ParseTuple(args, "s*|i:send", &pbuf, &flags))
        return NULL;

    if (!IS_SELECTABLE(s)) {
        PyBuffer_Release(&pbuf);
        return select_error();
    }
    buf = pbuf.buf;
    len = pbuf.len;

    BEGIN_SELECT_LOOP(s)
    Py_BEGIN_ALLOW_THREADS
    timeout = internal_select_ex(s, 1, interval);
    if (!timeout) {
#ifdef __VMS
        n = sendsegmented(s->sock_fd, buf, len, flags);
#elif defined(MS_WINDOWS)
        /* Winsock send() takes an int length; clamp and let the
           caller retry the remainder (send may be partial anyway). */
        if (len > INT_MAX) {
            len = INT_MAX;
        }
        n = send(s->sock_fd, buf, (int)len, flags);
#else
        n = send(s->sock_fd, buf, len, flags);
#endif
    }
    Py_END_ALLOW_THREADS
    if (timeout == 1) {
        PyBuffer_Release(&pbuf);
        PyErr_SetString(socket_timeout, "timed out");
        return NULL;
    }
    END_SELECT_LOOP(s)

    PyBuffer_Release(&pbuf);
    if (n < 0)
        return s->errorhandler();
    return PyInt_FromSsize_t(n);
}

PyDoc_STRVAR(send_doc,
"send(data[, flags]) -> count\n\
\n\
Send a data string to the socket. For the optional flags\n\
argument, see the Unix manual. Return the number of bytes\n\
sent; this may be less than len(data) if the network is busy.");


/* s.sendall(data [,flags]) method */

static PyObject *
sock_sendall(PySocketSockObject *s, PyObject *args)
{
    char *buf;
    int flags = 0, timeout, saved_errno;
    Py_ssize_t len, n = -1;
    Py_buffer pbuf;

    if (!PyArg_ParseTuple(args, "s*|i:sendall", &pbuf, &flags))
        return NULL;
    buf = pbuf.buf;
    len = pbuf.len;

    if (!IS_SELECTABLE(s)) {
        PyBuffer_Release(&pbuf);
        return select_error();
    }

    /* Loop until all bytes are written or a non-EINTR error occurs. */
    do {
        BEGIN_SELECT_LOOP(s)
        Py_BEGIN_ALLOW_THREADS
        timeout = internal_select_ex(s, 1, interval);
        n = -1;
        if (!timeout) {
#ifdef __VMS
            n = sendsegmented(s->sock_fd, buf, len, flags);
#elif defined(MS_WINDOWS)
            if (len > INT_MAX) {
                len = INT_MAX;
            }
            n = send(s->sock_fd, buf, (int)len, flags);
#else
            n = send(s->sock_fd, buf, len, flags);
#endif
        }
        Py_END_ALLOW_THREADS
        if (timeout == 1) {
            PyBuffer_Release(&pbuf);
            PyErr_SetString(socket_timeout, "timed out");
            return NULL;
        }
        END_SELECT_LOOP(s)
        /* PyErr_CheckSignals() might change errno */
        saved_errno = errno;
        /* We must run our signal handlers before looping again.
           send() can return a successful partial write when it is
           interrupted, so we can't restrict ourselves to EINTR. */
        if (PyErr_CheckSignals()) {
            PyBuffer_Release(&pbuf);
            return NULL;
        }
        if (n < 0) {
            /* If interrupted, try again */
            if (saved_errno == EINTR)
                continue;
            else
                break;
        }
        buf += n;
        len -= n;
    } while (len > 0);
    PyBuffer_Release(&pbuf);

    if (n < 0)
        return s->errorhandler();

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(sendall_doc,
"sendall(data[, flags])\n\
\n\
Send a data string to the socket. For the optional flags\n\
argument, see the Unix manual. This calls send() repeatedly\n\
until all data is sent.
If an error occurs, it's impossible\n\ to tell how much data has been sent."); /* s.sendto(data, [flags,] sockaddr) method */ static PyObject * sock_sendto(PySocketSockObject *s, PyObject *args) { Py_buffer pbuf; PyObject *addro; char *buf; Py_ssize_t len; sock_addr_t addrbuf; int addrlen, flags, timeout; long n = -1; int arglen; flags = 0; arglen = PyTuple_Size(args); switch(arglen) { case 2: PyArg_ParseTuple(args, "s*O:sendto", &pbuf, &addro); break; case 3: PyArg_ParseTuple(args, "s*iO:sendto", &pbuf, &flags, &addro); break; default: PyErr_Format(PyExc_TypeError, "sendto() takes 2 or 3" " arguments (%d given)", arglen); } if (PyErr_Occurred()) return NULL; buf = pbuf.buf; len = pbuf.len; if (!IS_SELECTABLE(s)) { PyBuffer_Release(&pbuf); return select_error(); } if (!getsockaddrarg(s, addro, SAS2SA(&addrbuf), &addrlen)) { PyBuffer_Release(&pbuf); return NULL; } BEGIN_SELECT_LOOP(s) Py_BEGIN_ALLOW_THREADS timeout = internal_select_ex(s, 1, interval); if (!timeout) n = sendto(s->sock_fd, buf, len, flags, SAS2SA(&addrbuf), addrlen); Py_END_ALLOW_THREADS if (timeout == 1) { PyBuffer_Release(&pbuf); PyErr_SetString(socket_timeout, "timed out"); return NULL; } END_SELECT_LOOP(s) PyBuffer_Release(&pbuf); if (n < 0) return s->errorhandler(); return PyInt_FromLong((long)n); } PyDoc_STRVAR(sendto_doc, "sendto(data[, flags], address) -> count\n\ \n\ Like send(data, flags) but allows specifying the destination address.\n\ For IP sockets, the address is a pair (hostaddr, port)."); /* s.shutdown(how) method */ static PyObject * sock_shutdown(PySocketSockObject *s, PyObject *arg) { int how; int res; how = _PyInt_AsInt(arg); if (how == -1 && PyErr_Occurred()) return NULL; Py_BEGIN_ALLOW_THREADS res = shutdown(s->sock_fd, how); Py_END_ALLOW_THREADS if (res < 0) return s->errorhandler(); Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(shutdown_doc, "shutdown(flag)\n\ \n\ Shut down the reading side of the socket (flag == SHUT_RD), the writing side\n\ of the socket (flag == 
SHUT_WR), or both ends (flag == SHUT_RDWR)."); #if defined(MS_WINDOWS) && defined(SIO_RCVALL) static PyObject* sock_ioctl(PySocketSockObject *s, PyObject *arg) { unsigned long cmd = SIO_RCVALL; PyObject *argO; DWORD recv; if (!PyArg_ParseTuple(arg, "kO:ioctl", &cmd, &argO)) return NULL; switch (cmd) { case SIO_RCVALL: { unsigned int option = RCVALL_ON; if (!PyArg_ParseTuple(arg, "kI:ioctl", &cmd, &option)) return NULL; if (WSAIoctl(s->sock_fd, cmd, &option, sizeof(option), NULL, 0, &recv, NULL, NULL) == SOCKET_ERROR) { return set_error(); } return PyLong_FromUnsignedLong(recv); } case SIO_KEEPALIVE_VALS: { struct tcp_keepalive ka; if (!PyArg_ParseTuple(arg, "k(kkk):ioctl", &cmd, &ka.onoff, &ka.keepalivetime, &ka.keepaliveinterval)) return NULL; if (WSAIoctl(s->sock_fd, cmd, &ka, sizeof(ka), NULL, 0, &recv, NULL, NULL) == SOCKET_ERROR) { return set_error(); } return PyLong_FromUnsignedLong(recv); } default: PyErr_Format(PyExc_ValueError, "invalid ioctl command %d", cmd); return NULL; } } PyDoc_STRVAR(sock_ioctl_doc, "ioctl(cmd, option) -> long\n\ \n\ Control the socket with WSAIoctl syscall. 
Currently supported 'cmd' values are\n\ SIO_RCVALL: 'option' must be one of the socket.RCVALL_* constants.\n\ SIO_KEEPALIVE_VALS: 'option' is a tuple of (onoff, timeout, interval)."); #endif /* List of methods for socket objects */ static PyMethodDef sock_methods[] = { {"accept", (PyCFunction)sock_accept, METH_NOARGS, accept_doc}, {"bind", (PyCFunction)sock_bind, METH_O, bind_doc}, {"close", (PyCFunction)sock_close, METH_NOARGS, close_doc}, {"connect", (PyCFunction)sock_connect, METH_O, connect_doc}, {"connect_ex", (PyCFunction)sock_connect_ex, METH_O, connect_ex_doc}, #ifndef NO_DUP {"dup", (PyCFunction)sock_dup, METH_NOARGS, dup_doc}, #endif {"fileno", (PyCFunction)sock_fileno, METH_NOARGS, fileno_doc}, #ifdef HAVE_GETPEERNAME {"getpeername", (PyCFunction)sock_getpeername, METH_NOARGS, getpeername_doc}, #endif {"getsockname", (PyCFunction)sock_getsockname, METH_NOARGS, getsockname_doc}, {"getsockopt", (PyCFunction)sock_getsockopt, METH_VARARGS, getsockopt_doc}, #if defined(MS_WINDOWS) && defined(SIO_RCVALL) {"ioctl", (PyCFunction)sock_ioctl, METH_VARARGS, sock_ioctl_doc}, #endif {"listen", (PyCFunction)sock_listen, METH_O, listen_doc}, #ifndef NO_DUP {"makefile", (PyCFunction)sock_makefile, METH_VARARGS, makefile_doc}, #endif {"recv", (PyCFunction)sock_recv, METH_VARARGS, recv_doc}, {"recv_into", (PyCFunction)sock_recv_into, METH_VARARGS | METH_KEYWORDS, recv_into_doc}, {"recvfrom", (PyCFunction)sock_recvfrom, METH_VARARGS, recvfrom_doc}, {"recvfrom_into", (PyCFunction)sock_recvfrom_into, METH_VARARGS | METH_KEYWORDS, recvfrom_into_doc}, {"send", (PyCFunction)sock_send, METH_VARARGS, send_doc}, {"sendall", (PyCFunction)sock_sendall, METH_VARARGS, sendall_doc}, {"sendto", (PyCFunction)sock_sendto, METH_VARARGS, sendto_doc}, {"setblocking", (PyCFunction)sock_setblocking, METH_O, setblocking_doc}, {"settimeout", (PyCFunction)sock_settimeout, METH_O, settimeout_doc}, {"gettimeout", (PyCFunction)sock_gettimeout, METH_NOARGS, gettimeout_doc}, {"setsockopt", 
(PyCFunction)sock_setsockopt, METH_VARARGS, setsockopt_doc}, {"shutdown", (PyCFunction)sock_shutdown, METH_O, shutdown_doc}, #ifdef RISCOS {"sleeptaskw", (PyCFunction)sock_sleeptaskw, METH_O, sleeptaskw_doc}, #endif {NULL, NULL} /* sentinel */ }; /* SockObject members */ static PyMemberDef sock_memberlist[] = { {"family", T_INT, offsetof(PySocketSockObject, sock_family), READONLY, "the socket family"}, {"type", T_INT, offsetof(PySocketSockObject, sock_type), READONLY, "the socket type"}, {"proto", T_INT, offsetof(PySocketSockObject, sock_proto), READONLY, "the socket protocol"}, {"timeout", T_DOUBLE, offsetof(PySocketSockObject, sock_timeout), READONLY, "the socket timeout"}, {0}, }; /* Deallocate a socket object in response to the last Py_DECREF(). First close the file description. */ static void sock_dealloc(PySocketSockObject *s) { if (s->sock_fd != -1) (void) SOCKETCLOSE(s->sock_fd); if (s->weakreflist != NULL) PyObject_ClearWeakRefs((PyObject *)s); Py_TYPE(s)->tp_free((PyObject *)s); } static PyObject * sock_repr(PySocketSockObject *s) { char buf[512]; long sock_fd; /* On Windows, this test is needed because SOCKET_T is unsigned */ if (s->sock_fd == INVALID_SOCKET) { sock_fd = -1; } #if SIZEOF_SOCKET_T > SIZEOF_LONG else if (s->sock_fd > LONG_MAX) { /* this can occur on Win64, and actually there is a special ugly printf formatter for decimal pointer length integer printing, only bother if necessary*/ PyErr_SetString(PyExc_OverflowError, "no printf formatter to display " "the socket descriptor in decimal"); return NULL; } #endif else sock_fd = (long)s->sock_fd; PyOS_snprintf( buf, sizeof(buf), "<socket object, fd=%ld, family=%d, type=%d, protocol=%d>", sock_fd, s->sock_family, s->sock_type, s->sock_proto); return PyString_FromString(buf); } /* Create a new, uninitialized socket object. 
*/ static PyObject * sock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *new; new = type->tp_alloc(type, 0); if (new != NULL) { ((PySocketSockObject *)new)->sock_fd = -1; ((PySocketSockObject *)new)->sock_timeout = -1.0; ((PySocketSockObject *)new)->errorhandler = &set_error; ((PySocketSockObject *)new)->weakreflist = NULL; } return new; } /* Initialize a new socket object. */ /*ARGSUSED*/ static int sock_initobj(PyObject *self, PyObject *args, PyObject *kwds) { PySocketSockObject *s = (PySocketSockObject *)self; SOCKET_T fd; int family = AF_INET, type = SOCK_STREAM, proto = 0; static char *keywords[] = {"family", "type", "proto", 0}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii:socket", keywords, &family, &type, &proto)) return -1; Py_BEGIN_ALLOW_THREADS fd = socket(family, type, proto); Py_END_ALLOW_THREADS if (fd == INVALID_SOCKET) { set_error(); return -1; } init_sockobject(s, fd, family, type, proto); return 0; } /* Type object for socket objects. */ static PyTypeObject sock_type = { PyVarObject_HEAD_INIT(0, 0) /* Must fill in type value later */ "_socket.socket", /* tp_name */ sizeof(PySocketSockObject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)sock_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc)sock_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ sock_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ offsetof(PySocketSockObject, weakreflist), /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ sock_methods, /* tp_methods */ sock_memberlist, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 
sock_initobj, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ sock_new, /* tp_new */ PyObject_Del, /* tp_free */ }; /* Python interface to gethostname(). */ /*ARGSUSED*/ static PyObject * socket_gethostname(PyObject *self, PyObject *unused) { char buf[1024]; int res; #ifdef __VITA__ return PyString_FromString("Vita"); #endif /* __VITA__ */ Py_BEGIN_ALLOW_THREADS res = gethostname(buf, (int) sizeof buf - 1); Py_END_ALLOW_THREADS if (res < 0) return set_error(); buf[sizeof buf - 1] = '\0'; return PyString_FromString(buf); } PyDoc_STRVAR(gethostname_doc, "gethostname() -> string\n\ \n\ Return the current host name."); /* Python interface to gethostbyname(name). */ /*ARGSUSED*/ static PyObject * socket_gethostbyname(PyObject *self, PyObject *args) { char *name; sock_addr_t addrbuf; if (!PyArg_ParseTuple(args, "s:gethostbyname", &name)) return NULL; if (setipaddr(name, SAS2SA(&addrbuf), sizeof(addrbuf), AF_INET) < 0) return NULL; return makeipaddr(SAS2SA(&addrbuf), sizeof(struct sockaddr_in)); } PyDoc_STRVAR(gethostbyname_doc, "gethostbyname(host) -> address\n\ \n\ Return the IP address (a string of the form '255.255.255.255') for a host."); /* Convenience function common to gethostbyname_ex and gethostbyaddr */ static PyObject * gethost_common(struct hostent *h, struct sockaddr *addr, int alen, int af) { char **pch; PyObject *rtn_tuple = (PyObject *)NULL; PyObject *name_list = (PyObject *)NULL; PyObject *addr_list = (PyObject *)NULL; PyObject *tmp; if (h == NULL) { /* Let's get real error message to return */ #if !defined(RISCOS) && !defined(__VITA__) set_herror(h_errno); #else PyErr_SetString(socket_error, "host not found"); #endif return NULL; } if (h->h_addrtype != af) { /* Let's get real error message to return */ PyErr_SetString(socket_error, (char *)strerror(EAFNOSUPPORT)); return NULL; } switch (af) { case AF_INET: if (alen < sizeof(struct sockaddr_in)) return NULL; break; #ifdef ENABLE_IPV6 case AF_INET6: if (alen < sizeof(struct sockaddr_in6)) return NULL; 
break; #endif } if ((name_list = PyList_New(0)) == NULL) goto err; if ((addr_list = PyList_New(0)) == NULL) goto err; /* SF #1511317: h_aliases can be NULL */ if (h->h_aliases) { for (pch = h->h_aliases; *pch != NULL; pch++) { int status; tmp = PyString_FromString(*pch); if (tmp == NULL) goto err; status = PyList_Append(name_list, tmp); Py_DECREF(tmp); if (status) goto err; } } for (pch = h->h_addr_list; *pch != NULL; pch++) { int status; switch (af) { case AF_INET: { struct sockaddr_in sin; memset(&sin, 0, sizeof(sin)); sin.sin_family = af; #ifdef HAVE_SOCKADDR_SA_LEN sin.sin_len = sizeof(sin); #endif memcpy(&sin.sin_addr, *pch, sizeof(sin.sin_addr)); tmp = makeipaddr((struct sockaddr *)&sin, sizeof(sin)); if (pch == h->h_addr_list && alen >= sizeof(sin)) memcpy((char *) addr, &sin, sizeof(sin)); break; } #ifdef ENABLE_IPV6 case AF_INET6: { struct sockaddr_in6 sin6; memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = af; #ifdef HAVE_SOCKADDR_SA_LEN sin6.sin6_len = sizeof(sin6); #endif memcpy(&sin6.sin6_addr, *pch, sizeof(sin6.sin6_addr)); tmp = makeipaddr((struct sockaddr *)&sin6, sizeof(sin6)); if (pch == h->h_addr_list && alen >= sizeof(sin6)) memcpy((char *) addr, &sin6, sizeof(sin6)); break; } #endif default: /* can't happen */ PyErr_SetString(socket_error, "unsupported address family"); return NULL; } if (tmp == NULL) goto err; status = PyList_Append(addr_list, tmp); Py_DECREF(tmp); if (status) goto err; } rtn_tuple = Py_BuildValue("sOO", h->h_name, name_list, addr_list); err: Py_XDECREF(name_list); Py_XDECREF(addr_list); return rtn_tuple; } /* Python interface to gethostbyname_ex(name). 
*/ /*ARGSUSED*/ static PyObject * socket_gethostbyname_ex(PyObject *self, PyObject *args) { char *name; struct hostent *h; #ifdef ENABLE_IPV6 struct sockaddr_storage addr; #else struct sockaddr_in addr; #endif struct sockaddr *sa; PyObject *ret; #ifdef HAVE_GETHOSTBYNAME_R struct hostent hp_allocated; #ifdef HAVE_GETHOSTBYNAME_R_3_ARG struct hostent_data data; #else char buf[16384]; int buf_len = (sizeof buf) - 1; int errnop; #endif #ifdef HAVE_GETHOSTBYNAME_R_3_ARG int result; #endif #endif /* HAVE_GETHOSTBYNAME_R */ if (!PyArg_ParseTuple(args, "s:gethostbyname_ex", &name)) return NULL; if (setipaddr(name, (struct sockaddr *)&addr, sizeof(addr), AF_INET) < 0) return NULL; Py_BEGIN_ALLOW_THREADS #ifdef HAVE_GETHOSTBYNAME_R #if defined(HAVE_GETHOSTBYNAME_R_6_ARG) gethostbyname_r(name, &hp_allocated, buf, buf_len, &h, &errnop); #elif defined(HAVE_GETHOSTBYNAME_R_5_ARG) h = gethostbyname_r(name, &hp_allocated, buf, buf_len, &errnop); #else /* HAVE_GETHOSTBYNAME_R_3_ARG */ memset((void *) &data, '\0', sizeof(data)); result = gethostbyname_r(name, &hp_allocated, &data); h = (result != 0) ? NULL : &hp_allocated; #endif #else /* not HAVE_GETHOSTBYNAME_R */ #ifdef USE_GETHOSTBYNAME_LOCK PyThread_acquire_lock(netdb_lock, 1); #endif h = gethostbyname(name); #endif /* HAVE_GETHOSTBYNAME_R */ Py_END_ALLOW_THREADS /* Some C libraries would require addr.__ss_family instead of addr.ss_family. Therefore, we cast the sockaddr_storage into sockaddr to access sa_family. */ sa = (struct sockaddr*)&addr; ret = gethost_common(h, (struct sockaddr *)&addr, sizeof(addr), sa->sa_family); #ifdef USE_GETHOSTBYNAME_LOCK PyThread_release_lock(netdb_lock); #endif return ret; } PyDoc_STRVAR(ghbn_ex_doc, "gethostbyname_ex(host) -> (name, aliaslist, addresslist)\n\ \n\ Return the true host name, a list of aliases, and a list of IP addresses,\n\ for a host. The host argument is a string giving a host name or IP number."); /* Python interface to gethostbyaddr(IP). 
 */

/*ARGSUSED*/
static PyObject *
socket_gethostbyaddr(PyObject *self, PyObject *args)
{
#ifdef ENABLE_IPV6
    struct sockaddr_storage addr;
#else
    struct sockaddr_in addr;
#endif
    struct sockaddr *sa = (struct sockaddr *)&addr;
    char *ip_num;
    struct hostent *h;
    PyObject *ret;
#ifdef __VITA__
    /* Vita port: no reverse lookup support at all. */
    PyErr_SetString(socket_error, "Not Supported");
    return NULL;
#endif /* __VITA__ */
#ifdef HAVE_GETHOSTBYNAME_R
    struct hostent hp_allocated;
#ifdef HAVE_GETHOSTBYNAME_R_3_ARG
    struct hostent_data data;
#else
    /* glibcs up to 2.10 assume that the buf argument to
       gethostbyaddr_r is 8-byte aligned, which at least llvm-gcc
       does not ensure. The attribute below instructs the compiler
       to maintain this alignment. */
    char buf[16384] Py_ALIGNED(8);
    int buf_len = (sizeof buf) - 1;
    int errnop;
#endif
#ifdef HAVE_GETHOSTBYNAME_R_3_ARG
    int result;
#endif
#endif /* HAVE_GETHOSTBYNAME_R */
    const char *ap;
    int al;
    int af;

    if (!PyArg_ParseTuple(args, "s:gethostbyaddr", &ip_num))
        return NULL;
    af = AF_UNSPEC;
    if (setipaddr(ip_num, sa, sizeof(addr), af) < 0)
        return NULL;
    af = sa->sa_family;
    ap = NULL;
    /* Locate the raw address bytes/length for the resolved family. */
    switch (af) {
    case AF_INET:
        ap = (char *)&((struct sockaddr_in *)sa)->sin_addr;
        al = sizeof(((struct sockaddr_in *)sa)->sin_addr);
        break;
#ifdef ENABLE_IPV6
    case AF_INET6:
        ap = (char *)&((struct sockaddr_in6 *)sa)->sin6_addr;
        al = sizeof(((struct sockaddr_in6 *)sa)->sin6_addr);
        break;
#endif
    default:
        PyErr_SetString(socket_error, "unsupported address family");
        return NULL;
    }
    Py_BEGIN_ALLOW_THREADS
#ifdef HAVE_GETHOSTBYNAME_R
#if defined(HAVE_GETHOSTBYNAME_R_6_ARG)
    gethostbyaddr_r(ap, al, af,
        &hp_allocated, buf, buf_len,
        &h, &errnop);
#elif defined(HAVE_GETHOSTBYNAME_R_5_ARG)
    h = gethostbyaddr_r(ap, al, af,
                        &hp_allocated, buf, buf_len, &errnop);
#else /* HAVE_GETHOSTBYNAME_R_3_ARG */
    memset((void *) &data, '\0', sizeof(data));
    result = gethostbyaddr_r(ap, al, af, &hp_allocated, &data);
    h = (result != 0) ? NULL : &hp_allocated;
#endif
#else /* not HAVE_GETHOSTBYNAME_R */
#ifdef USE_GETHOSTBYNAME_LOCK
    PyThread_acquire_lock(netdb_lock, 1);
#endif
    h = gethostbyaddr(ap, al, af);
#endif /* HAVE_GETHOSTBYNAME_R */
    Py_END_ALLOW_THREADS
    ret = gethost_common(h, (struct sockaddr *)&addr, sizeof(addr), af);
#ifdef USE_GETHOSTBYNAME_LOCK
    PyThread_release_lock(netdb_lock);
#endif
    return ret;
}

PyDoc_STRVAR(gethostbyaddr_doc,
"gethostbyaddr(host) -> (name, aliaslist, addresslist)\n\
\n\
Return the true host name, a list of aliases, and a list of IP addresses,\n\
for a host. The host argument is a string giving a host name or IP number.");


/* Python interface to getservbyname(name).
   This only returns the port number, since the other info is already
   known or not useful (like the list of aliases). */

/*ARGSUSED*/
static PyObject *
socket_getservbyname(PyObject *self, PyObject *args)
{
    const char *name, *proto=NULL;
    struct servent *sp;
    if (!PyArg_ParseTuple(args, "s|s:getservbyname", &name, &proto))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    sp = getservbyname(name, proto);
    Py_END_ALLOW_THREADS
    if (sp == NULL) {
        PyErr_SetString(socket_error, "service/proto not found");
        return NULL;
    }
    /* s_port is stored in network byte order. */
    return PyInt_FromLong((long) ntohs(sp->s_port));
}

PyDoc_STRVAR(getservbyname_doc,
"getservbyname(servicename[, protocolname]) -> integer\n\
\n\
Return a port number from a service name and protocol name.\n\
The optional protocol name, if given, should be 'tcp' or 'udp',\n\
otherwise any protocol will match.");


/* Python interface to getservbyport(port).
   This only returns the service name, since the other info is already
   known or not useful (like the list of aliases).
*/ /*ARGSUSED*/ static PyObject * socket_getservbyport(PyObject *self, PyObject *args) { int port; const char *proto=NULL; struct servent *sp; #ifdef __VITA__ PyErr_SetString(socket_error, "Not Supported"); return NULL; #endif /* __VITA__ */ if (!PyArg_ParseTuple(args, "i|s:getservbyport", &port, &proto)) return NULL; if (port < 0 || port > 0xffff) { PyErr_SetString( PyExc_OverflowError, "getservbyport: port must be 0-65535."); return NULL; } Py_BEGIN_ALLOW_THREADS sp = getservbyport(htons((short)port), proto); Py_END_ALLOW_THREADS if (sp == NULL) { PyErr_SetString(socket_error, "port/proto not found"); return NULL; } return PyString_FromString(sp->s_name); } PyDoc_STRVAR(getservbyport_doc, "getservbyport(port[, protocolname]) -> string\n\ \n\ Return the service name from a port number and protocol name.\n\ The optional protocol name, if given, should be 'tcp' or 'udp',\n\ otherwise any protocol will match."); /* Python interface to getprotobyname(name). This only returns the protocol number, since the other info is already known or not useful (like the list of aliases). */ /*ARGSUSED*/ static PyObject * socket_getprotobyname(PyObject *self, PyObject *args) { const char *name; struct protoent *sp; #if defined(__BEOS__) || defined(__VITA__) /* Not available in BeOS yet. - [cjh] */ PyErr_SetString(socket_error, "getprotobyname not supported"); return NULL; #else if (!PyArg_ParseTuple(args, "s:getprotobyname", &name)) return NULL; Py_BEGIN_ALLOW_THREADS sp = getprotobyname(name); Py_END_ALLOW_THREADS if (sp == NULL) { PyErr_SetString(socket_error, "protocol not found"); return NULL; } return PyInt_FromLong((long) sp->p_proto); #endif } PyDoc_STRVAR(getprotobyname_doc, "getprotobyname(name) -> integer\n\ \n\ Return the protocol number for the named protocol. (Rarely used.)"); #ifdef HAVE_SOCKETPAIR /* Create a pair of sockets using the socketpair() function. 
Arguments as for socket() except the default family is AF_UNIX if defined on the platform; otherwise, the default is AF_INET. */ /*ARGSUSED*/ static PyObject * socket_socketpair(PyObject *self, PyObject *args) { PySocketSockObject *s0 = NULL, *s1 = NULL; SOCKET_T sv[2]; int family, type = SOCK_STREAM, proto = 0; PyObject *res = NULL; #if defined(AF_UNIX) family = AF_UNIX; #else family = AF_INET; #endif if (!PyArg_ParseTuple(args, "|iii:socketpair", &family, &type, &proto)) return NULL; /* Create a pair of socket fds */ if (socketpair(family, type, proto, sv) < 0) return set_error(); s0 = new_sockobject(sv[0], family, type, proto); if (s0 == NULL) goto finally; s1 = new_sockobject(sv[1], family, type, proto); if (s1 == NULL) goto finally; res = PyTuple_Pack(2, s0, s1); finally: if (res == NULL) { if (s0 == NULL) SOCKETCLOSE(sv[0]); if (s1 == NULL) SOCKETCLOSE(sv[1]); } Py_XDECREF(s0); Py_XDECREF(s1); return res; } PyDoc_STRVAR(socketpair_doc, "socketpair([family[, type[, proto]]]) -> (socket object, socket object)\n\ \n\ Create a pair of socket objects from the sockets returned by the platform\n\ socketpair() function.\n\ The arguments are the same as for socket() except the default family is\n\ AF_UNIX if defined on the platform; otherwise, the default is AF_INET."); #endif /* HAVE_SOCKETPAIR */ #ifndef NO_DUP /* Create a socket object from a numeric file description. Useful e.g. if stdin is a socket. Additional arguments as for socket(). 
*/ /*ARGSUSED*/ static PyObject * socket_fromfd(PyObject *self, PyObject *args) { PySocketSockObject *s; SOCKET_T fd; int family, type, proto = 0; if (!PyArg_ParseTuple(args, "iii|i:fromfd", &fd, &family, &type, &proto)) return NULL; /* Dup the fd so it and the socket can be closed independently */ fd = dup(fd); if (fd < 0) return set_error(); s = new_sockobject(fd, family, type, proto); return (PyObject *) s; } PyDoc_STRVAR(fromfd_doc, "fromfd(fd, family, type[, proto]) -> socket object\n\ \n\ Create a socket object from a duplicate of the given\n\ file descriptor.\n\ The remaining arguments are the same as for socket()."); #endif /* NO_DUP */ static PyObject * socket_ntohs(PyObject *self, PyObject *args) { int x1, x2; if (!PyArg_ParseTuple(args, "i:ntohs", &x1)) { return NULL; } if (x1 < 0) { PyErr_SetString(PyExc_OverflowError, "can't convert negative number to unsigned long"); return NULL; } x2 = (unsigned int)ntohs((unsigned short)x1); return PyInt_FromLong(x2); } PyDoc_STRVAR(ntohs_doc, "ntohs(integer) -> integer\n\ \n\ Convert a 16-bit integer from network to host byte order."); static PyObject * socket_ntohl(PyObject *self, PyObject *arg) { unsigned long x; if (PyInt_Check(arg)) { x = PyInt_AS_LONG(arg); if (x == (unsigned long) -1 && PyErr_Occurred()) return NULL; if ((long)x < 0) { PyErr_SetString(PyExc_OverflowError, "can't convert negative number to unsigned long"); return NULL; } } else if (PyLong_Check(arg)) { x = PyLong_AsUnsignedLong(arg); if (x == (unsigned long) -1 && PyErr_Occurred()) return NULL; #if SIZEOF_LONG > 4 { unsigned long y; /* only want the trailing 32 bits */ y = x & 0xFFFFFFFFUL; if (y ^ x) return PyErr_Format(PyExc_OverflowError, "long int larger than 32 bits"); x = y; } #endif } else return PyErr_Format(PyExc_TypeError, "expected int/long, %s found", Py_TYPE(arg)->tp_name); if (x == (unsigned long) -1 && PyErr_Occurred()) return NULL; return PyLong_FromUnsignedLong(ntohl(x)); } PyDoc_STRVAR(ntohl_doc, "ntohl(integer) -> integer\n\ 
\n\ Convert a 32-bit integer from network to host byte order."); static PyObject * socket_htons(PyObject *self, PyObject *args) { int x1, x2; if (!PyArg_ParseTuple(args, "i:htons", &x1)) { return NULL; } if (x1 < 0) { PyErr_SetString(PyExc_OverflowError, "can't convert negative number to unsigned long"); return NULL; } x2 = (unsigned int)htons((unsigned short)x1); return PyInt_FromLong(x2); } PyDoc_STRVAR(htons_doc, "htons(integer) -> integer\n\ \n\ Convert a 16-bit integer from host to network byte order."); static PyObject * socket_htonl(PyObject *self, PyObject *arg) { unsigned long x; if (PyInt_Check(arg)) { x = PyInt_AS_LONG(arg); if (x == (unsigned long) -1 && PyErr_Occurred()) return NULL; if ((long)x < 0) { PyErr_SetString(PyExc_OverflowError, "can't convert negative number to unsigned long"); return NULL; } } else if (PyLong_Check(arg)) { x = PyLong_AsUnsignedLong(arg); if (x == (unsigned long) -1 && PyErr_Occurred()) return NULL; #if SIZEOF_LONG > 4 { unsigned long y; /* only want the trailing 32 bits */ y = x & 0xFFFFFFFFUL; if (y ^ x) return PyErr_Format(PyExc_OverflowError, "long int larger than 32 bits"); x = y; } #endif } else return PyErr_Format(PyExc_TypeError, "expected int/long, %s found", Py_TYPE(arg)->tp_name); return PyLong_FromUnsignedLong(htonl((unsigned long)x)); } PyDoc_STRVAR(htonl_doc, "htonl(integer) -> integer\n\ \n\ Convert a 32-bit integer from host to network byte order."); /* socket.inet_aton() and socket.inet_ntoa() functions. 
*/ PyDoc_STRVAR(inet_aton_doc, "inet_aton(string) -> packed 32-bit IP representation\n\ \n\ Convert an IP address in string format (123.45.67.89) to the 32-bit packed\n\ binary format used in low-level network functions."); static PyObject* socket_inet_aton(PyObject *self, PyObject *args) { #ifndef INADDR_NONE #define INADDR_NONE (-1) #endif #ifdef HAVE_INET_ATON struct in_addr buf; #endif #if !defined(HAVE_INET_ATON) || defined(USE_INET_ATON_WEAKLINK) #if (SIZEOF_INT != 4) #error "Not sure if in_addr_t exists and int is not 32-bits." #endif /* Have to use inet_addr() instead */ unsigned int packed_addr; #endif const char *ip_addr; if (!PyArg_ParseTuple(args, "s:inet_aton", &ip_addr)) return NULL; #ifdef HAVE_INET_ATON #ifdef USE_INET_ATON_WEAKLINK if (inet_aton != NULL) { #endif if (inet_aton(ip_addr, &buf)) return PyString_FromStringAndSize((char *)(&buf), sizeof(buf)); PyErr_SetString(socket_error, "illegal IP address string passed to inet_aton"); return NULL; #ifdef USE_INET_ATON_WEAKLINK } else { #endif #endif #if !defined(HAVE_INET_ATON) || defined(USE_INET_ATON_WEAKLINK) /* special-case this address as inet_addr might return INADDR_NONE * for this */ if (strcmp(ip_addr, "255.255.255.255") == 0) { packed_addr = 0xFFFFFFFF; } else { packed_addr = inet_addr(ip_addr); if (packed_addr == INADDR_NONE) { /* invalid address */ PyErr_SetString(socket_error, "illegal IP address string passed to inet_aton"); return NULL; } } return PyString_FromStringAndSize((char *) &packed_addr, sizeof(packed_addr)); #ifdef USE_INET_ATON_WEAKLINK } #endif #endif } PyDoc_STRVAR(inet_ntoa_doc, "inet_ntoa(packed_ip) -> ip_address_string\n\ \n\ Convert an IP address from 32-bit packed binary format to string format"); static PyObject* socket_inet_ntoa(PyObject *self, PyObject *args) { char *packed_str; int addr_len; struct in_addr packed_addr; if (!PyArg_ParseTuple(args, "s#:inet_ntoa", &packed_str, &addr_len)) { return NULL; } if (addr_len != sizeof(packed_addr)) { 
        PyErr_SetString(socket_error,
            "packed IP wrong length for inet_ntoa");
        return NULL;
    }

    memcpy(&packed_addr, packed_str, addr_len);

    return PyString_FromString(inet_ntoa(packed_addr));
}

#ifdef HAVE_INET_PTON

PyDoc_STRVAR(inet_pton_doc,
"inet_pton(af, ip) -> packed IP address string\n\
\n\
Convert an IP address from string format to a packed string suitable\n\
for use with low-level network functions.");

static PyObject *
socket_inet_pton(PyObject *self, PyObject *args)
{
    int af;
    const char* ip;
    int retval;
#ifdef ENABLE_IPV6
    /* Buffer large enough for either address family. */
    char packed[MAX(sizeof(struct in_addr), sizeof(struct in6_addr))];
#else
    char packed[sizeof(struct in_addr)];
#endif
    if (!PyArg_ParseTuple(args, "is:inet_pton", &af, &ip)) {
        return NULL;
    }

#if !defined(ENABLE_IPV6) && defined(AF_INET6)
    if(af == AF_INET6) {
        PyErr_SetString(socket_error,
                        "can't use AF_INET6, IPv6 is disabled");
        return NULL;
    }
#endif

    retval = inet_pton(af, ip, packed);
    if (retval < 0) {
        PyErr_SetFromErrno(socket_error);
        return NULL;
    } else if (retval == 0) {
        PyErr_SetString(socket_error,
                        "illegal IP address string passed to inet_pton");
        return NULL;
    } else if (af == AF_INET) {
        return PyString_FromStringAndSize(packed,
                                          sizeof(struct in_addr));
#ifdef ENABLE_IPV6
    } else if (af == AF_INET6) {
        return PyString_FromStringAndSize(packed,
                                          sizeof(struct in6_addr));
#endif
    } else {
        PyErr_SetString(socket_error, "unknown address family");
        return NULL;
    }
}

PyDoc_STRVAR(inet_ntop_doc,
"inet_ntop(af, packed_ip) -> string formatted IP address\n\
\n\
Convert a packed IP address of the given family to string format.");

static PyObject *
socket_inet_ntop(PyObject *self, PyObject *args)
{
    int af;
    char* packed;
    int len;
    const char* retval;
#ifdef ENABLE_IPV6
    char ip[MAX(INET_ADDRSTRLEN, INET6_ADDRSTRLEN) + 1];
#else
    char ip[INET_ADDRSTRLEN + 1];
#endif

    /* Guarantee NUL-termination for PyString_FromString() below */
    memset((void *) &ip[0], '\0', sizeof(ip));

    if (!PyArg_ParseTuple(args, "is#:inet_ntop", &af, &packed, &len)) {
        return NULL;
    }

    if (af == AF_INET) {
        if (len != sizeof(struct in_addr)) {
            PyErr_SetString(PyExc_ValueError,
                "invalid length of packed IP address string");
            return NULL;
        }
#ifdef ENABLE_IPV6
    } else if (af == AF_INET6) {
        if (len != sizeof(struct in6_addr)) {
            PyErr_SetString(PyExc_ValueError,
                "invalid length of packed IP address string");
            return NULL;
        }
#endif
    } else {
        PyErr_Format(PyExc_ValueError,
            "unknown address family %d", af);
        return NULL;
    }

    retval = inet_ntop(af, packed, ip, sizeof(ip));
    if (!retval) {
        PyErr_SetFromErrno(socket_error);
        return NULL;
    } else {
        return PyString_FromString(retval);
    }

    /* NOTREACHED */
    /* NOTE(review): dead code — every path above already returned. */
    PyErr_SetString(PyExc_RuntimeError, "invalid handling of inet_ntop");
    return NULL;
}

#endif /* HAVE_INET_PTON */

/* Python interface to getaddrinfo(host, port). */

/*ARGSUSED*/
static PyObject *
socket_getaddrinfo(PyObject *self, PyObject *args)
{
    struct addrinfo hints, *res;
    struct addrinfo *res0 = NULL;
    PyObject *hobj = NULL;
    PyObject *pobj = (PyObject *)NULL;
    char pbuf[30];
    char *hptr, *pptr;
    int family, socktype, protocol, flags;
    int error;
    PyObject *all = (PyObject *)NULL;
    PyObject *single = (PyObject *)NULL;
    PyObject *idna = NULL;

    family = socktype = protocol = flags = 0;
    family = AF_UNSPEC;
    if (!PyArg_ParseTuple(args, "OO|iiii:getaddrinfo",
                          &hobj, &pobj, &family, &socktype,
                          &protocol, &flags)) {
        return NULL;
    }
    /* Host: None, unicode (encoded via IDNA), or byte string. */
    if (hobj == Py_None) {
        hptr = NULL;
    } else if (PyUnicode_Check(hobj)) {
        idna = PyUnicode_AsEncodedString(hobj, "idna", NULL);
        if (!idna)
            return NULL;
        /* 'idna' owns the buffer hptr points into; freed at exit. */
        hptr = PyString_AsString(idna);
    } else if (PyString_Check(hobj)) {
        hptr = PyString_AsString(hobj);
    } else {
        PyErr_SetString(PyExc_TypeError,
                        "getaddrinfo() argument 1 must be string or None");
        return NULL;
    }
    /* Port/service: integer (formatted to a string), string, or None. */
    if (_PyAnyInt_Check(pobj)) {
        long value = PyLong_AsLong(pobj);
        if (value == -1 && PyErr_Occurred())
            return NULL;
        PyOS_snprintf(pbuf, sizeof(pbuf), "%ld", value);
        pptr = pbuf;
    } else if (PyString_Check(pobj)) {
        pptr = PyString_AsString(pobj);
    } else if (pobj == Py_None) {
        pptr = (char *)NULL;
    } else {
PyErr_SetString(socket_error, "getaddrinfo() argument 2 must be integer or string"); goto err; } #if defined(__APPLE__) && defined(AI_NUMERICSERV) if ((flags & AI_NUMERICSERV) && (pptr == NULL || (pptr[0] == '0' && pptr[1] == 0))) { /* On OSX upto at least OSX 10.8 getaddrinfo crashes * if AI_NUMERICSERV is set and the servname is NULL or "0". * This workaround avoids a segfault in libsystem. */ pptr = "00"; } #endif memset(&hints, 0, sizeof(hints)); hints.ai_family = family; hints.ai_socktype = socktype; hints.ai_protocol = protocol; hints.ai_flags = flags; Py_BEGIN_ALLOW_THREADS ACQUIRE_GETADDRINFO_LOCK error = getaddrinfo(hptr, pptr, &hints, &res0); Py_END_ALLOW_THREADS RELEASE_GETADDRINFO_LOCK /* see comment in setipaddr() */ if (error) { set_gaierror(error); goto err; } all = PyList_New(0); if (all == NULL) goto err; for (res = res0; res; res = res->ai_next) { PyObject *addr = makesockaddr(-1, res->ai_addr, res->ai_addrlen, protocol); if (addr == NULL) goto err; single = Py_BuildValue("iiisO", res->ai_family, res->ai_socktype, res->ai_protocol, res->ai_canonname ? res->ai_canonname : "", addr); Py_DECREF(addr); if (single == NULL) goto err; if (PyList_Append(all, single)) { Py_DECREF(single); goto err; } Py_DECREF(single); } Py_XDECREF(idna); if (res0) freeaddrinfo(res0); return all; err: Py_XDECREF(single); Py_XDECREF(all); Py_XDECREF(idna); if (res0) freeaddrinfo(res0); return (PyObject *)NULL; } PyDoc_STRVAR(getaddrinfo_doc, "getaddrinfo(host, port [, family, socktype, proto, flags])\n\ -> list of (family, socktype, proto, canonname, sockaddr)\n\ \n\ Resolve host and port into addrinfo struct."); /* Python interface to getnameinfo(sa, flags). 
*/ /*ARGSUSED*/ static PyObject * socket_getnameinfo(PyObject *self, PyObject *args) { PyObject *sa = (PyObject *)NULL; int flags; const char *hostp; int port; unsigned int flowinfo, scope_id; char hbuf[NI_MAXHOST], pbuf[NI_MAXSERV]; struct addrinfo hints, *res = NULL; int error; PyObject *ret = (PyObject *)NULL; flags = flowinfo = scope_id = 0; if (!PyArg_ParseTuple(args, "Oi:getnameinfo", &sa, &flags)) return NULL; if (!PyTuple_Check(sa)) { PyErr_SetString(PyExc_TypeError, "getnameinfo() argument 1 must be a tuple"); return NULL; } if (!PyArg_ParseTuple(sa, "si|II", &hostp, &port, &flowinfo, &scope_id)) return NULL; if (flowinfo > 0xfffff) { PyErr_SetString(PyExc_OverflowError, "getsockaddrarg: flowinfo must be 0-1048575."); return NULL; } PyOS_snprintf(pbuf, sizeof(pbuf), "%d", port); memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_DGRAM; /* make numeric port happy */ Py_BEGIN_ALLOW_THREADS ACQUIRE_GETADDRINFO_LOCK error = getaddrinfo(hostp, pbuf, &hints, &res); Py_END_ALLOW_THREADS RELEASE_GETADDRINFO_LOCK /* see comment in setipaddr() */ if (error) { set_gaierror(error); goto fail; } if (res->ai_next) { PyErr_SetString(socket_error, "sockaddr resolved to multiple addresses"); goto fail; } switch (res->ai_family) { case AF_INET: { if (PyTuple_GET_SIZE(sa) != 2) { PyErr_SetString(socket_error, "IPv4 sockaddr must be 2 tuple"); goto fail; } break; } #ifdef ENABLE_IPV6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)res->ai_addr; sin6->sin6_flowinfo = htonl(flowinfo); sin6->sin6_scope_id = scope_id; break; } #endif } error = getnameinfo(res->ai_addr, res->ai_addrlen, hbuf, sizeof(hbuf), pbuf, sizeof(pbuf), flags); if (error) { set_gaierror(error); goto fail; } ret = Py_BuildValue("ss", hbuf, pbuf); fail: if (res) freeaddrinfo(res); return ret; } PyDoc_STRVAR(getnameinfo_doc, "getnameinfo(sockaddr, flags) --> (host, port)\n\ \n\ Get host and port for a sockaddr."); /* Python API to getting and 
setting the default timeout value. */ static PyObject * socket_getdefaulttimeout(PyObject *self) { if (defaulttimeout < 0.0) { Py_INCREF(Py_None); return Py_None; } else return PyFloat_FromDouble(defaulttimeout); } PyDoc_STRVAR(getdefaulttimeout_doc, "getdefaulttimeout() -> timeout\n\ \n\ Returns the default timeout in seconds (float) for new socket objects.\n\ A value of None indicates that new socket objects have no timeout.\n\ When the socket module is first imported, the default is None."); static PyObject * socket_setdefaulttimeout(PyObject *self, PyObject *arg) { double timeout; if (arg == Py_None) timeout = -1.0; else { timeout = PyFloat_AsDouble(arg); if (timeout < 0.0) { if (!PyErr_Occurred()) PyErr_SetString(PyExc_ValueError, "Timeout value out of range"); return NULL; } } defaulttimeout = timeout; Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(setdefaulttimeout_doc, "setdefaulttimeout(timeout)\n\ \n\ Set the default timeout in seconds (float) for new socket objects.\n\ A value of None indicates that new socket objects have no timeout.\n\ When the socket module is first imported, the default is None."); /* List of functions exported by this module. 
 */

/* Module-level method table; per-method docstrings are defined next to
   each implementation above. */
static PyMethodDef socket_methods[] = {
    {"gethostbyname", socket_gethostbyname, METH_VARARGS, gethostbyname_doc},
    {"gethostbyname_ex", socket_gethostbyname_ex, METH_VARARGS, ghbn_ex_doc},
    {"gethostbyaddr", socket_gethostbyaddr, METH_VARARGS, gethostbyaddr_doc},
    {"gethostname", socket_gethostname, METH_NOARGS, gethostname_doc},
    {"getservbyname", socket_getservbyname, METH_VARARGS, getservbyname_doc},
    {"getservbyport", socket_getservbyport, METH_VARARGS, getservbyport_doc},
    {"getprotobyname", socket_getprotobyname, METH_VARARGS, getprotobyname_doc},
#ifndef NO_DUP
    {"fromfd", socket_fromfd, METH_VARARGS, fromfd_doc},
#endif
#ifdef HAVE_SOCKETPAIR
    {"socketpair", socket_socketpair, METH_VARARGS, socketpair_doc},
#endif
    {"ntohs", socket_ntohs, METH_VARARGS, ntohs_doc},
    {"ntohl", socket_ntohl, METH_O, ntohl_doc},
    {"htons", socket_htons, METH_VARARGS, htons_doc},
    {"htonl", socket_htonl, METH_O, htonl_doc},
    {"inet_aton", socket_inet_aton, METH_VARARGS, inet_aton_doc},
    {"inet_ntoa", socket_inet_ntoa, METH_VARARGS, inet_ntoa_doc},
#ifdef HAVE_INET_PTON
    {"inet_pton", socket_inet_pton, METH_VARARGS, inet_pton_doc},
    {"inet_ntop", socket_inet_ntop, METH_VARARGS, inet_ntop_doc},
#endif
    {"getaddrinfo", socket_getaddrinfo, METH_VARARGS, getaddrinfo_doc},
    {"getnameinfo", socket_getnameinfo, METH_VARARGS, getnameinfo_doc},
    {"getdefaulttimeout", (PyCFunction)socket_getdefaulttimeout,
     METH_NOARGS, getdefaulttimeout_doc},
    {"setdefaulttimeout", socket_setdefaulttimeout,
     METH_O, setdefaulttimeout_doc},
    {NULL, NULL} /* Sentinel */
};

/* Per-platform one-time OS initialization.  Each platform that needs it
   defines os_init() and OS_INIT_DEFINED; a trivial fallback follows. */

#ifdef RISCOS
#define OS_INIT_DEFINED
static int
os_init(void)
{
    _kernel_swi_regs r;
    r.r[0] = 0;
    _kernel_swi(0x43380, &r, &r);
    taskwindow = r.r[0];
    return 1;
}
#endif /* RISCOS */

#ifdef MS_WINDOWS
#define OS_INIT_DEFINED

/* Additional initialization and cleanup for Windows */

static void
os_cleanup(void)
{
    WSACleanup();
}

static int
os_init(void)
{
    WSADATA WSAData;
    int ret;
    char buf[100];
    ret = WSAStartup(0x0101, &WSAData);
    switch (ret) {
    case 0: /* No error */
        Py_AtExit(os_cleanup);
        return 1; /* Success */
    case WSASYSNOTREADY:
        PyErr_SetString(PyExc_ImportError,
                        "WSAStartup failed: network not ready");
        break;
    case WSAVERNOTSUPPORTED:
    case WSAEINVAL:
        PyErr_SetString(
            PyExc_ImportError,
            "WSAStartup failed: requested version not supported");
        break;
    default:
        PyOS_snprintf(buf, sizeof(buf),
                      "WSAStartup failed: error code %d", ret);
        PyErr_SetString(PyExc_ImportError, buf);
        break;
    }
    return 0; /* Failure */
}

#endif /* MS_WINDOWS */

#ifdef PYOS_OS2
#define OS_INIT_DEFINED

/* Additional initialization for OS/2 */

static int
os_init(void)
{
#ifndef PYCC_GCC
    char reason[64];
    int rc = sock_init();
    if (rc == 0) {
        return 1; /* Success */
    }
    PyOS_snprintf(reason, sizeof(reason),
                  "OS/2 TCP/IP Error# %d", sock_errno());
    PyErr_SetString(PyExc_ImportError, reason);
    return 0; /* Failure */
#else
    /* No need to initialize sockets with GCC/EMX */
    return 1; /* Success */
#endif
}

#endif /* PYOS_OS2 */

#ifndef OS_INIT_DEFINED
static int
os_init(void)
{
    return 1; /* Success */
}
#endif

/* C API table - always add new things to the end for binary
   compatibility. */
static PySocketModule_APIObject PySocketModuleAPI =
{
    &sock_type,
    NULL
};

/* Initialize the _socket module.

   This module is actually called "_socket", and there's a wrapper
   "socket.py" which implements some additional functionality.  On some
   platforms (e.g. Windows and OS/2), socket.py also implements a
   wrapper for the socket type that provides missing functionality such
   as makefile(), dup() and fromfd().  The import of "_socket" may fail
   with an ImportError exception if os-specific initialization fails.
   On Windows, this does WINSOCK initialization.  When WINSOCK is
   initialized successfully, a call to WSACleanup() is scheduled to be
   made at exit time.
*/ PyDoc_STRVAR(socket_doc, "Implementation module for socket operations.\n\ \n\ See the socket module for documentation."); PyMODINIT_FUNC init_socket(void) { PyObject *m, *has_ipv6; if (!os_init()) return; Py_TYPE(&sock_type) = &PyType_Type; m = Py_InitModule3(PySocket_MODULE_NAME, socket_methods, socket_doc); if (m == NULL) return; socket_error = PyErr_NewException("socket.error", PyExc_IOError, NULL); if (socket_error == NULL) return; PySocketModuleAPI.error = socket_error; Py_INCREF(socket_error); PyModule_AddObject(m, "error", socket_error); socket_herror = PyErr_NewException("socket.herror", socket_error, NULL); if (socket_herror == NULL) return; Py_INCREF(socket_herror); PyModule_AddObject(m, "herror", socket_herror); socket_gaierror = PyErr_NewException("socket.gaierror", socket_error, NULL); if (socket_gaierror == NULL) return; Py_INCREF(socket_gaierror); PyModule_AddObject(m, "gaierror", socket_gaierror); socket_timeout = PyErr_NewException("socket.timeout", socket_error, NULL); if (socket_timeout == NULL) return; Py_INCREF(socket_timeout); PyModule_AddObject(m, "timeout", socket_timeout); Py_INCREF((PyObject *)&sock_type); if (PyModule_AddObject(m, "SocketType", (PyObject *)&sock_type) != 0) return; Py_INCREF((PyObject *)&sock_type); if (PyModule_AddObject(m, "socket", (PyObject *)&sock_type) != 0) return; #ifdef ENABLE_IPV6 has_ipv6 = Py_True; #else has_ipv6 = Py_False; #endif Py_INCREF(has_ipv6); PyModule_AddObject(m, "has_ipv6", has_ipv6); /* Export C API */ if (PyModule_AddObject(m, PySocket_CAPI_NAME, PyCapsule_New(&PySocketModuleAPI, PySocket_CAPSULE_NAME, NULL) ) != 0) return; /* Address families (we only support AF_INET and AF_UNIX) */ #ifdef AF_UNSPEC PyModule_AddIntConstant(m, "AF_UNSPEC", AF_UNSPEC); #endif PyModule_AddIntConstant(m, "AF_INET", AF_INET); #ifdef AF_INET6 PyModule_AddIntConstant(m, "AF_INET6", AF_INET6); #endif /* AF_INET6 */ #if defined(AF_UNIX) PyModule_AddIntConstant(m, "AF_UNIX", AF_UNIX); #endif /* AF_UNIX */ #ifdef 
AF_AX25 /* Amateur Radio AX.25 */ PyModule_AddIntConstant(m, "AF_AX25", AF_AX25); #endif #ifdef AF_IPX PyModule_AddIntConstant(m, "AF_IPX", AF_IPX); /* Novell IPX */ #endif #ifdef AF_APPLETALK /* Appletalk DDP */ PyModule_AddIntConstant(m, "AF_APPLETALK", AF_APPLETALK); #endif #ifdef AF_NETROM /* Amateur radio NetROM */ PyModule_AddIntConstant(m, "AF_NETROM", AF_NETROM); #endif #ifdef AF_BRIDGE /* Multiprotocol bridge */ PyModule_AddIntConstant(m, "AF_BRIDGE", AF_BRIDGE); #endif #ifdef AF_ATMPVC /* ATM PVCs */ PyModule_AddIntConstant(m, "AF_ATMPVC", AF_ATMPVC); #endif #ifdef AF_AAL5 /* Reserved for Werner's ATM */ PyModule_AddIntConstant(m, "AF_AAL5", AF_AAL5); #endif #ifdef AF_X25 /* Reserved for X.25 project */ PyModule_AddIntConstant(m, "AF_X25", AF_X25); #endif #ifdef AF_INET6 PyModule_AddIntConstant(m, "AF_INET6", AF_INET6); /* IP version 6 */ #endif #ifdef AF_ROSE /* Amateur Radio X.25 PLP */ PyModule_AddIntConstant(m, "AF_ROSE", AF_ROSE); #endif #ifdef AF_DECnet /* Reserved for DECnet project */ PyModule_AddIntConstant(m, "AF_DECnet", AF_DECnet); #endif #ifdef AF_NETBEUI /* Reserved for 802.2LLC project */ PyModule_AddIntConstant(m, "AF_NETBEUI", AF_NETBEUI); #endif #ifdef AF_SECURITY /* Security callback pseudo AF */ PyModule_AddIntConstant(m, "AF_SECURITY", AF_SECURITY); #endif #ifdef AF_KEY /* PF_KEY key management API */ PyModule_AddIntConstant(m, "AF_KEY", AF_KEY); #endif #ifdef AF_NETLINK /* */ PyModule_AddIntConstant(m, "AF_NETLINK", AF_NETLINK); PyModule_AddIntConstant(m, "NETLINK_ROUTE", NETLINK_ROUTE); #ifdef NETLINK_SKIP PyModule_AddIntConstant(m, "NETLINK_SKIP", NETLINK_SKIP); #endif #ifdef NETLINK_W1 PyModule_AddIntConstant(m, "NETLINK_W1", NETLINK_W1); #endif PyModule_AddIntConstant(m, "NETLINK_USERSOCK", NETLINK_USERSOCK); PyModule_AddIntConstant(m, "NETLINK_FIREWALL", NETLINK_FIREWALL); #ifdef NETLINK_TCPDIAG PyModule_AddIntConstant(m, "NETLINK_TCPDIAG", NETLINK_TCPDIAG); #endif #ifdef NETLINK_NFLOG PyModule_AddIntConstant(m, "NETLINK_NFLOG", 
NETLINK_NFLOG); #endif #ifdef NETLINK_XFRM PyModule_AddIntConstant(m, "NETLINK_XFRM", NETLINK_XFRM); #endif #ifdef NETLINK_ARPD PyModule_AddIntConstant(m, "NETLINK_ARPD", NETLINK_ARPD); #endif #ifdef NETLINK_ROUTE6 PyModule_AddIntConstant(m, "NETLINK_ROUTE6", NETLINK_ROUTE6); #endif PyModule_AddIntConstant(m, "NETLINK_IP6_FW", NETLINK_IP6_FW); #ifdef NETLINK_DNRTMSG PyModule_AddIntConstant(m, "NETLINK_DNRTMSG", NETLINK_DNRTMSG); #endif #ifdef NETLINK_TAPBASE PyModule_AddIntConstant(m, "NETLINK_TAPBASE", NETLINK_TAPBASE); #endif #endif /* AF_NETLINK */ #ifdef AF_ROUTE /* Alias to emulate 4.4BSD */ PyModule_AddIntConstant(m, "AF_ROUTE", AF_ROUTE); #endif #ifdef AF_ASH /* Ash */ PyModule_AddIntConstant(m, "AF_ASH", AF_ASH); #endif #ifdef AF_ECONET /* Acorn Econet */ PyModule_AddIntConstant(m, "AF_ECONET", AF_ECONET); #endif #ifdef AF_ATMSVC /* ATM SVCs */ PyModule_AddIntConstant(m, "AF_ATMSVC", AF_ATMSVC); #endif #ifdef AF_SNA /* Linux SNA Project (nutters!) */ PyModule_AddIntConstant(m, "AF_SNA", AF_SNA); #endif #ifdef AF_IRDA /* IRDA sockets */ PyModule_AddIntConstant(m, "AF_IRDA", AF_IRDA); #endif #ifdef AF_PPPOX /* PPPoX sockets */ PyModule_AddIntConstant(m, "AF_PPPOX", AF_PPPOX); #endif #ifdef AF_WANPIPE /* Wanpipe API Sockets */ PyModule_AddIntConstant(m, "AF_WANPIPE", AF_WANPIPE); #endif #ifdef AF_LLC /* Linux LLC */ PyModule_AddIntConstant(m, "AF_LLC", AF_LLC); #endif #ifdef USE_BLUETOOTH PyModule_AddIntConstant(m, "AF_BLUETOOTH", AF_BLUETOOTH); PyModule_AddIntConstant(m, "BTPROTO_L2CAP", BTPROTO_L2CAP); PyModule_AddIntConstant(m, "BTPROTO_HCI", BTPROTO_HCI); PyModule_AddIntConstant(m, "SOL_HCI", SOL_HCI); #if !defined(__NetBSD__) && !defined(__DragonFly__) PyModule_AddIntConstant(m, "HCI_FILTER", HCI_FILTER); #endif #if !defined(__FreeBSD__) #if !defined(__NetBSD__) && !defined(__DragonFly__) PyModule_AddIntConstant(m, "HCI_TIME_STAMP", HCI_TIME_STAMP); #endif PyModule_AddIntConstant(m, "HCI_DATA_DIR", HCI_DATA_DIR); PyModule_AddIntConstant(m, "BTPROTO_SCO", 
BTPROTO_SCO); #endif PyModule_AddIntConstant(m, "BTPROTO_RFCOMM", BTPROTO_RFCOMM); PyModule_AddStringConstant(m, "BDADDR_ANY", "00:00:00:00:00:00"); PyModule_AddStringConstant(m, "BDADDR_LOCAL", "00:00:00:FF:FF:FF"); #endif #ifdef AF_PACKET PyModule_AddIntMacro(m, AF_PACKET); #endif #ifdef PF_PACKET PyModule_AddIntMacro(m, PF_PACKET); #endif #ifdef PACKET_HOST PyModule_AddIntMacro(m, PACKET_HOST); #endif #ifdef PACKET_BROADCAST PyModule_AddIntMacro(m, PACKET_BROADCAST); #endif #ifdef PACKET_MULTICAST PyModule_AddIntMacro(m, PACKET_MULTICAST); #endif #ifdef PACKET_OTHERHOST PyModule_AddIntMacro(m, PACKET_OTHERHOST); #endif #ifdef PACKET_OUTGOING PyModule_AddIntMacro(m, PACKET_OUTGOING); #endif #ifdef PACKET_LOOPBACK PyModule_AddIntMacro(m, PACKET_LOOPBACK); #endif #ifdef PACKET_FASTROUTE PyModule_AddIntMacro(m, PACKET_FASTROUTE); #endif #ifdef HAVE_LINUX_TIPC_H PyModule_AddIntConstant(m, "AF_TIPC", AF_TIPC); /* for addresses */ PyModule_AddIntConstant(m, "TIPC_ADDR_NAMESEQ", TIPC_ADDR_NAMESEQ); PyModule_AddIntConstant(m, "TIPC_ADDR_NAME", TIPC_ADDR_NAME); PyModule_AddIntConstant(m, "TIPC_ADDR_ID", TIPC_ADDR_ID); PyModule_AddIntConstant(m, "TIPC_ZONE_SCOPE", TIPC_ZONE_SCOPE); PyModule_AddIntConstant(m, "TIPC_CLUSTER_SCOPE", TIPC_CLUSTER_SCOPE); PyModule_AddIntConstant(m, "TIPC_NODE_SCOPE", TIPC_NODE_SCOPE); /* for setsockopt() */ PyModule_AddIntConstant(m, "SOL_TIPC", SOL_TIPC); PyModule_AddIntConstant(m, "TIPC_IMPORTANCE", TIPC_IMPORTANCE); PyModule_AddIntConstant(m, "TIPC_SRC_DROPPABLE", TIPC_SRC_DROPPABLE); PyModule_AddIntConstant(m, "TIPC_DEST_DROPPABLE", TIPC_DEST_DROPPABLE); PyModule_AddIntConstant(m, "TIPC_CONN_TIMEOUT", TIPC_CONN_TIMEOUT); PyModule_AddIntConstant(m, "TIPC_LOW_IMPORTANCE", TIPC_LOW_IMPORTANCE); PyModule_AddIntConstant(m, "TIPC_MEDIUM_IMPORTANCE", TIPC_MEDIUM_IMPORTANCE); PyModule_AddIntConstant(m, "TIPC_HIGH_IMPORTANCE", TIPC_HIGH_IMPORTANCE); PyModule_AddIntConstant(m, "TIPC_CRITICAL_IMPORTANCE", TIPC_CRITICAL_IMPORTANCE); /* for 
subscriptions */ PyModule_AddIntConstant(m, "TIPC_SUB_PORTS", TIPC_SUB_PORTS); PyModule_AddIntConstant(m, "TIPC_SUB_SERVICE", TIPC_SUB_SERVICE); #ifdef TIPC_SUB_CANCEL /* doesn't seem to be available everywhere */ PyModule_AddIntConstant(m, "TIPC_SUB_CANCEL", TIPC_SUB_CANCEL); #endif PyModule_AddIntConstant(m, "TIPC_WAIT_FOREVER", TIPC_WAIT_FOREVER); PyModule_AddIntConstant(m, "TIPC_PUBLISHED", TIPC_PUBLISHED); PyModule_AddIntConstant(m, "TIPC_WITHDRAWN", TIPC_WITHDRAWN); PyModule_AddIntConstant(m, "TIPC_SUBSCR_TIMEOUT", TIPC_SUBSCR_TIMEOUT); PyModule_AddIntConstant(m, "TIPC_CFG_SRV", TIPC_CFG_SRV); PyModule_AddIntConstant(m, "TIPC_TOP_SRV", TIPC_TOP_SRV); #endif /* Socket types */ PyModule_AddIntConstant(m, "SOCK_STREAM", SOCK_STREAM); PyModule_AddIntConstant(m, "SOCK_DGRAM", SOCK_DGRAM); #ifndef __BEOS__ /* We have incomplete socket support. */ PyModule_AddIntConstant(m, "SOCK_RAW", SOCK_RAW); PyModule_AddIntConstant(m, "SOCK_SEQPACKET", SOCK_SEQPACKET); #if defined(SOCK_RDM) PyModule_AddIntConstant(m, "SOCK_RDM", SOCK_RDM); #endif #endif #ifdef SO_DEBUG PyModule_AddIntConstant(m, "SO_DEBUG", SO_DEBUG); #endif #ifdef SO_ACCEPTCONN PyModule_AddIntConstant(m, "SO_ACCEPTCONN", SO_ACCEPTCONN); #endif #ifdef SO_REUSEADDR PyModule_AddIntConstant(m, "SO_REUSEADDR", SO_REUSEADDR); #endif #ifdef SO_EXCLUSIVEADDRUSE PyModule_AddIntConstant(m, "SO_EXCLUSIVEADDRUSE", SO_EXCLUSIVEADDRUSE); #endif #ifdef SO_KEEPALIVE PyModule_AddIntConstant(m, "SO_KEEPALIVE", SO_KEEPALIVE); #endif #ifdef SO_DONTROUTE PyModule_AddIntConstant(m, "SO_DONTROUTE", SO_DONTROUTE); #endif #ifdef SO_BROADCAST PyModule_AddIntConstant(m, "SO_BROADCAST", SO_BROADCAST); #endif #ifdef SO_USELOOPBACK PyModule_AddIntConstant(m, "SO_USELOOPBACK", SO_USELOOPBACK); #endif #ifdef SO_LINGER PyModule_AddIntConstant(m, "SO_LINGER", SO_LINGER); #endif #ifdef SO_OOBINLINE PyModule_AddIntConstant(m, "SO_OOBINLINE", SO_OOBINLINE); #endif #ifdef SO_REUSEPORT PyModule_AddIntConstant(m, "SO_REUSEPORT", SO_REUSEPORT); 
#endif #ifdef SO_SNDBUF PyModule_AddIntConstant(m, "SO_SNDBUF", SO_SNDBUF); #endif #ifdef SO_RCVBUF PyModule_AddIntConstant(m, "SO_RCVBUF", SO_RCVBUF); #endif #ifdef SO_SNDLOWAT PyModule_AddIntConstant(m, "SO_SNDLOWAT", SO_SNDLOWAT); #endif #ifdef SO_RCVLOWAT PyModule_AddIntConstant(m, "SO_RCVLOWAT", SO_RCVLOWAT); #endif #ifdef SO_SNDTIMEO PyModule_AddIntConstant(m, "SO_SNDTIMEO", SO_SNDTIMEO); #endif #ifdef SO_RCVTIMEO PyModule_AddIntConstant(m, "SO_RCVTIMEO", SO_RCVTIMEO); #endif #ifdef SO_ERROR PyModule_AddIntConstant(m, "SO_ERROR", SO_ERROR); #endif #ifdef SO_TYPE PyModule_AddIntConstant(m, "SO_TYPE", SO_TYPE); #endif #ifdef SO_SETFIB PyModule_AddIntConstant(m, "SO_SETFIB", SO_SETFIB); #endif /* Maximum number of connections for "listen" */ #ifdef SOMAXCONN PyModule_AddIntConstant(m, "SOMAXCONN", SOMAXCONN); #else PyModule_AddIntConstant(m, "SOMAXCONN", 5); /* Common value */ #endif /* Flags for send, recv */ #ifdef MSG_OOB PyModule_AddIntConstant(m, "MSG_OOB", MSG_OOB); #endif #ifdef MSG_PEEK PyModule_AddIntConstant(m, "MSG_PEEK", MSG_PEEK); #endif #ifdef MSG_DONTROUTE PyModule_AddIntConstant(m, "MSG_DONTROUTE", MSG_DONTROUTE); #endif #ifdef MSG_DONTWAIT PyModule_AddIntConstant(m, "MSG_DONTWAIT", MSG_DONTWAIT); #endif #ifdef MSG_EOR PyModule_AddIntConstant(m, "MSG_EOR", MSG_EOR); #endif #ifdef MSG_TRUNC PyModule_AddIntConstant(m, "MSG_TRUNC", MSG_TRUNC); #endif #ifdef MSG_CTRUNC PyModule_AddIntConstant(m, "MSG_CTRUNC", MSG_CTRUNC); #endif #ifdef MSG_WAITALL PyModule_AddIntConstant(m, "MSG_WAITALL", MSG_WAITALL); #endif #ifdef MSG_BTAG PyModule_AddIntConstant(m, "MSG_BTAG", MSG_BTAG); #endif #ifdef MSG_ETAG PyModule_AddIntConstant(m, "MSG_ETAG", MSG_ETAG); #endif /* Protocol level and numbers, usable for [gs]etsockopt */ #ifdef SOL_SOCKET PyModule_AddIntConstant(m, "SOL_SOCKET", SOL_SOCKET); #endif #ifdef SOL_IP PyModule_AddIntConstant(m, "SOL_IP", SOL_IP); #else PyModule_AddIntConstant(m, "SOL_IP", 0); #endif #ifdef SOL_IPX PyModule_AddIntConstant(m, 
"SOL_IPX", SOL_IPX); #endif #ifdef SOL_AX25 PyModule_AddIntConstant(m, "SOL_AX25", SOL_AX25); #endif #ifdef SOL_ATALK PyModule_AddIntConstant(m, "SOL_ATALK", SOL_ATALK); #endif #ifdef SOL_NETROM PyModule_AddIntConstant(m, "SOL_NETROM", SOL_NETROM); #endif #ifdef SOL_ROSE PyModule_AddIntConstant(m, "SOL_ROSE", SOL_ROSE); #endif #ifdef SOL_TCP PyModule_AddIntConstant(m, "SOL_TCP", SOL_TCP); #else PyModule_AddIntConstant(m, "SOL_TCP", 6); #endif #ifdef SOL_UDP PyModule_AddIntConstant(m, "SOL_UDP", SOL_UDP); #else PyModule_AddIntConstant(m, "SOL_UDP", 17); #endif #ifdef IPPROTO_IP PyModule_AddIntConstant(m, "IPPROTO_IP", IPPROTO_IP); #else PyModule_AddIntConstant(m, "IPPROTO_IP", 0); #endif #ifdef IPPROTO_HOPOPTS PyModule_AddIntConstant(m, "IPPROTO_HOPOPTS", IPPROTO_HOPOPTS); #endif #ifdef IPPROTO_ICMP PyModule_AddIntConstant(m, "IPPROTO_ICMP", IPPROTO_ICMP); #else PyModule_AddIntConstant(m, "IPPROTO_ICMP", 1); #endif #ifdef IPPROTO_IGMP PyModule_AddIntConstant(m, "IPPROTO_IGMP", IPPROTO_IGMP); #endif #ifdef IPPROTO_GGP PyModule_AddIntConstant(m, "IPPROTO_GGP", IPPROTO_GGP); #endif #ifdef IPPROTO_IPV4 PyModule_AddIntConstant(m, "IPPROTO_IPV4", IPPROTO_IPV4); #endif #ifdef IPPROTO_IPV6 PyModule_AddIntConstant(m, "IPPROTO_IPV6", IPPROTO_IPV6); #endif #ifdef IPPROTO_IPIP PyModule_AddIntConstant(m, "IPPROTO_IPIP", IPPROTO_IPIP); #endif #ifdef IPPROTO_TCP PyModule_AddIntConstant(m, "IPPROTO_TCP", IPPROTO_TCP); #else PyModule_AddIntConstant(m, "IPPROTO_TCP", 6); #endif #ifdef IPPROTO_EGP PyModule_AddIntConstant(m, "IPPROTO_EGP", IPPROTO_EGP); #endif #ifdef IPPROTO_PUP PyModule_AddIntConstant(m, "IPPROTO_PUP", IPPROTO_PUP); #endif #ifdef IPPROTO_UDP PyModule_AddIntConstant(m, "IPPROTO_UDP", IPPROTO_UDP); #else PyModule_AddIntConstant(m, "IPPROTO_UDP", 17); #endif #ifdef IPPROTO_IDP PyModule_AddIntConstant(m, "IPPROTO_IDP", IPPROTO_IDP); #endif #ifdef IPPROTO_HELLO PyModule_AddIntConstant(m, "IPPROTO_HELLO", IPPROTO_HELLO); #endif #ifdef IPPROTO_ND PyModule_AddIntConstant(m, 
"IPPROTO_ND", IPPROTO_ND); #endif #ifdef IPPROTO_TP PyModule_AddIntConstant(m, "IPPROTO_TP", IPPROTO_TP); #endif #ifdef IPPROTO_IPV6 PyModule_AddIntConstant(m, "IPPROTO_IPV6", IPPROTO_IPV6); #endif #ifdef IPPROTO_ROUTING PyModule_AddIntConstant(m, "IPPROTO_ROUTING", IPPROTO_ROUTING); #endif #ifdef IPPROTO_FRAGMENT PyModule_AddIntConstant(m, "IPPROTO_FRAGMENT", IPPROTO_FRAGMENT); #endif #ifdef IPPROTO_RSVP PyModule_AddIntConstant(m, "IPPROTO_RSVP", IPPROTO_RSVP); #endif #ifdef IPPROTO_GRE PyModule_AddIntConstant(m, "IPPROTO_GRE", IPPROTO_GRE); #endif #ifdef IPPROTO_ESP PyModule_AddIntConstant(m, "IPPROTO_ESP", IPPROTO_ESP); #endif #ifdef IPPROTO_AH PyModule_AddIntConstant(m, "IPPROTO_AH", IPPROTO_AH); #endif #ifdef IPPROTO_MOBILE PyModule_AddIntConstant(m, "IPPROTO_MOBILE", IPPROTO_MOBILE); #endif #ifdef IPPROTO_ICMPV6 PyModule_AddIntConstant(m, "IPPROTO_ICMPV6", IPPROTO_ICMPV6); #endif #ifdef IPPROTO_NONE PyModule_AddIntConstant(m, "IPPROTO_NONE", IPPROTO_NONE); #endif #ifdef IPPROTO_DSTOPTS PyModule_AddIntConstant(m, "IPPROTO_DSTOPTS", IPPROTO_DSTOPTS); #endif #ifdef IPPROTO_XTP PyModule_AddIntConstant(m, "IPPROTO_XTP", IPPROTO_XTP); #endif #ifdef IPPROTO_EON PyModule_AddIntConstant(m, "IPPROTO_EON", IPPROTO_EON); #endif #ifdef IPPROTO_PIM PyModule_AddIntConstant(m, "IPPROTO_PIM", IPPROTO_PIM); #endif #ifdef IPPROTO_IPCOMP PyModule_AddIntConstant(m, "IPPROTO_IPCOMP", IPPROTO_IPCOMP); #endif #ifdef IPPROTO_VRRP PyModule_AddIntConstant(m, "IPPROTO_VRRP", IPPROTO_VRRP); #endif #ifdef IPPROTO_BIP PyModule_AddIntConstant(m, "IPPROTO_BIP", IPPROTO_BIP); #endif /**/ #ifdef IPPROTO_RAW PyModule_AddIntConstant(m, "IPPROTO_RAW", IPPROTO_RAW); #else PyModule_AddIntConstant(m, "IPPROTO_RAW", 255); #endif #ifdef IPPROTO_MAX PyModule_AddIntConstant(m, "IPPROTO_MAX", IPPROTO_MAX); #endif /* Some port configuration */ #ifdef IPPORT_RESERVED PyModule_AddIntConstant(m, "IPPORT_RESERVED", IPPORT_RESERVED); #else PyModule_AddIntConstant(m, "IPPORT_RESERVED", 1024); #endif #ifdef 
IPPORT_USERRESERVED PyModule_AddIntConstant(m, "IPPORT_USERRESERVED", IPPORT_USERRESERVED); #else PyModule_AddIntConstant(m, "IPPORT_USERRESERVED", 5000); #endif /* Some reserved IP v.4 addresses */ #ifdef INADDR_ANY PyModule_AddIntConstant(m, "INADDR_ANY", INADDR_ANY); #else PyModule_AddIntConstant(m, "INADDR_ANY", 0x00000000); #endif #ifdef INADDR_BROADCAST PyModule_AddIntConstant(m, "INADDR_BROADCAST", INADDR_BROADCAST); #else PyModule_AddIntConstant(m, "INADDR_BROADCAST", 0xffffffff); #endif #ifdef INADDR_LOOPBACK PyModule_AddIntConstant(m, "INADDR_LOOPBACK", INADDR_LOOPBACK); #else PyModule_AddIntConstant(m, "INADDR_LOOPBACK", 0x7F000001); #endif #ifdef INADDR_UNSPEC_GROUP PyModule_AddIntConstant(m, "INADDR_UNSPEC_GROUP", INADDR_UNSPEC_GROUP); #else PyModule_AddIntConstant(m, "INADDR_UNSPEC_GROUP", 0xe0000000); #endif #ifdef INADDR_ALLHOSTS_GROUP PyModule_AddIntConstant(m, "INADDR_ALLHOSTS_GROUP", INADDR_ALLHOSTS_GROUP); #else PyModule_AddIntConstant(m, "INADDR_ALLHOSTS_GROUP", 0xe0000001); #endif #ifdef INADDR_MAX_LOCAL_GROUP PyModule_AddIntConstant(m, "INADDR_MAX_LOCAL_GROUP", INADDR_MAX_LOCAL_GROUP); #else PyModule_AddIntConstant(m, "INADDR_MAX_LOCAL_GROUP", 0xe00000ff); #endif #ifdef INADDR_NONE PyModule_AddIntConstant(m, "INADDR_NONE", INADDR_NONE); #else PyModule_AddIntConstant(m, "INADDR_NONE", 0xffffffff); #endif /* IPv4 [gs]etsockopt options */ #ifdef IP_OPTIONS PyModule_AddIntConstant(m, "IP_OPTIONS", IP_OPTIONS); #endif #ifdef IP_HDRINCL PyModule_AddIntConstant(m, "IP_HDRINCL", IP_HDRINCL); #endif #ifdef IP_TOS PyModule_AddIntConstant(m, "IP_TOS", IP_TOS); #endif #ifdef IP_TTL PyModule_AddIntConstant(m, "IP_TTL", IP_TTL); #endif #ifdef IP_RECVOPTS PyModule_AddIntConstant(m, "IP_RECVOPTS", IP_RECVOPTS); #endif #ifdef IP_RECVRETOPTS PyModule_AddIntConstant(m, "IP_RECVRETOPTS", IP_RECVRETOPTS); #endif #ifdef IP_RECVDSTADDR PyModule_AddIntConstant(m, "IP_RECVDSTADDR", IP_RECVDSTADDR); #endif #ifdef IP_RETOPTS PyModule_AddIntConstant(m, "IP_RETOPTS", 
IP_RETOPTS); #endif #ifdef IP_MULTICAST_IF PyModule_AddIntConstant(m, "IP_MULTICAST_IF", IP_MULTICAST_IF); #endif #ifdef IP_MULTICAST_TTL PyModule_AddIntConstant(m, "IP_MULTICAST_TTL", IP_MULTICAST_TTL); #endif #ifdef IP_MULTICAST_LOOP PyModule_AddIntConstant(m, "IP_MULTICAST_LOOP", IP_MULTICAST_LOOP); #endif #ifdef IP_ADD_MEMBERSHIP PyModule_AddIntConstant(m, "IP_ADD_MEMBERSHIP", IP_ADD_MEMBERSHIP); #endif #ifdef IP_DROP_MEMBERSHIP PyModule_AddIntConstant(m, "IP_DROP_MEMBERSHIP", IP_DROP_MEMBERSHIP); #endif #ifdef IP_DEFAULT_MULTICAST_TTL PyModule_AddIntConstant(m, "IP_DEFAULT_MULTICAST_TTL", IP_DEFAULT_MULTICAST_TTL); #endif #ifdef IP_DEFAULT_MULTICAST_LOOP PyModule_AddIntConstant(m, "IP_DEFAULT_MULTICAST_LOOP", IP_DEFAULT_MULTICAST_LOOP); #endif #ifdef IP_MAX_MEMBERSHIPS PyModule_AddIntConstant(m, "IP_MAX_MEMBERSHIPS", IP_MAX_MEMBERSHIPS); #endif /* IPv6 [gs]etsockopt options, defined in RFC2553 */ #ifdef IPV6_JOIN_GROUP PyModule_AddIntConstant(m, "IPV6_JOIN_GROUP", IPV6_JOIN_GROUP); #endif #ifdef IPV6_LEAVE_GROUP PyModule_AddIntConstant(m, "IPV6_LEAVE_GROUP", IPV6_LEAVE_GROUP); #endif #ifdef IPV6_MULTICAST_HOPS PyModule_AddIntConstant(m, "IPV6_MULTICAST_HOPS", IPV6_MULTICAST_HOPS); #endif #ifdef IPV6_MULTICAST_IF PyModule_AddIntConstant(m, "IPV6_MULTICAST_IF", IPV6_MULTICAST_IF); #endif #ifdef IPV6_MULTICAST_LOOP PyModule_AddIntConstant(m, "IPV6_MULTICAST_LOOP", IPV6_MULTICAST_LOOP); #endif #ifdef IPV6_UNICAST_HOPS PyModule_AddIntConstant(m, "IPV6_UNICAST_HOPS", IPV6_UNICAST_HOPS); #endif /* Additional IPV6 socket options, defined in RFC 3493 */ #ifdef IPV6_V6ONLY PyModule_AddIntConstant(m, "IPV6_V6ONLY", IPV6_V6ONLY); #endif /* Advanced IPV6 socket options, from RFC 3542 */ #ifdef IPV6_CHECKSUM PyModule_AddIntConstant(m, "IPV6_CHECKSUM", IPV6_CHECKSUM); #endif #ifdef IPV6_DONTFRAG PyModule_AddIntConstant(m, "IPV6_DONTFRAG", IPV6_DONTFRAG); #endif #ifdef IPV6_DSTOPTS PyModule_AddIntConstant(m, "IPV6_DSTOPTS", IPV6_DSTOPTS); #endif #ifdef IPV6_HOPLIMIT 
PyModule_AddIntConstant(m, "IPV6_HOPLIMIT", IPV6_HOPLIMIT); #endif #ifdef IPV6_HOPOPTS PyModule_AddIntConstant(m, "IPV6_HOPOPTS", IPV6_HOPOPTS); #endif #ifdef IPV6_NEXTHOP PyModule_AddIntConstant(m, "IPV6_NEXTHOP", IPV6_NEXTHOP); #endif #ifdef IPV6_PATHMTU PyModule_AddIntConstant(m, "IPV6_PATHMTU", IPV6_PATHMTU); #endif #ifdef IPV6_PKTINFO PyModule_AddIntConstant(m, "IPV6_PKTINFO", IPV6_PKTINFO); #endif #ifdef IPV6_RECVDSTOPTS PyModule_AddIntConstant(m, "IPV6_RECVDSTOPTS", IPV6_RECVDSTOPTS); #endif #ifdef IPV6_RECVHOPLIMIT PyModule_AddIntConstant(m, "IPV6_RECVHOPLIMIT", IPV6_RECVHOPLIMIT); #endif #ifdef IPV6_RECVHOPOPTS PyModule_AddIntConstant(m, "IPV6_RECVHOPOPTS", IPV6_RECVHOPOPTS); #endif #ifdef IPV6_RECVPKTINFO PyModule_AddIntConstant(m, "IPV6_RECVPKTINFO", IPV6_RECVPKTINFO); #endif #ifdef IPV6_RECVRTHDR PyModule_AddIntConstant(m, "IPV6_RECVRTHDR", IPV6_RECVRTHDR); #endif #ifdef IPV6_RECVTCLASS PyModule_AddIntConstant(m, "IPV6_RECVTCLASS", IPV6_RECVTCLASS); #endif #ifdef IPV6_RTHDR PyModule_AddIntConstant(m, "IPV6_RTHDR", IPV6_RTHDR); #endif #ifdef IPV6_RTHDRDSTOPTS PyModule_AddIntConstant(m, "IPV6_RTHDRDSTOPTS", IPV6_RTHDRDSTOPTS); #endif #ifdef IPV6_RTHDR_TYPE_0 PyModule_AddIntConstant(m, "IPV6_RTHDR_TYPE_0", IPV6_RTHDR_TYPE_0); #endif #ifdef IPV6_RECVPATHMTU PyModule_AddIntConstant(m, "IPV6_RECVPATHMTU", IPV6_RECVPATHMTU); #endif #ifdef IPV6_TCLASS PyModule_AddIntConstant(m, "IPV6_TCLASS", IPV6_TCLASS); #endif #ifdef IPV6_USE_MIN_MTU PyModule_AddIntConstant(m, "IPV6_USE_MIN_MTU", IPV6_USE_MIN_MTU); #endif /* TCP options */ #ifdef TCP_NODELAY PyModule_AddIntConstant(m, "TCP_NODELAY", TCP_NODELAY); #endif #ifdef TCP_MAXSEG PyModule_AddIntConstant(m, "TCP_MAXSEG", TCP_MAXSEG); #endif #ifdef TCP_CORK PyModule_AddIntConstant(m, "TCP_CORK", TCP_CORK); #endif #ifdef TCP_KEEPIDLE PyModule_AddIntConstant(m, "TCP_KEEPIDLE", TCP_KEEPIDLE); #endif #ifdef TCP_KEEPINTVL PyModule_AddIntConstant(m, "TCP_KEEPINTVL", TCP_KEEPINTVL); #endif #ifdef TCP_KEEPCNT 
PyModule_AddIntConstant(m, "TCP_KEEPCNT", TCP_KEEPCNT); #endif #ifdef TCP_SYNCNT PyModule_AddIntConstant(m, "TCP_SYNCNT", TCP_SYNCNT); #endif #ifdef TCP_LINGER2 PyModule_AddIntConstant(m, "TCP_LINGER2", TCP_LINGER2); #endif #ifdef TCP_DEFER_ACCEPT PyModule_AddIntConstant(m, "TCP_DEFER_ACCEPT", TCP_DEFER_ACCEPT); #endif #ifdef TCP_WINDOW_CLAMP PyModule_AddIntConstant(m, "TCP_WINDOW_CLAMP", TCP_WINDOW_CLAMP); #endif #ifdef TCP_INFO PyModule_AddIntConstant(m, "TCP_INFO", TCP_INFO); #endif #ifdef TCP_QUICKACK PyModule_AddIntConstant(m, "TCP_QUICKACK", TCP_QUICKACK); #endif /* IPX options */ #ifdef IPX_TYPE PyModule_AddIntConstant(m, "IPX_TYPE", IPX_TYPE); #endif /* get{addr,name}info parameters */ #ifdef EAI_ADDRFAMILY PyModule_AddIntConstant(m, "EAI_ADDRFAMILY", EAI_ADDRFAMILY); #endif #ifdef EAI_AGAIN PyModule_AddIntConstant(m, "EAI_AGAIN", EAI_AGAIN); #endif #ifdef EAI_BADFLAGS PyModule_AddIntConstant(m, "EAI_BADFLAGS", EAI_BADFLAGS); #endif #ifdef EAI_FAIL PyModule_AddIntConstant(m, "EAI_FAIL", EAI_FAIL); #endif #ifdef EAI_FAMILY PyModule_AddIntConstant(m, "EAI_FAMILY", EAI_FAMILY); #endif #ifdef EAI_MEMORY PyModule_AddIntConstant(m, "EAI_MEMORY", EAI_MEMORY); #endif #ifdef EAI_NODATA PyModule_AddIntConstant(m, "EAI_NODATA", EAI_NODATA); #endif #ifdef EAI_NONAME PyModule_AddIntConstant(m, "EAI_NONAME", EAI_NONAME); #endif #ifdef EAI_OVERFLOW PyModule_AddIntConstant(m, "EAI_OVERFLOW", EAI_OVERFLOW); #endif #ifdef EAI_SERVICE PyModule_AddIntConstant(m, "EAI_SERVICE", EAI_SERVICE); #endif #ifdef EAI_SOCKTYPE PyModule_AddIntConstant(m, "EAI_SOCKTYPE", EAI_SOCKTYPE); #endif #ifdef EAI_SYSTEM PyModule_AddIntConstant(m, "EAI_SYSTEM", EAI_SYSTEM); #endif #ifdef EAI_BADHINTS PyModule_AddIntConstant(m, "EAI_BADHINTS", EAI_BADHINTS); #endif #ifdef EAI_PROTOCOL PyModule_AddIntConstant(m, "EAI_PROTOCOL", EAI_PROTOCOL); #endif #ifdef EAI_MAX PyModule_AddIntConstant(m, "EAI_MAX", EAI_MAX); #endif #ifdef AI_PASSIVE PyModule_AddIntConstant(m, "AI_PASSIVE", AI_PASSIVE); #endif 
#ifdef AI_CANONNAME PyModule_AddIntConstant(m, "AI_CANONNAME", AI_CANONNAME); #endif #ifdef AI_NUMERICHOST PyModule_AddIntConstant(m, "AI_NUMERICHOST", AI_NUMERICHOST); #endif #ifdef AI_NUMERICSERV PyModule_AddIntConstant(m, "AI_NUMERICSERV", AI_NUMERICSERV); #endif #ifdef AI_MASK PyModule_AddIntConstant(m, "AI_MASK", AI_MASK); #endif #ifdef AI_ALL PyModule_AddIntConstant(m, "AI_ALL", AI_ALL); #endif #ifdef AI_V4MAPPED_CFG PyModule_AddIntConstant(m, "AI_V4MAPPED_CFG", AI_V4MAPPED_CFG); #endif #ifdef AI_ADDRCONFIG PyModule_AddIntConstant(m, "AI_ADDRCONFIG", AI_ADDRCONFIG); #endif #ifdef AI_V4MAPPED PyModule_AddIntConstant(m, "AI_V4MAPPED", AI_V4MAPPED); #endif #ifdef AI_DEFAULT PyModule_AddIntConstant(m, "AI_DEFAULT", AI_DEFAULT); #endif #ifdef NI_MAXHOST PyModule_AddIntConstant(m, "NI_MAXHOST", NI_MAXHOST); #endif #ifdef NI_MAXSERV PyModule_AddIntConstant(m, "NI_MAXSERV", NI_MAXSERV); #endif #ifdef NI_NOFQDN PyModule_AddIntConstant(m, "NI_NOFQDN", NI_NOFQDN); #endif #ifdef NI_NUMERICHOST PyModule_AddIntConstant(m, "NI_NUMERICHOST", NI_NUMERICHOST); #endif #ifdef NI_NAMEREQD PyModule_AddIntConstant(m, "NI_NAMEREQD", NI_NAMEREQD); #endif #ifdef NI_NUMERICSERV PyModule_AddIntConstant(m, "NI_NUMERICSERV", NI_NUMERICSERV); #endif #ifdef NI_DGRAM PyModule_AddIntConstant(m, "NI_DGRAM", NI_DGRAM); #endif /* shutdown() parameters */ #ifdef SHUT_RD PyModule_AddIntConstant(m, "SHUT_RD", SHUT_RD); #elif defined(SD_RECEIVE) PyModule_AddIntConstant(m, "SHUT_RD", SD_RECEIVE); #else PyModule_AddIntConstant(m, "SHUT_RD", 0); #endif #ifdef SHUT_WR PyModule_AddIntConstant(m, "SHUT_WR", SHUT_WR); #elif defined(SD_SEND) PyModule_AddIntConstant(m, "SHUT_WR", SD_SEND); #else PyModule_AddIntConstant(m, "SHUT_WR", 1); #endif #ifdef SHUT_RDWR PyModule_AddIntConstant(m, "SHUT_RDWR", SHUT_RDWR); #elif defined(SD_BOTH) PyModule_AddIntConstant(m, "SHUT_RDWR", SD_BOTH); #else PyModule_AddIntConstant(m, "SHUT_RDWR", 2); #endif #ifdef SIO_RCVALL { DWORD codes[] = {SIO_RCVALL, SIO_KEEPALIVE_VALS}; 
    /* (continuation of the socket module init function: this is the tail of
       the SIO_RCVALL registration block whose opening brace is above) */
    const char *names[] = {"SIO_RCVALL", "SIO_KEEPALIVE_VALS"};
    int i;
    /* NOTE(review): int vs size_t comparison below; harmless for a 2-entry
       table but would warn under -Wsign-compare. */
    for(i = 0; i<sizeof(codes)/sizeof(*codes); ++i) {
        PyObject *tmp;
        tmp = PyLong_FromUnsignedLong(codes[i]);
        if (tmp == NULL)
            return;  /* allocation failed; abort module init */
        PyModule_AddObject(m, names[i], tmp);
    }
    }
    PyModule_AddIntConstant(m, "RCVALL_OFF", RCVALL_OFF);
    PyModule_AddIntConstant(m, "RCVALL_ON", RCVALL_ON);
    PyModule_AddIntConstant(m, "RCVALL_SOCKETLEVELONLY", RCVALL_SOCKETLEVELONLY);
#ifdef RCVALL_IPLEVEL
    PyModule_AddIntConstant(m, "RCVALL_IPLEVEL", RCVALL_IPLEVEL);
#endif
#ifdef RCVALL_MAX
    PyModule_AddIntConstant(m, "RCVALL_MAX", RCVALL_MAX);
#endif
#endif /* _MSTCPIP_ */

    /* Initialize gethostbyname lock */
#if defined(USE_GETHOSTBYNAME_LOCK) || defined(USE_GETADDRINFO_LOCK)
    netdb_lock = PyThread_allocate_lock();
#endif
}

#ifndef HAVE_INET_PTON
#if !defined(NTDDI_VERSION) || (NTDDI_VERSION < NTDDI_LONGHORN)

/* Simplistic emulation code for inet_pton that only works for IPv4 */
/* These are not exposed because they do not set errno properly */

/* Parse a dotted-quad IPv4 string into 4 network-order bytes at dst.
   Returns 1 on success, 0 on a malformed address, -1 for any family
   other than AF_INET (without setting errno, unlike the real API). */
int
inet_pton(int af, const char *src, void *dst)
{
    if (af == AF_INET) {
#if (SIZEOF_INT != 4)
#error "Not sure if in_addr_t exists and int is not 32-bits."
#endif
        unsigned int packed_addr;
        /* inet_addr cannot distinguish the valid address 255.255.255.255
           from its INADDR_NONE error value; that corner case is rejected. */
        packed_addr = inet_addr(src);
        if (packed_addr == INADDR_NONE)
            return 0;
        memcpy(dst, &packed_addr, 4);
        return 1;
    }
    /* Should set errno to EAFNOSUPPORT */
    return -1;
}

/* Format 4 bytes at src as dotted-quad text into dst (capacity `size`).
   Requires size >= 16 (max "255.255.255.255" + NUL, so strncpy always
   terminates here). Returns dst, or NULL on error. */
const char *
inet_ntop(int af, const void *src, char *dst, socklen_t size)
{
    if (af == AF_INET) {
        struct in_addr packed_addr;
        if (size < 16)
            /* Should set errno to ENOSPC. */
            return NULL;
        memcpy(&packed_addr, src, sizeof(packed_addr));
        return strncpy(dst, inet_ntoa(packed_addr), size);
    }
    /* Should set errno to EAFNOSUPPORT */
    return NULL;
}

#endif
#endif
583915.c
/* -*- Mode: C; c-basic-offset:4 ; -*- */ /* * (C) 2001 by Argonne National Laboratory. * See COPYRIGHT in top-level directory. * * This file is automatically generated by buildiface * DO NOT EDIT */ #include "mpi_fortimpl.h" /* Begin MPI profiling block */ #if defined(USE_WEAK_SYMBOLS) && !defined(USE_ONLY_MPI_NAMES) #if defined(HAVE_MULTIPLE_PRAGMA_WEAK) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #if defined(F77_NAME_UPPER) #pragma weak MPI_COMM_TEST_INTER = PMPI_COMM_TEST_INTER #pragma weak mpi_comm_test_inter__ = PMPI_COMM_TEST_INTER #pragma weak mpi_comm_test_inter_ = PMPI_COMM_TEST_INTER #pragma weak mpi_comm_test_inter = PMPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #pragma weak MPI_COMM_TEST_INTER = pmpi_comm_test_inter__ #pragma weak mpi_comm_test_inter__ = pmpi_comm_test_inter__ #pragma weak mpi_comm_test_inter_ = pmpi_comm_test_inter__ #pragma weak mpi_comm_test_inter = pmpi_comm_test_inter__ #elif defined(F77_NAME_LOWER_USCORE) #pragma weak MPI_COMM_TEST_INTER = pmpi_comm_test_inter_ #pragma weak mpi_comm_test_inter__ = pmpi_comm_test_inter_ #pragma weak mpi_comm_test_inter_ = pmpi_comm_test_inter_ #pragma weak mpi_comm_test_inter = pmpi_comm_test_inter_ #else #pragma weak MPI_COMM_TEST_INTER = pmpi_comm_test_inter #pragma weak mpi_comm_test_inter__ = pmpi_comm_test_inter #pragma weak mpi_comm_test_inter_ = pmpi_comm_test_inter #pragma weak mpi_comm_test_inter = pmpi_comm_test_inter #endif #elif defined(HAVE_PRAGMA_WEAK) #if defined(F77_NAME_UPPER) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #pragma weak MPI_COMM_TEST_INTER = PMPI_COMM_TEST_INTER #elif 
defined(F77_NAME_LOWER_2USCORE) extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #pragma weak mpi_comm_test_inter__ = pmpi_comm_test_inter__ #elif !defined(F77_NAME_LOWER_USCORE) extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #pragma weak mpi_comm_test_inter = pmpi_comm_test_inter #else extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #pragma weak mpi_comm_test_inter_ = pmpi_comm_test_inter_ #endif #elif defined(HAVE_PRAGMA_HP_SEC_DEF) #if defined(F77_NAME_UPPER) #pragma _HP_SECONDARY_DEF PMPI_COMM_TEST_INTER MPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #pragma _HP_SECONDARY_DEF pmpi_comm_test_inter__ mpi_comm_test_inter__ #elif !defined(F77_NAME_LOWER_USCORE) #pragma _HP_SECONDARY_DEF pmpi_comm_test_inter mpi_comm_test_inter #else #pragma _HP_SECONDARY_DEF pmpi_comm_test_inter_ mpi_comm_test_inter_ #endif #elif defined(HAVE_PRAGMA_CRI_DUP) #if defined(F77_NAME_UPPER) #pragma _CRI duplicate MPI_COMM_TEST_INTER as PMPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #pragma _CRI duplicate mpi_comm_test_inter__ as pmpi_comm_test_inter__ #elif !defined(F77_NAME_LOWER_USCORE) #pragma _CRI duplicate mpi_comm_test_inter as pmpi_comm_test_inter #else #pragma _CRI duplicate mpi_comm_test_inter_ as pmpi_comm_test_inter_ #endif #elif defined(HAVE_WEAK_ATTRIBUTE) #if defined(F77_NAME_UPPER) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * 
) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); #elif defined(F77_NAME_LOWER_2USCORE) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); #elif defined(F77_NAME_LOWER_USCORE) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); #else extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); #endif #endif /* HAVE_PRAGMA_WEAK */ #endif /* USE_WEAK_SYMBOLS */ /* End MPI 
profiling block */ /* These definitions are used only for generating the Fortran wrappers */ #if defined(USE_WEAK_SYMBOLS) && defined(USE_ONLY_MPI_NAMES) #if defined(HAVE_MULTIPLE_PRAGMA_WEAK) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #if defined(F77_NAME_UPPER) #pragma weak mpi_comm_test_inter__ = MPI_COMM_TEST_INTER #pragma weak mpi_comm_test_inter_ = MPI_COMM_TEST_INTER #pragma weak mpi_comm_test_inter = MPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #pragma weak MPI_COMM_TEST_INTER = mpi_comm_test_inter__ #pragma weak mpi_comm_test_inter_ = mpi_comm_test_inter__ #pragma weak mpi_comm_test_inter = mpi_comm_test_inter__ #elif defined(F77_NAME_LOWER_USCORE) #pragma weak MPI_COMM_TEST_INTER = mpi_comm_test_inter_ #pragma weak mpi_comm_test_inter__ = mpi_comm_test_inter_ #pragma weak mpi_comm_test_inter = mpi_comm_test_inter_ #else #pragma weak MPI_COMM_TEST_INTER = mpi_comm_test_inter #pragma weak mpi_comm_test_inter__ = mpi_comm_test_inter #pragma weak mpi_comm_test_inter_ = mpi_comm_test_inter #endif #elif defined(HAVE_WEAK_ATTRIBUTE) #if defined(F77_NAME_UPPER) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("MPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("MPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("MPI_COMM_TEST_INTER"))); #elif 
defined(F77_NAME_LOWER_2USCORE) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter__"))); #elif defined(F77_NAME_LOWER_USCORE) extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter_"))); #else extern FORT_DLL_SPEC void FORT_CALL MPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("mpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #endif #endif #endif /* Map the name to the correct form */ #ifndef MPICH_MPI_FROM_PMPI #if defined(USE_WEAK_SYMBOLS) #if defined(HAVE_MULTIPLE_PRAGMA_WEAK) /* Define the weak versions of the PMPI routine*/ #ifndef F77_NAME_UPPER extern FORT_DLL_SPEC void FORT_CALL PMPI_COMM_TEST_INTER( MPI_Fint *, 
MPI_Fint *, MPI_Fint * ); #endif #ifndef F77_NAME_LOWER_2USCORE extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #endif #ifndef F77_NAME_LOWER_USCORE extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #endif #ifndef F77_NAME_LOWER extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ); #endif #if defined(F77_NAME_UPPER) #pragma weak pmpi_comm_test_inter__ = PMPI_COMM_TEST_INTER #pragma weak pmpi_comm_test_inter_ = PMPI_COMM_TEST_INTER #pragma weak pmpi_comm_test_inter = PMPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #pragma weak PMPI_COMM_TEST_INTER = pmpi_comm_test_inter__ #pragma weak pmpi_comm_test_inter_ = pmpi_comm_test_inter__ #pragma weak pmpi_comm_test_inter = pmpi_comm_test_inter__ #elif defined(F77_NAME_LOWER_USCORE) #pragma weak PMPI_COMM_TEST_INTER = pmpi_comm_test_inter_ #pragma weak pmpi_comm_test_inter__ = pmpi_comm_test_inter_ #pragma weak pmpi_comm_test_inter = pmpi_comm_test_inter_ #else #pragma weak PMPI_COMM_TEST_INTER = pmpi_comm_test_inter #pragma weak pmpi_comm_test_inter__ = pmpi_comm_test_inter #pragma weak pmpi_comm_test_inter_ = pmpi_comm_test_inter #endif /* Test on name mapping */ #elif defined(HAVE_WEAK_ATTRIBUTE) #if defined(F77_NAME_UPPER) extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("PMPI_COMM_TEST_INTER"))); #elif defined(F77_NAME_LOWER_2USCORE) extern FORT_DLL_SPEC void FORT_CALL PMPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); extern FORT_DLL_SPEC void 
FORT_CALL pmpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter__"))); #elif defined(F77_NAME_LOWER_USCORE) extern FORT_DLL_SPEC void FORT_CALL PMPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter_"))); #else extern FORT_DLL_SPEC void FORT_CALL PMPI_COMM_TEST_INTER( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter__( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); extern FORT_DLL_SPEC void FORT_CALL pmpi_comm_test_inter_( MPI_Fint *, MPI_Fint *, MPI_Fint * ) __attribute__((weak,alias("pmpi_comm_test_inter"))); #endif /* Test on name mapping */ #endif /* HAVE_MULTIPLE_PRAGMA_WEAK */ #endif /* USE_WEAK_SYMBOLS */ #ifdef F77_NAME_UPPER #define mpi_comm_test_inter_ PMPI_COMM_TEST_INTER #elif defined(F77_NAME_LOWER_2USCORE) #define mpi_comm_test_inter_ pmpi_comm_test_inter__ #elif !defined(F77_NAME_LOWER_USCORE) #define mpi_comm_test_inter_ pmpi_comm_test_inter #else #define mpi_comm_test_inter_ pmpi_comm_test_inter_ #endif /* Test on name mapping */ /* This defines the routine that we call, which must be the PMPI version since we're renaming the Fortran entry as the pmpi version. The MPI name must be undefined first to prevent any conflicts with previous renamings. 
*/
#undef MPI_Comm_test_inter
#define MPI_Comm_test_inter PMPI_Comm_test_inter

#else

/* Not building the profiling interposer: map the canonical internal name
   mpi_comm_test_inter_ onto whichever external Fortran name-mangling
   convention this compiler uses. */
#ifdef F77_NAME_UPPER
#define mpi_comm_test_inter_ MPI_COMM_TEST_INTER
#elif defined(F77_NAME_LOWER_2USCORE)
#define mpi_comm_test_inter_ mpi_comm_test_inter__
#elif !defined(F77_NAME_LOWER_USCORE)
#define mpi_comm_test_inter_ mpi_comm_test_inter
/* Else leave name alone */
#endif
#endif /* MPICH_MPI_FROM_PMPI */

/* Prototypes for the Fortran interfaces */
#include "fproto.h"

/* Fortran binding for MPI_Comm_test_inter.
 *   v1   [in]  communicator handle as a Fortran INTEGER
 *   v2   [out] flag: whether the communicator is an inter-communicator
 *   ierr [out] MPI status code
 * The C int result is converted with MPIR_TO_FLOG (presumably the
 * C-truth-value -> Fortran LOGICAL conversion macro) only on success. */
FORT_DLL_SPEC void FORT_CALL mpi_comm_test_inter_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *ierr ){
    int l2;
    *ierr = MPI_Comm_test_inter( (MPI_Comm)(*v1), &l2 );
    if (*ierr == MPI_SUCCESS) *v2 = MPIR_TO_FLOG(l2);
}
685636.c
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 *
 * Copyright:
 *   2020      Evan Nemerson <[email protected]>
 */

#define SIMDE_TEST_X86_AVX512_INSN setzero

#include <test/x86/avx512/test-avx512.h>
#include <simde/x86/avx512/set.h>
#include <simde/x86/avx512/set1.h>
#include <simde/x86/avx512/setzero.h>

/* Each test checks a simde_mm512_setzero_* variant against the
 * equivalent set1(0) broadcast of the matching element type. */

/* Integer variant: every 32-bit lane of the 512-bit vector must be 0. */
static int
test_simde_mm512_setzero_si512(SIMDE_MUNIT_TEST_ARGS) {
  simde_assert_m512i_i32(simde_mm512_setzero_si512(), ==, simde_mm512_set1_epi32(INT32_C(0)));

  return 0;
}

/* Single-precision variant; "close" comparison with 1-ulp slack. */
static int
test_simde_mm512_setzero_ps(SIMDE_MUNIT_TEST_ARGS) {
  simde_assert_m512_close(simde_mm512_setzero_ps(), simde_mm512_set1_ps(SIMDE_FLOAT32_C(0.0)), 1);

  return 0;
}

/* Double-precision variant; "close" comparison with 1-ulp slack. */
static int
test_simde_mm512_setzero_pd(SIMDE_MUNIT_TEST_ARGS) {
  simde_assert_m512d_close(simde_mm512_setzero_pd(), simde_mm512_set1_pd(SIMDE_FLOAT64_C(0.0)), 1);

  return 0;
}

SIMDE_TEST_FUNC_LIST_BEGIN
  SIMDE_TEST_FUNC_LIST_ENTRY(mm512_setzero_si512)
  SIMDE_TEST_FUNC_LIST_ENTRY(mm512_setzero_ps)
  SIMDE_TEST_FUNC_LIST_ENTRY(mm512_setzero_pd)
SIMDE_TEST_FUNC_LIST_END

#include <test/x86/avx512/test-avx512-footer.h>
996194.c
/* This example demonstrates how to import an FMU implemented as a shared library */ #ifdef _WIN32 #include <Windows.h> #else #include <dlfcn.h> #endif #include <stdlib.h> // FMI function types #include "fmi3FunctionTypes.h" #define INSTANTIATION_TOKEN "{8c4e810f-3da3-4a00-8276-176fa3c9f000}" #ifdef _WIN32 #define RESOURCE_LOCATION "file:/C:/tmp/VanDerPol" #else #define RESOURCE_LOCATION "file:///var/tmp/VanDerPol" #endif static void cb_logMessage(fmi3InstanceEnvironment instanceEnvironment, fmi3String instanceName, fmi3Status status, fmi3String category, fmi3String message) { // log message... } int main(int argc, char* argv[]) { #if defined(_WIN32) HMODULE libraryHandle = LoadLibrary("VanDerPol\\binaries\\x86_64-windows\\VanDerPol.dll"); #elif defined(__APPLE__) void *libraryHandle = dlopen("VanDerPol/binaries/x86_64-darwin/VanDerPol.dylib", RTLD_LAZY); #else void *libraryHandle = dlopen("VanDerPol/binaries/x86_64-linux/VanDerPol.so", RTLD_LAZY); #endif if (!libraryHandle) { return EXIT_FAILURE; } fmi3InstantiateModelExchangeTYPE *instantiateModelExchange = #ifdef _WIN32 GetProcAddress(libraryHandle, "fmi3InstantiateModelExchange"); #else dlsym(libraryHandle, "fmi3InstantiateModelExchange"); #endif fmi3FreeInstanceTYPE *freeInstance = #ifdef _WIN32 GetProcAddress(libraryHandle, "fmi3FreeInstance"); #else dlsym(libraryHandle, "fmi3FreeInstance"); #endif // load remaining FMI functions... if (!instantiateModelExchange || !freeInstance) { return EXIT_FAILURE; } fmi3Instance m = instantiateModelExchange( "instance1", // instance name INSTANTIATION_TOKEN, // instantiation token (from XML) RESOURCE_LOCATION, // resource location (extracted FMU) fmi3False, // visible fmi3False, // debug logging disabled NULL, // instance environment cb_logMessage); // logger callback if (!m) { return EXIT_FAILURE; } // simulation... freeInstance(m); // unload shared library #ifdef _WIN32 FreeLibrary(libraryHandle); #else dlclose(libraryHandle); #endif return EXIT_SUCCESS; }
704947.c
/*
 * Geometric parameter accessor, originally generated with SymPy 0.7.6
 * (http://www.sympy.org/). Part of 'project'.
 *
 * The symbolic expression for this inter-length reduced to the constant
 * zero, so the function has no inputs and always yields 0.0.
 */
#include "pinky_prox_thumb_inter_length_3.h"
#include <math.h>

double pinky_prox_thumb_inter_length_3() {
    /* Constant-folded result of the generated expression. */
    return 0.0;
}
205292.c
/*
* Copyright(c) 2019 Intel Corporation
* SPDX - License - Identifier: BSD - 2 - Clause - Patent
*/

#include <stdlib.h>

#include "EbDefinitions.h"
#include "EbEncodeContext.h"
#include "EbPictureManagerQueue.h"

/*
 * Constructor for the encoder-wide EncodeContext.
 *
 *   object_dbl_ptr      [out] receives the newly allocated EncodeContext
 *   object_init_data_ptr [in] unused (kept for the generic ctor signature)
 *
 * Allocates the context and all of its queues/mutexes, then initializes
 * every scalar field to its startup value. Returns EB_ErrorNone on
 * success or EB_ErrorInsufficientResources if any sub-constructor fails.
 *
 * NOTE(review): EB_MALLOC / EB_CREATEMUTEX are macros declared elsewhere;
 * they appear to register each allocation in a global memory map and to
 * handle their own failure paths — confirm against EbDefinitions.h. On a
 * mid-construction failure, previously allocated pieces are presumably
 * reclaimed via that memory map rather than freed here.
 */
EbErrorType encode_context_ctor(
    EbPtr *object_dbl_ptr,
    EbPtr object_init_data_ptr)
{
    uint32_t picture_index;
    EbErrorType return_error = EB_ErrorNone;

    EncodeContext *encode_context_ptr;
    (void)object_init_data_ptr;

    EB_MALLOC(EncodeContext*, encode_context_ptr, sizeof(EncodeContext), EB_N_PTR);

    /* Publish the object before field init so the caller owns it even if
       a later sub-constructor fails. */
    *object_dbl_ptr = (EbPtr) encode_context_ptr;

    // Callback Functions
    encode_context_ptr->app_callback_ptr = (EbCallback *) EB_NULL;

    // Port Active State
    encode_context_ptr->recon_port_active = EB_FALSE;

    // Recon-frame counter and its protecting mutex
    EB_CREATEMUTEX(EbHandle, encode_context_ptr->total_number_of_recon_frame_mutex, sizeof(EbHandle), EB_MUTEX);

    encode_context_ptr->total_number_of_recon_frames = 0;

    // Output Buffer Fifos (wired up later; NULL until then)
    encode_context_ptr->stream_output_fifo_ptr = (EbFifo*) EB_NULL;
    encode_context_ptr->recon_output_fifo_ptr = (EbFifo*)EB_NULL;

    // Picture Buffer Fifos (wired up later; NULL until then)
    encode_context_ptr->input_picture_pool_fifo_ptr = (EbFifo*) EB_NULL;
    encode_context_ptr->reference_picture_pool_fifo_ptr = (EbFifo*) EB_NULL;
    encode_context_ptr->pa_reference_picture_pool_fifo_ptr = (EbFifo*) EB_NULL;

    // Picture Decision Reordering Queue: array of entries, each built by
    // its own sub-constructor; bail out on the first allocation failure.
    encode_context_ptr->picture_decision_reorder_queue_head_index = 0;

    EB_MALLOC(PictureDecisionReorderEntry**, encode_context_ptr->picture_decision_reorder_queue, sizeof(PictureDecisionReorderEntry*) * PICTURE_DECISION_REORDER_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < PICTURE_DECISION_REORDER_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = picture_decision_reorder_entry_ctor(
            &(encode_context_ptr->picture_decision_reorder_queue[picture_index]),
            picture_index);
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // Picture Manager Reordering Queue
    encode_context_ptr->picture_manager_reorder_queue_head_index = 0;

    EB_MALLOC(PictureManagerReorderEntry**, encode_context_ptr->picture_manager_reorder_queue, sizeof(PictureManagerReorderEntry*) * PICTURE_MANAGER_REORDER_QUEUE_MAX_DEPTH, EB_N_PTR);

    for (picture_index = 0; picture_index < PICTURE_MANAGER_REORDER_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = picture_manager_reorder_entry_ctor(
            &(encode_context_ptr->picture_manager_reorder_queue[picture_index]),
            picture_index);
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // Picture Manager Pre-Assignment Buffer: counters plus an array of
    // (initially NULL) picture wrapper slots.
    encode_context_ptr->pre_assignment_buffer_intra_count = 0;
    encode_context_ptr->pre_assignment_buffer_idr_count = 0;
    encode_context_ptr->pre_assignment_buffer_scene_change_count = 0;
    encode_context_ptr->pre_assignment_buffer_scene_change_index = 0;
    encode_context_ptr->pre_assignment_buffer_eos_flag = EB_FALSE;
    encode_context_ptr->decode_base_number = 0;

    encode_context_ptr->pre_assignment_buffer_count = 0;
    encode_context_ptr->number_of_active_pictures = 0;

    EB_MALLOC(EbObjectWrapper**, encode_context_ptr->pre_assignment_buffer, sizeof(EbObjectWrapper*) * PRE_ASSIGNMENT_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < PRE_ASSIGNMENT_MAX_DEPTH; ++picture_index) {
        encode_context_ptr->pre_assignment_buffer[picture_index] = (EbObjectWrapper*) EB_NULL;
    }

    // Picture Manager Input Queue
    encode_context_ptr->input_picture_queue_head_index = 0;
    encode_context_ptr->input_picture_queue_tail_index = 0;

    EB_MALLOC(InputQueueEntry**, encode_context_ptr->input_picture_queue, sizeof(InputQueueEntry*) * INPUT_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < INPUT_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = input_queue_entry_ctor(
            &(encode_context_ptr->input_picture_queue[picture_index]));
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // Picture Manager Reference Queue
    encode_context_ptr->reference_picture_queue_head_index = 0;
    encode_context_ptr->reference_picture_queue_tail_index = 0;

    EB_MALLOC(ReferenceQueueEntry**, encode_context_ptr->reference_picture_queue, sizeof(ReferenceQueueEntry*) * REFERENCE_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < REFERENCE_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = reference_queue_entry_ctor(
            &(encode_context_ptr->reference_picture_queue[picture_index]));
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // Picture Decision PA Reference Queue
    encode_context_ptr->picture_decision_pa_reference_queue_head_index = 0;
    encode_context_ptr->picture_decision_pa_reference_queue_tail_index = 0;

    EB_MALLOC(PaReferenceQueueEntry**, encode_context_ptr->picture_decision_pa_reference_queue, sizeof(PaReferenceQueueEntry*) * PICTURE_DECISION_PA_REFERENCE_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < PICTURE_DECISION_PA_REFERENCE_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = pa_reference_queue_entry_ctor(
            &(encode_context_ptr->picture_decision_pa_reference_queue[picture_index]));
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // Initial Rate Control Reordering Queue
    encode_context_ptr->initial_rate_control_reorder_queue_head_index = 0;

    EB_MALLOC(InitialRateControlReorderEntry**, encode_context_ptr->initial_rate_control_reorder_queue, sizeof(InitialRateControlReorderEntry*) * INITIAL_RATE_CONTROL_REORDER_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < INITIAL_RATE_CONTROL_REORDER_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = initial_rate_control_reorder_entry_ctor(
            &(encode_context_ptr->initial_rate_control_reorder_queue[picture_index]),
            picture_index);
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // High level Rate Control histogram Queue
    // ("historgram" spelling is kept: it matches the struct field name.)
    encode_context_ptr->hl_rate_control_historgram_queue_head_index = 0;

    EB_MALLOC(HlRateControlHistogramEntry**, encode_context_ptr->hl_rate_control_historgram_queue, sizeof(HlRateControlHistogramEntry*) * HIGH_LEVEL_RATE_CONTROL_HISTOGRAM_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < HIGH_LEVEL_RATE_CONTROL_HISTOGRAM_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = hl_rate_control_histogram_entry_ctor(
            &(encode_context_ptr->hl_rate_control_historgram_queue[picture_index]),
            picture_index);
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }
    // HLRateControl Historgram Queue Mutex
    EB_CREATEMUTEX(EbHandle, encode_context_ptr->hl_rate_control_historgram_queue_mutex, sizeof(EbHandle), EB_MUTEX);

    // Packetization Reordering Queue
    encode_context_ptr->packetization_reorder_queue_head_index = 0;

    EB_MALLOC(PacketizationReorderEntry**, encode_context_ptr->packetization_reorder_queue, sizeof(PacketizationReorderEntry*) * PACKETIZATION_REORDER_QUEUE_MAX_DEPTH, EB_N_PTR);

    for(picture_index=0; picture_index < PACKETIZATION_REORDER_QUEUE_MAX_DEPTH; ++picture_index) {
        return_error = packetization_reorder_entry_ctor(
            &(encode_context_ptr->packetization_reorder_queue[picture_index]),
            picture_index);
        if (return_error == EB_ErrorInsufficientResources){
            return EB_ErrorInsufficientResources;
        }
    }

    // GOP / prediction-structure bookkeeping
    encode_context_ptr->intra_period_position = 0;
    encode_context_ptr->pred_struct_position = 0;
    encode_context_ptr->current_input_poc = -1;  // no picture received yet
    encode_context_ptr->elapsed_non_idr_count = 0;
    encode_context_ptr->elapsed_non_cra_count = 0;
    encode_context_ptr->initial_picture = EB_TRUE;

    encode_context_ptr->last_idr_picture = 0;

    // Sequence Termination Flags (~0u == "no terminating picture yet")
    encode_context_ptr->terminating_picture_number = ~0u;
    encode_context_ptr->terminating_sequence_flag_received = EB_FALSE;

    // Prediction Structure Group
    encode_context_ptr->prediction_structure_group_ptr = (PredictionStructureGroup*) EB_NULL;

    // Rate Control (10 Mbit/s startup target)
    encode_context_ptr->available_target_bitrate = 10000000;
    encode_context_ptr->available_target_bitrate_changed = EB_FALSE;
    encode_context_ptr->buffer_fill = 0;
    encode_context_ptr->vbv_buf_size = 0;
    encode_context_ptr->vbv_max_rate = 0;

    // Rate Control Bit Tables
    EB_MALLOC(RateControlTables*, encode_context_ptr->rate_control_tables_array, sizeof(RateControlTables) * TOTAL_NUMBER_OF_INITIAL_RC_TABLES_ENTRY, EB_N_PTR);

    return_error = rate_control_tables_ctor(encode_context_ptr->rate_control_tables_array);
    if (return_error == EB_ErrorInsufficientResources){
        return EB_ErrorInsufficientResources;
    }
    // RC Rate Table Update Mutex
    EB_CREATEMUTEX(EbHandle, encode_context_ptr->rate_table_update_mutex, sizeof(EbHandle), EB_MUTEX);

    encode_context_ptr->rate_control_tables_array_updated = EB_FALSE;

    // Speed-control state and its mutex
    EB_CREATEMUTEX(EbHandle, encode_context_ptr->sc_buffer_mutex, sizeof(EbHandle), EB_MUTEX);
    encode_context_ptr->sc_buffer = 0;
    encode_context_ptr->sc_frame_in = 0;
    encode_context_ptr->sc_frame_out = 0;
    encode_context_ptr->enc_mode = SPEED_CONTROL_INIT_MOD;
    encode_context_ptr->previous_selected_ref_qp = 32;
    encode_context_ptr->max_coded_poc = 0;
    encode_context_ptr->max_coded_poc_selected_ref_qp = 32;

    // Shared-reference mutex is created directly (not via EB_CREATEMUTEX),
    // so its registration in the library memory map is done by hand below.
    // NOTE(review): memory_map / memory_map_index / total_lib_memory are
    // presumably globals (or macros) from EbDefinitions.h — confirm.
    encode_context_ptr->shared_reference_mutex = eb_create_mutex();
    if (encode_context_ptr->shared_reference_mutex == (EbHandle) EB_NULL){
        return EB_ErrorInsufficientResources;
    }else {
        memory_map[*(memory_map_index)].ptr_type = EB_MUTEX;
        memory_map[(*(memory_map_index))++].ptr = encode_context_ptr->shared_reference_mutex ;
        *total_lib_memory += (sizeof(EbHandle));
    }

    return EB_ErrorNone;
}