filename
stringlengths
3
9
code
stringlengths
4
1.87M
925349.c
// ---------------------------------------------------------------------------- // Copyright 2016-2017 ARM Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ---------------------------------------------------------------------------- #include "pv_error_handling.h" #include "cs_der_certs.h" #include "cs_der_keys.h" #include "cs_hash.h" #include "pal.h" #include "cs_utils.h" #include "stdbool.h" #include "fcc_malloc.h" static kcm_status_e cs_get_x509_cert_attribute_type(cs_certificate_attribute_type_e cs_attribute_type, palX509Attr_t *attribute_type) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; switch (cs_attribute_type) { case CS_CN_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_CN_ATTR; break; case CS_VALID_TO_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_VALID_TO; break; case CS_VALID_FROM_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_VALID_FROM; break; case CS_OU_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_OU_ATTR; break; case CS_SUBJECT_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_SUBJECT_ATTR; break; case CS_ISSUER_ATTRIBUTE_TYPE: *attribute_type = PAL_X509_ISSUER_ATTR; break; default: SA_PV_ERR_RECOVERABLE_RETURN_IF((true), KCM_CRYPTO_STATUS_INVALID_X509_ATTR, "Invalid cert attribute"); } return kcm_status; } kcm_status_e cs_is_self_signed_x509_cert(palX509Handle_t x509_cert, bool *is_self_signed) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; uint8_t *cert_subject = NULL; uint8_t *cert_issuer = NULL; size_t subject_size = 0, 
issuer_size = 0; //Self-signed certificate is certificate with subject attribute = issuer attribute //get and check issuer and subject sizes kcm_status = cs_attr_get_data_size_x509_cert(x509_cert, CS_SUBJECT_ATTRIBUTE_TYPE, &subject_size); SA_PV_ERR_RECOVERABLE_RETURN_IF((kcm_status != KCM_STATUS_SUCCESS), kcm_status, "get size PAL_X509_SUBJECT_ATTR failed"); kcm_status = cs_attr_get_data_size_x509_cert(x509_cert, CS_ISSUER_ATTRIBUTE_TYPE, &issuer_size); SA_PV_ERR_RECOVERABLE_RETURN_IF((kcm_status != KCM_STATUS_SUCCESS), kcm_status, "get size PAL_X509_ISSUER_ATTR failed"); //If issuer and subject attributes have different length it is not self-signed certificate if (subject_size != issuer_size) { *is_self_signed = false; return KCM_STATUS_SUCCESS; } //Get and check attributes values cert_subject = fcc_malloc(subject_size); SA_PV_ERR_RECOVERABLE_GOTO_IF((cert_subject == NULL), kcm_status = KCM_STATUS_OUT_OF_MEMORY, exit, "Allocate subject attribute failed"); pal_status = pal_x509CertGetAttribute(x509_cert, PAL_X509_SUBJECT_ATTR, cert_subject, subject_size, &subject_size); SA_PV_ERR_RECOVERABLE_GOTO_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), exit,"pal_x509CertGetAttribute PAL_X509_SUBJECT_ATTR failed %d ", (int)cs_error_handler(pal_status)); cert_issuer = fcc_malloc(issuer_size); SA_PV_ERR_RECOVERABLE_GOTO_IF((cert_subject == NULL), kcm_status = KCM_STATUS_OUT_OF_MEMORY, exit, "Allocate issuer attribute failed"); pal_status = pal_x509CertGetAttribute(x509_cert, PAL_X509_ISSUER_ATTR, cert_issuer, issuer_size, &issuer_size); SA_PV_ERR_RECOVERABLE_GOTO_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), exit, "pal_x509CertGetAttribute PAL_X509_ISSUER_ATTR failed %d", (int)kcm_status); if (memcmp(cert_issuer, cert_subject, issuer_size) == 0) { *is_self_signed = true; } else { *is_self_signed = false; } exit: fcc_free(cert_subject); fcc_free(cert_issuer); return kcm_status; } kcm_status_e 
cs_create_handle_from_der_x509_cert(const uint8_t *cert, size_t cert_length, palX509Handle_t *x509_cert_handle) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; SA_PV_ERR_RECOVERABLE_RETURN_IF((cert != NULL && cert_length == 0), KCM_STATUS_INVALID_PARAMETER, "Invalid cert_length"); SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_cert_handle == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid x509_cert_handler"); //Allocate and Init certificate handler pal_status = pal_x509Initiate(x509_cert_handle); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), cs_error_handler(pal_status), "pal_x509Initiate failed"); if (cert != NULL) { //Parse Certificate. pal_status = pal_x509CertParse(*x509_cert_handle, cert, cert_length); SA_PV_ERR_RECOVERABLE_GOTO_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), exit, "pal_x509CertParse failed"); } exit: if (pal_status != PAL_SUCCESS) { pal_x509Free(x509_cert_handle); } return kcm_status; } kcm_status_e cs_add_to_chain_x509_cert(const uint8_t *cert, size_t cert_length, palX509Handle_t x509_chain_handle) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; SA_PV_ERR_RECOVERABLE_RETURN_IF((cert == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid cert pointer"); SA_PV_ERR_RECOVERABLE_RETURN_IF((cert_length <= 0), KCM_STATUS_INVALID_PARAMETER, "Invalid cert_length"); SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_chain_handle == NULLPTR), KCM_STATUS_INVALID_PARAMETER, "Invalid x509_chain_handle"); //Parse Certificate. 
pal_status = pal_x509CertParse(x509_chain_handle, cert, cert_length); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), "pal_x509CertParse failed"); return kcm_status; } kcm_status_e cs_close_handle_x509_cert(palX509Handle_t *x509_cert_handle) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; pal_status = pal_x509Free(x509_cert_handle); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), "pal_x509Free failed"); return kcm_status; } kcm_status_e cs_parse_der_x509_cert(const uint8_t *cert, size_t cert_length) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; palX509Handle_t x509_cert = NULLPTR; SA_PV_ERR_RECOVERABLE_RETURN_IF((cert == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid cert pointer"); SA_PV_ERR_RECOVERABLE_RETURN_IF((cert_length <= 0), KCM_STATUS_INVALID_PARAMETER, "Invalid cert_length"); //Allocate and Init certificate handler pal_status = pal_x509Initiate(&x509_cert); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), cs_error_handler(pal_status), "pal_x509Initiate failed"); //Parse Certificate. 
pal_status = pal_x509CertParse(x509_cert, cert, cert_length); SA_PV_ERR_RECOVERABLE_GOTO_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), exit, "pal_x509CertParse failed"); exit: pal_x509Free(&x509_cert); return kcm_status; } kcm_status_e cs_verify_x509_cert(palX509Handle_t x509_cert,palX509Handle_t x509_cert_chain) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = PAL_SUCCESS; bool is_self_signed = false; palX509Handle_t x509_ca_cert = NULLPTR; SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_cert == NULLPTR), KCM_STATUS_INVALID_PARAMETER, "Invalid cert handle"); kcm_status = cs_is_self_signed_x509_cert(x509_cert, &is_self_signed); SA_PV_ERR_RECOVERABLE_GOTO_IF((kcm_status != KCM_STATUS_SUCCESS), kcm_status = kcm_status, exit, "Self signed verification failed"); if (is_self_signed && x509_cert_chain == NULLPTR) { // Send the certificate itself as trusted chain x509_ca_cert = x509_cert; } else { x509_ca_cert = x509_cert_chain; } //Verify certificate using created certificate chain pal_status = pal_x509CertVerify(x509_cert, x509_ca_cert); SA_PV_ERR_RECOVERABLE_GOTO_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), exit, "pal_x509CertVerify failed %" PRIu32 "", pal_status); exit: return kcm_status; } kcm_status_e cs_attr_get_data_size_x509_cert(palX509Handle_t x509_cert, cs_certificate_attribute_type_e cs_attribute_type, size_t *size_of_attribute) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palX509Attr_t attribute_type; palStatus_t pal_status = PAL_SUCCESS; uint8_t output_buffer; SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_cert == NULLPTR), KCM_STATUS_INVALID_PARAMETER, "Invalid x509_cert"); SA_PV_ERR_RECOVERABLE_RETURN_IF((size_of_attribute == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid size_of_attribute pointer"); kcm_status = cs_get_x509_cert_attribute_type(cs_attribute_type, &attribute_type); SA_PV_ERR_RECOVERABLE_RETURN_IF((kcm_status != KCM_STATUS_SUCCESS), kcm_status, 
"cs_get_x509_cert_attribute_type failed"); //Get the attribute size pal_status = pal_x509CertGetAttribute(x509_cert, attribute_type, &output_buffer, 0, size_of_attribute); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status == PAL_SUCCESS), KCM_STATUS_ERROR, "Attribute size is 0"); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_ERR_BUFFER_TOO_SMALL), kcm_status = cs_error_handler(pal_status), "Failed to get attribute size"); return KCM_STATUS_SUCCESS; }; kcm_status_e cs_attr_get_data_x509_cert(palX509Handle_t x509_cert, cs_certificate_attribute_type_e cs_attribute_type, uint8_t *attribute_output_buffer, size_t max_size_of_attribute_output_buffer, size_t *actual_size_of_attribute_output_buffer) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palX509Attr_t attribute_type; palStatus_t pal_status = PAL_SUCCESS; SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_cert == NULLPTR), KCM_STATUS_INVALID_PARAMETER, "Invalid x509_cert"); SA_PV_ERR_RECOVERABLE_RETURN_IF((attribute_output_buffer == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid output pointer"); SA_PV_ERR_RECOVERABLE_RETURN_IF((actual_size_of_attribute_output_buffer == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid actual_size_of_output pointer"); kcm_status = cs_get_x509_cert_attribute_type(cs_attribute_type, &attribute_type); SA_PV_ERR_RECOVERABLE_RETURN_IF((kcm_status != KCM_STATUS_SUCCESS), kcm_status, "cs_get_x509_cert_attribute_type failed"); //Get the attribute pal_status = pal_x509CertGetAttribute(x509_cert, attribute_type, attribute_output_buffer, max_size_of_attribute_output_buffer, actual_size_of_attribute_output_buffer); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), "pal_x509CertGetAttribute failed"); return kcm_status; }; kcm_status_e cs_x509_cert_verify_signature(palX509Handle_t x509_cert, const unsigned char *hash, size_t hash_size, const unsigned char *signature, size_t signature_size) { kcm_status_e kcm_status = KCM_STATUS_SUCCESS; palStatus_t pal_status = 
PAL_SUCCESS; SA_PV_ERR_RECOVERABLE_RETURN_IF((x509_cert == NULLPTR), KCM_STATUS_INVALID_PARAMETER, "Invalid x509_cert"); SA_PV_ERR_RECOVERABLE_RETURN_IF((hash == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid hash pointer"); SA_PV_ERR_RECOVERABLE_RETURN_IF((hash_size != CS_SHA256_SIZE), KCM_STATUS_INVALID_PARAMETER, "Invalid hash digest size"); SA_PV_ERR_RECOVERABLE_RETURN_IF((signature == NULL), KCM_STATUS_INVALID_PARAMETER, "Invalid signature pointer"); SA_PV_ERR_RECOVERABLE_RETURN_IF((signature_size == 0), KCM_STATUS_INVALID_PARAMETER, "Invalid signature size"); //Verify signature pal_status = pal_verifySignature(x509_cert, PAL_SHA256, hash, hash_size, signature, signature_size); SA_PV_ERR_RECOVERABLE_RETURN_IF((pal_status != PAL_SUCCESS), kcm_status = cs_error_handler(pal_status), "pal_verifySignature failed"); return kcm_status; }
310878.c
/* CODE for likelihood based cut calculation that preserves long reads and
   operates on fragments rather than edges.
   First implemented 03/02/15.

   Likelihood based model for initializing the two shores of the cut
   (startnode ... secondnode).

   For each read we store 4 floats (all in log10 space):
       scores[0] = P(R | H)          scores[1] = P(R | complement(H))
       scores[2] = P(R | H_new)      scores[3] = P(R | complement(H_new))
   where the likelihood covers only the nodes added to the cut so far, and
   H_new is the haplotype formed by flipping the phase of shore2 relative to
   shore1.

   Score of a variant:
       snpfrag[node].score = log10(P(R|H) + P(R|complement(H)))
                           - log10(P(R|H_new) + P(R|complement(H_new)))
*/
#include "common.h"
#include "math.h"

/* Difference between the likelihood of the old and the new solution, computed
 * from the per-fragment scores accumulated while building the max-cut.
 * Returns oldLL - newLL (log10 scale); a negative value means the new
 * assignment is more likely. */
float cut_score(struct fragment* Flist, struct BLOCK* component, char* hap) {
    int i = 0, t = 0, f = 0;
    float oldLL = 0, newLL = 0;
    float scores[4]; // normal scores
    float Ln, fL;

    // use the 4 values fragment.scores[] to calculate the likelihoods
    for (i = 0; i < component->frags; i++) {
        f = component->flist[i];
        for (t = 0; t < 4; t++) scores[t] = Flist[f].scores[t];
        Ln = addlogs(scores[2], scores[3]);
        calculate_fragscore(Flist, f, hap, &fL);
        oldLL += fL; // ready to go with h-trans calculations
        newLL += Ln;
    }

    if (newLL > oldLL && DEBUG) fprintf_time(stderr, "component %d old %f new %f %f \n", component->phased, oldLL, newLL, component->SCORE);
    if (DEBUG) fprintf(stdout, "FRAG component %d old %f new %f %f \n\n", component->phased, oldLL, newLL, component->SCORE);
    return oldLL - newLL;
}

/* Initialize the fragment scores for the initial two shores of the cut
 * (startnode and secondnode), then seed snpfrag[node].score for every other
 * variant covered by those fragments.
 * prob  = log10 P(miscall), prob2 = log10 P(correct call) -- both log10 scale. */
void init_fragment_scores(struct SNPfrags* snpfrag, struct fragment* Flist, char* hap, int startnode, int secondnode) {
    int i = 0, j = 0, f = 0, t = 0, n = 0, k = 0, node = 0, node1;
    float prob, prob2;
    float scores[4]; // a temp variable
    float Lo, Ln;    // old likelihood, new likelihood

    for (i = 0; i < 2; i++) {
        node1 = i ? secondnode : startnode;
        if (i && node1 < 0) break; // if secondnode is not valid, exit to avoid a segfault

        for (n = 0; n < snpfrag[node1].frags; n++) {
            f = snpfrag[node1].flist[n]; // index into Flist global
            j = snpfrag[node1].jlist[n];
            k = snpfrag[node1].klist[n];
            node = node1;
            if (hap[node] == '-' || (int) Flist[f].list[j].qv[k] - QVoffset < MINQ) continue;

            prob = (QVoffset - (int) Flist[f].list[j].qv[k]);
            prob /= 10; // Phred quality -> log10 error probability
            prob2 = Flist[f].list[j].p1[k];

            if (node == startnode) {
                if (hap[node] == Flist[f].list[j].hap[k]) {
                    Flist[f].scores[0] += prob2; Flist[f].scores[1] += prob;
                    Flist[f].scores[2] += prob2; Flist[f].scores[3] += prob;
                } else {
                    Flist[f].scores[0] += prob;  Flist[f].scores[1] += prob2;
                    Flist[f].scores[2] += prob;  Flist[f].scores[3] += prob2;
                }
            } else if (node == secondnode) {
                // probabilities are flipped compared to start node
                if (hap[node] == Flist[f].list[j].hap[k]) {
                    Flist[f].scores[0] += prob;  Flist[f].scores[1] += prob2;
                    Flist[f].scores[2] += prob;  Flist[f].scores[3] += prob2;
                } else {
                    Flist[f].scores[0] += prob2; Flist[f].scores[1] += prob;
                    Flist[f].scores[2] += prob2; Flist[f].scores[3] += prob;
                }
            }

            // update score of every node outside the 2 shores covered by 'f'
            for (j = 0; j < Flist[f].blocks; j++) {
                for (k = 0; k < Flist[f].list[j].len; k++) {
                    node = Flist[f].list[j].offset + k;
                    if (hap[node] == '-' || (int) Flist[f].list[j].qv[k] - QVoffset < MINQ) continue;
                    if (node == startnode || node == secondnode) continue;

                    prob = (QVoffset - (int) Flist[f].list[j].qv[k]);
                    prob /= 10;
                    prob2 = Flist[f].list[j].p1[k];

                    for (t = 0; t < 4; t++) scores[t] = Flist[f].scores[t];
                    if (hap[node] == Flist[f].list[j].hap[k]) {
                        scores[0] += prob2; scores[1] += prob;  // add node to startnode side of cut
                        scores[2] += prob;  scores[3] += prob2; // add to other side of cut
                    } else {
                        scores[0] += prob;  scores[1] += prob2; // allele mismatch so flip probability
                        scores[2] += prob2; scores[3] += prob;
                    }
                    // Lo: likelihood if SNP joins 'startnode'; Ln: if it joins 'secondnode'
                    Lo = addlogs(scores[0], scores[1]);
                    Ln = addlogs(scores[2], scores[3]);
                    snpfrag[node].score += Lo - Ln;
                }
            }
        }
    }
}

/* Update fragment scores and variant scores after adding 'node_added' to the
 * growing max-cut, then restore the heap invariant for every variant whose
 * score changed. */
void update_fragment_scores(struct SNPfrags* snpfrag, struct fragment* Flist, char* hap, int startnode, int secondnode, int node_added, struct PHEAP* pheap, int* slist) {
    int j = 0, f = 0, t = 0, n = 0, k = 0, node = 0;
    float prob, prob2;
    float scores[4];
    float Lo, Ln;
    float f_scores[4];
    float oldscore;

    for (n = 0; n < snpfrag[node_added].frags; n++) {
        f = snpfrag[node_added].flist[n]; // index into Flist global
        // store previous fragment scores before updating
        for (t = 0; t < 4; t++) f_scores[t] = Flist[f].scores[t];
        j = snpfrag[node_added].jlist[n];
        k = snpfrag[node_added].klist[n];
        if (hap[Flist[f].list[j].offset + k] == '-' || (int) Flist[f].list[j].qv[k] - QVoffset < MINQ) continue;
        node = Flist[f].list[j].offset + k;
        if (node != node_added) continue;

        prob = QVoffset - (int) Flist[f].list[j].qv[k];
        prob /= 10; // log10(e)
        prob2 = Flist[f].list[j].p1[k];

        if (snpfrag[node].parent == startnode) {
            // if node is added to 'startnode', original and new likelihoods are updated identically
            if (hap[node] == Flist[f].list[j].hap[k]) {
                Flist[f].scores[0] += prob2; Flist[f].scores[1] += prob;
                Flist[f].scores[2] += prob2; Flist[f].scores[3] += prob;
            } else {
                Flist[f].scores[0] += prob;  Flist[f].scores[1] += prob2;
                Flist[f].scores[2] += prob;  Flist[f].scores[3] += prob2;
            }
        } else if (snpfrag[node].parent == secondnode) {
            if (hap[node] == Flist[f].list[j].hap[k]) {
                Flist[f].scores[0] += prob;  Flist[f].scores[1] += prob2;
                Flist[f].scores[2] += prob;  Flist[f].scores[3] += prob2;
            } else {
                Flist[f].scores[0] += prob2; Flist[f].scores[1] += prob;
                Flist[f].scores[2] += prob2; Flist[f].scores[3] += prob;
            }
        }

        // update score of every node outside 2 shores covered by 'f'
        for (j = 0; j < Flist[f].blocks; j++) {
            for (k = 0; k < Flist[f].list[j].len; k++) {
                if (hap[Flist[f].list[j].offset + k] == '-' || (int) Flist[f].list[j].qv[k] - QVoffset < MINQ) continue;
                node = Flist[f].list[j].offset + k;
                if (snpfrag[node].parent != startnode && snpfrag[node].parent != secondnode && node != node_added) {
                    oldscore = snpfrag[node].score; // store old score
                    prob = QVoffset - (int) Flist[f].list[j].qv[k];
                    prob /= 10; // log10(e)
                    prob2 = Flist[f].list[j].p1[k];

                    // subtract the delta-LL computed from the PRE-update fragment scores
                    for (t = 0; t < 4; t++) scores[t] = f_scores[t];
                    if (hap[node] == Flist[f].list[j].hap[k]) {
                        scores[0] += prob2; scores[1] += prob;
                        scores[2] += prob;  scores[3] += prob2;
                    } else {
                        scores[0] += prob;  scores[1] += prob2;
                        scores[2] += prob2; scores[3] += prob;
                    }
                    Lo = addlogs(scores[0], scores[1]);
                    Ln = addlogs(scores[2], scores[3]);
                    snpfrag[node].score -= Lo - Ln; // subtract old score for variant

                    // add the delta-LL computed from the POST-update fragment scores
                    for (t = 0; t < 4; t++) scores[t] = Flist[f].scores[t];
                    if (hap[node] == Flist[f].list[j].hap[k]) {
                        scores[0] += prob2; scores[1] += prob;
                        scores[2] += prob;  scores[3] += prob2;
                    } else {
                        scores[0] += prob;  scores[1] += prob2;
                        scores[2] += prob2; scores[3] += prob;
                    }
                    Lo = addlogs(scores[0], scores[1]);
                    Ln = addlogs(scores[2], scores[3]);
                    snpfrag[node].score += Lo - Ln; // add new delta LL for variant

                    if (fabsf(oldscore) > fabsf(snpfrag[node].score)) {
                        // score decreased in magnitude -> may need to sift down
                        pmaxHeapify(pheap, snpfrag[node].heaploc, snpfrag, slist);
                    } else pbubbleUp(pheap, snpfrag[node].heaploc, snpfrag, slist);
                }
            }
        }
    }
}
497077.c
/* * Rebar control * * Copyright 1998, 1999 Eric Kohl * Copyright 2007, 2008 Mikolaj Zalewski * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA * * NOTES * * This code was audited for completeness against the documented features * of Comctl32.dll version 6.0 on Oct. 19, 2004, by Robert Shearman. * * Unless otherwise noted, we believe this code to be complete, as per * the specification mentioned above. * If you discover missing features or bugs please note them below. * * TODO * Styles: * - RBS_DBLCLKTOGGLE * - RBS_FIXEDORDER * - RBS_REGISTERDROP * - RBS_TOOLTIPS * Messages: * - RB_BEGINDRAG * - RB_DRAGMOVE * - RB_ENDDRAG * - RB_GETBANDMARGINS * - RB_GETCOLORSCHEME * - RB_GETDROPTARGET * - RB_GETPALETTE * - RB_SETCOLORSCHEME * - RB_SETPALETTE * - RB_SETTOOLTIPS * - WM_CHARTOITEM * - WM_LBUTTONDBLCLK * - WM_MEASUREITEM * - WM_PALETTECHANGED * - WM_QUERYNEWPALETTE * - WM_RBUTTONDOWN * - WM_RBUTTONUP * - WM_SYSCOLORCHANGE * - WM_VKEYTOITEM * - WM_WININICHANGE * Notifications: * - NM_HCHITTEST * - NM_RELEASEDCAPTURE * - RBN_AUTOBREAK * - RBN_GETOBJECT * - RBN_MINMAX * Band styles: * - RBBS_FIXEDBMP * Native uses (on each draw!!) SM_CYBORDER (or SM_CXBORDER for CCS_VERT) * to set the size of the separator width (the value SEP_WIDTH_SIZE * in here). Should be fixed!! 
*/ /* * Testing: set to 1 to make background brush *always* green */ #define GLATESTING 0 /* * 3. REBAR_MoveChildWindows should have a loop because more than * one pass (together with the RBN_CHILDSIZEs) is made on * at least RB_INSERTBAND */ #include <assert.h> #include <stdarg.h> #include <stdlib.h> #include <string.h> #include "windef.h" #include "winbase.h" #include "wingdi.h" #include "wine/unicode.h" #include "winuser.h" #include "winnls.h" #include "commctrl.h" #include "comctl32.h" #include "uxtheme.h" #include "vssym32.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(rebar); typedef struct { UINT fStyle; UINT fMask; COLORREF clrFore; COLORREF clrBack; INT iImage; HWND hwndChild; UINT cxMinChild; /* valid if _CHILDSIZE */ UINT cyMinChild; /* valid if _CHILDSIZE */ UINT cx; /* valid if _SIZE */ HBITMAP hbmBack; UINT wID; UINT cyChild; /* valid if _CHILDSIZE */ UINT cyMaxChild; /* valid if _CHILDSIZE */ UINT cyIntegral; /* valid if _CHILDSIZE */ UINT cxIdeal; LPARAM lParam; UINT cxHeader; INT cxEffective; /* current cx for band */ UINT cyHeader; /* the height of the header */ UINT cxMinBand; /* minimum cx for band */ UINT cyMinBand; /* minimum cy for band */ UINT cyRowSoFar; /* for RBS_VARHEIGHT - the height of the row if it would break on this band (set by _Layout) */ INT iRow; /* zero-based index of the row this band assigned to */ UINT fStatus; /* status flags, reset only by _Validate */ UINT fDraw; /* drawing flags, reset only by _Layout */ UINT uCDret; /* last return from NM_CUSTOMDRAW */ RECT rcBand; /* calculated band rectangle - coordinates swapped for CCS_VERT */ RECT rcGripper; /* calculated gripper rectangle */ RECT rcCapImage; /* calculated caption image rectangle */ RECT rcCapText; /* calculated caption text rectangle */ RECT rcChild; /* calculated child rectangle */ RECT rcChevron; /* calculated chevron rectangle */ LPWSTR lpText; HWND hwndPrevParent; } REBAR_BAND; /* has a value of: 0, CCS_TOP, CCS_NOMOVEY, CCS_BOTTOM */ #define 
CCS_LAYOUT_MASK 0x3 /* fStatus flags */ #define HAS_GRIPPER 0x00000001 #define HAS_IMAGE 0x00000002 #define HAS_TEXT 0x00000004 /* fDraw flags */ #define DRAW_GRIPPER 0x00000001 #define DRAW_IMAGE 0x00000002 #define DRAW_TEXT 0x00000004 #define DRAW_CHEVRONHOT 0x00000040 #define DRAW_CHEVRONPUSHED 0x00000080 #define NTF_INVALIDATE 0x01000000 typedef struct { COLORREF clrBk; /* background color */ COLORREF clrText; /* text color */ COLORREF clrBtnText; /* system color for BTNTEXT */ COLORREF clrBtnFace; /* system color for BTNFACE */ HIMAGELIST himl; /* handle to imagelist */ UINT uNumBands; /* # of bands in rebar (first=0, last=uNumBands-1 */ UINT uNumRows; /* # of rows of bands (first=1, last=uNumRows */ HWND hwndSelf; /* handle of REBAR window itself */ HWND hwndToolTip; /* handle to the tool tip control */ HWND hwndNotify; /* notification window (parent) */ HFONT hDefaultFont; HFONT hFont; /* handle to the rebar's font */ SIZE imageSize; /* image size (image list) */ DWORD dwStyle; /* window style */ DWORD orgStyle; /* original style (dwStyle may change) */ SIZE calcSize; /* calculated rebar size - coordinates swapped for CCS_VERT */ BOOL bUnicode; /* TRUE if parent wants notify in W format */ BOOL DoRedraw; /* TRUE to actually draw bands */ UINT fStatus; /* Status flags (see below) */ HCURSOR hcurArrow; /* handle to the arrow cursor */ HCURSOR hcurHorz; /* handle to the EW cursor */ HCURSOR hcurVert; /* handle to the NS cursor */ HCURSOR hcurDrag; /* handle to the drag cursor */ INT iVersion; /* version number */ POINT dragStart; /* x,y of button down */ POINT dragNow; /* x,y of this MouseMove */ INT iOldBand; /* last band that had the mouse cursor over it */ INT ihitoffset; /* offset of hotspot from gripper.left */ INT ichevronhotBand; /* last band that had a hot chevron */ INT iGrabbedBand;/* band number of band whose gripper was grabbed */ HDPA bands; /* pointer to the array of rebar bands */ } REBAR_INFO; /* fStatus flags */ #define BEGIN_DRAG_ISSUED 
0x00000001 #define SELF_RESIZE 0x00000002 #define BAND_NEEDS_REDRAW 0x00000020 /* used by Windows to mark that the header size has been set by the user and shouldn't be changed */ #define RBBS_UNDOC_FIXEDHEADER 0x40000000 /* ---- REBAR layout constants. Mostly determined by ---- */ /* ---- experiment on WIN 98. ---- */ /* Width (or height) of separators between bands (either horz. or */ /* vert.). True only if RBS_BANDBORDERS is set */ #define SEP_WIDTH_SIZE 2 #define SEP_WIDTH ((infoPtr->dwStyle & RBS_BANDBORDERS) ? SEP_WIDTH_SIZE : 0) /* Blank (background color) space between Gripper (if present) */ /* and next item (image, text, or window). Always present */ #define REBAR_ALWAYS_SPACE 4 /* Blank (background color) space after Image (if present). */ #define REBAR_POST_IMAGE 2 /* Blank (background color) space after Text (if present). */ #define REBAR_POST_TEXT 4 /* Height of vertical gripper in a CCS_VERT rebar. */ #define GRIPPER_HEIGHT 16 /* Blank (background color) space before Gripper (if present). */ #define REBAR_PRE_GRIPPER 2 /* Width (of normal vertical gripper) or height (of horz. gripper) */ /* if present. */ #define GRIPPER_WIDTH 3 /* Width of the chevron button if present */ #define CHEVRON_WIDTH 10 /* the gap between the child and the next band */ #define REBAR_POST_CHILD 4 /* Height of divider for Rebar if not disabled (CCS_NODIVIDER) */ /* either top or bottom */ #define REBAR_DIVIDER 2 /* height of a rebar without a child */ #define REBAR_NO_CHILD_HEIGHT 4 /* minimum vertical height of a normal bar */ /* or minimum width of a CCS_VERT bar - from experiment on Win2k */ #define REBAR_MINSIZE 23 /* This is the increment that is used over the band height */ #define REBARSPACE(a) ((a->fStyle & RBBS_CHILDEDGE) ? 2*REBAR_DIVIDER : 0) /* ---- End of REBAR layout constants. 
---- */ #define RB_GETBANDINFO_OLD (WM_USER+5) /* obsoleted after IE3, but we have to support it anyway */ /* The following define determines if a given band is hidden */ #define HIDDENBAND(a) (((a)->fStyle & RBBS_HIDDEN) || \ ((infoPtr->dwStyle & CCS_VERT) && \ ((a)->fStyle & RBBS_NOVERT))) #define REBAR_GetInfoPtr(wndPtr) ((REBAR_INFO *)GetWindowLongPtrW (hwnd, 0)) static LRESULT REBAR_NotifyFormat(REBAR_INFO *infoPtr, LPARAM lParam); static void REBAR_AutoSize(REBAR_INFO *infoPtr, BOOL needsLayout); /* no index check here */ static inline REBAR_BAND* REBAR_GetBand(const REBAR_INFO *infoPtr, INT i) { assert(i >= 0 && i < infoPtr->uNumBands); return DPA_GetPtr(infoPtr->bands, i); } /* "constant values" retrieved when DLL was initialized */ /* FIXME we do this when the classes are registered. */ static UINT mindragx = 0; static UINT mindragy = 0; static const char * const band_stylename[] = { "RBBS_BREAK", /* 0001 */ "RBBS_FIXEDSIZE", /* 0002 */ "RBBS_CHILDEDGE", /* 0004 */ "RBBS_HIDDEN", /* 0008 */ "RBBS_NOVERT", /* 0010 */ "RBBS_FIXEDBMP", /* 0020 */ "RBBS_VARIABLEHEIGHT", /* 0040 */ "RBBS_GRIPPERALWAYS", /* 0080 */ "RBBS_NOGRIPPER", /* 0100 */ NULL }; static const char * const band_maskname[] = { "RBBIM_STYLE", /* 0x00000001 */ "RBBIM_COLORS", /* 0x00000002 */ "RBBIM_TEXT", /* 0x00000004 */ "RBBIM_IMAGE", /* 0x00000008 */ "RBBIM_CHILD", /* 0x00000010 */ "RBBIM_CHILDSIZE", /* 0x00000020 */ "RBBIM_SIZE", /* 0x00000040 */ "RBBIM_BACKGROUND", /* 0x00000080 */ "RBBIM_ID", /* 0x00000100 */ "RBBIM_IDEALSIZE", /* 0x00000200 */ "RBBIM_LPARAM", /* 0x00000400 */ "RBBIM_HEADERSIZE", /* 0x00000800 */ NULL }; static CHAR line[200]; static const WCHAR themeClass[] = { 'R','e','b','a','r',0 }; static CHAR * REBAR_FmtStyle( UINT style) { INT i = 0; *line = 0; while (band_stylename[i]) { if (style & (1<<i)) { if (*line != 0) strcat(line, " | "); strcat(line, band_stylename[i]); } i++; } return line; } static CHAR * REBAR_FmtMask( UINT mask) { INT i = 0; *line = 0; while 
(band_maskname[i]) { if (mask & (1<<i)) { if (*line != 0) strcat(line, " | "); strcat(line, band_maskname[i]); } i++; } return line; } static VOID REBAR_DumpBandInfo(const REBARBANDINFOW *pB) { if( !TRACE_ON(rebar) ) return; TRACE("band info: "); if (pB->fMask & RBBIM_ID) TRACE("ID=%u, ", pB->wID); TRACE("size=%u, child=%p", pB->cbSize, pB->hwndChild); if (pB->fMask & RBBIM_COLORS) TRACE(", clrF=0x%06x, clrB=0x%06x", pB->clrFore, pB->clrBack); TRACE("\n"); TRACE("band info: mask=0x%08x (%s)\n", pB->fMask, REBAR_FmtMask(pB->fMask)); if (pB->fMask & RBBIM_STYLE) TRACE("band info: style=0x%08x (%s)\n", pB->fStyle, REBAR_FmtStyle(pB->fStyle)); if (pB->fMask & (RBBIM_SIZE | RBBIM_IDEALSIZE | RBBIM_HEADERSIZE | RBBIM_LPARAM )) { TRACE("band info:"); if (pB->fMask & RBBIM_SIZE) TRACE(" cx=%u", pB->cx); if (pB->fMask & RBBIM_IDEALSIZE) TRACE(" xIdeal=%u", pB->cxIdeal); if (pB->fMask & RBBIM_HEADERSIZE) TRACE(" xHeader=%u", pB->cxHeader); if (pB->fMask & RBBIM_LPARAM) TRACE(" lParam=0x%08lx", pB->lParam); TRACE("\n"); } if (pB->fMask & RBBIM_CHILDSIZE) TRACE("band info: xMin=%u, yMin=%u, yChild=%u, yMax=%u, yIntgl=%u\n", pB->cxMinChild, pB->cyMinChild, pB->cyChild, pB->cyMaxChild, pB->cyIntegral); } static VOID REBAR_DumpBand (const REBAR_INFO *iP) { REBAR_BAND *pB; UINT i; if(! 
TRACE_ON(rebar) ) return; TRACE("hwnd=%p: color=%08x/%08x, bands=%u, rows=%u, cSize=%d,%d\n", iP->hwndSelf, iP->clrText, iP->clrBk, iP->uNumBands, iP->uNumRows, iP->calcSize.cx, iP->calcSize.cy); TRACE("hwnd=%p: flags=%08x, dragStart=%d,%d, dragNow=%d,%d, iGrabbedBand=%d\n", iP->hwndSelf, iP->fStatus, iP->dragStart.x, iP->dragStart.y, iP->dragNow.x, iP->dragNow.y, iP->iGrabbedBand); TRACE("hwnd=%p: style=%08x, notify in Unicode=%s, redraw=%s\n", iP->hwndSelf, iP->dwStyle, (iP->bUnicode)?"TRUE":"FALSE", (iP->DoRedraw)?"TRUE":"FALSE"); for (i = 0; i < iP->uNumBands; i++) { pB = REBAR_GetBand(iP, i); TRACE("band # %u:", i); if (pB->fMask & RBBIM_ID) TRACE(" ID=%u", pB->wID); if (pB->fMask & RBBIM_CHILD) TRACE(" child=%p", pB->hwndChild); if (pB->fMask & RBBIM_COLORS) TRACE(" clrF=0x%06x clrB=0x%06x", pB->clrFore, pB->clrBack); TRACE("\n"); TRACE("band # %u: mask=0x%08x (%s)\n", i, pB->fMask, REBAR_FmtMask(pB->fMask)); if (pB->fMask & RBBIM_STYLE) TRACE("band # %u: style=0x%08x (%s)\n", i, pB->fStyle, REBAR_FmtStyle(pB->fStyle)); TRACE("band # %u: xHeader=%u", i, pB->cxHeader); if (pB->fMask & (RBBIM_SIZE | RBBIM_IDEALSIZE | RBBIM_LPARAM )) { if (pB->fMask & RBBIM_SIZE) TRACE(" cx=%u", pB->cx); if (pB->fMask & RBBIM_IDEALSIZE) TRACE(" xIdeal=%u", pB->cxIdeal); if (pB->fMask & RBBIM_LPARAM) TRACE(" lParam=0x%08lx", pB->lParam); } TRACE("\n"); if (RBBIM_CHILDSIZE) TRACE("band # %u: xMin=%u, yMin=%u, yChild=%u, yMax=%u, yIntgl=%u\n", i, pB->cxMinChild, pB->cyMinChild, pB->cyChild, pB->cyMaxChild, pB->cyIntegral); if (pB->fMask & RBBIM_TEXT) TRACE("band # %u: text=%s\n", i, (pB->lpText) ? 
debugstr_w(pB->lpText) : "(null)"); TRACE("band # %u: cxMinBand=%u, cxEffective=%u, cyMinBand=%u\n", i, pB->cxMinBand, pB->cxEffective, pB->cyMinBand); TRACE("band # %u: fStatus=%08x, fDraw=%08x, Band=(%s), Grip=(%s)\n", i, pB->fStatus, pB->fDraw, wine_dbgstr_rect(&pB->rcBand), wine_dbgstr_rect(&pB->rcGripper)); TRACE("band # %u: Img=(%s), Txt=(%s), Child=(%s)\n", i, wine_dbgstr_rect(&pB->rcCapImage), wine_dbgstr_rect(&pB->rcCapText), wine_dbgstr_rect(&pB->rcChild)); } } /* dest can be equal to src */ static void translate_rect(const REBAR_INFO *infoPtr, RECT *dest, const RECT *src) { if (infoPtr->dwStyle & CCS_VERT) { int tmp; tmp = src->left; dest->left = src->top; dest->top = tmp; tmp = src->right; dest->right = src->bottom; dest->bottom = tmp; } else { *dest = *src; } } static int get_rect_cx(const REBAR_INFO *infoPtr, const RECT *lpRect) { if (infoPtr->dwStyle & CCS_VERT) return lpRect->bottom - lpRect->top; return lpRect->right - lpRect->left; } static int get_rect_cy(const REBAR_INFO *infoPtr, const RECT *lpRect) { if (infoPtr->dwStyle & CCS_VERT) return lpRect->right - lpRect->left; return lpRect->bottom - lpRect->top; } static int round_child_height(const REBAR_BAND *lpBand, int cyHeight) { int cy = 0; if (lpBand->cyIntegral == 0) return cyHeight; cy = max(cyHeight - (int)lpBand->cyMinChild, 0); cy = lpBand->cyMinChild + (cy/lpBand->cyIntegral) * lpBand->cyIntegral; cy = min(cy, lpBand->cyMaxChild); return cy; } static void update_min_band_height(const REBAR_INFO *infoPtr, REBAR_BAND *lpBand) { lpBand->cyMinBand = max(lpBand->cyHeader, (lpBand->hwndChild ? 
lpBand->cyChild + REBARSPACE(lpBand) : REBAR_NO_CHILD_HEIGHT));
}

/* Draw the small 3-segment chevron arrow with its top-left near (left, top),
 * using a solid 1-pixel pen in the given system color index. */
static void REBAR_DrawChevron (HDC hdc, INT left, INT top, INT colorRef)
{
    INT x, y;
    HPEN hPen, hOldPen;

    if (!(hPen = CreatePen( PS_SOLID, 1, GetSysColor( colorRef )))) return;
    hOldPen = SelectObject ( hdc, hPen );
    x = left + 2;
    y = top;
    MoveToEx (hdc, x, y, NULL);
    LineTo (hdc, x+5, y++);
    x++;
    MoveToEx (hdc, x, y, NULL);
    LineTo (hdc, x+3, y++);
    x++;
    MoveToEx (hdc, x, y, NULL);
    LineTo (hdc, x+1, y);
    SelectObject( hdc, hOldPen );
    DeleteObject( hPen );
}

/* Window that receives our WM_NOTIFY messages: the explicit notify window
 * if one was set, otherwise the parent — or the owner, when one exists. */
static HWND REBAR_GetNotifyParent (const REBAR_INFO *infoPtr)
{
    HWND parent, owner;

    parent = infoPtr->hwndNotify;
    if (!parent) {
        parent = GetParent (infoPtr->hwndSelf);
        owner = GetWindow (infoPtr->hwndSelf, GW_OWNER);
        if (owner) parent = owner;
    }
    return parent;
}

/* Fill in the common NMHDR fields and deliver the notification; returns
 * whatever the notify window's WM_NOTIFY handler returned. */
static INT REBAR_Notify (NMHDR *nmhdr, const REBAR_INFO *infoPtr, UINT code)
{
    HWND parent;

    parent = REBAR_GetNotifyParent (infoPtr);
    nmhdr->idFrom = GetDlgCtrlID (infoPtr->hwndSelf);
    nmhdr->hwndFrom = infoPtr->hwndSelf;
    nmhdr->code = code;

    TRACE("window %p, code=%08x, via %s\n", parent, code,
          (infoPtr->bUnicode)?"Unicode":"ANSI");

    return SendMessageW(parent, WM_NOTIFY, nmhdr->idFrom, (LPARAM)nmhdr);
}

/* Send an NMREBAR notification for band uBand (-1 for no band), copying the
 * band's ID / lParam / style into the notification for each mask bit set. */
static INT REBAR_Notify_NMREBAR (const REBAR_INFO *infoPtr, UINT uBand, UINT code)
{
    NMREBAR notify_rebar;

    notify_rebar.dwMask = 0;
    if (uBand != -1) {
        REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, uBand);
        if (lpBand->fMask & RBBIM_ID) {
            notify_rebar.dwMask |= RBNM_ID;
            notify_rebar.wID = lpBand->wID;
        }
        if (lpBand->fMask & RBBIM_LPARAM) {
            notify_rebar.dwMask |= RBNM_LPARAM;
            notify_rebar.lParam = lpBand->lParam;
        }
        if (lpBand->fMask & RBBIM_STYLE) {
            notify_rebar.dwMask |= RBNM_STYLE;
            notify_rebar.fStyle = lpBand->fStyle;
        }
    }
    notify_rebar.uBand = uBand;
    return REBAR_Notify ((NMHDR *)&notify_rebar, infoPtr, code);
}

/* Paint one band: custom-draw pre/post notifications, gripper, caption
 * image, caption text and chevron, themed when a window theme is active. */
static VOID REBAR_DrawBand (HDC hdc, const REBAR_INFO *infoPtr, REBAR_BAND *lpBand)
{
    HFONT hOldFont = 0;
    INT oldBkMode = 0;
    NMCUSTOMDRAW nmcd;
    HTHEME theme = GetWindowTheme
(infoPtr->hwndSelf);
    RECT rcBand;
    translate_rect(infoPtr, &rcBand, &lpBand->rcBand);

    /* select the caption font up front so the PREPAINT notification sees it */
    if (lpBand->fDraw & DRAW_TEXT) {
        hOldFont = SelectObject (hdc, infoPtr->hFont);
        oldBkMode = SetBkMode (hdc, TRANSPARENT);
    }

    /* should test for CDRF_NOTIFYITEMDRAW here */
    nmcd.dwDrawStage = CDDS_ITEMPREPAINT;
    nmcd.hdc = hdc;
    nmcd.rc = rcBand;
    nmcd.rc.right = lpBand->rcCapText.right;
    nmcd.rc.bottom = lpBand->rcCapText.bottom;
    nmcd.dwItemSpec = lpBand->wID;
    nmcd.uItemState = 0;
    nmcd.lItemlParam = lpBand->lParam;
    lpBand->uCDret = REBAR_Notify ((NMHDR *)&nmcd, infoPtr, NM_CUSTOMDRAW);
    if (lpBand->uCDret == CDRF_SKIPDEFAULT) {
        /* restore the DC state before bailing out */
        if (oldBkMode != TRANSPARENT)
            SetBkMode (hdc, oldBkMode);
        SelectObject (hdc, hOldFont);
        return;
    }

    /* draw gripper */
    if (lpBand->fDraw & DRAW_GRIPPER) {
        if (theme) {
            RECT rcGripper = lpBand->rcGripper;
            int partId = (infoPtr->dwStyle & CCS_VERT) ? RP_GRIPPERVERT : RP_GRIPPER;
            GetThemeBackgroundExtent (theme, hdc, partId, 0, &rcGripper, &rcGripper);
            OffsetRect (&rcGripper, lpBand->rcGripper.left - rcGripper.left,
                        lpBand->rcGripper.top - rcGripper.top);
            DrawThemeBackground (theme, hdc, partId, 0, &rcGripper, NULL);
        }
        else
            DrawEdge (hdc, &lpBand->rcGripper, BDR_RAISEDINNER, BF_RECT | BF_MIDDLE);
    }

    /* draw caption image */
    if (lpBand->fDraw & DRAW_IMAGE) {
        POINT pt;

        /* center image */
        pt.y = (lpBand->rcCapImage.bottom + lpBand->rcCapImage.top - infoPtr->imageSize.cy)/2;
        pt.x = (lpBand->rcCapImage.right + lpBand->rcCapImage.left - infoPtr->imageSize.cx)/2;

        ImageList_Draw (infoPtr->himl, lpBand->iImage, hdc,
                        pt.x, pt.y, ILD_TRANSPARENT);
    }

    /* draw caption text */
    if (lpBand->fDraw & DRAW_TEXT) {
        /* need to handle CDRF_NEWFONT here */
        INT oldBkMode = SetBkMode (hdc, TRANSPARENT);
        COLORREF oldcolor = CLR_NONE;
        COLORREF new;
        if (lpBand->clrFore != CLR_NONE) {
            new = (lpBand->clrFore == CLR_DEFAULT) ? infoPtr->clrBtnText :
                                                     lpBand->clrFore;
            oldcolor = SetTextColor (hdc, new);
        }
        DrawTextW (hdc, lpBand->lpText, -1, &lpBand->rcCapText,
                   DT_CENTER | DT_VCENTER | DT_SINGLELINE);
        if (oldBkMode != TRANSPARENT)
            SetBkMode (hdc, oldBkMode);
        if (lpBand->clrFore != CLR_NONE)
            SetTextColor (hdc, oldcolor);
        SelectObject (hdc, hOldFont);
    }

    /* draw the chevron button, if present */
    if (!IsRectEmpty(&lpBand->rcChevron)) {
        if (theme) {
            int stateId;
            if (lpBand->fDraw & DRAW_CHEVRONPUSHED)
                stateId = CHEVS_PRESSED;
            else if (lpBand->fDraw & DRAW_CHEVRONHOT)
                stateId = CHEVS_HOT;
            else
                stateId = CHEVS_NORMAL;
            DrawThemeBackground (theme, hdc, RP_CHEVRON, stateId, &lpBand->rcChevron, NULL);
        }
        else {
            if (lpBand->fDraw & DRAW_CHEVRONPUSHED) {
                DrawEdge(hdc, &lpBand->rcChevron, BDR_SUNKENOUTER, BF_RECT | BF_MIDDLE);
                REBAR_DrawChevron(hdc, lpBand->rcChevron.left+1,
                                  lpBand->rcChevron.top + 11, COLOR_WINDOWFRAME);
            }
            else if (lpBand->fDraw & DRAW_CHEVRONHOT) {
                DrawEdge(hdc, &lpBand->rcChevron, BDR_RAISEDINNER, BF_RECT | BF_MIDDLE);
                REBAR_DrawChevron(hdc, lpBand->rcChevron.left,
                                  lpBand->rcChevron.top + 10, COLOR_WINDOWFRAME);
            }
            else
                REBAR_DrawChevron(hdc, lpBand->rcChevron.left,
                                  lpBand->rcChevron.top + 10, COLOR_WINDOWFRAME);
        }
    }

    if (lpBand->uCDret == (CDRF_NOTIFYPOSTPAINT | CDRF_NOTIFYITEMDRAW)) {
        nmcd.dwDrawStage = CDDS_ITEMPOSTPAINT;
        nmcd.hdc = hdc;
        nmcd.rc = rcBand;
        nmcd.rc.right = lpBand->rcCapText.right;
        nmcd.rc.bottom = lpBand->rcCapText.bottom;
        nmcd.dwItemSpec = lpBand->wID;
        nmcd.uItemState = 0;
        nmcd.lItemlParam = lpBand->lParam;
        lpBand->uCDret = REBAR_Notify ((NMHDR *)&nmcd, infoPtr, NM_CUSTOMDRAW);
    }
}

/* Repaint every visible band; no-op while redraw is disabled. */
static VOID REBAR_Refresh (const REBAR_INFO *infoPtr, HDC hdc)
{
    REBAR_BAND *lpBand;
    UINT i;

    if (!infoPtr->DoRedraw) return;

    for (i = 0; i < infoPtr->uNumBands; i++) {
        lpBand = REBAR_GetBand(infoPtr, i);

        if (HIDDENBAND(lpBand)) continue;

        /* now draw the band */
        TRACE("[%p] drawing band %i, flags=%08x\n",
              infoPtr->hwndSelf, i, lpBand->fDraw);
        REBAR_DrawBand (hdc, infoPtr, lpBand);
    }
}

static void REBAR_CalcHorzBand (const REBAR_INFO
*infoPtr, UINT rstart, UINT rend)
     /* Function: this routine initializes all the rectangles in */
     /*  each band in a row to fit in the adjusted rcBand rect.  */
     /* *** Supports only Horizontal bars. *** */
{
    REBAR_BAND *lpBand;
    UINT i, xoff;
    RECT work;

    for(i=rstart; i<rend; i++){
        lpBand = REBAR_GetBand(infoPtr, i);
        if (HIDDENBAND(lpBand)) {
            /* collapse the child rect of a hidden band to zero width */
            SetRect (&lpBand->rcChild,
                     lpBand->rcBand.right, lpBand->rcBand.top,
                     lpBand->rcBand.right, lpBand->rcBand.bottom);
            continue;
        }

        /* set initial gripper rectangle */
        SetRect (&lpBand->rcGripper, lpBand->rcBand.left, lpBand->rcBand.top,
                 lpBand->rcBand.left, lpBand->rcBand.bottom);

        /* calculate gripper rectangle */
        if ( lpBand->fStatus & HAS_GRIPPER) {
            lpBand->fDraw |= DRAW_GRIPPER;
            lpBand->rcGripper.left   += REBAR_PRE_GRIPPER;
            lpBand->rcGripper.right   = lpBand->rcGripper.left + GRIPPER_WIDTH;
            lpBand->rcGripper.top    += 2;
            lpBand->rcGripper.bottom -= 2;

            /* caption image starts right after the gripper */
            SetRect (&lpBand->rcCapImage,
                     lpBand->rcGripper.right+REBAR_ALWAYS_SPACE, lpBand->rcBand.top,
                     lpBand->rcGripper.right+REBAR_ALWAYS_SPACE, lpBand->rcBand.bottom);
        }
        else {  /* no gripper will be drawn */
            xoff = 0;
            if (lpBand->fStatus & (HAS_IMAGE | HAS_TEXT))
                /* if no gripper but either image or text, then leave space */
                xoff = REBAR_ALWAYS_SPACE;
            SetRect (&lpBand->rcCapImage,
                     lpBand->rcBand.left+xoff, lpBand->rcBand.top,
                     lpBand->rcBand.left+xoff, lpBand->rcBand.bottom);
        }

        /* image is visible */
        if (lpBand->fStatus & HAS_IMAGE) {
            lpBand->fDraw |= DRAW_IMAGE;
            lpBand->rcCapImage.right  += infoPtr->imageSize.cx;
            lpBand->rcCapImage.bottom  = lpBand->rcCapImage.top + infoPtr->imageSize.cy;

            /* set initial caption text rectangle */
            SetRect (&lpBand->rcCapText,
                     lpBand->rcCapImage.right+REBAR_POST_IMAGE, lpBand->rcBand.top+1,
                     lpBand->rcBand.left+lpBand->cxHeader, lpBand->rcBand.bottom-1);
        }
        else {
            /* set initial caption text rectangle */
            SetRect (&lpBand->rcCapText,
                     lpBand->rcCapImage.right, lpBand->rcBand.top+1,
                     lpBand->rcBand.left+lpBand->cxHeader, lpBand->rcBand.bottom-1);
        }

        /* text is visible */
        if ((lpBand->fStatus & HAS_TEXT) && !(lpBand->fStyle & RBBS_HIDETITLE)) {
            lpBand->fDraw |= DRAW_TEXT;
            lpBand->rcCapText.right = max(lpBand->rcCapText.left,
                                          lpBand->rcCapText.right-REBAR_POST_TEXT);
        }

        /* set initial child window rectangle if there is a child */
        if (lpBand->hwndChild) {
            lpBand->rcChild.left  = lpBand->rcBand.left + lpBand->cxHeader;
            lpBand->rcChild.right = lpBand->rcBand.right - REBAR_POST_CHILD;

            if (lpBand->cyChild > 0) {
                UINT yoff = (lpBand->rcBand.bottom - lpBand->rcBand.top - lpBand->cyChild) / 2;

                /* center child if height is known */
                lpBand->rcChild.top = lpBand->rcBand.top + yoff;
                lpBand->rcChild.bottom = lpBand->rcBand.top + yoff + lpBand->cyChild;
            }
            else {
                lpBand->rcChild.top = lpBand->rcBand.top;
                lpBand->rcChild.bottom = lpBand->rcBand.bottom;
            }

            /* carve the chevron out of the child area when the band is
             * narrower than its ideal width */
            if ((lpBand->fStyle & RBBS_USECHEVRON) &&
                (lpBand->rcChild.right - lpBand->rcChild.left < lpBand->cxIdeal))
            {
                lpBand->rcChild.right -= CHEVRON_WIDTH;
                SetRect(&lpBand->rcChevron, lpBand->rcChild.right,
                        lpBand->rcChild.top, lpBand->rcChild.right + CHEVRON_WIDTH,
                        lpBand->rcChild.bottom);
            }
        }
        else {
            SetRect (&lpBand->rcChild,
                     lpBand->rcBand.left+lpBand->cxHeader, lpBand->rcBand.top,
                     lpBand->rcBand.right, lpBand->rcBand.bottom);
        }

        /* flag if notify required and invalidate rectangle */
        if (lpBand->fDraw & NTF_INVALIDATE) {
            TRACE("invalidating (%d,%d)-(%d,%d)\n",
                  lpBand->rcBand.left,
                  lpBand->rcBand.top,
                  lpBand->rcBand.right + SEP_WIDTH,
                  lpBand->rcBand.bottom + SEP_WIDTH);
            lpBand->fDraw &= ~NTF_INVALIDATE;
            work = lpBand->rcBand;
            work.right  += SEP_WIDTH;
            work.bottom += SEP_WIDTH;
            InvalidateRect(infoPtr->hwndSelf, &work, TRUE);
            if (lpBand->hwndChild) InvalidateRect(lpBand->hwndChild, NULL, TRUE);
        }

    }

}

static VOID REBAR_CalcVertBand (const REBAR_INFO *infoPtr, UINT rstart, UINT rend)
     /* Function: this routine initializes all the rectangles in */
     /*  each band in a row to fit in the adjusted rcBand rect.  */
     /* *** Supports only Vertical bars.
*** */
{
    REBAR_BAND *lpBand;
    UINT i, xoff;
    RECT work;

    for(i=rstart; i<rend; i++){
        RECT rcBand;
        lpBand = REBAR_GetBand(infoPtr, i);
        if (HIDDENBAND(lpBand)) continue;
        /* work in translated ("horizontal") coordinates throughout */
        translate_rect(infoPtr, &rcBand, &lpBand->rcBand);

        /* set initial gripper rectangle */
        SetRect (&lpBand->rcGripper, rcBand.left, rcBand.top, rcBand.right, rcBand.top);

        /* calculate gripper rectangle */
        if (lpBand->fStatus & HAS_GRIPPER) {
            lpBand->fDraw |= DRAW_GRIPPER;

            if (infoPtr->dwStyle & RBS_VERTICALGRIPPER) {
                /*  vertical gripper  */
                lpBand->rcGripper.left   += 3;
                lpBand->rcGripper.right   = lpBand->rcGripper.left + GRIPPER_WIDTH;
                lpBand->rcGripper.top    += REBAR_PRE_GRIPPER;
                lpBand->rcGripper.bottom  = lpBand->rcGripper.top + GRIPPER_HEIGHT;

                /* initialize Caption image rectangle  */
                SetRect (&lpBand->rcCapImage, rcBand.left,
                         lpBand->rcGripper.bottom + REBAR_ALWAYS_SPACE,
                         rcBand.right,
                         lpBand->rcGripper.bottom + REBAR_ALWAYS_SPACE);
            }
            else {
                /*  horizontal gripper  */
                lpBand->rcGripper.left   += 2;
                lpBand->rcGripper.right  -= 2;
                lpBand->rcGripper.top    += REBAR_PRE_GRIPPER;
                lpBand->rcGripper.bottom  = lpBand->rcGripper.top + GRIPPER_WIDTH;

                /* initialize Caption image rectangle  */
                SetRect (&lpBand->rcCapImage, rcBand.left,
                         lpBand->rcGripper.bottom + REBAR_ALWAYS_SPACE,
                         rcBand.right,
                         lpBand->rcGripper.bottom + REBAR_ALWAYS_SPACE);
            }
        }
        else {  /* no gripper will be drawn */
            xoff = 0;
            if (lpBand->fStatus & (HAS_IMAGE | HAS_TEXT))
                /* if no gripper but either image or text, then leave space */
                xoff = REBAR_ALWAYS_SPACE;
            /* initialize Caption image rectangle  */
            SetRect (&lpBand->rcCapImage, rcBand.left, rcBand.top+xoff,
                     rcBand.right, rcBand.top+xoff);
        }

        /* image is visible */
        if (lpBand->fStatus & HAS_IMAGE) {
            lpBand->fDraw |= DRAW_IMAGE;

            lpBand->rcCapImage.right   = lpBand->rcCapImage.left + infoPtr->imageSize.cx;
            lpBand->rcCapImage.bottom += infoPtr->imageSize.cy;

            /* set initial caption text rectangle */
            SetRect (&lpBand->rcCapText, rcBand.left,
                     lpBand->rcCapImage.bottom+REBAR_POST_IMAGE,
                     rcBand.right,
                     rcBand.top+lpBand->cxHeader);
        }
        else {
            /* set initial caption text rectangle */
            SetRect (&lpBand->rcCapText, rcBand.left,
                     lpBand->rcCapImage.bottom,
                     rcBand.right,
                     rcBand.top+lpBand->cxHeader);
        }

        /* text is visible */
        if ((lpBand->fStatus & HAS_TEXT) && !(lpBand->fStyle & RBBS_HIDETITLE)) {
            lpBand->fDraw |= DRAW_TEXT;
            lpBand->rcCapText.bottom = max(lpBand->rcCapText.top,
                                           lpBand->rcCapText.bottom);
        }

        /* set initial child window rectangle if there is a child */
        if (lpBand->hwndChild) {
            int cxBand = rcBand.right - rcBand.left;
            /* center the child across the band */
            xoff = (cxBand - lpBand->cyChild) / 2;
            SetRect (&lpBand->rcChild,
                     rcBand.left + xoff,
                     rcBand.top + lpBand->cxHeader,
                     rcBand.left + xoff + lpBand->cyChild,
                     rcBand.bottom - REBAR_POST_CHILD);
        }
        else {
            SetRect (&lpBand->rcChild,
                     rcBand.left, rcBand.top+lpBand->cxHeader,
                     rcBand.right, rcBand.bottom);
        }

        if (lpBand->fDraw & NTF_INVALIDATE) {
            TRACE("invalidating (%d,%d)-(%d,%d)\n",
                  rcBand.left,
                  rcBand.top,
                  rcBand.right + SEP_WIDTH,
                  rcBand.bottom + SEP_WIDTH);
            lpBand->fDraw &= ~NTF_INVALIDATE;
            work = rcBand;
            work.bottom += SEP_WIDTH;
            work.right  += SEP_WIDTH;
            InvalidateRect(infoPtr->hwndSelf, &work, TRUE);
            if (lpBand->hwndChild) InvalidateRect(lpBand->hwndChild, NULL, TRUE);
        }

    }
}

static VOID REBAR_ForceResize (REBAR_INFO *infoPtr)
     /* Function: This changes the size of the REBAR window to that */
     /*  calculated by REBAR_Layout. */
{
    INT x, y, width, height;
    INT xedge = 0, yedge = 0;
    RECT rcSelf;

    TRACE("new size [%d x %d]\n", infoPtr->calcSize.cx, infoPtr->calcSize.cy);

    if (infoPtr->dwStyle & CCS_NORESIZE)
        return;

    if (infoPtr->dwStyle & WS_BORDER)
    {
        xedge = GetSystemMetrics(SM_CXEDGE);
        yedge = GetSystemMetrics(SM_CYEDGE);
        /* swap for CCS_VERT?
 */
    }

    /* compute rebar window rect in parent client coordinates */
    GetWindowRect(infoPtr->hwndSelf, &rcSelf);
    MapWindowPoints(HWND_DESKTOP, GetParent(infoPtr->hwndSelf), (LPPOINT)&rcSelf, 2);
    translate_rect(infoPtr, &rcSelf, &rcSelf);

    height = infoPtr->calcSize.cy + 2*yedge;
    if (!(infoPtr->dwStyle & CCS_NOPARENTALIGN)) {
        RECT rcParent;

        x = -xedge;
        width = infoPtr->calcSize.cx + 2*xedge;
        y = 0; /* quiet compiler warning */
        switch ( infoPtr->dwStyle & CCS_LAYOUT_MASK) {
            case 0:     /* shouldn't happen - see NCCreate */
            case CCS_TOP:
                y = ((infoPtr->dwStyle & CCS_NODIVIDER) ? 0 : REBAR_DIVIDER) - yedge;
                break;
            case CCS_NOMOVEY:
                y = rcSelf.top;
                break;
            case CCS_BOTTOM:
                GetClientRect(GetParent(infoPtr->hwndSelf), &rcParent);
                translate_rect(infoPtr, &rcParent, &rcParent);
                y = rcParent.bottom - infoPtr->calcSize.cy - yedge;
                break;
        }
    }
    else {
        x = rcSelf.left;
        /* As on Windows if the CCS_NODIVIDER is not present the control will move
         * 2 pixel down after every layout */
        y = rcSelf.top + ((infoPtr->dwStyle & CCS_NODIVIDER) ? 0 : REBAR_DIVIDER);
        width = rcSelf.right - rcSelf.left;
    }

    TRACE("hwnd %p, style=%08x, setting at (%d,%d) for (%d,%d)\n",
          infoPtr->hwndSelf, infoPtr->dwStyle, x, y, width, height);

    /* Set flag to ignore next WM_SIZE message and resize the window */
    infoPtr->fStatus |= SELF_RESIZE;
    if ((infoPtr->dwStyle & CCS_VERT) == 0)
        SetWindowPos(infoPtr->hwndSelf, 0, x, y, width, height, SWP_NOZORDER);
    else
        /* x/y and width/height are swapped back for a vertical rebar */
        SetWindowPos(infoPtr->hwndSelf, 0, y, x, height, width, SWP_NOZORDER);
    infoPtr->fStatus &= ~SELF_RESIZE;
}

/* Send RBN_CHILDSIZE for every visible band in [start; endplus) and move each
 * child window into its band's child rectangle via DeferWindowPos; combo and
 * comboex children get special vertical centering of their edit line. */
static VOID REBAR_MoveChildWindows (const REBAR_INFO *infoPtr, UINT start, UINT endplus)
{
    static const WCHAR strComboBox[] = { 'C','o','m','b','o','B','o','x',0 };
    REBAR_BAND *lpBand;
    WCHAR szClassName[40];
    UINT i;
    NMREBARCHILDSIZE rbcz;
    HDWP deferpos;

    if (!(deferpos = BeginDeferWindowPos(infoPtr->uNumBands)))
        ERR("BeginDeferWindowPos returned NULL\n");

    for (i = start; i < endplus; i++) {
        lpBand = REBAR_GetBand(infoPtr, i);
        if (HIDDENBAND(lpBand)) continue;
        if (lpBand->hwndChild) {
            TRACE("hwndChild = %p\n", lpBand->hwndChild);

            /* Always generate the RBN_CHILDSIZE even if child did not change */
            rbcz.uBand = i;
            rbcz.wID = lpBand->wID;
            rbcz.rcChild = lpBand->rcChild;
            translate_rect(infoPtr, &rbcz.rcBand, &lpBand->rcBand);
            if (infoPtr->dwStyle & CCS_VERT)
                rbcz.rcBand.top += lpBand->cxHeader;
            else
                rbcz.rcBand.left += lpBand->cxHeader;
            REBAR_Notify ((NMHDR *)&rbcz, infoPtr, RBN_CHILDSIZE);
            if (!EqualRect (&lpBand->rcChild, &rbcz.rcChild)) {
                TRACE("Child rect changed by NOTIFY for band %u\n", i);
                TRACE(" from (%s) to (%s)\n",
                      wine_dbgstr_rect(&lpBand->rcChild),
                      wine_dbgstr_rect(&rbcz.rcChild));
                lpBand->rcChild = rbcz.rcChild;  /* *** ??? */
            }

            /* native (IE4 in "Favorites" frame **1) does:
             *   SetRect (&rc, -1, -1, -1, -1)
             *   EqualRect (&rc,band->rc???)
* if ret==0
             *   CopyRect (band->rc????, &rc)
             *   set flag outside of loop
             */
            GetClassNameW (lpBand->hwndChild, szClassName,
                           sizeof(szClassName)/sizeof(szClassName[0]));
            if (!lstrcmpW (szClassName, strComboBox) ||
                !lstrcmpW (szClassName, WC_COMBOBOXEXW)) {
                INT nEditHeight, yPos;
                RECT rc;

                /* special placement code for combo or comboex box */

                /* get size of edit line */
                GetWindowRect (lpBand->hwndChild, &rc);
                nEditHeight = rc.bottom - rc.top;
                yPos = (lpBand->rcChild.bottom + lpBand->rcChild.top - nEditHeight)/2;

                /* center combo box inside child area */
                TRACE("moving child (Combo(Ex)) %p to (%d,%d) for (%d,%d)\n",
                      lpBand->hwndChild,
                      lpBand->rcChild.left, yPos,
                      lpBand->rcChild.right - lpBand->rcChild.left,
                      nEditHeight);
                deferpos = DeferWindowPos (deferpos, lpBand->hwndChild, HWND_TOP,
                                           lpBand->rcChild.left,
                                           /*lpBand->rcChild.top*/ yPos,
                                           lpBand->rcChild.right - lpBand->rcChild.left,
                                           nEditHeight,
                                           SWP_NOZORDER);
                if (!deferpos)
                    ERR("DeferWindowPos returned NULL\n");
            }
            else {
                TRACE("moving child (Other) %p to (%d,%d) for (%d,%d)\n",
                      lpBand->hwndChild,
                      lpBand->rcChild.left, lpBand->rcChild.top,
                      lpBand->rcChild.right - lpBand->rcChild.left,
                      lpBand->rcChild.bottom - lpBand->rcChild.top);
                deferpos = DeferWindowPos (deferpos, lpBand->hwndChild, HWND_TOP,
                                           lpBand->rcChild.left,
                                           lpBand->rcChild.top,
                                           lpBand->rcChild.right - lpBand->rcChild.left,
                                           lpBand->rcChild.bottom - lpBand->rcChild.top,
                                           SWP_NOZORDER);
                if (!deferpos)
                    ERR("DeferWindowPos returned NULL\n");
            }
        }
    }
    if (!EndDeferWindowPos(deferpos))
        ERR("EndDeferWindowPos returned NULL\n");

    if (infoPtr->DoRedraw)
        UpdateWindow (infoPtr->hwndSelf);

    /* native (from **1 above) does:
     *   UpdateWindow(rebar)
     *   REBAR_ForceResize
     *   RBN_HEIGHTCHANGE if necessary
     *   if ret from any EqualRect was 0
     *      Goto "BeginDeferWindowPos"
     */
}

/* Returns the next visible band (the first visible band in [i+1; infoPtr->uNumBands) )
 * or infoPtr->uNumBands if none */
static int next_visible(const REBAR_INFO *infoPtr, int i)
{
    unsigned int n;
    for (n = i + 1; n < infoPtr->uNumBands; n++)
        if (!HIDDENBAND(REBAR_GetBand(infoPtr, n)))
            break;
    return n;
}

/* Returns the previous visible band (the last visible band in [0; i) )
 * or -1 if none */
static int prev_visible(const REBAR_INFO *infoPtr, int i)
{
    int n;
    for (n = i - 1; n >= 0; n--)
        if (!HIDDENBAND(REBAR_GetBand(infoPtr, n)))
            break;
    return n;
}

/* Returns the first visible band or infoPtr->uNumBands if none */
static int first_visible(const REBAR_INFO *infoPtr)
{
    return next_visible(infoPtr, -1); /* this works*/
}

/* Returns the first visible band for the given row (or iBand if none) */
static int get_row_begin_for_band(const REBAR_INFO *infoPtr, INT iBand)
{
    int iLastBand = iBand;
    int iRow = REBAR_GetBand(infoPtr, iBand)->iRow;
    /* walk backwards until the row number changes */
    while ((iBand = prev_visible(infoPtr, iBand)) >= 0) {
        if (REBAR_GetBand(infoPtr, iBand)->iRow != iRow)
            break;
        else
            iLastBand = iBand;
    }
    return iLastBand;
}

/* Returns the first visible band for the next row (or infoPtr->uNumBands if none) */
static int get_row_end_for_band(const REBAR_INFO *infoPtr, INT iBand)
{
    int iRow = REBAR_GetBand(infoPtr, iBand)->iRow;
    while ((iBand = next_visible(infoPtr, iBand)) < infoPtr->uNumBands)
        if (REBAR_GetBand(infoPtr, iBand)->iRow != iRow)
            break;
    return iBand;
}

/* Compute the rcBand.{left,right} from the cxEffective bands widths computed earlier.
 * iBeginBand must be visible */
static void REBAR_SetRowRectsX(const REBAR_INFO *infoPtr, INT iBeginBand, INT iEndBand)
{
    int xPos = 0, i;

    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, i);

        /* only touch (and invalidate) bands whose position actually changed */
        if (lpBand->rcBand.left != xPos || lpBand->rcBand.right != xPos + lpBand->cxEffective) {
            lpBand->fDraw |= NTF_INVALIDATE;
            TRACE("Setting rect %d to %d,%d\n", i, xPos, xPos + lpBand->cxEffective);
            lpBand->rcBand.left = xPos;
            lpBand->rcBand.right = xPos + lpBand->cxEffective;
        }
        xPos += lpBand->cxEffective + SEP_WIDTH;
    }
}

/* The rationale of this function is probably as follows: if we have some space
 * to distribute we want to add it to a band on the right. However we don't want
 * to unminimize a minimized band so we search for a band that is big enough.
 * For some reason "big enough" is defined as bigger than the minimum size of the
 * first band in the row */
static REBAR_BAND *REBAR_FindBandToGrow(const REBAR_INFO *infoPtr, INT iBeginBand, INT iEndBand)
{
    INT cxMinFirstBand = 0, i;

    cxMinFirstBand = REBAR_GetBand(infoPtr, iBeginBand)->cxMinBand;

    /* prefer, from the right, a non-fixed band wider than that minimum */
    for (i = prev_visible(infoPtr, iEndBand); i >= iBeginBand; i = prev_visible(infoPtr, i))
        if (REBAR_GetBand(infoPtr, i)->cxEffective > cxMinFirstBand &&
            !(REBAR_GetBand(infoPtr, i)->fStyle & RBBS_FIXEDSIZE))
            break;

    /* fall back to any band matching the first band's minimum width */
    if (i < iBeginBand)
        for (i = prev_visible(infoPtr, iEndBand); i >= iBeginBand; i = prev_visible(infoPtr, i))
            if (REBAR_GetBand(infoPtr, i)->cxMinBand == cxMinFirstBand)
                break;

    TRACE("Extra space for row [%d..%d) should be added to band %d\n", iBeginBand, iEndBand, i);
    return REBAR_GetBand(infoPtr, i);
}

/* Try to shrink the visible bands in [iBeginBand; iEndBand) by cxShrink, starting from the right */
static int REBAR_ShrinkBandsRTL(const REBAR_INFO *infoPtr, INT iBeginBand, INT iEndBand, INT cxShrink, BOOL bEnforce)
{
    REBAR_BAND *lpBand;
    INT width, i;

    TRACE("Shrinking bands [%d..%d) by %d, right-to-left\n", iBeginBand, iEndBand, cxShrink);
    for (i = prev_visible(infoPtr, iEndBand); i >= iBeginBand; i = prev_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);

        /* never shrink a band below its minimum width */
        width = max(lpBand->cxEffective - cxShrink, (int)lpBand->cxMinBand);
        cxShrink -= lpBand->cxEffective - width;
        lpBand->cxEffective = width;
        if (bEnforce && lpBand->cx > lpBand->cxEffective)
            lpBand->cx = lpBand->cxEffective;
        if (cxShrink == 0)
            break;
    }
    /* returns the amount that could NOT be reclaimed */
    return cxShrink;
}

/* Try to shrink the visible bands in [iBeginBand; iEndBand) by cxShrink, starting from the left.
 * iBeginBand must be visible */
static int REBAR_ShrinkBandsLTR(const REBAR_INFO *infoPtr, INT iBeginBand, INT iEndBand, INT cxShrink, BOOL bEnforce)
{
    REBAR_BAND *lpBand;
    INT width, i;

    TRACE("Shrinking bands [%d..%d) by %d, left-to-right\n", iBeginBand, iEndBand, cxShrink);
    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);

        width = max(lpBand->cxEffective - cxShrink, (int)lpBand->cxMinBand);
        cxShrink -= lpBand->cxEffective - width;
        lpBand->cxEffective = width;
        if (bEnforce)
            lpBand->cx = lpBand->cxEffective;
        if (cxShrink == 0)
            break;
    }
    /* returns the amount that could NOT be reclaimed */
    return cxShrink;
}

/* Tries to move a band to a given offset within a row.
 */
static int REBAR_MoveBandToRowOffset(REBAR_INFO *infoPtr, INT iBand, INT iFirstBand,
    INT iLastBand, INT xOff, BOOL reorder)
{
    REBAR_BAND *insertBand = REBAR_GetBand(infoPtr, iBand);
    int xPos = 0, i;
    const BOOL setBreak = REBAR_GetBand(infoPtr, iFirstBand)->fStyle & RBBS_BREAK;

    /* Find the band's new position */
    if(reorder) {
        /* Used during an LR band reorder drag */
        for (i = iFirstBand; i < iLastBand; i = next_visible(infoPtr, i))
        {
            if(xPos > xOff)
                break;
            xPos += REBAR_GetBand(infoPtr, i)->cxEffective + SEP_WIDTH;
        }
    }
    else {
        /* Used during a UD band insertion drag */
        for (i = iFirstBand; i < iLastBand; i = next_visible(infoPtr, i))
        {
            const REBAR_BAND *band = REBAR_GetBand(infoPtr, i);
            if(xPos + band->cxMinBand / 2 > xOff)
                break;
            xPos += band->cxEffective + SEP_WIDTH;
        }
    }

    /* Move the band to its new position */
    DPA_DeletePtr(infoPtr->bands, iBand);
    if(i > iBand)
        i--;
    DPA_InsertPtr(infoPtr->bands, i, insertBand);

    /* Keep RBBS_BREAK on the row's first band only: clear it on the moved
     * band and, if the row started with a break, restore it on the (possibly
     * new) first band.  (Original comment said "last band" — the code
     * targets iFirstBand.) */
    insertBand->fStyle &= ~RBBS_BREAK;
    if(setBreak)
        REBAR_GetBand(infoPtr, iFirstBand)->fStyle |= RBBS_BREAK;

    /* Return the currently grabbed band */
    if(infoPtr->iGrabbedBand == iBand)
    {
        infoPtr->iGrabbedBand = i;
        return i;
    }
    else return -1;
}

/* Set the heights of the visible bands in [iBeginBand; iEndBand) to the max height.
   iBeginBand must be visible */
static int REBAR_SetBandsHeight(const REBAR_INFO *infoPtr, INT iBeginBand, INT iEndBand, INT yStart)
{
    REBAR_BAND *lpBand;
    int yMaxHeight = 0;
    int yPos = yStart;
    int row = REBAR_GetBand(infoPtr, iBeginBand)->iRow;
    int i;

    /* first pass: find the tallest band, recording the running maximum */
    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);
        lpBand->cyRowSoFar = yMaxHeight;
        yMaxHeight = max(yMaxHeight, lpBand->cyMinBand);
    }
    TRACE("Bands [%d; %d) height: %d\n", iBeginBand, iEndBand, yMaxHeight);

    /* second pass: apply that height to every band, stepping yPos per row */
    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);
        /* we may be called for multiple rows if RBS_VARHEIGHT not set */
        if (lpBand->iRow != row) {
            yPos += yMaxHeight + SEP_WIDTH;
            row = lpBand->iRow;
        }

        if (lpBand->rcBand.top != yPos || lpBand->rcBand.bottom != yPos + yMaxHeight) {
            lpBand->fDraw |= NTF_INVALIDATE;
            lpBand->rcBand.top = yPos;
            lpBand->rcBand.bottom = yPos + yMaxHeight;
            TRACE("Band %d: %s\n", i, wine_dbgstr_rect(&lpBand->rcBand));
        }
    }
    return yPos + yMaxHeight;
}

/* Layout the row [iBeginBand; iEndBand). iBeginBand must be visible */
static void REBAR_LayoutRow(const REBAR_INFO *infoPtr, int iBeginBand, int iEndBand,
                            int cx, int *piRow, int *pyPos)
{
    REBAR_BAND *lpBand;
    int i, extra;
    int width = 0;

    TRACE("Adjusting row [%d;%d). Width: %d\n", iBeginBand, iEndBand, cx);
    for (i = iBeginBand; i < iEndBand; i++)
        REBAR_GetBand(infoPtr, i)->iRow = *piRow;

    /* compute the extra space */
    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);
        if (i > iBeginBand)
            width += SEP_WIDTH;
        lpBand->cxEffective = max(lpBand->cxMinBand, lpBand->cx);
        width += lpBand->cxEffective;
    }

    extra = cx - width;
    TRACE("Extra space: %d\n", extra);
    if (extra < 0) {
        int ret = REBAR_ShrinkBandsRTL(infoPtr, iBeginBand, iEndBand, -extra, FALSE);
        if (ret > 0 && next_visible(infoPtr, iBeginBand) != iEndBand)  /* one band may be longer than expected...
 */
            ERR("Error layouting row %d - couldn't shrink for %d pixels (%d total shrink)\n", *piRow, ret, -extra);
    } else if (extra > 0) {
        /* hand all the surplus space to one band on the right */
        lpBand = REBAR_FindBandToGrow(infoPtr, iBeginBand, iEndBand);
        lpBand->cxEffective += extra;
    }

    REBAR_SetRowRectsX(infoPtr, iBeginBand, iEndBand);
    if (infoPtr->dwStyle & RBS_VARHEIGHT)
    {
        if (*piRow > 0)
            *pyPos += SEP_WIDTH;
        *pyPos = REBAR_SetBandsHeight(infoPtr, iBeginBand, iEndBand, *pyPos);
    }
    (*piRow)++;
}

/* Main layout driver: split the bands into rows (honoring RBBS_BREAK and
 * the available width), lay out each row, then recompute the band
 * rectangles, move the children and resize the rebar window itself. */
static VOID REBAR_Layout(REBAR_INFO *infoPtr)
{
    REBAR_BAND *lpBand;
    RECT rcAdj;
    SIZE oldSize;
    INT adjcx, i;
    INT rowstart;
    INT row = 0;
    INT xMin, yPos;

    if (infoPtr->dwStyle & (CCS_NORESIZE | CCS_NOPARENTALIGN) || GetParent(infoPtr->hwndSelf) == NULL)
        GetClientRect(infoPtr->hwndSelf, &rcAdj);
    else
        GetClientRect(GetParent(infoPtr->hwndSelf), &rcAdj);
    TRACE("adjustment rect is (%s)\n", wine_dbgstr_rect(&rcAdj));

    adjcx = get_rect_cx(infoPtr, &rcAdj);

    if (infoPtr->uNumBands == 0) {
        TRACE("No bands - setting size to (0,%d), vert: %x\n", adjcx, infoPtr->dwStyle & CCS_VERT);
        infoPtr->calcSize.cx = adjcx;
        /* the calcSize.cy won't change for a 0 band rebar */
        infoPtr->uNumRows = 0;
        REBAR_ForceResize(infoPtr);
        return;
    }

    yPos = 0;
    xMin = 0;
    rowstart = first_visible(infoPtr);
    /* divide rows */
    for (i = rowstart; i < infoPtr->uNumBands; i = next_visible(infoPtr, i))
    {
        lpBand = REBAR_GetBand(infoPtr, i);

        /* a row ends on an explicit RBBS_BREAK ("hard") or when the next
         * band's minimum no longer fits ("soft") */
        if (i > rowstart && (lpBand->fStyle & RBBS_BREAK || xMin + lpBand->cxMinBand > adjcx)) {
            TRACE("%s break on band %d\n", (lpBand->fStyle & RBBS_BREAK ? "Hard" : "Soft"), i - 1);
            REBAR_LayoutRow(infoPtr, rowstart, i, adjcx, &row, &yPos);
            rowstart = i;
            xMin = 0;
        }
        else
            xMin += SEP_WIDTH;

        xMin += lpBand->cxMinBand;
    }
    if (rowstart < infoPtr->uNumBands)
        REBAR_LayoutRow(infoPtr, rowstart, infoPtr->uNumBands, adjcx, &row, &yPos);

    if (!(infoPtr->dwStyle & RBS_VARHEIGHT))
        yPos = REBAR_SetBandsHeight(infoPtr, first_visible(infoPtr), infoPtr->uNumBands, 0);

    infoPtr->uNumRows = row;

    if (infoPtr->dwStyle & CCS_VERT)
        REBAR_CalcVertBand(infoPtr, 0, infoPtr->uNumBands);
    else
        REBAR_CalcHorzBand(infoPtr, 0, infoPtr->uNumBands);
    /* now compute size of Rebar itself */
    oldSize = infoPtr->calcSize;

    infoPtr->calcSize.cx = adjcx;
    infoPtr->calcSize.cy = yPos;
    TRACE("calcsize size=(%d, %d), origheight=(%d,%d)\n",
          infoPtr->calcSize.cx, infoPtr->calcSize.cy, oldSize.cx, oldSize.cy);

    REBAR_DumpBand (infoPtr);
    REBAR_MoveChildWindows (infoPtr, 0, infoPtr->uNumBands);
    REBAR_ForceResize (infoPtr);

    /* note: after a RBN_HEIGHTCHANGE native sends once again all the RBN_CHILDSIZE
     * and does another ForceResize */
    if (oldSize.cy != infoPtr->calcSize.cy)
    {
        NMHDR heightchange;
        REBAR_Notify(&heightchange, infoPtr, RBN_HEIGHTCHANGE);
        REBAR_AutoSize(infoPtr, FALSE);
    }
}

/* Resize the (RBBS_VARIABLEHEIGHT) children of the visible bands in
 * [iBeginBand; iEndBand) by "extra" pixels, and return the resulting change
 * of the row height. iBeginBand must be visible */
static int REBAR_SizeChildrenToHeight(const REBAR_INFO *infoPtr, int iBeginBand, int iEndBand, int extra, BOOL *fChanged)
{
    int cyBandsOld;
    int cyBandsNew = 0;
    int i;

    TRACE("[%d;%d) by %d\n", iBeginBand, iEndBand, extra);

    cyBandsOld = REBAR_GetBand(infoPtr, iBeginBand)->rcBand.bottom -
                 REBAR_GetBand(infoPtr, iBeginBand)->rcBand.top;
    for (i = iBeginBand; i < iEndBand; i = next_visible(infoPtr, i))
    {
        REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, i);
        int cyMaxChild = cyBandsOld - REBARSPACE(lpBand) + extra;
        int cyChild = round_child_height(lpBand, cyMaxChild);

        if (lpBand->hwndChild && cyChild != lpBand->cyChild && (lpBand->fStyle & RBBS_VARIABLEHEIGHT))
        {
            TRACE("Resizing %d: %d -> %d [%d]\n", i, lpBand->cyChild, cyChild, lpBand->cyMaxChild);
            *fChanged = TRUE;
lpBand->cyChild = cyChild;
            lpBand->fDraw |= NTF_INVALIDATE;
            update_min_band_height(infoPtr, lpBand);
        }
        cyBandsNew = max(cyBandsNew, lpBand->cyMinBand);
    }
    return cyBandsNew - cyBandsOld;
}

/* worker function for RB_SIZETORECT and RBS_AUTOSIZE */
static VOID REBAR_SizeToHeight(REBAR_INFO *infoPtr, int height)
{
    int extra = height - infoPtr->calcSize.cy;  /* may be negative */
    BOOL fChanged = FALSE;
    UINT uNumRows = infoPtr->uNumRows;
    int i;

    if (uNumRows == 0)  /* avoid division by 0 */
        return;

    /* That's not exactly what Windows does but should be similar */

    /* Pass one: break-up/glue rows */
    if (extra > 0)
    {
        /* scan right-to-left, adding RBBS_BREAKs while they pay off */
        for (i = prev_visible(infoPtr, infoPtr->uNumBands); i > 0; i = prev_visible(infoPtr, i))
        {
            REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, i);
            int cyBreakExtra;  /* additional cy for the rebar after a RBBS_BREAK on this band */

            height = lpBand->rcBand.bottom - lpBand->rcBand.top;

            if (infoPtr->dwStyle & RBS_VARHEIGHT)
                cyBreakExtra = lpBand->cyRowSoFar; /* 'height' => 'lpBand->cyRowSoFar' + 'height'*/
            else
                cyBreakExtra = height; /* 'height' => 'height' + 'height'*/
            cyBreakExtra += SEP_WIDTH;

            if (extra <= cyBreakExtra / 2)
                break;

            if (!(lpBand->fStyle & RBBS_BREAK))
            {
                TRACE("Adding break on band %d - extra %d -> %d\n", i, extra, extra - cyBreakExtra);
                lpBand->fStyle |= RBBS_BREAK;
                lpBand->fDraw |= NTF_INVALIDATE;
                fChanged = TRUE;
                extra -= cyBreakExtra;
                uNumRows++;
                /* temporary change for _SizeControlsToHeight. The true values
                 * will be computed in _Layout */
                if (infoPtr->dwStyle & RBS_VARHEIGHT)
                    lpBand->rcBand.bottom = lpBand->rcBand.top + lpBand->cyMinBand;
            }
        }
    }
    /* TODO: else if (extra < 0) { try to remove some RBBS_BREAKs } */

    /* Pass two: increase/decrease control height */
    if (infoPtr->dwStyle & RBS_VARHEIGHT)
    {
        int i = first_visible(infoPtr);
        int iRow = 0;
        while (i < infoPtr->uNumBands)
        {
            REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, i);
            int extraForRow = extra / (int)(uNumRows - iRow);
            int rowEnd;

            /* we can't use get_row_end_for_band as we might have added RBBS_BREAK in the first phase */
            for (rowEnd = next_visible(infoPtr, i); rowEnd < infoPtr->uNumBands;
                 rowEnd = next_visible(infoPtr, rowEnd))
                if (REBAR_GetBand(infoPtr, rowEnd)->iRow != lpBand->iRow ||
                    REBAR_GetBand(infoPtr, rowEnd)->fStyle & RBBS_BREAK)
                    break;

            extra -= REBAR_SizeChildrenToHeight(infoPtr, i, rowEnd, extraForRow, &fChanged);
            TRACE("extra = %d\n", extra);
            i = rowEnd;
            iRow++;
        }
    }
    else
        REBAR_SizeChildrenToHeight(infoPtr, first_visible(infoPtr), infoPtr->uNumBands, extra / infoPtr->uNumRows, &fChanged);

    if (fChanged)
        REBAR_Layout(infoPtr);
}

/* Resize the rebar to its client height and send the RBN_AUTOSIZE
 * notification (RBS_AUTOSIZE support). */
static VOID REBAR_AutoSize(REBAR_INFO *infoPtr, BOOL needsLayout)
{
    RECT rc, rcNew;
    NMRBAUTOSIZE autosize;

    if (needsLayout)
        REBAR_Layout(infoPtr);
    GetClientRect(infoPtr->hwndSelf, &rc);
    REBAR_SizeToHeight(infoPtr, get_rect_cy(infoPtr, &rc));
    GetClientRect(infoPtr->hwndSelf, &rcNew);

    GetClientRect(infoPtr->hwndSelf, &autosize.rcTarget);
    /* NOTE(review): fChanged is TRUE when the client rect did NOT change
     * (memcmp == 0), which looks inverted for a "changed" flag, and the
     * GetClientRect into rcTarget above is overwritten by "rcTarget = rc"
     * below — verify against native RBN_AUTOSIZE behavior before changing. */
    autosize.fChanged = (memcmp(&rc, &rcNew, sizeof(RECT)) == 0);
    autosize.rcTarget = rc;
    autosize.rcActual = rcNew;
    REBAR_Notify((NMHDR *)&autosize, infoPtr, RBN_AUTOSIZE);
}

static VOID REBAR_ValidateBand (const REBAR_INFO *infoPtr, REBAR_BAND *lpBand)
     /* Function: This routine evaluates the band specs supplied */
     /* by the user and updates the following 5 fields in */
     /* the internal band structure: cxHeader, cyHeader, cxMinBand, cyMinBand, fStatus */
{
    UINT header=0;
    UINT textheight=0, imageheight = 0;
    UINT i, nonfixed;
    REBAR_BAND
*tBand; lpBand->fStatus = 0; lpBand->cxMinBand = 0; lpBand->cyMinBand = 0; /* Data coming in from users into the cx... and cy... fields */ /* may be bad, just garbage, because the user never clears */ /* the fields. RB_{SET|INSERT}BAND{A|W} just passes the data */ /* along if the fields exist in the input area. Here we must */ /* determine if the data is valid. I have no idea how MS does */ /* the validation, but it does because the RB_GETBANDINFO */ /* returns a 0 when I know the sample program passed in an */ /* address. Here I will use the algorithm that if the value */ /* is greater than 65535 then it is bad and replace it with */ /* a zero. Feel free to improve the algorithm. - GA 12/2000 */ if (lpBand->cxMinChild > 65535) lpBand->cxMinChild = 0; if (lpBand->cyMinChild > 65535) lpBand->cyMinChild = 0; if (lpBand->cx > 65535) lpBand->cx = 0; if (lpBand->cyChild > 65535) lpBand->cyChild = 0; if (lpBand->cyIntegral > 65535) lpBand->cyIntegral = 0; if (lpBand->cxIdeal > 65535) lpBand->cxIdeal = 0; if (lpBand->cxHeader > 65535) lpBand->cxHeader = 0; /* TODO : we could try return to the caller if a value changed so that */ /* a REBAR_Layout is needed. Till now the caller should call it */ /* it always (we should also check what native does) */ /* Header is where the image, text and gripper exist */ /* in the band and precede the child window. 
*/ /* count number of non-FIXEDSIZE and non-Hidden bands */ nonfixed = 0; for (i=0; i<infoPtr->uNumBands; i++){ tBand = REBAR_GetBand(infoPtr, i); if (!HIDDENBAND(tBand) && !(tBand->fStyle & RBBS_FIXEDSIZE)) nonfixed++; } /* calculate gripper rectangle */ if ( (!(lpBand->fStyle & RBBS_NOGRIPPER)) && ( (lpBand->fStyle & RBBS_GRIPPERALWAYS) || ( !(lpBand->fStyle & RBBS_FIXEDSIZE) && (nonfixed > 1))) ) { lpBand->fStatus |= HAS_GRIPPER; if (infoPtr->dwStyle & CCS_VERT) if (infoPtr->dwStyle & RBS_VERTICALGRIPPER) header += (GRIPPER_HEIGHT + REBAR_PRE_GRIPPER); else header += (GRIPPER_WIDTH + REBAR_PRE_GRIPPER); else header += (REBAR_PRE_GRIPPER + GRIPPER_WIDTH); /* Always have 4 pixels before anything else */ header += REBAR_ALWAYS_SPACE; } /* image is visible */ if (lpBand->iImage != -1 && (infoPtr->himl)) { lpBand->fStatus |= HAS_IMAGE; if (infoPtr->dwStyle & CCS_VERT) { header += (infoPtr->imageSize.cy + REBAR_POST_IMAGE); imageheight = infoPtr->imageSize.cx + 4; } else { header += (infoPtr->imageSize.cx + REBAR_POST_IMAGE); imageheight = infoPtr->imageSize.cy + 4; } } /* text is visible */ if ((lpBand->fMask & RBBIM_TEXT) && (lpBand->lpText) && !(lpBand->fStyle & RBBS_HIDETITLE)) { HDC hdc = GetDC (0); HFONT hOldFont = SelectObject (hdc, infoPtr->hFont); SIZE size; lpBand->fStatus |= HAS_TEXT; GetTextExtentPoint32W (hdc, lpBand->lpText, lstrlenW (lpBand->lpText), &size); header += ((infoPtr->dwStyle & CCS_VERT) ? (size.cy + REBAR_POST_TEXT) : (size.cx + REBAR_POST_TEXT)); textheight = (infoPtr->dwStyle & CCS_VERT) ? 
0 : size.cy; SelectObject (hdc, hOldFont); ReleaseDC (0, hdc); } /* if no gripper but either image or text, then leave space */ if ((lpBand->fStatus & (HAS_IMAGE | HAS_TEXT)) && !(lpBand->fStatus & HAS_GRIPPER)) { header += REBAR_ALWAYS_SPACE; } /* check if user overrode the header value */ if (!(lpBand->fStyle & RBBS_UNDOC_FIXEDHEADER)) lpBand->cxHeader = header; lpBand->cyHeader = max(textheight, imageheight); /* Now compute minimum size of child window */ update_min_band_height(infoPtr, lpBand); /* update lpBand->cyMinBand from cyHeader and cyChild*/ lpBand->cxMinBand = lpBand->cxMinChild + lpBand->cxHeader + REBAR_POST_CHILD; if (lpBand->fStyle & RBBS_USECHEVRON && lpBand->cxMinChild < lpBand->cxIdeal) lpBand->cxMinBand += CHEVRON_WIDTH; } static UINT REBAR_CommonSetupBand(HWND hwnd, const REBARBANDINFOW *lprbbi, REBAR_BAND *lpBand) /* Function: This routine copies the supplied values from */ /* user input (lprbbi) to the internal band structure. */ /* It returns the mask of what changed. 
 */
{
    UINT uChanged = 0x0;

    lpBand->fMask |= lprbbi->fMask;

    if( (lprbbi->fMask & RBBIM_STYLE) &&
        (lpBand->fStyle != lprbbi->fStyle ) )
    {
        lpBand->fStyle = lprbbi->fStyle;
        uChanged |= RBBIM_STYLE;
    }

    if( (lprbbi->fMask & RBBIM_COLORS) &&
       ( ( lpBand->clrFore != lprbbi->clrFore ) ||
         ( lpBand->clrBack != lprbbi->clrBack ) ) )
    {
        lpBand->clrFore = lprbbi->clrFore;
        lpBand->clrBack = lprbbi->clrBack;
        uChanged |= RBBIM_COLORS;
    }

    if( (lprbbi->fMask & RBBIM_IMAGE) &&
       ( lpBand->iImage != lprbbi->iImage ) )
    {
        lpBand->iImage = lprbbi->iImage;
        uChanged |= RBBIM_IMAGE;
    }

    if( (lprbbi->fMask & RBBIM_CHILD) &&
       (lprbbi->hwndChild != lpBand->hwndChild ) )
    {
        if (lprbbi->hwndChild)
        {
            lpBand->hwndChild = lprbbi->hwndChild;
            /* the rebar adopts the child window */
            lpBand->hwndPrevParent = SetParent (lpBand->hwndChild, hwnd);
            /* below in trace from WinRAR */
            ShowWindow(lpBand->hwndChild, SW_SHOWNOACTIVATE | SW_SHOWNORMAL);
            /* above in trace from WinRAR */
        }
        else
        {
            TRACE("child: %p  prev parent: %p\n",
                  lpBand->hwndChild, lpBand->hwndPrevParent);
            lpBand->hwndChild = 0;
            lpBand->hwndPrevParent = 0;
        }
        uChanged |= RBBIM_CHILD;
    }

    if( (lprbbi->fMask & RBBIM_CHILDSIZE) &&
        ( (lpBand->cxMinChild != lprbbi->cxMinChild) ||
          (lpBand->cyMinChild != lprbbi->cyMinChild ) ||
          ( (lprbbi->cbSize >= REBARBANDINFOA_V6_SIZE && (lpBand->fStyle & RBBS_VARIABLEHEIGHT)) &&
            ( (lpBand->cyChild    != lprbbi->cyChild ) ||
              (lpBand->cyMaxChild != lprbbi->cyMaxChild ) ||
              (lpBand->cyIntegral != lprbbi->cyIntegral ) ) ) ||
          ( (lprbbi->cbSize < REBARBANDINFOA_V6_SIZE) &&
            ( (lpBand->cyChild ||
               lpBand->cyMaxChild ||
               lpBand->cyIntegral ) ) ) ) )
    {
        lpBand->cxMinChild = lprbbi->cxMinChild;
        lpBand->cyMinChild = lprbbi->cyMinChild;
        /* These fields where added in WIN32_IE == 0x400 and are set only
         * for RBBS_VARIABLEHEIGHT bands */
        if (lprbbi->cbSize >= REBARBANDINFOA_V6_SIZE && (lpBand->fStyle & RBBS_VARIABLEHEIGHT)) {
            lpBand->cyMaxChild = lprbbi->cyMaxChild;
            lpBand->cyIntegral = lprbbi->cyIntegral;
            /* make (cyChild - cyMinChild) a multiple of cyIntegral */
            lpBand->cyChild = round_child_height(lpBand, lprbbi->cyChild);
        }
        else {
            lpBand->cyChild    = lpBand->cyMinChild;
            lpBand->cyMaxChild = 0x7fffffff;
            lpBand->cyIntegral = 0;
        }
        uChanged |= RBBIM_CHILDSIZE;
    }

    if( (lprbbi->fMask & RBBIM_SIZE) &&
        (lpBand->cx != lprbbi->cx ) )
    {
        lpBand->cx = lprbbi->cx;
        uChanged |= RBBIM_SIZE;
    }

    if( (lprbbi->fMask & RBBIM_BACKGROUND) &&
       ( lpBand->hbmBack != lprbbi->hbmBack ) )
    {
        lpBand->hbmBack = lprbbi->hbmBack;
        uChanged |= RBBIM_BACKGROUND;
    }

    if( (lprbbi->fMask & RBBIM_ID) &&
        (lpBand->wID != lprbbi->wID ) )
    {
        lpBand->wID = lprbbi->wID;
        uChanged |= RBBIM_ID;
    }

    /* check for additional data */
    if (lprbbi->cbSize >= REBARBANDINFOA_V6_SIZE)
    {
        if( (lprbbi->fMask & RBBIM_IDEALSIZE) &&
            ( lpBand->cxIdeal != lprbbi->cxIdeal ) )
        {
            lpBand->cxIdeal = lprbbi->cxIdeal;
            uChanged |= RBBIM_IDEALSIZE;
        }

        if( (lprbbi->fMask & RBBIM_LPARAM) &&
            (lpBand->lParam != lprbbi->lParam ) )
        {
            lpBand->lParam = lprbbi->lParam;
            uChanged |= RBBIM_LPARAM;
        }

        if( (lprbbi->fMask & RBBIM_HEADERSIZE) &&
            (lpBand->cxHeader != lprbbi->cxHeader ) )
        {
            lpBand->cxHeader = lprbbi->cxHeader;
            lpBand->fStyle |= RBBS_UNDOC_FIXEDHEADER;
            uChanged |= RBBIM_HEADERSIZE;
        }
    }

    return uChanged;
}

static LRESULT
REBAR_EraseBkGnd (const REBAR_INFO *infoPtr, HDC hdc)
/* Function:  This erases the background rectangle by drawing  */
/*  each band with its background color (or the default) and   */
/*  draws each bands right separator if necessary. The row     */
/*  separators are drawn on the first band of the next row.
 */
{
    REBAR_BAND *lpBand;
    UINT i;
    INT oldrow;
    RECT cr;
    COLORREF old = CLR_NONE, new;
    HTHEME theme = GetWindowTheme (infoPtr->hwndSelf);

    GetClientRect (infoPtr->hwndSelf, &cr);

    oldrow = -1;
    for(i=0; i<infoPtr->uNumBands; i++) {
        RECT rcBand;
        lpBand = REBAR_GetBand(infoPtr, i);
        if (HIDDENBAND(lpBand)) continue;
        translate_rect(infoPtr, &rcBand, &lpBand->rcBand);

        /* draw band separator between rows */
        if (lpBand->iRow != oldrow) {
            oldrow = lpBand->iRow;
            if (infoPtr->dwStyle & RBS_BANDBORDERS) {
                RECT rcRowSep;
                rcRowSep = rcBand;
                if (infoPtr->dwStyle & CCS_VERT) {
                    rcRowSep.right  += SEP_WIDTH_SIZE;
                    rcRowSep.bottom = infoPtr->calcSize.cx;
                    if (theme)
                        DrawThemeEdge (theme, hdc, RP_BAND, 0, &rcRowSep, EDGE_ETCHED, BF_RIGHT, NULL);
                    else
                        DrawEdge (hdc, &rcRowSep, EDGE_ETCHED, BF_RIGHT);
                }
                else {
                    rcRowSep.bottom += SEP_WIDTH_SIZE;
                    rcRowSep.right  = infoPtr->calcSize.cx;
                    if (theme)
                        DrawThemeEdge (theme, hdc, RP_BAND, 0, &rcRowSep, EDGE_ETCHED, BF_BOTTOM, NULL);
                    else
                        DrawEdge (hdc, &rcRowSep, EDGE_ETCHED, BF_BOTTOM);
                }
                TRACE ("drawing band separator bottom (%s)\n",
                       wine_dbgstr_rect(&rcRowSep));
            }
        }

        /* draw band separator between bands in a row */
        if (infoPtr->dwStyle & RBS_BANDBORDERS && lpBand->rcBand.left > 0) {
            RECT rcSep;
            rcSep = rcBand;
            if (infoPtr->dwStyle & CCS_VERT) {
                rcSep.bottom = rcSep.top;
                rcSep.top   -= SEP_WIDTH_SIZE;
                if (theme)
                    DrawThemeEdge (theme, hdc, RP_BAND, 0, &rcSep, EDGE_ETCHED, BF_BOTTOM, NULL);
                else
                    DrawEdge (hdc, &rcSep, EDGE_ETCHED, BF_BOTTOM);
            }
            else {
                rcSep.right = rcSep.left;
                rcSep.left -= SEP_WIDTH_SIZE;
                if (theme)
                    DrawThemeEdge (theme, hdc, RP_BAND, 0, &rcSep, EDGE_ETCHED, BF_RIGHT, NULL);
                else
                    DrawEdge (hdc, &rcSep, EDGE_ETCHED, BF_RIGHT);
            }
            TRACE("drawing band separator right (%s)\n",
                  wine_dbgstr_rect(&rcSep));
        }

        /* draw the actual background */
        if (lpBand->clrBack != CLR_NONE) {
            new = (lpBand->clrBack == CLR_DEFAULT) ? infoPtr->clrBtnFace :
                lpBand->clrBack;
#if GLATESTING
            /* testing only - make background green to see it */
            new = RGB(0,128,0);
#endif
        }
        else {
            /* In the absence of documentation for Rebar vs. CLR_NONE,
             * we will use the default BtnFace color. Note documentation
             * exists for Listview and Imagelist.
             */
            new = infoPtr->clrBtnFace;
#if GLATESTING
            /* testing only - make background green to see it */
            new = RGB(0,128,0);
#endif
        }

        if (theme)
        {
            /* When themed, the background color is ignored (but not a
             * background bitmap */
            DrawThemeBackground (theme, hdc, 0, 0, &cr, &rcBand);
        }
        else
        {
            old = SetBkColor (hdc, new);
            TRACE("%s background color=0x%06x, band %s\n",
                  (lpBand->clrBack == CLR_NONE) ? "none" :
                  ((lpBand->clrBack == CLR_DEFAULT) ? "dft" : ""),
                  GetBkColor(hdc), wine_dbgstr_rect(&rcBand));
            ExtTextOutW (hdc, 0, 0, ETO_OPAQUE, &rcBand, NULL, 0, 0);
            if (lpBand->clrBack != CLR_NONE)
                SetBkColor (hdc, old);
        }
    }
    return TRUE;
}

/* Maps a client-area point to the band index and hit-test flag
 * (gripper / caption / client / chevron / nowhere) under it. */
static void
REBAR_InternalHitTest (const REBAR_INFO *infoPtr, const POINT *lpPt, UINT *pFlags, INT *pBand)
{
    REBAR_BAND *lpBand;
    RECT rect;
    UINT  iCount;

    GetClientRect (infoPtr->hwndSelf, &rect);

    *pFlags = RBHT_NOWHERE;
    if (PtInRect (&rect, *lpPt))
    {
	if (infoPtr->uNumBands == 0) {
	    *pFlags = RBHT_NOWHERE;
	    if (pBand)
		*pBand = -1;
	    TRACE("NOWHERE\n");
	    return;
	}
	else {
	    /* somewhere inside */
	    for (iCount = 0; iCount < infoPtr->uNumBands; iCount++) {
		RECT rcBand;
		lpBand = REBAR_GetBand(infoPtr, iCount);
		translate_rect(infoPtr, &rcBand, &lpBand->rcBand);
		if (HIDDENBAND(lpBand)) continue;
		if (PtInRect (&rcBand, *lpPt)) {
		    if (pBand)
			*pBand = iCount;
		    if (PtInRect (&lpBand->rcGripper, *lpPt)) {
			*pFlags = RBHT_GRABBER;
			TRACE("ON GRABBER %d\n", iCount);
			return;
		    }
		    else if (PtInRect (&lpBand->rcCapImage, *lpPt)) {
			*pFlags = RBHT_CAPTION;
			TRACE("ON CAPTION %d\n", iCount);
			return;
		    }
		    else if (PtInRect (&lpBand->rcCapText, *lpPt)) {
			*pFlags = RBHT_CAPTION;
			TRACE("ON CAPTION %d\n", iCount);
			return;
		    }
		    else if (PtInRect (&lpBand->rcChild, *lpPt)) {
			*pFlags = RBHT_CLIENT;
TRACE("ON CLIENT %d\n", iCount); return; } else if (PtInRect (&lpBand->rcChevron, *lpPt)) { *pFlags = RBHT_CHEVRON; TRACE("ON CHEVRON %d\n", iCount); return; } else { *pFlags = RBHT_NOWHERE; TRACE("NOWHERE %d\n", iCount); return; } } } *pFlags = RBHT_NOWHERE; if (pBand) *pBand = -1; TRACE("NOWHERE\n"); return; } } else { *pFlags = RBHT_NOWHERE; if (pBand) *pBand = -1; TRACE("NOWHERE\n"); return; } } static void REBAR_HandleLRDrag (REBAR_INFO *infoPtr, const POINT *ptsmove) /* Function: This will implement the functionality of a */ /* Gripper drag within a row. It will not implement "out- */ /* of-row" drags. (They are detected and handled in */ /* REBAR_MouseMove.) */ { REBAR_BAND *hitBand; INT iHitBand, iRowBegin, iRowEnd; INT movement, xBand, cxLeft = 0; BOOL shrunkBands = FALSE; iHitBand = infoPtr->iGrabbedBand; iRowBegin = get_row_begin_for_band(infoPtr, iHitBand); iRowEnd = get_row_end_for_band(infoPtr, iHitBand); hitBand = REBAR_GetBand(infoPtr, iHitBand); xBand = hitBand->rcBand.left; movement = (infoPtr->dwStyle&CCS_VERT ? ptsmove->y : ptsmove->x) - (xBand + REBAR_PRE_GRIPPER - infoPtr->ihitoffset); /* Dragging the first band in a row cannot cause shrinking */ if(iHitBand != iRowBegin) { if (movement < 0) { cxLeft = REBAR_ShrinkBandsRTL(infoPtr, iRowBegin, iHitBand, -movement, TRUE); if(cxLeft < -movement) { hitBand->cxEffective += -movement - cxLeft; hitBand->cx = hitBand->cxEffective; shrunkBands = TRUE; } } else if (movement > 0) { cxLeft = movement; if (prev_visible(infoPtr, iHitBand) >= 0) cxLeft = REBAR_ShrinkBandsLTR(infoPtr, iHitBand, iRowEnd, movement, TRUE); if(cxLeft < movement) { REBAR_BAND *lpPrev = REBAR_GetBand(infoPtr, prev_visible(infoPtr, iHitBand)); lpPrev->cxEffective += movement - cxLeft; lpPrev->cx = hitBand->cxEffective; shrunkBands = TRUE; } } } if(!shrunkBands) { /* It was not possible to move the band by shrinking bands. * Try relocating the band instead. 
         */
        REBAR_MoveBandToRowOffset(infoPtr, iHitBand, iRowBegin,
            iRowEnd, xBand + movement, TRUE);
    }

    REBAR_SetRowRectsX(infoPtr, iRowBegin, iRowEnd);
    if (infoPtr->dwStyle & CCS_VERT)
        REBAR_CalcVertBand(infoPtr, 0, infoPtr->uNumBands);
    else
        REBAR_CalcHorzBand(infoPtr, 0, infoPtr->uNumBands);
    REBAR_MoveChildWindows(infoPtr, iRowBegin, iRowEnd);
}

/* Handles a gripper drag that leaves the band's current row: moves the
 * band above the top row, below the bottom row, or into the row the
 * mouse is hovering over, then re-layouts. */
static void
REBAR_HandleUDDrag (REBAR_INFO *infoPtr, const POINT *ptsmove)
{
    INT yOff = (infoPtr->dwStyle & CCS_VERT) ? ptsmove->x : ptsmove->y;
    INT iHitBand, iRowBegin, iNextRowBegin;
    REBAR_BAND *hitBand, *rowBeginBand;

    if(infoPtr->uNumBands <= 0)
        ERR("There are no bands in this rebar\n");

    /* Up/down dragging can only occur when there is more than one
     * band in the rebar */
    if(infoPtr->uNumBands <= 1)
        return;

    iHitBand = infoPtr->iGrabbedBand;
    hitBand = REBAR_GetBand(infoPtr, iHitBand);

    /* If we're taking a band that has the RBBS_BREAK style set, this
     * style needs to be reapplied to the band that is going to become
     * the new start of the row. */
    if((hitBand->fStyle & RBBS_BREAK) &&
        (iHitBand < infoPtr->uNumBands - 1))
        REBAR_GetBand(infoPtr, iHitBand + 1)->fStyle |= RBBS_BREAK;

    if(yOff < 0)
    {
        /* Place the band above the current top row */
        if(iHitBand==0 && (infoPtr->uNumBands==1 ||
            REBAR_GetBand(infoPtr, 1)->fStyle&RBBS_BREAK))
            return;
        DPA_DeletePtr(infoPtr->bands, iHitBand);
        hitBand->fStyle &= ~RBBS_BREAK;
        REBAR_GetBand(infoPtr, 0)->fStyle |= RBBS_BREAK;
        infoPtr->iGrabbedBand = DPA_InsertPtr(
            infoPtr->bands, 0, hitBand);
    }
    else if(yOff > REBAR_GetBand(infoPtr, infoPtr->uNumBands - 1)->rcBand.bottom)
    {
        /* Place the band below the current bottom row */
        if(iHitBand == infoPtr->uNumBands-1 && hitBand->fStyle&RBBS_BREAK)
            return;
        DPA_DeletePtr(infoPtr->bands, iHitBand);
        hitBand->fStyle |= RBBS_BREAK;
        infoPtr->iGrabbedBand = DPA_InsertPtr(
            infoPtr->bands, infoPtr->uNumBands - 1, hitBand);
    }
    else
    {
        /* Place the band in the prexisting row the mouse is hovering over */
        iRowBegin = first_visible(infoPtr);
        while(iRowBegin < infoPtr->uNumBands)
        {
            iNextRowBegin = get_row_end_for_band(infoPtr, iRowBegin);
            rowBeginBand = REBAR_GetBand(infoPtr, iRowBegin);
            if(rowBeginBand->rcBand.bottom > yOff)
            {
                REBAR_MoveBandToRowOffset(
                    infoPtr, iHitBand, iRowBegin, iNextRowBegin,
                    ((infoPtr->dwStyle & CCS_VERT) ? ptsmove->y : ptsmove->x)
                        - REBAR_PRE_GRIPPER - infoPtr->ihitoffset, FALSE);
                break;
            }

            iRowBegin = iNextRowBegin;
        }
    }

    REBAR_Layout(infoPtr);
}


/* << REBAR_BeginDrag >> */


/* Handles RB_DELETEBAND: notifies, hides the child, frees the band and
 * removes it from the band array. */
static LRESULT
REBAR_DeleteBand (REBAR_INFO *infoPtr, WPARAM wParam)
{
    UINT uBand = (UINT)wParam;
    REBAR_BAND *lpBand;

    if (uBand >= infoPtr->uNumBands)
        return FALSE;

    TRACE("deleting band %u!\n", uBand);
    lpBand = REBAR_GetBand(infoPtr, uBand);
    REBAR_Notify_NMREBAR (infoPtr, uBand, RBN_DELETINGBAND);
    /* TODO: a return of 1 should probably cancel the deletion */

    if (lpBand->hwndChild)
        ShowWindow(lpBand->hwndChild, SW_HIDE);
    Free(lpBand->lpText);
    Free(lpBand);

    infoPtr->uNumBands--;
    DPA_DeletePtr(infoPtr->bands, uBand);

    REBAR_Notify_NMREBAR (infoPtr, -1, RBN_DELETEDBAND);

    /* if only 1 band left the re-validate to possible eliminate gripper */
    if (infoPtr->uNumBands == 1)
      REBAR_ValidateBand (infoPtr, REBAR_GetBand(infoPtr, 0));

    REBAR_Layout(infoPtr);

    return TRUE;
}


/* << REBAR_DragMove >> */
/* << REBAR_EndDrag >> */


static LRESULT
REBAR_GetBandBorders (const REBAR_INFO *infoPtr, UINT uBand, RECT *lpRect)
{
    REBAR_BAND *lpBand;

    if (!lpRect)
        return 0;
    if (uBand >= infoPtr->uNumBands)
        return 0;

    lpBand = REBAR_GetBand(infoPtr, uBand);

    /* FIXME - the following values were determined by experimentation */
    /* with the REBAR Control Spy. I have guesses as to what the 4 and  */
    /* 1 are, but I am not sure. There doesn't seem to be any actual    */
    /* difference in size of the control area with and without the     */
    /* style.
 - GA */
    if (infoPtr->dwStyle & RBS_BANDBORDERS) {
        if (infoPtr->dwStyle & CCS_VERT) {
	    lpRect->left   = 1;
	    lpRect->top    = lpBand->cxHeader + 4;
	    lpRect->right  = 1;
	    lpRect->bottom = 0;
	}
	else {
	    lpRect->left   = lpBand->cxHeader + 4;
	    lpRect->top    = 1;
	    lpRect->right  = 0;
	    lpRect->bottom = 1;
	}
    }
    else {
	lpRect->left = lpBand->cxHeader;
    }
    return 0;
}


static inline LRESULT
REBAR_GetBandCount (const REBAR_INFO *infoPtr)
{
    TRACE("band count %u!\n", infoPtr->uNumBands);
    return infoPtr->uNumBands;
}


/* Handles RB_GETBANDINFO{A,W}: copies the requested fields of a band
 * into the caller's REBARBANDINFO structure. */
static LRESULT
REBAR_GetBandInfoT(const REBAR_INFO *infoPtr, UINT uIndex, LPREBARBANDINFOW lprbbi, BOOL bUnicode)
{
    REBAR_BAND *lpBand;

    if (!lprbbi || lprbbi->cbSize < REBARBANDINFOA_V3_SIZE)
	return FALSE;

    if (uIndex >= infoPtr->uNumBands)
	return FALSE;

    TRACE("index %u (bUnicode=%d)\n", uIndex, bUnicode);

    /* copy band information */
    lpBand = REBAR_GetBand(infoPtr, uIndex);

    if (lprbbi->fMask & RBBIM_STYLE)
	lprbbi->fStyle = lpBand->fStyle;

    if (lprbbi->fMask & RBBIM_COLORS) {
	lprbbi->clrFore = lpBand->clrFore;
	lprbbi->clrBack = lpBand->clrBack;
	if (lprbbi->clrBack == CLR_DEFAULT)
	    lprbbi->clrBack = infoPtr->clrBtnFace;
    }

    if (lprbbi->fMask & RBBIM_TEXT) {
        if (bUnicode)
            Str_GetPtrW(lpBand->lpText, lprbbi->lpText, lprbbi->cch);
        else
            Str_GetPtrWtoA(lpBand->lpText, (LPSTR)lprbbi->lpText, lprbbi->cch);
    }

    if (lprbbi->fMask & RBBIM_IMAGE)
	lprbbi->iImage = lpBand->iImage;

    if (lprbbi->fMask & RBBIM_CHILD)
	lprbbi->hwndChild = lpBand->hwndChild;

    if (lprbbi->fMask & RBBIM_CHILDSIZE) {
	lprbbi->cxMinChild = lpBand->cxMinChild;
	lprbbi->cyMinChild = lpBand->cyMinChild;
        /* to make tests pass we follow Windows behaviour and allow to read
         * these fields only for RBBS_VARIABLEHEIGHTS bands */
        if (lprbbi->cbSize >= REBARBANDINFOW_V6_SIZE && (lpBand->fStyle & RBBS_VARIABLEHEIGHT)) {
	    lprbbi->cyChild    = lpBand->cyChild;
	    lprbbi->cyMaxChild = lpBand->cyMaxChild;
	    lprbbi->cyIntegral = lpBand->cyIntegral;
	}
    }

    if (lprbbi->fMask & RBBIM_SIZE)
	lprbbi->cx = lpBand->cx;

    if (lprbbi->fMask & RBBIM_BACKGROUND)
	lprbbi->hbmBack = lpBand->hbmBack;

    if (lprbbi->fMask & RBBIM_ID)
	lprbbi->wID = lpBand->wID;

    /* check for additional data */
    if (lprbbi->cbSize >= REBARBANDINFOW_V6_SIZE) {
	if (lprbbi->fMask & RBBIM_IDEALSIZE)
	    lprbbi->cxIdeal = lpBand->cxIdeal;

	if (lprbbi->fMask & RBBIM_LPARAM)
	    lprbbi->lParam = lpBand->lParam;

	if (lprbbi->fMask & RBBIM_HEADERSIZE)
	    lprbbi->cxHeader = lpBand->cxHeader;
    }

    REBAR_DumpBandInfo(lprbbi);

    return TRUE;
}


static LRESULT
REBAR_GetBarHeight (const REBAR_INFO *infoPtr)
{
    INT nHeight;

    nHeight = infoPtr->calcSize.cy;

    TRACE("height = %d\n", nHeight);

    return nHeight;
}


static LRESULT
REBAR_GetBarInfo (const REBAR_INFO *infoPtr, LPREBARINFO lpInfo)
{
    if (!lpInfo || lpInfo->cbSize < sizeof (REBARINFO))
	return FALSE;

    TRACE("getting bar info!\n");

    if (infoPtr->himl) {
	lpInfo->himl = infoPtr->himl;
	lpInfo->fMask |= RBIM_IMAGELIST;
    }

    return TRUE;
}


static inline LRESULT
REBAR_GetBkColor (const REBAR_INFO *infoPtr)
{
    COLORREF clr = infoPtr->clrBk;

    if (clr == CLR_DEFAULT)
      clr = infoPtr->clrBtnFace;

    TRACE("background color 0x%06x!\n", clr);

    return clr;
}


/* << REBAR_GetColorScheme >> */
/* << REBAR_GetDropTarget >> */


static LRESULT
REBAR_GetPalette (const REBAR_INFO *infoPtr)
{
    FIXME("empty stub!\n");

    return 0;
}


static LRESULT
REBAR_GetRect (const REBAR_INFO *infoPtr, INT iBand, RECT *lprc)
{
    REBAR_BAND *lpBand;

    if (iBand < 0 || iBand >= infoPtr->uNumBands)
	return FALSE;
    if (!lprc)
	return FALSE;

    lpBand = REBAR_GetBand(infoPtr, iBand);
    /* For CCS_VERT the coordinates will be swapped - like on Windows */
    CopyRect (lprc, &lpBand->rcBand);

    TRACE("band %d, (%s)\n", iBand, wine_dbgstr_rect(lprc));

    return TRUE;
}


static inline LRESULT
REBAR_GetRowCount (const REBAR_INFO *infoPtr)
{
    TRACE("%u\n", infoPtr->uNumRows);

    return infoPtr->uNumRows;
}


/* Returns the height of the tallest visible band in row iRow. */
static LRESULT
REBAR_GetRowHeight (const REBAR_INFO *infoPtr, INT iRow)
{
    int j = 0, ret = 0;
    UINT i;
    REBAR_BAND *lpBand;

    for (i=0; i<infoPtr->uNumBands; i++) {
	lpBand = REBAR_GetBand(infoPtr, i);
	if (HIDDENBAND(lpBand)) continue;
	if (lpBand->iRow != iRow) continue;
	j = lpBand->rcBand.bottom - lpBand->rcBand.top;
	if (j > ret) ret = j;
    }

    TRACE("row %d, height %d\n", iRow, ret);

    return ret;
}


static inline LRESULT
REBAR_GetTextColor (const REBAR_INFO *infoPtr)
{
    TRACE("text color 0x%06x!\n", infoPtr->clrText);

    return infoPtr->clrText;
}


static inline LRESULT
REBAR_GetToolTips (const REBAR_INFO *infoPtr)
{
    return (LRESULT)infoPtr->hwndToolTip;
}


static inline LRESULT
REBAR_GetUnicodeFormat (const REBAR_INFO *infoPtr)
{
    TRACE("%s hwnd=%p\n",
	  infoPtr->bUnicode ? "TRUE" : "FALSE", infoPtr->hwndSelf);

    return infoPtr->bUnicode;
}


static inline LRESULT
REBAR_GetVersion (const REBAR_INFO *infoPtr)
{
    TRACE("version %d\n", infoPtr->iVersion);
    return infoPtr->iVersion;
}


static LRESULT
REBAR_HitTest (const REBAR_INFO *infoPtr, LPRBHITTESTINFO lprbht)
{
    if (!lprbht)
	return -1;

    REBAR_InternalHitTest (infoPtr, &lprbht->pt, &lprbht->flags, &lprbht->iBand);

    return lprbht->iBand;
}


static LRESULT
REBAR_IdToIndex (const REBAR_INFO *infoPtr, UINT uId)
{
    UINT i;

    if (infoPtr->uNumBands < 1)
	return -1;

    for (i = 0; i < infoPtr->uNumBands; i++) {
	if (REBAR_GetBand(infoPtr, i)->wID == uId) {
	    TRACE("id %u is band %u found!\n", uId, i);
	    return i;
	}
    }

    TRACE("id %u is not found\n", uId);
    return -1;
}


/* Handles RB_INSERTBAND{A,W}: allocates a band, inserts it at iIndex
 * (-1 or out of range appends), fills it from lprbbi and re-layouts. */
static LRESULT
REBAR_InsertBandT(REBAR_INFO *infoPtr, INT iIndex, const REBARBANDINFOW *lprbbi, BOOL bUnicode)
{
    REBAR_BAND *lpBand;

    if (!lprbbi || lprbbi->cbSize < REBARBANDINFOA_V3_SIZE)
	return FALSE;

    /* trace the index as signed to see the -1 */
    TRACE("insert band at %d (bUnicode=%d)!\n", iIndex, bUnicode);
    REBAR_DumpBandInfo(lprbbi);

    if (!(lpBand = Alloc(sizeof(REBAR_BAND)))) return FALSE;
    if ((iIndex == -1) || (iIndex > infoPtr->uNumBands))
        iIndex = infoPtr->uNumBands;
    if (DPA_InsertPtr(infoPtr->bands, iIndex, lpBand) == -1)
    {
        Free(lpBand);
        return FALSE;
    }
    infoPtr->uNumBands++;

    TRACE("index %d!\n", iIndex);

    /* initialize band */
    memset(lpBand, 0, sizeof(*lpBand));
    lpBand->clrFore = infoPtr->clrText == CLR_NONE ?
*/ ERR("Illegal MaximizeBand, requested=%d, current band count=%d\n", iBand, infoPtr->uNumBands); return FALSE; } lpBand = REBAR_GetBand(infoPtr, iBand); if (lpBand->fStyle & RBBS_HIDDEN) { /* Windows is buggy and creates a hole */ WARN("Ignoring maximize request on a hidden band (%d)\n", iBand); return FALSE; } cxIdealBand = lpBand->cxIdeal + lpBand->cxHeader + REBAR_POST_CHILD; if (lParam && (lpBand->cxEffective < cxIdealBand)) cxDesired = cxIdealBand; else cxDesired = infoPtr->calcSize.cx; iRowBegin = get_row_begin_for_band(infoPtr, iBand); iRowEnd = get_row_end_for_band(infoPtr, iBand); extraOrig = extra = cxDesired - lpBand->cxEffective; if (extra > 0) extra = REBAR_ShrinkBandsRTL(infoPtr, iRowBegin, iBand, extra, TRUE); if (extra > 0) extra = REBAR_ShrinkBandsLTR(infoPtr, next_visible(infoPtr, iBand), iRowEnd, extra, TRUE); lpBand->cxEffective += extraOrig - extra; lpBand->cx = lpBand->cxEffective; TRACE("(%d, %ld): Wanted size %d, obtained %d (shrink %d, %d)\n", iBand, lParam, cxDesired, lpBand->cx, extraOrig, extra); REBAR_SetRowRectsX(infoPtr, iRowBegin, iRowEnd); if (infoPtr->dwStyle & CCS_VERT) REBAR_CalcVertBand(infoPtr, iRowBegin, iRowEnd); else REBAR_CalcHorzBand(infoPtr, iRowBegin, iRowEnd); REBAR_MoveChildWindows(infoPtr, iRowBegin, iRowEnd); return TRUE; } static LRESULT REBAR_MinimizeBand (const REBAR_INFO *infoPtr, INT iBand) { REBAR_BAND *lpBand; int iPrev, iRowBegin, iRowEnd; /* A "minimize" band is equivalent to "dragging" the gripper * of than band to the right till the band is only the size * of the cxHeader. */ /* Validate */ if (infoPtr->uNumBands == 0 || iBand < 0 || iBand >= infoPtr->uNumBands) { /* error !!! 
*/ ERR("Illegal MinimizeBand, requested=%d, current band count=%d\n", iBand, infoPtr->uNumBands); return FALSE; } /* compute amount of movement and validate */ lpBand = REBAR_GetBand(infoPtr, iBand); if (lpBand->fStyle & RBBS_HIDDEN) { /* Windows is buggy and creates a hole/overlap */ WARN("Ignoring minimize request on a hidden band (%d)\n", iBand); return FALSE; } iPrev = prev_visible(infoPtr, iBand); /* if first band in row */ if (iPrev < 0 || REBAR_GetBand(infoPtr, iPrev)->iRow != lpBand->iRow) { int iNext = next_visible(infoPtr, iBand); if (iNext < infoPtr->uNumBands && REBAR_GetBand(infoPtr, iNext)->iRow == lpBand->iRow) { TRACE("(%d): Minimizing the first band in row is by maximizing the second\n", iBand); REBAR_MaximizeBand(infoPtr, iNext, FALSE); } else TRACE("(%d): Only one band in row - nothing to do\n", iBand); return TRUE; } REBAR_GetBand(infoPtr, iPrev)->cxEffective += lpBand->cxEffective - lpBand->cxMinBand; REBAR_GetBand(infoPtr, iPrev)->cx = REBAR_GetBand(infoPtr, iPrev)->cxEffective; lpBand->cx = lpBand->cxEffective = lpBand->cxMinBand; iRowBegin = get_row_begin_for_band(infoPtr, iBand); iRowEnd = get_row_end_for_band(infoPtr, iBand); REBAR_SetRowRectsX(infoPtr, iRowBegin, iRowEnd); if (infoPtr->dwStyle & CCS_VERT) REBAR_CalcVertBand(infoPtr, iRowBegin, iRowEnd); else REBAR_CalcHorzBand(infoPtr, iRowBegin, iRowEnd); REBAR_MoveChildWindows(infoPtr, iRowBegin, iRowEnd); return FALSE; } static LRESULT REBAR_MoveBand (REBAR_INFO *infoPtr, INT iFrom, INT iTo) { REBAR_BAND *lpBand; /* Validate */ if ((infoPtr->uNumBands == 0) || (iFrom < 0) || iFrom >= infoPtr->uNumBands || (iTo < 0) || iTo >= infoPtr->uNumBands) { /* error !!! 
*/ ERR("Illegal MoveBand, from=%d, to=%d, current band count=%d\n", iFrom, iTo, infoPtr->uNumBands); return FALSE; } lpBand = REBAR_GetBand(infoPtr, iFrom); DPA_DeletePtr(infoPtr->bands, iFrom); DPA_InsertPtr(infoPtr->bands, iTo, lpBand); TRACE("moved band %d to index %d\n", iFrom, iTo); REBAR_DumpBand (infoPtr); /* **************************************************** */ /* */ /* We do not do a REBAR_Layout here because the native */ /* control does not do that. The actual layout and */ /* repaint is done by the *next* real action, ex.: */ /* RB_INSERTBAND, RB_DELETEBAND, RB_SIZETORECT, etc. */ /* */ /* **************************************************** */ return TRUE; } /* return TRUE if two strings are different */ static BOOL REBAR_strdifW( LPCWSTR a, LPCWSTR b ) { return ( (a && !b) || (b && !a) || (a && b && lstrcmpW(a, b) ) ); } static LRESULT REBAR_SetBandInfoT(REBAR_INFO *infoPtr, INT iBand, const REBARBANDINFOW *lprbbi, BOOL bUnicode) { REBAR_BAND *lpBand; UINT uChanged; if (!lprbbi || lprbbi->cbSize < REBARBANDINFOA_V3_SIZE) return FALSE; if (iBand >= infoPtr->uNumBands) return FALSE; TRACE("index %d\n", iBand); REBAR_DumpBandInfo (lprbbi); /* set band information */ lpBand = REBAR_GetBand(infoPtr, iBand); uChanged = REBAR_CommonSetupBand (infoPtr->hwndSelf, lprbbi, lpBand); if (lprbbi->fMask & RBBIM_TEXT) { LPWSTR wstr = NULL; if (bUnicode) Str_SetPtrW(&wstr, lprbbi->lpText); else Str_SetPtrAtoW(&wstr, (LPSTR)lprbbi->lpText); if (REBAR_strdifW(wstr, lpBand->lpText)) { Free(lpBand->lpText); lpBand->lpText = wstr; uChanged |= RBBIM_TEXT; } else Free(wstr); } REBAR_ValidateBand (infoPtr, lpBand); REBAR_DumpBand (infoPtr); if (uChanged & (RBBIM_CHILDSIZE | RBBIM_SIZE | RBBIM_STYLE | RBBIM_IMAGE)) { REBAR_Layout(infoPtr); InvalidateRect(infoPtr->hwndSelf, NULL, TRUE); } return TRUE; } static LRESULT REBAR_SetBarInfo (REBAR_INFO *infoPtr, const REBARINFO *lpInfo) { REBAR_BAND *lpBand; UINT i; if (!lpInfo || lpInfo->cbSize < sizeof (REBARINFO)) return FALSE; 
TRACE("setting bar info!\n"); if (lpInfo->fMask & RBIM_IMAGELIST) { infoPtr->himl = lpInfo->himl; if (infoPtr->himl) { INT cx, cy; ImageList_GetIconSize (infoPtr->himl, &cx, &cy); infoPtr->imageSize.cx = cx; infoPtr->imageSize.cy = cy; } else { infoPtr->imageSize.cx = 0; infoPtr->imageSize.cy = 0; } TRACE("new image cx=%d, cy=%d\n", infoPtr->imageSize.cx, infoPtr->imageSize.cy); } /* revalidate all bands to reset flags for images in headers of bands */ for (i=0; i<infoPtr->uNumBands; i++) { lpBand = REBAR_GetBand(infoPtr, i); REBAR_ValidateBand (infoPtr, lpBand); } return TRUE; } static LRESULT REBAR_SetBkColor (REBAR_INFO *infoPtr, COLORREF clr) { COLORREF clrTemp; clrTemp = infoPtr->clrBk; infoPtr->clrBk = clr; TRACE("background color 0x%06x!\n", infoPtr->clrBk); return clrTemp; } /* << REBAR_SetColorScheme >> */ /* << REBAR_SetPalette >> */ static LRESULT REBAR_SetParent (REBAR_INFO *infoPtr, HWND parent) { HWND hwndTemp = infoPtr->hwndNotify; infoPtr->hwndNotify = parent; return (LRESULT)hwndTemp; } static LRESULT REBAR_SetTextColor (REBAR_INFO *infoPtr, COLORREF clr) { COLORREF clrTemp; clrTemp = infoPtr->clrText; infoPtr->clrText = clr; TRACE("text color 0x%06x!\n", infoPtr->clrText); return clrTemp; } /* << REBAR_SetTooltips >> */ static inline LRESULT REBAR_SetUnicodeFormat (REBAR_INFO *infoPtr, BOOL unicode) { BOOL bTemp = infoPtr->bUnicode; TRACE("to %s hwnd=%p, was %s\n", unicode ? "TRUE" : "FALSE", infoPtr->hwndSelf, (bTemp) ? 
"TRUE" : "FALSE"); infoPtr->bUnicode = unicode; return bTemp; } static LRESULT REBAR_SetVersion (REBAR_INFO *infoPtr, INT iVersion) { INT iOldVersion = infoPtr->iVersion; if (iVersion > COMCTL32_VERSION) return -1; infoPtr->iVersion = iVersion; TRACE("new version %d\n", iVersion); return iOldVersion; } static LRESULT REBAR_ShowBand (REBAR_INFO *infoPtr, INT iBand, BOOL show) { REBAR_BAND *lpBand; if (iBand < 0 || iBand >= infoPtr->uNumBands) return FALSE; lpBand = REBAR_GetBand(infoPtr, iBand); if (show) { TRACE("show band %d\n", iBand); lpBand->fStyle = lpBand->fStyle & ~RBBS_HIDDEN; if (IsWindow (lpBand->hwndChild)) ShowWindow (lpBand->hwndChild, SW_SHOW); } else { TRACE("hide band %d\n", iBand); lpBand->fStyle = lpBand->fStyle | RBBS_HIDDEN; if (IsWindow (lpBand->hwndChild)) ShowWindow (lpBand->hwndChild, SW_HIDE); } REBAR_Layout(infoPtr); InvalidateRect(infoPtr->hwndSelf, NULL, TRUE); return TRUE; } static LRESULT REBAR_SizeToRect (REBAR_INFO *infoPtr, const RECT *lpRect) { if (!lpRect) return FALSE; TRACE("[%s]\n", wine_dbgstr_rect(lpRect)); REBAR_SizeToHeight(infoPtr, get_rect_cy(infoPtr, lpRect)); return TRUE; } static LRESULT REBAR_Create (REBAR_INFO *infoPtr, LPCREATESTRUCTW cs) { RECT wnrc1, clrc1; if (TRACE_ON(rebar)) { GetWindowRect(infoPtr->hwndSelf, &wnrc1); GetClientRect(infoPtr->hwndSelf, &clrc1); TRACE("window=(%s) client=(%s) cs=(%d,%d %dx%d)\n", wine_dbgstr_rect(&wnrc1), wine_dbgstr_rect(&clrc1), cs->x, cs->y, cs->cx, cs->cy); } TRACE("created!\n"); if (OpenThemeData (infoPtr->hwndSelf, themeClass)) { /* native seems to clear WS_BORDER when themed */ infoPtr->dwStyle &= ~WS_BORDER; } return 0; } static LRESULT REBAR_Destroy (REBAR_INFO *infoPtr) { REBAR_BAND *lpBand; UINT i; /* clean up each band */ for (i = 0; i < infoPtr->uNumBands; i++) { lpBand = REBAR_GetBand(infoPtr, i); /* delete text strings */ Free (lpBand->lpText); lpBand->lpText = NULL; /* destroy child window */ DestroyWindow (lpBand->hwndChild); Free (lpBand); } /* free band array */ 
DPA_Destroy (infoPtr->bands); infoPtr->bands = NULL; DestroyCursor (infoPtr->hcurArrow); DestroyCursor (infoPtr->hcurHorz); DestroyCursor (infoPtr->hcurVert); DestroyCursor (infoPtr->hcurDrag); if (infoPtr->hDefaultFont) DeleteObject (infoPtr->hDefaultFont); SetWindowLongPtrW (infoPtr->hwndSelf, 0, 0); CloseThemeData (GetWindowTheme (infoPtr->hwndSelf)); /* free rebar info data */ Free (infoPtr); TRACE("destroyed!\n"); return 0; } static LRESULT REBAR_GetFont (const REBAR_INFO *infoPtr) { return (LRESULT)infoPtr->hFont; } static LRESULT REBAR_PushChevron(const REBAR_INFO *infoPtr, UINT uBand, LPARAM lParam) { if (uBand < infoPtr->uNumBands) { NMREBARCHEVRON nmrbc; REBAR_BAND *lpBand = REBAR_GetBand(infoPtr, uBand); TRACE("Pressed chevron on band %u\n", uBand); /* redraw chevron in pushed state */ lpBand->fDraw |= DRAW_CHEVRONPUSHED; RedrawWindow(infoPtr->hwndSelf, &lpBand->rcChevron,0, RDW_ERASE|RDW_INVALIDATE|RDW_UPDATENOW); /* notify app so it can display a popup menu or whatever */ nmrbc.uBand = uBand; nmrbc.wID = lpBand->wID; nmrbc.lParam = lpBand->lParam; nmrbc.rc = lpBand->rcChevron; nmrbc.lParamNM = lParam; REBAR_Notify((NMHDR*)&nmrbc, infoPtr, RBN_CHEVRONPUSHED); /* redraw chevron in previous state */ lpBand->fDraw &= ~DRAW_CHEVRONPUSHED; InvalidateRect(infoPtr->hwndSelf, &lpBand->rcChevron, TRUE); return TRUE; } return FALSE; } static LRESULT REBAR_LButtonDown (REBAR_INFO *infoPtr, LPARAM lParam) { UINT htFlags; INT iHitBand; POINT ptMouseDown; ptMouseDown.x = (short)LOWORD(lParam); ptMouseDown.y = (short)HIWORD(lParam); REBAR_InternalHitTest(infoPtr, &ptMouseDown, &htFlags, &iHitBand); if (htFlags == RBHT_CHEVRON) { REBAR_PushChevron(infoPtr, iHitBand, 0); } else if (htFlags == RBHT_GRABBER || htFlags == RBHT_CAPTION) { REBAR_BAND *lpBand; TRACE("Starting drag\n"); lpBand = REBAR_GetBand(infoPtr, iHitBand); SetCapture (infoPtr->hwndSelf); infoPtr->iGrabbedBand = iHitBand; /* save off the LOWORD and HIWORD of lParam as initial x,y */ infoPtr->dragStart.x = 
(short)LOWORD(lParam);
        infoPtr->dragStart.y = (short)HIWORD(lParam);
        infoPtr->dragNow = infoPtr->dragStart;
        /* remember the offset of the hit point inside the gripper so the
         * band tracks the mouse without jumping */
        if (infoPtr->dwStyle & CCS_VERT)
            infoPtr->ihitoffset = infoPtr->dragStart.y - (lpBand->rcBand.left + REBAR_PRE_GRIPPER);
        else
            infoPtr->ihitoffset = infoPtr->dragStart.x - (lpBand->rcBand.left + REBAR_PRE_GRIPPER);
    }
    return 0;
}

/* WM_LBUTTONUP handler: finish an in-progress band drag - release capture,
 * send RBN_LAYOUTCHANGED/RBN_ENDDRAG if a drag was actually begun, and
 * repaint the whole control. */
static LRESULT
REBAR_LButtonUp (REBAR_INFO *infoPtr)
{
    if (infoPtr->iGrabbedBand >= 0)
    {
        NMHDR layout;
        RECT rect;

        infoPtr->dragStart.x = 0;
        infoPtr->dragStart.y = 0;
        infoPtr->dragNow = infoPtr->dragStart;

        ReleaseCapture ();

        if (infoPtr->fStatus & BEGIN_DRAG_ISSUED) {
            REBAR_Notify(&layout, infoPtr, RBN_LAYOUTCHANGED);
            REBAR_Notify_NMREBAR (infoPtr, infoPtr->iGrabbedBand, RBN_ENDDRAG);
            infoPtr->fStatus &= ~BEGIN_DRAG_ISSUED;
        }

        infoPtr->iGrabbedBand = -1;

        GetClientRect(infoPtr->hwndSelf, &rect);
        InvalidateRect(infoPtr->hwndSelf, NULL, TRUE);
    }

    return 0;
}

/* WM_MOUSELEAVE handler: un-highlight a hot chevron, if any, and reset the
 * hot-tracking state. */
static LRESULT
REBAR_MouseLeave (REBAR_INFO *infoPtr)
{
    if (infoPtr->ichevronhotBand >= 0)
    {
        REBAR_BAND *lpChevronBand = REBAR_GetBand(infoPtr, infoPtr->ichevronhotBand);
        if (lpChevronBand->fDraw & DRAW_CHEVRONHOT)
        {
            lpChevronBand->fDraw &= ~DRAW_CHEVRONHOT;
            InvalidateRect(infoPtr->hwndSelf, &lpChevronBand->rcChevron, TRUE);
        }
    }
    infoPtr->iOldBand = -1;
    infoPtr->ichevronhotBand = -2;

    return TRUE;
}

/* WM_MOUSEMOVE handler: while dragging, issue RBN_BEGINDRAG on the first
 * significant movement and route the drag to the up/down or left/right
 * handler; otherwise perform chevron hot-tracking. */
static LRESULT
REBAR_MouseMove (REBAR_INFO *infoPtr, LPARAM lParam)
{
    REBAR_BAND *lpChevronBand;
    POINT ptMove;

    ptMove.x = (short)LOWORD(lParam);
    ptMove.y = (short)HIWORD(lParam);

    /* if we are currently dragging a band */
    if (infoPtr->iGrabbedBand >= 0)
    {
        REBAR_BAND *band;
        int yPtMove = (infoPtr->dwStyle & CCS_VERT ? ptMove.x : ptMove.y);

        if (GetCapture() != infoPtr->hwndSelf)
            ERR("We are dragging but haven't got capture?!?\n");

        band = REBAR_GetBand(infoPtr, infoPtr->iGrabbedBand);

        /* if mouse did not move much, exit */
        if ((abs(ptMove.x - infoPtr->dragNow.x) <= mindragx) &&
            (abs(ptMove.y - infoPtr->dragNow.y) <= mindragy)) return 0;

        /* on first significant mouse movement, issue notify */
        if (!(infoPtr->fStatus & BEGIN_DRAG_ISSUED)) {
            if (REBAR_Notify_NMREBAR (infoPtr, -1, RBN_BEGINDRAG)) {
                /* Notify returned TRUE - abort drag */
                infoPtr->dragStart.x = 0;
                infoPtr->dragStart.y = 0;
                infoPtr->dragNow = infoPtr->dragStart;
                infoPtr->iGrabbedBand = -1;
                ReleaseCapture ();
                return 0;
            }
            infoPtr->fStatus |= BEGIN_DRAG_ISSUED;
        }

        /* Test for valid drag case - must not be first band in row */
        if ((yPtMove < band->rcBand.top) ||
            (yPtMove > band->rcBand.bottom)) {
            REBAR_HandleUDDrag (infoPtr, &ptMove);
        }
        else {
            REBAR_HandleLRDrag (infoPtr, &ptMove);
        }
    }
    else
    {
        INT iHitBand;
        UINT htFlags;
        TRACKMOUSEEVENT trackinfo;

        REBAR_InternalHitTest(infoPtr, &ptMove, &htFlags, &iHitBand);

        if (infoPtr->iOldBand >= 0 && infoPtr->iOldBand == infoPtr->ichevronhotBand)
        {
            lpChevronBand = REBAR_GetBand(infoPtr, infoPtr->ichevronhotBand);
            if (lpChevronBand->fDraw & DRAW_CHEVRONHOT)
            {
                lpChevronBand->fDraw &= ~DRAW_CHEVRONHOT;
                InvalidateRect(infoPtr->hwndSelf, &lpChevronBand->rcChevron, TRUE);
            }
            infoPtr->ichevronhotBand = -2;
        }

        if (htFlags == RBHT_CHEVRON)
        {
            /* fill in the TRACKMOUSEEVENT struct */
            trackinfo.cbSize = sizeof(TRACKMOUSEEVENT);
            trackinfo.dwFlags = TME_QUERY;
            trackinfo.hwndTrack = infoPtr->hwndSelf;
            trackinfo.dwHoverTime = 0;

            /* call _TrackMouseEvent to see if we are currently tracking for this hwnd */
            _TrackMouseEvent(&trackinfo);

            /* Make sure tracking is enabled so we receive a WM_MOUSELEAVE message */
            if(!(trackinfo.dwFlags & TME_LEAVE))
            {
                trackinfo.dwFlags = TME_LEAVE; /* notify upon leaving */

                /* call TRACKMOUSEEVENT so we receive a WM_MOUSELEAVE message */
                /* and can properly deactivate the hot chevron */
                _TrackMouseEvent(&trackinfo);
            }

            lpChevronBand = REBAR_GetBand(infoPtr, iHitBand);
            if (!(lpChevronBand->fDraw & DRAW_CHEVRONHOT))
            {
                lpChevronBand->fDraw |= DRAW_CHEVRONHOT;
                InvalidateRect(infoPtr->hwndSelf, &lpChevronBand->rcChevron, TRUE);
                infoPtr->ichevronhotBand = iHitBand;
            }
        }
        infoPtr->iOldBand = iHitBand;
    }

    return 0;
}

/* WM_NCCALCSIZE handler: shrink the client area by the border edge
 * (WS_BORDER) or by one pixel when themed. */
static inline LRESULT
REBAR_NCCalcSize (const REBAR_INFO *infoPtr, RECT *rect)
{
    HTHEME theme;

    if (infoPtr->dwStyle & WS_BORDER) {
        rect->left   = min(rect->left + GetSystemMetrics(SM_CXEDGE), rect->right);
        rect->right  = max(rect->right - GetSystemMetrics(SM_CXEDGE), rect->left);
        rect->top    = min(rect->top + GetSystemMetrics(SM_CYEDGE), rect->bottom);
        rect->bottom = max(rect->bottom - GetSystemMetrics(SM_CYEDGE), rect->top);
    }
    else if ((theme = GetWindowTheme (infoPtr->hwndSelf)))
    {
        /* FIXME: should use GetThemeInt */
        rect->top = min(rect->top + 1, rect->bottom);
    }
    TRACE("new client=(%s)\n", wine_dbgstr_rect(rect));
    return 0;
}

/* WM_NCCREATE handler: allocate and initialize the REBAR_INFO structure,
 * attach it to the window, fix up the window style, and pick the caption
 * font (de-bolded) as the default font. */
static LRESULT
REBAR_NCCreate (HWND hwnd, const CREATESTRUCTW *cs)
{
    REBAR_INFO *infoPtr = REBAR_GetInfoPtr (hwnd);
    RECT wnrc1, clrc1;
    NONCLIENTMETRICSW ncm;
    HFONT tfont;

    if (infoPtr) {
        ERR("Strange info structure pointer *not* NULL\n");
        return FALSE;
    }

    if (TRACE_ON(rebar)) {
        GetWindowRect(hwnd, &wnrc1);
        GetClientRect(hwnd, &clrc1);
        TRACE("window=(%s) client=(%s) cs=(%d,%d %dx%d)\n",
              wine_dbgstr_rect(&wnrc1), wine_dbgstr_rect(&clrc1),
              cs->x, cs->y, cs->cx, cs->cy);
    }

    /* allocate memory for info structure */
    infoPtr = Alloc (sizeof(REBAR_INFO));
    SetWindowLongPtrW (hwnd, 0, (DWORD_PTR)infoPtr);

    /* initialize info structure - initial values are 0 */
    infoPtr->clrBk = CLR_NONE;
    infoPtr->clrText = CLR_NONE;
    infoPtr->clrBtnText = comctl32_color.clrBtnText;
    infoPtr->clrBtnFace = comctl32_color.clrBtnFace;
    infoPtr->iOldBand = -1;
    infoPtr->ichevronhotBand = -2;
    infoPtr->iGrabbedBand = -1;
    infoPtr->hwndSelf = hwnd;
    infoPtr->DoRedraw = TRUE;
    infoPtr->hcurArrow = LoadCursorW (0, (LPWSTR)IDC_ARROW);
    infoPtr->hcurHorz  =
LoadCursorW (0, (LPWSTR)IDC_SIZEWE);
    infoPtr->hcurVert  = LoadCursorW (0, (LPWSTR)IDC_SIZENS);
    infoPtr->hcurDrag  = LoadCursorW (0, (LPWSTR)IDC_SIZE);
    infoPtr->fStatus = 0;
    infoPtr->hFont = GetStockObject (SYSTEM_FONT);
    infoPtr->bands = DPA_Create(8);

    /* issue WM_NOTIFYFORMAT to get unicode status of parent */
    REBAR_NotifyFormat(infoPtr, NF_REQUERY);

    /* Stow away the original style */
    infoPtr->orgStyle = cs->style;
    /* add necessary styles to the requested styles */
    infoPtr->dwStyle = cs->style | WS_VISIBLE;
    if ((infoPtr->dwStyle & CCS_LAYOUT_MASK) == 0)
        infoPtr->dwStyle |= CCS_TOP;
    SetWindowLongW (hwnd, GWL_STYLE, infoPtr->dwStyle);

    /* get font handle for Caption Font */
    ncm.cbSize = sizeof(ncm);
    SystemParametersInfoW (SPI_GETNONCLIENTMETRICS, ncm.cbSize, &ncm, 0);
    /* if the font is bold, set to normal */
    if (ncm.lfCaptionFont.lfWeight > FW_NORMAL) {
        ncm.lfCaptionFont.lfWeight = FW_NORMAL;
    }
    tfont = CreateFontIndirectW (&ncm.lfCaptionFont);
    if (tfont) {
        infoPtr->hFont = infoPtr->hDefaultFont = tfont;
    }

/* native does:
            GetSysColor (numerous);
            GetSysColorBrush (numerous) (see WM_SYSCOLORCHANGE);
           *GetStockObject (SYSTEM_FONT);
           *SetWindowLong (hwnd, 0, info ptr);
           *WM_NOTIFYFORMAT;
           *SetWindowLong (hwnd, GWL_STYLE, style+0x10000001);
                                    WS_VISIBLE = 0x10000000;
                                    CCS_TOP    = 0x00000001;
           *SystemParametersInfo (SPI_GETNONCLIENTMETRICS...);
           *CreateFontIndirect (lfCaptionFont from above);
            GetDC ();
            SelectObject (hdc, fontabove);
            GetTextMetrics (hdc, );  guessing is tmHeight
            SelectObject (hdc, oldfont);
            ReleaseDC ();
            GetWindowRect ();
            MapWindowPoints (0, parent, rectabove, 2);
            GetWindowRect ();
            GetClientRect ();
            ClientToScreen (clientrect);
            SetWindowPos (hwnd, 0, 0, 0, 0, 0, SWP_NOZORDER);
 */
    return TRUE;
}

/* WM_NCHITTEST handler: hit-test in client coordinates, let the parent
 * override the result via an NM_NCHITTEST notification. */
static LRESULT
REBAR_NCHitTest (const REBAR_INFO *infoPtr, LPARAM lParam)
{
    NMMOUSE nmmouse;
    POINT clpt;
    INT i;
    UINT scrap;
    LRESULT ret = HTCLIENT;

    /*
     * Differences from doc at MSDN (as observed with version 4.71 of
     *      comctl32.dll
     * 1. doc says nmmouse.pt is in screen coord, trace shows client coord.
     * 2. if band is not identified .dwItemSpec is 0xffffffff.
     * 3. native always seems to return HTCLIENT if notify return is 0.
     */

    clpt.x = (short)LOWORD(lParam);
    clpt.y = (short)HIWORD(lParam);
    ScreenToClient (infoPtr->hwndSelf, &clpt);
    REBAR_InternalHitTest (infoPtr, &clpt, &scrap,
                           (INT *)&nmmouse.dwItemSpec);
    nmmouse.dwItemData = 0;
    nmmouse.pt = clpt;
    nmmouse.dwHitInfo = 0;
    if ((i = REBAR_Notify((NMHDR *) &nmmouse, infoPtr, NM_NCHITTEST))) {
        TRACE("notify changed return value from %ld to %d\n",
              ret, i);
        ret = (LRESULT) i;
    }
    TRACE("returning %ld, client point (%d,%d)\n", ret, clpt.x, clpt.y);
    return ret;
}

/* WM_NCPAINT handler: draw the etched border (WS_BORDER) or the themed
 * top edge into the window DC. */
static LRESULT
REBAR_NCPaint (const REBAR_INFO *infoPtr)
{
    RECT rcWindow;
    HDC hdc;
    HTHEME theme;

    if (infoPtr->dwStyle & WS_MINIMIZE)
        return 0; /* Nothing to do */

    if (infoPtr->dwStyle & WS_BORDER) {

        /* adjust rectangle and draw the necessary edge */
        if (!(hdc = GetDCEx( infoPtr->hwndSelf, 0, DCX_USESTYLE | DCX_WINDOW )))
            return 0;
        GetWindowRect (infoPtr->hwndSelf, &rcWindow);
        OffsetRect (&rcWindow, -rcWindow.left, -rcWindow.top);
        TRACE("rect (%s)\n", wine_dbgstr_rect(&rcWindow));
        DrawEdge (hdc, &rcWindow, EDGE_ETCHED, BF_RECT);
        ReleaseDC( infoPtr->hwndSelf, hdc );
    }
    else if ((theme = GetWindowTheme (infoPtr->hwndSelf)))
    {
        /* adjust rectangle and draw the necessary edge */
        if (!(hdc = GetDCEx( infoPtr->hwndSelf, 0, DCX_USESTYLE | DCX_WINDOW )))
            return 0;
        GetWindowRect (infoPtr->hwndSelf, &rcWindow);
        OffsetRect (&rcWindow, -rcWindow.left, -rcWindow.top);
        TRACE("rect (%s)\n", wine_dbgstr_rect(&rcWindow));
        DrawThemeEdge (theme, hdc, 0, 0, &rcWindow, BDR_RAISEDINNER, BF_TOP, NULL);
        ReleaseDC( infoPtr->hwndSelf, hdc );
    }

    return 0;
}

/* WM_NOTIFYFORMAT handler: on NF_REQUERY ask the parent whether it wants
 * ANSI or Unicode notifications and cache the answer; otherwise report the
 * cached format. */
static LRESULT
REBAR_NotifyFormat (REBAR_INFO *infoPtr, LPARAM cmd)
{
    INT i;

    if (cmd == NF_REQUERY) {
        i = SendMessageW(REBAR_GetNotifyParent (infoPtr),
                         WM_NOTIFYFORMAT, (WPARAM)infoPtr->hwndSelf, NF_QUERY);
        if ((i != NFR_ANSI) && (i != NFR_UNICODE)) {
            ERR("wrong response to WM_NOTIFYFORMAT (%d), assuming ANSI\n", i);
            i = NFR_ANSI;
        }
        infoPtr->bUnicode = (i == NFR_UNICODE) ? 1 : 0;
        return (LRESULT)i;
    }
    return (LRESULT)((infoPtr->bUnicode) ? NFR_UNICODE : NFR_ANSI);
}

/* WM_PAINT / WM_PRINTCLIENT handler: paint into the given DC, or
 * BeginPaint/EndPaint when none is supplied. */
static LRESULT
REBAR_Paint (const REBAR_INFO *infoPtr, HDC hdc)
{
    if (hdc) {
        TRACE("painting\n");
        REBAR_Refresh (infoPtr, hdc);
    } else {
        PAINTSTRUCT ps;
        hdc = BeginPaint (infoPtr->hwndSelf, &ps);
        TRACE("painting (%s)\n", wine_dbgstr_rect(&ps.rcPaint));
        if (ps.fErase) {
            /* Erase area of paint if requested */
            REBAR_EraseBkGnd (infoPtr, hdc);
        }
        REBAR_Refresh (infoPtr, hdc);
        EndPaint (infoPtr->hwndSelf, &ps);
    }
    return 0;
}

/* WM_SETCURSOR handler: show a sizing cursor over grippers, the arrow
 * elsewhere outside the client area. */
static LRESULT
REBAR_SetCursor (const REBAR_INFO *infoPtr, LPARAM lParam)
{
    POINT pt;
    UINT flags;

    TRACE("code=0x%X  id=0x%X\n", LOWORD(lParam), HIWORD(lParam));

    GetCursorPos (&pt);
    ScreenToClient (infoPtr->hwndSelf, &pt);

    REBAR_InternalHitTest (infoPtr, &pt, &flags, NULL);

    if (flags == RBHT_GRABBER) {
        if ((infoPtr->dwStyle & CCS_VERT) &&
            !(infoPtr->dwStyle & RBS_VERTICALGRIPPER))
            SetCursor (infoPtr->hcurVert);
        else
            SetCursor (infoPtr->hcurHorz);
    }
    else if (flags != RBHT_CLIENT)
        SetCursor (infoPtr->hcurArrow);

    return 0;
}

/* WM_SETFONT handler: store the new font, revalidate all bands (header
 * text sizes change) and relayout. */
static LRESULT
REBAR_SetFont (REBAR_INFO *infoPtr, HFONT font)
{
    REBAR_BAND *lpBand;
    UINT i;

    infoPtr->hFont = font;

    /* revalidate all bands to change sizes of text in headers of bands */
    for (i=0; i<infoPtr->uNumBands; i++) {
        lpBand = REBAR_GetBand(infoPtr, i);
	REBAR_ValidateBand (infoPtr, lpBand);
    }

    REBAR_Layout(infoPtr);
    return 0;
}

/*****************************************************
 *
 *  Handles the WM_SETREDRAW message.
 *
 * Documentation:
 *  According to testing V4.71 of COMCTL32 returns the
 *  *previous* status of the redraw flag (either 0 or -1)
 *  instead of the MSDN documented value of 0 if handled
 *
 *****************************************************/
static inline LRESULT
REBAR_SetRedraw (REBAR_INFO *infoPtr, BOOL redraw)
{
    BOOL oldredraw = infoPtr->DoRedraw;

    TRACE("set to %s, fStatus=%08x\n",
	  (redraw) ? "TRUE" : "FALSE", infoPtr->fStatus);
    infoPtr->DoRedraw = redraw;
    if (redraw) {
        /* perform the redraw that was deferred while redraw was off */
	if (infoPtr->fStatus & BAND_NEEDS_REDRAW) {
	    REBAR_MoveChildWindows (infoPtr, 0, infoPtr->uNumBands);
	    REBAR_ForceResize (infoPtr);
	    InvalidateRect (infoPtr->hwndSelf, NULL, TRUE);
	}
	infoPtr->fStatus &= ~BAND_NEEDS_REDRAW;
    }
    return (oldredraw) ? -1 : 0;
}

/* WM_SIZE handler: relayout (or autosize), guarding against recursion
 * triggered by our own resize. */
static LRESULT
REBAR_Size (REBAR_INFO *infoPtr, WPARAM wParam, LPARAM lParam)
{
    TRACE("wParam=%lx, lParam=%lx\n", wParam, lParam);

    /* avoid _Layout resize recursion (but it shouldn't be infinite
     * and it seems Windows does recurse) */
    if (infoPtr->fStatus & SELF_RESIZE) {
	infoPtr->fStatus &= ~SELF_RESIZE;
	TRACE("SELF_RESIZE was set, reset, fStatus=%08x lparam=%08lx\n",
	      infoPtr->fStatus, lParam);
	return 0;
    }

    if (infoPtr->dwStyle & RBS_AUTOSIZE)
        REBAR_AutoSize(infoPtr, TRUE);
    else
        REBAR_Layout(infoPtr);

    return 0;
}

/* WM_STYLECHANGED handler: adopt the new style and relayout when the
 * orientation (CCS_VERT) flipped. */
static LRESULT
REBAR_StyleChanged (REBAR_INFO *infoPtr, INT nType, const STYLESTRUCT *lpStyle)
{
    TRACE("current style=%08x, styleOld=%08x, style being set to=%08x\n",
	  infoPtr->dwStyle, lpStyle->styleOld, lpStyle->styleNew);
    if (nType == GWL_STYLE)
    {
        infoPtr->orgStyle = infoPtr->dwStyle = lpStyle->styleNew;
        if (GetWindowTheme (infoPtr->hwndSelf))
            infoPtr->dwStyle &= ~WS_BORDER;
        /* maybe it should be COMMON_STYLES like in toolbar */
        if ((lpStyle->styleNew ^ lpStyle->styleOld) & CCS_VERT)
            REBAR_Layout(infoPtr);
    }
    return FALSE;
}

/* update theme after a WM_THEMECHANGED message */
static LRESULT theme_changed (REBAR_INFO* infoPtr)
{
    HTHEME theme = GetWindowTheme (infoPtr->hwndSelf);
    CloseThemeData (theme);
    theme = OpenThemeData (infoPtr->hwndSelf, themeClass);
    /* WS_BORDER disappears when theming is enabled and reappears when
     * disabled... */
    infoPtr->dwStyle &= ~WS_BORDER;
    infoPtr->dwStyle |= theme ?
0 : (infoPtr->orgStyle & WS_BORDER);
    return 0;
}

/* WM_WINDOWPOSCHANGED handler: let DefWindowProc do the work, then trace
 * the new window rectangle. */
static LRESULT
REBAR_WindowPosChanged (const REBAR_INFO *infoPtr, WPARAM wParam, LPARAM lParam)
{
    LRESULT ret;
    RECT rc;

    ret = DefWindowProcW(infoPtr->hwndSelf, WM_WINDOWPOSCHANGED,
			 wParam, lParam);
    GetWindowRect(infoPtr->hwndSelf, &rc);
    TRACE("hwnd %p new pos (%s)\n", infoPtr->hwndSelf, wine_dbgstr_rect(&rc));
    return ret;
}

/* The rebar window procedure: dispatch RB_* control messages and the
 * relevant WM_* messages to their handlers; everything else goes to
 * DefWindowProc. */
static LRESULT WINAPI
REBAR_WindowProc (HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
    REBAR_INFO *infoPtr = REBAR_GetInfoPtr (hwnd);

    TRACE("hwnd=%p msg=%x wparam=%lx lparam=%lx\n",
	  hwnd, uMsg, wParam, lParam);
    if (!infoPtr && (uMsg != WM_NCCREATE))
	return DefWindowProcW (hwnd, uMsg, wParam, lParam);
    switch (uMsg)
    {
/*	case RB_BEGINDRAG: */

	case RB_DELETEBAND:
	    return REBAR_DeleteBand (infoPtr, wParam);

/*	case RB_DRAGMOVE: */
/*	case RB_ENDDRAG: */

	case RB_GETBANDBORDERS:
	    return REBAR_GetBandBorders (infoPtr, wParam, (LPRECT)lParam);

	case RB_GETBANDCOUNT:
	    return REBAR_GetBandCount (infoPtr);

	case RB_GETBANDINFO_OLD:
	case RB_GETBANDINFOA:
	case RB_GETBANDINFOW:
	    return REBAR_GetBandInfoT(infoPtr, wParam, (LPREBARBANDINFOW)lParam,
	                                              uMsg == RB_GETBANDINFOW);
	case RB_GETBARHEIGHT:
	    return REBAR_GetBarHeight (infoPtr);

	case RB_GETBARINFO:
	    return REBAR_GetBarInfo (infoPtr, (LPREBARINFO)lParam);

	case RB_GETBKCOLOR:
	    return REBAR_GetBkColor (infoPtr);

/*	case RB_GETCOLORSCHEME: */
/*	case RB_GETDROPTARGET: */

	case RB_GETPALETTE:
	    return REBAR_GetPalette (infoPtr);

	case RB_GETRECT:
	    return REBAR_GetRect (infoPtr, wParam, (LPRECT)lParam);

	case RB_GETROWCOUNT:
	    return REBAR_GetRowCount (infoPtr);

	case RB_GETROWHEIGHT:
	    return REBAR_GetRowHeight (infoPtr, wParam);

	case RB_GETTEXTCOLOR:
	    return REBAR_GetTextColor (infoPtr);

	case RB_GETTOOLTIPS:
	    return REBAR_GetToolTips (infoPtr);

	case RB_GETUNICODEFORMAT:
	    return REBAR_GetUnicodeFormat (infoPtr);

	case CCM_GETVERSION:
	    return REBAR_GetVersion (infoPtr);

	case RB_HITTEST:
	    return REBAR_HitTest (infoPtr, (LPRBHITTESTINFO)lParam);

	case RB_IDTOINDEX:
	    return REBAR_IdToIndex (infoPtr, wParam);

	case RB_INSERTBANDA:
	case RB_INSERTBANDW:
	    return REBAR_InsertBandT(infoPtr, wParam, (LPREBARBANDINFOW)lParam,
	                                             uMsg == RB_INSERTBANDW);
	case RB_MAXIMIZEBAND:
	    return REBAR_MaximizeBand (infoPtr, wParam, lParam);

	case RB_MINIMIZEBAND:
	    return REBAR_MinimizeBand (infoPtr, wParam);

	case RB_MOVEBAND:
	    return REBAR_MoveBand (infoPtr, wParam, lParam);

	case RB_PUSHCHEVRON:
	    return REBAR_PushChevron (infoPtr, wParam, lParam);

	case RB_SETBANDINFOA:
	case RB_SETBANDINFOW:
	    return REBAR_SetBandInfoT(infoPtr, wParam, (LPREBARBANDINFOW)lParam,
	                                              uMsg == RB_SETBANDINFOW);
	case RB_SETBARINFO:
	    return REBAR_SetBarInfo (infoPtr, (LPREBARINFO)lParam);

	case RB_SETBKCOLOR:
	    return REBAR_SetBkColor (infoPtr, lParam);

/*	case RB_SETCOLORSCHEME: */
/*	case RB_SETPALETTE: */

	case RB_SETPARENT:
	    return REBAR_SetParent (infoPtr, (HWND)wParam);

	case RB_SETTEXTCOLOR:
	    return REBAR_SetTextColor (infoPtr, lParam);

/*	case RB_SETTOOLTIPS: */

	case RB_SETUNICODEFORMAT:
	    return REBAR_SetUnicodeFormat (infoPtr, wParam);

	case CCM_SETVERSION:
	    return REBAR_SetVersion (infoPtr, (INT)wParam);

	case RB_SHOWBAND:
	    return REBAR_ShowBand (infoPtr, wParam, lParam);

	case RB_SIZETORECT:
	    return REBAR_SizeToRect (infoPtr, (LPCRECT)lParam);


/*    Messages passed to parent */
	case WM_COMMAND:
	case WM_DRAWITEM:
	case WM_NOTIFY:
	    return SendMessageW(REBAR_GetNotifyParent (infoPtr), uMsg, wParam, lParam);


/*      case WM_CHARTOITEM:     supported according to ControlSpy */

	case WM_CREATE:
	    return REBAR_Create (infoPtr, (LPCREATESTRUCTW)lParam);

	case WM_DESTROY:
	    return REBAR_Destroy (infoPtr);

        case WM_ERASEBKGND:
	    return REBAR_EraseBkGnd (infoPtr, (HDC)wParam);

	case WM_GETFONT:
	    return REBAR_GetFont (infoPtr);

/*      case WM_LBUTTONDBLCLK:  supported according to ControlSpy */

	case WM_LBUTTONDOWN:
	    return REBAR_LButtonDown (infoPtr, lParam);

	case WM_LBUTTONUP:
	    return REBAR_LButtonUp (infoPtr);

/*      case WM_MEASUREITEM:    supported according to ControlSpy */

	case WM_MOUSEMOVE:
	    return REBAR_MouseMove (infoPtr, lParam);

	case WM_MOUSELEAVE:
	    return REBAR_MouseLeave (infoPtr);

	case WM_NCCALCSIZE:
	    return REBAR_NCCalcSize (infoPtr, (RECT*)lParam);

        case WM_NCCREATE:
	    return REBAR_NCCreate (hwnd, (LPCREATESTRUCTW)lParam);

        case WM_NCHITTEST:
	    return REBAR_NCHitTest (infoPtr, lParam);

	case WM_NCPAINT:
	    return REBAR_NCPaint (infoPtr);

	case WM_NOTIFYFORMAT:
	    return REBAR_NotifyFormat (infoPtr, lParam);

	case WM_PRINTCLIENT:
	case WM_PAINT:
	    return REBAR_Paint (infoPtr, (HDC)wParam);

/*      case WM_PALETTECHANGED: supported according to ControlSpy */
/*      case WM_QUERYNEWPALETTE:supported according to ControlSpy */
/*      case WM_RBUTTONDOWN:    supported according to ControlSpy */
/*      case WM_RBUTTONUP:      supported according to ControlSpy */

	case WM_SETCURSOR:
	    return REBAR_SetCursor (infoPtr, lParam);

	case WM_SETFONT:
	    return REBAR_SetFont (infoPtr, (HFONT)wParam);

        case WM_SETREDRAW:
	    return REBAR_SetRedraw (infoPtr, wParam);

	case WM_SIZE:
	    return REBAR_Size (infoPtr, wParam, lParam);

        case WM_STYLECHANGED:
	    return REBAR_StyleChanged (infoPtr, wParam, (LPSTYLESTRUCT)lParam);

        case WM_THEMECHANGED:
            return theme_changed (infoPtr);

	case WM_SYSCOLORCHANGE:
	    COMCTL32_RefreshSysColors();
	    return 0;

/*      case WM_VKEYTOITEM:     supported according to ControlSpy */
/*	case WM_WININICHANGE: */

	case WM_WINDOWPOSCHANGED:
	    return REBAR_WindowPosChanged (infoPtr, wParam, lParam);

	default:
	    if ((uMsg >= WM_USER) && (uMsg < WM_APP) && !COMCTL32_IsReflectedMessage(uMsg))
		ERR("unknown msg %04x wp=%08lx lp=%08lx\n",
		     uMsg, wParam, lParam);
	    return DefWindowProcW (hwnd, uMsg, wParam, lParam);
    }
}

/* Register the rebar window class and cache the drag thresholds. */
VOID
REBAR_Register (void)
{
    WNDCLASSW wndClass;

    ZeroMemory (&wndClass, sizeof(WNDCLASSW));
    wndClass.style         = CS_GLOBALCLASS | CS_DBLCLKS;
    wndClass.lpfnWndProc   = REBAR_WindowProc;
    wndClass.cbClsExtra    = 0;
    wndClass.cbWndExtra    = sizeof(REBAR_INFO *);
    wndClass.hCursor       = 0;
    wndClass.hbrBackground = (HBRUSH)(COLOR_BTNFACE + 1);
#if GLATESTING
    wndClass.hbrBackground = CreateSolidBrush(RGB(0,128,0));
#endif
    wndClass.lpszClassName = REBARCLASSNAMEW;

    RegisterClassW (&wndClass);

    /* minimum mouse movement before a drag is considered started */
    mindragx = GetSystemMetrics (SM_CXDRAG);
    mindragy = GetSystemMetrics (SM_CYDRAG);

}

/* Unregister the rebar window class. */
VOID
REBAR_Unregister (void)
{
    UnregisterClassW (REBARCLASSNAMEW, NULL);
}
438835.c
/*******************************************************************************
* Copyright 2013-2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

/*
//
//  Purpose:
//     Cryptography Primitive.
//     RSASSA-PKCS-v1_5
//
//     Signature Scheme with Appendix Signature Verification
//
//  Contents:
//        ippsRSAVerifyHash_PKCS1v15_rmf()
//
*/

#include "owndefs.h"
#include "owncp.h"
#include "pcpngrsa.h"
#include "pcphash_rmf.h"
#include "pcptool.h"

#include "pcprsa_pkcs1c15_data.h"
#include "pcprsa_verifysing_pkcs1v15.h"

#if defined( _ABL_ )

/* Verify an RSASSA-PKCS1-v1_5 signature over a precomputed message digest.
 * md       - digest of the message (length = pMethod->hashLen)
 * pSign    - signature to verify
 * pIsValid - out: non-zero when the signature verifies, 0 otherwise
 * pKey     - public RSA key context (must be set up)
 * pMethod  - hash method whose DigestInfo prefix is looked up in pksc15_salt
 * pBuffer  - scratch work buffer (aligned to BNU_CHUNK_T internally)
 * Returns ippStsNoErr on a completed operation (valid or not is reported
 * via *pIsValid), or an error status for bad arguments. */
IPPFUN(IppStatus, ippsRSAVerifyHash_PKCS1v15_rmf,(const Ipp8u* md,
                                            const Ipp8u* pSign, int* pIsValid,
                                      const IppsRSAPublicKeyState* pKey,
                                            const IppsHashMethod* pMethod,
                                                  Ipp8u* pBuffer))
{
   IppHashAlgId hashAlg;

   /* test public key context */
   IPP_BAD_PTR3_RET(pKey, pBuffer, pMethod);
   pKey = (IppsRSAPublicKeyState*)( IPP_ALIGNED_PTR(pKey, RSA_PUBLIC_KEY_ALIGNMENT) );
   IPP_BADARG_RET(!RSA_PUB_KEY_VALID_ID(pKey), ippStsContextMatchErr);
   IPP_BADARG_RET(!RSA_PUB_KEY_IS_SET(pKey), ippStsIncompleteContextErr);

   /* test hash algorithm ID (SM3 has no PKCS#1 v1.5 DigestInfo entry) */
   hashAlg = pMethod->hashAlgId;
   IPP_BADARG_RET(ippHashAlg_SM3==hashAlg, ippStsNotSupportedModeErr);

   /* test data pointer */
   IPP_BAD_PTR3_RET(md, pSign, pIsValid);

   *pIsValid = 0;
   return VerifySing(md, pMethod->hashLen,
                     pksc15_salt[hashAlg].pSalt, pksc15_salt[hashAlg].saltLen,
                     pSign, pIsValid,
                     pKey,
                     (BNU_CHUNK_T*)(IPP_ALIGNED_PTR((pBuffer), (int)sizeof(BNU_CHUNK_T))))? ippStsNoErr : ippStsSizeErr;
}
#endif /* #if defined( _ABL_ ) */
49826.c
/** * Copyright (c) 2015 - 2018, Nordic Semiconductor ASA * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form, except as embedded into a Nordic * Semiconductor ASA integrated circuit in a product or a software update for * such product, must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * * 3. Neither the name of Nordic Semiconductor ASA nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. This software, with or without modification, must only be used with a * Nordic Semiconductor ASA integrated circuit. * * 5. Any software provided in binary form under this license must not be reverse * engineered, decompiled, modified and/or disassembled. * * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include "sdk_common.h" #if NRF_MODULE_ENABLED(ANT_COMMON_PAGE_81) #include "ant_common_page_81.h" #define NRF_LOG_MODULE_NAME ant_common_page_81 #if ANT_COMMON_PAGE_81_LOG_ENABLED #define NRF_LOG_LEVEL ANT_COMMON_PAGE_81_LOG_LEVEL #define NRF_LOG_INFO_COLOR ANT_COMMON_PAGE_81_INFO_COLOR #else // ANT_COMMON_PAGE_81_LOG_ENABLED #define NRF_LOG_LEVEL 0 #endif // ANT_COMMON_PAGE_81_LOG_ENABLED #include "nrf_log.h" NRF_LOG_MODULE_REGISTER(); /**@brief ant+ common page 81 data layout structure. */ typedef struct { uint8_t reserved; ///< unused, fill by 0xFF uint8_t sw_revision_minor; uint8_t sw_revision_major; uint8_t serial_number[4]; }ant_common_page81_data_layout_t; /**@brief Function for tracing page 80 data. * * @param[in] p_page_data Pointer to the page 80 data. */ static void page81_data_log(volatile ant_common_page81_data_t const * p_page_data) { if (p_page_data->sw_revision_minor != UINT8_MAX) { NRF_LOG_INFO("sw revision: %u.%u", ((ant_common_page81_data_t const *) p_page_data)->sw_revision_major, ((ant_common_page81_data_t const *) p_page_data)->sw_revision_minor); } else { NRF_LOG_INFO("sw revision: %u", p_page_data->sw_revision_major); } NRF_LOG_INFO("serial number: %u\r\n\n", (unsigned int) p_page_data->serial_number); } void ant_common_page_81_encode(uint8_t * p_page_buffer, volatile ant_common_page81_data_t const * p_page_data) { ant_common_page81_data_layout_t * p_outcoming_data = (ant_common_page81_data_layout_t *)p_page_buffer; p_outcoming_data->reserved = UINT8_MAX; p_outcoming_data->sw_revision_minor = p_page_data->sw_revision_minor; p_outcoming_data->sw_revision_major = p_page_data->sw_revision_major; UNUSED_PARAMETER(uint32_encode(p_page_data->serial_number, p_outcoming_data->serial_number)); page81_data_log(p_page_data); } void ant_common_page_81_decode(uint8_t const * p_page_buffer, volatile ant_common_page81_data_t * p_page_data) { ant_common_page81_data_layout_t const * p_incoming_data = (ant_common_page81_data_layout_t *)p_page_buffer; 
p_page_data->sw_revision_minor = p_incoming_data->sw_revision_minor; p_page_data->sw_revision_major = p_incoming_data->sw_revision_major; p_page_data->serial_number = uint32_decode(p_incoming_data->serial_number); page81_data_log(p_page_data); } #endif // NRF_MODULE_ENABLED(ANT_COMMON_PAGE_81)
730488.c
#include "main.h" void HW_EXTI_Init(u8 int_portsource,u8 int_pinsource,u32 int_line,EXTITrigger_TypeDef trig) { EXTI_InitTypeDef EXTI_InitStructure; RCC_APB2PeriphClockCmd(RCC_APB2Periph_AFIO,ENABLE); //使能复用功能时钟 //中断线以及中断初始化配置 GPIO_EXTILineConfig(int_portsource,int_pinsource); EXTI_InitStructure.EXTI_Line = int_line; EXTI_InitStructure.EXTI_Mode = EXTI_Mode_Interrupt; EXTI_InitStructure.EXTI_Trigger = trig; EXTI_InitStructure.EXTI_LineCmd = ENABLE; EXTI_Init(&EXTI_InitStructure); //根据EXTI_InitStruct中指定的参数初始化外设EXTI寄存器 } void HW_EXTI_Enable(u8 int_channel,u8 preemption_priority,u8 sub_priority) { NVIC_InitTypeDef NVIC_InitStructure; NVIC_InitStructure.NVIC_IRQChannel = int_channel; //使能外部中断通道 NVIC_InitStructure.NVIC_IRQChannelPreemptionPriority = preemption_priority; //抢占优先级 //NVIC_InitStructure.NVIC_IRQChannelSubPriority = sub_priority; //子优先级 NVIC_InitStructure.NVIC_IRQChannelCmd = ENABLE; //使能外部中断通道 NVIC_Init(&NVIC_InitStructure); } void HW_EXTI_Disable(u8 int_channel,u8 preemption_priority,u8 sub_priority) { NVIC_InitTypeDef NVIC_InitStructure; NVIC_InitStructure.NVIC_IRQChannel = int_channel; //使能外部中断通道 NVIC_InitStructure.NVIC_IRQChannelPreemptionPriority = preemption_priority; //抢占优先级 //NVIC_InitStructure.NVIC_IRQChannelSubPriority = sub_priority; //子优先级 NVIC_InitStructure.NVIC_IRQChannelCmd = DISABLE; //使能外部中断通道 NVIC_Init(&NVIC_InitStructure); }
591014.c
#include <common/json_command.h>
#include <common/jsonrpc_errors.h>
#include <common/wallet_tx.h>
#include <inttypes.h>
#include <wallet/wallet.h>

/* Initialize a wallet_tx: remember the issuing command and store the
 * maximum permissible amount in wtx->amount (param_wtx reads it as the
 * cap before overwriting it with the parsed value). */
void wtx_init(struct command *cmd, struct wallet_tx *wtx, struct amount_sat max)
{
	wtx->cmd = cmd;
	wtx->amount = max;
}

/* JSON parameter parser: accept either the literal "all" (sets all_funds)
 * or a satoshi amount, which must not exceed the cap placed in wtx->amount
 * by wtx_init.  Returns NULL on success, or a command_fail result. */
struct command_result *param_wtx(struct command *cmd,
				 const char *name,
				 const char *buffer,
				 const jsmntok_t *tok,
				 struct wallet_tx *wtx)
{
	/* wtx->amount still holds the maximum set by wtx_init */
	struct amount_sat max = wtx->amount;

	if (json_tok_streq(buffer, tok, "all")) {
		wtx->all_funds = true;
		return NULL;
	}
	wtx->all_funds = false;

	if (!parse_amount_sat(&wtx->amount,
			      buffer + tok->start, tok->end - tok->start))
		return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
				    "'%s' should be satoshis or 'all', not '%.*s'",
				    name,
				    tok->end - tok->start,
				    buffer + tok->start);

	if (amount_sat_greater(wtx->amount, max))
		return command_fail(wtx->cmd, FUND_MAX_EXCEEDED,
				    "Amount exceeded %s",
				    type_to_string(tmpctx, struct amount_sat,
						   &max));
	return NULL;
}

/* Shared sanity check after coin selection: fail if no UTXOs were found,
 * or if the resulting output would fall below the chain's dust limit. */
static struct command_result *check_amount(const struct wallet_tx *wtx,
					   struct amount_sat amount)
{
	if (tal_count(wtx->utxos) == 0) {
		return command_fail(wtx->cmd, FUND_CANNOT_AFFORD,
				    "Cannot afford transaction");
	}
	if (amount_sat_less(amount, get_chainparams(wtx->cmd->ld)->dust_limit)) {
		return command_fail(wtx->cmd, FUND_OUTPUT_IS_DUST,
				    "Output %s would be dust",
				    type_to_string(tmpctx, struct amount_sat,
						   &amount));
	}
	return NULL;
}

/* Select UTXOs to fund tx->amount (or everything, when all_funds).
 * In the all_funds case, if the wallet holds more than the permitted
 * maximum, fall back to a normal bounded selection.  Dust-sized change is
 * folded into fees; otherwise a fresh change key index is allocated.
 * Returns NULL on success, or a command_fail result. */
struct command_result *wtx_select_utxos(struct wallet_tx *tx,
					u32 fee_rate_per_kw,
					size_t out_len,
					u32 maxheight)
{
	struct command_result *res;
	struct amount_sat fee_estimate;

	if (tx->all_funds) {
		struct amount_sat amount;
		tx->utxos = wallet_select_all(tx->cmd, tx->cmd->ld->wallet,
					      fee_rate_per_kw, out_len,
					      maxheight,
					      &amount,
					      &fee_estimate);
		res = check_amount(tx, amount);
		if (res)
			return res;

		/* tx->amount is max permissible */
		if (amount_sat_less_eq(amount, tx->amount)) {
			tx->change = AMOUNT_SAT(0);
			tx->change_key_index = 0;
			tx->amount = amount;
			return NULL;
		}

		/* Too much?  Try again, but ask for limit instead. */
		tx->all_funds = false;
		tx->utxos = tal_free(tx->utxos);
	}

	tx->utxos = wallet_select_coins(tx->cmd, tx->cmd->ld->wallet,
					tx->amount,
					fee_rate_per_kw, out_len,
					maxheight,
					&fee_estimate, &tx->change);
	res = check_amount(tx, tx->amount);
	if (res)
		return res;

	if (amount_sat_less(tx->change, get_chainparams(tx->cmd->ld)->dust_limit)) {
		/* change too small to bother with - give it to the miners */
		tx->change = AMOUNT_SAT(0);
		tx->change_key_index = 0;
	} else {
		tx->change_key_index = wallet_get_newindex(tx->cmd->ld);
	}
	return NULL;
}
258697.c
/***************************************************************************//**
 * @file retargettextdisplay.c
 * @brief Provide stdio retargeting to text display interface.
 * @version 4.1.0
 *******************************************************************************
 * @section License
 * <b>(C) Copyright 2014 Silicon Labs, http://www.silabs.com</b>
 *******************************************************************************
 *
 * This file is licensed under the Silabs License Agreement. See the file
 * "Silabs_License_Agreement.txt" for details. Before using this software for
 * any purpose, you must agree to the terms of that agreement.
 *
 ******************************************************************************/

#include <stdio.h>
#include <stdint.h>

#include "displayconfigall.h"
#include "display.h"
#include "textdisplay.h"
#include "retargettextdisplay.h"

/** @cond DO_NOT_INCLUDE_WITH_DOXYGEN */

/*******************************************************************************
 ********************************  STATICS  ************************************
 ******************************************************************************/

/* Handle which references the selected text display to print text on.
 * Remains 0 (invalid) until RETARGET_TextDisplayInit succeeds. */
TEXTDISPLAY_Handle_t textDisplayHandle = 0;

/** @endcond */

/*******************************************************************************
 **************************     GLOBAL FUNCTIONS      **************************
 ******************************************************************************/

/**************************************************************************//**
 * @brief Initialize/retarget a TEXTDISPLAY device to receive stdout (output).
 *
 * @return  EMSTATUS code of the operation.
 *****************************************************************************/
EMSTATUS RETARGET_TextDisplayInit(void)
{
  EMSTATUS status;
  DISPLAY_Device_t displayDevice;
  TEXTDISPLAY_Config_t textDisplayConfig;

  /* Query that the specified DISPLAY device is available. */
  status = DISPLAY_DeviceGet(RETARGETTEXTDISPLAY_DISPLAY_NO, &displayDevice);

  if (DISPLAY_EMSTATUS_OK == status)
  {
    textDisplayConfig.displayDeviceNo = RETARGETTEXTDISPLAY_DISPLAY_NO;
    textDisplayConfig.scrollEnable    = RETARGETTEXTDISPLAY_SCROLL_MODE;
    textDisplayConfig.lfToCrLf        = RETARGETTEXTDISPLAY_LINE_FEED_MODE;

    status = TEXTDISPLAY_New(&textDisplayConfig, &textDisplayHandle);

#if !defined(__CROSSWORKS_ARM) && defined(__GNUC__)
    if (TEXTDISPLAY_EMSTATUS_OK == status)
    {
      /* Set unbuffered mode for stdout (newlib) */
      setvbuf(stdout, NULL, _IONBF, 0);
    }
#endif
  }
  return status;
}

/**************************************************************************//**
 * @brief Receive a byte
 *    No input method from the text display is possible, thus we always
 *    return -1
 *
 * @return -1 on failure
 *****************************************************************************/
int RETARGET_ReadChar(void)
{
  return -1;
}

/**************************************************************************//**
 * @brief Write a single byte to the text display
 *
 * @param c Character to write
 *
 * @return Printed character if text display is initialized.
 *         -1 if text display is not initialized.
 *****************************************************************************/
int RETARGET_WriteChar(char c)
{
  if (textDisplayHandle)
  {
    TEXTDISPLAY_WriteChar(textDisplayHandle, c);
    return c;
  }
  else
    return -1;
}

/**************************************************************************//**
 * @brief Write a string of characters to the RETARGET text display device.
 *
 * @param[in] str  String to write.
 *
 * @return  EMSTATUS code of the operation.
 *****************************************************************************/
EMSTATUS RETARGET_WriteString(char*   str)
{
  if (textDisplayHandle)
  {
    return TEXTDISPLAY_WriteString(textDisplayHandle, str);
  }
  else
    return TEXTDISPLAY_EMSTATUS_NOT_INITIALIZED;
}

/***************  THE REST OF THE FILE IS DOCUMENTATION ONLY !
***************/ /******************************************************************************* ************************** DOCUMENTATION ************************** ******************************************************************************/ /**************************************************************************//** @addtogroup RetargetIo @{ @n @section retargettextdisplay_doc Retarget TextDisplay Module The source code of the RETARGETTEXTDISPLAY module is implemented in kits/common/drivers/retargettextdisplay.c and retargettextdisplay.h. @li @ref retargettextdisplay_intro @li @ref retargettextdisplay_config @n @section retargettextdisplay_intro Introduction The RETARGETTEXTDISPLAY Library implements a stdout interface to a textdisplay device (@ref textdisplay_doc) in order for the user to print text by calling standard C language functions that prints text to stdout. @n @section retargettextdisplay_config Retarget TextDisplay Configuration. This section contains a description of the configuration parameters of the RETARGETTEXTDISPLAY Library. @verbatim #define RETARGETTEXTDISPLAY_SCROLL_MODE Set to 'true' to enable scroll mode on the text display device where stdout is retargeted. Set to 'false' to disable scroll mode. #define RETARGETTEXTDISPLAY_LINE_FEED_MODE Set to 'true' to enable adding Carriage Return (CR) to Line Feed (LF) characters on the text display device where stdout is retargeted. Set to 'false' to disable line feed mode. #define RETARGETTEXTDISPLAY_DISPLAY_NO Select which TEXTDISPLAY device number to retarget stdout to. Normally there is only one display device present in the system therefore this parameter should be zero. However if there are more than one display device the user may want to select a different display device. @endverbatim @} (end group RetargetIo) */
437823.c
/* This testcase is part of GDB, the GNU debugger.

   Copyright 2020-2021 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTE: this is a GDB test fixture, compiled as C++ (namespaces and
   qualified names).  C1::baz is declared in gold-gdb-index.h; the call
   here exists only so the resulting binary's .gdb_index (as produced by
   the gold linker) contains entries the test harness looks up.  Do not
   "clean up" the structure — the symbol layout is the test.  */

#include "gold-gdb-index.h"

namespace N1 {

/* Reference C1::baz so it is emitted into the debug info / index.  */
void
foo ()
{
  C1::baz ();
}

}

int
main ()
{
  return 0;
}
498061.c
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.

#include "GB_AxB_saxpy3.h"

// control parameters for generating parallel tasks
#define GB_NTASKS_PER_THREAD 2
#define GB_COSTLY 1.2
#define GB_FINE_WORK 2
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10

// free workspace only; leaves any constructed task list intact
#define GB_FREE_WORK                            \
{                                               \
    GB_WERK_POP (Fine_fl, int64_t) ;            \
    GB_WERK_POP (Fine_slice, int64_t) ;         \
    GB_WERK_POP (Coarse_Work, int64_t) ;        \
    GB_WERK_POP (Coarse_initial, int64_t) ;     \
}

// free workspace and the task list (error / early-exit paths)
#define GB_FREE_ALL                             \
{                                               \
    GB_FREE_WORK ;                              \
    GB_FREE_WERK (&SaxpyTasks, SaxpyTasks_size) ; \
}

//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------

// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes.  If the mask M is present, flmax also includes the
// number of entries in M(:,j).  GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax.  If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.

// By default, Gustavson vs Hash is selected automatically.  AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods.  However, if Hash is selected but the hash table
// equals or exceeds cvlen, then Gustavson's method is used instead.

static inline int64_t GB_hash_table_size
(
    int64_t flmax,      // max flop count for any vector computed by this task
    int64_t cvlen,      // vector length of C
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{

    int64_t hash_size ;

    if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
    {

        //----------------------------------------------------------------------
        // use Gustavson if selected explicitly or if flmax is large
        //----------------------------------------------------------------------

        // hash_size == cvlen is the sentinel for "use a dense Gustavson
        // workspace of size cvlen" rather than a hash table.
        hash_size = cvlen ;

    }
    else
    {

        //----------------------------------------------------------------------
        // flmax is small; consider hash vs Gustavson
        //----------------------------------------------------------------------

        // hash_size = 2 * (smallest power of 2 >= flmax)
        hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;
        bool use_Gustavson ;
        if (AxB_method == GxB_AxB_HASH)
        {
            // always use Hash method, unless the hash_size >= cvlen
            use_Gustavson = (hash_size >= cvlen) ;
        }
        else
        {
            // default: auto selection:
            // use Gustavson's method if hash_size is too big
            use_Gustavson = (hash_size >= cvlen/12) ;
        }
        if (use_Gustavson)
        {
            hash_size = cvlen ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (hash_size) ;
}

//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------

// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
static inline void GB_create_coarse_task
(
    int64_t kfirst,     // coarse task consists of vectors kfirst:klast
    int64_t klast,
    GB_saxpy3task_struct *SaxpyTasks,
    int taskid,         // taskid for this coarse task
    int64_t *Bflops,    // size bnvec; cum sum of flop counts for vectors of B
    int64_t cvlen,      // vector length of B and C
    double chunk,
    int nthreads_max,
    int64_t *Coarse_Work,   // workspace for parallel reduction for flop count
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{

    //--------------------------------------------------------------------------
    // find the max # of flops for any vector in this task
    //--------------------------------------------------------------------------

    int64_t nk = klast - kfirst + 1 ;
    int nth = GB_nthreads (nk, chunk, nthreads_max) ;

    // each thread finds the max flop count for a subset of the vectors
    int tid ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (tid = 0 ; tid < nth ; tid++)
    {
        int64_t my_flmax = 1, istart, iend ;
        GB_PARTITION (istart, iend, nk, tid, nth) ;
        for (int64_t i = istart ; i < iend ; i++)
        {
            int64_t kk = kfirst + i ;
            int64_t fl = Bflops [kk+1] - Bflops [kk] ;
            my_flmax = GB_IMAX (my_flmax, fl) ;
        }
        Coarse_Work [tid] = my_flmax ;
    }

    // combine results from each thread (serial max-reduction over nth values)
    int64_t flmax = 1 ;
    for (tid = 0 ; tid < nth ; tid++)
    {
        flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
    }

    // check the parallel computation against a serial recomputation
    #ifdef GB_DEBUG
    int64_t flmax2 = 1 ;
    for (int64_t kk = kfirst ; kk <= klast ; kk++)
    {
        int64_t fl = Bflops [kk+1] - Bflops [kk] ;
        flmax2 = GB_IMAX (flmax2, fl) ;
    }
    ASSERT (flmax == flmax2) ;
    #endif

    //--------------------------------------------------------------------------
    // define the coarse task
    //--------------------------------------------------------------------------

    SaxpyTasks [taskid].start = kfirst ;
    SaxpyTasks [taskid].end = klast ;
    SaxpyTasks [taskid].vector = -1 ;       // -1 marks a coarse task
    SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ;
    SaxpyTasks [taskid].Hi = NULL ;      // assigned later
    SaxpyTasks [taskid].Hf = NULL ;      // assigned later
    SaxpyTasks [taskid].Hx = NULL ;      // assigned later
    SaxpyTasks [taskid].my_cjnz = 0 ;    // for fine tasks only
    SaxpyTasks [taskid].leader = taskid ;   // a coarse task is its own team
    SaxpyTasks [taskid].team_size = 1 ;
}

//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------

GrB_Info GB_AxB_saxpy3_slice_balanced
(
    // inputs
    GrB_Matrix C,                   // output matrix
    const GrB_Matrix M,             // optional mask matrix
    const bool Mask_comp,           // if true, use !M
    const GrB_Matrix A,             // input matrix A
    const GrB_Matrix B,             // input matrix B
    GrB_Desc_Value AxB_method,      // Default, Gustavson, or Hash
    // outputs
    GB_saxpy3task_struct **SaxpyTasks_handle,
    size_t *SaxpyTasks_size_handle,
    bool *apply_mask,               // if true, apply M during saxpy3
    bool *M_packed_in_place,        // if true, use M in-place
    int *ntasks,                    // # of tasks created (coarse and fine)
    int *nfine,                     // # of fine tasks created
    int *nthreads,                  // # of threads to use
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;

    // initialize all outputs so every early-return leaves them well-defined
    (*apply_mask) = false ;
    (*M_packed_in_place) = false ;
    (*ntasks) = 0 ;
    (*nfine) = 0 ;
    (*nthreads) = 0 ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (B)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;
    ASSERT (!GB_ZOMBIES (B)) ;

    //--------------------------------------------------------------------------
    // determine the # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // define result and workspace
    //--------------------------------------------------------------------------

    GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
    size_t SaxpyTasks_size = 0 ;

    GB_WERK_DECLARE (Coarse_initial, int64_t) ;    // initial coarse tasks
    GB_WERK_DECLARE (Coarse_Work, int64_t) ;       // workspace for flop counts
    GB_WERK_DECLARE (Fine_slice, int64_t) ;
    GB_WERK_DECLARE (Fine_fl, int64_t) ;           // size max(nnz(B(:,j)))

    //--------------------------------------------------------------------------
    // get A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t avlen = A->vlen ;
    const int64_t anvec = A->nvec ;
    const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int8_t *restrict Bb = B->b ;
    const int64_t *restrict Bi = B->i ;
    const int64_t bvdim = B->vdim ;
    const int64_t bnz = GB_NNZ_HELD (B) ;
    const int64_t bnvec = B->nvec ;
    const int64_t bvlen = B->vlen ;
    const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;

    int64_t cvlen = avlen ;
    int64_t cvdim = bvdim ;

    //--------------------------------------------------------------------------
    // compute flop counts for each vector of B and C
    //--------------------------------------------------------------------------

    // NOTE(review): C->p is borrowed as scratch for the cumulative flop
    // counts; C must not be finalized until the tasks are built.
    int64_t Mwork = 0 ;
    int64_t *restrict Bflops = C->p ;    // use C->p as workspace for Bflops
    GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
        Context)) ;
    int64_t total_flops = Bflops [bnvec] ;
    double axbflops = total_flops - Mwork ;
    GBURBLE ("axbwork %g ", axbflops) ;
    if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;

    //--------------------------------------------------------------------------
    // determine if the mask M should be applied, or done later
    //--------------------------------------------------------------------------

    if (M == NULL)
    {

        //----------------------------------------------------------------------
        // M is not present
        //----------------------------------------------------------------------

        (*apply_mask) = false ;

    }
    else if (GB_is_packed (M))
    {

        //----------------------------------------------------------------------
        // M is present and full, bitmap, or sparse/hyper with all entries
        //----------------------------------------------------------------------

        // Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.
        (*apply_mask) = true ;

        // The work for M has not yet been added Bflops.
        // Each vector M(:,j) has cvlen entries.
        Mwork = cvlen * cvdim ;

        if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
        {
            if (axbflops < (double) Mwork * GB_MWORK_BETA)
            {
                // The mask is too costly to scatter into the Hf workspace.
                // Leave it in place and use all-hash tasks.
                AxB_method = GxB_AxB_HASH ;
            }
            else
            {
                // Scatter M into Hf and use all-Gustavson tasks.
                AxB_method = GxB_AxB_GUSTAVSON ;
            }
        }

        if (AxB_method == GxB_AxB_HASH)
        {
            // Use the hash method for all tasks (except for those tasks which
            // require a hash table size >= cvlen; those tasks use Gustavson).
            // Do not scatter the mask into the Hf hash workspace.  The work
            // for the mask is not accounted for in Bflops, so the hash tables
            // can be small.
            (*M_packed_in_place) = true ;
            GBURBLE ("(use packed mask in-place) ") ;
        }
        else
        {
            // Use the Gustavson method for all tasks, and scatter M into the
            // fine Gustavson workspace.  The work for M is not yet in the
            // Bflops cumulative sum.  Add it now.
            // NOTE(review): the ASSERT below appears to be missing its
            // trailing ';' — harmless when ASSERT expands to nothing, but
            // it would break debug builds; confirm against upstream.
            ASSERT (AxB_method == GxB_AxB_GUSTAVSON)
            int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
            int64_t kk ;
            #pragma omp parallel for num_threads(nth) schedule(static)
            for (kk = 0 ; kk <= bnvec ; kk++)
            {
                Bflops [kk] += cvlen * (kk+1) ;
            }
            total_flops = Bflops [bnvec] ;
            GBURBLE ("(use packed mask) ") ;
        }

    }
    else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
    {

        //----------------------------------------------------------------------
        // M is costly to use; apply it after C=A*B
        //----------------------------------------------------------------------

        // Do not use M during the computation of A*B.  Instead, compute C=A*B
        // and then apply the mask later.  Tell the caller that the mask should
        // not be applied, so that it will be applied later in GB_mxm.

        (*apply_mask) = false ;
        GBURBLE ("(discard mask) ") ;
        GB_FREE_ALL ;
        // GrB_NO_VALUE is not an error here: it tells the caller the analysis
        // stopped early and the mask must be applied after the multiply.
        return (GrB_NO_VALUE) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // use M during saxpy3
        //----------------------------------------------------------------------

        (*apply_mask) = true ;
        GBURBLE ("(use mask) ") ;
    }

    //--------------------------------------------------------------------------
    // determine # of threads and # of initial coarse tasks
    //--------------------------------------------------------------------------

    (*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
    int ntasks_initial = ((*nthreads) == 1) ?
        1 : (GB_NTASKS_PER_THREAD * (*nthreads)) ;

    //--------------------------------------------------------------------------
    // give preference to Gustavson when using few threads
    //--------------------------------------------------------------------------

    if ((*nthreads) <= 8 &&
        (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
    {
        // Unless a specific method has been explicitly requested, see if
        // Gustavson should be used with a small number of threads.
        // Matrix-vector has a maximum intensity of 1, so this heuristic only
        // applies to GrB_mxm.
        double abnz = GB_NNZ (A) + GB_NNZ (B) + 1 ;
        double workspace = (double) ntasks_initial * (double) cvlen ;
        double intensity = total_flops / abnz ;
        GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
            intensity, workspace / abnz) ;
        if (intensity >= 8 && workspace < abnz)
        {
            // work intensity is large, and Gustvason workspace is modest;
            // use Gustavson for all tasks
            AxB_method = GxB_AxB_GUSTAVSON ;
            GBURBLE (": select Gustvason) ") ;
        }
        else
        {
            // use default task creation: mix of Hash and Gustavson
            GBURBLE (") ") ;
        }
    }

    //--------------------------------------------------------------------------
    // determine target task size
    //--------------------------------------------------------------------------

    double target_task_size = ((double) total_flops) / ntasks_initial ;
    target_task_size = GB_IMAX (target_task_size, chunk) ;
    double target_fine_size = target_task_size / GB_FINE_WORK ;
    target_fine_size = GB_IMAX (target_fine_size, chunk) ;

    //--------------------------------------------------------------------------
    // determine # of parallel tasks
    //--------------------------------------------------------------------------

    int ncoarse = 0 ;       // # of coarse tasks
    int max_bjnz = 0 ;      // max (nnz (B (:,j))) of fine tasks

    // FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)

    if (ntasks_initial > 1)
    {

        //----------------------------------------------------------------------
        // construct initial coarse tasks
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
        if (Coarse_initial == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;

        //----------------------------------------------------------------------
        // split the work into coarse and fine tasks
        //----------------------------------------------------------------------

        // First pass: only COUNT the coarse and fine tasks (ncoarse, *nfine)
        // and max_bjnz, so the task array can be allocated; the second pass
        // below repeats this walk and fills the tasks in.
        for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
        {
            // get the initial coarse task
            int64_t kfirst = Coarse_initial [taskid] ;
            int64_t klast = Coarse_initial [taskid+1] ;
            int64_t task_ncols = klast - kfirst ;
            int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;

            if (task_ncols == 0)
            {
                // This coarse task is empty, having been squeezed out by
                // costly vectors in adjacent coarse tasks.
            }
            else if (task_flops > 2 * GB_COSTLY * target_task_size)
            {
                // This coarse task is too costly, because it contains one or
                // more costly vectors.  Split its vectors into a mixture of
                // coarse and fine tasks.

                int64_t kcoarse_start = kfirst ;

                for (int64_t kk = kfirst ; kk < klast ; kk++)
                {
                    // jflops = # of flops to compute a single vector A*B(:,j)
                    // where j == GBH (Bh, kk)
                    double jflops = Bflops [kk+1] - Bflops [kk] ;
                    // bjnz = nnz (B (:,j))
                    int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);

                    if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
                    {
                        // A*B(:,j) is costly; split it into 2 or more fine
                        // tasks.  First flush the prior coarse task, if any.
                        if (kcoarse_start < kk)
                        {
                            // vectors kcoarse_start to kk-1 form a single
                            // coarse task
                            ncoarse++ ;
                        }

                        // next coarse task (if any) starts at kk+1
                        kcoarse_start = kk+1 ;

                        // vectors kk will be split into multiple fine tasks
                        max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
                        int team_size = ceil (jflops / target_fine_size) ;
                        (*nfine) += team_size ;
                    }
                }

                // flush the last coarse task, if any
                if (kcoarse_start < klast)
                {
                    // vectors kcoarse_start to klast-1 form a single
                    // coarse task
                    ncoarse++ ;
                }
            }
            else
            {
                // This coarse task is OK as-is.
                ncoarse++ ;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // entire computation in a single fine or coarse task
        //----------------------------------------------------------------------

        if (bnvec == 1)
        {
            // If B is a single vector, and is computed by a single thread,
            // then a single fine task is used.
            (*nfine) = 1 ;
            ncoarse = 0 ;
        }
        else
        {
            // One thread uses a single coarse task if B is not a vector.
            (*nfine) = 0 ;
            ncoarse = 1 ;
        }
    }

    (*ntasks) = ncoarse + (*nfine) ;

    //--------------------------------------------------------------------------
    // allocate the tasks, and workspace to construct fine tasks
    //--------------------------------------------------------------------------

    SaxpyTasks = GB_MALLOC_WERK ((*ntasks), GB_saxpy3task_struct,
        &SaxpyTasks_size) ;

    GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
    if (max_bjnz > 0)
    {
        // also allocate workspace to construct fine tasks
        GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
        // Fine_fl will only fit on the Werk stack if max_bjnz is small,
        // but try anyway, in case it fits.  It is placed at the top of the
        // Werk stack.
        GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
    }

    if (SaxpyTasks == NULL || Coarse_Work == NULL ||
        (max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    // clear SaxpyTasks
    memset (SaxpyTasks, 0, SaxpyTasks_size) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks_initial > 1)
    {

        //----------------------------------------------------------------------
        // create the coarse and fine tasks
        //----------------------------------------------------------------------

        // Second pass: same walk as the counting pass above, now filling in
        // SaxpyTasks.  Fine tasks occupy ids 0:nfine-1, coarse tasks follow.
        int nf = 0 ;            // fine tasks have task id 0:nfine-1
        int nc = (*nfine) ;     // coarse task ids are nfine:ntasks-1

        for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
        {
            // get the initial coarse task
            int64_t kfirst = Coarse_initial [taskid] ;
            int64_t klast = Coarse_initial [taskid+1] ;
            int64_t task_ncols = klast - kfirst ;
            int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;

            if (task_ncols == 0)
            {
                // This coarse task is empty, having been squeezed out by
                // costly vectors in adjacent coarse tasks.
            }
            else if (task_flops > 2 * GB_COSTLY * target_task_size)
            {
                // This coarse task is too costly, because it contains one or
                // more costly vectors.  Split its vectors into a mixture of
                // coarse and fine tasks.

                int64_t kcoarse_start = kfirst ;

                for (int64_t kk = kfirst ; kk < klast ; kk++)
                {
                    // jflops = # of flops to compute a single vector A*B(:,j)
                    double jflops = Bflops [kk+1] - Bflops [kk] ;
                    // bjnz = nnz (B (:,j))
                    int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);

                    if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
                    {
                        // A*B(:,j) is costly; split it into 2 or more fine
                        // tasks.  First flush the prior coarse task, if any.
                        if (kcoarse_start < kk)
                        {
                            // kcoarse_start:kk-1 form a single coarse task
                            GB_create_coarse_task (kcoarse_start, kk-1,
                                SaxpyTasks, nc++, Bflops, cvlen, chunk,
                                nthreads_max, Coarse_Work, AxB_method) ;
                        }

                        // next coarse task (if any) starts at kk+1
                        kcoarse_start = kk+1 ;

                        // count the work for each entry B(k,j).  Do not
                        // include the work to scan M(:,j), since that will
                        // be evenly divided between all tasks in this team.
                        int64_t pB_start = GBP (Bp, kk, bvlen) ;
                        int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
                        int64_t s ;
                        #pragma omp parallel for num_threads(nth) \
                            schedule(static)
                        for (s = 0 ; s < bjnz ; s++)
                        {
                            // get B(k,j)
                            Fine_fl [s] = 1 ;
                            int64_t pB = pB_start + s ;
                            if (!GBB (Bb, pB)) continue ;
                            int64_t k = GBI (Bi, pB, bvlen) ;
                            // fl = flop count for just A(:,k)*B(k,j)
                            int64_t pA, pA_end ;
                            int64_t pleft = 0 ;
                            GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
                                anvec-1, k, &pA, &pA_end) ;
                            int64_t fl = pA_end - pA ;
                            Fine_fl [s] = fl ;
                            ASSERT (fl >= 0) ;
                        }

                        // cumulative sum of flops to compute A*B(:,j)
                        GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;

                        // slice B(:,j) into fine tasks
                        int team_size = ceil (jflops / target_fine_size) ;
                        ASSERT (Fine_slice != NULL) ;
                        GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);

                        // shared hash table for all fine tasks for A*B(:,j)
                        int64_t hsize =
                            GB_hash_table_size (jflops, cvlen, AxB_method) ;

                        // construct the fine tasks for C(:,j)=A*B(:,j)
                        int leader = nf ;
                        for (int fid = 0 ; fid < team_size ; fid++)
                        {
                            int64_t pstart = Fine_slice [fid] ;
                            int64_t pend = Fine_slice [fid+1] ;
                            int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
                            SaxpyTasks [nf].start = pB_start + pstart ;
                            SaxpyTasks [nf].end = pB_start + pend - 1 ;
                            SaxpyTasks [nf].vector = kk ;
                            SaxpyTasks [nf].hsize = hsize ;
                            SaxpyTasks [nf].Hi = NULL ;   // assigned later
                            SaxpyTasks [nf].Hf = NULL ;   // assigned later
                            SaxpyTasks [nf].Hx = NULL ;   // assigned later
                            SaxpyTasks [nf].my_cjnz = 0 ;
                            SaxpyTasks [nf].leader = leader ;
                            SaxpyTasks [nf].team_size = team_size ;
                            nf++ ;
                        }
                    }
                }

                // flush the last coarse task, if any
                if (kcoarse_start < klast)
                {
                    // kcoarse_start:klast-1 form a single coarse task
                    GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
                        nc++, Bflops, cvlen, chunk, nthreads_max, Coarse_Work,
                        AxB_method) ;
                }
            }
            else
            {
                // This coarse task is OK as-is.
                GB_create_coarse_task (kfirst, klast-1, SaxpyTasks, nc++,
                    Bflops, cvlen, chunk, nthreads_max, Coarse_Work,
                    AxB_method) ;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // entire computation in a single fine or coarse task
        //----------------------------------------------------------------------

        // create a single coarse task: hash or Gustavson
        GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen,
            1, 1, Coarse_Work, AxB_method) ;

        if (bnvec == 1)
        {
            // convert the single coarse task into a single fine task
            SaxpyTasks [0].start = 0 ;          // first entry in B(:,0)
            SaxpyTasks [0].end = bnz - 1 ;      // last entry in B(:,0)
            SaxpyTasks [0].vector = 0 ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*SaxpyTasks_handle) = SaxpyTasks ;
    (*SaxpyTasks_size_handle) = SaxpyTasks_size ;
    return (GrB_SUCCESS) ;
}
571552.c
// Cleric 9th-circle spell "mass harm": the negative-energy mirror of mass
// heal.  Cast on an ally/self it washes the caster's party and followers
// with negative energy (beneficial only to creatures with the
// "negative energy affinity" property); cast on an enemy it harms all
// attackers without that affinity.
#include <std.h>
#include <magic.h>
#include <party.h>
#include <daemons.h>
inherit SPELL;

void create()
{
    ::create();
    set_spell_name("mass harm");
    set_spell_level(([ "cleric" : 9 ]));
    set_affixed_spell_level(6);
    set_spell_sphere("necromancy");
    set_syntax("cast CLASS mass harm on TARGET");
    set_damage_desc("negative energy");
    set_description("This is a stronger version of harm spell. It acts like mass heal, but channels negative energy.");
    set_verbal_comp();
    set_somatic_comp();
    set_target_required(1);
    set_helpful_spell(1);
    splash_spell(1);
    set_save("fort");
}

// Refuse to cast without an explicit target.
int preSpell()
{
    if (!objectp(target)) {
        tell_object(caster, "You must specify a target for this spell.");
        return 0;
    }
    return 1;
}

// Casting flavor text; returning "display" defers messaging to this function.
string query_cast_string()
{
    tell_object(caster, "%^BOLD%^%^BLACK%^Your voice rings out as you begin to " +
        "chant a powerful prayer, gathering divine energy into your hands.");
    tell_room(place, "%^BOLD%^%^BLACK%^" + caster->QCN + "'s voice rings out as they" +
        " begin to chant a powerful prayer. Cupping " + caster->QP + " hands, " +
        "" + caster->QS + " starts to gather energy.", caster);
    return "display";
}

// Resolve the target list (party+followers when helpful, attackers when
// offensive), then apply the negative-energy wave to each target.
void spell_effect(int prof)
{
    int i;
    object* party_members = ({}), * attackers = ({}), * living = ({}), * targets = ({}), * followers = ({});
    set_helpful_spell(0);
    party_members = ob_party(caster);
    attackers = target_selector();
    followers = caster->query_followers();
    living = all_living(place);
    if (!objectp(target)) {
        target = caster;
    }
    if (target == caster || member_array(target, party_members) != -1 || member_array(target, followers) != -1) {
        // Helpful mode: only party/followers WITH negative energy affinity
        // (undead and the like) benefit from the wave.
        targets = filter_array(distinct_array(party_members + (followers - attackers)) + ({ caster }),
            (: !!$1->query_property("negative energy affinity") :));
        set_helpful_spell(1);
    }else if (member_array(target, attackers) != -1) {
        // Offensive mode: harm every attacker lacking the affinity.
        set_helpful_spell(0);
        if (do_save(target, -2)) {
            sdamage /= 2;
        }
        targets = filter_array(attackers, (: !$1->query_property("negative energy affinity") :));
    }else {
        targets = ({ target });
    }
    targets = distinct_array(targets);
    tell_room(place, "%^BOLD%^%^BLACK%^" + caster->QCN + " opens " +
        "" + caster->QP + " hands, releasing a fell wave of" +
        " energy as " + caster->QS + " shouts out the final words " +
        "of the prayer.", caster);
    if (sizeof(targets)) {
        int healamnt = calculate_healing();
        for (i = 0; i < sizeof(targets); i++) {
            if (!objectp(targets[i])) {
                continue;
            }
            if (!present(targets[i], place)) {
                continue;
            }
            if (!targets[i]->query_property("negative energy affinity") && !targets[i]->query_property("heart of darkness")) {
                if (targets[i] == caster) {
                    // BUGFIX: this warning used to follow the continue and
                    // was unreachable; tell the caster, then skip them.
                    tell_object(targets[i], "You shouldn't do that to yourself.");
                    continue;
                }
                set_helpful_spell(0);
                // NOTE(review): this rolls the save against the original
                // `target` rather than targets[i], and halves sdamage
                // cumulatively across iterations — confirm intent.
                if (do_save(target, -2)) {
                    sdamage /= 2;
                }
            }else {
                set_helpful_spell(1);
            }
            if (targets[i] == caster) {
                tell_object(targets[i], "%^BOLD%^%^BLACK%^A fell " +
                    "wave moves through you, carrying with it the essence of death.");
            }else {
                tell_room(place, "%^BOLD%^%^BLACK%^A fell wave moves through" +
                    " " + targets[i]->QCN + " carrying with it the essence of " +
                    "death, as " + caster->QCN + " voice rings out.", ({ targets[i], caster }));
                tell_object(caster, "%^BOLD%^%^BLACK%^A fell " +
                    "wave moves through " + targets[i]->QCN + ", carrying with it the essence of death.");
                tell_object(targets[i], "%^BOLD%^%^BLACK%^A fell " +
                    "wave moves through you, carrying with it the essence of death.");
            }
            damage_targ(targets[i], targets[i]->return_target_limb(), healamnt, "negative energy");
            if (query_spell_name() == "mass harm") {
                // Peaceful recipients also get status effects cleansed.
                if (member_array(targets[i], caster->query_attackers()) == -1) {
                    "/std/magic/cleanse"->cleanse(targets[i]);
                }
            }
        }
    }
    spell_successful();
    dest_effect();
    return;
}

// Amount channeled per target; parameter kept for interface compatibility
// with sibling heal spells (callers here invoke it with no argument).
int calculate_healing(object targ)
{
    return sdamage;
}

void dest_effect()
{
    ::dest_effect();
    if (objectp(TO)) {
        TO->remove();
    }
}
556214.c
/*
 * File: monty_funcs_2.c
 * Auth: rediet abdisa
 *
 */

#include "monty.h"

void monty_add(stack_t **stack, unsigned int line_number);
void monty_sub(stack_t **stack, unsigned int line_number);
void monty_div(stack_t **stack, unsigned int line_number);
void monty_mul(stack_t **stack, unsigned int line_number);
void monty_mod(stack_t **stack, unsigned int line_number);

/**
 * monty_add - Adds the top two values of a stack_t linked list.
 * @stack: A pointer to the top mode node of a stack_t linked list.
 * @line_number: The current working line number of a Monty bytecodes file.
 *
 * Description: The sum is written into the second node from the
 * top, after which the top node is popped off.
 */
void monty_add(stack_t **stack, unsigned int line_number)
{
	stack_t *first = (*stack)->next;

	if (first == NULL || first->next == NULL)
	{
		set_op_tok_error(short_stack_error(line_number, "add"));
		return;
	}

	first->next->n += first->n;
	monty_pop(stack, line_number);
}

/**
 * monty_sub - Subtracts the second value from the top of
 *             a stack_t linked list by the top value.
 * @stack: A pointer to the top mode node of a stack_t linked list.
 * @line_number: The current working line number of a Monty bytecodes file.
 *
 * Description: The difference is written into the second node from the
 * top, after which the top node is popped off.
 */
void monty_sub(stack_t **stack, unsigned int line_number)
{
	stack_t *first = (*stack)->next;

	if (first == NULL || first->next == NULL)
	{
		set_op_tok_error(short_stack_error(line_number, "sub"));
		return;
	}

	first->next->n -= first->n;
	monty_pop(stack, line_number);
}

/**
 * monty_div - Divides the second value from the top of
 *             a stack_t linked list by the top value.
 * @stack: A pointer to the top mode node of a stack_t linked list.
 * @line_number: The current working line number of a Monty bytecodes file.
 *
 * Description: The quotient is written into the second node from the
 * top, after which the top node is popped off. Division by a zero
 * top value is reported as an error instead.
 */
void monty_div(stack_t **stack, unsigned int line_number)
{
	stack_t *first = (*stack)->next;

	if (first == NULL || first->next == NULL)
	{
		set_op_tok_error(short_stack_error(line_number, "div"));
		return;
	}

	if (first->n == 0)
	{
		set_op_tok_error(div_error(line_number));
		return;
	}

	first->next->n /= first->n;
	monty_pop(stack, line_number);
}

/**
 * monty_mul - Multiplies the second value from the top of
 *             a stack_t linked list by the top value.
 * @stack: A pointer to the top mode node of a stack_t linked list.
 * @line_number: The current working line number of a Monty bytecodes file.
 *
 * Description: The product is written into the second node from the
 * top, after which the top node is popped off.
 */
void monty_mul(stack_t **stack, unsigned int line_number)
{
	stack_t *first = (*stack)->next;

	if (first == NULL || first->next == NULL)
	{
		set_op_tok_error(short_stack_error(line_number, "mul"));
		return;
	}

	first->next->n *= first->n;
	monty_pop(stack, line_number);
}

/**
 * monty_mod - Computes the modulus of the second value from the
 *             top of a stack_t linked list by the top value.
 * @stack: A pointer to the top mode node of a stack_t linked list.
 * @line_number: The current working line number of a Monty bytecodes file.
 *
 * Description: The remainder is written into the second node from the
 * top, after which the top node is popped off. A zero top value is
 * reported as a division error instead.
 */
void monty_mod(stack_t **stack, unsigned int line_number)
{
	stack_t *first = (*stack)->next;

	if (first == NULL || first->next == NULL)
	{
		set_op_tok_error(short_stack_error(line_number, "mod"));
		return;
	}

	if (first->n == 0)
	{
		set_op_tok_error(div_error(line_number));
		return;
	}

	first->next->n %= first->n;
	monty_pop(stack, line_number);
}
21264.c
/*
 * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* ====================================================================
 * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
 * ECC cipher suite support in OpenSSL originally developed by
 * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
 */

#include <limits.h>
#include <string.h>
#include <stdio.h>
#include "../ssl_locl.h"
#include "statem_locl.h"
#include <openssl/buffer.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/x509.h>

/*
 * send s->init_buf in records of type 'type' (SSL3_RT_HANDSHAKE or
 * SSL3_RT_CHANGE_CIPHER_SPEC)
 *
 * Returns 1 when the whole buffer has been flushed, 0 when a partial write
 * occurred (state advanced via init_off/init_num), -1 on write error.
 */
int ssl3_do_write(SSL *s, int type)
{
    int ret;

    ret = ssl3_write_bytes(s, type, &s->init_buf->data[s->init_off],
                           s->init_num);
    if (ret < 0)
        return (-1);
    if (type == SSL3_RT_HANDSHAKE)
        /*
         * should not be done for 'Hello Request's, but in that case we'll
         * ignore the result anyway
         */
        if (!ssl3_finish_mac(s,
                             (unsigned char *)&s->init_buf->data[s->init_off],
                             ret))
            return -1;

    if (ret == s->init_num) {
        if (s->msg_callback)
            s->msg_callback(1, s->version, type, s->init_buf->data,
                            (size_t)(s->init_off + s->init_num), s,
                            s->msg_callback_arg);
        return (1);
    }
    /* Partial write: remember how far we got for the next attempt */
    s->init_off += ret;
    s->init_num -= ret;
    return (0);
}

/*
 * Build our Finished message: compute the final Finished MAC for 'sender'
 * into the handshake buffer and stash a copy for renegotiation checks.
 * Returns 1 on success, 0 on error.
 */
int tls_construct_finished(SSL *s, const char *sender, int slen)
{
    unsigned char *p;
    int i;
    unsigned long l;

    p = ssl_handshake_start(s);

    i = s->method->ssl3_enc->final_finish_mac(s, sender, slen,
                                              s->s3->tmp.finish_md);
    if (i <= 0)
        return 0;

    s->s3->tmp.finish_md_len = i;
    memcpy(p, s->s3->tmp.finish_md, i);
    l = i;

    /*
     * Copy the finished so we can use it for renegotiation checks
     */
    if (!s->server) {
        OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
        memcpy(s->s3->previous_client_finished, s->s3->tmp.finish_md, i);
        s->s3->previous_client_finished_len = i;
    } else {
        OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
        memcpy(s->s3->previous_server_finished, s->s3->tmp.finish_md, i);
        s->s3->previous_server_finished_len = i;
    }

    if (!ssl_set_handshake_header(s, SSL3_MT_FINISHED, l)) {
        SSLerr(SSL_F_TLS_CONSTRUCT_FINISHED, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    return 1;
}

/*
 * ssl3_take_mac calculates the Finished MAC for the handshakes messages seen
 * so far (the peer's label: as a client we compute the server label, and
 * vice versa), storing it in s->s3->tmp.peer_finish_md for later comparison.
 */
int ssl3_take_mac(SSL *s)
{
    const char *sender;
    int slen;

    if (!s->server) {
        sender = s->method->ssl3_enc->server_finished_label;
        slen = s->method->ssl3_enc->server_finished_label_len;
    } else {
        sender = s->method->ssl3_enc->client_finished_label;
        slen = s->method->ssl3_enc->client_finished_label_len;
    }

    s->s3->tmp.peer_finish_md_len =
        s->method->ssl3_enc->final_finish_mac(s, sender, slen,
                                              s->s3->tmp.peer_finish_md);

    if (s->s3->tmp.peer_finish_md_len == 0) {
        SSLerr(SSL_F_SSL3_TAKE_MAC, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    return 1;
}

/*
 * Process an incoming ChangeCipherSpec: validate its length, ensure a cipher
 * has been negotiated, switch the read cipher state, and (for DTLS) reset
 * sequence numbers. Sends a fatal alert and returns MSG_PROCESS_ERROR on
 * failure.
 */
MSG_PROCESS_RETURN tls_process_change_cipher_spec(SSL *s, PACKET *pkt)
{
    int al;
    long remain;

    remain = PACKET_remaining(pkt);
    /*
     * 'Change Cipher Spec' is just a single byte, which should already have
     * been consumed by ssl_get_message() so there should be no bytes left,
     * unless we're using DTLS1_BAD_VER, which has an extra 2 bytes
     */
    if (SSL_IS_DTLS(s)) {
        if ((s->version == DTLS1_BAD_VER
             && remain != DTLS1_CCS_HEADER_LENGTH + 1)
            || (s->version != DTLS1_BAD_VER
                && remain != DTLS1_CCS_HEADER_LENGTH - 1)) {
            al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PROCESS_CHANGE_CIPHER_SPEC,
                   SSL_R_BAD_CHANGE_CIPHER_SPEC);
            goto f_err;
        }
    } else {
        if (remain != 0) {
            al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PROCESS_CHANGE_CIPHER_SPEC,
                   SSL_R_BAD_CHANGE_CIPHER_SPEC);
            goto f_err;
        }
    }

    /* Check we have a cipher to change to */
    if (s->s3->tmp.new_cipher == NULL) {
        al = SSL_AD_UNEXPECTED_MESSAGE;
        SSLerr(SSL_F_TLS_PROCESS_CHANGE_CIPHER_SPEC, SSL_R_CCS_RECEIVED_EARLY);
        goto f_err;
    }

    /* Flag consulted by tls_process_finished() to catch FIN-before-CCS */
    s->s3->change_cipher_spec = 1;
    if (!ssl3_do_change_cipher_spec(s)) {
        al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_CHANGE_CIPHER_SPEC, ERR_R_INTERNAL_ERROR);
        goto f_err;
    }

    if (SSL_IS_DTLS(s)) {
        dtls1_reset_seq_numbers(s, SSL3_CC_READ);

        if (s->version == DTLS1_BAD_VER)
            s->d1->handshake_read_seq++;

#ifndef OPENSSL_NO_SCTP
        /*
         * Remember that a CCS has been received, so that an old key of
         * SCTP-Auth can be deleted when a CCS is sent. Will be ignored if no
         * SCTP is used
         */
        BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_AUTH_CCS_RCVD, 1, NULL);
#endif
    }

    return MSG_PROCESS_CONTINUE_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}

/*
 * Process the peer's Finished message: verify that a CCS preceded it, that
 * its length matches, and that its MAC equals the one we computed in
 * ssl3_take_mac() (constant-time compare). On success the verify data is
 * saved for renegotiation checks.
 */
MSG_PROCESS_RETURN tls_process_finished(SSL *s, PACKET *pkt)
{
    int al, i;

    /* If this occurs, we have missed a message */
    if (!s->s3->change_cipher_spec) {
        al = SSL_AD_UNEXPECTED_MESSAGE;
        SSLerr(SSL_F_TLS_PROCESS_FINISHED, SSL_R_GOT_A_FIN_BEFORE_A_CCS);
        goto f_err;
    }
    s->s3->change_cipher_spec = 0;

    i = s->s3->tmp.peer_finish_md_len;

    if ((unsigned long)i != PACKET_remaining(pkt)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_FINISHED, SSL_R_BAD_DIGEST_LENGTH);
        goto f_err;
    }

    /* Constant-time compare to avoid a timing oracle on the verify data */
    if (CRYPTO_memcmp(PACKET_data(pkt), s->s3->tmp.peer_finish_md, i) != 0) {
        al = SSL_AD_DECRYPT_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_FINISHED, SSL_R_DIGEST_CHECK_FAILED);
        goto f_err;
    }

    /*
     * Copy the finished so we can use it for renegotiation checks
     */
    if (s->server) {
        OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
        memcpy(s->s3->previous_client_finished, s->s3->tmp.peer_finish_md, i);
        s->s3->previous_client_finished_len = i;
    } else {
        OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
        memcpy(s->s3->previous_server_finished, s->s3->tmp.peer_finish_md, i);
        s->s3->previous_server_finished_len = i;
    }

    return MSG_PROCESS_FINISHED_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}

/*
 * Place the single-byte ChangeCipherSpec message in init_buf ready for
 * sending. Always returns 1.
 */
int tls_construct_change_cipher_spec(SSL *s)
{
    unsigned char *p;

    p = (unsigned char *)s->init_buf->data;
    *p = SSL3_MT_CCS;
    s->init_num = 1;
    s->init_off = 0;

    return 1;
}

/*
 * Write the certificate chain for |cpk| into the handshake buffer as a
 * Certificate message body. Returns the total message length (including
 * the handshake header) or 0 on error.
 */
unsigned long ssl3_output_cert_chain(SSL *s, CERT_PKEY *cpk)
{
    unsigned char *p;
    /* Reserve room for the 3-byte chain length plus the handshake header */
    unsigned long l = 3 + SSL_HM_HEADER_LENGTH(s);

    if (!ssl_add_cert_chain(s, cpk, &l))
        return 0;

    l -= 3 + SSL_HM_HEADER_LENGTH(s);
    p = ssl_handshake_start(s);
    l2n3(l, p);
    l += 3;

    if (!ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE, l)) {
        SSLerr(SSL_F_SSL3_OUTPUT_CERT_CHAIN, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    return l + SSL_HM_HEADER_LENGTH(s);
}

/*
 * Tidy up after a completed handshake: release key-block and buffer
 * resources, update the session cache and statistics, notify the
 * info callback, and reset DTLS handshake sequence state.
 */
WORK_STATE tls_finish_handshake(SSL *s, WORK_STATE wst)
{
    void (*cb) (const SSL *ssl, int type, int val) = NULL;

    /* clean a few things up */
    ssl3_cleanup_key_block(s);

    if (!SSL_IS_DTLS(s)) {
        /*
         * We don't do this in DTLS because we may still need the init_buf
         * in case there are any unexpected retransmits
         */
        BUF_MEM_free(s->init_buf);
        s->init_buf = NULL;
    }

    ssl_free_wbio_buffer(s);

    s->init_num = 0;

    if (!s->server || s->renegotiate == 2) {
        /* skipped if we just sent a HelloRequest */
        s->renegotiate = 0;
        s->new_session = 0;

        if (s->server) {
            ssl_update_cache(s, SSL_SESS_CACHE_SERVER);

            s->ctx->stats.sess_accept_good++;
            s->handshake_func = ossl_statem_accept;

            if (SSL_IS_DTLS(s) && !s->hit) {
                /*
                 * We are finishing after the client. We start the timer going
                 * in case there are any retransmits of our final flight
                 * required.
                 */
                dtls1_start_timer(s);
            }
        } else {
            ssl_update_cache(s, SSL_SESS_CACHE_CLIENT);

            if (s->hit)
                s->ctx->stats.sess_hit++;

            s->handshake_func = ossl_statem_connect;
            s->ctx->stats.sess_connect_good++;

            if (SSL_IS_DTLS(s) && s->hit) {
                /*
                 * We are finishing after the server. We start the timer going
                 * in case there are any retransmits of our final flight
                 * required.
                 */
                dtls1_start_timer(s);
            }
        }

        if (s->info_callback != NULL)
            cb = s->info_callback;
        else if (s->ctx->info_callback != NULL)
            cb = s->ctx->info_callback;

        if (cb != NULL)
            cb(s, SSL_CB_HANDSHAKE_DONE, 1);

        if (SSL_IS_DTLS(s)) {
            /* done with handshaking */
            s->d1->handshake_read_seq = 0;
            s->d1->handshake_write_seq = 0;
            s->d1->next_handshake_write_seq = 0;
            dtls1_clear_received_buffer(s);
        }
    }

    return WORK_FINISHED_STOP;
}

/*
 * Read the 4-byte handshake message header (or a raw CCS byte) into
 * init_buf, silently skipping well-formed HelloRequests on the client side.
 * On success, *mt is the message type and the message size/offsets are set
 * up for tls_get_message_body(). Returns 1 on success, 0 on error or if a
 * retry is needed (rwstate set to SSL_READING).
 */
int tls_get_message_header(SSL *s, int *mt)
{
    /* s->init_num < SSL3_HM_HEADER_LENGTH */
    int skip_message, i, recvd_type, al;
    unsigned char *p;
    unsigned long l;

    p = (unsigned char *)s->init_buf->data;

    do {
        while (s->init_num < SSL3_HM_HEADER_LENGTH) {
            i = s->method->ssl_read_bytes(s, SSL3_RT_HANDSHAKE, &recvd_type,
                                          &p[s->init_num],
                                          SSL3_HM_HEADER_LENGTH - s->init_num,
                                          0);
            if (i <= 0) {
                s->rwstate = SSL_READING;
                return 0;
            }
            if (recvd_type == SSL3_RT_CHANGE_CIPHER_SPEC) {
                /*
                 * A ChangeCipherSpec must be a single byte and may not occur
                 * in the middle of a handshake message.
                 */
                if (s->init_num != 0 || i != 1 || p[0] != SSL3_MT_CCS) {
                    al = SSL_AD_UNEXPECTED_MESSAGE;
                    SSLerr(SSL_F_TLS_GET_MESSAGE_HEADER,
                           SSL_R_BAD_CHANGE_CIPHER_SPEC);
                    goto f_err;
                }
                s->s3->tmp.message_type = *mt = SSL3_MT_CHANGE_CIPHER_SPEC;
                s->init_num = i - 1;
                s->init_msg = s->init_buf->data;
                s->s3->tmp.message_size = i;
                return 1;
            } else if (recvd_type != SSL3_RT_HANDSHAKE) {
                al = SSL_AD_UNEXPECTED_MESSAGE;
                SSLerr(SSL_F_TLS_GET_MESSAGE_HEADER, SSL_R_CCS_RECEIVED_EARLY);
                goto f_err;
            }
            s->init_num += i;
        }

        skip_message = 0;
        if (!s->server)
            if (p[0] == SSL3_MT_HELLO_REQUEST)
                /*
                 * The server may always send 'Hello Request' messages --
                 * we are doing a handshake anyway now, so ignore them if
                 * their format is correct. Does not count for 'Finished'
                 * MAC.
                 */
                if (p[1] == 0 && p[2] == 0 && p[3] == 0) {
                    s->init_num = 0;
                    skip_message = 1;

                    if (s->msg_callback)
                        s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
                                        p, SSL3_HM_HEADER_LENGTH, s,
                                        s->msg_callback_arg);
                }
    } while (skip_message);
    /* s->init_num == SSL3_HM_HEADER_LENGTH */

    *mt = *p;
    s->s3->tmp.message_type = *(p++);

    if (RECORD_LAYER_is_sslv2_record(&s->rlayer)) {
        /*
         * Only happens with SSLv3+ in an SSLv2 backward compatible
         * ClientHello
         *
         * Total message size is the remaining record bytes to read
         * plus the SSL3_HM_HEADER_LENGTH bytes that we already read
         */
        l = RECORD_LAYER_get_rrec_length(&s->rlayer)
            + SSL3_HM_HEADER_LENGTH;
        s->s3->tmp.message_size = l;

        s->init_msg = s->init_buf->data;
        s->init_num = SSL3_HM_HEADER_LENGTH;
    } else {
        n2l3(p, l);
        /* BUF_MEM_grow takes an 'int' parameter */
        if (l > (INT_MAX - SSL3_HM_HEADER_LENGTH)) {
            al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_GET_MESSAGE_HEADER, SSL_R_EXCESSIVE_MESSAGE_SIZE);
            goto f_err;
        }
        s->s3->tmp.message_size = l;

        s->init_msg = s->init_buf->data + SSL3_HM_HEADER_LENGTH;
        s->init_num = 0;
    }

    return 1;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    return 0;
}

/*
 * Read the body of the handshake message whose header was parsed by
 * tls_get_message_header(), feed the complete message into the handshake
 * MAC, and (for a Finished message) snapshot the expected verify data
 * first via ssl3_take_mac(). *len receives the body length. Returns 1 on
 * success, 0 on error or retry.
 */
int tls_get_message_body(SSL *s, unsigned long *len)
{
    long n;
    unsigned char *p;
    int i;

    if (s->s3->tmp.message_type == SSL3_MT_CHANGE_CIPHER_SPEC) {
        /* We've already read everything in */
        *len = (unsigned long)s->init_num;
        return 1;
    }

    p = s->init_msg;
    n = s->s3->tmp.message_size - s->init_num;
    while (n > 0) {
        i = s->method->ssl_read_bytes(s, SSL3_RT_HANDSHAKE, NULL,
                                      &p[s->init_num], n, 0);
        if (i <= 0) {
            s->rwstate = SSL_READING;
            *len = 0;
            return 0;
        }
        s->init_num += i;
        n -= i;
    }

    /*
     * If receiving Finished, record MAC of prior handshake messages for
     * Finished verification.
     */
    if (*(s->init_buf->data) == SSL3_MT_FINISHED && !ssl3_take_mac(s)) {
        /* SSLfatal() already called */
        *len = 0;
        return 0;
    }

    /* Feed this message into MAC computation. */
    if (RECORD_LAYER_is_sslv2_record(&s->rlayer)) {
        /* SSLv2-format ClientHello: no handshake header to include */
        if (!ssl3_finish_mac(s, (unsigned char *)s->init_buf->data,
                             s->init_num)) {
            SSLerr(SSL_F_TLS_GET_MESSAGE_BODY, ERR_R_EVP_LIB);
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
            *len = 0;
            return 0;
        }
        if (s->msg_callback)
            s->msg_callback(0, SSL2_VERSION, 0, s->init_buf->data,
                            (size_t)s->init_num, s, s->msg_callback_arg);
    } else {
        if (!ssl3_finish_mac(s, (unsigned char *)s->init_buf->data,
                             s->init_num + SSL3_HM_HEADER_LENGTH)) {
            SSLerr(SSL_F_TLS_GET_MESSAGE_BODY, ERR_R_EVP_LIB);
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
            *len = 0;
            return 0;
        }
        if (s->msg_callback)
            s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE, s->init_buf->data,
                            (size_t)s->init_num + SSL3_HM_HEADER_LENGTH, s,
                            s->msg_callback_arg);
    }

    /*
     * init_num should never be negative...should probably be declared
     * unsigned
     */
    if (s->init_num < 0) {
        SSLerr(SSL_F_TLS_GET_MESSAGE_BODY, ERR_R_INTERNAL_ERROR);
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        *len = 0;
        return 0;
    }

    *len = (unsigned long)s->init_num;
    return 1;
}

/*
 * Map a certificate/public key to its SSL_PKEY_* slot index, or -1 for
 * unsupported key types. |pk| may be NULL, in which case the key is taken
 * from |x|.
 */
int ssl_cert_type(const X509 *x, const EVP_PKEY *pk)
{
    if (pk == NULL && (pk = X509_get0_pubkey(x)) == NULL)
        return -1;

    switch (EVP_PKEY_id(pk)) {
    default:
        return -1;
    case EVP_PKEY_RSA:
        return SSL_PKEY_RSA_ENC;
    case EVP_PKEY_DSA:
        return SSL_PKEY_DSA_SIGN;
#ifndef OPENSSL_NO_EC
    case EVP_PKEY_EC:
        return SSL_PKEY_ECC;
#endif
#ifndef OPENSSL_NO_GOST
    case NID_id_GostR3410_2001:
        return SSL_PKEY_GOST01;
    case NID_id_GostR3410_2012_256:
        return SSL_PKEY_GOST12_256;
    case NID_id_GostR3410_2012_512:
        return SSL_PKEY_GOST12_512;
#endif
    }
}

/*
 * Translate an X509_V_ERR_* verification result into the TLS alert code
 * (SSL_AD_*) to send to the peer.
 */
int ssl_verify_alarm_type(long type)
{
    int al;

    switch (type) {
    case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
    case X509_V_ERR_UNABLE_TO_GET_CRL:
    case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER:
        al = SSL_AD_UNKNOWN_CA;
        break;
    case X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE:
    case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE:
    case X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY:
    case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD:
    case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD:
    case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD:
    case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD:
    case X509_V_ERR_CERT_NOT_YET_VALID:
    case X509_V_ERR_CRL_NOT_YET_VALID:
    case X509_V_ERR_CERT_UNTRUSTED:
    case X509_V_ERR_CERT_REJECTED:
    case X509_V_ERR_HOSTNAME_MISMATCH:
    case X509_V_ERR_EMAIL_MISMATCH:
    case X509_V_ERR_IP_ADDRESS_MISMATCH:
    case X509_V_ERR_DANE_NO_MATCH:
    case X509_V_ERR_EE_KEY_TOO_SMALL:
    case X509_V_ERR_CA_KEY_TOO_SMALL:
    case X509_V_ERR_CA_MD_TOO_WEAK:
        al = SSL_AD_BAD_CERTIFICATE;
        break;
    case X509_V_ERR_CERT_SIGNATURE_FAILURE:
    case X509_V_ERR_CRL_SIGNATURE_FAILURE:
        al = SSL_AD_DECRYPT_ERROR;
        break;
    case X509_V_ERR_CERT_HAS_EXPIRED:
    case X509_V_ERR_CRL_HAS_EXPIRED:
        al = SSL_AD_CERTIFICATE_EXPIRED;
        break;
    case X509_V_ERR_CERT_REVOKED:
        al = SSL_AD_CERTIFICATE_REVOKED;
        break;
    case X509_V_ERR_UNSPECIFIED:
    case X509_V_ERR_OUT_OF_MEM:
    case X509_V_ERR_INVALID_CALL:
    case X509_V_ERR_STORE_LOOKUP:
        al = SSL_AD_INTERNAL_ERROR;
        break;
    case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT:
    case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN:
    case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY:
    case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE:
    case X509_V_ERR_CERT_CHAIN_TOO_LONG:
    case X509_V_ERR_PATH_LENGTH_EXCEEDED:
    case X509_V_ERR_INVALID_CA:
        al = SSL_AD_UNKNOWN_CA;
        break;
    case X509_V_ERR_APPLICATION_VERIFICATION:
        al = SSL_AD_HANDSHAKE_FAILURE;
        break;
    case X509_V_ERR_INVALID_PURPOSE:
        al = SSL_AD_UNSUPPORTED_CERTIFICATE;
        break;
    default:
        al = SSL_AD_CERTIFICATE_UNKNOWN;
        break;
    }
    return (al);
}

/*
 * Return 1 if compression is permitted for |s| (not disabled by options or
 * the security level), 0 otherwise.
 */
int ssl_allow_compression(SSL *s)
{
    if (s->options & SSL_OP_NO_COMPRESSION)
        return 0;
    return ssl_security(s, SSL_SECOP_COMPRESSION, 0, 0, NULL);
}

/*
 * Compare protocol versions |a| and |b| for connection |s| (-1/0/1).
 * DTLS version numbers decrease as the protocol gets newer, so DTLS uses
 * its own comparison macro.
 */
static int version_cmp(const SSL *s, int a, int b)
{
    int dtls = SSL_IS_DTLS(s);

    if (a == b)
        return 0;
    if (!dtls)
        return a < b ? -1 : 1;
    return DTLS_VERSION_LT(a, b) ? -1 : 1;
}

/*
 * One row of the version negotiation tables: a protocol version and its
 * client/server method constructors (NULL when compiled out).
 */
typedef struct {
    int version;
    const SSL_METHOD *(*cmeth) (void);
    const SSL_METHOD *(*smeth) (void);
} version_info;

#if TLS_MAX_VERSION != TLS1_2_VERSION
# error Code needs update for TLS_method() support beyond TLS1_2_VERSION.
#endif

/* Ordered newest-to-oldest; terminated by a zero version */
static const version_info tls_version_table[] = {
#ifndef OPENSSL_NO_TLS1_2
    {TLS1_2_VERSION, tlsv1_2_client_method, tlsv1_2_server_method},
#else
    {TLS1_2_VERSION, NULL, NULL},
#endif
#ifndef OPENSSL_NO_TLS1_1
    {TLS1_1_VERSION, tlsv1_1_client_method, tlsv1_1_server_method},
#else
    {TLS1_1_VERSION, NULL, NULL},
#endif
#ifndef OPENSSL_NO_TLS1
    {TLS1_VERSION, tlsv1_client_method, tlsv1_server_method},
#else
    {TLS1_VERSION, NULL, NULL},
#endif
#ifndef OPENSSL_NO_SSL3
    {SSL3_VERSION, sslv3_client_method, sslv3_server_method},
#else
    {SSL3_VERSION, NULL, NULL},
#endif
    {0, NULL, NULL},
};

#if DTLS_MAX_VERSION != DTLS1_2_VERSION
# error Code needs update for DTLS_method() support beyond DTLS1_2_VERSION.
#endif

static const version_info dtls_version_table[] = {
#ifndef OPENSSL_NO_DTLS1_2
    {DTLS1_2_VERSION, dtlsv1_2_client_method, dtlsv1_2_server_method},
#else
    {DTLS1_2_VERSION, NULL, NULL},
#endif
#ifndef OPENSSL_NO_DTLS1
    {DTLS1_VERSION, dtlsv1_client_method, dtlsv1_server_method},
    {DTLS1_BAD_VER, dtls_bad_ver_client_method, NULL},
#else
    {DTLS1_VERSION, NULL, NULL},
    {DTLS1_BAD_VER, NULL, NULL},
#endif
    {0, NULL, NULL},
};

/*
 * ssl_method_error - Check whether an SSL_METHOD is enabled.
 *
 * @s: The SSL handle for the candidate method
 * @method: the intended method.
 *
 * Returns 0 on success, or an SSL error reason on failure.
 */
static int ssl_method_error(const SSL *s, const SSL_METHOD *method)
{
    int version = method->version;

    if ((s->min_proto_version != 0
         && version_cmp(s, version, s->min_proto_version) < 0)
        || ssl_security(s, SSL_SECOP_VERSION, 0, version, NULL) == 0)
        return SSL_R_VERSION_TOO_LOW;

    if (s->max_proto_version != 0
        && version_cmp(s, version, s->max_proto_version) > 0)
        return SSL_R_VERSION_TOO_HIGH;

    if ((s->options & method->mask) != 0)
        return SSL_R_UNSUPPORTED_PROTOCOL;
    if ((method->flags & SSL_METHOD_NO_SUITEB) != 0 && tls1_suiteb(s))
        return SSL_R_AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE;
    else if ((method->flags & SSL_METHOD_NO_FIPS) != 0 && FIPS_mode())
        return SSL_R_AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE;

    return 0;
}

/*
 * ssl_version_supported - Check that the specified `version` is supported by
 * `SSL *` instance
 *
 * @s: The SSL handle for the candidate method
 * @version: Protocol version to test against
 *
 * Returns 1 when supported, otherwise 0
 */
int ssl_version_supported(const SSL *s, int version)
{
    const version_info *vent;
    const version_info *table;

    switch (s->method->version) {
    default:
        /* Version should match method version for non-ANY method */
        return version_cmp(s, version, s->version) == 0;

    case TLS_ANY_VERSION:
        table = tls_version_table;
        break;
    case DTLS_ANY_VERSION:
        table = dtls_version_table;
        break;
    }

    for (vent = table;
         vent->version != 0 && version_cmp(s, version, vent->version) <= 0;
         ++vent) {
        if (vent->cmeth != NULL
            && version_cmp(s, version, vent->version) == 0
            && ssl_method_error(s, vent->cmeth()) == 0) {
            return 1;
        }
    }
    return 0;
}

/*
 * ssl_check_version_downgrade - In response to RFC7507 SCSV version
 * fallback indication from a client check whether we're using the highest
 * supported protocol version.
 *
 * @s server SSL handle.
 *
 * Returns 1 when using the highest enabled version, 0 otherwise.
 */
int ssl_check_version_downgrade(SSL *s)
{
    const version_info *vent;
    const version_info *table;

    /*
     * Check that the current protocol is the highest enabled version
     * (according to s->ctx->method, as version negotiation may have changed
     * s->method).
     */
    if (s->version == s->ctx->method->version)
        return 1;

    /*
     * Apparently we're using a version-flexible SSL_METHOD (not at its
     * highest protocol version).
     */
    if (s->ctx->method->version == TLS_method()->version)
        table = tls_version_table;
    else if (s->ctx->method->version == DTLS_method()->version)
        table = dtls_version_table;
    else {
        /* Unexpected state; fail closed. */
        return 0;
    }

    for (vent = table; vent->version != 0; ++vent) {
        if (vent->smeth != NULL && ssl_method_error(s, vent->smeth()) == 0)
            return s->version == vent->version;
    }
    return 0;
}

/*
 * ssl_set_version_bound - set an upper or lower bound on the supported (D)TLS
 * protocols, provided the initial (D)TLS method is version-flexible. This
 * function sanity-checks the proposed value and makes sure the method is
 * version-flexible, then sets the limit if all is well.
 *
 * @method_version: The version of the current SSL_METHOD.
 * @version: the intended limit.
 * @bound: pointer to limit to be updated.
 *
 * Returns 1 on success, 0 on failure.
 */
int ssl_set_version_bound(int method_version, int version, int *bound)
{
    if (version == 0) {
        *bound = version;
        return 1;
    }

    /*-
     * Restrict TLS methods to TLS protocol versions.
     * Restrict DTLS methods to DTLS protocol versions.
     * Note, DTLS version numbers are decreasing, use comparison macros.
     *
     * Note that for both lower-bounds we use explicit versions, not
     * (D)TLS_MIN_VERSION.  This is because we don't want to break user
     * configurations.  If the MIN (supported) version ever rises, the user's
     * "floor" remains valid even if no longer available.  We don't expect the
     * MAX ceiling to ever get lower, so making that variable makes sense.
     */
    switch (method_version) {
    default:
        /*
         * XXX For fixed version methods, should we always fail and not set any
         * bounds, always succeed and not set any bounds, or set the bounds and
         * arrange to fail later if they are not met?  At present fixed-version
         * methods are not subject to controls that disable individual protocol
         * versions.
         */
        return 0;

    case TLS_ANY_VERSION:
        if (version < SSL3_VERSION || version > TLS_MAX_VERSION)
            return 0;
        break;

    case DTLS_ANY_VERSION:
        if (DTLS_VERSION_GT(version, DTLS_MAX_VERSION) ||
            DTLS_VERSION_LT(version, DTLS1_BAD_VER))
            return 0;
        break;
    }

    *bound = version;
    return 1;
}

/*
 * ssl_choose_server_version - Choose server (D)TLS version.  Called when the
 * client HELLO is received to select the final server protocol version and
 * the version specific method.
 *
 * @s: server SSL handle.
 *
 * Returns 0 on success or an SSL error reason number on failure.
 */
int ssl_choose_server_version(SSL *s)
{
    /*-
     * With version-flexible methods we have an initial state with:
     *
     *   s->method->version == (D)TLS_ANY_VERSION,
     *   s->version == (D)TLS_MAX_VERSION.
     *
     * So we detect version-flexible methods via the method version, not the
     * handle version.
     */
    int server_version = s->method->version;
    int client_version = s->client_version;
    const version_info *vent;
    const version_info *table;
    int disabled = 0;

    switch (server_version) {
    default:
        if (version_cmp(s, client_version, s->version) < 0)
            return SSL_R_WRONG_SSL_VERSION;
        /*
         * If this SSL handle is not from a version flexible method we don't
         * (and never did) check min/max FIPS or Suite B constraints.  Hope
         * that's OK.  It is up to the caller to not choose fixed protocol
         * versions they don't want.  If not, then easy to fix, just return
         * ssl_method_error(s, s->method)
         */
        return 0;
    case TLS_ANY_VERSION:
        table = tls_version_table;
        break;
    case DTLS_ANY_VERSION:
        table = dtls_version_table;
        break;
    }

    for (vent = table; vent->version != 0; ++vent) {
        const SSL_METHOD *method;

        if (vent->smeth == NULL ||
            version_cmp(s, client_version, vent->version) < 0)
            continue;
        method = vent->smeth();
        if (ssl_method_error(s, method) == 0) {
            s->version = vent->version;
            s->method = method;
            return 0;
        }
        disabled = 1;
    }
    return disabled ? SSL_R_UNSUPPORTED_PROTOCOL : SSL_R_VERSION_TOO_LOW;
}

/*
 * ssl_choose_client_version - Choose client (D)TLS version.  Called when the
 * server HELLO is received to select the final client protocol version and
 * the version specific method.
 *
 * @s: client SSL handle.
 * @version: The proposed version from the server's HELLO.
 *
 * Returns 0 on success or an SSL error reason number on failure.
 */
int ssl_choose_client_version(SSL *s, int version)
{
    const version_info *vent;
    const version_info *table;

    switch (s->method->version) {
    default:
        if (version != s->version)
            return SSL_R_WRONG_SSL_VERSION;
        /*
         * If this SSL handle is not from a version flexible method we don't
         * (and never did) check min/max, FIPS or Suite B constraints.  Hope
         * that's OK.  It is up to the caller to not choose fixed protocol
         * versions they don't want.  If not, then easy to fix, just return
         * ssl_method_error(s, s->method)
         */
        return 0;
    case TLS_ANY_VERSION:
        table = tls_version_table;
        break;
    case DTLS_ANY_VERSION:
        table = dtls_version_table;
        break;
    }

    for (vent = table; vent->version != 0; ++vent) {
        const SSL_METHOD *method;
        int err;

        if (version != vent->version)
            continue;
        if (vent->cmeth == NULL)
            break;
        method = vent->cmeth();
        err = ssl_method_error(s, method);
        if (err != 0)
            return err;
        s->method = method;
        s->version = version;
        return 0;
    }

    return SSL_R_UNSUPPORTED_PROTOCOL;
}

/*
 * ssl_get_client_min_max_version - get minimum and maximum client version
 * @s: The SSL connection
 * @min_version: The minimum supported version
 * @max_version: The maximum supported version
 *
 * Work out what version we should be using for the initial ClientHello if the
 * version is initially (D)TLS_ANY_VERSION.  We apply any explicit
 * SSL_OP_NO_xxx options, the MinProtocol and MaxProtocol configuration
 * commands, any Suite B or FIPS_mode() constraints and any floor imposed by
 * the security level here, so we don't advertise the wrong protocol version
 * to only reject the outcome later.
 *
 * Computing the right floor matters.  If, e.g., TLS 1.0 and 1.2 are enabled,
 * TLS 1.1 is disabled, but the security level, Suite-B  and/or MinProtocol
 * only allow TLS 1.2, we want to advertise TLS1.2, *not* TLS1.
 *
 * Returns 0 on success or an SSL error reason number on failure.  On failure
 * min_version and max_version will also be set to 0.
 */
int ssl_get_client_min_max_version(const SSL *s, int *min_version,
                                   int *max_version)
{
    int version;
    int hole;
    const SSL_METHOD *single = NULL;
    const SSL_METHOD *method;
    const version_info *table;
    const version_info *vent;

    switch (s->method->version) {
    default:
        /*
         * If this SSL handle is not from a version flexible method we don't
         * (and never did) check min/max FIPS or Suite B constraints.  Hope
         * that's OK.  It is up to the caller to not choose fixed protocol
         * versions they don't want.  If not, then easy to fix, just return
         * ssl_method_error(s, s->method)
         */
        *min_version = *max_version = s->version;
        return 0;
    case TLS_ANY_VERSION:
        table = tls_version_table;
        break;
    case DTLS_ANY_VERSION:
        table = dtls_version_table;
        break;
    }

    /*
     * SSL_OP_NO_X disables all protocols above X *if* there are some protocols
     * below X enabled. This is required in order to maintain the "version
     * capability" vector contiguous. Any versions with a NULL client method
     * (protocol version client is disabled at compile-time) is also a "hole".
     *
     * Our initial state is hole == 1, version == 0.  That is, versions above
     * the first version in the method table are disabled (a "hole" above
     * the valid protocol entries) and we don't have a selected version yet.
     *
     * Whenever "hole == 1", and we hit an enabled method, its version becomes
     * the selected version, and the method becomes a candidate "single"
     * method.  We're no longer in a hole, so "hole" becomes 0.
     *
     * If "hole == 0" and we hit an enabled method, then "single" is cleared,
     * as we support a contiguous range of at least two methods.  If we hit
     * a disabled method, then hole becomes true again, but nothing else
     * changes yet, because all the remaining methods may be disabled too.
     * If we again hit an enabled method after the new hole, it becomes
     * selected, as we start from scratch.
     */
    *min_version = version = 0;
    hole = 1;
    for (vent = table; vent->version != 0; ++vent) {
        /*
         * A table entry with a NULL client method is still a hole in the
         * "version capability" vector.
         */
        if (vent->cmeth == NULL) {
            hole = 1;
            continue;
        }
        method = vent->cmeth();
        if (ssl_method_error(s, method) != 0) {
            hole = 1;
        } else if (!hole) {
            single = NULL;
            *min_version = method->version;
        } else {
            version = (single = method)->version;
            *min_version = version;
            hole = 0;
        }
    }

    *max_version = version;

    /* Fail if everything is disabled */
    if (version == 0)
        return SSL_R_NO_PROTOCOLS_AVAILABLE;

    return 0;
}

/*
 * ssl_set_client_hello_version - Work out what version we should be using for
 * the initial ClientHello.
 *
 * @s: client SSL handle.
 *
 * Returns 0 on success or an SSL error reason number on failure.
 */
int ssl_set_client_hello_version(SSL *s)
{
    int ver_min, ver_max, ret;

    /*
     * In a renegotiation we always send the same client_version that we sent
     * last time, regardless of which version we eventually negotiated.
     */
    if (!SSL_IS_FIRST_HANDSHAKE(s))
        return 0;

    ret = ssl_get_client_min_max_version(s, &ver_min, &ver_max);

    if (ret != 0)
        return ret;

    s->client_version = s->version = ver_max;
    return 0;
}
716046.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/printk.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Modified to make sys_syslog() more flexible: added commands to * return the last 4k of kernel messages, regardless of whether * they've been read or not. Added option to suppress kernel printk's * to the console. Added hook for sending the console messages * elsewhere, in preparation for a serial line console (someday). * Ted Ts'o, 2/11/93. * Modified for sysctl support, 1/8/97, Chris Horn. * Fixed SMP synchronization, 08/08/99, Manfred Spraul * [email protected] * Rewrote bits to get rid of console_lock * 01Mar01 Andrew Morton */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/console.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/nmi.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/memblock.h> #include <linux/syscalls.h> #include <linux/crash_core.h> #include <linux/kdb.h> #include <linux/ratelimit.h> #include <linux/kmsg_dump.h> #include <linux/syslog.h> #include <linux/cpu.h> #include <linux/rculist.h> #include <linux/poll.h> #include <linux/irq_work.h> #include <linux/ctype.h> #include <linux/uio.h> #include <linux/sched/clock.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <asm/sections.h> #include <trace/events/initcall.h> #define CREATE_TRACE_POINTS #include <trace/events/printk.h> #include "console_cmdline.h" #include "braille.h" #include "internal.h" int console_printk[4] = { CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */ CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */ CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */ }; EXPORT_SYMBOL_GPL(console_printk); atomic_t 
ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_sem protects the console_drivers list, and also
 * provides serialisation for access to the entire console
 * driver system.
 */
static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);

/*
 * System may need to suppress printk message under certain
 * circumstances, like after kernel panic happens.
 */
int __read_mostly suppress_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};
#endif

/* Bit positions for the printk.devkmsg= policy below. */
enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,
	__DEVKMSG_LOG_BIT_OFF,
	__DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

/*
 * Parse a devkmsg policy keyword ("on", "off" or "ratelimit") and update
 * devkmsg_log accordingly.
 *
 * Returns the length of the matched keyword, or -EINVAL when str is NULL
 * or matches none of the keywords.
 */
static int __control_devkmsg(char *str)
{
	size_t len;

	if (!str)
		return -EINVAL;

	len = str_has_prefix(str, "on");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_ON;
		return len;
	}

	len = str_has_prefix(str, "off");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
		return len;
	}

	len = str_has_prefix(str, "ratelimit");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
		return len;
	}

	return -EINVAL;
}

/* Early handler for the printk.devkmsg= kernel command line parameter. */
static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0)
		return 1;

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
		strcpy(devkmsg_log_str, "on");
	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
		strcpy(devkmsg_log_str, "off");
	/* else "ratelimit" which is set by default.
*/ /* * Sysctl cannot change it anymore. The kernel command line setting of * this parameter is to force the setting to be permanent throughout the * runtime of the system. This is a precation measure against userspace * trying to be a smarta** and attempting to change it up on us. */ devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; return 0; } __setup("printk.devkmsg=", control_devkmsg); char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { char old_str[DEVKMSG_STR_MAX_SIZE]; unsigned int old; int err; if (write) { if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK) return -EINVAL; old = devkmsg_log; strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE); } err = proc_dostring(table, write, buffer, lenp, ppos); if (err) return err; if (write) { err = __control_devkmsg(devkmsg_log_str); /* * Do not accept an unknown string OR a known string with * trailing crap... */ if (err < 0 || (err + 1 != *lenp)) { /* ... and restore old setting. */ devkmsg_log = old; strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE); return -EINVAL; } } return 0; } /* Number of registered extended console drivers. */ static int nr_ext_console_drivers; /* * Helper macros to handle lockdep when locking/unlocking console_sem. We use * macros instead of functions so that _RET_IP_ contains useful information. */ #define down_console_sem() do { \ down(&console_sem);\ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\ } while (0) static int __down_trylock_console_sem(unsigned long ip) { int lock_failed; unsigned long flags; /* * Here and in __up_console_sem() we need to be in safe mode, * because spindump/WARN/etc from under console ->lock will * deadlock in printk()->down_trylock_console_sem() otherwise. 
*/ printk_safe_enter_irqsave(flags); lock_failed = down_trylock(&console_sem); printk_safe_exit_irqrestore(flags); if (lock_failed) return 1; mutex_acquire(&console_lock_dep_map, 0, 1, ip); return 0; } #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) static void __up_console_sem(unsigned long ip) { unsigned long flags; mutex_release(&console_lock_dep_map, 1, ip); printk_safe_enter_irqsave(flags); up(&console_sem); printk_safe_exit_irqrestore(flags); } #define up_console_sem() __up_console_sem(_RET_IP_) /* * This is used for debugging the mess that is the VT code by * keeping track if we have the console semaphore held. It's * definitely not the perfect debug tool (we don't know if _WE_ * hold it and are racing, but it helps tracking those weird code * paths in the console code where we end up in places I want * locked without the console sempahore held). */ static int console_locked, console_suspended; /* * If exclusive_console is non-NULL then only this console is to be printed to. */ static struct console *exclusive_console; /* * Array of consoles built from command line options (console=) */ #define MAX_CMDLINECONSOLES 8 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; static int preferred_console = -1; int console_set_on_cmdline; EXPORT_SYMBOL(console_set_on_cmdline); /* Flag: console code may call schedule() */ static int console_may_schedule; enum con_msg_format_flags { MSG_FORMAT_DEFAULT = 0, MSG_FORMAT_SYSLOG = (1 << 0), }; static int console_msg_format = MSG_FORMAT_DEFAULT; /* * The printk log buffer consists of a chain of concatenated variable * length records. Every record starts with a record header, containing * the overall length of the record. * * The heads to the first and last entry in the buffer, as well as the * sequence numbers of these entries are maintained when messages are * stored. * * If the heads indicate available messages, the length in the header * tells the start next message. 
A length == 0 for the next message * indicates a wrap-around to the beginning of the buffer. * * Every record carries the monotonic timestamp in microseconds, as well as * the standard userspace syslog level and syslog facility. The usual * kernel messages use LOG_KERN; userspace-injected messages always carry * a matching syslog facility, by default LOG_USER. The origin of every * message can be reliably determined that way. * * The human readable log message directly follows the message header. The * length of the message text is stored in the header, the stored message * is not terminated. * * Optionally, a message can carry a dictionary of properties (key/value pairs), * to provide userspace with a machine-readable message context. * * Examples for well-defined, commonly used property names are: * DEVICE=b12:8 device identifier * b12:8 block dev_t * c127:3 char dev_t * n8 netdev ifindex * +sound:card0 subsystem:devname * SUBSYSTEM=pci driver-core subsystem name * * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value * follows directly after a '=' character. Every property is terminated by * a '\0' character. The last property is not terminated. * * Example of a message structure: * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec * 0008 34 00 record is 52 bytes long * 000a 0b 00 text is 11 bytes long * 000c 1f 00 dictionary is 23 bytes long * 000e 03 00 LOG_KERN (facility) LOG_ERR (level) * 0010 69 74 27 73 20 61 20 6c "it's a l" * 69 6e 65 "ine" * 001b 44 45 56 49 43 "DEVIC" * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D" * 52 49 56 45 52 3d 62 75 "RIVER=bu" * 67 "g" * 0032 00 00 00 padding to next message header * * The 'struct printk_log' buffer header must never be directly exported to * userspace, it is a kernel-private implementation detail that might * need to be changed in the future, when the requirements change. 
* * /dev/kmsg exports the structured data in the following line format: * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" * * Users of the export format should ignore possible additional values * separated by ',', and find the message after the ';' character. * * The optional key/value pairs are attached as continuation lines starting * with a space character and terminated by a newline. All possible * non-prinatable characters are escaped in the "\xff" notation. */ enum log_flags { LOG_NEWLINE = 2, /* text ended with a newline */ LOG_CONT = 8, /* text is a fragment of a continuation line */ }; struct printk_log { u64 ts_nsec; /* timestamp in nanoseconds */ u16 len; /* length of entire record */ u16 text_len; /* length of text buffer */ u16 dict_len; /* length of dictionary buffer */ u8 facility; /* syslog facility */ u8 flags:5; /* internal record flags */ u8 level:3; /* syslog level */ #ifdef CONFIG_PRINTK_CALLER u32 caller_id; /* thread id or processor id */ #endif } #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __packed __aligned(4) #endif ; /* * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken * within the scheduler's rq lock. It must be released before calling * console_unlock() or anything else that might wake up a process. */ DEFINE_RAW_SPINLOCK(logbuf_lock); /* * Helper macros to lock/unlock logbuf_lock and switch between * printk-safe/unsafe modes. 
*/ #define logbuf_lock_irq() \ do { \ printk_safe_enter_irq(); \ raw_spin_lock(&logbuf_lock); \ } while (0) #define logbuf_unlock_irq() \ do { \ raw_spin_unlock(&logbuf_lock); \ printk_safe_exit_irq(); \ } while (0) #define logbuf_lock_irqsave(flags) \ do { \ printk_safe_enter_irqsave(flags); \ raw_spin_lock(&logbuf_lock); \ } while (0) #define logbuf_unlock_irqrestore(flags) \ do { \ raw_spin_unlock(&logbuf_lock); \ printk_safe_exit_irqrestore(flags); \ } while (0) #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; static u32 syslog_idx; static size_t syslog_partial; static bool syslog_time; /* index and sequence number of the first record stored in the buffer */ static u64 log_first_seq; static u32 log_first_idx; /* index and sequence number of the next record to store in the buffer */ static u64 log_next_seq; static u32 log_next_idx; /* the next printk record to write to the console */ static u64 console_seq; static u32 console_idx; static u64 exclusive_console_stop_seq; /* the next printk record to read after the last 'clear' command */ static u64 clear_seq; static u32 clear_idx; #ifdef CONFIG_PRINTK_CALLER #define PREFIX_MAX 48 #else #define PREFIX_MAX 32 #endif #define LOG_LINE_MAX (1024 - PREFIX_MAX) #define LOG_LEVEL(v) ((v) & 0x07) #define LOG_FACILITY(v) ((v) >> 3 & 0xff) /* record buffer */ #define LOG_ALIGN __alignof__(struct printk_log) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) #define LOG_BUF_LEN_MAX (u32)(1 << 31) static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; /* * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before * per_cpu_areas are initialised. This variable is set to true when * it's safe to access per-CPU data. 
*/
static bool __printk_percpu_data_ready __read_mostly;

/* True once per-CPU areas (and thus printk's per-CPU state) may be used. */
bool printk_percpu_data_ready(void)
{
	return __printk_percpu_data_ready;
}

/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

/* human readable text of the record */
static char *log_text(const struct printk_log *msg)
{
	return (char *)msg + sizeof(struct printk_log);
}

/* optional key/value pair dictionary attached to the record */
static char *log_dict(const struct printk_log *msg)
{
	return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}

/* get record by index; idx must point to valid msg */
static struct printk_log *log_from_idx(u32 idx)
{
	struct printk_log *msg = (struct printk_log *)(log_buf + idx);

	/*
	 * A length == 0 record is the end of buffer marker. Wrap around and
	 * read the message at the start of the buffer.
	 */
	if (!msg->len)
		return (struct printk_log *)log_buf;
	return msg;
}

/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
	struct printk_log *msg = (struct printk_log *)(log_buf + idx);

	/*
	 * A length == 0 record is the end of buffer marker. Wrap around and
	 * read the message at the start of the buffer as *this* one, and
	 * return the one after that.
	 */
	if (!msg->len) {
		msg = (struct printk_log *)log_buf;
		return msg->len;
	}
	return idx + msg->len;
}

/*
 * Check whether there is enough free space for the given message.
 *
 * The same values of first_idx and next_idx mean that the buffer
 * is either empty or full.
 *
 * If the buffer is empty, we must respect the position of the indexes.
 * They cannot be reset to the beginning of the buffer.
 */
static int logbuf_has_space(u32 msg_size, bool empty)
{
	u32 free;

	if (log_next_idx > log_first_idx || empty)
		free = max(log_buf_len - log_next_idx, log_first_idx);
	else
		free = log_first_idx - log_next_idx;

	/*
	 * We need space also for an empty header that signalizes wrapping
	 * of the buffer.
	 */
	return free >= msg_size + sizeof(struct printk_log);
}

/*
 * Drop records from the head of the buffer until msg_size bytes fit,
 * or return -ENOMEM if the buffer cannot hold the message at all.
 */
static int log_make_free_space(u32 msg_size)
{
	while (log_first_seq < log_next_seq &&
	       !logbuf_has_space(msg_size, false)) {
		/* drop old messages until we have enough contiguous space */
		log_first_idx = log_next(log_first_idx);
		log_first_seq++;
	}

	/* keep the "clear" position from pointing at dropped records */
	if (clear_seq < log_first_seq) {
		clear_seq = log_first_seq;
		clear_idx = log_first_idx;
	}

	/* sequence numbers are equal, so the log buffer is empty */
	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
		return 0;

	return -ENOMEM;
}

/* compute the message size including the padding bytes */
static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
{
	u32 size;

	size = sizeof(struct printk_log) + text_len + dict_len;
	/* pad to the record alignment; (-size) mod LOG_ALIGN */
	*pad_len = (-size) & (LOG_ALIGN - 1);
	size += *pad_len;

	return size;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
			u16 *dict_len, u32 *pad_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
*/ u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART; if (*text_len > max_text_len) *text_len = max_text_len; /* enable the warning message */ *trunc_msg_len = strlen(trunc_msg); /* disable the "dict" completely */ *dict_len = 0; /* compute the size again, count also the warning message */ return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len); } /* insert record into the buffer, discard old ones, update heads */ static int log_store(u32 caller_id, int facility, int level, enum log_flags flags, u64 ts_nsec, const char *dict, u16 dict_len, const char *text, u16 text_len) { struct printk_log *msg; u32 size, pad_len; u16 trunc_msg_len = 0; /* number of '\0' padding bytes to next message */ size = msg_used_size(text_len, dict_len, &pad_len); if (log_make_free_space(size)) { /* truncate the message if it is too long for empty buffer */ size = truncate_msg(&text_len, &trunc_msg_len, &dict_len, &pad_len); /* survive when the log buffer is too small for trunc_msg */ if (log_make_free_space(size)) return 0; } if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) { /* * This message + an additional empty header does not fit * at the end of the buffer. Add an empty header with len == 0 * to signify a wrap around. 
*/ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log)); log_next_idx = 0; } /* fill message */ msg = (struct printk_log *)(log_buf + log_next_idx); memcpy(log_text(msg), text, text_len); msg->text_len = text_len; if (trunc_msg_len) { memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len); msg->text_len += trunc_msg_len; } memcpy(log_dict(msg), dict, dict_len); msg->dict_len = dict_len; msg->facility = facility; msg->level = level & 7; msg->flags = flags & 0x1f; if (ts_nsec > 0) msg->ts_nsec = ts_nsec; else msg->ts_nsec = local_clock(); #ifdef CONFIG_PRINTK_CALLER msg->caller_id = caller_id; #endif memset(log_dict(msg) + dict_len, 0, pad_len); msg->len = size; /* insert message */ log_next_idx += msg->len; log_next_seq++; return msg->text_len; } int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); static int syslog_action_restricted(int type) { if (dmesg_restrict) return 1; /* * Unless restricted, we allow "read all" and "get buffer size" * for everybody. */ return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; } static int check_syslog_permissions(int type, int source) { /* * If this is from /proc/kmsg and we've already opened it, then we've * already done the capabilities checks at open time. */ if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN) goto ok; if (syslog_action_restricted(type)) { if (capable(CAP_SYSLOG)) goto ok; /* * For historical reasons, accept CAP_SYS_ADMIN too, with * a warning. 
*/
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				     current->comm, task_pid_nr(current));
			goto ok;
		}
		return -EPERM;
	}
ok:
	return security_syslog(type);
}

/* Append c to *pp if there is still room before e; advance *pp. */
static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}

/*
 * Format the /dev/kmsg record header:
 * "<prival>,<seqnum>,<timestamp usec>,<cont flag>[,caller=...];"
 * where prival is (facility << 3) | level.
 */
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq)
{
	u64 ts_usec = msg->ts_nsec;
	char caller[20];
#ifdef CONFIG_PRINTK_CALLER
	u32 id = msg->caller_id;

	/* top bit distinguishes CPU ('C') from thread ('T') callers */
	snprintf(caller, sizeof(caller), ",caller=%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
	caller[0] = '\0';
#endif

	/* nanoseconds -> microseconds */
	do_div(ts_usec, 1000);

	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
			 (msg->facility << 3) | msg->level, seq, ts_usec,
			 msg->flags & LOG_CONT ? 'c' : '-', caller);
}

/*
 * Copy the message text and optional dictionary into buf, escaping
 * non-printable characters and '\\' as \xXX.  Dictionary entries are
 * emitted as continuation lines starting with a space, one per '\0'-
 * separated property.  Returns the number of bytes produced (writes are
 * clipped to size via append_char()/scnprintf()).
 */
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, '\n');

	if (dict_len) {
		bool line = true;

		for (i = 0; i < dict_len; i++) {
			unsigned char c = dict[i];

			/* every dictionary line starts with a space */
			if (line) {
				append_char(&p, e, ' ');
				line = false;
			}

			/* '\0' terminates a property; start a new line */
			if (c == '\0') {
				append_char(&p, e, '\n');
				line = true;
				continue;
			}

			if (c < ' ' || c >= 127 || c == '\\') {
				p += scnprintf(p, e - p, "\\x%02x", c);
				continue;
			}

			append_char(&p, e, c);
		}
		append_char(&p, e, '\n');
	}

	return p - buf;
}

/* /dev/kmsg - userspace message inject/listen interface */

struct devkmsg_user {
	u64 seq;	/* sequence number of the next record to read */
	u32 idx;	/* byte index of that record in log_buf */
	struct ratelimit_state rs;
	struct mutex lock;	/* serialises readers of this open file */
	char buf[CONSOLE_EXT_LOG_MAX];
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{ va_list args; int r; va_start(args, fmt); r = vprintk_emit(facility, level, NULL, 0, fmt, args); va_end(args); return r; } static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) { char *buf, *line; int level = default_message_loglevel; int facility = 1; /* LOG_USER */ struct file *file = iocb->ki_filp; struct devkmsg_user *user = file->private_data; size_t len = iov_iter_count(from); ssize_t ret = len; if (!user || len > LOG_LINE_MAX) return -EINVAL; /* Ignore when user logging is disabled. */ if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) return len; /* Ratelimit when not explicitly enabled. */ if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) { if (!___ratelimit(&user->rs, current->comm)) return ret; } buf = kmalloc(len+1, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[len] = '\0'; if (!copy_from_iter_full(buf, len, from)) { kfree(buf); return -EFAULT; } /* * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace * the decimal value represents 32bit, the lower 3 bit are the log * level, the rest are the log facility. * * If no prefix or no userspace facility is specified, we * enforce LOG_USER, to be able to reliably distinguish * kernel-generated messages from userspace-injected ones. 
*/ line = buf; if (line[0] == '<') { char *endp = NULL; unsigned int u; u = simple_strtoul(line + 1, &endp, 10); if (endp && endp[0] == '>') { level = LOG_LEVEL(u); if (LOG_FACILITY(u) != 0) facility = LOG_FACILITY(u); endp++; len -= endp - line; line = endp; } } devkmsg_emit(facility, level, "%s", line); kfree(buf); return ret; } static ssize_t devkmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct devkmsg_user *user = file->private_data; struct printk_log *msg; size_t len; ssize_t ret; if (!user) return -EBADF; ret = mutex_lock_interruptible(&user->lock); if (ret) return ret; logbuf_lock_irq(); while (user->seq == log_next_seq) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; logbuf_unlock_irq(); goto out; } logbuf_unlock_irq(); ret = wait_event_interruptible(log_wait, user->seq != log_next_seq); if (ret) goto out; logbuf_lock_irq(); } if (user->seq < log_first_seq) { /* our last seen message is gone, return error and reset */ user->idx = log_first_idx; user->seq = log_first_seq; ret = -EPIPE; logbuf_unlock_irq(); goto out; } msg = log_from_idx(user->idx); len = msg_print_ext_header(user->buf, sizeof(user->buf), msg, user->seq); len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len, log_dict(msg), msg->dict_len, log_text(msg), msg->text_len); user->idx = log_next(user->idx); user->seq++; logbuf_unlock_irq(); if (len > count) { ret = -EINVAL; goto out; } if (copy_to_user(buf, user->buf, len)) { ret = -EFAULT; goto out; } ret = len; out: mutex_unlock(&user->lock); return ret; } static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) { struct devkmsg_user *user = file->private_data; loff_t ret = 0; if (!user) return -EBADF; if (offset) return -ESPIPE; logbuf_lock_irq(); switch (whence) { case SEEK_SET: /* the first record */ user->idx = log_first_idx; user->seq = log_first_seq; break; case SEEK_DATA: /* * The first record after the last SYSLOG_ACTION_CLEAR, * like issued by 'dmesg -c'. 
Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		user->idx = clear_idx;
		user->seq = clear_seq;
		break;
	case SEEK_END:
		/* after the last record */
		user->idx = log_next_idx;
		user->seq = log_next_seq;
		break;
	default:
		ret = -EINVAL;
	}
	logbuf_unlock_irq();
	return ret;
}

/*
 * Poll support for /dev/kmsg: readable when records beyond the reader's
 * position exist; EPOLLERR|EPOLLPRI additionally signals that records
 * were overwritten underneath the reader.
 */
static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	__poll_t ret = 0;

	if (!user)
		return EPOLLERR|EPOLLNVAL;

	poll_wait(file, &log_wait, wait);

	logbuf_lock_irq();
	if (user->seq < log_next_seq) {
		/* return error when data has vanished underneath us */
		if (user->seq < log_first_seq)
			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
		else
			ret = EPOLLIN|EPOLLRDNORM;
	}
	logbuf_unlock_irq();

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	/* start reading at the oldest record still in the buffer */
	logbuf_lock_irq();
	user->idx = log_first_idx;
	user->seq = log_first_seq;
	logbuf_unlock_irq();

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	if (!user)
		return 0;

	ratelimit_state_exit(&user->rs);

	mutex_destroy(&user->lock);
	kfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

#ifdef CONFIG_CRASH_CORE
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash
and makedumpfile to * obtain access to symbols that are otherwise very difficult to locate. These * symbols are specifically used so that utilities can access and extract the * dmesg log from a vmcore file after a crash. */ void log_buf_vmcoreinfo_setup(void) { VMCOREINFO_SYMBOL(log_buf); VMCOREINFO_SYMBOL(log_buf_len); VMCOREINFO_SYMBOL(log_first_idx); VMCOREINFO_SYMBOL(clear_idx); VMCOREINFO_SYMBOL(log_next_idx); /* * Export struct printk_log size and field offsets. User space tools can * parse it and detect any changes to structure down the line. */ VMCOREINFO_STRUCT_SIZE(printk_log); VMCOREINFO_OFFSET(printk_log, ts_nsec); VMCOREINFO_OFFSET(printk_log, len); VMCOREINFO_OFFSET(printk_log, text_len); VMCOREINFO_OFFSET(printk_log, dict_len); #ifdef CONFIG_PRINTK_CALLER VMCOREINFO_OFFSET(printk_log, caller_id); #endif } #endif /* requested log_buf_len from kernel cmdline */ static unsigned long __initdata new_log_buf_len; /* we practice scaling the ring buffer by powers of 2 */ static void __init log_buf_len_update(u64 size) { if (size > (u64)LOG_BUF_LEN_MAX) { size = (u64)LOG_BUF_LEN_MAX; pr_err("log_buf over 2G is not supported.\n"); } if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) new_log_buf_len = (unsigned long)size; } /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { u64 size; if (!str) return -EINVAL; size = memparse(str, &str); log_buf_len_update(size); return 0; } early_param("log_buf_len", log_buf_len_setup); #ifdef CONFIG_SMP #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT) static void __init log_buf_add_cpu(void) { unsigned int cpu_extra; /* * archs should set up cpu_possible_bits properly with * set_cpu_possible() after setup_arch() but just in * case lets ensure this is valid. 
*/ if (num_possible_cpus() == 1) return; cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN; /* by default this will only continue through for large > 64 CPUs */ if (cpu_extra <= __LOG_BUF_LEN / 2) return; pr_info("log_buf_len individual max cpu contribution: %d bytes\n", __LOG_CPU_MAX_BUF_LEN); pr_info("log_buf_len total cpu_extra contributions: %d bytes\n", cpu_extra); pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN); log_buf_len_update(cpu_extra + __LOG_BUF_LEN); } #else /* !CONFIG_SMP */ static inline void log_buf_add_cpu(void) {} #endif /* CONFIG_SMP */ static void __init set_percpu_data_ready(void) { printk_safe_init(); /* Make sure we set this flag only after printk_safe() init is done */ barrier(); __printk_percpu_data_ready = true; } void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; unsigned int free; /* * Some archs call setup_log_buf() multiple times - first is very * early, e.g. from setup_arch(), and second - when percpu_areas * are initialised. 
*/ if (!early) set_percpu_data_ready(); if (log_buf != __log_buf) return; if (!early && !new_log_buf_len) log_buf_add_cpu(); if (!new_log_buf_len) return; new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN); if (unlikely(!new_log_buf)) { pr_err("log_buf_len: %lu bytes not available\n", new_log_buf_len); return; } logbuf_lock_irqsave(flags); log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; free = __LOG_BUF_LEN - log_next_idx; memcpy(log_buf, __log_buf, __LOG_BUF_LEN); logbuf_unlock_irqrestore(flags); pr_info("log_buf_len: %u bytes\n", log_buf_len); pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); } static bool __read_mostly ignore_loglevel; static int __init ignore_loglevel_setup(char *str) { ignore_loglevel = true; pr_info("debug: ignoring loglevel setting.\n"); return 0; } early_param("ignore_loglevel", ignore_loglevel_setup); module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting (prints all kernel messages to the console)"); static bool suppress_message_printing(int level) { return (level >= console_loglevel && !ignore_loglevel); } #ifdef CONFIG_BOOT_PRINTK_DELAY static int boot_delay; /* msecs delay after each printk during bootup */ static unsigned long long loops_per_msec; /* based on boot_delay */ static int __init boot_delay_setup(char *str) { unsigned long lpj; lpj = preset_lpj ? 
preset_lpj : 1000000; /* some guess */ loops_per_msec = (unsigned long long)lpj / 1000 * HZ; get_option(&str, &boot_delay); if (boot_delay > 10 * 1000) boot_delay = 0; pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " "HZ: %d, loops_per_msec: %llu\n", boot_delay, preset_lpj, lpj, HZ, loops_per_msec); return 0; } early_param("boot_delay", boot_delay_setup); static void boot_delay_msec(int level) { unsigned long long k; unsigned long timeout; if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress_message_printing(level)) { return; } k = (unsigned long long)loops_per_msec * boot_delay; timeout = jiffies + msecs_to_jiffies(boot_delay); while (k) { k--; cpu_relax(); /* * use (volatile) jiffies to prevent * compiler reduction; loop termination via jiffies * is secondary and may or may not happen. */ if (time_after(jiffies, timeout)) break; touch_nmi_watchdog(); } } #else static inline void boot_delay_msec(int level) { } #endif static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME); module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); static size_t print_syslog(unsigned int level, char *buf) { return sprintf(buf, "<%u>", level); } static size_t print_time(u64 ts, char *buf) { unsigned long rem_nsec = do_div(ts, 1000000000); return sprintf(buf, "[%5lu.%06lu]", (unsigned long)ts, rem_nsec / 1000); } #ifdef CONFIG_PRINTK_CALLER static size_t print_caller(u32 id, char *buf) { char caller[12]; snprintf(caller, sizeof(caller), "%c%u", id & 0x80000000 ? 
'C' : 'T', id & ~0x80000000); return sprintf(buf, "[%6s]", caller); } #else #define print_caller(id, buf) 0 #endif static size_t print_prefix(const struct printk_log *msg, bool syslog, bool time, char *buf) { size_t len = 0; if (syslog) len = print_syslog((msg->facility << 3) | msg->level, buf); if (time) len += print_time(msg->ts_nsec, buf + len); len += print_caller(msg->caller_id, buf + len); if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) { buf[len++] = ' '; buf[len] = '\0'; } return len; } static size_t msg_print_text(const struct printk_log *msg, bool syslog, bool time, char *buf, size_t size) { const char *text = log_text(msg); size_t text_size = msg->text_len; size_t len = 0; char prefix[PREFIX_MAX]; const size_t prefix_len = print_prefix(msg, syslog, time, prefix); do { const char *next = memchr(text, '\n', text_size); size_t text_len; if (next) { text_len = next - text; next++; text_size -= next - text; } else { text_len = text_size; } if (buf) { if (prefix_len + text_len + 1 >= size - len) break; memcpy(buf + len, prefix, prefix_len); len += prefix_len; memcpy(buf + len, text, text_len); len += text_len; buf[len++] = '\n'; } else { /* SYSLOG_ACTION_* buffer size only calculation */ len += prefix_len + text_len + 1; } text = next; } while (text); return len; } static int syslog_print(char __user *buf, int size) { char *text; struct printk_log *msg; int len = 0; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) return -ENOMEM; while (size > 0) { size_t n; size_t skip; logbuf_lock_irq(); if (syslog_seq < log_first_seq) { /* messages are gone, move to first one */ syslog_seq = log_first_seq; syslog_idx = log_first_idx; syslog_partial = 0; } if (syslog_seq == log_next_seq) { logbuf_unlock_irq(); break; } /* * To keep reading/counting partial line consistent, * use printk_time value as of the beginning of a line. 
*/ if (!syslog_partial) syslog_time = printk_time; skip = syslog_partial; msg = log_from_idx(syslog_idx); n = msg_print_text(msg, true, syslog_time, text, LOG_LINE_MAX + PREFIX_MAX); if (n - syslog_partial <= size) { /* message fits into buffer, move forward */ syslog_idx = log_next(syslog_idx); syslog_seq++; n -= syslog_partial; syslog_partial = 0; } else if (!len){ /* partial read(), remember position */ n = size; syslog_partial += n; } else n = 0; logbuf_unlock_irq(); if (!n) break; if (copy_to_user(buf, text + skip, n)) { if (!len) len = -EFAULT; break; } len += n; size -= n; buf += n; } kfree(text); return len; } static int syslog_print_all(char __user *buf, int size, bool clear) { char *text; int len = 0; u64 next_seq; u64 seq; u32 idx; bool time; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) return -ENOMEM; time = printk_time; logbuf_lock_irq(); /* * Find first record that fits, including all following records, * into the user-provided buffer for this dump. */ seq = clear_seq; idx = clear_idx; while (seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); len += msg_print_text(msg, true, time, NULL, 0); idx = log_next(idx); seq++; } /* move first record forward until length fits into the buffer */ seq = clear_seq; idx = clear_idx; while (len > size && seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); len -= msg_print_text(msg, true, time, NULL, 0); idx = log_next(idx); seq++; } /* last message fitting into this dump */ next_seq = log_next_seq; len = 0; while (len >= 0 && seq < next_seq) { struct printk_log *msg = log_from_idx(idx); int textlen = msg_print_text(msg, true, time, text, LOG_LINE_MAX + PREFIX_MAX); idx = log_next(idx); seq++; logbuf_unlock_irq(); if (copy_to_user(buf + len, text, textlen)) len = -EFAULT; else len += textlen; logbuf_lock_irq(); if (seq < log_first_seq) { /* messages are gone, move to next one */ seq = log_first_seq; idx = log_first_idx; } } if (clear) { clear_seq = log_next_seq; 
clear_idx = log_next_idx; } logbuf_unlock_irq(); kfree(text); return len; } static void syslog_clear(void) { logbuf_lock_irq(); clear_seq = log_next_seq; clear_idx = log_next_idx; logbuf_unlock_irq(); } int do_syslog(int type, char __user *buf, int len, int source) { bool clear = false; static int saved_console_loglevel = LOGLEVEL_DEFAULT; int error; error = check_syslog_permissions(type, source); if (error) return error; switch (type) { case SYSLOG_ACTION_CLOSE: /* Close log */ break; case SYSLOG_ACTION_OPEN: /* Open log */ break; case SYSLOG_ACTION_READ: /* Read from log */ if (!buf || len < 0) return -EINVAL; if (!len) return 0; if (!access_ok(buf, len)) return -EFAULT; error = wait_event_interruptible(log_wait, syslog_seq != log_next_seq); if (error) return error; error = syslog_print(buf, len); break; /* Read/clear last kernel messages */ case SYSLOG_ACTION_READ_CLEAR: clear = true; /* FALL THRU */ /* Read last kernel messages */ case SYSLOG_ACTION_READ_ALL: if (!buf || len < 0) return -EINVAL; if (!len) return 0; if (!access_ok(buf, len)) return -EFAULT; error = syslog_print_all(buf, len, clear); break; /* Clear ring buffer */ case SYSLOG_ACTION_CLEAR: syslog_clear(); break; /* Disable logging to console */ case SYSLOG_ACTION_CONSOLE_OFF: if (saved_console_loglevel == LOGLEVEL_DEFAULT) saved_console_loglevel = console_loglevel; console_loglevel = minimum_console_loglevel; break; /* Enable logging to console */ case SYSLOG_ACTION_CONSOLE_ON: if (saved_console_loglevel != LOGLEVEL_DEFAULT) { console_loglevel = saved_console_loglevel; saved_console_loglevel = LOGLEVEL_DEFAULT; } break; /* Set level of messages printed to console */ case SYSLOG_ACTION_CONSOLE_LEVEL: if (len < 1 || len > 8) return -EINVAL; if (len < minimum_console_loglevel) len = minimum_console_loglevel; console_loglevel = len; /* Implicitly re-enable logging to console */ saved_console_loglevel = LOGLEVEL_DEFAULT; break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: 
logbuf_lock_irq(); if (syslog_seq < log_first_seq) { /* messages are gone, move to first one */ syslog_seq = log_first_seq; syslog_idx = log_first_idx; syslog_partial = 0; } if (source == SYSLOG_FROM_PROC) { /* * Short-cut for poll(/"proc/kmsg") which simply checks * for pending data, not the size; return the count of * records, not the length. */ error = log_next_seq - syslog_seq; } else { u64 seq = syslog_seq; u32 idx = syslog_idx; bool time = syslog_partial ? syslog_time : printk_time; while (seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); error += msg_print_text(msg, true, time, NULL, 0); time = printk_time; idx = log_next(idx); seq++; } error -= syslog_partial; } logbuf_unlock_irq(); break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: error = log_buf_len; break; default: error = -EINVAL; break; } return error; } SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) { return do_syslog(type, buf, len, SYSLOG_FROM_READER); } /* * Special console_lock variants that help to reduce the risk of soft-lockups. * They allow to pass console_lock to another printk() call using a busy wait. */ #ifdef CONFIG_LOCKDEP static struct lockdep_map console_owner_dep_map = { .name = "console_owner" }; #endif static DEFINE_RAW_SPINLOCK(console_owner_lock); static struct task_struct *console_owner; static bool console_waiter; /** * console_lock_spinning_enable - mark beginning of code where another * thread might safely busy wait * * This basically converts console_lock into a spinlock. This marks * the section where the console_lock owner can not sleep, because * there may be a waiter spinning (like a spinlock). Also it must be * ready to hand over the lock at the end of the section. 
*/
static void console_lock_spinning_enable(void)
{
	raw_spin_lock(&console_owner_lock);
	console_owner = current;
	raw_spin_unlock(&console_owner_lock);

	/* The waiter may spin on us after setting console_owner */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}

/**
 * console_lock_spinning_disable_and_check - mark end of code where another
 *	thread was able to busy wait and check if there is a waiter
 *
 * This is called at the end of the section where spinning is allowed.
 * It has two functions. First, it is a signal that it is no longer
 * safe to start busy waiting for the lock. Second, it checks if
 * there is a busy waiter and passes the lock rights to her.
 *
 * Important: Callers lose the lock if there was a busy waiter.
 *	They must not touch items synchronized by console_lock
 *	in this case.
 *
 * Return: 1 if the lock rights were passed, 0 otherwise.
 */
static int console_lock_spinning_disable_and_check(void)
{
	int waiter;

	raw_spin_lock(&console_owner_lock);
	waiter = READ_ONCE(console_waiter);
	console_owner = NULL;
	raw_spin_unlock(&console_owner_lock);

	if (!waiter) {
		/* Nobody is waiting: keep the lock, just drop the annotation. */
		spin_release(&console_owner_dep_map, 1, _THIS_IP_);
		return 0;
	}

	/* The waiter is now free to continue */
	WRITE_ONCE(console_waiter, false);

	spin_release(&console_owner_dep_map, 1, _THIS_IP_);

	/*
	 * Hand off console_lock to waiter. The waiter will perform
	 * the up(). After this, the waiter is the console_lock owner.
	 */
	mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
	return 1;
}

/**
 * console_trylock_spinning - try to get console_lock by busy waiting
 *
 * This allows to busy wait for the console_lock when the current
 * owner is running in specially marked sections. It means that
 * the current owner is running and cannot reschedule until it
 * is ready to lose the lock.
*
 * Return: 1 if we got the lock, 0 otherwise
 */
static int console_trylock_spinning(void)
{
	struct task_struct *owner = NULL;
	bool waiter;
	bool spin = false;
	unsigned long flags;

	/* Fast path: the semaphore was free. */
	if (console_trylock())
		return 1;

	printk_safe_enter_irqsave(flags);

	raw_spin_lock(&console_owner_lock);
	owner = READ_ONCE(console_owner);
	waiter = READ_ONCE(console_waiter);
	if (!waiter && owner && owner != current) {
		WRITE_ONCE(console_waiter, true);
		spin = true;
	}
	raw_spin_unlock(&console_owner_lock);

	/*
	 * If there is an active printk() writing to the
	 * consoles, instead of having it write our data too,
	 * see if we can offload that load from the active
	 * printer, and do some printing ourselves.
	 * Go into a spin only if there isn't already a waiter
	 * spinning, and there is an active printer, and
	 * that active printer isn't us (recursive printk?).
	 */
	if (!spin) {
		printk_safe_exit_irqrestore(flags);
		return 0;
	}

	/* We spin waiting for the owner to release us */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
	/* Owner will clear console_waiter on hand off */
	while (READ_ONCE(console_waiter))
		cpu_relax();
	spin_release(&console_owner_dep_map, 1, _THIS_IP_);

	printk_safe_exit_irqrestore(flags);
	/*
	 * The owner passed the console lock to us.
	 * Since we did not spin on console lock, annotate
	 * this as a trylock. Otherwise lockdep will
	 * complain.
	 */
	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);

	return 1;
}

/*
 * Call the console drivers, asking them to write out
 * log_buf[start] to log_buf[end - 1].
 * The console_lock must be held.
*/
static void call_console_drivers(const char *ext_text, size_t ext_len,
				 const char *text, size_t len)
{
	struct console *con;

	trace_console_rcuidle(text, len);

	if (!console_drivers)
		return;

	for_each_console(con) {
		/* While replaying the log, only the exclusive console prints. */
		if (exclusive_console && con != exclusive_console)
			continue;
		if (!(con->flags & CON_ENABLED))
			continue;
		if (!con->write)
			continue;
		/* Offlined CPUs may only use consoles marked CON_ANYTIME. */
		if (!cpu_online(smp_processor_id()) &&
		    !(con->flags & CON_ANYTIME))
			continue;
		if (con->flags & CON_EXTENDED)
			con->write(con, ext_text, ext_len);
		else
			con->write(con, text, len);
	}
}

/* Optional artificial delay (in ms) after each message, for debugging. */
int printk_delay_msec __read_mostly;

static inline void printk_delay(void)
{
	if (unlikely(printk_delay_msec)) {
		int m = printk_delay_msec;

		while (m--) {
			mdelay(1);
			touch_nmi_watchdog();
		}
	}
}

/* Caller id: the PID in task context, else 0x80000000 + the CPU number. */
static inline u32 printk_caller_id(void)
{
	return in_task() ? task_pid_nr(current) :
		0x80000000 + raw_smp_processor_id();
}

/*
 * Continuation lines are buffered, and not committed to the record buffer
 * until the line is complete, or a race forces it. The line fragments
 * though, are printed immediately to the consoles to ensure everything has
 * reached the console in case of a kernel crash.
 */
static struct cont {
	char buf[LOG_LINE_MAX];
	size_t len;			/* length == 0 means unused buffer */
	u32 caller_id;			/* printk_caller_id() of first print */
	u64 ts_nsec;			/* time of first print */
	u8 level;			/* log level of first message */
	u8 facility;			/* log facility of first message */
	enum log_flags flags;		/* prefix, newline flags */
} cont;

/* Commit the buffered continuation line (if any) to the record buffer. */
static void cont_flush(void)
{
	if (cont.len == 0)
		return;

	log_store(cont.caller_id, cont.facility, cont.level, cont.flags,
		  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
	cont.len = 0;
}

static bool cont_add(u32 caller_id, int facility, int level,
		     enum log_flags flags, const char *text, size_t len)
{
	/* If the line gets too long, split it up in separate records.
	 */
	if (cont.len + len > sizeof(cont.buf)) {
		cont_flush();
		return false;
	}

	if (!cont.len) {
		/* First fragment: capture the originating context/metadata. */
		cont.facility = facility;
		cont.level = level;
		cont.caller_id = caller_id;
		cont.ts_nsec = local_clock();
		cont.flags = flags;
	}

	memcpy(cont.buf + cont.len, text, len);
	cont.len += len;

	// The original flags come from the first line,
	// but later continuations can add a newline.
	if (flags & LOG_NEWLINE) {
		cont.flags |= LOG_NEWLINE;
		cont_flush();
	}

	return true;
}

/*
 * Route one formatted message either into the continuation buffer or
 * straight into the record log.  Returns the number of text bytes stored.
 */
static size_t log_output(int facility, int level, enum log_flags lflags,
			 const char *dict, size_t dictlen,
			 char *text, size_t text_len)
{
	const u32 caller_id = printk_caller_id();

	/*
	 * If an earlier line was buffered, and we're a continuation
	 * write from the same context, try to add it to the buffer.
	 */
	if (cont.len) {
		if (cont.caller_id == caller_id && (lflags & LOG_CONT)) {
			if (cont_add(caller_id, facility, level, lflags,
				     text, text_len))
				return text_len;
		}
		/* Otherwise, make sure it's flushed */
		cont_flush();
	}

	/* Skip empty continuation lines that couldn't be added - they just flush */
	if (!text_len && (lflags & LOG_CONT))
		return 0;

	/* If it doesn't end in a newline, try to buffer the current line */
	if (!(lflags & LOG_NEWLINE)) {
		if (cont_add(caller_id, facility, level, lflags,
			     text, text_len))
			return text_len;
	}

	/* Store it in the record log */
	return log_store(caller_id, facility, level, lflags, 0,
			 dict, dictlen, text, text_len);
}

/* Must be called under logbuf_lock. */
int vprintk_store(int facility, int level,
		  const char *dict, size_t dictlen,
		  const char *fmt, va_list args)
{
	static char textbuf[LOG_LINE_MAX];
	char *text = textbuf;
	size_t text_len;
	enum log_flags lflags = 0;

	/*
	 * The printf needs to come first; we need the syslog
	 * prefix which might be passed-in as a parameter.
*/ text_len = vscnprintf(text, sizeof(textbuf), fmt, args); /* mark and strip a trailing newline */ if (text_len && text[text_len-1] == '\n') { text_len--; lflags |= LOG_NEWLINE; } /* strip kernel syslog prefix and extract log level or control flags */ if (facility == 0) { int kern_level; while ((kern_level = printk_get_level(text)) != 0) { switch (kern_level) { case '0' ... '7': if (level == LOGLEVEL_DEFAULT) level = kern_level - '0'; break; case 'c': /* KERN_CONT */ lflags |= LOG_CONT; } text_len -= 2; text += 2; } } if (level == LOGLEVEL_DEFAULT) level = default_message_loglevel; if (dict) lflags |= LOG_NEWLINE; return log_output(facility, level, lflags, dict, dictlen, text, text_len); } asmlinkage int vprintk_emit(int facility, int level, const char *dict, size_t dictlen, const char *fmt, va_list args) { int printed_len; bool in_sched = false, pending_output; unsigned long flags; u64 curr_log_seq; /* Suppress unimportant messages after panic happens */ if (unlikely(suppress_printk)) return 0; if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; in_sched = true; } boot_delay_msec(level); printk_delay(); /* This stops the holder of console_sem just where we want him */ logbuf_lock_irqsave(flags); curr_log_seq = log_next_seq; printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args); pending_output = (curr_log_seq != log_next_seq); logbuf_unlock_irqrestore(flags); /* If called from the scheduler, we can not call up(). */ if (!in_sched && pending_output) { /* * Disable preemption to avoid being preempted while holding * console_sem which would prevent anyone from printing to * console */ preempt_disable(); /* * Try to acquire and then immediately release the console * semaphore. The release will print out buffers and wake up * /dev/kmsg and syslog() users. 
*/ if (console_trylock_spinning()) console_unlock(); preempt_enable(); } if (pending_output) wake_up_klogd(); return printed_len; } EXPORT_SYMBOL(vprintk_emit); asmlinkage int vprintk(const char *fmt, va_list args) { return vprintk_func(fmt, args); } EXPORT_SYMBOL(vprintk); int vprintk_default(const char *fmt, va_list args) { int r; #ifdef CONFIG_KGDB_KDB /* Allow to pass printk() to kdb but avoid a recursion. */ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) { r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); return r; } #endif r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); return r; } EXPORT_SYMBOL_GPL(vprintk_default); /** * printk - print a kernel message * @fmt: format string * * This is printk(). It can be called from any context. We want it to work. * * We try to grab the console_lock. If we succeed, it's easy - we log the * output and call the console drivers. If we fail to get the semaphore, we * place the output into the log buffer and return. The current holder of * the console_sem will notice the new output in console_unlock(); and will * send it to the consoles before releasing the lock. * * One effect of this deferred printing is that code which calls printk() and * then changes console_loglevel may break. This is because console_loglevel * is inspected when the actual printing occurs. * * See also: * printf(3) * * See the vsnprintf() documentation for format string extensions over C99. */ asmlinkage __visible int printk(const char *fmt, ...) 
{ va_list args; int r; va_start(args, fmt); r = vprintk_func(fmt, args); va_end(args); return r; } EXPORT_SYMBOL(printk); #else /* CONFIG_PRINTK */ #define LOG_LINE_MAX 0 #define PREFIX_MAX 0 #define printk_time false static u64 syslog_seq; static u32 syslog_idx; static u64 console_seq; static u32 console_idx; static u64 exclusive_console_stop_seq; static u64 log_first_seq; static u32 log_first_idx; static u64 log_next_seq; static char *log_text(const struct printk_log *msg) { return NULL; } static char *log_dict(const struct printk_log *msg) { return NULL; } static struct printk_log *log_from_idx(u32 idx) { return NULL; } static u32 log_next(u32 idx) { return 0; } static ssize_t msg_print_ext_header(char *buf, size_t size, struct printk_log *msg, u64 seq) { return 0; } static ssize_t msg_print_ext_body(char *buf, size_t size, char *dict, size_t dict_len, char *text, size_t text_len) { return 0; } static void console_lock_spinning_enable(void) { } static int console_lock_spinning_disable_and_check(void) { return 0; } static void call_console_drivers(const char *ext_text, size_t ext_len, const char *text, size_t len) {} static size_t msg_print_text(const struct printk_log *msg, bool syslog, bool time, char *buf, size_t size) { return 0; } static bool suppress_message_printing(int level) { return false; } #endif /* CONFIG_PRINTK */ #ifdef CONFIG_EARLY_PRINTK struct console *early_console; asmlinkage __visible void early_printk(const char *fmt, ...) { va_list ap; char buf[512]; int n; if (!early_console) return; va_start(ap, fmt); n = vscnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); early_console->write(early_console, buf, n); } #endif static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { struct console_cmdline *c; int i; /* * See if this tty is not yet registered, and * if we have a slot free. 
*/ for (i = 0, c = console_cmdline; i < MAX_CMDLINECONSOLES && c->name[0]; i++, c++) { if (strcmp(c->name, name) == 0 && c->index == idx) { if (!brl_options) preferred_console = i; return 0; } } if (i == MAX_CMDLINECONSOLES) return -E2BIG; if (!brl_options) preferred_console = i; strlcpy(c->name, name, sizeof(c->name)); c->options = options; braille_set_options(c, brl_options); c->index = idx; return 0; } static int __init console_msg_format_setup(char *str) { if (!strcmp(str, "syslog")) console_msg_format = MSG_FORMAT_SYSLOG; if (!strcmp(str, "default")) console_msg_format = MSG_FORMAT_DEFAULT; return 1; } __setup("console_msg_format=", console_msg_format_setup); /* * Set up a console. Called via do_early_param() in init/main.c * for each "console=" parameter in the boot command line. */ static int __init console_setup(char *str) { char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */ char *s, *options, *brl_options = NULL; int idx; if (_braille_console_setup(&str, &brl_options)) return 1; /* * Decode str into name, index, options. */ if (str[0] >= '0' && str[0] <= '9') { strcpy(buf, "ttyS"); strncpy(buf + 4, str, sizeof(buf) - 5); } else { strncpy(buf, str, sizeof(buf) - 1); } buf[sizeof(buf) - 1] = 0; options = strchr(str, ','); if (options) *(options++) = 0; #ifdef __sparc__ if (!strcmp(str, "ttya")) strcpy(buf, "ttyS0"); if (!strcmp(str, "ttyb")) strcpy(buf, "ttyS1"); #endif for (s = buf; *s; s++) if (isdigit(*s) || *s == ',') break; idx = simple_strtoul(s, NULL, 10); *s = 0; __add_preferred_console(buf, idx, options, brl_options); console_set_on_cmdline = 1; return 1; } __setup("console=", console_setup); /** * add_preferred_console - add a device to the list of preferred consoles. * @name: device name * @idx: device index * @options: options for this console * * The last preferred console added will be used for kernel messages * and stdin/out/err for init. 
Normally this is used by console_setup * above to handle user-supplied console arguments; however it can also * be used by arch-specific code either to override the user or more * commonly to provide a default console (ie from PROM variables) when * the user has not supplied one. */ int add_preferred_console(char *name, int idx, char *options) { return __add_preferred_console(name, idx, options, NULL); } bool console_suspend_enabled = true; EXPORT_SYMBOL(console_suspend_enabled); static int __init console_suspend_disable(char *str) { console_suspend_enabled = false; return 1; } __setup("no_console_suspend", console_suspend_disable); module_param_named(console_suspend, console_suspend_enabled, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(console_suspend, "suspend console during suspend" " and hibernate operations"); /** * suspend_console - suspend the console subsystem * * This disables printk() while we go into suspend states */ void suspend_console(void) { if (!console_suspend_enabled) return; pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); console_lock(); console_suspended = 1; up_console_sem(); } void resume_console(void) { if (!console_suspend_enabled) return; down_console_sem(); console_suspended = 0; console_unlock(); } /** * console_cpu_notify - print deferred console messages after CPU hotplug * @cpu: unused * * If printk() is called from a CPU that is not online yet, the messages * will be printed on the console only if there are CON_ANYTIME consoles. * This function is called when a new CPU comes online (or fails to come * up) or goes offline. */ static int console_cpu_notify(unsigned int cpu) { if (!cpuhp_tasks_frozen) { /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); } return 0; } /** * console_lock - lock the console system for exclusive use. * * Acquires a lock which guarantees that the caller has * exclusive access to the console system and the console_drivers list. 
*
 * Can sleep, returns nothing.
 */
void console_lock(void)
{
	might_sleep();

	down_console_sem();
	if (console_suspended)
		return;
	console_locked = 1;
	console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);

/**
 * console_trylock - try to lock the console system for exclusive use.
 *
 * Try to acquire a lock which guarantees that the caller has exclusive
 * access to the console system and the console_drivers list.
 *
 * returns 1 on success, and 0 on failure to acquire the lock.
 */
int console_trylock(void)
{
	if (down_trylock_console_sem())
		return 0;
	if (console_suspended) {
		up_console_sem();
		return 0;
	}
	console_locked = 1;
	/* Trylock callers may be atomic; never allow rescheduling here. */
	console_may_schedule = 0;
	return 1;
}
EXPORT_SYMBOL(console_trylock);

int is_console_locked(void)
{
	return console_locked;
}
EXPORT_SYMBOL(is_console_locked);

/*
 * Check if we have any console that is capable of printing while cpu is
 * booting or shutting down. Requires console_sem.
 */
static int have_callable_console(void)
{
	struct console *con;

	for_each_console(con)
		if ((con->flags & CON_ENABLED) &&
				(con->flags & CON_ANYTIME))
			return 1;

	return 0;
}

/*
 * Can we actually use the console at this time on this cpu?
 *
 * Console drivers may assume that per-cpu resources have been allocated. So
 * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
 * call them until this CPU is officially up.
 */
static inline int can_use_console(void)
{
	return cpu_online(raw_smp_processor_id()) || have_callable_console();
}

/**
 * console_unlock - unlock the console system
 *
 * Releases the console_lock which the caller holds on the console system
 * and the console driver list.
 *
 * While the console_lock was held, console output may have been buffered
 * by printk(). If this is the case, console_unlock(); emits
 * the output prior to releasing the lock.
 *
 * If there is output waiting, we wake /dev/kmsg and syslog() users.
 *
 * console_unlock(); may be called from any context.
*/ void console_unlock(void) { static char ext_text[CONSOLE_EXT_LOG_MAX]; static char text[LOG_LINE_MAX + PREFIX_MAX]; unsigned long flags; bool do_cond_resched, retry; if (console_suspended) { up_console_sem(); return; } /* * Console drivers are called with interrupts disabled, so * @console_may_schedule should be cleared before; however, we may * end up dumping a lot of lines, for example, if called from * console registration path, and should invoke cond_resched() * between lines if allowable. Not doing so can cause a very long * scheduling stall on a slow console leading to RCU stall and * softlockup warnings which exacerbate the issue with more * messages practically incapacitating the system. * * console_trylock() is not able to detect the preemptive * context reliably. Therefore the value must be stored before * and cleared after the the "again" goto label. */ do_cond_resched = console_may_schedule; again: console_may_schedule = 0; /* * We released the console_sem lock, so we need to recheck if * cpu is online and (if not) is there at least one CON_ANYTIME * console. */ if (!can_use_console()) { console_locked = 0; up_console_sem(); return; } for (;;) { struct printk_log *msg; size_t ext_len = 0; size_t len; printk_safe_enter_irqsave(flags); raw_spin_lock(&logbuf_lock); if (console_seq < log_first_seq) { len = sprintf(text, "** %llu printk messages dropped **\n", log_first_seq - console_seq); /* messages are gone, move to first one */ console_seq = log_first_seq; console_idx = log_first_idx; } else { len = 0; } skip: if (console_seq == log_next_seq) break; msg = log_from_idx(console_idx); if (suppress_message_printing(msg->level)) { /* * Skip record we have buffered and already printed * directly to the console when we received it, and * record that has level above the console loglevel. */ console_idx = log_next(console_idx); console_seq++; goto skip; } /* Output to all consoles once old messages replayed. 
*/ if (unlikely(exclusive_console && console_seq >= exclusive_console_stop_seq)) { exclusive_console = NULL; } len += msg_print_text(msg, console_msg_format & MSG_FORMAT_SYSLOG, printk_time, text + len, sizeof(text) - len); if (nr_ext_console_drivers) { ext_len = msg_print_ext_header(ext_text, sizeof(ext_text), msg, console_seq); ext_len += msg_print_ext_body(ext_text + ext_len, sizeof(ext_text) - ext_len, log_dict(msg), msg->dict_len, log_text(msg), msg->text_len); } console_idx = log_next(console_idx); console_seq++; raw_spin_unlock(&logbuf_lock); /* * While actively printing out messages, if another printk() * were to occur on another CPU, it may wait for this one to * finish. This task can not be preempted if there is a * waiter waiting to take over. */ console_lock_spinning_enable(); stop_critical_timings(); /* don't trace print latency */ call_console_drivers(ext_text, ext_len, text, len); start_critical_timings(); if (console_lock_spinning_disable_and_check()) { printk_safe_exit_irqrestore(flags); return; } printk_safe_exit_irqrestore(flags); if (do_cond_resched) cond_resched(); } console_locked = 0; raw_spin_unlock(&logbuf_lock); up_console_sem(); /* * Someone could have filled up the buffer again, so re-check if there's * something to flush. In case we cannot trylock the console_sem again, * there's a new owner and the console_unlock() from them will do the * flush, no worries. */ raw_spin_lock(&logbuf_lock); retry = console_seq != log_next_seq; raw_spin_unlock(&logbuf_lock); printk_safe_exit_irqrestore(flags); if (retry && console_trylock()) goto again; } EXPORT_SYMBOL(console_unlock); /** * console_conditional_schedule - yield the CPU if required * * If the console code is currently allowed to sleep, and * if this CPU should yield the CPU to another task, do * so here. * * Must be called within console_lock();. 
*/
void __sched console_conditional_schedule(void)
{
	if (console_may_schedule)
		cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);

void console_unblank(void)
{
	struct console *c;

	/*
	 * console_unblank can no longer be called in interrupt context unless
	 * oops_in_progress is set to 1..
	 */
	if (oops_in_progress) {
		/* During an oops, don't risk sleeping on the semaphore. */
		if (down_trylock_console_sem() != 0)
			return;
	} else
		console_lock();

	console_locked = 1;
	console_may_schedule = 0;
	for_each_console(c)
		if ((c->flags & CON_ENABLED) && c->unblank)
			c->unblank();
	console_unlock();
}

/**
 * console_flush_on_panic - flush console content on panic
 * @mode: flush all messages in buffer or just the pending ones
 *
 * Immediately output all pending messages no matter what.
 */
void console_flush_on_panic(enum con_flush_mode mode)
{
	/*
	 * If someone else is holding the console lock, trylock will fail
	 * and may_schedule may be set. Ignore and proceed to unlock so
	 * that messages are flushed out. As this can be called from any
	 * context and we don't want to get preempted while flushing,
	 * ensure may_schedule is cleared.
	 */
	console_trylock();
	console_may_schedule = 0;

	if (mode == CONSOLE_REPLAY_ALL) {
		unsigned long flags;

		/* Rewind to the oldest record so everything is replayed. */
		logbuf_lock_irqsave(flags);
		console_seq = log_first_seq;
		console_idx = log_first_idx;
		logbuf_unlock_irqrestore(flags);
	}
	console_unlock();
}

/*
 * Return the console tty driver structure and its associated index
 */
struct tty_driver *console_device(int *index)
{
	struct console *c;
	struct tty_driver *driver = NULL;

	console_lock();
	for_each_console(c) {
		if (!c->device)
			continue;
		driver = c->device(c, index);
		if (driver)
			break;
	}
	console_unlock();
	return driver;
}

/*
 * Prevent further output on the passed console device so that (for example)
 * serial drivers can disable console output before suspending a port, and can
 * re-enable output afterwards.
*/
void console_stop(struct console *console)
{
	console_lock();
	console->flags &= ~CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_stop);

void console_start(struct console *console)
{
	console_lock();
	console->flags |= CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_start);

/* If set, boot consoles stay registered after a real console appears. */
static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 * - Once a "real" console is registered, any attempt to register a
 *   bootconsoles will be rejected
 */
void register_console(struct console *newcon)
{
	int i;
	unsigned long flags;
	struct console *bcon = NULL;
	struct console_cmdline *c;
	static bool has_preferred;

	/* Reject double registration of the very same console struct. */
	if (console_drivers)
		for_each_console(bcon)
			if (WARN(bcon == newcon, "console '%s%d' already registered\n",
				 bcon->name, bcon->index))
				return;

	/*
	 * before we register a new CON_BOOT console, make sure we don't
	 * already have a valid console
	 */
	if (console_drivers && newcon->flags & CON_BOOT) {
		/* find the last or real console */
		for_each_console(bcon) {
			if (!(bcon->flags & CON_BOOT)) {
				pr_info("Too late to register bootconsole %s%d\n",
					newcon->name, newcon->index);
				return;
			}
		}
	}

	/* Remember an existing boot console so it can be torn down later. */
	if (console_drivers && console_drivers->flags & CON_BOOT)
		bcon = console_drivers;

	if (!has_preferred || bcon || !console_drivers)
		has_preferred = preferred_console >= 0;

	/*
	 * See if we want to use this console driver. If we
	 * didn't select a console we take the first one
	 * that registers here.
	 */
	if (!has_preferred) {
		if (newcon->index < 0)
			newcon->index = 0;
		if (newcon->setup == NULL ||
		    newcon->setup(newcon, NULL) == 0) {
			newcon->flags |= CON_ENABLED;
			if (newcon->device) {
				newcon->flags |= CON_CONSDEV;
				has_preferred = true;
			}
		}
	}

	/*
	 * See if this console matches one we selected on
	 * the command line.
	 */
	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && c->name[0];
	     i++, c++) {
		/* Driver-supplied match() takes precedence over name/index. */
		if (!newcon->match ||
		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
			/* default matching */
			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
			if (strcmp(c->name, newcon->name) != 0)
				continue;
			if (newcon->index >= 0 &&
			    newcon->index != c->index)
				continue;
			if (newcon->index < 0)
				newcon->index = c->index;

			if (_braille_register_console(newcon, c))
				return;

			if (newcon->setup &&
			    newcon->setup(newcon, c->options) != 0)
				break;
		}

		newcon->flags |= CON_ENABLED;
		if (i == preferred_console) {
			newcon->flags |= CON_CONSDEV;
			has_preferred = true;
		}
		break;
	}

	/* Not selected by default nor by the command line: nothing to do. */
	if (!(newcon->flags & CON_ENABLED))
		return;

	/*
	 * If we have a bootconsole, and are switching to a real console,
	 * don't print everything out again, since when the boot console, and
	 * the real console are the same physical device, it's annoying to
	 * see the beginning boot messages twice
	 */
	if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
		newcon->flags &= ~CON_PRINTBUFFER;

	/*
	 * Put this console in the list - keep the
	 * preferred driver at the head of the list.
	 */
	console_lock();
	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
		newcon->next = console_drivers;
		console_drivers = newcon;
		/* Only one console may carry CON_CONSDEV at a time. */
		if (newcon->next)
			newcon->next->flags &= ~CON_CONSDEV;
	} else {
		newcon->next = console_drivers->next;
		console_drivers->next = newcon;
	}

	if (newcon->flags & CON_EXTENDED)
		nr_ext_console_drivers++;

	if (newcon->flags & CON_PRINTBUFFER) {
		/*
		 * console_unlock(); will print out the buffered messages
		 * for us.
		 */
		logbuf_lock_irqsave(flags);
		/*
		 * We're about to replay the log buffer. Only do this to the
		 * just-registered console to avoid excessive message spam to
		 * the already-registered consoles.
		 *
		 * Set exclusive_console with disabled interrupts to reduce
		 * race window with eventual console_flush_on_panic() that
		 * ignores console_lock.
		 */
		exclusive_console = newcon;
		exclusive_console_stop_seq = console_seq;
		console_seq = syslog_seq;
		console_idx = syslog_idx;
		logbuf_unlock_irqrestore(flags);
	}
	console_unlock();
	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc - this is to ensure that end
	 * users know there might be something in the kernel's log buffer that
	 * went to the bootconsole (that they do not see on the real console)
	 */
	pr_info("%sconsole [%s%d] enabled\n",
		(newcon->flags & CON_BOOT) ? "boot" : "" ,
		newcon->name, newcon->index);
	if (bcon &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		/* We need to iterate through all boot consoles, to make
		 * sure we print everything out, before we unregister them.
		 */
		for_each_console(bcon)
			if (bcon->flags & CON_BOOT)
				unregister_console(bcon);
	}
}
EXPORT_SYMBOL(register_console);

/*
 * unregister_console - remove a console from the console_drivers list.
 * Returns 0 when the console was found and unlinked, non-zero otherwise.
 */
int unregister_console(struct console *console)
{
	struct console *a, *b;
	int res;

	pr_info("%sconsole [%s%d] disabled\n",
		(console->flags & CON_BOOT) ? "boot" : "" ,
		console->name, console->index);

	res = _braille_unregister_console(console);
	if (res)
		return res;

	res = 1;

	console_lock();
	if (console_drivers == console) {
		console_drivers=console->next;
		res = 0;
	} else if (console_drivers) {
		/* Walk the singly linked list; b trails a by one node. */
		for (a=console_drivers->next, b=console_drivers ;
		     a; b=a, a=b->next) {
			if (a == console) {
				b->next = a->next;
				res = 0;
				break;
			}
		}
	}

	if (!res && (console->flags & CON_EXTENDED))
		nr_ext_console_drivers--;

	/*
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 */
	if (console_drivers != NULL && console->flags & CON_CONSDEV)
		console_drivers->flags |= CON_CONSDEV;

	console->flags &= ~CON_ENABLED;
	console_unlock();
	console_sysfs_notify();
	return res;
}
EXPORT_SYMBOL(unregister_console);

/*
 * Initialize the console device.
 This is called *early*, so
 * we can't necessarily depend on lots of kernel help here.
 * Just do some early initializations, and do the complex setup
 * later.
 */
void __init console_init(void)
{
	int ret;
	initcall_t call;
	initcall_entry_t *ce;

	/* Setup the default TTY line discipline. */
	n_tty_init();

	/*
	 * set up the console device so that later boot sequences can
	 * inform about problems etc..
	 */
	ce = __con_initcall_start;
	trace_initcall_level("console");
	/* Run every console initcall placed in the dedicated linker section. */
	while (ce < __con_initcall_end) {
		call = initcall_from_entry(ce);
		trace_initcall_start(call);
		ret = call();
		trace_initcall_finish(call, ret);
		ce++;
	}
}

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
 * get unregistered when the real preferred console is registered.
 */
static int __init printk_late_init(void)
{
	struct console *con;
	int ret;

	for_each_console(con) {
		if (!(con->flags & CON_BOOT))
			continue;

		/* Check addresses that might be used for enabled consoles. */
		if (init_section_intersects(con, sizeof(*con)) ||
		    init_section_contains(con->write, 0) ||
		    init_section_contains(con->read, 0) ||
		    init_section_contains(con->device, 0) ||
		    init_section_contains(con->unblank, 0) ||
		    init_section_contains(con->data, 0)) {
			/*
			 * Please, consider moving the reported consoles out
			 * of the init section.
			 */
			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
				con->name, con->index);
			unregister_console(con);
		}
	}
	/* Hook console handling into CPU hotplug (dead + online callbacks). */
	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
					console_cpu_notify);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
					console_cpu_notify, NULL);
	WARN_ON(ret < 0);
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);

/* irq_work callback: flush deferred console output and/or wake klogd. */
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = __this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		/* If trylock fails, someone else is doing the printing */
		if (console_trylock())
			console_unlock();
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func = wake_up_klogd_work_func,
	.flags = IRQ_WORK_LAZY,
};

/* Ask the irq_work machinery to wake readers of /proc/kmsg (klogd). */
void wake_up_klogd(void)
{
	if (!printk_percpu_data_ready())
		return;

	preempt_disable();
	if (waitqueue_active(&log_wait)) {
		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

/* Defer printing to irq_work context (safe from scheduler/NMI paths). */
void defer_console_output(void)
{
	if (!printk_percpu_data_ready())
		return;

	preempt_disable();
	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	preempt_enable();
}

/* Emit to the log buffer now, but push console output to irq_work. */
int vprintk_deferred(const char *fmt, va_list args)
{
	int r;

	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
	defer_console_output();

	return r;
}

/* Variadic wrapper around vprintk_deferred(). */
int printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_deferred(fmt, args);
	va_end(args);

	return r;
}

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	/* func is used for per-callsite reporting by the ratelimit core. */
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			unsigned int interval_msecs)
{
	unsigned long elapsed = jiffies - *caller_jiffies;

	/* A zero *caller_jiffies means "never printed yet": always allow. */
	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
		return false;

	*caller_jiffies = jiffies;
	return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);

static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure will be called when the kernel oopses or panics and must be
 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);

/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	/* Wait for concurrent RCU readers before the caller may free dumper. */
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

/**
 * kmsg_dump - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 *
 * Call each of the registered dumper's dump() callback, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump(enum kmsg_dump_reason reason)
{
	struct kmsg_dumper *dumper;
	unsigned long flags;

	/* Only oops/panic are dumped unless always_kmsg_dump is set. */
	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		if (dumper->max_reason && reason > dumper->max_reason)
			continue;

		/* initialize iterator with data about the stored records */
		dumper->active = true;

		logbuf_lock_irqsave(flags);
		dumper->cur_seq = clear_seq;
		dumper->cur_idx = clear_idx;
		dumper->next_seq = log_next_seq;
		dumper->next_idx = log_next_idx;
		logbuf_unlock_irqrestore(flags);

		/* invoke dumper which will iterate over records */
		dumper->dump(dumper, reason);

		/* reset iterator */
		dumper->active = false;
	}
	rcu_read_unlock();
}

/**
 * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 *
 * The function is similar to kmsg_dump_get_line(), but grabs no locks.
 */
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
			       char *line, size_t size, size_t *len)
{
	struct printk_log *msg;
	size_t l = 0;
	bool ret = false;

	if (!dumper->active)
		goto out;

	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}

	/* last entry */
	if (dumper->cur_seq >= log_next_seq)
		goto out;

	msg = log_from_idx(dumper->cur_idx);
	l = msg_print_text(msg, syslog, printk_time, line, size);

	/* Advance the iterator to the next stored record. */
	dumper->cur_idx = log_next(dumper->cur_idx);
	dumper->cur_seq++;
	ret = true;
out:
	/* l is 0 on the "no record" paths, so *len is always well-defined. */
	if (len)
		*len = l;
	return ret;
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
			char *line, size_t size, size_t *len)
{
	unsigned long flags;
	bool ret;

	/* Locked wrapper around kmsg_dump_get_line_nolock(). */
	logbuf_lock_irqsave(flags);
	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
	logbuf_unlock_irqrestore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @dumper: registered kmsg dumper
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records that fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
			  char *buf, size_t size, size_t *len)
{
	unsigned long flags;
	u64 seq;
	u32 idx;
	u64 next_seq;
	u32 next_idx;
	size_t l = 0;
	bool ret = false;
	bool time = printk_time;

	if (!dumper->active)
		goto out;

	logbuf_lock_irqsave(flags);
	if (dumper->cur_seq < log_first_seq) {
		/* messages are gone, move to first available one */
		dumper->cur_seq = log_first_seq;
		dumper->cur_idx = log_first_idx;
	}

	/* last entry */
	if (dumper->cur_seq >= dumper->next_seq) {
		logbuf_unlock_irqrestore(flags);
		goto out;
	}

	/* calculate length of entire buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	while (seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l += msg_print_text(msg, true, time, NULL, 0);
		idx = log_next(idx);
		seq++;
	}

	/* move first record forward until length fits into the buffer */
	seq = dumper->cur_seq;
	idx = dumper->cur_idx;
	while (l >= size && seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l -= msg_print_text(msg, true, time, NULL, 0);
		idx = log_next(idx);
		seq++;
	}

	/* last message in next iteration */
	next_seq = seq;
	next_idx = idx;

	/* Second pass: actually copy the records that fit into buf. */
	l = 0;
	while (seq < dumper->next_seq) {
		struct printk_log *msg = log_from_idx(idx);

		l += msg_print_text(msg, syslog, time, buf + l, size - l);
		idx = log_next(idx);
		seq++;
	}

	dumper->next_seq = next_seq;
	dumper->next_idx = next_idx;
	ret = true;
	logbuf_unlock_irqrestore(flags);
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);

/**
 * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 *
 * The function is similar to kmsg_dump_rewind(), but grabs no locks.
 */
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
	/* Restart at the last clear point and stop at the current head. */
	dumper->cur_seq = clear_seq;
	dumper->cur_idx = clear_idx;
	dumper->next_seq = log_next_seq;
	dumper->next_idx = log_next_idx;
}

/**
 * kmsg_dump_rewind - reset the iterator
 * @dumper: registered kmsg dumper
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
	unsigned long flags;

	/* Locked wrapper around kmsg_dump_rewind_nolock(). */
	logbuf_lock_irqsave(flags);
	kmsg_dump_rewind_nolock(dumper);
	logbuf_unlock_irqrestore(flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

#endif
731700.c
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. #include "real_string_utils_renames.h" // IWYU pragma: keep #include "real_timer_renames.h" // IWYU pragma: keep #include "real_gballoc_hl_renames.h" // IWYU pragma: keep #include "real_srw_lock_renames.h" // IWYU pragma: keep #include "srw_lock_win32.c"
110738.c
#include "NAMidiParser.h"
#include "NAMidiPreprocessor.h"
#include "NAMidiAST.h"
#include "NAIO.h"
#include "NAMidi_yacc.h"
#include "NAMidi_lex.h"
#include "NASet.h"
#include "NAMap.h"

#include <stdlib.h>
#include <libgen.h>

/* Concrete DSLParser implementation for the NAMidi DSL. */
typedef struct _NAMidiParser {
    DSLParser parser;        /* base interface struct; must stay first so the
                                object can be used through a DSLParser * */
    ParseContext *context;   /* not owned; supplied by the caller of Create */
    char *currentFilepath;   /* owned by context (returned by appendFile) */
} NAMidiParser;

extern int NAMidi_parse(yyscan_t scanner, void **node);

/*
 * Parse filepath into an AST.
 *
 * The file is first expanded by the preprocessor, then the resulting
 * stream is fed through the flex scanner / bison parser pair.
 * Returns the AST root node, or NULL when parsing produced no tree.
 */
static Node *NAMidiParserParse(void *_self, const char *filepath)
{
    NAMidiParser *self = _self;

    NAMidiPreprocessor *preprocessor = NAMidiPreprocessorCreate(self->context);
    FILE *fp = NAMidiPreprocessorScanFile(preprocessor, filepath);
    NAMidiPreprocessorDestroy(preprocessor);

    /*
     * Robustness fix: do not hand a NULL stream to the scanner.
     * NAMidi__create_buffer() on a NULL FILE * and fclose(NULL) are
     * undefined behavior. Errors, if any, were already recorded in
     * self->context by the preprocessor.
     */
    if (NULL == fp) {
        return NULL;
    }

    yyscan_t scanner;
    NAMidi_lex_init_extra(self, &scanner);

    YY_BUFFER_STATE state = NAMidi__create_buffer(fp, YY_BUF_SIZE, scanner);
    NAMidi__switch_to_buffer(state, scanner);

    Node *node = NULL;
    NAMidi_parse(scanner, (void **)&node);

    NAMidi__delete_buffer(state, scanner);
    NAMidi_lex_destroy(scanner);
    fclose(fp);

    return node;
}

/* Release the parser object itself (context is not owned). */
static void NAMidiParserDestroy(void *_self)
{
    NAMidiParser *self = _self;
    free(self);
}

/*
 * Create a NAMidi DSL parser bound to context.
 * Ownership of the returned object passes to the caller (destroy()).
 */
DSLParser *NAMidiParserCreate(ParseContext *context)
{
    NAMidiParser *self = calloc(1, sizeof(NAMidiParser));
    self->context = context;
    self->parser.parse = NAMidiParserParse;
    self->parser.destroy = NAMidiParserDestroy;
    return (DSLParser *)self;
}

/* Record the file currently being scanned (called from the lexer). */
void NAMidiParserSetCurrentFilepath(void *_self, const char *filepath)
{
    NAMidiParser *self = _self;
    /* appendFile interns the path; the context keeps ownership. */
    self->currentFilepath = self->context->appendFile(self->context, filepath);
}

/* Return the path recorded by NAMidiParserSetCurrentFilepath(). */
char *NAMidiParserGetCurrentFilepath(void *_self)
{
    NAMidiParser *self = _self;
    return self->currentFilepath;
}

/*
 * Report a syntax error at location. A bare newline token is reported
 * as "unexpected end of line", anything else as a generic syntax error
 * carrying the offending token text.
 */
void NAMidiParserSyntaxError(void *_self, FileLocation *location, const char *token)
{
    NAMidiParser *self = _self;

    if ('\n' == token[0]) {
        self->context->appendError(self->context, location,
                NAMidiParseErrorUnexpectedEOL, NULL);
    }
    else {
        self->context->appendError(self->context, location,
                GeneralParseErrorSyntaxError, token, NULL);
    }
}

/* Report an unexpected end-of-file at location. */
void NAMidiParserUnExpectedEOF(void *_self, FileLocation *location)
{
    NAMidiParser *self = _self;
    self->context->appendError(self->context, location,
            NAMidiParseErrorUnexpectedEOF, NULL);
}
984895.c
/* -*- c-file-style: "ruby"; indent-tabs-mode: nil -*- */
/************************************************

  rbgtktextiter.c -

  $Author: ggc $
  $Date: 2007/07/13 16:07:32 $

  Copyright (C) 2002-2005 Ruby-GNOME2 Project Team
  Copyright (C) 2002,2003 Masahiro Sakai

************************************************/

#include "global.h"

/* Unwrap a Ruby Gtk::TextIter into the underlying boxed GtkTextIter*. */
#define _SELF(s) ((GtkTextIter*)RVAL2BOXED(s, GTK_TYPE_TEXT_ITER))
/* Unwrap a Ruby Gtk::TextTag into a GtkTextTag*. */
#define RVAL2TAG(t) (GTK_TEXT_TAG(RVAL2GOBJ(t)))
/* Wrap a GtkTextIter* back into a Ruby boxed object. */
#define ITR2RVAL(i) (BOXED2RVAL(i, GTK_TYPE_TEXT_ITER))

/* TRUE when running against GTK+ >= 2.4 (search flags changed type). */
static gboolean is_compat_240;
static ID id_pixbuf;

/* Gtk::TextIter#buffer - the buffer this iterator belongs to. */
static VALUE
get_buffer(self)
    VALUE self;
{
    return GOBJ2RVAL(gtk_text_iter_get_buffer(_SELF(self)));
}

/* Generate a 0-arity getter returning a gint as a Ruby Integer. */
#define def_gint_getter(__name__) \
static VALUE \
get_##__name__(self) \
    VALUE self; \
{ \
    return INT2NUM(gtk_text_iter_get_##__name__(_SELF(self))); \
}

def_gint_getter(offset)
def_gint_getter(line)
def_gint_getter(line_offset)
def_gint_getter(line_index)
def_gint_getter(visible_line_offset)
def_gint_getter(visible_line_index)

/* Gtk::TextIter#char - the character at this position, as a UTF-8 string. */
static VALUE
get_char(self)
    VALUE self;
{
    /* g_unichar_to_utf8 writes at most 6 bytes; buf[10] is ample. */
    gchar buf[10];
    gint len = g_unichar_to_utf8(gtk_text_iter_get_char(_SELF(self)), buf);
    buf[len] = '\0';
    return CSTR2RVAL(buf);
}

static VALUE
get_slice(self, rhs)
    VALUE self, rhs;
{
    return CSTR2RVAL(gtk_text_iter_get_slice(_SELF(self), _SELF(rhs)));
}

static VALUE
get_text(self, rhs)
    VALUE self, rhs;
{
    return CSTR2RVAL(gtk_text_iter_get_text(_SELF(self), _SELF(rhs)));
}

static VALUE
get_visible_slice(self, rhs)
    VALUE self, rhs;
{
    return CSTR2RVAL(gtk_text_iter_get_visible_slice(_SELF(self), _SELF(rhs)));
}

static VALUE
get_visible_text(self, rhs)
    VALUE self, rhs;
{
    return CSTR2RVAL(gtk_text_iter_get_visible_text(_SELF(self), _SELF(rhs)));
}

/* Gtk::TextIter#pixbuf - pixbuf at this position, or nil. */
static VALUE
get_pixbuf(self)
    VALUE self;
{
    GdkPixbuf* pixbuf = gtk_text_iter_get_pixbuf(_SELF(self));
    VALUE ret = Qnil;
    if (pixbuf){
        ret = GOBJ2RVAL(pixbuf);
        /* Keep a reference on the iter so the pixbuf is not collected. */
        G_CHILD_SET(self, id_pixbuf, ret);
    }
    return ret;
}

static VALUE
get_marks(self)
    VALUE self;
{
    return GSLIST2ARYF(gtk_text_iter_get_marks(_SELF(self)));
}

static VALUE
get_toggled_tags(self, toggled_on)
    VALUE self, toggled_on;
{
    return GSLIST2ARYF(gtk_text_iter_get_toggled_tags(_SELF(self), RVAL2CBOOL(toggled_on)));
}

/* Gtk::TextIter#child_anchor - anchor at this position, or nil. */
static VALUE
get_child_anchor(self)
    VALUE self;
{
    GtkTextChildAnchor* anchor = gtk_text_iter_get_child_anchor(_SELF(self));
    return anchor ? GOBJ2RVAL(anchor) : Qnil;
}

static VALUE
begins_tag(self, tag)
    VALUE self, tag;
{
    return CBOOL2RVAL(gtk_text_iter_begins_tag(_SELF(self), RVAL2TAG(tag)));
}

static VALUE
ends_tag(self, tag)
    VALUE self, tag;
{
    return CBOOL2RVAL(gtk_text_iter_ends_tag(_SELF(self), RVAL2TAG(tag)));
}

static VALUE
toggles_tag(self, tag)
    VALUE self, tag;
{
    return CBOOL2RVAL(gtk_text_iter_toggles_tag(_SELF(self), RVAL2TAG(tag)));
}

static VALUE
has_tag(self, tag)
    VALUE self, tag;
{
    return CBOOL2RVAL(gtk_text_iter_has_tag(_SELF(self), RVAL2TAG(tag)));
}

static VALUE
get_tags(self)
    VALUE self;
{
    return GSLIST2ARYF(gtk_text_iter_get_tags(_SELF(self)));
}

static VALUE
editable(self, default_setting)
    VALUE self, default_setting;
{
    return CBOOL2RVAL(gtk_text_iter_editable(_SELF(self), RVAL2CBOOL(default_setting)));
}

static VALUE
can_insert(self, default_setting)
    VALUE self, default_setting;
{
    return CBOOL2RVAL(gtk_text_iter_can_insert(_SELF(self), RVAL2CBOOL(default_setting)));
}

/* Generate a 0-arity predicate returning true/false. */
#define def_predicate(__name__) \
static VALUE \
__name__(self) \
    VALUE self; \
{ \
    return CBOOL2RVAL(gtk_text_iter_##__name__(_SELF(self))); \
}

def_predicate(starts_word)
def_predicate(ends_word)
def_predicate(inside_word)
def_predicate(starts_sentence)
def_predicate(ends_sentence)
def_predicate(starts_line)
def_predicate(ends_line)
def_predicate(is_cursor_position)

def_gint_getter(chars_in_line)
def_gint_getter(bytes_in_line)

/* Gtk::TextIter#attributes - effective text attributes here, or nil. */
static VALUE
get_attributes(self)
    VALUE self;
{
    GtkTextAttributes attr;
    if(gtk_text_iter_get_attributes(_SELF(self), &attr) == TRUE){
        return BOXED2RVAL(&attr, GTK_TYPE_TEXT_ATTRIBUTES);
    } else {
        return Qnil;
    }
}

/* Gtk::TextIter#language - language in effect, as an RFC-3066 string. */
static VALUE
get_language(self)
    VALUE self;
{
    return CSTR2RVAL(pango_language_to_string(gtk_text_iter_get_language(_SELF(self))));
}

def_predicate(is_end)
def_predicate(is_start)

/* Generate a 0-arity movement method returning true on success. */
#define def_move(__name__) \
static VALUE \
__name__(self) \
    VALUE self; \
{ \
    return CBOOL2RVAL(gtk_text_iter_##__name__(_SELF(self))); \
}

/* Generate a movement method taking a count argument. */
#define def_move_gint(__name__) \
static VALUE \
__name__(self, i) \
    VALUE self, i; \
{ \
    return CBOOL2RVAL(gtk_text_iter_##__name__(_SELF(self), NUM2INT(i))); \
}

def_move(forward_char)
def_move(backward_char)
def_move_gint(forward_chars)
def_move_gint(backward_chars)
def_move(forward_line)
def_move(backward_line)
def_move_gint(forward_lines)
def_move_gint(backward_lines)
def_move(forward_word_end)
def_move(backward_word_start)
def_move_gint(forward_word_ends)
def_move_gint(backward_word_starts)
def_move(forward_sentence_end)
def_move(backward_sentence_start)
def_move_gint(forward_sentence_ends)
def_move_gint(backward_sentence_starts)

#if GTK_CHECK_VERSION(2,4,0)
def_move_gint(forward_visible_word_ends)
def_move_gint(backward_visible_word_starts)
def_move(forward_visible_word_end)
def_move(backward_visible_word_start)
def_move(forward_visible_cursor_position)
def_move(backward_visible_cursor_position)
def_move_gint(forward_visible_cursor_positions)
def_move_gint(backward_visible_cursor_positions)
#endif

#if GTK_CHECK_VERSION(2,8,0)
def_move(forward_visible_line)
def_move(backward_visible_line)
def_move_gint(forward_visible_lines)
def_move_gint(backward_visible_lines)
#endif

def_move(forward_cursor_position)
def_move(backward_cursor_position)
def_move_gint(forward_cursor_positions)
def_move_gint(backward_cursor_positions)
def_move(forward_to_line_end)

/* Gtk::TextIter#forward_to_end - move to the buffer end; returns self. */
static VALUE
forward_to_end(self)
    VALUE self;
{
    gtk_text_iter_forward_to_end(_SELF(self));
    return self;
}

/* Generate a setter taking a Ruby Integer; returns the argument. */
#define def_gint_setter(__name__) \
static VALUE \
set_##__name__(self, val) \
    VALUE self, val; \
{ \
    gtk_text_iter_set_##__name__(_SELF(self), NUM2INT(val)); \
    return val; \
}

def_gint_setter(offset)
def_gint_setter(line)
def_gint_setter(line_offset)
def_gint_setter(line_index) def_gint_setter(visible_line_offset) def_gint_setter(visible_line_index) static VALUE forward_to_tag_toggle(argc, argv, self) int argc; VALUE *argv; VALUE self; { VALUE tag; rb_scan_args(argc, argv, "01", &tag); return CBOOL2RVAL(gtk_text_iter_forward_to_tag_toggle(_SELF(self), NIL_P(tag) ? NULL : RVAL2TAG(tag))); } static VALUE backward_to_tag_toggle(argc, argv, self) int argc; VALUE *argv; VALUE self; { VALUE tag; rb_scan_args(argc, argv, "01", &tag); return CBOOL2RVAL(gtk_text_iter_backward_to_tag_toggle(_SELF(self), NIL_P(tag) ? NULL : RVAL2TAG(tag))); } static gboolean char_predicate_func(ch, func) guint32 ch; gpointer func; { return RVAL2CBOOL(rb_funcall((VALUE)func, id_call, 1, UINT2NUM(ch))); } static VALUE forward_find_char(argc, argv, self) int argc; VALUE *argv; VALUE self; { VALUE limit; volatile VALUE func = rb_block_proc(); rb_scan_args(argc, argv, "01", &limit); return CBOOL2RVAL(gtk_text_iter_forward_find_char(_SELF(self), (GtkTextCharPredicate)char_predicate_func, (gpointer)func, NIL_P(limit) ? NULL : _SELF(limit))); } static VALUE backward_find_char(argc, argv, self) int argc; VALUE *argv; VALUE self; { VALUE limit; volatile VALUE func = rb_block_proc(); rb_scan_args(argc, argv, "01", &limit); return CBOOL2RVAL(gtk_text_iter_backward_find_char(_SELF(self), (GtkTextCharPredicate)char_predicate_func, (gpointer)func, NIL_P(limit) ? NULL : _SELF(limit))); } static VALUE forward_search(argc, argv, self) int argc; VALUE *argv; VALUE self; { GtkTextIter m_start, m_end; VALUE str, flags, limit; gboolean ret; rb_scan_args(argc, argv, "21", &str, &flags, &limit); if (is_compat_240){ ret = gtk_text_iter_forward_search(_SELF(self), RVAL2CSTR(str), RVAL2GFLAGS(flags, GTK_TYPE_TEXT_SEARCH_FLAGS), &m_start, &m_end, NIL_P(limit) ? NULL : _SELF(limit)); } else { ret = gtk_text_iter_forward_search(_SELF(self), RVAL2CSTR(str), RVAL2GENUM(flags, GTK_TYPE_TEXT_SEARCH_FLAGS), &m_start, &m_end, NIL_P(limit) ? 
NULL : _SELF(limit)); } return ret ? rb_ary_new3(2, ITR2RVAL(&m_start), ITR2RVAL(&m_end)) : Qnil; } static VALUE backward_search(argc, argv, self) int argc; VALUE *argv; VALUE self; { GtkTextIter m_start, m_end; VALUE str, flags, limit; gboolean ret; rb_scan_args(argc, argv, "21", &str, &flags, &limit); if (is_compat_240){ ret = gtk_text_iter_backward_search(_SELF(self), RVAL2CSTR(str), RVAL2GFLAGS(flags, GTK_TYPE_TEXT_SEARCH_FLAGS), &m_start, &m_end, NIL_P(limit) ? NULL : _SELF(limit)); } else { ret = gtk_text_iter_backward_search(_SELF(self), RVAL2CSTR(str), RVAL2GENUM(flags, GTK_TYPE_TEXT_SEARCH_FLAGS), &m_start, &m_end, NIL_P(limit) ? NULL : _SELF(limit)); } return ret ? rb_ary_new3(2, ITR2RVAL(&m_start), ITR2RVAL(&m_end)) : Qnil; } static VALUE equal(self, other) VALUE self, other; { return CBOOL2RVAL(gtk_text_iter_equal(_SELF(self), _SELF(other))); } static VALUE compare(self, rhs) VALUE self, rhs; { return INT2NUM(gtk_text_iter_compare(_SELF(self), _SELF(rhs))); } /* The following methods don't have to be implimented. Including Comparable module is enough. gboolean gtk_text_iter_in_range (const GtkTextIter *iter, const GtkTextIter *start, const GtkTextIter *end); void gtk_text_iter_order (GtkTextIter *first, GtkTextIter *second); */ void Init_gtk_textiter() { VALUE cTextIter = G_DEF_CLASS(GTK_TYPE_TEXT_ITER, "TextIter", mGtk); rb_include_module(cTextIter, rb_mComparable); is_compat_240 = gtk_check_version(2, 4, 0) ? 
FALSE : TRUE; id_pixbuf = rb_intern("pixbuf"); rb_define_method(cTextIter, "buffer", get_buffer, 0); rb_define_method(cTextIter, "offset", get_offset, 0); rb_define_method(cTextIter, "line", get_line, 0); rb_define_method(cTextIter, "line_offset", get_line_offset, 0); rb_define_method(cTextIter, "line_index", get_line_index, 0); rb_define_method(cTextIter, "visible_line_offset", get_visible_line_offset, 0); rb_define_method(cTextIter, "visible_line_index", get_visible_line_index, 0); rb_define_method(cTextIter, "char", get_char, 0); rb_define_method(cTextIter, "get_slice", get_slice, 1); rb_define_method(cTextIter, "get_text", get_text, 1); rb_define_method(cTextIter, "get_visible_slice", get_visible_slice, 1); rb_define_method(cTextIter, "get_visible_text", get_visible_text, 1); rb_define_method(cTextIter, "pixbuf", get_pixbuf, 0); rb_define_method(cTextIter, "marks", get_marks, 0); rb_define_method(cTextIter, "child_anchor", get_child_anchor, 0); rb_define_method(cTextIter, "toggled_tags", get_toggled_tags, 1); rb_define_method(cTextIter, "begins_tag?", begins_tag, 1); rb_define_method(cTextIter, "ends_tag?", ends_tag, 1); rb_define_method(cTextIter, "toggles_tag?", toggles_tag, 1); rb_define_method(cTextIter, "has_tag?", has_tag, 1); rb_define_method(cTextIter, "tags", get_tags, 0); rb_define_method(cTextIter, "editable?", editable, 1); rb_define_method(cTextIter, "can_insert?", can_insert, 1); rb_define_method(cTextIter, "starts_word?", starts_word, 0); rb_define_method(cTextIter, "ends_word?", ends_word, 0); rb_define_method(cTextIter, "inside_word?", inside_word, 0); rb_define_method(cTextIter, "starts_sentence?", starts_sentence, 0); rb_define_method(cTextIter, "ends_sentence?", ends_sentence, 0); rb_define_method(cTextIter, "starts_line?", starts_line, 0); rb_define_method(cTextIter, "ends_line?", ends_line, 0); rb_define_method(cTextIter, "cursor_position?", is_cursor_position, 0); rb_define_method(cTextIter, "chars_in_line", get_chars_in_line, 0); 
rb_define_method(cTextIter, "bytes_in_line", get_bytes_in_line, 0); rb_define_method(cTextIter, "attributes", get_attributes, 0); rb_define_method(cTextIter, "language", get_language, 0); rb_define_method(cTextIter, "end?", is_end, 0); rb_define_method(cTextIter, "start?", is_start, 0); rb_define_method(cTextIter, "forward_char", forward_char, 0); rb_define_method(cTextIter, "backward_char", backward_char, 0); rb_define_method(cTextIter, "forward_chars", forward_chars, 1); rb_define_method(cTextIter, "backward_chars", backward_chars, 1); rb_define_method(cTextIter, "forward_line", forward_line, 0); rb_define_method(cTextIter, "backward_line", backward_line, 0); rb_define_method(cTextIter, "forward_lines", forward_lines, 1); rb_define_method(cTextIter, "backward_lines", backward_lines, 1); rb_define_method(cTextIter, "forward_word_end", forward_word_end, 0); rb_define_method(cTextIter, "backward_word_start", backward_word_start, 0); rb_define_method(cTextIter, "forward_word_ends", forward_word_ends, 1); rb_define_method(cTextIter, "backward_word_starts", backward_word_starts, 1); rb_define_method(cTextIter, "forward_sentence_end", forward_sentence_end, 0); rb_define_method(cTextIter, "backward_sentence_start", backward_sentence_start, 0); rb_define_method(cTextIter, "forward_sentence_ends", forward_sentence_ends, 1); rb_define_method(cTextIter, "backward_sentence_starts", backward_sentence_starts, 1); #if GTK_CHECK_VERSION(2,4,0) rb_define_method(cTextIter, "forward_visible_word_ends", forward_visible_word_ends, 1); rb_define_method(cTextIter, "backward_visible_word_starts", backward_visible_word_starts, 1); rb_define_method(cTextIter, "forward_visible_word_end", forward_visible_word_end, 0); rb_define_method(cTextIter, "backword_visible_word_start", backward_visible_word_start, 0); rb_define_method(cTextIter, "forward_visible_cursor_position", forward_visible_cursor_position, 0); rb_define_method(cTextIter, "backward_visible_cursor_position", 
backward_visible_cursor_position, 0); rb_define_method(cTextIter, "forward_visible_cursor_positions", forward_visible_cursor_positions, 1); rb_define_method(cTextIter, "backward_visible_cursor_positions", backward_visible_cursor_positions, 1); #endif #if GTK_CHECK_VERSION(2,8,0) rb_define_method(cTextIter, "forward_visible_line", forward_visible_line, 0); rb_define_method(cTextIter, "backward_visible_line", backward_visible_line, 0); rb_define_method(cTextIter, "forward_visible_lines", forward_visible_lines, 1); rb_define_method(cTextIter, "backward_visible_lines", backward_visible_lines, 1); #endif rb_define_method(cTextIter, "forward_cursor_position", forward_cursor_position, 0); rb_define_method(cTextIter, "backward_cursor_position", backward_cursor_position, 0); rb_define_method(cTextIter, "forward_cursor_positions", forward_cursor_positions, 1); rb_define_method(cTextIter, "backward_cursor_positions", backward_cursor_positions, 1); rb_define_method(cTextIter, "forward_to_end", forward_to_end, 0); rb_define_method(cTextIter, "forward_to_line_end", forward_to_line_end, 0); rb_define_method(cTextIter, "set_offset", set_offset, 1); rb_define_method(cTextIter, "set_line", set_line, 1); rb_define_method(cTextIter, "set_line_offset", set_line_offset, 1); rb_define_method(cTextIter, "set_line_index", set_line_index, 1); rb_define_method(cTextIter, "set_visible_line_offset", set_visible_line_offset, 1); rb_define_method(cTextIter, "set_visible_line_index", set_visible_line_index, 1); rb_define_method(cTextIter, "forward_to_tag_toggle", forward_to_tag_toggle, -1); rb_define_method(cTextIter, "backward_to_tag_toggle", backward_to_tag_toggle, -1); rb_define_method(cTextIter, "forward_find_char", forward_find_char, -1); rb_define_method(cTextIter, "backward_find_char", backward_find_char, -1); rb_define_method(cTextIter, "forward_search", forward_search, -1); rb_define_method(cTextIter, "backward_search", backward_search, -1); rb_define_method(cTextIter, "==", equal, 1); 
rb_define_method(cTextIter, "<=>", compare, 1); G_DEF_SETTERS(cTextIter); /* GtkTextSearchFlags */ G_DEF_CLASS(GTK_TYPE_TEXT_SEARCH_FLAGS, "SearchFlags", cTextIter); G_DEF_CONSTANTS(cTextIter, GTK_TYPE_TEXT_SEARCH_FLAGS, "GTK_TEXT_"); }
638775.c
/* Copyright (C) 1995-1998 Eric Young ([email protected]) * All rights reserved. * * This package is an SSL implementation written * by Eric Young ([email protected]). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson ([email protected]). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young ([email protected])" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson ([email protected])" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #include <openssl/asn1.h> #include <limits.h> #include <string.h> #include <openssl/asn1_mac.h> #include <openssl/err.h> #include <openssl/mem.h> #include "../internal.h" /* Cross-module errors from crypto/x509/i2d_pr.c. */ OPENSSL_DECLARE_ERROR_REASON(ASN1, UNSUPPORTED_PUBLIC_KEY_TYPE) /* Cross-module errors from crypto/x509/algorithm.c. */ OPENSSL_DECLARE_ERROR_REASON(ASN1, CONTEXT_NOT_INITIALISED) OPENSSL_DECLARE_ERROR_REASON(ASN1, DIGEST_AND_KEY_TYPE_NOT_SUPPORTED) OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_MESSAGE_DIGEST_ALGORITHM) OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_SIGNATURE_ALGORITHM) OPENSSL_DECLARE_ERROR_REASON(ASN1, WRONG_PUBLIC_KEY_TYPE) /* * Cross-module errors from crypto/x509/asn1_gen.c. TODO(davidben): Remove * these once asn1_gen.c is gone. 
*/ OPENSSL_DECLARE_ERROR_REASON(ASN1, DEPTH_EXCEEDED) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_BITSTRING_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_BOOLEAN) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_HEX) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_IMPLICIT_TAG) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_INTEGER) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_NESTED_TAGGING) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_NULL_VALUE) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_OBJECT) OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_TIME_VALUE) OPENSSL_DECLARE_ERROR_REASON(ASN1, INTEGER_NOT_ASCII_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_MODIFIER) OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_NUMBER) OPENSSL_DECLARE_ERROR_REASON(ASN1, LIST_ERROR) OPENSSL_DECLARE_ERROR_REASON(ASN1, MISSING_VALUE) OPENSSL_DECLARE_ERROR_REASON(ASN1, NOT_ASCII_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, OBJECT_NOT_ASCII_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, SEQUENCE_OR_SET_NEEDS_CONFIG) OPENSSL_DECLARE_ERROR_REASON(ASN1, TIME_NOT_ASCII_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_FORMAT) OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_TAG) OPENSSL_DECLARE_ERROR_REASON(ASN1, UNSUPPORTED_TYPE) static int asn1_get_length(const unsigned char **pp, int *inf, long *rl, long max); static void asn1_put_length(unsigned char **pp, int length); int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag, int *pclass, long omax) { int i, ret; long l; const unsigned char *p = *pp; int tag, xclass, inf; long max = omax; if (!max) goto err; ret = (*p & V_ASN1_CONSTRUCTED); xclass = (*p & V_ASN1_PRIVATE); i = *p & V_ASN1_PRIMITIVE_TAG; if (i == V_ASN1_PRIMITIVE_TAG) { /* high-tag */ p++; if (--max == 0) goto err; l = 0; while (*p & 0x80) { l <<= 7L; l |= *(p++) & 0x7f; if (--max == 0) goto err; if (l > (INT_MAX >> 7L)) goto err; } l <<= 7L; l |= *(p++) & 0x7f; tag = (int)l; if (--max == 0) goto err; } else { tag = i; p++; if (--max == 
0) goto err; } /* To avoid ambiguity with V_ASN1_NEG, impose a limit on universal tags. */ if (xclass == V_ASN1_UNIVERSAL && tag > V_ASN1_MAX_UNIVERSAL) goto err; *ptag = tag; *pclass = xclass; if (!asn1_get_length(&p, &inf, plength, max)) goto err; if (inf && !(ret & V_ASN1_CONSTRUCTED)) goto err; #if 0 fprintf(stderr, "p=%d + *plength=%ld > omax=%ld + *pp=%d (%d > %d)\n", (int)p, *plength, omax, (int)*pp, (int)(p + *plength), (int)(omax + *pp)); #endif if (*plength > (omax - (p - *pp))) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG); /* * Set this so that even if things are not long enough the values are * set correctly */ ret |= 0x80; } *pp = p; return (ret | inf); err: OPENSSL_PUT_ERROR(ASN1, ASN1_R_HEADER_TOO_LONG); return (0x80); } static int asn1_get_length(const unsigned char **pp, int *inf, long *rl, long max) { const unsigned char *p = *pp; unsigned long ret = 0; unsigned long i; if (max-- < 1) return 0; if (*p == 0x80) { *inf = 1; ret = 0; p++; } else { *inf = 0; i = *p & 0x7f; if (*(p++) & 0x80) { if (i > sizeof(ret) || max < (long)i) return 0; while (i-- > 0) { ret <<= 8L; ret |= *(p++); } } else ret = i; } /* * Bound the length to comfortably fit in an int. Lengths in this module * often switch between int and long without overflow checks. */ if (ret > INT_MAX / 2) return 0; *pp = p; *rl = (long)ret; return 1; } /* * class 0 is constructed constructed == 2 for indefinite length constructed */ void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag, int xclass) { unsigned char *p = *pp; int i, ttag; i = (constructed) ? 
V_ASN1_CONSTRUCTED : 0; i |= (xclass & V_ASN1_PRIVATE); if (tag < 31) *(p++) = i | (tag & V_ASN1_PRIMITIVE_TAG); else { *(p++) = i | V_ASN1_PRIMITIVE_TAG; for (i = 0, ttag = tag; ttag > 0; i++) ttag >>= 7; ttag = i; while (i-- > 0) { p[i] = tag & 0x7f; if (i != (ttag - 1)) p[i] |= 0x80; tag >>= 7; } p += ttag; } if (constructed == 2) *(p++) = 0x80; else asn1_put_length(&p, length); *pp = p; } int ASN1_put_eoc(unsigned char **pp) { unsigned char *p = *pp; *p++ = 0; *p++ = 0; *pp = p; return 2; } static void asn1_put_length(unsigned char **pp, int length) { unsigned char *p = *pp; int i, l; if (length <= 127) *(p++) = (unsigned char)length; else { l = length; for (i = 0; l > 0; i++) l >>= 8; *(p++) = i | 0x80; l = i; while (i-- > 0) { p[i] = length & 0xff; length >>= 8; } p += l; } *pp = p; } int ASN1_object_size(int constructed, int length, int tag) { int ret = 1; if (length < 0) return -1; if (tag >= 31) { while (tag > 0) { tag >>= 7; ret++; } } if (constructed == 2) { ret += 3; } else { ret++; if (length > 127) { int tmplen = length; while (tmplen > 0) { tmplen >>= 8; ret++; } } } if (ret >= INT_MAX - length) return -1; return ret + length; } int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str) { if (str == NULL) return 0; dst->type = str->type; if (!ASN1_STRING_set(dst, str->data, str->length)) return 0; dst->flags = str->flags; return 1; } ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *str) { ASN1_STRING *ret; if (!str) return NULL; ret = ASN1_STRING_new(); if (!ret) return NULL; if (!ASN1_STRING_copy(ret, str)) { ASN1_STRING_free(ret); return NULL; } return ret; } int ASN1_STRING_set(ASN1_STRING *str, const void *_data, int len) { unsigned char *c; const char *data = _data; if (len < 0) { if (data == NULL) return (0); else len = strlen(data); } if ((str->length <= len) || (str->data == NULL)) { c = str->data; if (c == NULL) str->data = OPENSSL_malloc(len + 1); else str->data = OPENSSL_realloc(c, len + 1); if (str->data == NULL) { 
OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE); str->data = c; return (0); } } str->length = len; if (data != NULL) { OPENSSL_memcpy(str->data, data, len); /* an allowance for strings :-) */ str->data[len] = '\0'; } return (1); } void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len) { if (str->data) OPENSSL_free(str->data); str->data = data; str->length = len; } ASN1_STRING *ASN1_STRING_new(void) { return (ASN1_STRING_type_new(V_ASN1_OCTET_STRING)); } ASN1_STRING *ASN1_STRING_type_new(int type) { ASN1_STRING *ret; ret = (ASN1_STRING *)OPENSSL_malloc(sizeof(ASN1_STRING)); if (ret == NULL) { OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE); return (NULL); } ret->length = 0; ret->type = type; ret->data = NULL; ret->flags = 0; return (ret); } void ASN1_STRING_free(ASN1_STRING *a) { if (a == NULL) return; if (a->data && !(a->flags & ASN1_STRING_FLAG_NDEF)) OPENSSL_free(a->data); OPENSSL_free(a); } int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b) { int i; i = (a->length - b->length); if (i == 0) { i = OPENSSL_memcmp(a->data, b->data, a->length); if (i == 0) return (a->type - b->type); else return (i); } else return (i); } int ASN1_STRING_length(const ASN1_STRING *x) { return M_ASN1_STRING_length(x); } void ASN1_STRING_length_set(ASN1_STRING *x, int len) { M_ASN1_STRING_length_set(x, len); return; } int ASN1_STRING_type(ASN1_STRING *x) { return M_ASN1_STRING_type(x); } unsigned char *ASN1_STRING_data(ASN1_STRING *x) { return M_ASN1_STRING_data(x); } const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x) { return x->data; }
478527.c
#ifdef PLAN9
#include <u.h>
#include <libc.h>
#include <bio.h>
#else
#include <stdio.h>
#include <unistd.h>
#include "plan9.h"
#endif
#include "hdr.h"
#include "conv.h"
#include "gb.h"

/* a state machine for interpreting gb. */

/*
 * gbproc - feed one input byte into the GB decoder state machine.
 *
 * c         - next input byte, or a negative value to flush at end of input
 * r         - output cursor; decoded Runes are appended via emit() (macro
 *             from hdr.h which advances *r — TODO confirm)
 * input_loc - byte offset of c in the input, used only for diagnostics
 *
 * State is kept in function-static variables, so only one stream may be
 * decoded at a time.  Bytes < 0xA1 pass through as ASCII; a byte >= 0xA1
 * is remembered as the lead byte of a two-byte GB code.  The pair maps
 * through tabgb[] (row-major: (lead-0xA0)*100 + (trail-0xA0)).
 * On malformed or unmapped codes the global error counters/flags
 * (nerrors, squawk, clean) control reporting and BADMAP substitution.
 */
void
gbproc(int c, Rune **r, long input_loc)
{
	static enum { state0, state1 } state = state0;
	static int lastc;		/* remembered lead byte while in state1 */
	long n, ch, cold = c;		/* cold: original byte, for diagnostics */

again:					/* NOTE(review): label is never targeted
					 * by a goto in this file — possibly a
					 * leftover from a sibling decoder */
	switch(state)
	{
	case state0:	/* idle state */
		if(c < 0)
			return;		/* flush request: nothing pending */
		if(c >= 0xA1){
			/* lead byte of a two-byte code */
			lastc = c;
			state = state1;
			return;
		}
		emit(c);		/* plain ASCII byte */
		return;

	case state1:	/* seen a font spec */
		if(c >= 0xA1)
			/* valid trail byte: compute table index */
			n = (lastc-0xA0)*100 + (c-0xA0);
		else {
			/* trail byte out of range (includes EOF flush) */
			nerrors++;
			if(squawk)
				EPR "%s: bad gb glyph %d (from 0x%x,0x%x) near byte %ld in %s\n",
					argv0, c-0xA0, lastc, cold, input_loc, file);
			if(!clean)
				emit(BADMAP);
			state = state0;
			return;
		}
		ch = tabgb[n];
		if(ch < 0){
			/* pair is well-formed but has no Unicode mapping */
			nerrors++;
			if(squawk)
				EPR "%s: unknown gb %d (from 0x%x,0x%x) near byte %ld in %s\n",
					argv0, n, lastc, cold, input_loc, file);
			if(!clean)
				emit(BADMAP);
		} else
			emit(ch);
		state = state0;
	}
}

/*
 * gb_in - convert GB-encoded bytes read from fd into Runes and hand them
 * to the next conversion stage.
 *
 * Reads in N-byte chunks, pushes each byte through gbproc(), and flushes
 * the Rune buffer to `out` whenever it nears capacity (re leaves 3 slots
 * of headroom) and after each read.  A final gbproc(-1, ...) call flushes
 * any dangling lead byte so a truncated trailing pair is diagnosed.
 */
void
gb_in(int fd, long *notused, struct convert *out)
{
	Rune ob[N];
	Rune *r, *re;
	uchar ibuf[N];
	int n, i;
	long nin;			/* running input byte offset */

	USED(notused);
	r = ob;
	re = ob+N-3;
	nin = 0;
	while((n = read(fd, ibuf, sizeof ibuf)) > 0){
		for(i = 0; i < n; i++){
			gbproc(ibuf[i], &r, nin++);
			if(r >= re){
				OUT(out, ob, r-ob);
				r = ob;
			}
		}
		if(r > ob){
			OUT(out, ob, r-ob);
			r = ob;
		}
	}
	gbproc(-1, &r, nin);		/* flush decoder state at EOF */
	if(r > ob)
		OUT(out, ob, r-ob);
}

/*
 * gb_out - convert n Runes from base[] to GB bytes in the global obuf
 * and write them to stdout (fd 1).
 *
 * On first use, builds the reverse map tab[] (Rune -> GB index) from
 * tabgb[].  Runes < 128 are emitted as-is; mappable Runes become the
 * two-byte 0xA0-offset pair; unmappable Runes are reported and replaced
 * by BYTEBADMAP unless `clean` is set.  Updates the global nrunes/noutput
 * statistics counters.
 */
void
gb_out(Rune *base, int n, long *notused)
{
	char *p;
	int i;
	Rune r;
	static int first = 1;		/* lazily build the reverse table once */

	USED(notused);
	if(first){
		first = 0;
		for(i = 0; i < NRUNE; i++)
			tab[i] = -1;
		for(i = 0; i < GBMAX; i++)
			if(tabgb[i] != -1)
				tab[tabgb[i]] = i;
	}
	nrunes += n;
	p = obuf;
	for(i = 0; i < n; i++){
		r = base[i];
		if(r < 128)
			*p++ = r;	/* ASCII passes through */
		else {
			if(tab[r] != -1){
				/* re-split the table index into the pair */
				r = tab[r];
				*p++ = 0xA0 + (r/100);
				*p++ = 0xA0 + (r%100);
				continue;
			}
			if(squawk)
				EPR "%s: rune 0x%x not in output cs\n", argv0, r);
			nerrors++;
			if(clean)
				continue;
			*p++ = BYTEBADMAP;
		}
	}
	noutput += p-obuf;
	if(p > obuf)
		write(1, obuf, p-obuf);
}
281653.c
/* Copyright (c) 1989 Michael Landy

Disclaimer: No guarantees of performance accompany this software, nor
is any responsibility assumed on the part of the authors. All the
software has been tested extensively and every effort has been made to
insure its reliability. */

/* this is the 4board version for systems with 4 DR256 memories */

/*
 * rframe.c - read a frame from the Grinnell
 *
 * Usage:	rframe [rows [cols [initialrow [initialcol]]]] [-v]
 *
 * Defaults:	rows: 512, cols: 512, initialrow: 0, initialcol: 0
 *
 * Load:	cc -o rframe rframe.c -lhips
 *
 * Michael Landy - 6/22/85
 *
 * Reads a frame from the Adage starting at (initialrow,initialcol)
 * with size rows x cols.  There is no wraparound so large sizes will
 * be truncated.  The -v switch uses standard 30Hz video.
 */

#include <hipl_format.h>
#include <stdio.h>
#include <sys/ikio.h>
#include <graphics/ik_const.h>

int videosw=0;		/* nonzero when -v (30Hz interlaced video) given */

/*
 * Parse the optional row/col/origin arguments, clip the requested window
 * to the 512x512 framebuffer, DMA the pixels out of the Ikonas/Grinnell
 * hardware, and write them to stdout as a HIPS byte image.
 */
main(argc,argv)
char *argv[];
{
	extern int Ikonas;
	/* r,c: requested size; ir,ic: requested origin;
	 * or,oc: clipped output rows/cols actually transferred */
	int r,c,ir,ic,or,oc,i,argcc;
	char *fr;		/* frame pixel buffer, or*oc bytes */
	struct header hd;	/* HIPS image header */

	Progname = strsave(*argv);
	argcc=argc;		/* keep original argc for update_header() */
	r=c=512;
	ir=ic=0;
	if (strcmp(argv[argc-1],"-v")==0) {
		videosw++;
		argc--;
	}
	/* drop any other trailing switch before counting positionals */
	if(argv[argc-1][0]=='-')
		argc--;
	if(argc>1) r=atoi(argv[1]);
	if(argc>2) c=atoi(argv[2]);
	if(argc>3) ir=atoi(argv[3]);
	if(argc>4) ic=atoi(argv[4]);
	/* clip the window so it never extends past the 512x512 screen;
	 * the starting column count is forced even (hardware DMA unit) */
	or=512-ir;or=r<or?r:or;
	oc=(512-ic) & (~01);oc=c<oc?c:oc;
	if((or<1)||(oc<1))
		perr(HE_MSG,"wrong dimensions");
	if (oc & 01)
		perr(HE_MSG,"number of columns must be even");
	if ((fr = (char *) calloc(or*oc,sizeof(char))) == 0)
		perr(HE_MSG,"can't allocate core");
	init_header(&hd,"","",1,"",or,oc,PFBYTE,1,"");
	update_header(&hd,argcc,argv);
	write_header(&hd);
	/* program the display hardware and DMA the window into fr */
	Ik_open();
	Ik_init(videosw ? IK_30INT_HIRES : IK_60NON_HIRES);
	Ik_set_mode(SET_8_BIT_MODE);
	Ik_windowdma(ic,oc,IK_HXY_ADDR);
	Ik_dmard8(IK_HXY_ADDR,ic,ir,fr,or*oc);
	if (fwrite(fr,or*oc*sizeof(char),1,stdout) != 1)
		perr(HE_MSG,"error during write");
	Ik_close();
	return(0);
}
708282.c
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc. See LICENSE. */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifndef WIN32 #include <sys/time.h> #endif #include "lsquic_types.h" #include "lsquic.h" #include "lsquic_alarmset.h" #include "lsquic_packet_common.h" #include "lsquic_packet_out.h" #include "lsquic_conn.h" #include "lsquic_parse.h" struct test { /* Inputs. */ const struct parse_funcs *pf; size_t bufsz; lsquic_cid_t cid; /* Zero means connection ID is not specified */ const char *nonce; lsquic_packno_t packno; enum packno_bits bits; /* The test has been retrofitted by adding bits parameter. The test can * be made more complicated by calculating packet number length based on * some other inputs. However, this is tested elsewhere. */ union { unsigned char buf[4]; lsquic_ver_tag_t val; } ver; /* Outputs */ int len; /* Retval */ char out[0x100]; /* Contents */ }; static const struct test tests[] = { { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NULL, .packno = 0x01020304, .bits = GQUIC_PACKNO_LEN_4, .len = 1 + 8 + 0 + 4, .out = { (0 << 2) /* Nonce present */ | 0x08 /* Connection ID present */ | 0x20 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 0x04, 0x03, 0x02, 0x01, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_039), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NULL, .packno = 0x01020304, .bits = GQUIC_PACKNO_LEN_4, .len = 1 + 8 + 0 + 4, .out = { (0 << 2) /* Nonce present */ | 0x08 /* Connection ID present */ | 0x20 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 0x01, 0x02, 0x03, 0x04, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NULL, .packno = 0x00, .bits = GQUIC_PACKNO_LEN_1, .len = 1 + 8 + 0 + 1, .out = { (0 << 2) /* Nonce present */ | 
0x08 /* Connection ID present */ | 0x00 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 0x00, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NULL, .packno = 0x00, .bits = GQUIC_PACKNO_LEN_1, .ver.buf= { 'Q', '0', '3', '5', }, .len = 1 + 8 + 4 + 0 + 1, .out = { (0 << 2) /* Nonce present */ | 0x01 /* Version present */ | 0x08 /* Connection ID present */ | 0x00 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 'Q', '0', '3', '5', 0x00, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_039), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NULL, .packno = 0x09, .bits = GQUIC_PACKNO_LEN_1, .ver.buf= { 'Q', '0', '3', '9', }, .len = 1 + 8 + 4 + 0 + 1, .out = { (0 << 2) /* Nonce present */ | 0x01 /* Version present */ | 0x08 /* Connection ID present */ | 0x00 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 'Q', '0', '3', '9', 0x09, /* Packet number */ }, }, #define NONCENSE "0123456789abcdefghijklmnopqrstuv" #define NONCENSE_BYTES '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v' { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NONCENSE, .packno = 0x00, .bits = GQUIC_PACKNO_LEN_1, .len = 1 + 8 + 32 + 1, .out = { (1 << 2) /* Nonce present */ | 0x08 /* Connection ID present */ | 0x00 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ NONCENSE_BYTES, 0x00, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0, /* Do not set connection ID */ .nonce = NONCENSE, .packno = 0x00, .bits = GQUIC_PACKNO_LEN_1, .len = 1 + 0 + 32 + 1, .out = { (1 << 2) /* Nonce present */ | 0x00 /* 
Packet number length */ , NONCENSE_BYTES, 0x00, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NONCENSE, .packno = 0x00, .bits = GQUIC_PACKNO_LEN_1, .ver.buf= { 'Q', '0', '3', '5', }, .len = 1 + 8 + 4 + 32 + 1, .out = { (1 << 2) /* Nonce present */ | 0x01 /* Version present */ | 0x08 /* Connection ID present */ | 0x00 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ 'Q', '0', '3', '5', NONCENSE_BYTES, 0x00, /* Packet number */ }, }, { .pf = select_pf_by_ver(LSQVER_035), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NONCENSE, .packno = 0xA0A1A2A3A4A5A6A7UL, .bits = GQUIC_PACKNO_LEN_6, .len = 1 + 8 + 32 + 6, .out = { (1 << 2) /* Nonce present */ | 0x08 /* Connection ID present */ | 0x30 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ NONCENSE_BYTES, 0xA7, 0xA6, 0xA5, 0xA4, 0xA3, 0xA2, }, }, { .pf = select_pf_by_ver(LSQVER_039), .bufsz = QUIC_MAX_PUBHDR_SZ, .cid = 0x0102030405060708UL, .nonce = NONCENSE, .packno = 0xA0A1A2A3A4A5A6A7UL, .bits = GQUIC_PACKNO_LEN_6, .len = 1 + 8 + 32 + 6, .out = { (1 << 2) /* Nonce present */ | 0x08 /* Connection ID present */ | 0x30 /* Packet number length */ , 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, /* Connection ID */ NONCENSE_BYTES, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, }, }, }; static void run_test (int i) { const struct test *const test = &tests[i]; struct lsquic_packet_out packet_out = { .po_flags = (test->cid ? PO_CONN_ID : 0) | (test->ver.val ? PO_VERSION : 0) | (test->nonce ? 
PO_NONCE: 0) , .po_nonce = (unsigned char *) test->nonce, .po_ver_tag = test->ver.val, .po_packno = test->packno, }; lsquic_packet_out_set_packno_bits(&packet_out, test->bits); struct lsquic_conn lconn = { .cn_cid = test->cid, }; unsigned char out[QUIC_MAX_PUBHDR_SZ]; int len = test->pf->pf_gen_reg_pkt_header(&lconn, &packet_out, out, sizeof(out)); assert(("Packet length is correct", len == test->len)); if (test->len > 0) assert(("Packet contents are correct", 0 == memcmp(out, test->out, len))); } int main (void) { unsigned i; for (i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) run_test(i); return 0; }
209957.c
/*
 * Copyright (C) by Argonne National Laboratory
 *     See COPYRIGHT in top-level directory
 */

#include "mpioimpl.h"

#ifdef HAVE_WEAK_SYMBOLS
/* Map the MPI_ entry point onto the PMPI_ implementation using whichever
 * weak-symbol mechanism the platform supports. */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_File_get_view = PMPI_File_get_view
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_File_get_view MPI_File_get_view
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_File_get_view as PMPI_File_get_view
/* end of weak pragmas */
#elif defined(HAVE_WEAK_ATTRIBUTE)
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
                      MPI_Datatype * filetype, char *datarep)
    __attribute__ ((weak, alias("PMPI_File_get_view")));
#endif

/* Include mapping from MPI->PMPI */
#define MPIO_BUILD_PROFILING
#include "mpioprof.h"
#endif

/*@
    MPI_File_get_view - Returns the file view

Input Parameters:
. fh - file handle (handle)

Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)

.N fortran
@*/
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
                      MPI_Datatype * filetype, char *datarep)
{
    int error_code;
    ADIO_File adio_fh;
    static char myname[] = "MPI_FILE_GET_VIEW";
    int i, j, k, combiner;      /* scratch outputs of MPI_Type_get_envelope */
    MPI_Datatype copy_etype, copy_filetype;

    ROMIO_THREAD_CS_ENTER();

    adio_fh = MPIO_File_resolve(fh);

    /* --BEGIN ERROR HANDLING-- */
    MPIO_CHECK_FILE_HANDLE(adio_fh, myname, error_code);

    if (datarep == NULL) {
        error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                          myname, __LINE__, MPI_ERR_ARG, "**iodatarepnomem", 0);
        error_code = MPIO_Err_return_file(adio_fh, error_code);
        goto fn_exit;
    }
    /* --END ERROR HANDLING-- */

    *disp = adio_fh->disp;
    /* only the "native" and "external32" representations are tracked;
     * the name is bounded by MPI_MAX_DATAREP_STRING */
    ADIOI_Strncpy(datarep, (adio_fh->is_external32 ? "external32" : "native"),
                  MPI_MAX_DATAREP_STRING);

    /* Named (built-in) etypes are returned directly; derived etypes are
     * returned as a fresh committed duplicate so the caller owns it. */
    MPI_Type_get_envelope(adio_fh->etype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *etype = adio_fh->etype;
    else {
        /* FIXME: It is wrong to use MPI_Type_contiguous; the user could choose to
         * re-implement MPI_Type_contiguous in an unexpected way.  Either use
         * MPID_Barrier as in MPICH or PMPI_Type_contiguous */
        MPI_Type_contiguous(1, adio_fh->etype, &copy_etype);

        /* FIXME: Ditto for MPI_Type_commit - use NMPI or PMPI */
        MPI_Type_commit(&copy_etype);
        *etype = copy_etype;
    }
    /* FIXME: Ditto for MPI_Type_xxx - use NMPI or PMPI */
    /* Same duplicate-on-derived policy for the filetype. */
    MPI_Type_get_envelope(adio_fh->filetype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *filetype = adio_fh->filetype;
    else {
        MPI_Type_contiguous(1, adio_fh->filetype, &copy_filetype);
        MPI_Type_commit(&copy_filetype);
        *filetype = copy_filetype;
    }

  fn_exit:
    ROMIO_THREAD_CS_EXIT();
    /* NOTE(review): the error paths above set error_code and jump here, yet
     * the function unconditionally returns MPI_SUCCESS, discarding the error
     * code.  Confirm against upstream ROMIO whether this should be
     * `return error_code;` (with error_code initialized to MPI_SUCCESS). */
    return MPI_SUCCESS;
}
912555.c
/* * Copyright (c) 1990, 1992, 1993 Jan-Simon Pendry * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry. * * %sccs.include.redist.c% */ #ifndef lint char copyright[] = "@(#) Copyright (c) 1992, 1993, 1994\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)mount_procfs.c 8.4 (Berkeley) 04/26/95"; #endif /* not lint */ #include <sys/param.h> #include <sys/mount.h> #include <err.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mntopts.h" struct mntopt mopts[] = { MOPT_STDOPTS, { NULL } }; void usage __P((void)); int main(argc, argv) int argc; char *argv[]; { int ch, mntflags; mntflags = 0; while ((ch = getopt(argc, argv, "o:")) != EOF) switch (ch) { case 'o': getmntopts(optarg, mopts, &mntflags, 0); break; case '?': default: usage(); } argc -= optind; argv += optind; if (argc != 2) usage(); if (mount("procfs", argv[1], mntflags, NULL)) err(1, NULL); exit(0); } void usage() { (void)fprintf(stderr, "usage: mount_procfs [-o options] /proc mount_point\n"); exit(1); }
388626.c
#include <stdio.h>
#include <stdlib.h>

/*
 * Demo: lexicographic comparison of two NUL-terminated strings, in the
 * style of K&R's strcmp.
 *
 * Fix: the helper was previously declared and defined as `strcmp`, which
 * is a reserved standard-library identifier — defining a function with
 * that name is undefined behavior (C11 7.1.3) and clashes with the
 * <string.h> declaration on many toolchains.  It is renamed to
 * str_compare and made static; program output is unchanged.
 */

/* Returns <0, 0, or >0 as s sorts before, equal to, or after t. */
static int str_compare(const char *s, const char *t);

int main()
{
    int x;
    char s[20] = "Hola mundo";
    char t[20] = "Hola mund";

    /* s has one extra character ('o'), so the result is positive (111). */
    x = str_compare(s, t);
    printf("%i\n", x);
    return 0;
}

static int str_compare(const char *s, const char *t)
{
    /* Walk both strings while they match; equal through the terminator
     * means equal strings, otherwise return the byte difference. */
    for (; *s == *t; s++, t++)
        if (*s == '\0')
            return 0;
    return *s - *t;
}
935213.c
int a[3][4] = { {0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11} }; int a[3][4] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; array1[array2[3][i]][2][string] //assuming 3 dimensions and that "string" is an integer //also assuming array2 has an integer element type array1[array2][2][string] //assuming 3 dimensions and that "string" is an integer array1[array2[0][0]][2][string] //assuming 3 dimensions and that "string" is an integer //https://pt.stackoverflow.com/q/42836/101
300922.c
/* * Driver for HAL2 sound processors * Copyright (c) 2001, 2002 Ladislav Michl <[email protected]> * * Based on Ulf Carlsson's code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Supported devices: * /dev/dsp standard dsp device, (mostly) OSS compatible * /dev/mixer standard mixer device, (mostly) OSS compatible * * BUGS: * + Driver currently supports indigo mode only. * + Recording doesn't work. I guess that it is caused by PBUS channel * misconfiguration, but until I get relevant info I'm unable to fix it. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/sound.h> #include <linux/soundcard.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sgi/sgint23.h> #include "hal2.h" #define DEBUG(args...) #define DEBUG_MIX(args...) #define H2_INDIRECT_WAIT(regs) while (regs->isr & H2_ISR_TSTATUS); #define H2_READ_ADDR(addr) (addr | (1<<7)) #define H2_WRITE_ADDR(addr) (addr) static char *hal2str = "HAL2 audio"; static int ibuffers = 32; static int obuffers = 32; /* I doubt anyone has a machine with two HAL2 cards. It's possible to * have two HPC's, so it is probably possible to have two HAL2 cards. * Try to deal with it, but note that it is not tested. 
*/ #define MAXCARDS 2 static hal2_card_t* hal2_card[MAXCARDS]; static const struct { unsigned char idx:4, avail:1; } mixtable[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_PCM] = { H2_MIX_OUTPUT_ATT, 1 }, /* voice */ [SOUND_MIXER_MIC] = { H2_MIX_INPUT_GAIN, 1 }, /* mic */ }; #define H2_SUPPORTED_FORMATS (AFMT_S16_LE | AFMT_S16_BE) static inline void hal2_isr_write(hal2_card_t *hal2, u32 val) { hal2->ctl_regs->isr = val; } static inline u32 hal2_isr_look(hal2_card_t *hal2) { return hal2->ctl_regs->isr; } static inline u32 hal2_rev_look(hal2_card_t *hal2) { return hal2->ctl_regs->rev; } static u32 hal2_i_look32(hal2_card_t *hal2, u32 addr) { u32 ret; hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->iar = H2_READ_ADDR(addr); H2_INDIRECT_WAIT(regs); ret = regs->idr0 & 0xffff; regs->iar = H2_READ_ADDR(addr | 0x1); H2_INDIRECT_WAIT(regs); ret |= (regs->idr0 & 0xffff) << 16; return ret; } static void hal2_i_write16(hal2_card_t *hal2, u32 addr, u16 val) { hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->idr0 = val; regs->idr1 = 0; regs->idr2 = 0; regs->idr3 = 0; regs->iar = H2_WRITE_ADDR(addr); H2_INDIRECT_WAIT(regs); } static void hal2_i_write32(hal2_card_t *hal2, u32 addr, u32 val) { hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->idr0 = val & 0xffff; regs->idr1 = val >> 16; regs->idr2 = 0; regs->idr3 = 0; regs->iar = H2_WRITE_ADDR(addr); H2_INDIRECT_WAIT(regs); } static void hal2_i_setbit16(hal2_card_t *hal2, u32 addr, u16 bit) { hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->iar = H2_READ_ADDR(addr); H2_INDIRECT_WAIT(regs); regs->idr0 = regs->idr0 | bit; regs->idr1 = 0; regs->idr2 = 0; regs->idr3 = 0; regs->iar = H2_WRITE_ADDR(addr); H2_INDIRECT_WAIT(regs); } static void hal2_i_setbit32(hal2_card_t *hal2, u32 addr, u32 bit) { u32 tmp; hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->iar = H2_READ_ADDR(addr); H2_INDIRECT_WAIT(regs); tmp = regs->idr0 | (regs->idr1 << 16) | bit; regs->idr0 = tmp & 0xffff; regs->idr1 = tmp >> 16; regs->idr2 = 0; regs->idr3 = 0; regs->iar = 
H2_WRITE_ADDR(addr); H2_INDIRECT_WAIT(regs); } static void hal2_i_clearbit16(hal2_card_t *hal2, u32 addr, u16 bit) { hal2_ctl_regs_t *regs = hal2->ctl_regs; regs->iar = H2_READ_ADDR(addr); H2_INDIRECT_WAIT(regs); regs->idr0 = regs->idr0 & ~bit; regs->idr1 = 0; regs->idr2 = 0; regs->idr3 = 0; regs->iar = H2_WRITE_ADDR(addr); H2_INDIRECT_WAIT(regs); } #ifdef HAL2_DEBUG static void hal2_dump_regs(hal2_card_t *hal2) { printk("isr: %08hx ", hal2_isr_look(hal2)); printk("rev: %08hx\n", hal2_rev_look(hal2)); printk("relay: %04hx\n", hal2_i_look16(hal2, H2I_RELAY_C)); printk("port en: %04hx ", hal2_i_look16(hal2, H2I_DMA_PORT_EN)); printk("dma end: %04hx ", hal2_i_look16(hal2, H2I_DMA_END)); printk("dma drv: %04hx\n", hal2_i_look16(hal2, H2I_DMA_DRV)); printk("syn ctl: %04hx ", hal2_i_look16(hal2, H2I_SYNTH_C)); printk("aesrx ctl: %04hx ", hal2_i_look16(hal2, H2I_AESRX_C)); printk("aestx ctl: %04hx ", hal2_i_look16(hal2, H2I_AESTX_C)); printk("dac ctl1: %04hx ", hal2_i_look16(hal2, H2I_ADC_C1)); printk("dac ctl2: %08lx ", hal2_i_look32(hal2, H2I_ADC_C2)); printk("adc ctl1: %04hx ", hal2_i_look16(hal2, H2I_DAC_C1)); printk("adc ctl2: %08lx ", hal2_i_look32(hal2, H2I_DAC_C2)); printk("syn map: %04hx\n", hal2_i_look16(hal2, H2I_SYNTH_MAP_C)); printk("bres1 ctl1: %04hx ", hal2_i_look16(hal2, H2I_BRES1_C1)); printk("bres1 ctl2: %04lx ", hal2_i_look32(hal2, H2I_BRES1_C2)); printk("bres2 ctl1: %04hx ", hal2_i_look16(hal2, H2I_BRES2_C1)); printk("bres2 ctl2: %04lx ", hal2_i_look32(hal2, H2I_BRES2_C2)); printk("bres3 ctl1: %04hx ", hal2_i_look16(hal2, H2I_BRES3_C1)); printk("bres3 ctl2: %04lx\n", hal2_i_look32(hal2, H2I_BRES3_C2)); } #endif static hal2_card_t* hal2_dsp_find_card(int minor) { int i; for (i = 0; i < MAXCARDS; i++) if (hal2_card[i] != NULL && hal2_card[i]->dev_dsp == minor) return hal2_card[i]; return NULL; } static hal2_card_t* hal2_mixer_find_card(int minor) { int i; for (i = 0; i < MAXCARDS; i++) if (hal2_card[i] != NULL && hal2_card[i]->dev_mixer == minor) return 
hal2_card[i]; return NULL; } static void hal2_dac_interrupt(hal2_codec_t *dac) { int running; spin_lock(&dac->lock); /* if tail buffer contains zero samples DMA stream was already * stopped */ running = dac->tail->info.cnt; dac->tail->info.cnt = 0; dac->tail->info.desc.cntinfo = HPCDMA_XIE | HPCDMA_EOX; dma_cache_wback_inv((unsigned long) dac->tail, sizeof(struct hpc_dma_desc)); /* we just proccessed empty buffer, don't update tail pointer */ if (running) dac->tail = dac->tail->info.next; spin_unlock(&dac->lock); wake_up(&dac->dma_wait); } static void hal2_adc_interrupt(hal2_codec_t *adc) { int running; spin_lock(&adc->lock); /* if head buffer contains nonzero samples DMA stream was already * stopped */ running = !adc->head->info.cnt; adc->head->info.cnt = H2_BUFFER_SIZE; adc->head->info.desc.cntinfo = HPCDMA_XIE | HPCDMA_EOX; dma_cache_wback_inv((unsigned long) adc->head, sizeof(struct hpc_dma_desc)); /* we just proccessed empty buffer, don't update head pointer */ if (running) { dma_cache_inv((unsigned long) adc->head->data, H2_BUFFER_SIZE); adc->head = adc->head->info.next; } spin_unlock(&adc->lock); wake_up(&adc->dma_wait); } static void hal2_interrupt(int irq, void *dev_id, struct pt_regs *regs) { hal2_card_t *hal2 = (hal2_card_t*)dev_id; /* decide what caused this interrupt */ if (hal2->dac.pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_INT) hal2_dac_interrupt(&hal2->dac); if (hal2->adc.pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_INT) hal2_adc_interrupt(&hal2->adc); } static int hal2_compute_rate(hal2_codec_t *codec, unsigned int rate) { unsigned short inc; /* We default to 44.1 kHz and if it isn't possible to fall back to * 48.0 kHz with the needed adjustments of real_rate. */ DEBUG("rate: %d\n", rate); /* Refer to CS4216 data sheet */ if (rate < 4000) rate = 4000; if (rate > 50000) rate = 50000; /* Note: This is NOT the way they set up the bresenham clock generators * in the specification. I've tried to implement that method but it * doesn't work. 
It's probably another silly bug in the spec. * * I accidently discovered this method while I was testing and it seems * to work very well with all frequencies, and thee shall follow rule #1 * of programming :-) */ if (44100 % rate == 0) { inc = 44100 / rate; if (inc < 1) inc = 1; codec->master = 44100; } else { inc = 48000 / rate; if (inc < 1) inc = 1; rate = 48000 / inc; codec->master = 48000; } codec->inc = inc; codec->mod = 1; DEBUG("real_rate: %d\n", rate); return rate; } static void hal2_set_dac_rate(hal2_card_t *hal2) { unsigned int master = hal2->dac.master; int inc = hal2->dac.inc; int mod = hal2->dac.mod; DEBUG("master: %d inc: %d mod: %d\n", master, inc, mod); hal2_i_write16(hal2, H2I_BRES1_C1, (master == 44100) ? 1 : 0); hal2_i_write32(hal2, H2I_BRES1_C2, ((0xffff & (mod - inc - 1)) << 16) | 1); } static void hal2_set_adc_rate(hal2_card_t *hal2) { unsigned int master = hal2->adc.master; int inc = hal2->adc.inc; int mod = hal2->adc.mod; DEBUG("master: %d inc: %d mod: %d\n", master, inc, mod); hal2_i_write16(hal2, H2I_BRES2_C1, (master == 44100) ? 1 : 0); hal2_i_write32(hal2, H2I_BRES2_C2, ((0xffff & (mod - inc - 1)) << 16) | 1); } static void hal2_setup_dac(hal2_card_t *hal2) { unsigned int fifobeg, fifoend, highwater, sample_size; hal2_pbus_t *pbus = &hal2->dac.pbus; DEBUG("hal2_setup_dac\n"); /* Now we set up some PBUS information. The PBUS needs information about * what portion of the fifo it will use. If it's receiving or * transmitting, and finally whether the stream is little endian or big * endian. The information is written later, on the start call. */ sample_size = 2 * hal2->dac.voices; /* Fifo should be set to hold exactly four samples. Highwater mark * should be set to two samples. 
*/ highwater = (sample_size * 2) >> 1; /* halfwords */ fifobeg = 0; /* playback is first */ fifoend = (sample_size * 4) >> 3; /* doublewords */ pbus->ctrl = HPC3_PDMACTRL_RT | HPC3_PDMACTRL_LD | (highwater << 8) | (fifobeg << 16) | (fifoend << 24); /* We disable everything before we do anything at all */ pbus->pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; hal2_i_clearbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECTX); hal2_i_clearbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* Setup the HAL2 for playback */ hal2_set_dac_rate(hal2); /* We are using 1st Bresenham clock generator for playback */ hal2_i_write16(hal2, H2I_DAC_C1, (pbus->pbusnr << H2I_C1_DMA_SHIFT) | (1 << H2I_C1_CLKID_SHIFT) | (hal2->dac.voices << H2I_C1_DATAT_SHIFT)); } static void hal2_setup_adc(hal2_card_t *hal2) { unsigned int fifobeg, fifoend, highwater, sample_size; hal2_pbus_t *pbus = &hal2->adc.pbus; DEBUG("hal2_setup_adc\n"); sample_size = 2 * hal2->adc.voices; highwater = (sample_size * 2) >> 1; /* halfwords */ fifobeg = (4 * 4) >> 3; /* record is second */ fifoend = (4 * 4 + sample_size * 4) >> 3; /* doublewords */ pbus->ctrl = HPC3_PDMACTRL_RT | HPC3_PDMACTRL_RCV | HPC3_PDMACTRL_LD | (highwater << 8) | (fifobeg << 16) | (fifoend << 24); pbus->pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; hal2_i_clearbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECR); hal2_i_clearbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* Setup the HAL2 for record */ hal2_set_adc_rate(hal2); /* We are using 2nd Bresenham clock generator for record */ hal2_i_write16(hal2, H2I_ADC_C1, (pbus->pbusnr << H2I_C1_DMA_SHIFT) | (2 << H2I_C1_CLKID_SHIFT) | (hal2->adc.voices << H2I_C1_DATAT_SHIFT)); } static void hal2_start_dac(hal2_card_t *hal2) { hal2_pbus_t *pbus = &hal2->dac.pbus; DEBUG("hal2_start_dac\n"); pbus->pbus->pbdma_dptr = PHYSADDR(hal2->dac.tail); pbus->pbus->pbdma_ctrl = pbus->ctrl | HPC3_PDMACTRL_ACT; /* set endianess */ if (hal2->dac.format & AFMT_S16_LE) hal2_i_setbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECTX); else 
hal2_i_clearbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECTX); /* set DMA bus */ hal2_i_setbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* enable DAC */ hal2_i_setbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECTX); } static void hal2_start_adc(hal2_card_t *hal2) { hal2_pbus_t *pbus = &hal2->adc.pbus; DEBUG("hal2_start_adc\n"); pbus->pbus->pbdma_dptr = PHYSADDR(hal2->adc.head); pbus->pbus->pbdma_ctrl = pbus->ctrl | HPC3_PDMACTRL_ACT; /* set endianess */ if (hal2->adc.format & AFMT_S16_LE) hal2_i_setbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECR); else hal2_i_clearbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECR); /* set DMA bus */ hal2_i_setbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* enable ADC */ hal2_i_setbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECR); } static inline void hal2_stop_dac(hal2_card_t *hal2) { DEBUG("hal2_stop_dac\n"); hal2->dac.pbus.pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; /* The HAL2 itself may remain enabled safely */ } static inline void hal2_stop_adc(hal2_card_t *hal2) { DEBUG("hal2_stop_adc\n"); hal2->adc.pbus.pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; } #define hal2_alloc_dac_dmabuf(hal2) hal2_alloc_dmabuf(hal2, 1) #define hal2_alloc_adc_dmabuf(hal2) hal2_alloc_dmabuf(hal2, 0) static int hal2_alloc_dmabuf(hal2_card_t *hal2, int is_dac) { int buffers, cntinfo; hal2_buf_t *buf, *prev; hal2_codec_t *codec; if (is_dac) { codec = &hal2->dac; buffers = obuffers; cntinfo = HPCDMA_XIE | HPCDMA_EOX; } else { codec = &hal2->adc; buffers = ibuffers; cntinfo = HPCDMA_XIE | H2_BUFFER_SIZE; } DEBUG("allocating %d DMA buffers.\n", buffers); buf = (hal2_buf_t*) get_zeroed_page(GFP_KERNEL); if (!buf) return -ENOMEM; codec->head = buf; codec->tail = buf; while (--buffers) { buf->info.desc.pbuf = PHYSADDR(&buf->data); buf->info.desc.cntinfo = cntinfo; buf->info.cnt = 0; prev = buf; buf = (hal2_buf_t*) get_zeroed_page(GFP_KERNEL); if (!buf) { printk("HAL2: Not enough memory for DMA buffer.\n"); buf = codec->head; while (buf) { prev = buf; free_page((unsigned long) 
buf); buf = prev->info.next; } return -ENOMEM; } prev->info.next = buf; prev->info.desc.pnext = PHYSADDR(buf); /* The PBUS can prolly not read this stuff when it's in * the cache so we have to flush it back to main memory */ dma_cache_wback_inv((unsigned long) prev, PAGE_SIZE); } buf->info.desc.pbuf = PHYSADDR(&buf->data); buf->info.desc.cntinfo = cntinfo; buf->info.cnt = 0; buf->info.next = codec->head; buf->info.desc.pnext = PHYSADDR(codec->head); dma_cache_wback_inv((unsigned long) buf, PAGE_SIZE); return 0; } #define hal2_free_dac_dmabuf(hal2) hal2_free_dmabuf(hal2, 1) #define hal2_free_adc_dmabuf(hal2) hal2_free_dmabuf(hal2, 0) static void hal2_free_dmabuf(hal2_card_t *hal2, int is_dac) { hal2_buf_t *buf, *next; hal2_codec_t *codec = (is_dac) ? &hal2->dac : &hal2->adc; if (!codec->head) return; buf = codec->head->info.next; codec->head->info.next = NULL; while (buf) { next = buf->info.next; free_page((unsigned long) buf); buf = next; } codec->head = codec->tail = NULL; } /* * Add 'count' bytes to 'buffer' from DMA ring buffers. Return number of * bytes added or -EFAULT if copy_from_user failed. */ static int hal2_get_buffer(hal2_card_t *hal2, char *buffer, int count) { unsigned long flags; int size, ret = 0; hal2_codec_t *adc = &hal2->adc; spin_lock_irqsave(&adc->lock, flags); DEBUG("getting %d bytes ", count); /* enable DMA stream if there are no data */ if (!(adc->pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_ISACT) && adc->tail->info.cnt == 0) hal2_start_adc(hal2); DEBUG("... 
"); while (adc->tail->info.cnt > 0 && count > 0) { size = min(adc->tail->info.cnt, count); spin_unlock_irqrestore(&adc->lock, flags); if (copy_to_user(buffer, &adc->tail->data[H2_BUFFER_SIZE-size], size)) { ret = -EFAULT; goto out; } spin_lock_irqsave(&adc->lock, flags); adc->tail->info.cnt -= size; /* buffer is empty, update tail pointer */ if (adc->tail->info.cnt == 0) { adc->tail->info.desc.cntinfo = HPCDMA_XIE | H2_BUFFER_SIZE; dma_cache_wback_inv((unsigned long) adc->tail, sizeof(struct hpc_dma_desc)); adc->tail = adc->tail->info.next; /* enable DMA stream again if needed */ if (!(adc->pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_ISACT)) hal2_start_adc(hal2); } buffer += size; ret += size; count -= size; DEBUG("(%d) ", size); } spin_unlock_irqrestore(&adc->lock, flags); out: DEBUG("\n"); return ret; } /* * Add 'count' bytes from 'buffer' to DMA ring buffers. Return number of * bytes added or -EFAULT if copy_from_user failed. */ static int hal2_add_buffer(hal2_card_t *hal2, char *buffer, int count) { unsigned long flags; int size, ret = 0; hal2_codec_t *dac = &hal2->dac; spin_lock_irqsave(&dac->lock, flags); DEBUG("adding %d bytes ", count); while (dac->head->info.cnt == 0 && count > 0) { size = min((int)H2_BUFFER_SIZE, count); spin_unlock_irqrestore(&dac->lock, flags); if (copy_from_user(dac->head->data, buffer, size)) { ret = -EFAULT; goto out; } spin_lock_irqsave(&dac->lock, flags); dac->head->info.desc.cntinfo = size | HPCDMA_XIE; dac->head->info.cnt = size; dma_cache_wback_inv((unsigned long) dac->head, size + PAGE_SIZE - H2_BUFFER_SIZE); buffer += size; ret += size; count -= size; dac->head = dac->head->info.next; DEBUG("(%d) ", size); } if (!(dac->pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_ISACT) && ret > 0) hal2_start_dac(hal2); spin_unlock_irqrestore(&dac->lock, flags); out: DEBUG("\n"); return ret; } #define hal2_reset_dac_pointer(hal2) hal2_reset_pointer(hal2, 1) #define hal2_reset_adc_pointer(hal2) hal2_reset_pointer(hal2, 0) static void 
hal2_reset_pointer(hal2_card_t *hal2, int is_dac)
{
	/* Rewind a codec's DMA descriptor ring: mark every descriptor empty
	 * and reset tail to head.  Used after a stop/error to restart the
	 * stream from a clean state. */
	hal2_codec_t *codec = (is_dac) ? &hal2->dac : &hal2->adc;

	DEBUG("hal2_reset_pointer\n");

	codec->tail = codec->head;
	do {
		/*
		 * BUG FIX: the original expression was
		 *     HPCDMA_XIE | (is_dac) ? HPCDMA_EOX : H2_BUFFER_SIZE
		 * `|' binds tighter than `?:', so the condition was
		 * (HPCDMA_XIE | is_dac) -- always nonzero -- and every
		 * descriptor got plain HPCDMA_EOX: the XIE bit was lost
		 * and the ADC ring never got H2_BUFFER_SIZE back.
		 * Parenthesize the conditional so the bit-or applies to
		 * its result.
		 */
		codec->tail->info.desc.cntinfo = HPCDMA_XIE |
			((is_dac) ? HPCDMA_EOX : H2_BUFFER_SIZE);
		codec->tail->info.cnt = 0;
		/* descriptors are read by the PBUS DMA engine, so push
		 * them out of the CPU cache */
		dma_cache_wback_inv((unsigned long) codec->tail,
				    sizeof(struct hpc_dma_desc));
		codec->tail = codec->tail->info.next;
	} while (codec->tail != codec->head);
}

/*
 * Wait until the DAC DMA stream has drained (playback finished).
 * Returns 0 on success, -ETIME on a confirmed timeout, -ERESTARTSYS if
 * interrupted by a signal; on error the DAC is stopped and its ring reset.
 */
static int hal2_sync_dac(hal2_card_t *hal2)
{
	DECLARE_WAITQUEUE(wait, current);
	hal2_codec_t *dac = &hal2->dac;
	int ret = 0;
	/* ~10% longer than the time needed to play one full buffer */
	signed long timeout = 1000 * H2_BUFFER_SIZE * 2 * dac->voices *
			      HZ / dac->sample_rate / 900;

	down(&dac->sem);
	while (dac->pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_ISACT) {
		add_wait_queue(&dac->dma_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!schedule_timeout(timeout)) {
			/* We may get bogus timeout when system is
			 * heavily loaded; only report it if the tail
			 * buffer really still holds samples */
			if (dac->tail->info.cnt) {
				printk("HAL2: timeout...\n");
				ret = -ETIME;
			}
		}
		if (signal_pending(current))
			ret = -ERESTARTSYS;
		if (ret) {
			hal2_stop_dac(hal2);
			hal2_reset_dac_pointer(hal2);
		}
		remove_wait_queue(&dac->dma_wait, &wait);
	}
	up(&dac->sem);

	return ret;
}

/*
 * Write an OSS mixer control.  'vol' packs right channel in bits 8-15 and
 * left channel in bits 0-7, each clamped to 0..100.
 * Returns 0 on success or -EINVAL for an unknown/unavailable control.
 */
static int hal2_write_mixer(hal2_card_t *hal2, int index, int vol)
{
	unsigned int l, r;

	DEBUG_MIX("mixer %d write\n", index);

	if (index >= SOUND_MIXER_NRDEVICES || !mixtable[index].avail)
		return -EINVAL;

	r = (vol >> 8) & 0xff;
	if (r > 100)
		r = 100;
	l = vol & 0xff;
	if (l > 100)
		l = 100;
	hal2->mixer.volume[mixtable[index].idx] = l | (r << 8);

	switch (mixtable[index].idx) {
	case H2_MIX_OUTPUT_ATT:
	{
		DEBUG_MIX("output attenuator %d,%d\n", l, r);
		if (r | l) {
			unsigned int tmp = hal2_i_look32(hal2, H2I_DAC_C2);

			tmp &= ~(H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE);
			/* Attenuator has five bits; map 0..100 volume to
			 * 31..0 attenuation */
			l = (31 * (100 - l) / 99);
			r = (31 * (100 - r) / 99);
			DEBUG_MIX("left: %d, right %d\n", l, r);
			tmp |= (l << H2I_C2_L_ATT_SHIFT) & H2I_C2_L_ATT_M;
			tmp |= (r << H2I_C2_R_ATT_SHIFT) & H2I_C2_R_ATT_M;
			hal2_i_write32(hal2, H2I_DAC_C2,
tmp); } else hal2_i_setbit32(hal2, H2I_DAC_C2, H2I_C2_MUTE); } case H2_MIX_INPUT_GAIN: { /* TODO */ } } return 0; } static void hal2_init_mixer(hal2_card_t *hal2) { int i; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) hal2_write_mixer(hal2, i, 100 | (100 << 8)); } static int hal2_mixer_ioctl(hal2_card_t *hal2, unsigned int cmd, unsigned long arg) { int val; if (cmd == SOUND_MIXER_INFO) { mixer_info info; strncpy(info.id, hal2str, sizeof(info.id)); strncpy(info.name, hal2str, sizeof(info.name)); info.modify_counter = hal2->mixer.modcnt; if (copy_to_user((void *)arg, &info, sizeof(info))) return -EFAULT; return 0; } if (cmd == SOUND_OLD_MIXER_INFO) { _old_mixer_info info; strncpy(info.id, hal2str, sizeof(info.id)); strncpy(info.name, hal2str, sizeof(info.name)); if (copy_to_user((void *)arg, &info, sizeof(info))) return -EFAULT; return 0; } if (cmd == OSS_GETVERSION) return put_user(SOUND_VERSION, (int *)arg); if (_IOC_TYPE(cmd) != 'M' || _IOC_SIZE(cmd) != sizeof(int)) return -EINVAL; if (_IOC_DIR(cmd) == _IOC_READ) { switch (_IOC_NR(cmd)) { /* Give the current record source */ case SOUND_MIXER_RECSRC: val = 0; break; /* Give the supported mixers, all of them support stereo */ case SOUND_MIXER_DEVMASK: case SOUND_MIXER_STEREODEVS: { int i; for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++) if (mixtable[i].avail) val |= 1 << i; break; } /* Arg contains a bit for each supported recording source */ case SOUND_MIXER_RECMASK: val = 0; break; case SOUND_MIXER_CAPS: val = 0; break; /* Read a specific mixer */ default: { int i = _IOC_NR(cmd); if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].avail) return -EINVAL; val = hal2->mixer.volume[mixtable[i].idx]; break; } } return put_user(val, (int *)arg); } if (_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)) return -EINVAL; hal2->mixer.modcnt++; if (get_user(val, (int *)arg)) return -EFAULT; switch (_IOC_NR(cmd)) { /* Arg contains a bit for each recording source */ case SOUND_MIXER_RECSRC: return 0; default: return hal2_write_mixer(hal2, 
_IOC_NR(cmd), val); } return 0; } static int hal2_open_mixdev(struct inode *inode, struct file *file) { hal2_card_t *hal2 = hal2_mixer_find_card(MINOR(inode->i_rdev)); if (hal2) { file->private_data = hal2; return 0; } return -ENODEV; } static int hal2_release_mixdev(struct inode *inode, struct file *file) { return 0; } static int hal2_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { return hal2_mixer_ioctl((hal2_card_t *)file->private_data, cmd, arg); } static int hal2_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int val; hal2_card_t *hal2 = (hal2_card_t *) file->private_data; switch (cmd) { case OSS_GETVERSION: return put_user(SOUND_VERSION, (int *)arg); case SNDCTL_DSP_SYNC: if (file->f_mode & FMODE_WRITE) return hal2_sync_dac(hal2); return 0; case SNDCTL_DSP_SETDUPLEX: return 0; case SNDCTL_DSP_GETCAPS: return put_user(DSP_CAP_DUPLEX | DSP_CAP_MULTI, (int *)arg); case SNDCTL_DSP_RESET: if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); hal2_reset_adc_pointer(hal2); } if (file->f_mode & FMODE_WRITE) { hal2_stop_dac(hal2); hal2_reset_dac_pointer(hal2); } return 0; case SNDCTL_DSP_SPEED: if (get_user(val, (int *)arg)) return -EFAULT; if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); val = hal2_compute_rate(&hal2->adc, val); hal2->adc.sample_rate = val; hal2_set_adc_rate(hal2); } if (file->f_mode & FMODE_WRITE) { hal2_stop_dac(hal2); val = hal2_compute_rate(&hal2->dac, val); hal2->dac.sample_rate = val; hal2_set_dac_rate(hal2); } return put_user(val, (int *)arg); case SNDCTL_DSP_STEREO: if (get_user(val, (int *)arg)) return -EFAULT; if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); hal2->adc.voices = (val) ? 2 : 1; hal2_setup_adc(hal2); } if (file->f_mode & FMODE_WRITE) { hal2_stop_dac(hal2); hal2->dac.voices = (val) ? 
2 : 1; hal2_setup_dac(hal2); } return 0; case SNDCTL_DSP_CHANNELS: if (get_user(val, (int *)arg)) return -EFAULT; if (val != 0) { if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); hal2->adc.voices = (val == 1) ? 1 : 2; hal2_setup_adc(hal2); } if (file->f_mode & FMODE_WRITE) { hal2_stop_dac(hal2); hal2->dac.voices = (val == 1) ? 1 : 2; hal2_setup_dac(hal2); } } val = -EINVAL; if (file->f_mode & FMODE_READ) val = hal2->adc.voices; if (file->f_mode & FMODE_WRITE) val = hal2->dac.voices; return put_user(val, (int *)arg); case SNDCTL_DSP_GETFMTS: /* Returns a mask */ return put_user(H2_SUPPORTED_FORMATS, (int *)arg); case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/ if (get_user(val, (int *)arg)) return -EFAULT; if (val != AFMT_QUERY) { if (!(val & H2_SUPPORTED_FORMATS)) return -EINVAL; if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); hal2->adc.format = val; hal2_setup_adc(hal2); } if (file->f_mode & FMODE_WRITE) { hal2_stop_dac(hal2); hal2->dac.format = val; hal2_setup_dac(hal2); } } else { val = -EINVAL; if (file->f_mode & FMODE_READ) val = hal2->adc.format; if (file->f_mode & FMODE_WRITE) val = hal2->dac.format; } return put_user(val, (int *)arg); case SNDCTL_DSP_POST: return 0; case SNDCTL_DSP_GETOSPACE: { unsigned long flags; audio_buf_info info; hal2_buf_t *buf; hal2_codec_t *dac = &hal2->dac; if (!(file->f_mode & FMODE_WRITE)) return -EINVAL; spin_lock_irqsave(&dac->lock, flags); info.fragments = 0; buf = dac->head; while (buf->info.cnt == 0 && buf != dac->tail) { info.fragments++; buf = buf->info.next; } spin_unlock_irqrestore(&dac->lock, flags); info.fragstotal = obuffers; info.fragsize = H2_BUFFER_SIZE; info.bytes = info.fragsize * info.fragments; return copy_to_user((void *)arg, &info, sizeof(info)) ? 
-EFAULT : 0; } case SNDCTL_DSP_GETISPACE: { unsigned long flags; audio_buf_info info; hal2_buf_t *buf; hal2_codec_t *adc = &hal2->adc; if (!(file->f_mode & FMODE_READ)) return -EINVAL; spin_lock_irqsave(&adc->lock, flags); info.fragments = 0; info.bytes = 0; buf = adc->tail; while (buf->info.cnt > 0 && buf != adc->head) { info.fragments++; info.bytes += buf->info.cnt; buf = buf->info.next; } spin_unlock_irqrestore(&adc->lock, flags); info.fragstotal = ibuffers; info.fragsize = H2_BUFFER_SIZE; return copy_to_user((void *)arg, &info, sizeof(info)) ? -EFAULT : 0; } case SNDCTL_DSP_NONBLOCK: file->f_flags |= O_NONBLOCK; return 0; case SNDCTL_DSP_GETBLKSIZE: return put_user(H2_BUFFER_SIZE, (int *)arg); case SNDCTL_DSP_SETFRAGMENT: return 0; case SOUND_PCM_READ_RATE: val = -EINVAL; if (file->f_mode & FMODE_READ) val = hal2->adc.sample_rate; if (file->f_mode & FMODE_WRITE) val = hal2->dac.sample_rate; return put_user(val, (int *)arg); case SOUND_PCM_READ_CHANNELS: val = -EINVAL; if (file->f_mode & FMODE_READ) val = hal2->adc.voices; if (file->f_mode & FMODE_WRITE) val = hal2->dac.voices; return put_user(val, (int *)arg); case SOUND_PCM_READ_BITS: val = 16; return put_user(val, (int *)arg); } return hal2_mixer_ioctl(hal2, cmd, arg); } static ssize_t hal2_read(struct file *file, char *buffer, size_t count, loff_t *ppos) { ssize_t err; hal2_card_t *hal2 = (hal2_card_t *) file->private_data; hal2_codec_t *adc = &hal2->adc; if (count == 0) return 0; if (ppos != &file->f_pos) return -ESPIPE; down(&adc->sem); if (file->f_flags & O_NONBLOCK) { err = hal2_get_buffer(hal2, buffer, count); err = err == 0 ? 
-EAGAIN : err; } else { do { /* ~10% longer */ signed long timeout = 1000 * H2_BUFFER_SIZE * 2 * adc->voices * HZ / adc->sample_rate / 900; DECLARE_WAITQUEUE(wait, current); ssize_t cnt = 0; err = hal2_get_buffer(hal2, buffer, count); if (err > 0) { count -= err; cnt += err; buffer += err; err = cnt; } if (count > 0 && err >= 0) { add_wait_queue(&adc->dma_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); /* Well, it is possible, that interrupt already * arrived. Hmm, shit happens, we have one more * buffer filled ;) */ if (!schedule_timeout(timeout)) /* We may get bogus timeout when system * is heavily loaded */ if (!adc->tail->info.cnt) { printk("HAL2: timeout...\n"); hal2_stop_adc(hal2); hal2_reset_adc_pointer(hal2); err = -EAGAIN; } if (signal_pending(current)) err = -ERESTARTSYS; remove_wait_queue(&adc->dma_wait, &wait); } } while (count > 0 && err >= 0); } up(&adc->sem); return err; } static ssize_t hal2_write(struct file *file, const char *buffer, size_t count, loff_t *ppos) { ssize_t err; char *buf = (char*) buffer; hal2_card_t *hal2 = (hal2_card_t *) file->private_data; hal2_codec_t *dac = &hal2->dac; if (count == 0) return 0; if (ppos != &file->f_pos) return -ESPIPE; down(&dac->sem); if (file->f_flags & O_NONBLOCK) { err = hal2_add_buffer(hal2, buf, count); err = err == 0 ? -EAGAIN : err; } else { do { /* ~10% longer */ signed long timeout = 1000 * H2_BUFFER_SIZE * 2 * dac->voices * HZ / dac->sample_rate / 900; DECLARE_WAITQUEUE(wait, current); ssize_t cnt = 0; err = hal2_add_buffer(hal2, buf, count); if (err > 0) { count -= err; cnt += err; buf += err; err = cnt; } if (count > 0 && err >= 0) { add_wait_queue(&dac->dma_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); /* Well, it is possible, that interrupt already * arrived. 
Hmm, shit happens, we have one more * buffer free ;) */ if (!schedule_timeout(timeout)) /* We may get bogus timeout when system * is heavily loaded */ if (dac->head->info.cnt) { printk("HAL2: timeout...\n"); hal2_stop_dac(hal2); hal2_reset_dac_pointer(hal2); err = -EAGAIN; } if (signal_pending(current)) err = -ERESTARTSYS; remove_wait_queue(&dac->dma_wait, &wait); } } while (count > 0 && err >= 0); } up(&dac->sem); return err; } static unsigned int hal2_poll(struct file *file, struct poll_table_struct *wait) { unsigned long flags; unsigned int mask = 0; hal2_card_t *hal2 = (hal2_card_t *) file->private_data; if (file->f_mode & FMODE_READ) { hal2_codec_t *adc = &hal2->adc; poll_wait(file, &hal2->adc.dma_wait, wait); spin_lock_irqsave(&adc->lock, flags); if (adc->tail->info.cnt > 0) mask |= POLLIN; spin_unlock_irqrestore(&adc->lock, flags); } if (file->f_mode & FMODE_WRITE) { hal2_codec_t *dac = &hal2->dac; poll_wait(file, &dac->dma_wait, wait); spin_lock_irqsave(&dac->lock, flags); if (dac->head->info.cnt == 0) mask |= POLLOUT; spin_unlock_irqrestore(&dac->lock, flags); } return mask; } static int hal2_open(struct inode *inode, struct file *file) { int err; hal2_card_t *hal2 = hal2_dsp_find_card(MINOR(inode->i_rdev)); DEBUG("opening audio device.\n"); if (!hal2) { printk("HAL2: Whee?! 
Open door and go away!\n"); return -ENODEV; } file->private_data = hal2; if (file->f_mode & FMODE_READ) { if (hal2->adc.usecount) return -EBUSY; /* OSS spec wanted us to use 8 bit, 8 kHz mono by default, * but HAL2 can't do 8bit audio */ hal2->adc.format = AFMT_S16_BE; hal2->adc.voices = 1; hal2->adc.sample_rate = hal2_compute_rate(&hal2->adc, 8000); hal2_set_adc_rate(hal2); /* alloc DMA buffers */ err = hal2_alloc_adc_dmabuf(hal2); if (err) return err; hal2_setup_adc(hal2); hal2->adc.usecount++; } if (file->f_mode & FMODE_WRITE) { if (hal2->dac.usecount) return -EBUSY; hal2->dac.format = AFMT_S16_BE; hal2->dac.voices = 1; hal2->dac.sample_rate = hal2_compute_rate(&hal2->dac, 8000); hal2_set_dac_rate(hal2); /* alloc DMA buffers */ err = hal2_alloc_dac_dmabuf(hal2); if (err) return err; hal2_setup_dac(hal2); hal2->dac.usecount++; } return 0; } static int hal2_release(struct inode *inode, struct file *file) { hal2_card_t *hal2 = (hal2_card_t *) file->private_data; if (file->f_mode & FMODE_READ) { hal2_stop_adc(hal2); hal2_free_adc_dmabuf(hal2); hal2->adc.usecount--; } if (file->f_mode & FMODE_WRITE) { hal2_sync_dac(hal2); hal2_free_dac_dmabuf(hal2); hal2->dac.usecount--; } return 0; } static struct file_operations hal2_audio_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = hal2_read, .write = hal2_write, .poll = hal2_poll, .ioctl = hal2_ioctl, .open = hal2_open, .release = hal2_release, }; static struct file_operations hal2_mixer_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .ioctl = hal2_ioctl_mixdev, .open = hal2_open_mixdev, .release = hal2_release_mixdev, }; static int hal2_request_irq(hal2_card_t *hal2, int irq) { unsigned long flags; int ret = 0; save_and_cli(flags); if (request_irq(irq, hal2_interrupt, SA_SHIRQ, hal2str, hal2)) { printk(KERN_ERR "HAL2: Can't get irq %d\n", irq); ret = -EAGAIN; } restore_flags(flags); return ret; } static int hal2_alloc_resources(hal2_card_t *hal2, struct hpc3_regs *hpc3) { hal2_pbus_t *pbus; pbus = 
&hal2->dac.pbus; pbus->pbusnr = 0; pbus->pbus = &hpc3->pbdma[pbus->pbusnr]; /* The spec says that we should write 0x08248844 but that's WRONG. HAL2 * does 8 bit DMA, not 16 bit even if it generates 16 bit audio. */ hpc3->pbus_dmacfgs[pbus->pbusnr][0] = 0x08208844; /* Magic :-) */ pbus = &hal2->adc.pbus; pbus->pbusnr = 1; pbus->pbus = &hpc3->pbdma[pbus->pbusnr]; hpc3->pbus_dmacfgs[pbus->pbusnr][0] = 0x08208844; /* Magic :-) */ return hal2_request_irq(hal2, SGI_HPCDMA_IRQ); } static void hal2_init_codec(hal2_codec_t *codec) { init_waitqueue_head(&codec->dma_wait); init_MUTEX(&codec->sem); spin_lock_init(&codec->lock); } static void hal2_free_resources(hal2_card_t *hal2) { free_irq(SGI_HPCDMA_IRQ, hal2); } static int hal2_detect(hal2_card_t *hal2) { unsigned short board, major, minor; unsigned short rev; /* reset HAL2 */ hal2_isr_write(hal2, 0); /* release reset */ hal2_isr_write(hal2, H2_ISR_GLOBAL_RESET_N | H2_ISR_CODEC_RESET_N); hal2_i_write16(hal2, H2I_RELAY_C, H2I_RELAY_C_STATE); if ((rev = hal2_rev_look(hal2)) & H2_REV_AUDIO_PRESENT) { DEBUG("HAL2: no device detected, rev: 0x%04hx\n", rev); return -ENODEV; } board = (rev & H2_REV_BOARD_M) >> 12; major = (rev & H2_REV_MAJOR_CHIP_M) >> 4; minor = (rev & H2_REV_MINOR_CHIP_M); printk("SGI HAL2 Processor revision %i.%i.%i detected\n", board, major, minor); if (board != 4 || major != 1 || minor != 0) printk( "Other revision than 4.1.0 detected. 
" "Your card is probably unsupported\n"); return 0; } static int hal2_init_card(hal2_card_t **phal2, struct hpc3_regs *hpc3, unsigned long hpc3_base) { int ret = 0; hal2_card_t *hal2; hal2 = (hal2_card_t *) kmalloc(sizeof(hal2_card_t), GFP_KERNEL); if (!hal2) return -ENOMEM; memset(hal2, 0, sizeof(hal2_card_t)); hal2->ctl_regs = (hal2_ctl_regs_t *) KSEG1ADDR(hpc3_base + H2_CTL_PIO); hal2->aes_regs = (hal2_aes_regs_t *) KSEG1ADDR(hpc3_base + H2_AES_PIO); hal2->vol_regs = (hal2_vol_regs_t *) KSEG1ADDR(hpc3_base + H2_VOL_PIO); hal2->syn_regs = (hal2_syn_regs_t *) KSEG1ADDR(hpc3_base + H2_SYN_PIO); if (hal2_detect(hal2) < 0) { printk("HAL2 audio processor not found\n"); ret = -ENODEV; goto fail1; } hal2_init_codec(&hal2->dac); hal2_init_codec(&hal2->adc); ret = hal2_alloc_resources(hal2, hpc3); if (ret) goto fail1; hal2_init_mixer(hal2); hal2->dev_dsp = register_sound_dsp(&hal2_audio_fops, -1); if (hal2->dev_dsp < 0) { ret = hal2->dev_dsp; goto fail2; } hal2->dev_mixer = register_sound_mixer(&hal2_mixer_fops, -1); if (hal2->dev_mixer < 0) { ret = hal2->dev_mixer; goto fail3; } *phal2 = hal2; return 0; fail3: unregister_sound_dsp(hal2->dev_dsp); fail2: hal2_free_resources(hal2); fail1: kfree(hal2); return ret; } /* * We are assuming only one HAL2 card. If you ever meet machine with more than * one, tell immediately about it to someone. Preferably to me. --ladis */ static int __init init_hal2(void) { int i; for (i = 0; i < MAXCARDS; i++) hal2_card[i] = NULL; return hal2_init_card(&hal2_card[0], hpc3c0, HPC3_CHIP0_PBASE); } static void __exit exit_hal2(void) { int i; for (i = 0; i < MAXCARDS; i++) if (hal2_card[i]) { hal2_free_resources(hal2_card[i]); unregister_sound_dsp(hal2_card[i]->dev_dsp); unregister_sound_mixer(hal2_card[i]->dev_mixer); kfree(hal2_card[i]); } } module_init(init_hal2); module_exit(exit_hal2); MODULE_DESCRIPTION("OSS compatible driver for SGI HAL2 audio"); MODULE_AUTHOR("Ladislav Michl"); MODULE_LICENSE("GPL");
261315.c
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/* Serial input plugin for Fluent Bit
 * ==================================
 * Copyright (C) 2015-2016 Takeshi HASEGAWA
 * Copyright (C) 2015-2018 Treasure Data Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_input.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_engine.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_error.h>
#include <msgpack.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
#include <inttypes.h>
#include <termios.h>

#include "in_serial.h"
#include "in_serial_config.h"

/*
 * Pack one raw line as a [timestamp, {"msg": line}] msgpack record.
 * 'line' must hold at least 'len' readable bytes and be NUL terminated
 * (only the debug trace relies on the terminator).  Always returns 0.
 */
static inline int process_line(msgpack_packer *mp_pck, char *line, int len,
                               struct flb_in_serial_config *ctx)
{
    /*
     * Store the new data into the MessagePack buffer,
     * we handle this as a list of maps.
     */
    msgpack_pack_array(mp_pck, 2);
    flb_pack_time_now(mp_pck);

    msgpack_pack_map(mp_pck, 1);
    msgpack_pack_str(mp_pck, 3);
    msgpack_pack_str_body(mp_pck, "msg", 3);
    msgpack_pack_str(mp_pck, len);
    msgpack_pack_str_body(mp_pck, line, len);

    flb_debug("[in_serial] message '%s'", (const char *) line);
    return 0;
}

/*
 * Re-pack each msgpack object found in 'pack' (output of the JSON parser)
 * as a [timestamp, {"msg": object}] record.  Always returns 0.
 */
static inline int process_pack(msgpack_packer *mp_pck,
                               struct flb_in_serial_config *ctx,
                               char *pack, size_t size)
{
    size_t off = 0;
    msgpack_unpacked result;
    msgpack_object entry;

    /* First pack the results, iterate concatenated messages */
    msgpack_unpacked_init(&result);
    while (msgpack_unpack_next(&result, pack, size, &off) == MSGPACK_UNPACK_SUCCESS) {
        entry = result.data;

        msgpack_pack_array(mp_pck, 2);
        msgpack_pack_uint64(mp_pck, time(NULL));

        msgpack_pack_map(mp_pck, 1);
        msgpack_pack_str(mp_pck, 3);
        msgpack_pack_str_body(mp_pck, "msg", 3);
        msgpack_pack_object(mp_pck, entry);
    }
    msgpack_unpacked_destroy(&result);

    return 0;
}

/* Drop the first 'bytes' bytes of 'buf' (which holds 'length' bytes),
 * shifting the remainder to the front. */
static inline void consume_bytes(char *buf, int bytes, int length)
{
    memmove(buf, buf + bytes, length - bytes);
}

/* Callback triggered when some serial msgs are available */
static int in_serial_collect(struct flb_input_instance *in,
                             struct flb_config *config, void *in_context)
{
    int ret;
    int rc = 0;
    int bytes = 0;
    int available;
    int len;
    int hits;
    char *sep;
    char *buf;
    struct flb_in_serial_config *ctx = in_context;

    msgpack_packer mp_pck;
    msgpack_sbuffer mp_sbuf;

    /* Initialize local msgpack buffer */
    msgpack_sbuffer_init(&mp_sbuf);
    msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);

    while (1) {
        /*
         * BUG FIX: reset the byte count every iteration.  Previously,
         * when the buffer was (nearly) full and the read was skipped,
         * the stale count from the last pass was added to buf_len
         * again, corrupting the buffer accounting.
         */
        bytes = 0;
        available = (sizeof(ctx->buf_data) - 1) - ctx->buf_len;
        if (available > 1) {
            bytes = read(ctx->fd, ctx->buf_data + ctx->buf_len, available);
            if (bytes == -1) {
                if (errno == EPIPE || errno == EINTR) {
                    rc = -1;
                }
                /* e.g. EAGAIN on the non-blocking fd: no more data now */
                goto done;
            }
            else if (bytes == 0) {
                goto done;
            }
        }
        ctx->buf_len += bytes;

        /* Always set a delimiter to avoid buffer trash */
        ctx->buf_data[ctx->buf_len] = '\0';

        /* Check if our buffer is full */
        if (ctx->buffer_id + 1 == SERIAL_BUFFER_SIZE) {
            ret = flb_engine_flush(config, &in_serial_plugin);
            if (ret == -1) {
                ctx->buffer_id = 0;
            }
        }

        sep = NULL;
        buf = ctx->buf_data;
        len = ctx->buf_len;
        hits = 0;

        /* Handle FTDI handshake */
        if (ctx->buf_data[0] == '\0') {
            consume_bytes(ctx->buf_data, 1, ctx->buf_len);
            ctx->buf_len--;
        }

        /* Strip CR or LF if found at first byte */
        if (ctx->buf_data[0] == '\r' || ctx->buf_data[0] == '\n') {
            /* Skip message with one byte with CR or LF */
            flb_trace("[in_serial] skip one byte message with ASCII code=%i",
                      ctx->buf_data[0]);
            consume_bytes(ctx->buf_data, 1, ctx->buf_len);
            ctx->buf_len--;
        }

        /* Handle the case when a Separator is set */
        if (ctx->separator) {
            while ((sep = strstr(ctx->buf_data, ctx->separator))) {
                len = (sep - ctx->buf_data);
                if (len > 0) {
                    /* process the line based in the separator position */
                    process_line(&mp_pck, buf, len, ctx);
                    consume_bytes(ctx->buf_data, len + ctx->sep_len,
                                  ctx->buf_len);
                    ctx->buf_len -= (len + ctx->sep_len);
                    hits++;
                }
                else {
                    consume_bytes(ctx->buf_data, ctx->sep_len, ctx->buf_len);
                    ctx->buf_len -= ctx->sep_len;
                }
                ctx->buf_data[ctx->buf_len] = '\0';
            }

            if (hits == 0 && available <= 1) {
                flb_debug("[in_serial] no separator found, no more space");
                ctx->buf_len = 0;
                goto done;
            }
        }
        else if (ctx->format == FLB_SERIAL_FORMAT_JSON) {
            /* JSON Format handler */
            char *pack;
            int out_size;

            ret = flb_pack_json_state(ctx->buf_data, ctx->buf_len,
                                      &pack, &out_size, &ctx->pack_state);
            if (ret == FLB_ERR_JSON_PART) {
                flb_debug("[in_serial] JSON incomplete, waiting for more data...");
                goto done;
            }
            else if (ret == FLB_ERR_JSON_INVAL) {
                flb_debug("[in_serial] invalid JSON message, skipping");
                flb_pack_state_reset(&ctx->pack_state);
                flb_pack_state_init(&ctx->pack_state);
                ctx->pack_state.multiple = FLB_TRUE;
                rc = -1;
                goto done;
            }

            /*
             * Given the Tokens used for the packaged message, append
             * the records and then adjust buffer.
             */
            process_pack(&mp_pck, ctx, pack, out_size);
            flb_free(pack);

            consume_bytes(ctx->buf_data, ctx->pack_state.last_byte,
                          ctx->buf_len);
            ctx->buf_len -= ctx->pack_state.last_byte;
            ctx->buf_data[ctx->buf_len] = '\0';

            flb_pack_state_reset(&ctx->pack_state);
            flb_pack_state_init(&ctx->pack_state);
            ctx->pack_state.multiple = FLB_TRUE;
        }
        else {
            /* Process and enqueue the received line */
            process_line(&mp_pck, ctx->buf_data, ctx->buf_len, ctx);
            ctx->buf_len = 0;
        }
    }

done:
    /*
     * BUG FIX: flush whatever was packed during this invocation before
     * leaving.  Every early return above used to destroy mp_sbuf without
     * appending it, so records processed earlier in the same call were
     * silently dropped (the append after the loop was unreachable).
     */
    if (mp_sbuf.size > 0) {
        flb_input_chunk_append_raw(in, NULL, 0, mp_sbuf.data, mp_sbuf.size);
    }
    msgpack_sbuffer_destroy(&mp_sbuf);
    return rc;
}

/* Cleanup serial input: restore the terminal, release the fd and context. */
int in_serial_exit(void *in_context, struct flb_config *config)
{
    struct flb_in_serial_config *ctx = in_context;

    flb_trace("[in_serial] Restoring original termios...");
    tcsetattr(ctx->fd, TCSANOW, &ctx->tio_orig);
    /* BUG FIX: the device fd was never closed on exit */
    close(ctx->fd);
    flb_pack_state_reset(&ctx->pack_state);
    flb_free(ctx);

    return 0;
}

/* Init serial input: read configuration, open and configure the device,
 * and register the collector.  Returns 0 on success, -1 on failure. */
int in_serial_init(struct flb_input_instance *in,
                   struct flb_config *config, void *data)
{
    int fd;
    int ret;
    int br;
    struct flb_in_serial_config *ctx;
    (void) data;

    ctx = flb_calloc(1, sizeof(struct flb_in_serial_config));
    if (!ctx) {
        perror("calloc");
        return -1;
    }
    ctx->format = FLB_SERIAL_FORMAT_NONE;

    if (!serial_config_read(ctx, in)) {
        /* BUG FIX: ctx was leaked here.  NOTE(review): assumes
         * serial_config_read() only fills ctx fields and does not take
         * ownership -- confirm against in_serial_config.c. */
        flb_free(ctx);
        return -1;
    }

    /* Initialize JSON pack state */
    if (ctx->format == FLB_SERIAL_FORMAT_JSON) {
        flb_pack_state_init(&ctx->pack_state);
        ctx->pack_state.multiple = FLB_TRUE;
    }

    /* Input instance */
    ctx->i_ins = in;

    /* set context */
    flb_input_set_context(in, ctx);

    /* open device */
    fd = open(ctx->file, O_RDWR | O_NOCTTY | O_NONBLOCK);
    if (fd == -1) {
        perror("open");
        flb_error("[in_serial] Could not open serial port device");
        /* release the pack state initialized above before freeing ctx */
        if (ctx->format == FLB_SERIAL_FORMAT_JSON) {
            flb_pack_state_reset(&ctx->pack_state);
        }
        flb_free(ctx);
        return -1;
    }
    ctx->fd = fd;

    /* Store original settings */
    tcgetattr(fd, &ctx->tio_orig);

    /* Reset for new...
*/ memset(&ctx->tio, 0, sizeof(ctx->tio)); tcgetattr(fd, &ctx->tio); br = atoi(ctx->bitrate); cfsetospeed(&ctx->tio, (speed_t) flb_serial_speed(br)); cfsetispeed(&ctx->tio, (speed_t) flb_serial_speed(br)); /* Settings */ ctx->tio.c_cflag &= ~PARENB; /* 8N1 */ ctx->tio.c_cflag &= ~CSTOPB; ctx->tio.c_cflag &= ~CSIZE; ctx->tio.c_cflag |= CS8; ctx->tio.c_cflag &= ~CRTSCTS; /* No flow control */ ctx->tio.c_cc[VMIN] = ctx->min_bytes; /* Min number of bytes to read */ ctx->tio.c_cflag |= CREAD | CLOCAL; /* Enable READ & ign ctrl lines */ tcflush(fd, TCIFLUSH); tcsetattr(fd, TCSANOW, &ctx->tio); #if __linux__ /* Set our collector based on a file descriptor event */ ret = flb_input_set_collector_event(in, in_serial_collect, ctx->fd, config); #else /* Set our collector based on a timer event */ ret = flb_input_set_collector_time(in, in_serial_collect, IN_SERIAL_COLLECT_SEC, IN_SERIAL_COLLECT_NSEC, config); #endif if (ret == -1) { return -1; } return 0; } /* Plugin reference */ struct flb_input_plugin in_serial_plugin = { .name = "serial", .description = "Serial input", .cb_init = in_serial_init, .cb_pre_run = NULL, .cb_collect = in_serial_collect, .cb_flush_buf = NULL, .cb_exit = in_serial_exit };
854824.c
#include "pronouns.h"
#include <ncurses.h>

/*
 * Program entry point.  Currently a stub: no ncurses session is
 * started yet (presumably initscr()/endwin() will be added later —
 * TODO confirm intended behavior).
 *
 * Changed the obsolescent non-prototype `main ()` to `main(void)` and
 * made the success return explicit for pre-C99 compatibility.
 */
int main(void)
{
    return 0;
}
246453.c
/* * QTest testcase for filter-redirector * * Copyright (c) 2016 FUJITSU LIMITED * Author: Zhang Chen <[email protected]> * * This work is licensed under the terms of the GNU GPL, version 2 or * later. See the COPYING file in the top-level directory. * * Case 1, tx traffic flow: * * qemu side | test side * | * +---------+ | +-------+ * | backend <---------------+ sock0 | * +----+----+ | +-------+ * | | * +----v----+ +-------+ | * | rd0 +->+chardev| | * +---------+ +---+---+ | * | | * +---------+ | | * | rd1 <------+ | * +----+----+ | * | | * +----v----+ | +-------+ * | rd2 +--------------->sock1 | * +---------+ | +-------+ * + * * -------------------------------------- * Case 2, rx traffic flow * qemu side | test side * | * +---------+ | +-------+ * | backend +---------------> sock1 | * +----^----+ | +-------+ * | | * +----+----+ +-------+ | * | rd0 +<-+chardev| | * +---------+ +---+---+ | * ^ | * +---------+ | | * | rd1 +------+ | * +----^----+ | * | | * +----+----+ | +-------+ * | rd2 <---------------+sock0 | * +---------+ | +-------+ * + */ #include "qemu/osdep.h" #include "qemu-common.h" #include "libqos/libqtest.h" #include "qapi/qmp/qdict.h" #include "qemu/iov.h" #include "qemu/sockets.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" /* TODO actually test the results and get rid of this */ #define qmp_discard_response(qs, ...) 
qobject_unref(qtest_qmp(qs, __VA_ARGS__)) static const char *get_devstr(void) { if (g_str_equal(qtest_get_arch(), "s390x")) { return "virtio-net-ccw"; } return "rtl8139"; } static void test_redirector_tx(void) { int backend_sock[2], recv_sock; uint32_t ret = 0, len = 0; char send_buf[] = "Hello!!"; char sock_path0[] = "filter-redirector0.XXXXXX"; char sock_path1[] = "filter-redirector1.XXXXXX"; char *recv_buf; uint32_t size = sizeof(send_buf); size = htonl(size); QTestState *qts; ret = socketpair(PF_UNIX, SOCK_STREAM, 0, backend_sock); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path0); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path1); g_assert_cmpint(ret, !=, -1); qts = qtest_initf( "-netdev socket,id=qtest-bn0,fd=%d " "-device %s,netdev=qtest-bn0,id=qtest-e0 " "-chardev socket,id=redirector0,path=%s,server=on,wait=off " "-chardev socket,id=redirector1,path=%s,server=on,wait=off " "-chardev socket,id=redirector2,path=%s " "-object filter-redirector,id=qtest-f0,netdev=qtest-bn0," "queue=tx,outdev=redirector0 " "-object filter-redirector,id=qtest-f1,netdev=qtest-bn0," "queue=tx,indev=redirector2 " "-object filter-redirector,id=qtest-f2,netdev=qtest-bn0," "queue=tx,outdev=redirector1 ", backend_sock[1], get_devstr(), sock_path0, sock_path1, sock_path0); recv_sock = unix_connect(sock_path1, NULL); g_assert_cmpint(recv_sock, !=, -1); /* send a qmp command to guarantee that 'connected' is setting to true. 
*/ qmp_discard_response(qts, "{ 'execute' : 'query-status'}"); struct iovec iov[] = { { .iov_base = &size, .iov_len = sizeof(size), }, { .iov_base = send_buf, .iov_len = sizeof(send_buf), }, }; ret = iov_send(backend_sock[0], iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(backend_sock[0]); ret = qemu_recv(recv_sock, &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(recv_sock, recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); close(recv_sock); unlink(sock_path0); unlink(sock_path1); qtest_quit(qts); } static void test_redirector_rx(void) { int backend_sock[2], send_sock; uint32_t ret = 0, len = 0; char send_buf[] = "Hello!!"; char sock_path0[] = "filter-redirector0.XXXXXX"; char sock_path1[] = "filter-redirector1.XXXXXX"; char *recv_buf; uint32_t size = sizeof(send_buf); size = htonl(size); QTestState *qts; ret = socketpair(PF_UNIX, SOCK_STREAM, 0, backend_sock); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path0); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path1); g_assert_cmpint(ret, !=, -1); qts = qtest_initf( "-netdev socket,id=qtest-bn0,fd=%d " "-device %s,netdev=qtest-bn0,id=qtest-e0 " "-chardev socket,id=redirector0,path=%s,server=on,wait=off " "-chardev socket,id=redirector1,path=%s,server=on,wait=off " "-chardev socket,id=redirector2,path=%s " "-object filter-redirector,id=qtest-f0,netdev=qtest-bn0," "queue=rx,indev=redirector0 " "-object filter-redirector,id=qtest-f1,netdev=qtest-bn0," "queue=rx,outdev=redirector2 " "-object filter-redirector,id=qtest-f2,netdev=qtest-bn0," "queue=rx,indev=redirector1 ", backend_sock[1], get_devstr(), sock_path0, sock_path1, sock_path0); struct iovec iov[] = { { .iov_base = &size, .iov_len = sizeof(size), }, { .iov_base = send_buf, .iov_len = sizeof(send_buf), }, }; send_sock = unix_connect(sock_path1, NULL); 
g_assert_cmpint(send_sock, !=, -1); /* send a qmp command to guarantee that 'connected' is setting to true. */ qmp_discard_response(qts, "{ 'execute' : 'query-status'}"); ret = iov_send(send_sock, iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); ret = qemu_recv(backend_sock[0], &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(backend_sock[0], recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); close(send_sock); g_free(recv_buf); unlink(sock_path0); unlink(sock_path1); qtest_quit(qts); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); qtest_add_func("/netfilter/redirector_tx", test_redirector_tx); qtest_add_func("/netfilter/redirector_rx", test_redirector_rx); return g_test_run(); }
549358.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2009 Linpro AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Storage method based on umem_alloc(3MALLOC)
 */

#include "config.h"

#include "svnid.h"
SVNID("$Id: storage_umem.c 145 2011-04-15 18:58:22Z jwg286 $")

#ifdef HAVE_LIBUMEM

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <umem.h>

#include "config.h"
#include "shmlog.h"
#include "cache.h"
#include "stevedore.h"

/* Upper bound (bytes) on total umem storage; settable via -sumem=<size> */
static ssize_t			smu_max = SIZE_MAX;
/* Protects the sma_* statistics counters */
static MTX			smu_mtx;

struct smu {
	struct storage		s;
	ssize_t			sz;	/* allocation size, mirrored for sanity checks */
};

/*
 * Allocate a storage segment of @size bytes.  Statistics are accounted
 * under smu_mtx.  Returns NULL when the configured limit would be
 * exceeded, when @size is zero, or when the header allocation fails.
 */
static struct storage *
smu_alloc(struct stevedore *st, ssize_t size)
{
	struct smu *smu;

	Lck_Lock(&smu_mtx);
	VSL_stats->sma_nreq++;
	if (size == 0 || VSL_stats->sma_nbytes + size > smu_max)
		/* FIX: a zero-size request used to fall into the else
		 * branch, incrementing sma_nobj for an object that is
		 * never created. */
		size = 0;
	else {
		VSL_stats->sma_nobj++;
		VSL_stats->sma_nbytes += size;
		VSL_stats->sma_balloc += size;
	}
	Lck_Unlock(&smu_mtx);

	if (size == 0)
		return (NULL);

	smu = umem_zalloc(sizeof *smu, UMEM_DEFAULT);
	if (smu == NULL) {
		/* FIX: roll back the accounting performed above; the
		 * original code returned with the counters permanently
		 * inflated. */
		Lck_Lock(&smu_mtx);
		VSL_stats->sma_nobj--;
		VSL_stats->sma_nbytes -= size;
		VSL_stats->sma_balloc -= size;
		Lck_Unlock(&smu_mtx);
		return (NULL);
	}
	smu->sz = size;
	smu->s.priv = smu;
	smu->s.ptr = umem_alloc(size, UMEM_DEFAULT);
	XXXAN(smu->s.ptr);
	smu->s.len = 0;
	smu->s.space = size;
	smu->s.fd = -1;
	smu->s.stevedore = st;
	smu->s.magic = STORAGE_MAGIC;
	return (&smu->s);
}

/*lint -e{818} not const-able */
/*
 * Release a storage segment and subtract it from the statistics.
 */
static void
smu_free(struct storage *s)
{
	struct smu *smu;

	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
	smu = s->priv;
	assert(smu->sz == smu->s.space);
	Lck_Lock(&smu_mtx);
	VSL_stats->sma_nobj--;
	VSL_stats->sma_nbytes -= smu->sz;
	VSL_stats->sma_bfree += smu->sz;
	Lck_Unlock(&smu_mtx);
	umem_free(smu->s.ptr, smu->s.space);
	umem_free(smu, sizeof *smu);
}

/*
 * Shrink a segment to @size bytes by copy-and-swap.  Best effort: if
 * the smaller allocation fails, the segment is left untouched.
 */
static void
smu_trim(const struct storage *s, ssize_t size)
{
	struct smu *smu;
	void *p;

	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
	smu = s->priv;
	assert(smu->sz == smu->s.space);
	if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
		memcpy(p, smu->s.ptr, size);
		umem_free(smu->s.ptr, smu->s.space);
		Lck_Lock(&smu_mtx);
		VSL_stats->sma_nbytes -= (smu->sz - size);
		VSL_stats->sma_bfree += smu->sz - size;
		smu->sz = size;
		Lck_Unlock(&smu_mtx);
		smu->s.ptr = p;
		smu->s.space = size;
	}
}

/*
 * Parse the optional "-sumem=<size>" argument into smu_max.
 */
static void
smu_init(struct stevedore *parent, int ac, char * const *av)
{
	const char *e;
	uintmax_t u;

	(void)parent;

	AZ(av[ac]);
	if (ac > 1)
		ARGV_ERR("(-sumem) too many arguments\n");

	if (ac == 0 || *av[0] == '\0')
		return;

	e = str2bytes(av[0], &u, 0);
	if (e != NULL)
		ARGV_ERR("(-sumem) size \"%s\": %s\n", av[0], e);
	if ((u != (uintmax_t)(ssize_t)u))
		ARGV_ERR("(-sumem) size \"%s\": too big\n", av[0]);
	smu_max = u;
}

static void
smu_open(const struct stevedore *st)
{
	(void)st;
	AZ(pthread_mutex_init(&smu_mtx, NULL));
}

struct stevedore smu_stevedore = {
	.magic	=	STEVEDORE_MAGIC,
	.name	=	"umem",
	.init	=	smu_init,
	.open	=	smu_open,
	.alloc	=	smu_alloc,
	.free	=	smu_free,
	.trim	=	smu_trim,
};

#endif /* HAVE_LIBUMEM */
128132.c
/* * This file is part of x48, an emulator of the HP-48sx Calculator. * Copyright (C) 1994 Eddie C. Dost ([email protected]) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* $Log: checkrom.c,v $ * Revision 1.4 1995/01/11 18:20:01 ecd * major update to support HP48 G/GX * * Revision 1.3 1994/11/02 14:40:38 ecd * support for "compressed" rom files added * * Revision 1.3 1994/11/02 14:40:38 ecd * support for "compressed" rom files added * * Revision 1.2 1994/10/06 16:30:05 ecd * changed char to unsigned * * Revision 1.1 1994/10/01 10:12:53 ecd * Initial revision * * * $Id: checkrom.c,v 1.4 1995/01/11 18:20:01 ecd Exp ecd $ */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/stat.h> #include "global.h" #include "romio.h" unsigned char *rom; unsigned short rom_crc, crc; int verbose = 0; char *progname; #define calc_crc(n) (crc = ((crc >> 4) ^ (((crc ^ n) & 0xf) * 0x1081))) int #ifdef __FunctionProto__ main(int argc, char **argv) #else main(argc, argv) int argc; char **argv; #endif { unsigned char version[7]; long ver_addr; int i, a, c, d, d0, d1, D0, D1; int fail; if (argc < 2) { fprintf(stderr, "usage: %s rom-file\n", argv[0]); exit (1); } if (!read_rom_file(argv[1], &rom, &rom_size)) { fprintf(stderr, "%s: can\'t read ROM from %s\n", argv[0], argv[1]); exit (1); } if (opt_gx != 0) ver_addr = 0x7ffbf; else ver_addr 
= 0x7fff0; for (i = 0; i < 6; i++) { version[i] = rom[ver_addr + 2 * i + 1] << 4; version[i] |= rom[ver_addr + 2 * i]; } version[6] = '\0'; printf("ROM Version is %s\n", version); for (i = 0x100; i < 0x140; i++) { rom[i] = 0x0; } fail = a = 0; D0 = 0x00000; D1 = 0x40000; for (d = 1; d <= rom_size / 0x80000; d++) { crc = 0x0000; rom_crc = 0; for (i = 0; i < 4; i++) { rom_crc <<= 4; rom_crc |= (rom[0x80000 * d - i - 1] & 0x0f); } if (opt_gx) printf("ROM CRC %d reads 0x%.4x\n", d, rom_crc); else printf("ROM CRC reads 0x%.4x\n", rom_crc); d0 = D0; d1 = D1; for (c = 0x3fff; c >= 0x0000; c--) { for (i = 0; i < 16; i++) { calc_crc(rom[d0 + i]); } d0 += 16; for (i = 0; i < 16; i++) { calc_crc(rom[d1 + i]); } d1 += 16; } D0 += 0x80000; D1 += 0x80000; a = crc; a = ((a | 0xf0000) + 1) & 0xfffff; if (a != 0x00000) { fail++; } } if (fail != 0) printf("IROM %.4x: ROM CRC test FAILED !!!\n", a & 0xffff); else printf("IROM OK: ROM CRC test passed.\n"); return 0; }
627639.c
/* * fs/kernfs/dir.c - kernfs directory implementation * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007, 2013 Tejun Heo <[email protected]> * * This file is released under the GPLv2. */ #include <linux/sched.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/hash.h> #include "kernfs-internal.h" DEFINE_MUTEX(kernfs_mutex); static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */ static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) static bool kernfs_active(struct kernfs_node *kn) { lockdep_assert_held(&kernfs_mutex); return atomic_read(&kn->active) >= 0; } static bool kernfs_lockdep(struct kernfs_node *kn) { #ifdef CONFIG_DEBUG_LOCK_ALLOC return kn->flags & KERNFS_LOCKDEP; #else return false; #endif } static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) { if (!kn) return strlcpy(buf, "(null)", buflen); return strlcpy(buf, kn->parent ? 
kn->name : "/", buflen); } /* kernfs_node_depth - compute depth from @from to @to */ static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to) { size_t depth = 0; while (to->parent && to != from) { depth++; to = to->parent; } return depth; } static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a, struct kernfs_node *b) { size_t da, db; struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b); if (ra != rb) return NULL; da = kernfs_depth(ra->kn, a); db = kernfs_depth(rb->kn, b); while (da > db) { a = a->parent; da--; } while (db > da) { b = b->parent; db--; } /* worst case b and a will be the same at root */ while (b != a) { b = b->parent; a = a->parent; } return a; } /** * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to, * where kn_from is treated as root of the path. * @kn_from: kernfs node which should be treated as root for the path * @kn_to: kernfs node to which path is needed * @buf: buffer to copy the path into * @buflen: size of @buf * * We need to handle couple of scenarios here: * [1] when @kn_from is an ancestor of @kn_to at some level * kn_from: /n1/n2/n3 * kn_to: /n1/n2/n3/n4/n5 * result: /n4/n5 * * [2] when @kn_from is on a different hierarchy and we need to find common * ancestor between @kn_from and @kn_to. * kn_from: /n1/n2/n3/n4 * kn_to: /n1/n2/n5 * result: /../../n5 * OR * kn_from: /n1/n2/n3/n4/n5 [depth=5] * kn_to: /n1/n2/n3 [depth=3] * result: /../.. * * [3] when @kn_to is NULL result will be "(null)" * * Returns the length of the full path. If the full length is equal to or * greater than @buflen, @buf contains the truncated path with the trailing * '\0'. On error, -errno is returned. 
*/ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to, struct kernfs_node *kn_from, char *buf, size_t buflen) { struct kernfs_node *kn, *common; const char parent_str[] = "/.."; size_t depth_from, depth_to, len = 0; int i, j; if (!kn_to) return strlcpy(buf, "(null)", buflen); if (!kn_from) kn_from = kernfs_root(kn_to)->kn; if (kn_from == kn_to) return strlcpy(buf, "/", buflen); common = kernfs_common_ancestor(kn_from, kn_to); if (WARN_ON(!common)) return -EINVAL; depth_to = kernfs_depth(common, kn_to); depth_from = kernfs_depth(common, kn_from); if (buf) buf[0] = '\0'; for (i = 0; i < depth_from; i++) len += strlcpy(buf + len, parent_str, len < buflen ? buflen - len : 0); /* Calculate how many bytes we need for the rest */ for (i = depth_to - 1; i >= 0; i--) { for (kn = kn_to, j = 0; j < i; j++) kn = kn->parent; len += strlcpy(buf + len, "/", len < buflen ? buflen - len : 0); len += strlcpy(buf + len, kn->name, len < buflen ? buflen - len : 0); } return len; } /** * kernfs_name - obtain the name of a given node * @kn: kernfs_node of interest * @buf: buffer to copy @kn's name into * @buflen: size of @buf * * Copies the name of @kn into @buf of @buflen bytes. The behavior is * similar to strlcpy(). It returns the length of @kn's name and if @buf * isn't long enough, it's filled upto @buflen-1 and nul terminated. * * Fills buffer with "(null)" if @kn is NULL. * * This function can be called from any context. */ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) { unsigned long flags; int ret; spin_lock_irqsave(&kernfs_rename_lock, flags); ret = kernfs_name_locked(kn, buf, buflen); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return ret; } /** * kernfs_path_from_node - build path of node @to relative to @from. * @from: parent kernfs_node relative to which we need to build the path * @to: kernfs_node of interest * @buf: buffer to copy @to's path into * @buflen: size of @buf * * Builds @to's path relative to @from in @buf. 
@from and @to must * be on the same kernfs-root. If @from is not parent of @to, then a relative * path (which includes '..'s) as needed to reach from @from to @to is * returned. * * Returns the length of the full path. If the full length is equal to or * greater than @buflen, @buf contains the truncated path with the trailing * '\0'. On error, -errno is returned. */ int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from, char *buf, size_t buflen) { unsigned long flags; int ret; spin_lock_irqsave(&kernfs_rename_lock, flags); ret = kernfs_path_from_node_locked(to, from, buf, buflen); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return ret; } EXPORT_SYMBOL_GPL(kernfs_path_from_node); /** * pr_cont_kernfs_name - pr_cont name of a kernfs_node * @kn: kernfs_node of interest * * This function can be called from any context. */ void pr_cont_kernfs_name(struct kernfs_node *kn) { unsigned long flags; spin_lock_irqsave(&kernfs_rename_lock, flags); kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); pr_cont("%s", kernfs_pr_cont_buf); spin_unlock_irqrestore(&kernfs_rename_lock, flags); } /** * pr_cont_kernfs_path - pr_cont path of a kernfs_node * @kn: kernfs_node of interest * * This function can be called from any context. */ void pr_cont_kernfs_path(struct kernfs_node *kn) { unsigned long flags; int sz; spin_lock_irqsave(&kernfs_rename_lock, flags); sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); if (sz < 0) { pr_cont("(error)"); goto out; } if (sz >= sizeof(kernfs_pr_cont_buf)) { pr_cont("(name too long)"); goto out; } pr_cont("%s", kernfs_pr_cont_buf); out: spin_unlock_irqrestore(&kernfs_rename_lock, flags); } /** * kernfs_get_parent - determine the parent node and pin it * @kn: kernfs_node of interest * * Determines @kn's parent, pins and returns it. This function can be * called from any context. 
*/ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) { struct kernfs_node *parent; unsigned long flags; spin_lock_irqsave(&kernfs_rename_lock, flags); parent = kn->parent; kernfs_get(parent); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return parent; } /** * kernfs_name_hash * @name: Null terminated string to hash * @ns: Namespace tag to hash * * Returns 31 bit hash of ns + name (so it fits in an off_t ) */ static unsigned int kernfs_name_hash(const char *name, const void *ns) { unsigned long hash = init_name_hash(ns); unsigned int len = strlen(name); while (len--) hash = partial_name_hash(*name++, hash); hash = end_name_hash(hash); hash &= 0x7fffffffU; /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ if (hash < 2) hash += 2; if (hash >= INT_MAX) hash = INT_MAX - 1; return hash; } static int kernfs_name_compare(unsigned int hash, const char *name, const void *ns, const struct kernfs_node *kn) { if (hash < kn->hash) return -1; if (hash > kn->hash) return 1; if (ns < kn->ns) return -1; if (ns > kn->ns) return 1; return strcmp(name, kn->name); } static int kernfs_sd_compare(const struct kernfs_node *left, const struct kernfs_node *right) { return kernfs_name_compare(left->hash, left->name, left->ns, right); } /** * kernfs_link_sibling - link kernfs_node into sibling rbtree * @kn: kernfs_node of interest * * Link @kn into its sibling rbtree which starts from * @kn->parent->dir.children. * * Locking: * mutex_lock(kernfs_mutex) * * RETURNS: * 0 on susccess -EEXIST on failure. 
*/ static int kernfs_link_sibling(struct kernfs_node *kn) { struct rb_node **node = &kn->parent->dir.children.rb_node; struct rb_node *parent = NULL; while (*node) { struct kernfs_node *pos; int result; pos = rb_to_kn(*node); parent = *node; result = kernfs_sd_compare(kn, pos); if (result < 0) node = &pos->rb.rb_left; else if (result > 0) node = &pos->rb.rb_right; else return -EEXIST; } /* add new node and rebalance the tree */ rb_link_node(&kn->rb, parent, node); rb_insert_color(&kn->rb, &kn->parent->dir.children); /* successfully added, account subdir number */ if (kernfs_type(kn) == KERNFS_DIR) kn->parent->dir.subdirs++; return 0; } /** * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree * @kn: kernfs_node of interest * * Try to unlink @kn from its sibling rbtree which starts from * kn->parent->dir.children. Returns %true if @kn was actually * removed, %false if @kn wasn't on the rbtree. * * Locking: * mutex_lock(kernfs_mutex) */ static bool kernfs_unlink_sibling(struct kernfs_node *kn) { if (RB_EMPTY_NODE(&kn->rb)) return false; if (kernfs_type(kn) == KERNFS_DIR) kn->parent->dir.subdirs--; rb_erase(&kn->rb, &kn->parent->dir.children); RB_CLEAR_NODE(&kn->rb); return true; } /** * kernfs_get_active - get an active reference to kernfs_node * @kn: kernfs_node to get an active reference to * * Get an active reference of @kn. This function is noop if @kn * is NULL. * * RETURNS: * Pointer to @kn on success, NULL on failure. */ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) { if (unlikely(!kn)) return NULL; if (!atomic_inc_unless_negative(&kn->active)) return NULL; if (kernfs_lockdep(kn)) rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); return kn; } /** * kernfs_put_active - put an active reference to kernfs_node * @kn: kernfs_node to put an active reference to * * Put an active reference to @kn. This function is noop if @kn * is NULL. 
*/ void kernfs_put_active(struct kernfs_node *kn) { struct kernfs_root *root = kernfs_root(kn); int v; if (unlikely(!kn)) return; if (kernfs_lockdep(kn)) rwsem_release(&kn->dep_map, 1, _RET_IP_); v = atomic_dec_return(&kn->active); if (likely(v != KN_DEACTIVATED_BIAS)) return; wake_up_all(&root->deactivate_waitq); } /** * kernfs_drain - drain kernfs_node * @kn: kernfs_node to drain * * Drain existing usages and nuke all existing mmaps of @kn. Mutiple * removers may invoke this function concurrently on @kn and all will * return after draining is complete. */ static void kernfs_drain(struct kernfs_node *kn) __releases(&kernfs_mutex) __acquires(&kernfs_mutex) { struct kernfs_root *root = kernfs_root(kn); lockdep_assert_held(&kernfs_mutex); WARN_ON_ONCE(kernfs_active(kn)); mutex_unlock(&kernfs_mutex); if (kernfs_lockdep(kn)) { rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) lock_contended(&kn->dep_map, _RET_IP_); } /* but everyone should wait for draining */ wait_event(root->deactivate_waitq, atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); if (kernfs_lockdep(kn)) { lock_acquired(&kn->dep_map, _RET_IP_); rwsem_release(&kn->dep_map, 1, _RET_IP_); } kernfs_drain_open_files(kn); mutex_lock(&kernfs_mutex); } /** * kernfs_get - get a reference count on a kernfs_node * @kn: the target kernfs_node */ void kernfs_get(struct kernfs_node *kn) { if (kn) { WARN_ON(!atomic_read(&kn->count)); atomic_inc(&kn->count); } } EXPORT_SYMBOL_GPL(kernfs_get); /** * kernfs_put - put a reference count on a kernfs_node * @kn: the target kernfs_node * * Put a reference count of @kn and destroy it if it reached zero. 
*/ void kernfs_put(struct kernfs_node *kn) { struct kernfs_node *parent; struct kernfs_root *root; /* * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino * depends on this to filter reused stale node */ if (!kn || !atomic_dec_and_test(&kn->count)) return; root = kernfs_root(kn); repeat: /* * Moving/renaming is always done while holding reference. * kn->parent won't change beneath us. */ parent = kn->parent; WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS, "kernfs_put: %s/%s: released with incorrect active_ref %d\n", parent ? parent->name : "", kn->name, atomic_read(&kn->active)); if (kernfs_type(kn) == KERNFS_LINK) kernfs_put(kn->symlink.target_kn); kfree_const(kn->name); if (kn->iattr) { if (kn->iattr->ia_secdata) security_release_secctx(kn->iattr->ia_secdata, kn->iattr->ia_secdata_len); simple_xattrs_free(&kn->iattr->xattrs); kmem_cache_free(kernfs_iattrs_cache, kn->iattr); } spin_lock(&kernfs_idr_lock); idr_remove(&root->ino_idr, kn->id.ino); spin_unlock(&kernfs_idr_lock); kmem_cache_free(kernfs_node_cache, kn); kn = parent; if (kn) { if (atomic_dec_and_test(&kn->count)) goto repeat; } else { /* just released the root kn, free @root too */ idr_destroy(&root->ino_idr); kfree(root); } } EXPORT_SYMBOL_GPL(kernfs_put); static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) { struct kernfs_node *kn; if (flags & LOOKUP_RCU) return -ECHILD; /* Always perform fresh lookup for negatives */ if (d_really_is_negative(dentry)) goto out_bad_unlocked; kn = kernfs_dentry_node(dentry); mutex_lock(&kernfs_mutex); /* The kernfs node has been deactivated */ if (!kernfs_active(kn)) goto out_bad; /* The kernfs node has been moved? 
*/ if (kernfs_dentry_node(dentry->d_parent) != kn->parent) goto out_bad; /* The kernfs node has been renamed */ if (strcmp(dentry->d_name.name, kn->name) != 0) goto out_bad; /* The kernfs node has been moved to a different namespace */ if (kn->parent && kernfs_ns_enabled(kn->parent) && kernfs_info(dentry->d_sb)->ns != kn->ns) goto out_bad; mutex_unlock(&kernfs_mutex); return 1; out_bad: mutex_unlock(&kernfs_mutex); out_bad_unlocked: return 0; } const struct dentry_operations kernfs_dops = { .d_revalidate = kernfs_dop_revalidate, }; /** * kernfs_node_from_dentry - determine kernfs_node associated with a dentry * @dentry: the dentry in question * * Return the kernfs_node associated with @dentry. If @dentry is not a * kernfs one, %NULL is returned. * * While the returned kernfs_node will stay accessible as long as @dentry * is accessible, the returned node can be in any state and the caller is * fully responsible for determining what's accessible. */ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) { if (dentry->d_sb->s_op == &kernfs_sops && !d_really_is_negative(dentry)) return kernfs_dentry_node(dentry); return NULL; } static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; u32 gen; int cursor; int ret; name = kstrdup_const(name, GFP_KERNEL); if (!name) return NULL; kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL); if (!kn) goto err_out1; idr_preload(GFP_KERNEL); spin_lock(&kernfs_idr_lock); cursor = idr_get_cursor(&root->ino_idr); ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); if (ret >= 0 && ret < cursor) root->next_generation++; gen = root->next_generation; spin_unlock(&kernfs_idr_lock); idr_preload_end(); if (ret < 0) goto err_out2; kn->id.ino = ret; kn->id.generation = gen; /* * set ino first. 
This RELEASE is paired with atomic_inc_not_zero in * kernfs_find_and_get_node_by_ino */ atomic_set_release(&kn->count, 1); atomic_set(&kn->active, KN_DEACTIVATED_BIAS); RB_CLEAR_NODE(&kn->rb); kn->name = name; kn->mode = mode; kn->flags = flags; if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) { struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_uid = uid, .ia_gid = gid, }; ret = __kernfs_setattr(kn, &iattr); if (ret < 0) goto err_out3; } return kn; err_out3: idr_remove(&root->ino_idr, kn->id.ino); err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: kfree_const(name); return NULL; } struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; kn = __kernfs_new_node(kernfs_root(parent), name, mode, uid, gid, flags); if (kn) { kernfs_get(parent); kn->parent = parent; } return kn; } /* * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number * @root: the kernfs root * @ino: inode number * * RETURNS: * NULL on failure. Return a kernfs node with reference counter incremented */ struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, unsigned int ino) { struct kernfs_node *kn; rcu_read_lock(); kn = idr_find(&root->ino_idr, ino); if (!kn) goto out; /* * Since kernfs_node is freed in RCU, it's possible an old node for ino * is freed, but reused before RCU grace period. But a freed node (see * kernfs_put) or an incompletedly initialized node (see * __kernfs_new_node) should have 'count' 0. We can use this fact to * filter out such node. */ if (!atomic_inc_not_zero(&kn->count)) { kn = NULL; goto out; } /* * The node could be a new node or a reused node. If it's a new node, * we are ok. If it's reused because of RCU (because of * SLAB_TYPESAFE_BY_RCU), the __kernfs_new_node always sets its 'ino' * before 'count'. 
So if 'count' is uptodate, 'ino' should be uptodate, * hence we can use 'ino' to filter stale node. */ if (kn->id.ino != ino) goto out; rcu_read_unlock(); return kn; out: rcu_read_unlock(); kernfs_put(kn); return NULL; } /** * kernfs_add_one - add kernfs_node to parent without warning * @kn: kernfs_node to be added * * The caller must already have initialized @kn->parent. This * function increments nlink of the parent's inode if @kn is a * directory and link into the children list of the parent. * * RETURNS: * 0 on success, -EEXIST if entry with the given name already * exists. */ int kernfs_add_one(struct kernfs_node *kn) { struct kernfs_node *parent = kn->parent; struct kernfs_iattrs *ps_iattr; bool has_ns; int ret; mutex_lock(&kernfs_mutex); ret = -EINVAL; has_ns = kernfs_ns_enabled(parent); if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", has_ns ? "required" : "invalid", parent->name, kn->name)) goto out_unlock; if (kernfs_type(parent) != KERNFS_DIR) goto out_unlock; ret = -ENOENT; if (parent->flags & KERNFS_EMPTY_DIR) goto out_unlock; if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent)) goto out_unlock; kn->hash = kernfs_name_hash(kn->name, kn->ns); ret = kernfs_link_sibling(kn); if (ret) goto out_unlock; /* Update timestamps on the parent */ ps_iattr = parent->iattr; if (ps_iattr) { struct iattr *ps_iattrs = &ps_iattr->ia_iattr; ktime_get_real_ts64(&ps_iattrs->ia_ctime); ps_iattrs->ia_mtime = ps_iattrs->ia_ctime; } mutex_unlock(&kernfs_mutex); /* * Activate the new node unless CREATE_DEACTIVATED is requested. * If not activated here, the kernfs user is responsible for * activating the node with kernfs_activate(). A node which hasn't * been activated is not visible to userland and its removal won't * trigger deactivation. 
*/ if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) kernfs_activate(kn); return 0; out_unlock: mutex_unlock(&kernfs_mutex); return ret; } /** * kernfs_find_ns - find kernfs_node with the given name * @parent: kernfs_node to search under * @name: name to look for * @ns: the namespace tag to use * * Look for kernfs_node with name @name under @parent. Returns pointer to * the found kernfs_node on success, %NULL on failure. */ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent, const unsigned char *name, const void *ns) { struct rb_node *node = parent->dir.children.rb_node; bool has_ns = kernfs_ns_enabled(parent); unsigned int hash; lockdep_assert_held(&kernfs_mutex); if (has_ns != (bool)ns) { WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", has_ns ? "required" : "invalid", parent->name, name); return NULL; } hash = kernfs_name_hash(name, ns); while (node) { struct kernfs_node *kn; int result; kn = rb_to_kn(node); result = kernfs_name_compare(hash, name, ns, kn); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return kn; } return NULL; } static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent, const unsigned char *path, const void *ns) { size_t len; char *p, *name; lockdep_assert_held(&kernfs_mutex); /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */ spin_lock_irq(&kernfs_rename_lock); len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf)); if (len >= sizeof(kernfs_pr_cont_buf)) { spin_unlock_irq(&kernfs_rename_lock); return NULL; } p = kernfs_pr_cont_buf; while ((name = strsep(&p, "/")) && parent) { if (*name == '\0') continue; parent = kernfs_find_ns(parent, name, ns); } spin_unlock_irq(&kernfs_rename_lock); return parent; } /** * kernfs_find_and_get_ns - find and get kernfs_node with the given name * @parent: kernfs_node to search under * @name: name to look for * @ns: the namespace tag to use * * Look for kernfs_node with name @name under @parent 
and get a reference * if found. This function may sleep and returns pointer to the found * kernfs_node on success, %NULL on failure. */ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns) { struct kernfs_node *kn; mutex_lock(&kernfs_mutex); kn = kernfs_find_ns(parent, name, ns); kernfs_get(kn); mutex_unlock(&kernfs_mutex); return kn; } EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns); /** * kernfs_walk_and_get_ns - find and get kernfs_node with the given path * @parent: kernfs_node to search under * @path: path to look for * @ns: the namespace tag to use * * Look for kernfs_node with path @path under @parent and get a reference * if found. This function may sleep and returns pointer to the found * kernfs_node on success, %NULL on failure. */ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path, const void *ns) { struct kernfs_node *kn; mutex_lock(&kernfs_mutex); kn = kernfs_walk_ns(parent, path, ns); kernfs_get(kn); mutex_unlock(&kernfs_mutex); return kn; } /** * kernfs_create_root - create a new kernfs hierarchy * @scops: optional syscall operations for the hierarchy * @flags: KERNFS_ROOT_* flags * @priv: opaque data associated with the new directory * * Returns the root of the new hierarchy on success, ERR_PTR() value on * failure. 
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	idr_init(&root->ino_idr);
	INIT_LIST_HEAD(&root->supers);
	root->next_generation = 1;

	/* the root node is a directory with an empty name, owned by root */
	kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			       KERNFS_DIR);
	if (!kn) {
		/* undo the idr_init() above before freeing @root */
		idr_destroy(&root->ino_idr);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	/* unless the caller asked for deactivated creation, go live now */
	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	kernfs_remove(root->kn);	/* will also free @root */
}

/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @uid: uid of the new directory
 * @gid: gid of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
*/ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, void *priv, const void *ns) { struct kernfs_node *kn; int rc; /* allocate */ kn = kernfs_new_node(parent, name, mode | S_IFDIR, uid, gid, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); kn->dir.root = parent->dir.root; kn->ns = ns; kn->priv = priv; /* link in */ rc = kernfs_add_one(kn); if (!rc) return kn; kernfs_put(kn); return ERR_PTR(rc); } /** * kernfs_create_empty_dir - create an always empty directory * @parent: parent in which to create a new directory * @name: name of the new directory * * Returns the created node on success, ERR_PTR() value on failure. */ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, const char *name) { struct kernfs_node *kn; int rc; /* allocate */ kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); kn->flags |= KERNFS_EMPTY_DIR; kn->dir.root = parent->dir.root; kn->ns = NULL; kn->priv = NULL; /* link in */ rc = kernfs_add_one(kn); if (!rc) return kn; kernfs_put(kn); return ERR_PTR(rc); } static struct dentry *kernfs_iop_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct dentry *ret; struct kernfs_node *parent = dir->i_private; struct kernfs_node *kn; struct inode *inode; const void *ns = NULL; mutex_lock(&kernfs_mutex); if (kernfs_ns_enabled(parent)) ns = kernfs_info(dir->i_sb)->ns; kn = kernfs_find_ns(parent, dentry->d_name.name, ns); /* no such entry */ if (!kn || !kernfs_active(kn)) { ret = NULL; goto out_unlock; } /* attach dentry and inode */ inode = kernfs_get_inode(dir->i_sb, kn); if (!inode) { ret = ERR_PTR(-ENOMEM); goto out_unlock; } /* instantiate and hash dentry */ ret = d_splice_alias(inode, dentry); out_unlock: mutex_unlock(&kernfs_mutex); return ret; } static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct kernfs_node 
*parent = dir->i_private; struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops; int ret; if (!scops || !scops->mkdir) return -EPERM; if (!kernfs_get_active(parent)) return -ENODEV; ret = scops->mkdir(parent, dentry->d_name.name, mode); kernfs_put_active(parent); return ret; } static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) { struct kernfs_node *kn = kernfs_dentry_node(dentry); struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; if (!scops || !scops->rmdir) return -EPERM; if (!kernfs_get_active(kn)) return -ENODEV; ret = scops->rmdir(kn); kernfs_put_active(kn); return ret; } static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct kernfs_node *kn = kernfs_dentry_node(old_dentry); struct kernfs_node *new_parent = new_dir->i_private; struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; if (flags) return -EINVAL; if (!scops || !scops->rename) return -EPERM; if (!kernfs_get_active(kn)) return -ENODEV; if (!kernfs_get_active(new_parent)) { kernfs_put_active(kn); return -ENODEV; } ret = scops->rename(kn, new_parent, new_dentry->d_name.name); kernfs_put_active(new_parent); kernfs_put_active(kn); return ret; } const struct inode_operations kernfs_dir_iops = { .lookup = kernfs_iop_lookup, .permission = kernfs_iop_permission, .setattr = kernfs_iop_setattr, .getattr = kernfs_iop_getattr, .listxattr = kernfs_iop_listxattr, .mkdir = kernfs_iop_mkdir, .rmdir = kernfs_iop_rmdir, .rename = kernfs_iop_rename, }; static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos) { struct kernfs_node *last; while (true) { struct rb_node *rbn; last = pos; if (kernfs_type(pos) != KERNFS_DIR) break; rbn = rb_first(&pos->dir.children); if (!rbn) break; pos = rb_to_kn(rbn); } return last; } /** * kernfs_next_descendant_post - find the next descendant for post-order walk * @pos: the current position 
(%NULL to initiate traversal) * @root: kernfs_node whose descendants to walk * * Find the next descendant to visit for post-order traversal of @root's * descendants. @root is included in the iteration and the last node to be * visited. */ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, struct kernfs_node *root) { struct rb_node *rbn; lockdep_assert_held(&kernfs_mutex); /* if first iteration, visit leftmost descendant which may be root */ if (!pos) return kernfs_leftmost_descendant(root); /* if we visited @root, we're done */ if (pos == root) return NULL; /* if there's an unvisited sibling, visit its leftmost descendant */ rbn = rb_next(&pos->rb); if (rbn) return kernfs_leftmost_descendant(rb_to_kn(rbn)); /* no sibling left, visit parent */ return pos->parent; } /** * kernfs_activate - activate a node which started deactivated * @kn: kernfs_node whose subtree is to be activated * * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node * needs to be explicitly activated. A node which hasn't been activated * isn't visible to userland and deactivation is skipped during its * removal. This is useful to construct atomic init sequences where * creation of multiple nodes should either succeed or fail atomically. * * The caller is responsible for ensuring that this function is not called * after kernfs_remove*() is invoked on @kn. 
*/ void kernfs_activate(struct kernfs_node *kn) { struct kernfs_node *pos; mutex_lock(&kernfs_mutex); pos = NULL; while ((pos = kernfs_next_descendant_post(pos, kn))) { if (!pos || (pos->flags & KERNFS_ACTIVATED)) continue; WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb)); WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS); atomic_sub(KN_DEACTIVATED_BIAS, &pos->active); pos->flags |= KERNFS_ACTIVATED; } mutex_unlock(&kernfs_mutex); } static void __kernfs_remove(struct kernfs_node *kn) { struct kernfs_node *pos; lockdep_assert_held(&kernfs_mutex); /* * Short-circuit if non-root @kn has already finished removal. * This is for kernfs_remove_self() which plays with active ref * after removal. */ if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb))) return; pr_debug("kernfs %s: removing\n", kn->name); /* prevent any new usage under @kn by deactivating all nodes */ pos = NULL; while ((pos = kernfs_next_descendant_post(pos, kn))) if (kernfs_active(pos)) atomic_add(KN_DEACTIVATED_BIAS, &pos->active); /* deactivate and unlink the subtree node-by-node */ do { pos = kernfs_leftmost_descendant(kn); /* * kernfs_drain() drops kernfs_mutex temporarily and @pos's * base ref could have been put by someone else by the time * the function returns. Make sure it doesn't go away * underneath us. */ kernfs_get(pos); /* * Drain iff @kn was activated. This avoids draining and * its lockdep annotations for nodes which have never been * activated and allows embedding kernfs_remove() in create * error paths without worrying about draining. */ if (kn->flags & KERNFS_ACTIVATED) kernfs_drain(pos); else WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); /* * kernfs_unlink_sibling() succeeds once per node. Use it * to decide who's responsible for cleanups. */ if (!pos->parent || kernfs_unlink_sibling(pos)) { struct kernfs_iattrs *ps_iattr = pos->parent ? 
pos->parent->iattr : NULL; /* update timestamps on the parent */ if (ps_iattr) { ktime_get_real_ts64(&ps_iattr->ia_iattr.ia_ctime); ps_iattr->ia_iattr.ia_mtime = ps_iattr->ia_iattr.ia_ctime; } kernfs_put(pos); } kernfs_put(pos); } while (pos != kn); } /** * kernfs_remove - remove a kernfs_node recursively * @kn: the kernfs_node to remove * * Remove @kn along with all its subdirectories and files. */ void kernfs_remove(struct kernfs_node *kn) { mutex_lock(&kernfs_mutex); __kernfs_remove(kn); mutex_unlock(&kernfs_mutex); } /** * kernfs_break_active_protection - break out of active protection * @kn: the self kernfs_node * * The caller must be running off of a kernfs operation which is invoked * with an active reference - e.g. one of kernfs_ops. Each invocation of * this function must also be matched with an invocation of * kernfs_unbreak_active_protection(). * * This function releases the active reference of @kn the caller is * holding. Once this function is called, @kn may be removed at any point * and the caller is solely responsible for ensuring that the objects it * dereferences are accessible. */ void kernfs_break_active_protection(struct kernfs_node *kn) { /* * Take out ourself out of the active ref dependency chain. If * we're called without an active ref, lockdep will complain. */ kernfs_put_active(kn); } /** * kernfs_unbreak_active_protection - undo kernfs_break_active_protection() * @kn: the self kernfs_node * * If kernfs_break_active_protection() was called, this function must be * invoked before finishing the kernfs operation. Note that while this * function restores the active reference, it doesn't and can't actually * restore the active protection - @kn may already or be in the process of * being removed. Once kernfs_break_active_protection() is invoked, that * protection is irreversibly gone for the kernfs operation instance. 
* * While this function may be called at any point after * kernfs_break_active_protection() is invoked, its most useful location * would be right before the enclosing kernfs operation returns. */ void kernfs_unbreak_active_protection(struct kernfs_node *kn) { /* * @kn->active could be in any state; however, the increment we do * here will be undone as soon as the enclosing kernfs operation * finishes and this temporary bump can't break anything. If @kn * is alive, nothing changes. If @kn is being deactivated, the * soon-to-follow put will either finish deactivation or restore * deactivated state. If @kn is already removed, the temporary * bump is guaranteed to be gone before @kn is released. */ atomic_inc(&kn->active); if (kernfs_lockdep(kn)) rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_); } /** * kernfs_remove_self - remove a kernfs_node from its own method * @kn: the self kernfs_node to remove * * The caller must be running off of a kernfs operation which is invoked * with an active reference - e.g. one of kernfs_ops. This can be used to * implement a file operation which deletes itself. * * For example, the "delete" file for a sysfs device directory can be * implemented by invoking kernfs_remove_self() on the "delete" file * itself. This function breaks the circular dependency of trying to * deactivate self while holding an active ref itself. It isn't necessary * to modify the usual removal path to use kernfs_remove_self(). The * "delete" implementation can simply invoke kernfs_remove_self() on self * before proceeding with the usual removal path. kernfs will ignore later * kernfs_remove() on self. * * kernfs_remove_self() can be called multiple times concurrently on the * same kernfs_node. Only the first one actually performs removal and * returns %true. All others will wait until the kernfs operation which * won self-removal finishes and return %false. 
Note that the losers wait * for the completion of not only the winning kernfs_remove_self() but also * the whole kernfs_ops which won the arbitration. This can be used to * guarantee, for example, all concurrent writes to a "delete" file to * finish only after the whole operation is complete. */ bool kernfs_remove_self(struct kernfs_node *kn) { bool ret; mutex_lock(&kernfs_mutex); kernfs_break_active_protection(kn); /* * SUICIDAL is used to arbitrate among competing invocations. Only * the first one will actually perform removal. When the removal * is complete, SUICIDED is set and the active ref is restored * while holding kernfs_mutex. The ones which lost arbitration * waits for SUICDED && drained which can happen only after the * enclosing kernfs operation which executed the winning instance * of kernfs_remove_self() finished. */ if (!(kn->flags & KERNFS_SUICIDAL)) { kn->flags |= KERNFS_SUICIDAL; __kernfs_remove(kn); kn->flags |= KERNFS_SUICIDED; ret = true; } else { wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq; DEFINE_WAIT(wait); while (true) { prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE); if ((kn->flags & KERNFS_SUICIDED) && atomic_read(&kn->active) == KN_DEACTIVATED_BIAS) break; mutex_unlock(&kernfs_mutex); schedule(); mutex_lock(&kernfs_mutex); } finish_wait(waitq, &wait); WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb)); ret = false; } /* * This must be done while holding kernfs_mutex; otherwise, waiting * for SUICIDED && deactivated could finish prematurely. */ kernfs_unbreak_active_protection(kn); mutex_unlock(&kernfs_mutex); return ret; } /** * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it * @parent: parent of the target * @name: name of the kernfs_node to remove * @ns: namespace tag of the kernfs_node to remove * * Look for the kernfs_node with @name and @ns under @parent and remove it. * Returns 0 on success, -ENOENT if such entry doesn't exist. 
*/ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, const void *ns) { struct kernfs_node *kn; if (!parent) { WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n", name); return -ENOENT; } mutex_lock(&kernfs_mutex); kn = kernfs_find_ns(parent, name, ns); if (kn) __kernfs_remove(kn); mutex_unlock(&kernfs_mutex); if (kn) return 0; else return -ENOENT; } /** * kernfs_rename_ns - move and rename a kernfs_node * @kn: target node * @new_parent: new parent to put @sd under * @new_name: new name * @new_ns: new namespace tag */ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name, const void *new_ns) { struct kernfs_node *old_parent; const char *old_name = NULL; int error; /* can't move or rename root */ if (!kn->parent) return -EINVAL; mutex_lock(&kernfs_mutex); error = -ENOENT; if (!kernfs_active(kn) || !kernfs_active(new_parent) || (new_parent->flags & KERNFS_EMPTY_DIR)) goto out; error = 0; if ((kn->parent == new_parent) && (kn->ns == new_ns) && (strcmp(kn->name, new_name) == 0)) goto out; /* nothing to rename */ error = -EEXIST; if (kernfs_find_ns(new_parent, new_name, new_ns)) goto out; /* rename kernfs_node */ if (strcmp(kn->name, new_name) != 0) { error = -ENOMEM; new_name = kstrdup_const(new_name, GFP_KERNEL); if (!new_name) goto out; } else { new_name = NULL; } /* * Move to the appropriate place in the appropriate directories rbtree. 
*/ kernfs_unlink_sibling(kn); kernfs_get(new_parent); /* rename_lock protects ->parent and ->name accessors */ spin_lock_irq(&kernfs_rename_lock); old_parent = kn->parent; kn->parent = new_parent; kn->ns = new_ns; if (new_name) { old_name = kn->name; kn->name = new_name; } spin_unlock_irq(&kernfs_rename_lock); kn->hash = kernfs_name_hash(kn->name, kn->ns); kernfs_link_sibling(kn); kernfs_put(old_parent); kfree_const(old_name); error = 0; out: mutex_unlock(&kernfs_mutex); return error; } /* Relationship between s_mode and the DT_xxx types */ static inline unsigned char dt_type(struct kernfs_node *kn) { return (kn->mode >> 12) & 15; } static int kernfs_dir_fop_release(struct inode *inode, struct file *filp) { kernfs_put(filp->private_data); return 0; } static struct kernfs_node *kernfs_dir_pos(const void *ns, struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) { if (pos) { int valid = kernfs_active(pos) && pos->parent == parent && hash == pos->hash; kernfs_put(pos); if (!valid) pos = NULL; } if (!pos && (hash > 1) && (hash < INT_MAX)) { struct rb_node *node = parent->dir.children.rb_node; while (node) { pos = rb_to_kn(node); if (hash < pos->hash) node = node->rb_left; else if (hash > pos->hash) node = node->rb_right; else break; } } /* Skip over entries which are dying/dead or in the wrong namespace */ while (pos && (!kernfs_active(pos) || pos->ns != ns)) { struct rb_node *node = rb_next(&pos->rb); if (!node) pos = NULL; else pos = rb_to_kn(node); } return pos; } static struct kernfs_node *kernfs_dir_next_pos(const void *ns, struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos) { pos = kernfs_dir_pos(ns, parent, ino, pos); if (pos) { do { struct rb_node *node = rb_next(&pos->rb); if (!node) pos = NULL; else pos = rb_to_kn(node); } while (pos && (!kernfs_active(pos) || pos->ns != ns)); } return pos; } static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; struct kernfs_node 
*parent = kernfs_dentry_node(dentry); struct kernfs_node *pos = file->private_data; const void *ns = NULL; if (!dir_emit_dots(file, ctx)) return 0; mutex_lock(&kernfs_mutex); if (kernfs_ns_enabled(parent)) ns = kernfs_info(dentry->d_sb)->ns; for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos); pos; pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) { const char *name = pos->name; unsigned int type = dt_type(pos); int len = strlen(name); ino_t ino = pos->id.ino; ctx->pos = pos->hash; file->private_data = pos; kernfs_get(pos); mutex_unlock(&kernfs_mutex); if (!dir_emit(ctx, name, len, ino, type)) return 0; mutex_lock(&kernfs_mutex); } mutex_unlock(&kernfs_mutex); file->private_data = NULL; ctx->pos = INT_MAX; return 0; } const struct file_operations kernfs_dir_fops = { .read = generic_read_dir, .iterate_shared = kernfs_fop_readdir, .release = kernfs_dir_fop_release, .llseek = generic_file_llseek, };
187970.c
/****************************************************************************
 * Copyright (C) 2004 Leonid Zolotarev
 *
 * Licensed under the terms of the BSD license, see file COPYING
 * for details.
 *
 * GTK ACX Tool.
 *
 * Rate widget.
 *
 * $Id: gacxrate.c,v 1.1.1.1 2005-07-23 23:16:11 zoleo Exp $
 ***************************************************************************/
#include "support.h"
#include "acxlib.h"
#include "gacxrate.h"

/***************************************************************************/
/* Singleton holding the rate-display widgets; NULL until created. */
gacx_rate_data* _rate_data = NULL;

/***************************************************************************/
/* Allocate the singleton and resolve all rate widgets from @main_window.
 * A second call while the singleton exists is a no-op. */
void gacx_rate_data_create ( GtkWidget* main_window )
{
  gacx_rate_data* d;

  if ( _rate_data )
    return;

  d = g_malloc ( sizeof ( gacx_rate_data ) );
  _rate_data = d;

  /* event boxes whose background shows the currently active rate */
  d->rd_1   = lookup_widget ( main_window, "eventbox_rate_1" );
  d->rd_2   = lookup_widget ( main_window, "eventbox_rate_2" );
  d->rd_5   = lookup_widget ( main_window, "eventbox_rate_5" );
  d->rd_11  = lookup_widget ( main_window, "eventbox_rate_11" );
  d->rd_22  = lookup_widget ( main_window, "eventbox_rate_22" );

  /* frames hidden/shown depending on the supported rate mask */
  d->rd_f1  = lookup_widget ( main_window, "frame_rate_1" );
  d->rd_f2  = lookup_widget ( main_window, "frame_rate_2" );
  d->rd_f5  = lookup_widget ( main_window, "frame_rate_5" );
  d->rd_f11 = lookup_widget ( main_window, "frame_rate_11" );
  d->rd_f22 = lookup_widget ( main_window, "frame_rate_22" );

  d->rd_rates = 0;
}

/***************************************************************************/
/* Release the singleton. Widget pointers are owned by GTK, so they are
 * only cleared, not destroyed, before the struct itself is freed. */
void gacx_rate_data_destroy ()
{
  gacx_rate_data* d = _rate_data;

  if ( ! d )
    return;

  d->rd_1   = NULL;
  d->rd_2   = NULL;
  d->rd_5   = NULL;
  d->rd_11  = NULL;
  d->rd_22  = NULL;
  d->rd_f1  = NULL;
  d->rd_f2  = NULL;
  d->rd_f5  = NULL;
  d->rd_f11 = NULL;
  d->rd_f22 = NULL;
  d->rd_rates = 0;

  g_free ( d );
  _rate_data = NULL;
}
/***************************************************************************/ void gacx_rate_data_update_current () { GdkColor color_enable; GdkColor color_disable; GdkColor color; gint rate = 0; if ( _rate_data ) { color_enable = _rate_data->rd_color_enable; color_disable = _rate_data->rd_color_disable; rate = net_wi_get_txrate (); if ( rate == 1 ) { color = color_enable; } else { color = color_disable; } gtk_widget_modify_bg ( _rate_data->rd_1, GTK_STATE_NORMAL, &color ); if ( rate == 2 ) { color = color_enable; } else { color = color_disable; } gtk_widget_modify_bg ( _rate_data->rd_2, GTK_STATE_NORMAL, &color ); if ( rate > 2 && rate <= 6 ) { color = color_enable; } else { color = color_disable; } gtk_widget_modify_bg ( _rate_data->rd_5, GTK_STATE_NORMAL, &color ); if ( rate > 6 && rate <= 11 ) { color = color_enable; } else { color = color_disable; } gtk_widget_modify_bg ( _rate_data->rd_11, GTK_STATE_NORMAL, &color ); if ( rate >= 22 ) { color = color_enable; } else { color = color_disable; } gtk_widget_modify_bg ( _rate_data->rd_22, GTK_STATE_NORMAL, &color ); } } /***************************************************************************/ void gacx_rate_data_update_rates ( gint rates ) { GtkStyle* style = NULL; if ( _rate_data ) { if ( _rate_data->rd_rates == 0 && _rate_data->rd_1 ) { style = gtk_widget_get_style ( _rate_data->rd_1 ); if ( style ) { _rate_data->rd_color_enable = style->bg [ GTK_STATE_PRELIGHT ]; _rate_data->rd_color_disable = style->bg [ GTK_STATE_SELECTED ]; } } if ( _rate_data->rd_rates != rates ) { _rate_data->rd_rates = rates; if ( acx_rate_supported ( acx_rate_1, rates ) ) { gtk_widget_show ( _rate_data->rd_f1 ); } else { gtk_widget_hide ( _rate_data->rd_f1 ); } if ( acx_rate_supported ( acx_rate_2, rates ) ) { gtk_widget_show ( _rate_data->rd_f2 ); } else { gtk_widget_hide ( _rate_data->rd_f2 ); } if ( acx_rate_supported ( acx_rate_5, rates ) ) { gtk_widget_show ( _rate_data->rd_f5 ); } else { gtk_widget_hide ( _rate_data->rd_f5 ); } 
if ( acx_rate_supported ( acx_rate_11, rates ) ) { gtk_widget_show ( _rate_data->rd_f11 ); } else { gtk_widget_hide ( _rate_data->rd_f11 ); } if ( acx_rate_supported ( acx_rate_22, rates ) ) { gtk_widget_show ( _rate_data->rd_f22 ); } else { gtk_widget_hide ( _rate_data->rd_f22 ); } } } } /***************************************************************************/ void gacx_rate_data_update ( gint rates ) { gacx_rate_data_update_rates ( rates ); gacx_rate_data_update_current (); } /***************************************************************************/
368360.c
/* Copyright (c), 2004-2005,2007-2010 Trident Microsystems, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Trident Microsystems nor Hauppauge Computer Works nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
DRXJ specific implementation of DRX driver authors: Dragan Savic, Milos Nikolic, Mihajlo Katona, Tao Ding, Paul Janssen The Linux DVB Driver for Micronas DRX39xx family (drx3933j) was written by Devin Heitmueller <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /*----------------------------------------------------------------------------- INCLUDE FILES ----------------------------------------------------------------------------*/ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "drx39xxj.h" #include "drxj.h" #include "drxj_map.h" /*============================================================================*/ /*=== DEFINES ================================================================*/ /*============================================================================*/ #define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw" /** * \brief Maximum u32 value. 
*/ #ifndef MAX_U32 #define MAX_U32 ((u32) (0xFFFFFFFFL)) #endif /* Customer configurable hardware settings, etc */ #ifndef MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH #define MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH 0x02 #endif #ifndef MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH #define MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH 0x02 #endif #ifndef MPEG_OUTPUT_CLK_DRIVE_STRENGTH #define MPEG_OUTPUT_CLK_DRIVE_STRENGTH 0x06 #endif #ifndef OOB_CRX_DRIVE_STRENGTH #define OOB_CRX_DRIVE_STRENGTH 0x02 #endif #ifndef OOB_DRX_DRIVE_STRENGTH #define OOB_DRX_DRIVE_STRENGTH 0x02 #endif /**** START DJCOMBO patches to DRXJ registermap constants *********************/ /**** registermap 200706071303 from drxj **************************************/ #define ATV_TOP_CR_AMP_TH_FM 0x0 #define ATV_TOP_CR_AMP_TH_L 0xA #define ATV_TOP_CR_AMP_TH_LP 0xA #define ATV_TOP_CR_AMP_TH_BG 0x8 #define ATV_TOP_CR_AMP_TH_DK 0x8 #define ATV_TOP_CR_AMP_TH_I 0x8 #define ATV_TOP_CR_CONT_CR_D_MN 0x18 #define ATV_TOP_CR_CONT_CR_D_FM 0x0 #define ATV_TOP_CR_CONT_CR_D_L 0x20 #define ATV_TOP_CR_CONT_CR_D_LP 0x20 #define ATV_TOP_CR_CONT_CR_D_BG 0x18 #define ATV_TOP_CR_CONT_CR_D_DK 0x18 #define ATV_TOP_CR_CONT_CR_D_I 0x18 #define ATV_TOP_CR_CONT_CR_I_MN 0x80 #define ATV_TOP_CR_CONT_CR_I_FM 0x0 #define ATV_TOP_CR_CONT_CR_I_L 0x80 #define ATV_TOP_CR_CONT_CR_I_LP 0x80 #define ATV_TOP_CR_CONT_CR_I_BG 0x80 #define ATV_TOP_CR_CONT_CR_I_DK 0x80 #define ATV_TOP_CR_CONT_CR_I_I 0x80 #define ATV_TOP_CR_CONT_CR_P_MN 0x4 #define ATV_TOP_CR_CONT_CR_P_FM 0x0 #define ATV_TOP_CR_CONT_CR_P_L 0x4 #define ATV_TOP_CR_CONT_CR_P_LP 0x4 #define ATV_TOP_CR_CONT_CR_P_BG 0x4 #define ATV_TOP_CR_CONT_CR_P_DK 0x4 #define ATV_TOP_CR_CONT_CR_P_I 0x4 #define ATV_TOP_CR_OVM_TH_MN 0xA0 #define ATV_TOP_CR_OVM_TH_FM 0x0 #define ATV_TOP_CR_OVM_TH_L 0xA0 #define ATV_TOP_CR_OVM_TH_LP 0xA0 #define ATV_TOP_CR_OVM_TH_BG 0xA0 #define ATV_TOP_CR_OVM_TH_DK 0xA0 #define ATV_TOP_CR_OVM_TH_I 0xA0 #define ATV_TOP_EQU0_EQU_C0_FM 0x0 #define ATV_TOP_EQU0_EQU_C0_L 0x3 #define 
ATV_TOP_EQU0_EQU_C0_LP 0x3 #define ATV_TOP_EQU0_EQU_C0_BG 0x7 #define ATV_TOP_EQU0_EQU_C0_DK 0x0 #define ATV_TOP_EQU0_EQU_C0_I 0x3 #define ATV_TOP_EQU1_EQU_C1_FM 0x0 #define ATV_TOP_EQU1_EQU_C1_L 0x1F6 #define ATV_TOP_EQU1_EQU_C1_LP 0x1F6 #define ATV_TOP_EQU1_EQU_C1_BG 0x197 #define ATV_TOP_EQU1_EQU_C1_DK 0x198 #define ATV_TOP_EQU1_EQU_C1_I 0x1F6 #define ATV_TOP_EQU2_EQU_C2_FM 0x0 #define ATV_TOP_EQU2_EQU_C2_L 0x28 #define ATV_TOP_EQU2_EQU_C2_LP 0x28 #define ATV_TOP_EQU2_EQU_C2_BG 0xC5 #define ATV_TOP_EQU2_EQU_C2_DK 0xB0 #define ATV_TOP_EQU2_EQU_C2_I 0x28 #define ATV_TOP_EQU3_EQU_C3_FM 0x0 #define ATV_TOP_EQU3_EQU_C3_L 0x192 #define ATV_TOP_EQU3_EQU_C3_LP 0x192 #define ATV_TOP_EQU3_EQU_C3_BG 0x12E #define ATV_TOP_EQU3_EQU_C3_DK 0x18E #define ATV_TOP_EQU3_EQU_C3_I 0x192 #define ATV_TOP_STD_MODE_MN 0x0 #define ATV_TOP_STD_MODE_FM 0x1 #define ATV_TOP_STD_MODE_L 0x0 #define ATV_TOP_STD_MODE_LP 0x0 #define ATV_TOP_STD_MODE_BG 0x0 #define ATV_TOP_STD_MODE_DK 0x0 #define ATV_TOP_STD_MODE_I 0x0 #define ATV_TOP_STD_VID_POL_MN 0x0 #define ATV_TOP_STD_VID_POL_FM 0x0 #define ATV_TOP_STD_VID_POL_L 0x2 #define ATV_TOP_STD_VID_POL_LP 0x2 #define ATV_TOP_STD_VID_POL_BG 0x0 #define ATV_TOP_STD_VID_POL_DK 0x0 #define ATV_TOP_STD_VID_POL_I 0x0 #define ATV_TOP_VID_AMP_MN 0x380 #define ATV_TOP_VID_AMP_FM 0x0 #define ATV_TOP_VID_AMP_L 0xF50 #define ATV_TOP_VID_AMP_LP 0xF50 #define ATV_TOP_VID_AMP_BG 0x380 #define ATV_TOP_VID_AMP_DK 0x394 #define ATV_TOP_VID_AMP_I 0x3D8 #define IQM_CF_OUT_ENA_OFDM__M 0x4 #define IQM_FS_ADJ_SEL_B_QAM 0x1 #define IQM_FS_ADJ_SEL_B_OFF 0x0 #define IQM_FS_ADJ_SEL_B_VSB 0x2 #define IQM_RC_ADJ_SEL_B_OFF 0x0 #define IQM_RC_ADJ_SEL_B_QAM 0x1 #define IQM_RC_ADJ_SEL_B_VSB 0x2 /**** END DJCOMBO patches to DRXJ registermap *********************************/ #include "drx_driver_version.h" /* #define DRX_DEBUG */ #ifdef DRX_DEBUG #include <stdio.h> #endif /*----------------------------------------------------------------------------- ENUMS 
----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------- DEFINES ----------------------------------------------------------------------------*/ #ifndef DRXJ_WAKE_UP_KEY #define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr) #endif /** * \def DRXJ_DEF_I2C_ADDR * \brief Default I2C address of a demodulator instance. */ #define DRXJ_DEF_I2C_ADDR (0x52) /** * \def DRXJ_DEF_DEMOD_DEV_ID * \brief Default device identifier of a demodultor instance. */ #define DRXJ_DEF_DEMOD_DEV_ID (1) /** * \def DRXJ_SCAN_TIMEOUT * \brief Timeout value for waiting on demod lock during channel scan (millisec). */ #define DRXJ_SCAN_TIMEOUT 1000 /** * \def HI_I2C_DELAY * \brief HI timing delay for I2C timing (in nano seconds) * * Used to compute HI_CFG_DIV */ #define HI_I2C_DELAY 42 /** * \def HI_I2C_BRIDGE_DELAY * \brief HI timing delay for I2C timing (in nano seconds) * * Used to compute HI_CFG_BDL */ #define HI_I2C_BRIDGE_DELAY 750 /** * \brief Time Window for MER and SER Measurement in Units of Segment duration. */ #define VSB_TOP_MEASUREMENT_PERIOD 64 #define SYMBOLS_PER_SEGMENT 832 /** * \brief bit rate and segment rate constants used for SER and BER. */ /* values taken from the QAM microcode */ #define DRXJ_QAM_SL_SIG_POWER_QAM_UNKNOWN 0 #define DRXJ_QAM_SL_SIG_POWER_QPSK 32768 #define DRXJ_QAM_SL_SIG_POWER_QAM8 24576 #define DRXJ_QAM_SL_SIG_POWER_QAM16 40960 #define DRXJ_QAM_SL_SIG_POWER_QAM32 20480 #define DRXJ_QAM_SL_SIG_POWER_QAM64 43008 #define DRXJ_QAM_SL_SIG_POWER_QAM128 20992 #define DRXJ_QAM_SL_SIG_POWER_QAM256 43520 /** * \brief Min supported symbolrates. */ #ifndef DRXJ_QAM_SYMBOLRATE_MIN #define DRXJ_QAM_SYMBOLRATE_MIN (520000) #endif /** * \brief Max supported symbolrates. 
*/ #ifndef DRXJ_QAM_SYMBOLRATE_MAX #define DRXJ_QAM_SYMBOLRATE_MAX (7233000) #endif /** * \def DRXJ_QAM_MAX_WAITTIME * \brief Maximal wait time for QAM auto constellation in ms */ #ifndef DRXJ_QAM_MAX_WAITTIME #define DRXJ_QAM_MAX_WAITTIME 900 #endif #ifndef DRXJ_QAM_FEC_LOCK_WAITTIME #define DRXJ_QAM_FEC_LOCK_WAITTIME 150 #endif #ifndef DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME #define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200 #endif /** * \def SCU status and results * \brief SCU */ #define DRX_SCU_READY 0 #define DRXJ_MAX_WAITTIME 100 /* ms */ #define FEC_RS_MEASUREMENT_PERIOD 12894 /* 1 sec */ #define FEC_RS_MEASUREMENT_PRESCALE 1 /* n sec */ /** * \def DRX_AUD_MAX_DEVIATION * \brief Needed for calculation of prescale feature in AUD */ #ifndef DRXJ_AUD_MAX_FM_DEVIATION #define DRXJ_AUD_MAX_FM_DEVIATION 100 /* kHz */ #endif /** * \brief Needed for calculation of NICAM prescale feature in AUD */ #ifndef DRXJ_AUD_MAX_NICAM_PRESCALE #define DRXJ_AUD_MAX_NICAM_PRESCALE (9) /* dB */ #endif /** * \brief Needed for calculation of NICAM prescale feature in AUD */ #ifndef DRXJ_AUD_MAX_WAITTIME #define DRXJ_AUD_MAX_WAITTIME 250 /* ms */ #endif /* ATV config changed flags */ #define DRXJ_ATV_CHANGED_COEF (0x00000001UL) #define DRXJ_ATV_CHANGED_PEAK_FLT (0x00000008UL) #define DRXJ_ATV_CHANGED_NOISE_FLT (0x00000010UL) #define DRXJ_ATV_CHANGED_OUTPUT (0x00000020UL) #define DRXJ_ATV_CHANGED_SIF_ATT (0x00000040UL) /* UIO define */ #define DRX_UIO_MODE_FIRMWARE_SMA DRX_UIO_MODE_FIRMWARE0 #define DRX_UIO_MODE_FIRMWARE_SAW DRX_UIO_MODE_FIRMWARE1 /* * MICROCODE RELATED DEFINES */ /* Magic word for checking correct Endianness of microcode data */ #define DRX_UCODE_MAGIC_WORD ((((u16)'H')<<8)+((u16)'L')) /* CRC flag in ucode header, flags field. */ #define DRX_UCODE_CRC_FLAG (0x0001) /* * Maximum size of buffer used to verify the microcode. 
* Must be an even number */ #define DRX_UCODE_MAX_BUF_SIZE (DRXDAP_MAX_RCHUNKSIZE) #if DRX_UCODE_MAX_BUF_SIZE & 1 #error DRX_UCODE_MAX_BUF_SIZE must be an even number #endif /* * Power mode macros */ #define DRX_ISPOWERDOWNMODE(mode) ((mode == DRX_POWER_MODE_9) || \ (mode == DRX_POWER_MODE_10) || \ (mode == DRX_POWER_MODE_11) || \ (mode == DRX_POWER_MODE_12) || \ (mode == DRX_POWER_MODE_13) || \ (mode == DRX_POWER_MODE_14) || \ (mode == DRX_POWER_MODE_15) || \ (mode == DRX_POWER_MODE_16) || \ (mode == DRX_POWER_DOWN)) /* Pin safe mode macro */ #define DRXJ_PIN_SAFE_MODE 0x0000 /*============================================================================*/ /*=== GLOBAL VARIABLEs =======================================================*/ /*============================================================================*/ /** */ /** * \brief Temporary register definitions. * (register definitions that are not yet available in register master) */ /******************************************************************************/ /* Audio block 0x103 is write only. To avoid shadowing in driver accessing */ /* RAM adresses directly. This must be READ ONLY to avoid problems. 
*/ /* Writing to the interface adresses is more than only writing the RAM */ /* locations */ /******************************************************************************/ /** * \brief RAM location of MODUS registers */ #define AUD_DEM_RAM_MODUS_HI__A 0x10204A3 #define AUD_DEM_RAM_MODUS_HI__M 0xF000 #define AUD_DEM_RAM_MODUS_LO__A 0x10204A4 #define AUD_DEM_RAM_MODUS_LO__M 0x0FFF /** * \brief RAM location of I2S config registers */ #define AUD_DEM_RAM_I2S_CONFIG1__A 0x10204B1 #define AUD_DEM_RAM_I2S_CONFIG2__A 0x10204B2 /** * \brief RAM location of DCO config registers */ #define AUD_DEM_RAM_DCO_B_HI__A 0x1020461 #define AUD_DEM_RAM_DCO_B_LO__A 0x1020462 #define AUD_DEM_RAM_DCO_A_HI__A 0x1020463 #define AUD_DEM_RAM_DCO_A_LO__A 0x1020464 /** * \brief RAM location of Threshold registers */ #define AUD_DEM_RAM_NICAM_THRSHLD__A 0x102045A #define AUD_DEM_RAM_A2_THRSHLD__A 0x10204BB #define AUD_DEM_RAM_BTSC_THRSHLD__A 0x10204A6 /** * \brief RAM location of Carrier Threshold registers */ #define AUD_DEM_RAM_CM_A_THRSHLD__A 0x10204AF #define AUD_DEM_RAM_CM_B_THRSHLD__A 0x10204B0 /** * \brief FM Matrix register fix */ #ifdef AUD_DEM_WR_FM_MATRIX__A #undef AUD_DEM_WR_FM_MATRIX__A #endif #define AUD_DEM_WR_FM_MATRIX__A 0x105006F /*============================================================================*/ /** * \brief Defines required for audio */ #define AUD_VOLUME_ZERO_DB 115 #define AUD_VOLUME_DB_MIN -60 #define AUD_VOLUME_DB_MAX 12 #define AUD_CARRIER_STRENGTH_QP_0DB 0x4000 #define AUD_CARRIER_STRENGTH_QP_0DB_LOG10T100 421 #define AUD_MAX_AVC_REF_LEVEL 15 #define AUD_I2S_FREQUENCY_MAX 48000UL #define AUD_I2S_FREQUENCY_MIN 12000UL #define AUD_RDS_ARRAY_SIZE 18 /** * \brief Needed for calculation of prescale feature in AUD */ #ifndef DRX_AUD_MAX_FM_DEVIATION #define DRX_AUD_MAX_FM_DEVIATION (100) /* kHz */ #endif /** * \brief Needed for calculation of NICAM prescale feature in AUD */ #ifndef DRX_AUD_MAX_NICAM_PRESCALE #define DRX_AUD_MAX_NICAM_PRESCALE (9) /* dB */ 
#endif /*============================================================================*/ /* Values for I2S Master/Slave pin configurations */ #define SIO_PDR_I2S_CL_CFG_MODE__MASTER 0x0004 #define SIO_PDR_I2S_CL_CFG_DRIVE__MASTER 0x0008 #define SIO_PDR_I2S_CL_CFG_MODE__SLAVE 0x0004 #define SIO_PDR_I2S_CL_CFG_DRIVE__SLAVE 0x0000 #define SIO_PDR_I2S_DA_CFG_MODE__MASTER 0x0003 #define SIO_PDR_I2S_DA_CFG_DRIVE__MASTER 0x0008 #define SIO_PDR_I2S_DA_CFG_MODE__SLAVE 0x0003 #define SIO_PDR_I2S_DA_CFG_DRIVE__SLAVE 0x0008 #define SIO_PDR_I2S_WS_CFG_MODE__MASTER 0x0004 #define SIO_PDR_I2S_WS_CFG_DRIVE__MASTER 0x0008 #define SIO_PDR_I2S_WS_CFG_MODE__SLAVE 0x0004 #define SIO_PDR_I2S_WS_CFG_DRIVE__SLAVE 0x0000 /*============================================================================*/ /*=== REGISTER ACCESS MACROS =================================================*/ /*============================================================================*/ /** * This macro is used to create byte arrays for block writes. * Block writes speed up I2C traffic between host and demod. * The macro takes care of the required byte order in a 16 bits word. * x -> lowbyte(x), highbyte(x) */ #define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \ ((u8)((((u16)x)>>8)&0xFF)) /** * This macro is used to convert byte array to 16 bit register value for block read. * Block read speed up I2C traffic between host and demod. * The macro takes care of the required byte order in a 16 bits word. 
*/ #define DRXJ_8TO16(x) ((u16) (x[0] | (x[1] << 8))) /*============================================================================*/ /*=== MISC DEFINES ===========================================================*/ /*============================================================================*/ /*============================================================================*/ /*=== HI COMMAND RELATED DEFINES =============================================*/ /*============================================================================*/ /** * \brief General maximum number of retries for ucode command interfaces */ #define DRXJ_MAX_RETRIES (100) /*============================================================================*/ /*=== STANDARD RELATED MACROS ================================================*/ /*============================================================================*/ #define DRXJ_ISATVSTD(std) ((std == DRX_STANDARD_PAL_SECAM_BG) || \ (std == DRX_STANDARD_PAL_SECAM_DK) || \ (std == DRX_STANDARD_PAL_SECAM_I) || \ (std == DRX_STANDARD_PAL_SECAM_L) || \ (std == DRX_STANDARD_PAL_SECAM_LP) || \ (std == DRX_STANDARD_NTSC) || \ (std == DRX_STANDARD_FM)) #define DRXJ_ISQAMSTD(std) ((std == DRX_STANDARD_ITU_A) || \ (std == DRX_STANDARD_ITU_B) || \ (std == DRX_STANDARD_ITU_C) || \ (std == DRX_STANDARD_ITU_D)) /*----------------------------------------------------------------------------- GLOBAL VARIABLES ----------------------------------------------------------------------------*/ /* * DRXJ DAP structures */ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr, u32 addr, u16 datasize, u8 *data, u32 flags); static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr, u32 waddr, u32 raddr, u16 wdata, u16 *rdata); static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr, u32 addr, u16 *data, u32 flags); static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr, u32 addr, u32 *data, u32 flags); static int 
drxdap_fasi_write_block(struct i2c_device_addr *dev_addr, u32 addr, u16 datasize, u8 *data, u32 flags); static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr, u32 addr, u16 data, u32 flags); static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr, u32 addr, u32 data, u32 flags); static struct drxj_data drxj_data_g = { false, /* has_lna : true if LNA (aka PGA) present */ false, /* has_oob : true if OOB supported */ false, /* has_ntsc: true if NTSC supported */ false, /* has_btsc: true if BTSC supported */ false, /* has_smatx: true if SMA_TX pin is available */ false, /* has_smarx: true if SMA_RX pin is available */ false, /* has_gpio : true if GPIO pin is available */ false, /* has_irqn : true if IRQN pin is available */ 0, /* mfx A1/A2/A... */ /* tuner settings */ false, /* tuner mirrors RF signal */ /* standard/channel settings */ DRX_STANDARD_UNKNOWN, /* current standard */ DRX_CONSTELLATION_AUTO, /* constellation */ 0, /* frequency in KHz */ DRX_BANDWIDTH_UNKNOWN, /* curr_bandwidth */ DRX_MIRROR_NO, /* mirror */ /* signal quality information: */ /* default values taken from the QAM Programming guide */ /* fec_bits_desired should not be less than 4000000 */ 4000000, /* fec_bits_desired */ 5, /* fec_vd_plen */ 4, /* qam_vd_prescale */ 0xFFFF, /* qamVDPeriod */ 204 * 8, /* fec_rs_plen annex A */ 1, /* fec_rs_prescale */ FEC_RS_MEASUREMENT_PERIOD, /* fec_rs_period */ true, /* reset_pkt_err_acc */ 0, /* pkt_err_acc_start */ /* HI configuration */ 0, /* hi_cfg_timing_div */ 0, /* hi_cfg_bridge_delay */ 0, /* hi_cfg_wake_up_key */ 0, /* hi_cfg_ctrl */ 0, /* HICfgTimeout */ /* UIO configuration */ DRX_UIO_MODE_DISABLE, /* uio_sma_rx_mode */ DRX_UIO_MODE_DISABLE, /* uio_sma_tx_mode */ DRX_UIO_MODE_DISABLE, /* uioASELMode */ DRX_UIO_MODE_DISABLE, /* uio_irqn_mode */ /* FS setting */ 0UL, /* iqm_fs_rate_ofs */ false, /* pos_image */ /* RC setting */ 0UL, /* iqm_rc_rate_ofs */ /* AUD information */ /* false, * flagSetAUDdone */ /* false, * detectedRDS 
*/ /* true, * flagASDRequest */ /* false, * flagHDevClear */ /* false, * flagHDevSet */ /* (u16) 0xFFF, * rdsLastCount */ /* ATV configuration */ 0UL, /* flags cfg changes */ /* shadow of ATV_TOP_EQU0__A */ {-5, ATV_TOP_EQU0_EQU_C0_FM, ATV_TOP_EQU0_EQU_C0_L, ATV_TOP_EQU0_EQU_C0_LP, ATV_TOP_EQU0_EQU_C0_BG, ATV_TOP_EQU0_EQU_C0_DK, ATV_TOP_EQU0_EQU_C0_I}, /* shadow of ATV_TOP_EQU1__A */ {-50, ATV_TOP_EQU1_EQU_C1_FM, ATV_TOP_EQU1_EQU_C1_L, ATV_TOP_EQU1_EQU_C1_LP, ATV_TOP_EQU1_EQU_C1_BG, ATV_TOP_EQU1_EQU_C1_DK, ATV_TOP_EQU1_EQU_C1_I}, /* shadow of ATV_TOP_EQU2__A */ {210, ATV_TOP_EQU2_EQU_C2_FM, ATV_TOP_EQU2_EQU_C2_L, ATV_TOP_EQU2_EQU_C2_LP, ATV_TOP_EQU2_EQU_C2_BG, ATV_TOP_EQU2_EQU_C2_DK, ATV_TOP_EQU2_EQU_C2_I}, /* shadow of ATV_TOP_EQU3__A */ {-160, ATV_TOP_EQU3_EQU_C3_FM, ATV_TOP_EQU3_EQU_C3_L, ATV_TOP_EQU3_EQU_C3_LP, ATV_TOP_EQU3_EQU_C3_BG, ATV_TOP_EQU3_EQU_C3_DK, ATV_TOP_EQU3_EQU_C3_I}, false, /* flag: true=bypass */ ATV_TOP_VID_PEAK__PRE, /* shadow of ATV_TOP_VID_PEAK__A */ ATV_TOP_NOISE_TH__PRE, /* shadow of ATV_TOP_NOISE_TH__A */ true, /* flag CVBS ouput enable */ false, /* flag SIF ouput enable */ DRXJ_SIF_ATTENUATION_0DB, /* current SIF att setting */ { /* qam_rf_agc_cfg */ DRX_STANDARD_ITU_B, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level */ 0xFFFF, /* max_output_level */ 0x0000, /* speed */ 0x0000, /* top */ 0x0000 /* c.o.c. */ }, { /* qam_if_agc_cfg */ DRX_STANDARD_ITU_B, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level */ 0xFFFF, /* max_output_level */ 0x0000, /* speed */ 0x0000, /* top (don't care) */ 0x0000 /* c.o.c. (don't care) */ }, { /* vsb_rf_agc_cfg */ DRX_STANDARD_8VSB, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level */ 0xFFFF, /* max_output_level */ 0x0000, /* speed */ 0x0000, /* top (don't care) */ 0x0000 /* c.o.c. 
(don't care) */ }, { /* vsb_if_agc_cfg */ DRX_STANDARD_8VSB, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level */ 0xFFFF, /* max_output_level */ 0x0000, /* speed */ 0x0000, /* top (don't care) */ 0x0000 /* c.o.c. (don't care) */ }, 0, /* qam_pga_cfg */ 0, /* vsb_pga_cfg */ { /* qam_pre_saw_cfg */ DRX_STANDARD_ITU_B, /* standard */ 0, /* reference */ false /* use_pre_saw */ }, { /* vsb_pre_saw_cfg */ DRX_STANDARD_8VSB, /* standard */ 0, /* reference */ false /* use_pre_saw */ }, /* Version information */ #ifndef _CH_ { "01234567890", /* human readable version microcode */ "01234567890" /* human readable version device specific code */ }, { { /* struct drx_version for microcode */ DRX_MODULE_UNKNOWN, (char *)(NULL), 0, 0, 0, (char *)(NULL) }, { /* struct drx_version for device specific code */ DRX_MODULE_UNKNOWN, (char *)(NULL), 0, 0, 0, (char *)(NULL) } }, { { /* struct drx_version_list for microcode */ (struct drx_version *) (NULL), (struct drx_version_list *) (NULL) }, { /* struct drx_version_list for device specific code */ (struct drx_version *) (NULL), (struct drx_version_list *) (NULL) } }, #endif false, /* smart_ant_inverted */ /* Tracking filter setting for OOB */ { 12000, 9300, 6600, 5280, 3700, 3000, 2000, 0}, false, /* oob_power_on */ 0, /* mpeg_ts_static_bitrate */ false, /* disable_te_ihandling */ false, /* bit_reverse_mpeg_outout */ DRXJ_MPEGOUTPUT_CLOCK_RATE_AUTO, /* mpeg_output_clock_rate */ DRXJ_MPEG_START_WIDTH_1CLKCYC, /* mpeg_start_width */ /* Pre SAW & Agc configuration for ATV */ { DRX_STANDARD_NTSC, /* standard */ 7, /* reference */ true /* use_pre_saw */ }, { /* ATV RF-AGC */ DRX_STANDARD_NTSC, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level (d.c.) */ 0, /* max_output_level (d.c.) 
*/ 3, /* speed */ 9500, /* top */ 4000 /* cut-off current */ }, { /* ATV IF-AGC */ DRX_STANDARD_NTSC, /* standard */ DRX_AGC_CTRL_AUTO, /* ctrl_mode */ 0, /* output_level */ 0, /* min_output_level (d.c.) */ 0, /* max_output_level (d.c.) */ 3, /* speed */ 2400, /* top */ 0 /* c.o.c. (d.c.) */ }, 140, /* ATV PGA config */ 0, /* curr_symbol_rate */ false, /* pdr_safe_mode */ SIO_PDR_GPIO_CFG__PRE, /* pdr_safe_restore_val_gpio */ SIO_PDR_VSYNC_CFG__PRE, /* pdr_safe_restore_val_v_sync */ SIO_PDR_SMA_RX_CFG__PRE, /* pdr_safe_restore_val_sma_rx */ SIO_PDR_SMA_TX_CFG__PRE, /* pdr_safe_restore_val_sma_tx */ 4, /* oob_pre_saw */ DRXJ_OOB_LO_POW_MINUS10DB, /* oob_lo_pow */ { false /* aud_data, only first member */ }, }; /** * \var drxj_default_addr_g * \brief Default I2C address and device identifier. */ static struct i2c_device_addr drxj_default_addr_g = { DRXJ_DEF_I2C_ADDR, /* i2c address */ DRXJ_DEF_DEMOD_DEV_ID /* device id */ }; /** * \var drxj_default_comm_attr_g * \brief Default common attributes of a drxj demodulator instance. 
*/ static struct drx_common_attr drxj_default_comm_attr_g = { NULL, /* ucode file */ true, /* ucode verify switch */ {0}, /* version record */ 44000, /* IF in kHz in case no tuner instance is used */ (151875 - 0), /* system clock frequency in kHz */ 0, /* oscillator frequency kHz */ 0, /* oscillator deviation in ppm, signed */ false, /* If true mirror frequency spectrum */ { /* MPEG output configuration */ true, /* If true, enable MPEG ouput */ false, /* If true, insert RS byte */ false, /* If true, parallel out otherwise serial */ false, /* If true, invert DATA signals */ false, /* If true, invert ERR signal */ false, /* If true, invert STR signals */ false, /* If true, invert VAL signals */ false, /* If true, invert CLK signals */ true, /* If true, static MPEG clockrate will be used, otherwise clockrate will adapt to the bitrate of the TS */ 19392658UL, /* Maximum bitrate in b/s in case static clockrate is selected */ DRX_MPEG_STR_WIDTH_1 /* MPEG Start width in clock cycles */ }, /* Initilisations below can be omitted, they require no user input and are initialy 0, NULL or false. The compiler will initialize them to these values when omitted. 
*/ false, /* is_opened */ /* SCAN */ NULL, /* no scan params yet */ 0, /* current scan index */ 0, /* next scan frequency */ false, /* scan ready flag */ 0, /* max channels to scan */ 0, /* nr of channels scanned */ NULL, /* default scan function */ NULL, /* default context pointer */ 0, /* millisec to wait for demod lock */ DRXJ_DEMOD_LOCK, /* desired lock */ false, /* Power management */ DRX_POWER_UP, /* Tuner */ 1, /* nr of I2C port to wich tuner is */ 0L, /* minimum RF input frequency, in kHz */ 0L, /* maximum RF input frequency, in kHz */ false, /* Rf Agc Polarity */ false, /* If Agc Polarity */ false, /* tuner slow mode */ { /* current channel (all 0) */ 0UL /* channel.frequency */ }, DRX_STANDARD_UNKNOWN, /* current standard */ DRX_STANDARD_UNKNOWN, /* previous standard */ DRX_STANDARD_UNKNOWN, /* di_cache_standard */ false, /* use_bootloader */ 0UL, /* capabilities */ 0 /* mfx */ }; /** * \var drxj_default_demod_g * \brief Default drxj demodulator instance. */ static struct drx_demod_instance drxj_default_demod_g = { &drxj_default_addr_g, /* i2c address & device id */ &drxj_default_comm_attr_g, /* demod common attributes */ &drxj_data_g /* demod device specific attributes */ }; /** * \brief Default audio data structure for DRK demodulator instance. * * This structure is DRXK specific. 
 *
 */
static struct drx_aud_data drxj_default_aud_data_g = {
	false,			/* audio_is_active */
	DRX_AUD_STANDARD_AUTO,	/* audio_standard */
	/* i2sdata */
	{
	 false,			/* output_enable */
	 48000,			/* frequency */
	 DRX_I2S_MODE_MASTER,	/* mode */
	 DRX_I2S_WORDLENGTH_32,	/* word_length */
	 DRX_I2S_POLARITY_RIGHT,	/* polarity */
	 DRX_I2S_FORMAT_WS_WITH_DATA	/* format */
	 },
	/* volume */
	{
	 true,			/* mute; */
	 0,			/* volume */
	 DRX_AUD_AVC_OFF,	/* avc_mode */
	 0,			/* avc_ref_level */
	 DRX_AUD_AVC_MAX_GAIN_12DB,	/* avc_max_gain */
	 DRX_AUD_AVC_MAX_ATTEN_24DB,	/* avc_max_atten */
	 0,			/* strength_left */
	 0			/* strength_right */
	 },
	DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_ON,	/* auto_sound */
	/* ass_thresholds */
	{
	 440,			/* A2 */
	 12,			/* BTSC */
	 700,			/* NICAM */
	 },
	/* carrier */
	{
	 /* a */
	 {
	  42,			/* thres */
	  DRX_NO_CARRIER_NOISE,	/* opt */
	  0,			/* shift */
	  0			/* dco */
	  },
	 /* b */
	 {
	  42,			/* thres */
	  DRX_NO_CARRIER_MUTE,	/* opt */
	  0,			/* shift */
	  0			/* dco */
	  },
	 },
	/* mixer */
	{
	 DRX_AUD_SRC_STEREO_OR_A,	/* source_i2s */
	 DRX_AUD_I2S_MATRIX_STEREO,	/* matrix_i2s */
	 DRX_AUD_FM_MATRIX_SOUND_A	/* matrix_fm */
	 },
	DRX_AUD_DEVIATION_NORMAL,	/* deviation */
	DRX_AUD_AVSYNC_OFF,	/* av_sync */
	/* prescale */
	{
	 DRX_AUD_MAX_FM_DEVIATION,	/* fm_deviation */
	 DRX_AUD_MAX_NICAM_PRESCALE	/* nicam_gain */
	 },
	DRX_AUD_FM_DEEMPH_75US,	/* deemph */
	DRX_BTSC_STEREO,	/* btsc_detect */
	0,			/* rds_data_counter */
	false			/* rds_data_present */
};

/*-----------------------------------------------------------------------------
STRUCTURES
----------------------------------------------------------------------------*/

/* Snapshot of the equalizer state: mean-square error plus raw
 * mode/control/status register words. */
struct drxjeq_stat {
	u16 eq_mse;
	u8 eq_mode;
	u8 eq_ctrl;
	u8 eq_stat;
};

/* HI command: opcode plus up to six 16-bit parameters, passed as one
 * unit to the host interface command handler. */
struct drxj_hi_cmd {
	u16 cmd;
	u16 param1;
	u16 param2;
	u16 param3;
	u16 param4;
	u16 param5;
	u16 param6;
};

/*============================================================================*/
/*=== MICROCODE RELATED STRUCTURES ===========================================*/
/*============================================================================*/

/**
 * struct drxu_code_block_hdr - Structure of the microcode block headers
 *
 * @addr:	Destination address of the data in this block
 * @size:	Size of the block data following this header counted in
 *		16 bits words
 * @flags:	Block flags; bit 0 (DRX_UCODE_CRC_FLAG) indicates that
 *		@CRC holds a valid checksum for the block data
 * @CRC:	CRC value of the data block, only valid if CRC flag is
 *		set.
 */
struct drxu_code_block_hdr {
	u32 addr;
	u16 size;
	u16 flags;
	u16 CRC;
};

/*-----------------------------------------------------------------------------
FUNCTIONS
----------------------------------------------------------------------------*/

/* Some prototypes */
static int hi_command(struct i2c_device_addr *dev_addr,
		      const struct drxj_hi_cmd *cmd, u16 *result);

static int
ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_stat);

static int
ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode);

static int power_down_aud(struct drx_demod_instance *demod);

static int
ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw *pre_saw);

static int
ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain *afe_gain);

/*============================================================================*/
/*============================================================================*/
/*==                          HELPER FUNCTIONS                             ==*/
/*============================================================================*/
/*============================================================================*/

/*============================================================================*/

/*
 * \fn u32 frac28(u32 N, u32 D)
 * \brief Compute: (1<<28)*N/D
 * \param N 32 bits
 * \param D 32 bits
 * \return (1<<28)*N/D
 * This function is used to avoid floating-point calculations as they may
 * not be present on the target platform.
 * frac28 performs an unsigned 28/28 bits division to 32-bit fixed point
 * fraction used for setting the Frequency Shifter registers.
 * N and D can hold numbers up to width: 28-bits.
* The 4 bits integer part and the 28 bits fractional part are calculated.
 * Usage condition: ((1<<28)*n)/d < ((1<<32)-1) => (n/d) < 15.999
 *
 * N: 0...(1<<28)-1 = 268435454
 * D: 0...(1<<28)-1
 * Q: 0...(1<<32)-1
 */
static u32 frac28(u32 N, u32 D)
{
	int i = 0;
	u32 Q1 = 0;
	u32 R0 = 0;

	R0 = (N % D) << 4;	/* 32-28 == 4 shifts possible at max */
	Q1 = N / D;		/* integer part, only the 4 least significant
				 * bits will be visible in the result */

	/* division using radix 16, 7 nibbles in the result */
	for (i = 0; i < 7; i++) {
		Q1 = (Q1 << 4) | R0 / D;
		R0 = (R0 % D) << 4;
	}
	/* rounding */
	if ((R0 >> 3) >= D)
		Q1++;

	return Q1;
}

/**
 * \fn u32 log1_times100( u32 x)
 * \brief Compute: 100*log10(x)
 * \param x 32 bits
 * \return 100*log10(x)
 *
 * 100*log10(x)
 * = 100*(log2(x)/log2(10)))
 * = (100*(2^15)*log2(x))/((2^15)*log2(10))
 * = ((200*(2^15)*log2(x))/((2^15)*log2(10)))/2
 * = ((200*(2^15)*(log2(x/y)+log2(y)))/((2^15)*log2(10)))/2
 * = ((200*(2^15)*log2(x/y))+(200*(2^15)*log2(y)))/((2^15)*log2(10)))/2
 *
 * where y = 2^k and 1<= (x/y) < 2
 */
static u32 log1_times100(u32 x)
{
	static const u8 scale = 15;
	static const u8 index_width = 5;
	/*
	   log2lut[n] = (1<<scale) * 200 * log2( 1.0 + ( (1.0/(1<<INDEXWIDTH)) * n ))
	   0 <= n < ((1<<INDEXWIDTH)+1)
	 */
	static const u32 log2lut[] = {
		0,		/* 0.000000 */
		290941,		/* 290941.300628 */
		573196,		/* 573196.476418 */
		847269,		/* 847269.179851 */
		1113620,	/* 1113620.489452 */
		1372674,	/* 1372673.576986 */
		1624818,	/* 1624817.752104 */
		1870412,	/* 1870411.981536 */
		2109788,	/* 2109787.962654 */
		2343253,	/* 2343252.817465 */
		2571091,	/* 2571091.461923 */
		2793569,	/* 2793568.696416 */
		3010931,	/* 3010931.055901 */
		3223408,	/* 3223408.452106 */
		3431216,	/* 3431215.635215 */
		3634553,	/* 3634553.498355 */
		3833610,	/* 3833610.244726 */
		4028562,	/* 4028562.434393 */
		4219576,	/* 4219575.925308 */
		4406807,	/* 4406806.721144 */
		4590402,	/* 4590401.736809 */
		4770499,	/* 4770499.491025 */
		4947231,	/* 4947230.734179 */
		5120719,	/* 5120719.018555 */
		5291081,	/* 5291081.217197 */
		5458428,	/* 5458427.996830 */
		5622864,	/* 5622864.249668 */
		5784489,	/* 5784489.488298 */
		5943398,	/* 5943398.207380 */
		6099680,	/* 6099680.215452 */
		6253421,	/* 6253420.939751 */
		6404702,	/* 6404701.706649 */
		6553600,	/* 6553600.000000 */
	};
	u8 i = 0;
	u32 y = 0;
	u32 d = 0;
	u32 k = 0;
	u32 r = 0;

	if (x == 0)
		return 0;

	/* Scale x (normalize) */
	/* computing y in log(x/y) = log(x) - log(y) */
	if ((x & (((u32) (-1)) << (scale + 1))) == 0) {
		for (k = scale; k > 0; k--) {
			if (x & (((u32) 1) << scale))
				break;
			x <<= 1;
		}
	} else {
		for (k = scale; k < 31; k++) {
			if ((x & (((u32) (-1)) << (scale + 1))) == 0)
				break;
			x >>= 1;
		}
	}
	/*
	   Now x has binary point between bit[scale] and bit[scale-1]
	   and 1.0 <= x < 2.0 */

	/* correction for division: log(x) = log(x/y)+log(y) */
	y = k * ((((u32) 1) << scale) * 200);

	/* remove integer part */
	x &= ((((u32) 1) << scale) - 1);
	/* get index */
	i = (u8) (x >> (scale - index_width));
	/* compute delta (x-a) */
	d = x & ((((u32) 1) << (scale - index_width)) - 1);
	/* compute log, multiplication ( d* (.. )) must be within range ! */
	y += log2lut[i] +
	    ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - index_width));
	/* Convert to log10() */
	y /= 108853;		/* (log2(10) << scale) */
	r = (y >> 1);
	/* rounding */
	if (y & ((u32)1))
		r++;

	return r;
}

/**
 * \fn u32 frac_times1e6( u32 N, u32 D)
 * \brief Compute: (N/D) * 1000000.
 * \param N numerator, 32-bits.
 * \param D denominator 32-bits.
 * \return u32
 * \retval ((N/D) * 1000000), 32 bits
 *
 * No check on D=0!
 */
static u32 frac_times1e6(u32 N, u32 D)
{
	u32 remainder = 0;
	u32 frac = 0;

	/*
	   frac = (N * 1000000) / D
	   To let it fit in a 32 bits computation:
	   frac = (N * (1000000 >> 4)) / (D >> 4)
	   This would result in a problem in case D < 16 (div by 0).
	   So we do it more elaborate as shown below.
*/
	frac = (((u32) N) * (1000000 >> 4)) / D;
	frac <<= 4;
	remainder = (((u32) N) * (1000000 >> 4)) % D;
	remainder <<= 4;
	frac += remainder / D;
	remainder = remainder % D;
	/* round to nearest: bump result when the leftover fraction >= 0.5 */
	if ((remainder * 2) > D)
		frac++;

	return frac;
}

/*============================================================================*/

/**
 * \brief Values for NICAM prescaler gain. Computed from dB to integer
 * and rounded. For calc used formula: 16*10^(prescaleGain[dB]/20).
 *
 */
#if 0
/* Currently, unused as we lack support for analog TV */
static const u16 nicam_presc_table_val[43] = {
	1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6,
	6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23, 25, 28, 32, 36,
	40, 45, 51, 57, 64, 71, 80, 90, 101, 113, 127
};
#endif

/*============================================================================*/
/*==                        END HELPER FUNCTIONS                           ==*/
/*============================================================================*/

/*============================================================================*/
/*============================================================================*/
/*==                      DRXJ DAP FUNCTIONS                               ==*/
/*============================================================================*/
/*============================================================================*/

/*
   This layer takes care of some device specific register access protocols:
   -conversion to short address format
   -access to audio block
   This layer is placed between the drx_dap_fasi and the rest of the drxj
   specific implementation. This layer can use address map knowledge whereas
   dap_fasi may not use memory map knowledge.

   * For audio currently only 16 bits read and write register access is
     supported. More is not needed. RMW and 32 or 8 bit access on audio
     registers will have undefined behaviour. Flags (RMW, CRC reset, broadcast
     single/multi master) will be ignored.

   TODO: check ignoring single/multimaster is ok for AUD access ?
*/

/* True when bit 16 of the FASI address is set (audio register space) */
#define DRXJ_ISAUDWRITE(addr) (((((addr)>>16)&1) == 1) ? true : false)
#define DRXJ_DAP_AUDTRIF_TIMEOUT 80	/* millisec */
/*============================================================================*/

/**
 * \fn bool is_handled_by_aud_tr_if( u32 addr )
 * \brief Check if this address is handled by the audio token ring interface.
 * \param addr
 * \return bool
 * \retval true Yes, handled by audio token ring interface
 * \retval false No, not handled by audio token ring interface
 *
 */
static bool is_handled_by_aud_tr_if(u32 addr)
{
	bool retval = false;

	/* Audio token-ring registers live in FASI block 4, banks 2..5 */
	if ((DRXDAP_FASI_ADDR2BLOCK(addr) == 4) &&
	    (DRXDAP_FASI_ADDR2BANK(addr) > 1) &&
	    (DRXDAP_FASI_ADDR2BANK(addr) < 6)) {
		retval = true;
	}

	return retval;
}

/*============================================================================*/

/*
 * Combined I2C write/read transfer helper.
 *
 * w_dev_addr/wData/w_count describe an optional write message and
 * r_dev_addr/r_data/r_count an optional read message. A NULL w_dev_addr
 * performs a read-only transfer, a NULL r_dev_addr a write-only transfer;
 * otherwise both messages go out in one combined i2c transaction.
 *
 * Returns 0 on success (also when the adapter pointer is missing, which is
 * only logged), -EREMOTEIO when the i2c transfer fails.
 *
 * NOTE(review): assumes at least one of w_dev_addr/r_dev_addr is non-NULL
 * and that ->user_data points to a struct drx39xxj_state — confirm with
 * callers.
 */
int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
			  u16 w_count,
			  u8 *wData,
			  struct i2c_device_addr *r_dev_addr,
			  u16 r_count, u8 *r_data)
{
	struct drx39xxj_state *state;
	struct i2c_msg msg[2];
	unsigned int num_msgs;

	if (w_dev_addr == NULL) {
		/* Read only */
		state = r_dev_addr->user_data;
		msg[0].addr = r_dev_addr->i2c_addr >> 1;
		msg[0].flags = I2C_M_RD;
		msg[0].buf = r_data;
		msg[0].len = r_count;
		num_msgs = 1;
	} else if (r_dev_addr == NULL) {
		/* Write only */
		state = w_dev_addr->user_data;
		msg[0].addr = w_dev_addr->i2c_addr >> 1;
		msg[0].flags = 0;
		msg[0].buf = wData;
		msg[0].len = w_count;
		num_msgs = 1;
	} else {
		/* Both write and read */
		state = w_dev_addr->user_data;
		msg[0].addr = w_dev_addr->i2c_addr >> 1;
		msg[0].flags = 0;
		msg[0].buf = wData;
		msg[0].len = w_count;
		msg[1].addr = r_dev_addr->i2c_addr >> 1;
		msg[1].flags = I2C_M_RD;
		msg[1].buf = r_data;
		msg[1].len = r_count;
		num_msgs = 2;
	}

	if (state->i2c == NULL) {
		pr_err("i2c was zero, aborting\n");
		return 0;
	}
	if (i2c_transfer(state->i2c, msg, num_msgs) != num_msgs) {
		pr_warn("drx3933: I2C write/read failed\n");
		return -EREMOTEIO;
	}

#ifdef DJH_DEBUG
	if (w_dev_addr == NULL || r_dev_addr == NULL)
		return 0;

	state = w_dev_addr->user_data;

	if (state->i2c == NULL)
		return 0;

	msg[0].addr = w_dev_addr->i2c_addr;
	msg[0].flags = 0;
	msg[0].buf = wData;
	msg[0].len = w_count;
	msg[1].addr = r_dev_addr->i2c_addr;
	msg[1].flags = I2C_M_RD;
	msg[1].buf = r_data;
	msg[1].len = r_count;
	num_msgs = 2;

	pr_debug("drx3933 i2c operation addr=%x i2c=%p, wc=%x rc=%x\n",
	       w_dev_addr->i2c_addr, state->i2c, w_count, r_count);

	if (i2c_transfer(state->i2c, msg, 2) != 2) {
		pr_warn("drx3933: I2C write/read failed\n");
		return -EREMOTEIO;
	}
#endif
	return 0;
}

/*============================================================================*/

/******************************
*
* int drxdap_fasi_read_block (
*      struct i2c_device_addr *dev_addr,      -- address of I2C device
*      u32 addr,         -- address of chip register/memory
*      u16            datasize,     -- number of bytes to read
*      u8 *data,         -- data to receive
*      u32 flags)        -- special device flags
*
* Read block data from chip address. Because the chip is word oriented,
* the number of bytes to read must be even.
*
* Make sure that the buffer to receive the data is large enough.
*
* Although this function expects an even number of bytes, it is still byte
* oriented, and the data read back is NOT translated to the endianness of
* the target platform.
*
* Output:
* - 0     if reading was successful
*                  in that case: data read is in *data.
* - -EIO  if anything went wrong
*
******************************/
static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
				  u32 addr,
				  u16 datasize, u8 *data, u32 flags)
{
	u8 buf[4];
	u16 bufx;
	int rc;
	u16 overhead_size = 0;

	/* Check parameters ******************************************************* */
	if (dev_addr == NULL)
		return -EINVAL;
	/* Bytes of I2C addressing overhead per chunk: 1 or 2 slave-address
	   bytes plus a 2- or 4-byte register address */
	overhead_size = (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1) +
	    (DRXDAP_FASI_LONG_FORMAT(addr) ?
4 : 2);

	if ((DRXDAP_FASI_OFFSET_TOO_LARGE(addr)) ||
	    ((!(DRXDAPFASI_LONG_ADDR_ALLOWED)) &&
	     DRXDAP_FASI_LONG_FORMAT(addr)) ||
	    (overhead_size > (DRXDAP_MAX_WCHUNKSIZE)) ||
	    ((datasize != 0) && (data == NULL)) || ((datasize & 1) == 1)) {
		return -EINVAL;
	}

	/* ReadModifyWrite & mode flag bits are not allowed */
	flags &= (~DRXDAP_FASI_RMW & ~DRXDAP_FASI_MODEFLAGS);
#if DRXDAP_SINGLE_MASTER
	flags |= DRXDAP_FASI_SINGLE_MASTER;
#endif

	/* Read block from I2C **************************************************** */
	do {
		/* Read at most DRXDAP_MAX_RCHUNKSIZE bytes per transaction */
		u16 todo = (datasize < DRXDAP_MAX_RCHUNKSIZE ?
			      datasize : DRXDAP_MAX_RCHUNKSIZE);

		bufx = 0;

		addr &= ~DRXDAP_FASI_FLAGS;
		addr |= flags;

#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
		/* short format address preferred but long format otherwise */
		if (DRXDAP_FASI_LONG_FORMAT(addr)) {
#endif
#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
			buf[bufx++] = (u8) (((addr << 1) & 0xFF) | 0x01);
			buf[bufx++] = (u8) ((addr >> 16) & 0xFF);
			buf[bufx++] = (u8) ((addr >> 24) & 0xFF);
			buf[bufx++] = (u8) ((addr >> 7) & 0xFF);
#endif
#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
		} else {
#endif
#if (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1)
			buf[bufx++] = (u8) ((addr << 1) & 0xFF);
			buf[bufx++] =
			    (u8) (((addr >> 16) & 0x0F) |
				  ((addr >> 18) & 0xF0));
#endif
#if ((DRXDAPFASI_LONG_ADDR_ALLOWED == 1) && (DRXDAPFASI_SHORT_ADDR_ALLOWED == 1))
		}
#endif

#if DRXDAP_SINGLE_MASTER
		/*
		 * In single master mode, split the read and write actions.
		 * No special action is needed for write chunks here.
		 */
		rc = drxbsp_i2c_write_read(dev_addr,
					   bufx, buf, NULL, 0, NULL);
		if (rc == 0)
			rc = drxbsp_i2c_write_read(NULL, 0, NULL, dev_addr,
						   todo, data);
#else
		/* In multi master mode, do everything in one RW action */
		rc = drxbsp_i2c_write_read(dev_addr, bufx, buf, dev_addr,
					   todo, data);
#endif
		data += todo;
		/* the chip is word oriented: advance address by words read */
		addr += (todo >> 1);
		datasize -= todo;
	} while (datasize && rc == 0);

	return rc;
}

/******************************
*
* int drxdap_fasi_read_reg16 (
*     struct i2c_device_addr *dev_addr, -- address of I2C device
*     u32 addr,    -- address of chip register/memory
*     u16 *data,    -- data to receive
*     u32 flags)   -- special device flags
*
* Read one 16-bit register or memory location. The data received back is
* converted back to the target platform's endianness.
*
* Output:
* - 0     if reading was successful
*                  in that case: read data is at *data
* - -EIO  if anything went wrong
*
******************************/
static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
				  u32 addr, u16 *data, u32 flags)
{
	u8 buf[sizeof(*data)];
	int rc;

	if (!data)
		return -EINVAL;

	rc = drxdap_fasi_read_block(dev_addr, addr, sizeof(*data), buf, flags);
	/* assemble little-endian wire bytes into a host-order u16 */
	*data = buf[0] + (((u16) buf[1]) << 8);
	return rc;
}

/******************************
*
* int drxdap_fasi_read_reg32 (
*     struct i2c_device_addr *dev_addr, -- address of I2C device
*     u32 addr,    -- address of chip register/memory
*     u32 *data,    -- data to receive
*     u32 flags)   -- special device flags
*
* Read one 32-bit register or memory location. The data received back is
* converted back to the target platform's endianness.
*
* Output:
* - 0    if reading was successful
*        in that case: read data is at *data
* - -EIO if anything went wrong
*
******************************/
static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
				  u32 addr, u32 *data, u32 flags)
{
	u8 buf[sizeof(*data)];
	int rc;

	if (!data)
		return -EINVAL;

	rc = drxdap_fasi_read_block(dev_addr, addr, sizeof(*data), buf, flags);
	/* device data is little endian */
	*data = (((u32) buf[0]) << 0) +
	    (((u32) buf[1]) << 8) +
	    (((u32) buf[2]) << 16) + (((u32) buf[3]) << 24);
	return rc;
}

/******************************
*
* int drxdap_fasi_write_block (
*      struct i2c_device_addr *dev_addr, -- address of I2C device
*      u32 addr,                         -- address of chip register/memory
*      u16 datasize,                     -- number of bytes to write
*      u8 *data,                         -- data to send
*      u32 flags)                        -- special device flags
*
* Write block data to chip address. Because the chip is word oriented,
* the number of bytes to write must be even.
*
* Although this function expects an even number of bytes, it is still byte
* oriented, and the data being written is NOT translated from the endianness of
* the target platform.
*
* Output:
* - 0    if writing was successful
* - -EIO if anything went wrong
*
******************************/
static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
				   u32 addr,
				   u16 datasize, u8 *data, u32 flags)
{
	u8 buf[DRXDAP_MAX_WCHUNKSIZE];	/* chip address + data chunk */
	int st = -EIO;
	int first_err = 0;	/* first error seen; all chunks are attempted */
	u16 overhead_size = 0;
	u16 block_size = 0;

	/* Check parameters ******************************************************* */
	if (dev_addr == NULL)
		return -EINVAL;

	/* I2C slave address (1 or 2 bytes) + chip address (2 or 4 bytes) */
	overhead_size = (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1) +
	    (DRXDAP_FASI_LONG_FORMAT(addr) ? 4 : 2);

	/* datasize must be even: the device is 16-bit word oriented */
	if ((DRXDAP_FASI_OFFSET_TOO_LARGE(addr)) ||
	    ((!(DRXDAPFASI_LONG_ADDR_ALLOWED)) &&
	     DRXDAP_FASI_LONG_FORMAT(addr)) ||
	    (overhead_size > (DRXDAP_MAX_WCHUNKSIZE)) ||
	    ((datasize != 0) && (data == NULL)) || ((datasize & 1) == 1))
		return -EINVAL;

	flags &= DRXDAP_FASI_FLAGS;
	flags &= ~DRXDAP_FASI_MODEFLAGS;
#if DRXDAP_SINGLE_MASTER
	flags |= DRXDAP_FASI_SINGLE_MASTER;
#endif

	/* Write block to I2C ***************************************************** */
	/* payload bytes per chunk, rounded down to an even count */
	block_size = ((DRXDAP_MAX_WCHUNKSIZE) - overhead_size) & ~1;
	do {
		u16 todo = 0;
		u16 bufx = 0;

		/* Buffer device address */
		addr &= ~DRXDAP_FASI_FLAGS;
		addr |= flags;
#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
		/* short format address preferred but long format otherwise */
		if (DRXDAP_FASI_LONG_FORMAT(addr)) {
#endif
#if ((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1)
			/* long (4 byte) chip-address format, bit 0 set */
			buf[bufx++] = (u8) (((addr << 1) & 0xFF) | 0x01);
			buf[bufx++] = (u8) ((addr >> 16) & 0xFF);
			buf[bufx++] = (u8) ((addr >> 24) & 0xFF);
			buf[bufx++] = (u8) ((addr >> 7) & 0xFF);
#endif
#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
		} else {
#endif
#if ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1)
			/* short (2 byte) chip-address format */
			buf[bufx++] = (u8) ((addr << 1) & 0xFF);
			buf[bufx++] =
			    (u8) (((addr >> 16) & 0x0F) |
				  ((addr >> 18) & 0xF0));
#endif
#if (((DRXDAPFASI_LONG_ADDR_ALLOWED) == 1) && ((DRXDAPFASI_SHORT_ADDR_ALLOWED) == 1))
		}
#endif

		/*
		   In single master mode block_size can be 0. In such a case this I2C
		   sequence will be visible: (1) write address {i2c addr,
		   4 bytes chip address} (2) write data {i2c addr, 4 bytes data }
		   (3) write address (4) write data etc...
		   Address must be rewritten because HI is reset after data transport and
		   expects an address.
		 */
		todo = (block_size < datasize ? block_size : datasize);
		if (todo == 0) {
			u16 overhead_size_i2c_addr = 0;
			u16 data_block_size = 0;

			overhead_size_i2c_addr =
			    (IS_I2C_10BIT(dev_addr->i2c_addr) ? 2 : 1);
			data_block_size =
			    (DRXDAP_MAX_WCHUNKSIZE - overhead_size_i2c_addr) & ~1;

			/* write device address */
			st = drxbsp_i2c_write_read(dev_addr,
						   (u16) (bufx),
						   buf,
						   (struct i2c_device_addr *)(NULL),
						   0, (u8 *)(NULL));

			if ((st != 0) && (first_err == 0)) {
				/* at the end, return the first error encountered */
				first_err = st;
			}
			bufx = 0;
			todo =
			    (data_block_size <
			     datasize ? data_block_size : datasize);
		}
		memcpy(&buf[bufx], data, todo);
		/* write (address if can do and) data */
		st = drxbsp_i2c_write_read(dev_addr,
					   (u16) (bufx + todo),
					   buf,
					   (struct i2c_device_addr *)(NULL),
					   0, (u8 *)(NULL));

		if ((st != 0) && (first_err == 0)) {
			/* at the end, return the first error encountered */
			first_err = st;
		}
		datasize -= todo;
		data += todo;
		addr += (todo >> 1);	/* chip address counts 16-bit words */
	} while (datasize);

	return first_err;
}

/******************************
*
* int drxdap_fasi_write_reg16 (
*     struct i2c_device_addr *dev_addr, -- address of I2C device
*     u32 addr,                         -- address of chip register/memory
*     u16 data,                         -- data to send
*     u32 flags)                        -- special device flags
*
* Write one 16-bit register or memory location. The data being written is
* converted from the target platform's endianness to little endian.
*
* Output:
* - 0    if writing was successful
* - -EIO if anything went wrong
*
******************************/
static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
				   u32 addr, u16 data, u32 flags)
{
	u8 buf[sizeof(data)];

	/* device expects little-endian data */
	buf[0] = (u8) ((data >> 0) & 0xFF);
	buf[1] = (u8) ((data >> 8) & 0xFF);

	return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
}

/******************************
*
* int drxdap_fasi_read_modify_write_reg16 (
*      struct i2c_device_addr *dev_addr, -- address of I2C device
*      u32 waddr,                        -- address of chip register/memory
*      u32 raddr,                        -- chip address to read back from
*      u16 wdata,                        -- data to send
*      u16 *rdata)                       -- data to receive back
*
* Write 16-bit data, then read back the original contents of that location.
* Requires long addressing format to be allowed.
*
* Before sending data, the data is converted to little endian. The
* data received back is converted back to the target platform's endianness.
*
* WARNING: This function is only guaranteed to work if there is one
* master on the I2C bus.
*
* Output:
* - 0    if reading was successful
*        in that case: read back data is at *rdata
* - -EIO if anything went wrong
*
******************************/
static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
					       u32 waddr,
					       u32 raddr,
					       u16 wdata, u16 *rdata)
{
	int rc = -EIO;

#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
	if (rdata == NULL)
		return -EINVAL;

	/* the RMW flag makes the write latch the old register contents */
	rc = drxdap_fasi_write_reg16(dev_addr, waddr, wdata, DRXDAP_FASI_RMW);
	if (rc == 0)
		rc = drxdap_fasi_read_reg16(dev_addr, raddr, rdata, 0);
#endif

	return rc;
}

/******************************
*
* int drxdap_fasi_write_reg32 (
*     struct i2c_device_addr *dev_addr, -- address of I2C device
*     u32 addr,                         -- address of chip register/memory
*     u32 data,                         -- data to send
*     u32 flags)                        -- special device flags
*
* Write one 32-bit register or memory location. The data being written is
* converted from the target platform's endianness to little endian.
*
* Output:
* - 0    if writing was successful
* - -EIO if anything went wrong
*
******************************/
static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
				   u32 addr, u32 data, u32 flags)
{
	u8 buf[sizeof(data)];

	/* device expects little-endian data */
	buf[0] = (u8) ((data >> 0) & 0xFF);
	buf[1] = (u8) ((data >> 8) & 0xFF);
	buf[2] = (u8) ((data >> 16) & 0xFF);
	buf[3] = (u8) ((data >> 24) & 0xFF);

	return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
}

/*============================================================================*/

/**
* \fn int drxj_dap_rm_write_reg16short
* \brief Read modify write 16 bits audio register using short format only.
* \param dev_addr
* \param waddr   Address to write to
* \param raddr   Address to read from (usually SIO_HI_RA_RAM_S0_RMWBUF__A)
* \param wdata   Data to write
* \param rdata   Buffer for data to read
* \return int
* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register read modify write access using short addressing format only.
* Requires knowledge of the registermap, thus device dependent.
* Using DAP FASI directly to avoid endless recursion of RMWs to audio registers.
*
*/
/* TODO correct define should be
   #if ( DRXDAPFASI_SHORT_ADDR_ALLOWED==1 )
   See comments drxj_dap_read_modify_write_reg16 */
#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 0)
static int drxj_dap_rm_write_reg16short(struct i2c_device_addr *dev_addr,
					      u32 waddr,
					      u32 raddr,
					      u16 wdata, u16 *rdata)
{
	int rc;

	if (rdata == NULL)
		return -EINVAL;

	/* Set RMW flag */
	rc = drxdap_fasi_write_reg16(dev_addr,
				     SIO_HI_RA_RAM_S0_FLG_ACC__A,
				     SIO_HI_RA_RAM_S0_FLG_ACC_S0_RWM__M,
				     0x0000);
	if (rc == 0) {
		/* Write new data: triggers RMW */
		rc = drxdap_fasi_write_reg16(dev_addr, waddr, wdata, 0x0000);
	}
	if (rc == 0) {
		/* Read old data */
		rc = drxdap_fasi_read_reg16(dev_addr, raddr, rdata, 0x0000);
	}
	if (rc == 0) {
		/* Reset RMW flag */
		rc = drxdap_fasi_write_reg16(dev_addr,
					     SIO_HI_RA_RAM_S0_FLG_ACC__A,
					     0, 0x0000);
	}

	return rc;
}
#endif

/*============================================================================*/

/* Dispatch a 16-bit read-modify-write to the long- or short-address variant. */
static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
					    u32 waddr,
					    u32 raddr,
					    u16 wdata, u16 *rdata)
{
	/* TODO: correct short/long addressing format decision,
	   now long format has higher prio then short because short also
	   needs virt bnks (not impl yet) for certain audio registers */
#if (DRXDAPFASI_LONG_ADDR_ALLOWED == 1)
	return drxdap_fasi_read_modify_write_reg16(dev_addr,
						   waddr, raddr, wdata, rdata);
#else
	return drxj_dap_rm_write_reg16short(dev_addr, waddr, raddr, wdata, rdata);
#endif
}

/*============================================================================*/

/**
* \fn int drxj_dap_read_aud_reg16
* \brief Read 16 bits audio register
* \param dev_addr
* \param addr
* \param data
* \return int
* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register read access via audio token ring interface.
*
*/
static int drxj_dap_read_aud_reg16(struct i2c_device_addr *dev_addr,
				   u32 addr, u16 *data)
{
	u32 start_timer = 0;
	u32 current_timer = 0;
	u32 delta_timer = 0;
	u16 tr_status = 0;
	int stat = -EIO;

	/* No read possible for bank 3, return with error */
	if (DRXDAP_FASI_ADDR2BANK(addr) == 3) {
		stat = -EINVAL;
	} else {
		const u32 write_bit = ((dr_xaddr_t) 1) << 16;

		/* Force reset write bit */
		addr &= (~write_bit);

		/* Set up read */
		start_timer = jiffies_to_msecs(jiffies);
		do {
			/* RMW to aud TR IF until request is granted or timeout */
			stat = drxj_dap_read_modify_write_reg16(dev_addr,
								addr,
								SIO_HI_RA_RAM_S0_RMWBUF__A,
								0x0000,
								&tr_status);
			if (stat != 0)
				break;

			current_timer = jiffies_to_msecs(jiffies);
			delta_timer = current_timer - start_timer;
			if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
				stat = -EIO;
				break;
			}

		} while (((tr_status & AUD_TOP_TR_CTR_FIFO_LOCK__M) ==
			  AUD_TOP_TR_CTR_FIFO_LOCK_LOCKED) ||
			 ((tr_status & AUD_TOP_TR_CTR_FIFO_FULL__M) ==
			  AUD_TOP_TR_CTR_FIFO_FULL_FULL));
	}			/* if ( DRXDAP_FASI_ADDR2BANK(addr)!=3 ) */

	/* Wait for read ready status or timeout */
	if (stat == 0) {
		start_timer = jiffies_to_msecs(jiffies);

		while ((tr_status & AUD_TOP_TR_CTR_FIFO_RD_RDY__M) !=
		       AUD_TOP_TR_CTR_FIFO_RD_RDY_READY) {
			stat = drxj_dap_read_reg16(dev_addr,
						   AUD_TOP_TR_CTR__A,
						   &tr_status, 0x0000);
			if (stat != 0)
				break;

			current_timer = jiffies_to_msecs(jiffies);
			delta_timer = current_timer - start_timer;
			if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
				stat = -EIO;
				break;
			}
		}		/* while ( ... ) */
	}

	/* Read value */
	if (stat == 0)
		stat = drxj_dap_read_modify_write_reg16(dev_addr,
							AUD_TOP_TR_RD_REG__A,
							SIO_HI_RA_RAM_S0_RMWBUF__A,
							0x0000, data);
	return stat;
}

/*============================================================================*/

/* 16-bit register read; audio registers are routed via the token ring IF. */
static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
			       u32 addr, u16 *data, u32 flags)
{
	int stat = -EIO;

	/* Check param */
	if ((dev_addr == NULL) || (data == NULL))
		return -EINVAL;

	if (is_handled_by_aud_tr_if(addr))
		stat = drxj_dap_read_aud_reg16(dev_addr, addr, data);
	else
		stat = drxdap_fasi_read_reg16(dev_addr, addr, data, flags);

	return stat;
}

/*============================================================================*/

/**
* \fn int drxj_dap_write_aud_reg16
* \brief Write 16 bits audio register
* \param dev_addr
* \param addr
* \param data
* \return int
* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register write access via audio token ring interface.
*
*/
static int drxj_dap_write_aud_reg16(struct i2c_device_addr *dev_addr,
				    u32 addr, u16 data)
{
	int stat = -EIO;

	/* No write possible for bank 2, return with error */
	if (DRXDAP_FASI_ADDR2BANK(addr) == 2) {
		stat = -EINVAL;
	} else {
		u32 start_timer = 0;
		u32 current_timer = 0;
		u32 delta_timer = 0;
		u16 tr_status = 0;
		const u32 write_bit = ((dr_xaddr_t) 1) << 16;

		/* Force write bit */
		addr |= write_bit;
		start_timer = jiffies_to_msecs(jiffies);
		do {
			/* RMW to aud TR IF until request is granted or timeout */
			stat = drxj_dap_read_modify_write_reg16(dev_addr,
								addr,
								SIO_HI_RA_RAM_S0_RMWBUF__A,
								data,
								&tr_status);
			if (stat != 0)
				break;

			current_timer = jiffies_to_msecs(jiffies);
			delta_timer = current_timer - start_timer;
			if (delta_timer > DRXJ_DAP_AUDTRIF_TIMEOUT) {
				stat = -EIO;
				break;
			}

		} while (((tr_status & AUD_TOP_TR_CTR_FIFO_LOCK__M) ==
			  AUD_TOP_TR_CTR_FIFO_LOCK_LOCKED) ||
			 ((tr_status & AUD_TOP_TR_CTR_FIFO_FULL__M) ==
			  AUD_TOP_TR_CTR_FIFO_FULL_FULL));
	}			/* if ( DRXDAP_FASI_ADDR2BANK(addr)!=2 ) */

	return stat;
}
/*============================================================================*/ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr, u32 addr, u16 data, u32 flags) { int stat = -EIO; /* Check param */ if (dev_addr == NULL) return -EINVAL; if (is_handled_by_aud_tr_if(addr)) stat = drxj_dap_write_aud_reg16(dev_addr, addr, data); else stat = drxdap_fasi_write_reg16(dev_addr, addr, data, flags); return stat; } /*============================================================================*/ /* Free data ram in SIO HI */ #define SIO_HI_RA_RAM_USR_BEGIN__A 0x420040 #define SIO_HI_RA_RAM_USR_END__A 0x420060 #define DRXJ_HI_ATOMIC_BUF_START (SIO_HI_RA_RAM_USR_BEGIN__A) #define DRXJ_HI_ATOMIC_BUF_END (SIO_HI_RA_RAM_USR_BEGIN__A + 7) #define DRXJ_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ #define DRXJ_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE /** * \fn int drxj_dap_atomic_read_write_block() * \brief Basic access routine for atomic read or write access * \param dev_addr pointer to i2c dev address * \param addr destination/source address * \param datasize size of data buffer in bytes * \param data pointer to data buffer * \return int * \retval 0 Succes * \retval -EIO Timeout, I2C error, illegal bank * */ static int drxj_dap_atomic_read_write_block(struct i2c_device_addr *dev_addr, u32 addr, u16 datasize, u8 *data, bool read_flag) { struct drxj_hi_cmd hi_cmd; int rc; u16 word; u16 dummy = 0; u16 i = 0; /* Parameter check */ if (!data || !dev_addr || ((datasize % 2)) || ((datasize / 2) > 8)) return -EINVAL; /* Set up HI parameters to read or write n bytes */ hi_cmd.cmd = SIO_HI_RA_RAM_CMD_ATOMIC_COPY; hi_cmd.param1 = (u16) ((DRXDAP_FASI_ADDR2BLOCK(DRXJ_HI_ATOMIC_BUF_START) << 6) + DRXDAP_FASI_ADDR2BANK(DRXJ_HI_ATOMIC_BUF_START)); hi_cmd.param2 = (u16) DRXDAP_FASI_ADDR2OFFSET(DRXJ_HI_ATOMIC_BUF_START); hi_cmd.param3 = (u16) ((datasize / 2) - 1); if (!read_flag) hi_cmd.param3 |= DRXJ_HI_ATOMIC_WRITE; else hi_cmd.param3 |= DRXJ_HI_ATOMIC_READ; hi_cmd.param4 
= (u16) ((DRXDAP_FASI_ADDR2BLOCK(addr) << 6) + DRXDAP_FASI_ADDR2BANK(addr)); hi_cmd.param5 = (u16) DRXDAP_FASI_ADDR2OFFSET(addr); if (!read_flag) { /* write data to buffer */ for (i = 0; i < (datasize / 2); i++) { word = ((u16) data[2 * i]); word += (((u16) data[(2 * i) + 1]) << 8); drxj_dap_write_reg16(dev_addr, (DRXJ_HI_ATOMIC_BUF_START + i), word, 0); } } rc = hi_command(dev_addr, &hi_cmd, &dummy); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (read_flag) { /* read data from buffer */ for (i = 0; i < (datasize / 2); i++) { drxj_dap_read_reg16(dev_addr, (DRXJ_HI_ATOMIC_BUF_START + i), &word, 0); data[2 * i] = (u8) (word & 0xFF); data[(2 * i) + 1] = (u8) (word >> 8); } } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int drxj_dap_atomic_read_reg32() * \brief Atomic read of 32 bits words */ static int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr, u32 addr, u32 *data, u32 flags) { u8 buf[sizeof(*data)] = { 0 }; int rc = -EIO; u32 word = 0; if (!data) return -EINVAL; rc = drxj_dap_atomic_read_write_block(dev_addr, addr, sizeof(*data), buf, true); if (rc < 0) return 0; word = (u32) buf[3]; word <<= 8; word |= (u32) buf[2]; word <<= 8; word |= (u32) buf[1]; word <<= 8; word |= (u32) buf[0]; *data = word; return rc; } /*============================================================================*/ /*============================================================================*/ /*== END DRXJ DAP FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /*============================================================================*/ /*== HOST INTERFACE FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /** * \fn int 
hi_cfg_command() * \brief Configure HI with settings stored in the demod structure. * \param demod Demodulator. * \return int. * * This routine was created because to much orthogonal settings have * been put into one HI API function (configure). Especially the I2C bridge * enable/disable should not need re-configuration of the HI. * */ static int hi_cfg_command(const struct drx_demod_instance *demod) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); struct drxj_hi_cmd hi_cmd; u16 result = 0; int rc; ext_attr = (struct drxj_data *) demod->my_ext_attr; hi_cmd.cmd = SIO_HI_RA_RAM_CMD_CONFIG; hi_cmd.param1 = SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY; hi_cmd.param2 = ext_attr->hi_cfg_timing_div; hi_cmd.param3 = ext_attr->hi_cfg_bridge_delay; hi_cmd.param4 = ext_attr->hi_cfg_wake_up_key; hi_cmd.param5 = ext_attr->hi_cfg_ctrl; hi_cmd.param6 = ext_attr->hi_cfg_transmit; rc = hi_command(demod->my_i2c_dev_addr, &hi_cmd, &result); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Reset power down flag (set one call only) */ ext_attr->hi_cfg_ctrl &= (~(SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ)); return 0; rw_error: return rc; } /** * \fn int hi_command() * \brief Configure HI with settings stored in the demod structure. * \param dev_addr I2C address. * \param cmd HI command. * \param result HI command result. * \return int. 
* * Sends command to HI * */ static int hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 *result) { u16 wait_cmd = 0; u16 nr_retries = 0; bool powerdown_cmd = false; int rc; /* Write parameters */ switch (cmd->cmd) { case SIO_HI_RA_RAM_CMD_CONFIG: case SIO_HI_RA_RAM_CMD_ATOMIC_COPY: rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_6__A, cmd->param6, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_5__A, cmd->param5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_4__A, cmd->param4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_3__A, cmd->param3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case SIO_HI_RA_RAM_CMD_BRDCTRL: rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_1__A, cmd->param1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case SIO_HI_RA_RAM_CMD_NULL: /* No parameters */ break; default: return -EINVAL; break; } /* Write command */ rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_CMD__A, cmd->cmd, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if ((cmd->cmd) == SIO_HI_RA_RAM_CMD_RESET) msleep(1); /* Detect power down to ommit reading result */ powerdown_cmd = (bool) ((cmd->cmd == SIO_HI_RA_RAM_CMD_CONFIG) && (((cmd-> param5) & SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) == SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ)); if (!powerdown_cmd) { /* Wait until command rdy */ do { nr_retries++; if (nr_retries > DRXJ_MAX_RETRIES) { pr_err("timeout\n"); goto rw_error; } rc = drxj_dap_read_reg16(dev_addr, SIO_HI_RA_RAM_CMD__A, &wait_cmd, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } while (wait_cmd != 0); /* 
Read result */ rc = drxj_dap_read_reg16(dev_addr, SIO_HI_RA_RAM_RES__A, result, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } /* if ( powerdown_cmd == true ) */ return 0; rw_error: return rc; } /** * \fn int init_hi( const struct drx_demod_instance *demod ) * \brief Initialise and configurate HI. * \param demod pointer to demod data. * \return int Return status. * \retval 0 Success. * \retval -EIO Failure. * * Needs to know Psys (System Clock period) and Posc (Osc Clock period) * Need to store configuration in driver because of the way I2C * bridging is controlled. * */ static int init_hi(const struct drx_demod_instance *demod) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL); struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL); int rc; ext_attr = (struct drxj_data *) demod->my_ext_attr; common_attr = (struct drx_common_attr *) demod->my_common_attr; dev_addr = demod->my_i2c_dev_addr; /* PATCH for bug 5003, HI ucode v3.1.0 */ rc = drxj_dap_write_reg16(dev_addr, 0x4301D7, 0x801, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Timing div, 250ns/Psys */ /* Timing div, = ( delay (nano seconds) * sysclk (kHz) )/ 1000 */ ext_attr->hi_cfg_timing_div = (u16) ((common_attr->sys_clock_freq / 1000) * HI_I2C_DELAY) / 1000; /* Clipping */ if ((ext_attr->hi_cfg_timing_div) > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M) ext_attr->hi_cfg_timing_div = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M; /* Bridge delay, uses oscilator clock */ /* Delay = ( delay (nano seconds) * oscclk (kHz) )/ 1000 */ /* SDA brdige delay */ ext_attr->hi_cfg_bridge_delay = (u16) ((common_attr->osc_clock_freq / 1000) * HI_I2C_BRIDGE_DELAY) / 1000; /* Clipping */ if ((ext_attr->hi_cfg_bridge_delay) > SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M) ext_attr->hi_cfg_bridge_delay = SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M; /* SCL bridge delay, same as SDA for now */ ext_attr->hi_cfg_bridge_delay += 
((ext_attr->hi_cfg_bridge_delay) << SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B); /* Wakeup key, setting the read flag (as suggest in the documentation) does not always result into a working solution (barebones worked VI2C failed). Not setting the bit works in all cases . */ ext_attr->hi_cfg_wake_up_key = DRXJ_WAKE_UP_KEY; /* port/bridge/power down ctrl */ ext_attr->hi_cfg_ctrl = (SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE); /* transit mode time out delay and watch dog divider */ ext_attr->hi_cfg_transmit = SIO_HI_RA_RAM_PAR_6__PRE; rc = hi_cfg_command(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*============================================================================*/ /*== END HOST INTERFACE FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /*============================================================================*/ /*== AUXILIARY FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /** * \fn int get_device_capabilities() * \brief Get and store device capabilities. * \param demod Pointer to demodulator instance. * \return int. 
* \return 0 Success
* \retval -EIO Failure
*
* Depending on pulldowns on MDx pins the following internals are set:
*  * common_attr->osc_clock_freq
*  * ext_attr->has_lna
*  * ext_attr->has_ntsc
*  * ext_attr->has_btsc
*  * ext_attr->has_oob
*
*/
static int get_device_capabilities(struct drx_demod_instance *demod)
{
	struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);
	struct drxj_data *ext_attr = (struct drxj_data *) NULL;
	struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
	u16 sio_pdr_ohw_cfg = 0;
	u32 sio_top_jtagid_lo = 0;
	u16 bid = 0;
	int rc;

	common_attr = (struct drx_common_attr *) demod->my_common_attr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;
	dev_addr = demod->my_i2c_dev_addr;

	/* SIO registers are protected; unlock with the key, read, re-lock */
	rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_read_reg16(dev_addr, SIO_PDR_OHW_CFG__A, &sio_pdr_ohw_cfg, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* oscillator frequency is strapped via the FREF_SEL pins */
	switch ((sio_pdr_ohw_cfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {
	case 0:
		/* ignore (bypass ?) */
		break;
	case 1:
		/* 27 MHz */
		common_attr->osc_clock_freq = 27000;
		break;
	case 2:
		/* 20.25 MHz */
		common_attr->osc_clock_freq = 20250;
		break;
	case 3:
		/* 4 MHz */
		common_attr->osc_clock_freq = 4000;
		break;
	default:
		return -EIO;
	}

	/*
	   Determine device capabilities
	   Based on pinning v47
	 */
	rc = drxdap_fasi_read_reg32(dev_addr, SIO_TOP_JTAGID_LO__A, &sio_top_jtagid_lo, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* metal-fix revision lives in the top nibble of the JTAG id */
	ext_attr->mfx = (u8) ((sio_top_jtagid_lo >> 29) & 0xF);

	/* device variant id selects the feature set */
	switch ((sio_top_jtagid_lo >> 12) & 0xFF) {
	case 0x31:
		/* board id is read from UIO pins, key-protected */
		rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_read_reg16(dev_addr, SIO_PDR_UIO_IN_HI__A, &bid, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		bid = (bid >> 10) & 0xf;
		rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}

		ext_attr->has_lna = true;
		ext_attr->has_ntsc = false;
		ext_attr->has_btsc = false;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = false;
		ext_attr->has_gpio = false;
		ext_attr->has_irqn = false;
		break;
	case 0x33:
		ext_attr->has_lna = false;
		ext_attr->has_ntsc = false;
		ext_attr->has_btsc = false;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = false;
		ext_attr->has_gpio = false;
		ext_attr->has_irqn = false;
		break;
	case 0x45:
		ext_attr->has_lna = true;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = false;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = false;
		break;
	case 0x46:
		ext_attr->has_lna = false;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = false;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = false;
		break;
	case 0x41:
		ext_attr->has_lna = true;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = true;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = false;
		break;
	case 0x43:
		ext_attr->has_lna = false;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = true;
		ext_attr->has_oob = false;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = false;
		break;
	case 0x32:
		ext_attr->has_lna = true;
		ext_attr->has_ntsc = false;
		ext_attr->has_btsc = false;
		ext_attr->has_oob = true;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = true;
		break;
	case 0x34:
		ext_attr->has_lna = false;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = true;
		ext_attr->has_oob = true;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = true;
		break;
	case 0x42:
		ext_attr->has_lna = true;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = true;
		ext_attr->has_oob = true;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = true;
		break;
	case 0x44:
		ext_attr->has_lna = false;
		ext_attr->has_ntsc = true;
		ext_attr->has_btsc = true;
		ext_attr->has_oob = true;
		ext_attr->has_smatx = true;
		ext_attr->has_smarx = true;
		ext_attr->has_gpio = true;
		ext_attr->has_irqn = true;
		break;
	default:
		/* Unknown device variant */
		return -EIO;
		break;
	}

	return 0;

rw_error:
	return rc;
}

/**
* \fn int power_up_device()
* \brief Power up device.
* \param demod  Pointer to demodulator instance.
* \return int.
* \return 0 Success * \retval -EIO Failure, I2C or max retries reached * */ #ifndef DRXJ_MAX_RETRIES_POWERUP #define DRXJ_MAX_RETRIES_POWERUP 10 #endif static int power_up_device(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL); u8 data = 0; u16 retry_count = 0; struct i2c_device_addr wake_up_addr; dev_addr = demod->my_i2c_dev_addr; wake_up_addr.i2c_addr = DRXJ_WAKE_UP_KEY; wake_up_addr.i2c_dev_id = dev_addr->i2c_dev_id; wake_up_addr.user_data = dev_addr->user_data; /* * I2C access may fail in this case: no ack * dummy write must be used to wake uop device, dummy read must be used to * reset HI state machine (avoiding actual writes) */ do { data = 0; drxbsp_i2c_write_read(&wake_up_addr, 1, &data, (struct i2c_device_addr *)(NULL), 0, (u8 *)(NULL)); msleep(10); retry_count++; } while ((drxbsp_i2c_write_read ((struct i2c_device_addr *) (NULL), 0, (u8 *)(NULL), dev_addr, 1, &data) != 0) && (retry_count < DRXJ_MAX_RETRIES_POWERUP)); /* Need some recovery time .... */ msleep(10); if (retry_count == DRXJ_MAX_RETRIES_POWERUP) return -EIO; return 0; } /*----------------------------------------------------------------------------*/ /* MPEG Output Configuration Functions - begin */ /*----------------------------------------------------------------------------*/ /** * \fn int ctrl_set_cfg_mpeg_output() * \brief Set MPEG output configuration of the device. * \param devmod Pointer to demodulator instance. * \param cfg_data Pointer to mpeg output configuaration. * \return int. * * Configure MPEG output parameters. 
 *
 * Programs the FEC output-capture block (FEC_OC_*) for the currently
 * selected standard and, via the SIO pin-data registers, switches the
 * MPEG TS pads between output mode (enable) and input mode (disable).
 */
static int
ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod,
                         struct drx_cfg_mpeg_output *cfg_data)
{
    struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);
    struct drxj_data *ext_attr = (struct drxj_data *) (NULL);
    struct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);
    int rc;
    u16 fec_oc_reg_mode = 0;        /* FEC_OC_MODE shadow */
    u16 fec_oc_reg_ipr_mode = 0;    /* FEC_OC_IPR_MODE shadow */
    u16 fec_oc_reg_ipr_invert = 0;  /* FEC_OC_IPR_INVERT shadow */
    u32 max_bit_rate = 0;
    u32 rcn_rate = 0;
    u32 nr_bits = 0;                /* bits per QAM symbol */
    u16 sio_pdr_md_cfg = 0;
    /* data mask for the output data byte */
    u16 invert_data_mask =
        FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M |
        FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M |
        FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M |
        FEC_OC_IPR_INVERT_MD1__M | FEC_OC_IPR_INVERT_MD0__M;

    /* check arguments */
    if ((demod == NULL) || (cfg_data == NULL))
        return -EINVAL;

    dev_addr = demod->my_i2c_dev_addr;
    ext_attr = (struct drxj_data *) demod->my_ext_attr;
    common_attr = (struct drx_common_attr *) demod->my_common_attr;

    if (cfg_data->enable_mpeg_output == true) {
        /* quick and dirty patch to set MPEG in case current std is
           not producing MPEG */
        switch (ext_attr->standard) {
        case DRX_STANDARD_8VSB:
        case DRX_STANDARD_ITU_A:
        case DRX_STANDARD_ITU_B:
        case DRX_STANDARD_ITU_C:
            break;
        default:
            return 0;
        }

        rc = drxj_dap_write_reg16(dev_addr, FEC_OC_OCR_INVERT__A, 0, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Per-standard output-capture timing/averaging parameters. */
        switch (ext_attr->standard) {
        case DRX_STANDARD_8VSB:
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, 7, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }    /* 2048 bytes fifo ram */
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_CTL_UPD_RATE__A, 10, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_INT_UPD_RATE__A, 10, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_A__A, 5, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_B__A, 7, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, 10, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            /* Low Water Mark for synchronization  */
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_LWM__A, 3, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            /* High Water Mark for synchronization */
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_HWM__A, 5, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            break;
        case DRX_STANDARD_ITU_A:
        case DRX_STANDARD_ITU_C:
            switch (ext_attr->constellation) {
            case DRX_CONSTELLATION_QAM256:
                nr_bits = 8;
                break;
            case DRX_CONSTELLATION_QAM128:
                nr_bits = 7;
                break;
            case DRX_CONSTELLATION_QAM64:
                nr_bits = 6;
                break;
            case DRX_CONSTELLATION_QAM32:
                nr_bits = 5;
                break;
            case DRX_CONSTELLATION_QAM16:
                nr_bits = 4;
                break;
            default:
                return -EIO;
            }    /* ext_attr->constellation */
            /* max_bit_rate = symbol_rate * nr_bits * coef */
            /* coef = 188/204 (the /204 is applied when rcn_rate is
               computed further down) */
            max_bit_rate = (ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
            /* pass through b/c Annex A/C need following settings */
        case DRX_STANDARD_ITU_B:
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_CTL_UPD_RATE__A, FEC_OC_TMD_CTL_UPD_RATE__PRE, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_TMD_INT_UPD_RATE__A, 5, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_A__A, FEC_OC_AVR_PARM_A__PRE, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_AVR_PARM_B__A, FEC_OC_AVR_PARM_B__PRE, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            if (cfg_data->static_clk == true) {
                rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, 0xD, 0);
                if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            } else {
                rc = drxj_dap_write_reg16(dev_addr, FEC_OC_RCN_GAIN__A, FEC_OC_RCN_GAIN__PRE, 0);
                if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_LWM__A, 2, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_HWM__A, 12, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            break;
        default:
            break;
        }    /* switch (standard) */

        /* Check insertion of the Reed-Solomon parity bytes */
        rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_read_reg16(dev_addr, FEC_OC_IPR_MODE__A, &fec_oc_reg_ipr_mode, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        if (cfg_data->insert_rs_byte == true) {
            /* enable parity symbol forward */
            fec_oc_reg_mode |= FEC_OC_MODE_PARITY__M;
            /* MVAL disable during parity bytes */
            fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M;
            switch (ext_attr->standard) {
            case DRX_STANDARD_8VSB:
                rcn_rate = 0x004854D3;
                break;
            case DRX_STANDARD_ITU_B:
                fec_oc_reg_mode |= FEC_OC_MODE_TRANSPARENT__M;
                switch (ext_attr->constellation) {
                case DRX_CONSTELLATION_QAM256:
                    rcn_rate = 0x008945E7;
                    break;
                case DRX_CONSTELLATION_QAM64:
                    rcn_rate = 0x005F64D4;
                    break;
                default:
                    return -EIO;
                }
                break;
            case DRX_STANDARD_ITU_A:
            case DRX_STANDARD_ITU_C:
                /* insert_rs_byte = true -> coef = 188/188 -> 1,
                   RS bits are in MPEG output */
                rcn_rate =
                    (frac28
                     (max_bit_rate,
                      (u32) (common_attr->sys_clock_freq / 8))) / 188;
                break;
            default:
                return -EIO;
            }    /* ext_attr->standard */
        } else {    /* insert_rs_byte == false */
            /* disable parity symbol forward */
            fec_oc_reg_mode &= (~FEC_OC_MODE_PARITY__M);
            /* MVAL enable during parity bytes */
            fec_oc_reg_ipr_mode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
            switch (ext_attr->standard) {
            case DRX_STANDARD_8VSB:
                rcn_rate = 0x0041605C;
                break;
            case DRX_STANDARD_ITU_B:
                fec_oc_reg_mode &= (~FEC_OC_MODE_TRANSPARENT__M);
                switch (ext_attr->constellation) {
                case DRX_CONSTELLATION_QAM256:
                    rcn_rate = 0x0082D6A0;
                    break;
                case DRX_CONSTELLATION_QAM64:
                    rcn_rate = 0x005AEC1A;
                    break;
                default:
                    return -EIO;
                }
                break;
            case DRX_STANDARD_ITU_A:
            case DRX_STANDARD_ITU_C:
                /* insert_rs_byte = false -> coef = 188/204,
                   RS bits not in MPEG output */
                rcn_rate =
                    (frac28
                     (max_bit_rate,
                      (u32) (common_attr->sys_clock_freq / 8))) / 204;
                break;
            default:
                return -EIO;
            }    /* ext_attr->standard */
        }

        if (cfg_data->enable_parallel == true) {
            /* MPEG data output is parallel -> clear ipr_mode[0] */
            fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
        } else {
            /* MPEG data output is serial -> set ipr_mode[0] */
            fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_SERIAL__M;
        }

        /* Control selective inversion of output bits */
        if (cfg_data->invert_data == true)
            fec_oc_reg_ipr_invert |= invert_data_mask;
        else
            fec_oc_reg_ipr_invert &= (~(invert_data_mask));

        if (cfg_data->invert_err == true)
            fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MERR__M;
        else
            fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MERR__M));

        if (cfg_data->invert_str == true)
            fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MSTRT__M;
        else
            fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));

        if (cfg_data->invert_val == true)
            fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MVAL__M;
        else
            fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MVAL__M));

        if (cfg_data->invert_clk == true)
            fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MCLK__M;
        else
            fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MCLK__M));

        if (cfg_data->static_clk == true) {    /* Static mode */
            u32 dto_rate = 0;
            u32 bit_rate = 0;
            u16 fec_oc_dto_burst_len = 0;
            u16 fec_oc_dto_period = 0;

            fec_oc_dto_burst_len = FEC_OC_DTO_BURST_LEN__PRE;

            /* Per-standard DTO period / burst length. */
            switch (ext_attr->standard) {
            case DRX_STANDARD_8VSB:
                fec_oc_dto_period = 4;
                if (cfg_data->insert_rs_byte == true)
                    fec_oc_dto_burst_len = 208;
                break;
            case DRX_STANDARD_ITU_A:
                {
                    u32 symbol_rate_th = 6400000;

                    if (cfg_data->insert_rs_byte == true) {
                        fec_oc_dto_burst_len = 204;
                        symbol_rate_th = 5900000;
                    }
                    if (ext_attr->curr_symbol_rate >= symbol_rate_th) {
                        fec_oc_dto_period = 0;
                    } else {
                        fec_oc_dto_period = 1;
                    }
                }
                break;
            case DRX_STANDARD_ITU_B:
                fec_oc_dto_period = 1;
                if (cfg_data->insert_rs_byte == true)
                    fec_oc_dto_burst_len = 128;
                break;
            case DRX_STANDARD_ITU_C:
                fec_oc_dto_period = 1;
                if (cfg_data->insert_rs_byte == true)
                    fec_oc_dto_burst_len = 204;
                break;
            default:
                return -EIO;
            }
            bit_rate = common_attr->sys_clock_freq * 1000 / (fec_oc_dto_period + 2);
            dto_rate = frac28(bit_rate, common_attr->sys_clock_freq * 1000);
            dto_rate >>= 3;
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_RATE_HI__A,
                                      (u16)((dto_rate >> 16) & FEC_OC_DTO_RATE_HI__M), 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_RATE_LO__A,
                                      (u16)(dto_rate & FEC_OC_DTO_RATE_LO_RATE_LO__M), 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_MODE__A,
                                      FEC_OC_DTO_MODE_DYNAMIC__M | FEC_OC_DTO_MODE_OFFSET_ENABLE__M, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_MODE__A,
                                      FEC_OC_FCT_MODE_RAT_ENA__M | FEC_OC_FCT_MODE_VIRT_ENA__M, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_BURST_LEN__A, fec_oc_dto_burst_len, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            if (ext_attr->mpeg_output_clock_rate != DRXJ_MPEGOUTPUT_CLOCK_RATE_AUTO)
                fec_oc_dto_period = ext_attr->mpeg_output_clock_rate - 1;
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_PERIOD__A, fec_oc_dto_period, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        } else {    /* Dynamic mode */
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DTO_MODE__A, FEC_OC_DTO_MODE_DYNAMIC__M, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_MODE__A, 0, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        }
        rc = drxdap_fasi_write_reg32(dev_addr, FEC_OC_RCN_CTL_RATE_LO__A, rcn_rate, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }

        /* Write appropriate registers with requested configuration */
        rc = drxj_dap_write_reg16(dev_addr, FEC_OC_MODE__A, fec_oc_reg_mode, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_MODE__A, fec_oc_reg_ipr_mode, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_INVERT__A, fec_oc_reg_ipr_invert, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }

        /* enabling for both parallel and serial now */
        /* Write magic word to enable pdr reg write */
        rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Set MPEG TS pads to output mode */
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MSTRT_CFG__A, 0x0013, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MERR_CFG__A, 0x0013, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MCLK_CFG__A,
                                  MPEG_OUTPUT_CLK_DRIVE_STRENGTH << SIO_PDR_MCLK_CFG_DRIVE__B
                                  | 0x03 << SIO_PDR_MCLK_CFG_MODE__B, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MVAL_CFG__A, 0x0013, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        sio_pdr_md_cfg =
            MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH << SIO_PDR_MD0_CFG_DRIVE__B
            | 0x03 << SIO_PDR_MD0_CFG_MODE__B;
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, sio_pdr_md_cfg, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        if (cfg_data->enable_parallel == true) {
            /* MPEG data output is parallel ->
               set MD1 to MD7 to output mode */
            sio_pdr_md_cfg =
                MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH << SIO_PDR_MD0_CFG_DRIVE__B
                | 0x03 << SIO_PDR_MD0_CFG_MODE__B;
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, sio_pdr_md_cfg, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        } else {
            /* MPEG data output is serial ->
               set MD1 to MD7 to tri-state */
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
            rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, 0x0000, 0);
            if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        }
        /* Enable Monitor Bus output over MPEG pads and ctl input */
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MON_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Write no-magic word to disable pdr reg write */
        rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    } else {
        /* Write magic word to enable pdr reg write */
        rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Set MPEG TS pads to input mode */
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MSTRT_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MERR_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MCLK_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MVAL_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD0_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD1_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD2_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD3_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD4_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD5_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD6_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MD7_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Enable Monitor Bus output over MPEG pads and ctl input */
        rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_MON_CFG__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* Write no-magic word to disable pdr reg write */
        rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    }

    /* save values for restore after re-acquire */
    common_attr->mpeg_cfg.enable_mpeg_output = cfg_data->enable_mpeg_output;

    return 0;
rw_error:
    return rc;
}

/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/

/* MPEG Output Configuration Functions - end */
/*----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------*/
/* miscellaneous configurations - begin                                       */
/*----------------------------------------------------------------------------*/

/**
* \fn int set_mpegtei_handling()
* \brief Activate MPEG TEI handling settings.
* \param devmod Pointer to demodulator instance.
* \return int.
* * This routine should be called during a set channel of QAM/VSB * */ static int set_mpegtei_handling(struct drx_demod_instance *demod) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL); int rc; u16 fec_oc_dpr_mode = 0; u16 fec_oc_snc_mode = 0; u16 fec_oc_ems_mode = 0; dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; rc = drxj_dap_read_reg16(dev_addr, FEC_OC_DPR_MODE__A, &fec_oc_dpr_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_read_reg16(dev_addr, FEC_OC_EMS_MODE__A, &fec_oc_ems_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* reset to default, allow TEI bit to be changed */ fec_oc_dpr_mode &= (~FEC_OC_DPR_MODE_ERR_DISABLE__M); fec_oc_snc_mode &= (~(FEC_OC_SNC_MODE_ERROR_CTL__M | FEC_OC_SNC_MODE_CORR_DISABLE__M)); fec_oc_ems_mode &= (~FEC_OC_EMS_MODE_MODE__M); if (ext_attr->disable_te_ihandling) { /* do not change TEI bit */ fec_oc_dpr_mode |= FEC_OC_DPR_MODE_ERR_DISABLE__M; fec_oc_snc_mode |= FEC_OC_SNC_MODE_CORR_DISABLE__M | ((0x2) << (FEC_OC_SNC_MODE_ERROR_CTL__B)); fec_oc_ems_mode |= ((0x01) << (FEC_OC_EMS_MODE_MODE__B)); } rc = drxj_dap_write_reg16(dev_addr, FEC_OC_DPR_MODE__A, fec_oc_dpr_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_OC_EMS_MODE__A, fec_oc_ems_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*----------------------------------------------------------------------------*/ /** * \fn int bit_reverse_mpeg_output() * \brief Set MPEG output bit-endian settings. 
* \param devmod Pointer to demodulator instance. * \return int. * * This routine should be called during a set channel of QAM/VSB * */ static int bit_reverse_mpeg_output(struct drx_demod_instance *demod) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL); int rc; u16 fec_oc_ipr_mode = 0; dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; rc = drxj_dap_read_reg16(dev_addr, FEC_OC_IPR_MODE__A, &fec_oc_ipr_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* reset to default (normal bit order) */ fec_oc_ipr_mode &= (~FEC_OC_IPR_MODE_REVERSE_ORDER__M); if (ext_attr->bit_reverse_mpeg_outout) fec_oc_ipr_mode |= FEC_OC_IPR_MODE_REVERSE_ORDER__M; rc = drxj_dap_write_reg16(dev_addr, FEC_OC_IPR_MODE__A, fec_oc_ipr_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*----------------------------------------------------------------------------*/ /** * \fn int set_mpeg_start_width() * \brief Set MPEG start width. * \param devmod Pointer to demodulator instance. * \return int. 
* * This routine should be called during a set channel of QAM/VSB * */ static int set_mpeg_start_width(struct drx_demod_instance *demod) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL); struct drx_common_attr *common_attr = (struct drx_common_attr *) NULL; int rc; u16 fec_oc_comm_mb = 0; dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; common_attr = demod->my_common_attr; if ((common_attr->mpeg_cfg.static_clk == true) && (common_attr->mpeg_cfg.enable_parallel == false)) { rc = drxj_dap_read_reg16(dev_addr, FEC_OC_COMM_MB__A, &fec_oc_comm_mb, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } fec_oc_comm_mb &= ~FEC_OC_COMM_MB_CTL_ON; if (ext_attr->mpeg_start_width == DRXJ_MPEG_START_WIDTH_8CLKCYC) fec_oc_comm_mb |= FEC_OC_COMM_MB_CTL_ON; rc = drxj_dap_write_reg16(dev_addr, FEC_OC_COMM_MB__A, fec_oc_comm_mb, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } return 0; rw_error: return rc; } /*----------------------------------------------------------------------------*/ /* miscellaneous configurations - end */ /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ /* UIO Configuration Functions - begin */ /*----------------------------------------------------------------------------*/ /** * \fn int ctrl_set_uio_cfg() * \brief Configure modus oprandi UIO. * \param demod Pointer to demodulator instance. * \param uio_cfg Pointer to a configuration setting for a certain UIO. * \return int. 
*/ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg *uio_cfg) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); int rc; if ((uio_cfg == NULL) || (demod == NULL)) return -EINVAL; ext_attr = (struct drxj_data *) demod->my_ext_attr; /* Write magic word to enable pdr reg write */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } switch (uio_cfg->uio) { /*====================================================================*/ case DRX_UIO1: /* DRX_UIO1: SMA_TX UIO-1 */ if (!ext_attr->has_smatx) return -EIO; switch (uio_cfg->mode) { case DRX_UIO_MODE_FIRMWARE_SMA: /* falltrough */ case DRX_UIO_MODE_FIRMWARE_SAW: /* falltrough */ case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_tx_mode = uio_cfg->mode; break; case DRX_UIO_MODE_DISABLE: ext_attr->uio_sma_tx_mode = uio_cfg->mode; /* pad configuration register is set 0 - input mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EINVAL; } /* switch ( uio_cfg->mode ) */ break; /*====================================================================*/ case DRX_UIO2: /* DRX_UIO2: SMA_RX UIO-2 */ if (!ext_attr->has_smarx) return -EIO; switch (uio_cfg->mode) { case DRX_UIO_MODE_FIRMWARE0: /* falltrough */ case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_rx_mode = uio_cfg->mode; break; case DRX_UIO_MODE_DISABLE: ext_attr->uio_sma_rx_mode = uio_cfg->mode; /* pad configuration register is set 0 - input mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_RX_CFG__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EINVAL; break; } /* switch ( uio_cfg->mode ) */ break; /*====================================================================*/ case DRX_UIO3: /* DRX_UIO3: GPIO UIO-3 */ if (!ext_attr->has_gpio) return -EIO; switch 
(uio_cfg->mode) { case DRX_UIO_MODE_FIRMWARE0: /* falltrough */ case DRX_UIO_MODE_READWRITE: ext_attr->uio_gpio_mode = uio_cfg->mode; break; case DRX_UIO_MODE_DISABLE: ext_attr->uio_gpio_mode = uio_cfg->mode; /* pad configuration register is set 0 - input mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_GPIO_CFG__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EINVAL; break; } /* switch ( uio_cfg->mode ) */ break; /*====================================================================*/ case DRX_UIO4: /* DRX_UIO4: IRQN UIO-4 */ if (!ext_attr->has_irqn) return -EIO; switch (uio_cfg->mode) { case DRX_UIO_MODE_READWRITE: ext_attr->uio_irqn_mode = uio_cfg->mode; break; case DRX_UIO_MODE_DISABLE: /* pad configuration register is set 0 - input mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_IRQN_CFG__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->uio_irqn_mode = uio_cfg->mode; break; case DRX_UIO_MODE_FIRMWARE0: /* falltrough */ default: return -EINVAL; break; } /* switch ( uio_cfg->mode ) */ break; /*====================================================================*/ default: return -EINVAL; } /* switch ( uio_cfg->uio ) */ /* Write magic word to disable pdr reg write */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /** * \fn int ctrl_uio_write() * \brief Write to a UIO. * \param demod Pointer to demodulator instance. * \param uio_data Pointer to data container for a certain UIO. * \return int. 
*/ static int ctrl_uio_write(struct drx_demod_instance *demod, struct drxuio_data *uio_data) { struct drxj_data *ext_attr = (struct drxj_data *) (NULL); int rc; u16 pin_cfg_value = 0; u16 value = 0; if ((uio_data == NULL) || (demod == NULL)) return -EINVAL; ext_attr = (struct drxj_data *) demod->my_ext_attr; /* Write magic word to enable pdr reg write */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } switch (uio_data->uio) { /*====================================================================*/ case DRX_UIO1: /* DRX_UIO1: SMA_TX UIO-1 */ if (!ext_attr->has_smatx) return -EIO; if ((ext_attr->uio_sma_tx_mode != DRX_UIO_MODE_READWRITE) && (ext_attr->uio_sma_tx_mode != DRX_UIO_MODE_FIRMWARE_SAW)) { return -EIO; } pin_cfg_value = 0; /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */ pin_cfg_value |= 0x0113; /* io_pad_cfg_mode output mode is drive always */ /* io_pad_cfg_drive is set to power 2 (23 mA) */ /* write to io pad configuration register - output mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, pin_cfg_value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* use corresponding bit in io data output registar */ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (!uio_data->value) value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */ else value |= 0x8000; /* write one to 15th bit - 1st UIO */ /* write back to io data output register */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; /*======================================================================*/ case DRX_UIO2: /* DRX_UIO2: SMA_RX UIO-2 */ if (!ext_attr->has_smarx) return -EIO; if (ext_attr->uio_sma_rx_mode != DRX_UIO_MODE_READWRITE) return 
-EIO; pin_cfg_value = 0; /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */ pin_cfg_value |= 0x0113; /* io_pad_cfg_mode output mode is drive always */ /* io_pad_cfg_drive is set to power 2 (23 mA) */ /* write to io pad configuration register - output mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_RX_CFG__A, pin_cfg_value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* use corresponding bit in io data output registar */ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (!uio_data->value) value &= 0xBFFF; /* write zero to 14th bit - 2nd UIO */ else value |= 0x4000; /* write one to 14th bit - 2nd UIO */ /* write back to io data output register */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; /*====================================================================*/ case DRX_UIO3: /* DRX_UIO3: ASEL UIO-3 */ if (!ext_attr->has_gpio) return -EIO; if (ext_attr->uio_gpio_mode != DRX_UIO_MODE_READWRITE) return -EIO; pin_cfg_value = 0; /* io_pad_cfg register (8 bit reg.) 
MSB bit is 1 (default value) */ pin_cfg_value |= 0x0113; /* io_pad_cfg_mode output mode is drive always */ /* io_pad_cfg_drive is set to power 2 (23 mA) */ /* write to io pad configuration register - output mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_GPIO_CFG__A, pin_cfg_value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* use corresponding bit in io data output registar */ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_HI__A, &value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (!uio_data->value) value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */ else value |= 0x0004; /* write one to 2nd bit - 3rd UIO */ /* write back to io data output register */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_HI__A, value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; /*=====================================================================*/ case DRX_UIO4: /* DRX_UIO4: IRQN UIO-4 */ if (!ext_attr->has_irqn) return -EIO; if (ext_attr->uio_irqn_mode != DRX_UIO_MODE_READWRITE) return -EIO; pin_cfg_value = 0; /* io_pad_cfg register (8 bit reg.) 
MSB bit is 1 (default value) */ pin_cfg_value |= 0x0113; /* io_pad_cfg_mode output mode is drive always */ /* io_pad_cfg_drive is set to power 2 (23 mA) */ /* write to io pad configuration register - output mode */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_IRQN_CFG__A, pin_cfg_value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* use corresponding bit in io data output registar */ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, &value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (uio_data->value == false) value &= 0xEFFF; /* write zero to 12th bit - 4th UIO */ else value |= 0x1000; /* write one to 12th bit - 4th UIO */ /* write back to io data output register */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_UIO_OUT_LO__A, value, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; /*=====================================================================*/ default: return -EINVAL; } /* switch ( uio_data->uio ) */ /* Write magic word to disable pdr reg write */ rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*---------------------------------------------------------------------------*/ /* UIO Configuration Functions - end */ /*---------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ /* I2C Bridge Functions - begin */ /*----------------------------------------------------------------------------*/ /** * \fn int ctrl_i2c_bridge() * \brief Open or close the I2C switch to tuner. * \param demod Pointer to demodulator instance. * \param bridge_closed Pointer to bool indication if bridge is closed not. * \return int. 
 */
static int
ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
{
    struct drxj_hi_cmd hi_cmd;
    u16 result = 0;

    /* check arguments */
    if (bridge_closed == NULL)
        return -EINVAL;

    /* Bridge control is done through a host-interface command; the
       secondary key in param1 authorizes the bridge change. */
    hi_cmd.cmd = SIO_HI_RA_RAM_CMD_BRDCTRL;
    hi_cmd.param1 = SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY;
    if (*bridge_closed)
        hi_cmd.param2 = SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED;
    else
        hi_cmd.param2 = SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN;

    return hi_command(demod->my_i2c_dev_addr, &hi_cmd, &result);
}

/*----------------------------------------------------------------------------*/
/* I2C Bridge Functions - end                                                 */
/*----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------*/
/* Smart antenna Functions - begin                                            */
/*----------------------------------------------------------------------------*/
/**
* \fn int smart_ant_init()
* \brief Initialize Smart Antenna.
* \param pointer to struct drx_demod_instance.
* \return int.
*
*/
static int smart_ant_init(struct drx_demod_instance *demod)
{
    struct drxj_data *ext_attr = NULL;
    struct i2c_device_addr *dev_addr = NULL;
    struct drxuio_cfg uio_cfg = { DRX_UIO1, DRX_UIO_MODE_FIRMWARE_SMA };
    int rc;
    u16 data = 0;

    dev_addr = demod->my_i2c_dev_addr;
    ext_attr = (struct drxj_data *) demod->my_ext_attr;

    /* Write magic word to enable pdr reg write */
    rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    /* init smart antenna */
    rc = drxj_dap_read_reg16(dev_addr, SIO_SA_TX_COMMAND__A, &data, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    /* Enable TX, with or without signal inversion depending on the
       board configuration. */
    if (ext_attr->smart_ant_inverted) {
        rc = drxj_dap_write_reg16(dev_addr, SIO_SA_TX_COMMAND__A,
                                  (data | SIO_SA_TX_COMMAND_TX_INVERT__M)
                                  | SIO_SA_TX_COMMAND_TX_ENABLE__M, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    } else {
        rc = drxj_dap_write_reg16(dev_addr, SIO_SA_TX_COMMAND__A, (data &
                                  (~SIO_SA_TX_COMMAND_TX_INVERT__M))
                                  | SIO_SA_TX_COMMAND_TX_ENABLE__M, 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    }
    /* config SMA_TX pin to smart antenna mode */
    rc = ctrl_set_uio_cfg(demod, &uio_cfg);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_CFG__A, 0x13, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_PDR_SMA_TX_GPIO_FNC__A, 0x03, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }

    /* Write magic word to disable pdr reg write */
    rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }

    return 0;
rw_error:
    return rc;
}

/*
 * scu_command - send a command to the SCU and collect its results.
 *
 * Waits for the SCU command mailbox to be ready, writes up to five 16-bit
 * parameters (highest index first, intentional switch fallthrough), issues
 * the command, polls for completion, then reads back up to four results.
 */
static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd)
{
    int rc;
    u16 cur_cmd = 0;
    unsigned long timeout;

    /* Check param */
    if (cmd == NULL)
        return -EINVAL;

    /* Wait until SCU command interface is ready to receive command */
    rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_COMMAND__A, &cur_cmd, 0);
    if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
    if (cur_cmd != DRX_SCU_READY)
        return -EIO;

    /* Write parameters from the highest index down; each case falls
       through to write the remaining lower-indexed parameters. */
    switch (cmd->parameter_len) {
    case 5:
        rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_4__A, *(cmd->parameter + 4), 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* fallthrough */
    case 4:
        rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* fallthrough */
    case 3:
        rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* fallthrough */
    case 2:
        rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0);
        if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
        /* fallthrough */
    case 1:
        rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0);
        if
(rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case 0: /* do nothing */ break; default: /* this number of parameters is not supported */ return -EIO; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_COMMAND__A, cmd->command, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Wait until SCU has processed command */ timeout = jiffies + msecs_to_jiffies(DRXJ_MAX_WAITTIME); while (time_is_after_jiffies(timeout)) { rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_COMMAND__A, &cur_cmd, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (cur_cmd == DRX_SCU_READY) break; usleep_range(1000, 2000); } if (cur_cmd != DRX_SCU_READY) return -EIO; /* read results */ if ((cmd->result_len > 0) && (cmd->result != NULL)) { s16 err; switch (cmd->result_len) { case 4: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_3__A, cmd->result + 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case 3: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case 2: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case 1: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* fallthrough */ case 0: /* do nothing */ break; default: /* this number of parameters is not supported */ return -EIO; } /* Check if an error was reported by SCU */ err = cmd->result[0]; /* check a few fixed error codes */ if ((err == (s16) SCU_RAM_PARAM_0_RESULT_UNKSTD) || (err == (s16) SCU_RAM_PARAM_0_RESULT_UNKCMD) || (err == (s16) SCU_RAM_PARAM_0_RESULT_INVPAR) || (err == (s16) SCU_RAM_PARAM_0_RESULT_SIZE) ) { return -EINVAL; } /* here it is assumed that negative means error, and positive no error */ else if (err < 0) return -EIO; else return 0; } return 0; rw_error: 
return rc; } /** * \fn int DRXJ_DAP_SCUAtomicReadWriteBlock() * \brief Basic access routine for SCU atomic read or write access * \param dev_addr pointer to i2c dev address * \param addr destination/source address * \param datasize size of data buffer in bytes * \param data pointer to data buffer * \return int * \retval 0 Succes * \retval -EIO Timeout, I2C error, illegal bank * */ #define ADDR_AT_SCU_SPACE(x) ((x - 0x82E000) * 2) static int drxj_dap_scu_atomic_read_write_block(struct i2c_device_addr *dev_addr, u32 addr, u16 datasize, /* max 30 bytes because the limit of SCU parameter */ u8 *data, bool read_flag) { struct drxjscu_cmd scu_cmd; int rc; u16 set_param_parameters[18]; u16 cmd_result[15]; /* Parameter check */ if (!data || !dev_addr || (datasize % 2) || ((datasize / 2) > 16)) return -EINVAL; set_param_parameters[1] = (u16) ADDR_AT_SCU_SPACE(addr); if (read_flag) { /* read */ set_param_parameters[0] = ((~(0x0080)) & datasize); scu_cmd.parameter_len = 2; scu_cmd.result_len = datasize / 2 + 2; } else { int i = 0; set_param_parameters[0] = 0x0080 | datasize; for (i = 0; i < (datasize / 2); i++) { set_param_parameters[i + 2] = (data[2 * i] | (data[(2 * i) + 1] << 8)); } scu_cmd.parameter_len = datasize / 2 + 2; scu_cmd.result_len = 1; } scu_cmd.command = SCU_RAM_COMMAND_STANDARD_TOP | SCU_RAM_COMMAND_CMD_AUX_SCU_ATOMIC_ACCESS; scu_cmd.result = cmd_result; scu_cmd.parameter = set_param_parameters; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (read_flag) { int i = 0; /* read data from buffer */ for (i = 0; i < (datasize / 2); i++) { data[2 * i] = (u8) (scu_cmd.result[i + 2] & 0xFF); data[(2 * i) + 1] = (u8) (scu_cmd.result[i + 2] >> 8); } } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int DRXJ_DAP_AtomicReadReg16() * \brief Atomic read of 16 bits words */ static int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr, 
u32 addr, u16 *data, u32 flags) { u8 buf[2] = { 0 }; int rc = -EIO; u16 word = 0; if (!data) return -EINVAL; rc = drxj_dap_scu_atomic_read_write_block(dev_addr, addr, 2, buf, true); if (rc < 0) return rc; word = (u16) (buf[0] + (buf[1] << 8)); *data = word; return rc; } /*============================================================================*/ /** * \fn int drxj_dap_scu_atomic_write_reg16() * \brief Atomic read of 16 bits words */ static int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr, u32 addr, u16 data, u32 flags) { u8 buf[2]; int rc = -EIO; buf[0] = (u8) (data & 0xff); buf[1] = (u8) ((data >> 8) & 0xff); rc = drxj_dap_scu_atomic_read_write_block(dev_addr, addr, 2, buf, false); return rc; } /* -------------------------------------------------------------------------- */ /** * \brief Measure result of ADC synchronisation * \param demod demod instance * \param count (returned) count * \return int. * \retval 0 Success * \retval -EIO Failure: I2C error * */ static int adc_sync_measurement(struct drx_demod_instance *demod, u16 *count) { struct i2c_device_addr *dev_addr = NULL; int rc; u16 data = 0; dev_addr = demod->my_i2c_dev_addr; /* Start measurement */ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_START_LOCK__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Wait at least 3*128*(1/sysclk) <<< 1 millisec */ msleep(1); *count = 0; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE0__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (data == 127) *count = *count + 1; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE1__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (data == 127) *count = *count + 1; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_PHASE2__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (data == 127) 
*count = *count + 1; return 0; rw_error: return rc; } /** * \brief Synchronize analog and digital clock domains * \param demod demod instance * \return int. * \retval 0 Success * \retval -EIO Failure: I2C error or failure to synchronize * * An IQM reset will also reset the results of this synchronization. * After an IQM reset this routine needs to be called again. * */ static int adc_synchronization(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = NULL; int rc; u16 count = 0; dev_addr = demod->my_i2c_dev_addr; rc = adc_sync_measurement(demod, &count); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (count == 1) { /* Try sampling on a different edge */ u16 clk_neg = 0; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_CLKNEG__A, &clk_neg, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } clk_neg ^= IQM_AF_CLKNEG_CLKNEGDATA__M; rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLKNEG__A, clk_neg, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = adc_sync_measurement(demod, &count); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } /* TODO: implement fallback scenarios */ if (count < 2) return -EIO; return 0; rw_error: return rc; } /*============================================================================*/ /*== END AUXILIARY FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /*============================================================================*/ /*== 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================*/ /** * \fn int init_agc () * \brief Initialize AGC for all standards. * \param demod instance of demodulator. * \param channel pointer to channel data. * \return int. 
*/ static int init_agc(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = NULL; struct drx_common_attr *common_attr = NULL; struct drxj_data *ext_attr = NULL; struct drxj_cfg_agc *p_agc_rf_settings = NULL; struct drxj_cfg_agc *p_agc_if_settings = NULL; int rc; u16 ingain_tgt_max = 0; u16 clp_dir_to = 0; u16 sns_sum_max = 0; u16 clp_sum_max = 0; u16 sns_dir_to = 0; u16 ki_innergain_min = 0; u16 agc_ki = 0; u16 ki_max = 0; u16 if_iaccu_hi_tgt_min = 0; u16 data = 0; u16 agc_ki_dgain = 0; u16 ki_min = 0; u16 clp_ctrl_mode = 0; u16 agc_rf = 0; u16 agc_if = 0; dev_addr = demod->my_i2c_dev_addr; common_attr = (struct drx_common_attr *) demod->my_common_attr; ext_attr = (struct drxj_data *) demod->my_ext_attr; switch (ext_attr->standard) { case DRX_STANDARD_8VSB: clp_sum_max = 1023; clp_dir_to = (u16) (-9); sns_sum_max = 1023; sns_dir_to = (u16) (-9); ki_innergain_min = (u16) (-32768); ki_max = 0x032C; agc_ki_dgain = 0xC; if_iaccu_hi_tgt_min = 2047; ki_min = 0x0117; ingain_tgt_max = 16383; clp_ctrl_mode = 0; rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCCNT__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_WD__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_STP__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCCNT__A, 0, 
0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_WD__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_STP__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN__A, 1024, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_VSB_AGC_POW_TGT__A, 22600, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT__A, 13200, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } p_agc_if_settings = &(ext_attr->vsb_if_agc_cfg); p_agc_rf_settings = &(ext_attr->vsb_rf_agc_cfg); break; #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_C: case DRX_STANDARD_ITU_B: ingain_tgt_max = 5119; clp_sum_max = 1023; clp_dir_to = (u16) (-5); sns_sum_max = 127; sns_dir_to = (u16) (-3); ki_innergain_min = 0; ki_max = 0x0657; if_iaccu_hi_tgt_min = 2047; agc_ki_dgain = 0x7; ki_min = 0x0117; clp_ctrl_mode = 0; rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCCNT__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_WD__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_DIR_STP__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM__A, 0, 0); if (rc != 
0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCCNT__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_WD__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_STP__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } p_agc_if_settings = &(ext_attr->qam_if_agc_cfg); p_agc_rf_settings = &(ext_attr->qam_rf_agc_cfg); rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT__A, p_agc_if_settings->top, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_AGC_KI__A, &agc_ki, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } agc_ki &= 0xf000; rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI__A, agc_ki, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; #endif default: return -EINVAL; } /* for new AGC interface */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT_MIN__A, p_agc_if_settings->top, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN__A, p_agc_if_settings->top, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Gain fed from inner to outer AGC */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingain_tgt_max, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, if_iaccu_hi_tgt_min, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_HI__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* set to p_agc_settings->top before */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_IF_IACCU_LO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, 
SCU_RAM_AGC_RF_IACCU_HI__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_RF_IACCU_LO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_RF_MAX__A, 32767, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM_MAX__A, clp_sum_max, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM_MAX__A, sns_sum_max, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, ki_innergain_min, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_CYCLEN__A, 500, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_CYCLEN__A, 500, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MIN__A, ki_min, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_MAX__A, ki_max, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI_RED__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_SUM_MIN__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CYCLEN__A, 500, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, 
SCU_RAM_AGC_CLP_DIR_TO__A, clp_dir_to, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_SUM_MIN__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_SNS_DIR_TO__A, sns_dir_to, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_CLP_CTRL_MODE__A, clp_ctrl_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } agc_rf = 0x800 + p_agc_rf_settings->cut_off_current; if (common_attr->tuner_rf_agc_pol == true) agc_rf = 0x87ff - agc_rf; agc_if = 0x800; if (common_attr->tuner_if_agc_pol == true) agc_rf = 0x87ff - agc_rf; rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AGC_RF__A, agc_rf, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AGC_IF__A, agc_if, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Set/restore Ki DGAIN factor */ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_AGC_KI__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } data &= ~SCU_RAM_AGC_KI_DGAIN__M; data |= (agc_ki_dgain << SCU_RAM_AGC_KI_DGAIN__B); rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_AGC_KI__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /** * \fn int set_frequency () * \brief Set frequency shift. * \param demod instance of demodulator. * \param channel pointer to channel data. * \param tuner_freq_offset residual frequency from tuner. * \return int. 
*/
/*
 * Program the IQM frequency shifter so the wanted (possibly mirrored)
 * image lands at baseband.  The net spectrum inversion is the XOR of
 * RF mirroring, tuner mirroring, ADC mirroring and the per-standard
 * preferred image.
 */
static int
set_frequency(struct drx_demod_instance *demod,
          struct drx_channel *channel, s32 tuner_freq_offset)
{
    struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
    struct drxj_data *ext_attr = demod->my_ext_attr;
    int rc;
    s32 sampling_frequency = 0;
    s32 frequency_shift = 0;
    s32 if_freq_actual = 0;
    s32 rf_freq_residual = -1 * tuner_freq_offset;
    s32 adc_freq = 0;
    s32 intermediate_freq = 0;
    u32 iqm_fs_rate_ofs = 0;
    bool adc_flip = true;
    bool select_pos_image = false;
    bool rf_mirror;
    bool tuner_mirror;
    bool image_to_select = true;
    s32 fm_frequency_shift = 0;

    rf_mirror = (ext_attr->mirror == DRX_MIRROR_YES) ? true : false;
    tuner_mirror = demod->my_common_attr->mirror_freq_spect ? false : true;
    /*
       Program frequency shifter
       No need to account for mirroring on RF
     */
    switch (ext_attr->standard) {
    case DRX_STANDARD_ITU_A:    /* fallthrough */
    case DRX_STANDARD_ITU_C:    /* fallthrough */
    case DRX_STANDARD_PAL_SECAM_LP:    /* fallthrough */
    case DRX_STANDARD_8VSB:
        select_pos_image = true;
        break;
    case DRX_STANDARD_FM:
        /* After IQM FS sound carrier must appear at 4 Mhz in spect.
           Sound carrier is already 3Mhz above centre frequency due
           to tuner setting so now add an extra shift of 1MHz... */
        fm_frequency_shift = 1000;
        /* fallthrough - FM shares the negative-image path below */
    case DRX_STANDARD_ITU_B:    /* fallthrough */
    case DRX_STANDARD_NTSC:    /* fallthrough */
    case DRX_STANDARD_PAL_SECAM_BG:    /* fallthrough */
    case DRX_STANDARD_PAL_SECAM_DK:    /* fallthrough */
    case DRX_STANDARD_PAL_SECAM_I:    /* fallthrough */
    case DRX_STANDARD_PAL_SECAM_L:
        select_pos_image = false;
        break;
    default:
        return -EINVAL;
    }
    intermediate_freq = demod->my_common_attr->intermediate_freq;
    sampling_frequency = demod->my_common_attr->sys_clock_freq / 3;
    if (tuner_mirror)
        /* tuner doesn't mirror */
        if_freq_actual = intermediate_freq + rf_freq_residual + fm_frequency_shift;
    else
        /* tuner mirrors */
        if_freq_actual = intermediate_freq - rf_freq_residual - fm_frequency_shift;
    if (if_freq_actual > sampling_frequency / 2) {
        /* adc mirrors */
        adc_freq = sampling_frequency - if_freq_actual;
        adc_flip = true;
    } else {
        /* adc doesn't mirror */
        adc_freq = if_freq_actual;
        adc_flip = false;
    }

    frequency_shift = adc_freq;
    image_to_select = (bool) (rf_mirror ^ tuner_mirror ^ adc_flip ^ select_pos_image);
    iqm_fs_rate_ofs = frac28(frequency_shift, sampling_frequency);

    /* negate the shift (two's complement) to pick the other image */
    if (image_to_select)
        iqm_fs_rate_ofs = ~iqm_fs_rate_ofs + 1;

    /* Program frequency shifter with tuner offset compensation */
    /* frequency_shift += tuner_freq_offset; TODO */
    rc = drxdap_fasi_write_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, iqm_fs_rate_ofs, 0);
    if (rc != 0) {
        pr_err("error %d\n", rc);
        goto rw_error;
    }
    ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;
    ext_attr->pos_image = (bool) (rf_mirror ^ tuner_mirror ^ select_pos_image);

    return 0;
rw_error:
    return rc;
}

/**
* \fn int get_acc_pkt_err()
* \brief Retrieve signal strength for VSB and QAM.
* \param demod Pointer to demod instance
* \param packet_err Pointer to packet error
* \return int.
* \retval 0 sig_strength contains valid data.
* \retval -EINVAL sig_strength is NULL.
* \retval -EIO Erroneous data, sig_strength contains invalid data.
*/
#ifdef DRXJ_SIGNAL_ACCUM_ERR
/*
 * Accumulate the SCU packet-failure counter across reads, compensating for
 * 16-bit wrap-around of the hardware counter.  The running total lives in
 * function-static state and is restarted when reset_pkt_err_acc is set.
 */
static int get_acc_pkt_err(struct drx_demod_instance *demod, u16 *packet_err)
{
    static u16 acc_err;        /* running accumulated error count */
    static u16 prev_raw;       /* raw counter value at previous read */
    struct drxj_data *ext_attr = (struct drxj_data *) demod->my_ext_attr;
    struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
    u16 raw = 0;
    int rc;

    rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, &raw, 0);
    if (rc != 0) {
        pr_err("error %d\n", rc);
        goto rw_error;
    }

    /* Restart accumulation from the current raw reading when requested */
    if (ext_attr->reset_pkt_err_acc) {
        prev_raw = raw;
        acc_err = 0;
        ext_attr->reset_pkt_err_acc = false;
    }

    /* Add the delta since the last read, handling 16-bit wrap-around */
    if (raw < prev_raw)
        acc_err += (0xffff - prev_raw) + raw;
    else
        acc_err += raw - prev_raw;

    *packet_err = acc_err;
    prev_raw = raw;

    return 0;
rw_error:
    return rc;
}
#endif

/*============================================================================*/

/**
* \fn int set_agc_rf ()
* \brief Configure RF AGC
* \param demod instance of demodulator.
* \param agc_settings AGC configuration structure
* \return int.
*/
/*
 * Apply an RF-AGC configuration.  The hardware registers are touched only
 * when the settings' standard matches the currently active one; the
 * settings are always cached in ext_attr for later restoration.  When
 * 'atomic' is set, SCU RAM is accessed through the atomic read/write
 * helpers instead of the plain register accessors.
 */
static int
set_agc_rf(struct drx_demod_instance *demod, struct drxj_cfg_agc *agc_settings, bool atomic)
{
    struct i2c_device_addr *dev_addr = NULL;
    struct drxj_data *ext_attr = NULL;
    struct drxj_cfg_agc *p_agc_settings = NULL;
    struct drx_common_attr *common_attr = NULL;
    int rc;
    drx_write_reg16func_t scu_wr16 = NULL;
    drx_read_reg16func_t scu_rr16 = NULL;

    common_attr = (struct drx_common_attr *) demod->my_common_attr;
    dev_addr = demod->my_i2c_dev_addr;
    ext_attr = (struct drxj_data *) demod->my_ext_attr;

    if (atomic) {
        scu_rr16 = drxj_dap_scu_atomic_read_reg16;
        scu_wr16 = drxj_dap_scu_atomic_write_reg16;
    } else {
        scu_rr16 = drxj_dap_read_reg16;
        scu_wr16 = drxj_dap_write_reg16;
    }

    /* Configure AGC only if standard is currently active */
    if ((ext_attr->standard == agc_settings->standard) ||
        (DRXJ_ISQAMSTD(ext_attr->standard) &&
         DRXJ_ISQAMSTD(agc_settings->standard)) ||
        (DRXJ_ISATVSTD(ext_attr->standard) &&
         DRXJ_ISATVSTD(agc_settings->standard))) {
        u16 data = 0;

        switch (agc_settings->ctrl_mode) {
        case DRX_AGC_CTRL_AUTO:
            /* Enable RF AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data |= IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE;
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            /* Enable SCU RF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_RF__M;
            /* loop gain depends on the active standard */
            if (ext_attr->standard == DRX_STANDARD_8VSB)
                data |= (2 << SCU_RAM_AGC_KI_RF__B);
            else if (DRXJ_ISQAMSTD(ext_attr->standard))
                data |= (5 << SCU_RAM_AGC_KI_RF__B);
            else
                data |= (4 << SCU_RAM_AGC_KI_RF__B);

            if (common_attr->tuner_rf_agc_pol)
                data |= SCU_RAM_AGC_KI_INV_RF_POL__M;
            else
                data &= ~SCU_RAM_AGC_KI_INV_RF_POL__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Set speed ( using complementary reduction value ) */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, (~(agc_settings->speed << SCU_RAM_AGC_KI_RED_RAGC_RED__B) & SCU_RAM_AGC_KI_RED_RAGC_RED__M) | data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            if (agc_settings->standard == DRX_STANDARD_8VSB)
                p_agc_settings = &(ext_attr->vsb_if_agc_cfg);
            else if (DRXJ_ISQAMSTD(agc_settings->standard))
                p_agc_settings = &(ext_attr->qam_if_agc_cfg);
            else if (DRXJ_ISATVSTD(agc_settings->standard))
                p_agc_settings = &(ext_attr->atv_if_agc_cfg);
            else
                return -EINVAL;

            /* Set TOP, only if IF-AGC is in AUTO mode */
            if (p_agc_settings->ctrl_mode == DRX_AGC_CTRL_AUTO) {
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, agc_settings->top, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, agc_settings->top, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
            }

            /* Cut-Off current */
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_RF_IACCU_HI_CO__A, agc_settings->cut_off_current, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            break;
        case DRX_AGC_CTRL_USER:
            /* Enable RF AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data |= IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE;
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Disable SCU RF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_RF__M;
            if (common_attr->tuner_rf_agc_pol)
                data |= SCU_RAM_AGC_KI_INV_RF_POL__M;
            else
                data &= ~SCU_RAM_AGC_KI_INV_RF_POL__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Write value to output pin */
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_RF_IACCU_HI__A, agc_settings->output_level, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            break;
        case DRX_AGC_CTRL_OFF:
            /* Disable RF AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= (~IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE);
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Disable SCU RF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_RF__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            break;
        default:
            return -EINVAL;
        }        /* switch ( agcsettings->ctrl_mode ) */
    }

    /* Store rf agc settings */
    switch (agc_settings->standard) {
    case DRX_STANDARD_8VSB:
        ext_attr->vsb_rf_agc_cfg = *agc_settings;
        break;
#ifndef DRXJ_VSB_ONLY
    case DRX_STANDARD_ITU_A:
    case DRX_STANDARD_ITU_B:
    case DRX_STANDARD_ITU_C:
        ext_attr->qam_rf_agc_cfg = *agc_settings;
        break;
#endif
    default:
        return -EIO;
    }

    return 0;
rw_error:
    return rc;
}

/**
* \fn int set_agc_if ()
* \brief Configure If AGC
* \param demod instance of demodulator.
* \param agc_settings AGC configuration structure
* \return int.
*/
/*
 * Apply an IF-AGC configuration; mirror image of set_agc_rf().  Registers
 * are written only when the settings' standard matches the active one; the
 * settings are always cached in ext_attr.  'atomic' selects the SCU atomic
 * access helpers over the plain register accessors.
 */
static int
set_agc_if(struct drx_demod_instance *demod, struct drxj_cfg_agc *agc_settings, bool atomic)
{
    struct i2c_device_addr *dev_addr = NULL;
    struct drxj_data *ext_attr = NULL;
    struct drxj_cfg_agc *p_agc_settings = NULL;
    struct drx_common_attr *common_attr = NULL;
    drx_write_reg16func_t scu_wr16 = NULL;
    drx_read_reg16func_t scu_rr16 = NULL;
    int rc;

    common_attr = (struct drx_common_attr *) demod->my_common_attr;
    dev_addr = demod->my_i2c_dev_addr;
    ext_attr = (struct drxj_data *) demod->my_ext_attr;

    if (atomic) {
        scu_rr16 = drxj_dap_scu_atomic_read_reg16;
        scu_wr16 = drxj_dap_scu_atomic_write_reg16;
    } else {
        scu_rr16 = drxj_dap_read_reg16;
        scu_wr16 = drxj_dap_write_reg16;
    }

    /* Configure AGC only if standard is currently active */
    if ((ext_attr->standard == agc_settings->standard) ||
        (DRXJ_ISQAMSTD(ext_attr->standard) &&
         DRXJ_ISQAMSTD(agc_settings->standard)) ||
        (DRXJ_ISATVSTD(ext_attr->standard) &&
         DRXJ_ISATVSTD(agc_settings->standard))) {
        u16 data = 0;

        switch (agc_settings->ctrl_mode) {
        case DRX_AGC_CTRL_AUTO:

            /* Enable IF AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data |= IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE;
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Enable SCU IF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
            data &= ~SCU_RAM_AGC_KI_IF__M;
            /* loop gain depends on the active standard */
            if (ext_attr->standard == DRX_STANDARD_8VSB)
                data |= (3 << SCU_RAM_AGC_KI_IF__B);
            else if (DRXJ_ISQAMSTD(ext_attr->standard))
                data |= (6 << SCU_RAM_AGC_KI_IF__B);
            else
                data |= (5 << SCU_RAM_AGC_KI_IF__B);

            if (common_attr->tuner_if_agc_pol)
                data |= SCU_RAM_AGC_KI_INV_IF_POL__M;
            else
                data &= ~SCU_RAM_AGC_KI_INV_IF_POL__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Set speed (using complementary reduction value) */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI_RED__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M;
            rc = (*scu_wr16) (dev_addr, SCU_RAM_AGC_KI_RED__A, (~(agc_settings->speed << SCU_RAM_AGC_KI_RED_IAGC_RED__B) & SCU_RAM_AGC_KI_RED_IAGC_RED__M) | data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            if (agc_settings->standard == DRX_STANDARD_8VSB)
                p_agc_settings = &(ext_attr->vsb_rf_agc_cfg);
            else if (DRXJ_ISQAMSTD(agc_settings->standard))
                p_agc_settings = &(ext_attr->qam_rf_agc_cfg);
            else if (DRXJ_ISATVSTD(agc_settings->standard))
                p_agc_settings = &(ext_attr->atv_rf_agc_cfg);
            else
                return -EINVAL;

            /* Restore TOP */
            if (p_agc_settings->ctrl_mode == DRX_AGC_CTRL_AUTO) {
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, p_agc_settings->top, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, p_agc_settings->top, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
            } else {
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, 0, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
                rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, 0, 0);
                if (rc != 0) {
                    pr_err("error %d\n", rc);
                    goto rw_error;
                }
            }
            break;

        case DRX_AGC_CTRL_USER:

            /* Enable IF AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data |= IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE;
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Disable SCU IF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            /* NOTE(review): clear immediately followed by set of the same
               mask — the clear is redundant; net effect is "set DISABLE" */
            data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
            data |= SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
            if (common_attr->tuner_if_agc_pol)
                data |= SCU_RAM_AGC_KI_INV_IF_POL__M;
            else
                data &= ~SCU_RAM_AGC_KI_INV_IF_POL__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Write value to output pin */
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, agc_settings->output_level, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            break;

        case DRX_AGC_CTRL_OFF:

            /* Disable If AGC DAC */
            rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            data &= (~IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE);
            rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }

            /* Disable SCU IF AGC loop */
            rc = (*scu_rr16)(dev_addr, SCU_RAM_AGC_KI__A, &data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            /* NOTE(review): redundant clear-then-set, as in the USER case */
            data &= ~SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
            data |= SCU_RAM_AGC_KI_IF_AGC_DISABLE__M;
            rc = (*scu_wr16)(dev_addr, SCU_RAM_AGC_KI__A, data, 0);
            if (rc != 0) {
                pr_err("error %d\n", rc);
                goto rw_error;
            }
            break;
        default:
            return -EINVAL;
        }        /* switch ( agcsettings->ctrl_mode ) */

        /* always set the top to support configurations without if-loop */
        rc = (*scu_wr16) (dev_addr, SCU_RAM_AGC_INGAIN_TGT_MIN__A, agc_settings->top, 0);
        if (rc != 0) {
            pr_err("error %d\n", rc);
            goto rw_error;
        }
    }

    /* Store if agc settings */
    switch (agc_settings->standard) {
    case DRX_STANDARD_8VSB:
        ext_attr->vsb_if_agc_cfg = *agc_settings;
        break;
#ifndef DRXJ_VSB_ONLY
    case DRX_STANDARD_ITU_A:
    case DRX_STANDARD_ITU_B:
    case DRX_STANDARD_ITU_C:
        ext_attr->qam_if_agc_cfg = *agc_settings;
        break;
#endif
    default:
        return -EIO;
    }

    return 0;
rw_error:
    return rc;
}

/**
* \fn int set_iqm_af ()
* \brief Configure IQM AF registers
* \param demod instance of demodulator.
* \param active
* \return int.
*/
static int set_iqm_af(struct drx_demod_instance *demod, bool active)
{
	u16 data = 0;
	struct i2c_device_addr *dev_addr = NULL;
	int rc;

	dev_addr = demod->my_i2c_dev_addr;

	/* Configure IQM: read-modify-write the analog front-end standby register */
	rc = drxj_dap_read_reg16(dev_addr, IQM_AF_STDBY__A, &data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	if (!active)
		/* Put ADC, amplifier, power detector and both AGC DACs in standby */
		data &= ((~IQM_AF_STDBY_STDBY_ADC_A2_ACTIVE)
			 & (~IQM_AF_STDBY_STDBY_AMP_A2_ACTIVE)
			 & (~IQM_AF_STDBY_STDBY_PD_A2_ACTIVE)
			 & (~IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE)
			 & (~IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE));
	else
		/* Wake all five AF sub-blocks */
		data |= (IQM_AF_STDBY_STDBY_ADC_A2_ACTIVE
			 | IQM_AF_STDBY_STDBY_AMP_A2_ACTIVE
			 | IQM_AF_STDBY_STDBY_PD_A2_ACTIVE
			 | IQM_AF_STDBY_STDBY_TAGC_IF_A2_ACTIVE
			 | IQM_AF_STDBY_STDBY_TAGC_RF_A2_ACTIVE);
	rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	return 0;
rw_error:
	return rc;
}

/*============================================================================*/
/*== END 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/
/*============================================================================*/

/*============================================================================*/
/*============================================================================*/
/*== 8VSB DATAPATH FUNCTIONS ==*/
/*============================================================================*/
/*============================================================================*/

/**
* \fn int power_down_vsb ()
* \brief Power down VSB related blocks.
* \param demod instance of demodulator.
* \param primary true if this instance owns the IQM/AF front end.
* \return int.
*/
static int power_down_vsb(struct drx_demod_instance *demod, bool primary)
{
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	struct drxjscu_cmd cmd_scu = { /* command */ 0,
		/* parameter_len */ 0,
		/* result_len */ 0,
		/* *parameter */ NULL,
		/* *result */ NULL
	};
	struct drx_cfg_mpeg_output cfg_mpeg_output;
	int rc;
	u16 cmd_result = 0;

	/* STOP demodulator: resets FEC and VSB HW via the SCU command interface */
	cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB | SCU_RAM_COMMAND_CMD_DEMOD_STOP;
	cmd_scu.parameter_len = 0;
	cmd_scu.result_len = 1;
	cmd_scu.parameter = NULL;
	cmd_scu.result = &cmd_result;
	rc = scu_command(dev_addr, &cmd_scu);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* stop all comm_exec */
	rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	if (primary) {
		/* Primary instance also owns the whole IQM and the analog front end */
		rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = set_iqm_af(demod, false);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	} else {
		/* Secondary instance: stop only the individual IQM sub-blocks */
		rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	}
	/*
	 * NOTE(review): only enable_mpeg_output is initialized here; the rest of
	 * cfg_mpeg_output stays indeterminate (power_down_qam memcpys from
	 * common_attr->mpeg_cfg first) — confirm ctrl_set_cfg_mpeg_output only
	 * reads the enable flag when disabling.
	 */
	cfg_mpeg_output.enable_mpeg_output = false;
	rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	return 0;
rw_error:
	return rc;
}

/**
* \fn int set_vsb_leak_n_gain ()
* \brief Set ATSC demod: load the FFE/DFE equalizer leak-ratio and gain tables.
* \param demod instance of demodulator.
* \return int.
*/
static int set_vsb_leak_n_gain(struct drx_demod_instance *demod)
{
	struct i2c_device_addr *dev_addr = NULL;
	int rc;

	/* Equalizer coefficient tables, byte-packed (DRXJ_16TO8) for block write */
	const u8 vsb_ffe_leak_gain_ram0[] = {
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO1 */
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO2 */
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO3 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO4 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO5 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO6 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO7 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO8 */
		DRXJ_16TO8(0xf),	/* FFETRAINLKRATIO9 */
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO10 */
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO11 */
		DRXJ_16TO8(0x8),	/* FFETRAINLKRATIO12 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO1 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO2 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO3 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO4 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO5 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO6 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO7 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO8 */
		DRXJ_16TO8(0x20),	/* FFERCA1TRAINLKRATIO9 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO10 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO11 */
		DRXJ_16TO8(0x10),	/* FFERCA1TRAINLKRATIO12 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO1 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO2 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO3 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO4 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO5 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO6 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO7 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO8 */
		DRXJ_16TO8(0x20),	/* FFERCA1DATALKRATIO9 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO10 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO11 */
		DRXJ_16TO8(0x10),	/* FFERCA1DATALKRATIO12 */
		DRXJ_16TO8(0x10),	/*
FFERCA2TRAINLKRATIO1 */ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO2 */ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO3 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO4 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO5 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO6 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO7 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO8 */ DRXJ_16TO8(0x20), /* FFERCA2TRAINLKRATIO9 */ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO10 */ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO11 */ DRXJ_16TO8(0x10), /* FFERCA2TRAINLKRATIO12 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO1 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO2 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO3 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO4 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO5 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO6 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO7 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO8 */ DRXJ_16TO8(0x20), /* FFERCA2DATALKRATIO9 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO10 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO11 */ DRXJ_16TO8(0x10), /* FFERCA2DATALKRATIO12 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO1 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO2 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO3 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO4 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO5 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO6 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO7 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO8 */ DRXJ_16TO8(0x0e), /* FFEDDM1TRAINLKRATIO9 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO10 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO11 */ DRXJ_16TO8(0x07), /* FFEDDM1TRAINLKRATIO12 */ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO1 */ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO2 */ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO3 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO4 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO5 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO6 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO7 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO8 */ DRXJ_16TO8(0x0e), /* FFEDDM1DATALKRATIO9 */ 
DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO10 */ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO11 */ DRXJ_16TO8(0x07), /* FFEDDM1DATALKRATIO12 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO1 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO2 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO3 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO4 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO5 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO6 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO7 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO8 */ DRXJ_16TO8(0x0c), /* FFEDDM2TRAINLKRATIO9 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO10 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO11 */ DRXJ_16TO8(0x06), /* FFEDDM2TRAINLKRATIO12 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO1 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO2 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO3 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO4 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO5 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO6 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO7 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO8 */ DRXJ_16TO8(0x0c), /* FFEDDM2DATALKRATIO9 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO10 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO11 */ DRXJ_16TO8(0x06), /* FFEDDM2DATALKRATIO12 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN1 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN2 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN3 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN4 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN5 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN6 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN7 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN8 */ DRXJ_16TO8(0x4040), /* FIRTRAINGAIN9 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN10 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN11 */ DRXJ_16TO8(0x2020), /* FIRTRAINGAIN12 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN1 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN2 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN3 */ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN4 */ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN5 */ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN6 */ DRXJ_16TO8(0x1010), /* FIRRCA1GAIN7 */ DRXJ_16TO8(0x1010) /* FIRRCA1GAIN8 
*/ }; const u8 vsb_ffe_leak_gain_ram1[] = { DRXJ_16TO8(0x1010), /* FIRRCA1GAIN9 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN10 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN11 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN12 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN1 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN2 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN3 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN4 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN5 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN6 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN7 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN8 */ DRXJ_16TO8(0x1010), /* FIRRCA2GAIN9 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN10 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN11 */ DRXJ_16TO8(0x0808), /* FIRRCA2GAIN12 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN1 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN2 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN3 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN4 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN5 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN6 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN7 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN8 */ DRXJ_16TO8(0x0606), /* FIRDDM1GAIN9 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN10 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN11 */ DRXJ_16TO8(0x0303), /* FIRDDM1GAIN12 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN1 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN2 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN3 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN4 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN5 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN6 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN7 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN8 */ DRXJ_16TO8(0x0505), /* FIRDDM2GAIN9 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN10 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN11 */ DRXJ_16TO8(0x0303), /* FIRDDM2GAIN12 */ DRXJ_16TO8(0x001f), /* DFETRAINLKRATIO */ DRXJ_16TO8(0x01ff), /* DFERCA1TRAINLKRATIO */ DRXJ_16TO8(0x01ff), /* DFERCA1DATALKRATIO */ DRXJ_16TO8(0x004f), /* DFERCA2TRAINLKRATIO */ DRXJ_16TO8(0x004f), /* DFERCA2DATALKRATIO */ DRXJ_16TO8(0x01ff), /* DFEDDM1TRAINLKRATIO */ DRXJ_16TO8(0x01ff), /* DFEDDM1DATALKRATIO */ DRXJ_16TO8(0x0352), /* DFEDDM2TRAINLKRATIO */ DRXJ_16TO8(0x0352), /* 
DFEDDM2DATALKRATIO */ DRXJ_16TO8(0x0000), /* DFETRAINGAIN */ DRXJ_16TO8(0x2020), /* DFERCA1GAIN */ DRXJ_16TO8(0x1010), /* DFERCA2GAIN */ DRXJ_16TO8(0x1818), /* DFEDDM1GAIN */ DRXJ_16TO8(0x1212) /* DFEDDM2GAIN */ }; dev_addr = demod->my_i2c_dev_addr; rc = drxdap_fasi_write_block(dev_addr, VSB_SYSCTRL_RAM0_FFETRAINLKRATIO1__A, sizeof(vsb_ffe_leak_gain_ram0), ((u8 *)vsb_ffe_leak_gain_ram0), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, VSB_SYSCTRL_RAM1_FIRRCA1GAIN9__A, sizeof(vsb_ffe_leak_gain_ram1), ((u8 *)vsb_ffe_leak_gain_ram1), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /** * \fn int set_vsb() * \brief Set 8VSB demod. * \param demod instance of demodulator. * \return int. * */ static int set_vsb(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = NULL; int rc; struct drx_common_attr *common_attr = NULL; struct drxjscu_cmd cmd_scu; struct drxj_data *ext_attr = NULL; u16 cmd_result = 0; u16 cmd_param = 0; const u8 vsb_taps_re[] = { DRXJ_16TO8(-2), /* re0 */ DRXJ_16TO8(4), /* re1 */ DRXJ_16TO8(1), /* re2 */ DRXJ_16TO8(-4), /* re3 */ DRXJ_16TO8(1), /* re4 */ DRXJ_16TO8(4), /* re5 */ DRXJ_16TO8(-3), /* re6 */ DRXJ_16TO8(-3), /* re7 */ DRXJ_16TO8(6), /* re8 */ DRXJ_16TO8(1), /* re9 */ DRXJ_16TO8(-9), /* re10 */ DRXJ_16TO8(3), /* re11 */ DRXJ_16TO8(12), /* re12 */ DRXJ_16TO8(-9), /* re13 */ DRXJ_16TO8(-15), /* re14 */ DRXJ_16TO8(17), /* re15 */ DRXJ_16TO8(19), /* re16 */ DRXJ_16TO8(-29), /* re17 */ DRXJ_16TO8(-22), /* re18 */ DRXJ_16TO8(45), /* re19 */ DRXJ_16TO8(25), /* re20 */ DRXJ_16TO8(-70), /* re21 */ DRXJ_16TO8(-28), /* re22 */ DRXJ_16TO8(111), /* re23 */ DRXJ_16TO8(30), /* re24 */ DRXJ_16TO8(-201), /* re25 */ DRXJ_16TO8(-31), /* re26 */ DRXJ_16TO8(629) /* re27 */ }; dev_addr = demod->my_i2c_dev_addr; common_attr = (struct drx_common_attr *) demod->my_common_attr; ext_attr = (struct drxj_data *) demod->my_ext_attr; /* stop all comm_exec */ 
rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* reset demodulator */ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB | SCU_RAM_COMMAND_CMD_DEMOD_RESET; cmd_scu.parameter_len = 0; cmd_scu.result_len = 1; cmd_scu.parameter = NULL; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_DCF_BYPASS__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FS_ADJ_SEL__A, IQM_FS_ADJ_SEL_B_VSB, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_ADJ_SEL__A, IQM_RC_ADJ_SEL_B_VSB, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->iqm_rc_rate_ofs = 0x00AD0D79; rc = drxdap_fasi_write_reg32(dev_addr, IQM_RC_RATE_OFS_LO__A, ext_attr->iqm_rc_rate_ofs, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CFAGC_GAINSHIFT__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto 
rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_CROUT_ENA__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_STRETCH__A, 28, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RT_ACTIVE__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SYMMETRIC__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_MIDTAP__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_OUT_ENA__A, IQM_CF_OUT_ENA_VSB__M, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE__A, 1393, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BNTHRESH__A, 330, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* set higher threshold */ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CLPLASTNUM__A, 90, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* burst detection on */ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA1__A, 0x0042, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } 
/* drop thresholds by 1 dB */ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA2__A, 0x0053, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* drop thresholds by 2 dB */ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_EQCTRL__A, 0x1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* cma on */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_GPIO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* GPIO */ /* Initialize the FEC Subsystem */ rc = drxj_dap_write_reg16(dev_addr, FEC_TOP_ANNEX__A, FEC_TOP_ANNEX_D, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } { u16 fec_oc_snc_mode = 0; rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* output data even when not locked */ rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode | FEC_OC_SNC_MODE_UNLOCK_ENABLE__M, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } /* set clip */ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_LEN__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_TH__A, 470, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SNS_LEN__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0xD4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* no transparent, no A&C framing; parity is set in mpegoutput */ { u16 fec_oc_reg_mode = 0; rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_OC_MODE__A, fec_oc_reg_mode & (~(FEC_OC_MODE_TRANSPARENT__M | FEC_OC_MODE_CLEAR__M | FEC_OC_MODE_RETAIN_FRAMING__M)), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } rc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_LO__A, 
0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* timeout counter for restarting */ rc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_HI__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MODE__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* bypass disabled */ /* initialize RS packet error measurement parameters */ rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PERIOD__A, FEC_RS_MEASUREMENT_PERIOD, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PRESCALE__A, FEC_RS_MEASUREMENT_PRESCALE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* init measurement period of MER/SER */ rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_MEASUREMENT_PERIOD__A, VSB_TOP_MEASUREMENT_PERIOD, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_reg32(dev_addr, SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_MEAS_COUNT__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CKGN1TRK__A, 128, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* B-Input to ADC, PGA+filter in standby */ if (!ext_attr->has_lna) { rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AMUX__A, 0x02, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } /* turn on IQMAF. 
It has to be in front of setAgc**() */ rc = set_iqm_af(demod, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = adc_synchronization(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = init_agc(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_agc_if(demod, &(ext_attr->vsb_if_agc_cfg), false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_agc_rf(demod, &(ext_attr->vsb_rf_agc_cfg), false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } { /* TODO fix this, store a struct drxj_cfg_afe_gain structure in struct drxj_data instead of only the gain */ struct drxj_cfg_afe_gain vsb_pga_cfg = { DRX_STANDARD_8VSB, 0 }; vsb_pga_cfg.gain = ext_attr->vsb_pga_cfg; rc = ctrl_set_cfg_afe_gain(demod, &vsb_pga_cfg); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } rc = ctrl_set_cfg_pre_saw(demod, &(ext_attr->vsb_pre_saw_cfg)); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Mpeg output has to be in front of FEC active */ rc = set_mpegtei_handling(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = bit_reverse_mpeg_output(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_mpeg_start_width(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } { /* TODO: move to set_standard after hardware reset value problem is solved */ /* Configure initial MPEG output */ struct drx_cfg_mpeg_output cfg_mpeg_output; memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output)); cfg_mpeg_output.enable_mpeg_output = true; rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } /* TBD: what parameters should be set */ cmd_param = 0x00; /* Default mode AGC on, etc */ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM; cmd_scu.parameter_len = 1; cmd_scu.result_len = 1; cmd_scu.parameter = &cmd_param; cmd_scu.result = &cmd_result; rc 
= scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEAGC_GAINSHIFT__A, 0x0004, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0x00D2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SYSSMTRNCTRL__A, VSB_TOP_SYSSMTRNCTRL__PRE | VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__M, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEDETCTRL__A, 0x142, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_LBAGCREFLVL__A, 640, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1ACQ__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN2TRK__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* start demodulator */ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB | SCU_RAM_COMMAND_CMD_DEMOD_START; cmd_scu.parameter_len = 0; cmd_scu.result_len = 1; cmd_scu.parameter = NULL; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /** * \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr 
*dev_addr, u32 *pck_errs, u32 *pck_count)
* \brief Get the values of packet error in 8VSB mode
* \return Error code
*/
static int get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr,
				   u32 *pck_errs, u32 *pck_count)
{
	int rc;
	u16 data = 0;
	u16 period = 0;
	u16 prescale = 0;
	u16 packet_errors_mant = 0;
	u16 packet_errors_exp = 0;

	/* Failure count register holds a mantissa/exponent encoded value */
	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_FAILURES__A, &data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	packet_errors_mant = data & FEC_RS_NR_FAILURES_FIXED_MANT__M;
	packet_errors_exp = (data & FEC_RS_NR_FAILURES_EXP__M)
	    >> FEC_RS_NR_FAILURES_EXP__B;
	period = FEC_RS_MEASUREMENT_PERIOD;
	prescale = FEC_RS_MEASUREMENT_PRESCALE;
	/* packet error rate = (error packet number) per second */
	/* 77.3 us is time for per packet */
	if (period * prescale == 0) {
		pr_err("error: period and/or prescale is zero!\n");
		return -EIO;
	}
	/* Decode mantissa * 2^exponent; count is the measurement window size */
	*pck_errs = packet_errors_mant * (1 << packet_errors_exp);
	*pck_count = period * prescale * 77;

	return 0;
rw_error:
	return rc;
}

/**
* \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber)
* \brief Get the values of ber in VSB mode
* \return Error code
*/
static int get_vs_bpost_viterbi_ber(struct i2c_device_addr *dev_addr,
				    u32 *ber, u32 *cnt)
{
	int rc;
	u16 data = 0;
	u16 period = 0;
	u16 prescale = 0;
	u16 bit_errors_mant = 0;
	u16 bit_errors_exp = 0;

	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	period = FEC_RS_MEASUREMENT_PERIOD;
	prescale = FEC_RS_MEASUREMENT_PRESCALE;

	/* Bit-error register is mantissa/exponent encoded, like the RS failures */
	bit_errors_mant = data & FEC_RS_NR_BIT_ERRORS_FIXED_MANT__M;
	bit_errors_exp = (data & FEC_RS_NR_BIT_ERRORS_EXP__M)
	    >> FEC_RS_NR_BIT_ERRORS_EXP__B;

	*cnt = period * prescale * 207 * ((bit_errors_exp > 2) ? 1 : 8);

	/* Saturate the reported BER once the decoded error count exceeds 68700 */
	if (((bit_errors_mant << bit_errors_exp) >> 3) > 68700)
		*ber = (*cnt) * 26570;
	else {
		/*
		 * NOTE(review): this zero check guards no division in this
		 * branch; it looks vestigial — confirm before removing.
		 */
		if (period * prescale == 0) {
			pr_err("error: period and/or prescale is zero!\n");
			return -EIO;
		}
		*ber = bit_errors_mant << ((bit_errors_exp > 2) ?
			(bit_errors_exp - 3) : bit_errors_exp);
	}

	return 0;
rw_error:
	return rc;
}

/**
* \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber)
* \brief Get the values of ber in VSB mode
* \return Error code
*/
static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
				   u32 *ber, u32 *cnt)
{
	u16 data = 0;
	int rc;

	/* Symbol-error counter is read directly; no mantissa/exponent decode */
	rc = drxj_dap_read_reg16(dev_addr, VSB_TOP_NR_SYM_ERRS__A, &data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		return -EIO;
	}
	*ber = data;
	*cnt = VSB_TOP_MEASUREMENT_PERIOD * SYMBOLS_PER_SEGMENT;

	return 0;
}

/**
* \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
* \brief Get the values of MER
* \return Error code
*/
static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
{
	int rc;
	u16 data_hi = 0;

	rc = drxj_dap_read_reg16(dev_addr, VSB_TOP_ERR_ENERGY_H__A, &data_hi, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/*
	 * Fixed-point MER in hundredths of dB: log(reference energy 21504)
	 * minus log(measured error energy), via the log1_times100() helper.
	 */
	*mer = (u16) (log1_times100(21504) - log1_times100((data_hi << 6) / 52));

	return 0;
rw_error:
	return rc;
}

/*============================================================================*/
/*== END 8VSB DATAPATH FUNCTIONS ==*/
/*============================================================================*/

/*============================================================================*/
/*============================================================================*/
/*== QAM DATAPATH FUNCTIONS ==*/
/*============================================================================*/
/*============================================================================*/

/**
* \fn int power_down_qam ()
* \brief Power down QAM related blocks.
* \param demod instance of demodulator.
* \param primary true if this instance owns the IQM/AF front end.
* \return int.
*/
static int power_down_qam(struct drx_demod_instance *demod, bool primary)
{
	struct drxjscu_cmd cmd_scu = { /* command */ 0,
		/* parameter_len */ 0,
		/* result_len */ 0,
		/* *parameter */ NULL,
		/* *result */ NULL
	};
	int rc;
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	struct drx_cfg_mpeg_output cfg_mpeg_output;
	struct drx_common_attr *common_attr = demod->my_common_attr;
	u16 cmd_result = 0;

	/*
	   STOP demodulator
	   resets IQM, QAM and FEC HW blocks
	 */
	/* stop all comm_exec */
	rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_STOP;
	cmd_scu.parameter_len = 0;
	cmd_scu.result_len = 1;
	cmd_scu.parameter = NULL;
	cmd_scu.result = &cmd_result;
	rc = scu_command(dev_addr, &cmd_scu);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	if (primary) {
		/* Primary instance also owns the whole IQM and the analog front end */
		rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = set_iqm_af(demod, false);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	} else {
		/* Secondary instance: stop only the individual IQM sub-blocks */
		rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	}

	/* Disable MPEG output, keeping all other MPEG settings from common_attr */
	memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
	cfg_mpeg_output.enable_mpeg_output = false;
	rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	return 0;
rw_error:
	return rc;
}

/*============================================================================*/

/**
* \fn int set_qam_measurement ()
* \brief Setup of the QAM Measurement intervals for signal quality
* \param demod instance of demod.
* \param constellation current constellation.
* \param symbol_rate current symbol rate.
* \return int.
*
* NOTE:
* Take into account that for certain settings the errorcounters can overflow.
* The implementation does not check this.
*
* TODO: overriding the ext_attr->fec_bits_desired by constellation dependent
* constants to get a measurement period of approx. 1 sec. Remove fec_bits_desired
* field ?
*
*/
#ifndef DRXJ_VSB_ONLY
static int set_qam_measurement(struct drx_demod_instance *demod,
			       enum drx_modulation constellation,
			       u32 symbol_rate)
{
	struct i2c_device_addr *dev_addr = NULL;	/* device address for I2C writes */
	struct drxj_data *ext_attr = NULL;	/* Global data container for DRXJ specific data */
	int rc;
	u32 fec_bits_desired = 0;	/* BER accounting period */
	u16 fec_rs_plen = 0;	/* defines RS BER measurement period */
	u16 fec_rs_prescale = 0;	/* ReedSolomon Measurement Prescale */
	u32 fec_rs_period = 0;	/* Value for corresponding I2C register */
	u32 fec_rs_bit_cnt = 0;	/* Actual precise amount of bits */
	u32 fec_oc_snc_fail_period = 0;	/* Value for corresponding I2C register */
	u32 qam_vd_period = 0;	/* Value for corresponding I2C register */
	u32 qam_vd_bit_cnt = 0;	/* Actual precise amount of bits */
	u16 fec_vd_plen = 0;	/* no of trellis symbols: VD SER measurement period */
	u16 qam_vd_prescale = 0;	/* Viterbi Measurement Prescale */

	dev_addr = demod->my_i2c_dev_addr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;
	fec_bits_desired = ext_attr->fec_bits_desired;
	fec_rs_prescale = ext_attr->fec_rs_prescale;

	switch
(constellation) { case DRX_CONSTELLATION_QAM16: fec_bits_desired = 4 * symbol_rate; break; case DRX_CONSTELLATION_QAM32: fec_bits_desired = 5 * symbol_rate; break; case DRX_CONSTELLATION_QAM64: fec_bits_desired = 6 * symbol_rate; break; case DRX_CONSTELLATION_QAM128: fec_bits_desired = 7 * symbol_rate; break; case DRX_CONSTELLATION_QAM256: fec_bits_desired = 8 * symbol_rate; break; default: return -EINVAL; } /* Parameters for Reed-Solomon Decoder */ /* fecrs_period = (int)ceil(FEC_BITS_DESIRED/(fecrs_prescale*plen)) */ /* rs_bit_cnt = fecrs_period*fecrs_prescale*plen */ /* result is within 32 bit arithmetic -> */ /* no need for mult or frac functions */ /* TODO: use constant instead of calculation and remove the fec_rs_plen in ext_attr */ switch (ext_attr->standard) { case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_C: fec_rs_plen = 204 * 8; break; case DRX_STANDARD_ITU_B: fec_rs_plen = 128 * 7; break; default: return -EINVAL; } ext_attr->fec_rs_plen = fec_rs_plen; /* for getSigQual */ fec_rs_bit_cnt = fec_rs_prescale * fec_rs_plen; /* temp storage */ if (fec_rs_bit_cnt == 0) { pr_err("error: fec_rs_bit_cnt is zero!\n"); return -EIO; } fec_rs_period = fec_bits_desired / fec_rs_bit_cnt + 1; /* ceil */ if (ext_attr->standard != DRX_STANDARD_ITU_B) fec_oc_snc_fail_period = fec_rs_period; /* limit to max 16 bit value (I2C register width) if needed */ if (fec_rs_period > 0xFFFF) fec_rs_period = 0xFFFF; /* write corresponding registers */ switch (ext_attr->standard) { case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_C: break; case DRX_STANDARD_ITU_B: switch (constellation) { case DRX_CONSTELLATION_QAM64: fec_rs_period = 31581; fec_oc_snc_fail_period = 17932; break; case DRX_CONSTELLATION_QAM256: fec_rs_period = 45446; fec_oc_snc_fail_period = 25805; break; default: return -EINVAL; } break; default: return -EINVAL; } rc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_FAIL_PERIOD__A, (u16)fec_oc_snc_fail_period, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = 
drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PERIOD__A, (u16)fec_rs_period, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PRESCALE__A, fec_rs_prescale, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->fec_rs_period = (u16) fec_rs_period; ext_attr->fec_rs_prescale = fec_rs_prescale; rc = drxdap_fasi_write_reg32(dev_addr, SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_MEAS_COUNT__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (ext_attr->standard == DRX_STANDARD_ITU_B) { /* Parameters for Viterbi Decoder */ /* qamvd_period = (int)ceil(FEC_BITS_DESIRED/ */ /* (qamvd_prescale*plen*(qam_constellation+1))) */ /* vd_bit_cnt = qamvd_period*qamvd_prescale*plen */ /* result is within 32 bit arithmetic -> */ /* no need for mult or frac functions */ /* a(8 bit) * b(8 bit) = 16 bit result => mult32 not needed */ fec_vd_plen = ext_attr->fec_vd_plen; qam_vd_prescale = ext_attr->qam_vd_prescale; qam_vd_bit_cnt = qam_vd_prescale * fec_vd_plen; /* temp storage */ switch (constellation) { case DRX_CONSTELLATION_QAM64: /* a(16 bit) * b(4 bit) = 20 bit result => mult32 not needed */ qam_vd_period = qam_vd_bit_cnt * (QAM_TOP_CONSTELLATION_QAM64 + 1) * (QAM_TOP_CONSTELLATION_QAM64 + 1); break; case DRX_CONSTELLATION_QAM256: /* a(16 bit) * b(5 bit) = 21 bit result => mult32 not needed */ qam_vd_period = qam_vd_bit_cnt * (QAM_TOP_CONSTELLATION_QAM256 + 1) * (QAM_TOP_CONSTELLATION_QAM256 + 1); break; default: return -EINVAL; } if (qam_vd_period == 0) { pr_err("error: qam_vd_period is zero!\n"); return -EIO; } qam_vd_period = fec_bits_desired / qam_vd_period; /* limit to max 16 bit value (I2C register width) if needed */ if 
(qam_vd_period > 0xFFFF) qam_vd_period = 0xFFFF; /* a(16 bit) * b(16 bit) = 32 bit result => mult32 not needed */ qam_vd_bit_cnt *= qam_vd_period; rc = drxj_dap_write_reg16(dev_addr, QAM_VD_MEASUREMENT_PERIOD__A, (u16)qam_vd_period, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_VD_MEASUREMENT_PRESCALE__A, qam_vd_prescale, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->qam_vd_period = (u16) qam_vd_period; ext_attr->qam_vd_prescale = qam_vd_prescale; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int set_qam16 () * \brief QAM16 specific setup * \param demod instance of demod. * \return int. */ static int set_qam16(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(2), /* fun0 */ DRXJ_16TO8(2), /* fun1 */ DRXJ_16TO8(2), /* fun2 */ DRXJ_16TO8(2), /* fun3 */ DRXJ_16TO8(3), /* fun4 */ DRXJ_16TO8(3), /* fun5 */ }; const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(13517), /* RAD0 */ DRXJ_16TO8(13517), /* RAD1 */ DRXJ_16TO8(13517), /* RAD2 */ DRXJ_16TO8(13517), /* RAD3 */ DRXJ_16TO8(13517), /* RAD4 */ DRXJ_16TO8(13517), /* RAD5 */ }; rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 140, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 120, 0); if (rc != 0) { pr_err("error %d\n", rc); 
goto rw_error;
	}
	/*
	 * Remaining QAM-16 FSM thresholds/limits followed by the lock-control
	 * loop gains (fine/medium/coarse). All values are hardware tuning
	 * constants from vendor code; their meaning is defined by the DRX-J
	 * register map.
	 */
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 230, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 95, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 105, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 56, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 16, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 220, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* LCAVG offsets 3..5 are negative values stored as u16 two's complement. */
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 25, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 6, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-24), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-65), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-127), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* Lock-control loop gains: CA/CP/CI/EP/EI/CF, fine -> coarse. */
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 10, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 50, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 240, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 40960, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int set_qam32 () * \brief QAM32 specific setup * \param demod instance of demod. * \return int. */ static int set_qam32(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(3), /* fun0 */ DRXJ_16TO8(3), /* fun1 */ DRXJ_16TO8(3), /* fun2 */ DRXJ_16TO8(3), /* fun3 */ DRXJ_16TO8(4), /* fun4 */ DRXJ_16TO8(4), /* fun5 */ }; const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(6707), /* RAD0 */ DRXJ_16TO8(6707), /* RAD1 */ DRXJ_16TO8(6707), /* RAD2 */ DRXJ_16TO8(6707), /* RAD3 */ DRXJ_16TO8(6707), /* RAD4 */ DRXJ_16TO8(6707), /* RAD5 */ }; rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 90, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } 
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 170, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 100, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 56, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 140, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16)(-16), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-26), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-56), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-86), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0); if (rc != 0) { pr_err("error 
%d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 10, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 176, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = 
drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 20480, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int set_qam64 () * \brief QAM64 specific setup * \param demod instance of demod. * \return int. */ static int set_qam64(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; const u8 qam_dq_qual_fun[] = { /* this is hw reset value. no necessary to re-write */ DRXJ_16TO8(4), /* fun0 */ DRXJ_16TO8(4), /* fun1 */ DRXJ_16TO8(4), /* fun2 */ DRXJ_16TO8(4), /* fun3 */ DRXJ_16TO8(6), /* fun4 */ DRXJ_16TO8(6), /* fun5 */ }; const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(13336), /* RAD0 */ DRXJ_16TO8(12618), /* RAD1 */ DRXJ_16TO8(11988), /* RAD2 */ DRXJ_16TO8(13809), /* RAD3 */ DRXJ_16TO8(13809), /* RAD4 */ DRXJ_16TO8(15609), /* RAD5 */ }; rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 105, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0); if 
(rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 195, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 84, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 141, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 7, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-15), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16)(-45), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-80), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 
40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 30, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 48, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 160, 0); if (rc != 0) { pr_err("error %d\n", 
rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 43008, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int set_qam128 () * \brief QAM128 specific setup * \param demod: instance of demod. * \return int. */ static int set_qam128(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(6), /* fun0 */ DRXJ_16TO8(6), /* fun1 */ DRXJ_16TO8(6), /* fun2 */ DRXJ_16TO8(6), /* fun3 */ DRXJ_16TO8(9), /* fun4 */ DRXJ_16TO8(9), /* fun5 */ }; const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(6164), /* RAD0 */ DRXJ_16TO8(6598), /* RAD1 */ DRXJ_16TO8(6394), /* RAD2 */ DRXJ_16TO8(6409), /* RAD3 */ DRXJ_16TO8(6656), /* RAD4 */ DRXJ_16TO8(7238), /* RAD5 */ }; rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0); if (rc != 0) { pr_err("error 
%d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 140, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 100, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 65, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16)(-1), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-23), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0); if (rc != 0) { pr_err("error 
%d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 32, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 144, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = 
drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 20992, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int set_qam256 () * \brief QAM256 specific setup * \param demod: instance of demod. * \return int. */ static int set_qam256(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(8), /* fun0 */ DRXJ_16TO8(8), /* fun1 */ DRXJ_16TO8(8), /* fun2 */ DRXJ_16TO8(8), /* fun3 */ DRXJ_16TO8(12), /* fun4 */ DRXJ_16TO8(12), /* fun5 */ }; const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(12345), /* RAD0 */ DRXJ_16TO8(12345), /* RAD1 */ DRXJ_16TO8(13626), /* RAD2 */ DRXJ_16TO8(12931), /* RAD3 */ DRXJ_16TO8(14719), /* RAD4 */ DRXJ_16TO8(15356), /* RAD5 */ }; rc = drxdap_fasi_write_block(dev_addr, QAM_DQ_QUAL_FUN0__A, sizeof(qam_dq_qual_fun), ((u8 *)qam_dq_qual_fun), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, SCU_RAM_QAM_EQ_CMA_RAD0__A, sizeof(qam_eq_cma_rad), ((u8 *)qam_eq_cma_rad), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RTH__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FTH__A, 60, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_PTH__A, 100, 0); if (rc != 0) { pr_err("error %d\n", rc); goto 
rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_QTH__A, 150, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_CTH__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MTH__A, 110, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RATE_LIM__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_FREQ_LIM__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_COUNT_LIM__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, 74, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, 18, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, 13, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, 7, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_FINE__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CA_COARSE__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto 
rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_MEDIUM__A, 50, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CP_COARSE__A, 255, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_FINE__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_MEDIUM__A, 25, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CI_COARSE__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EP_COARSE__A, 24, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_FINE__A, 12, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_EI_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_FINE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_MEDIUM__A, 48, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF_COARSE__A, 80, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = 
drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_FINE__A, 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 15, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_LC_CF1_COARSE__A, 16, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_SL_SIG_POWER__A, 43520, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; }
/*============================================================================*/
/*
 * Bit flags for the 'op' argument of set_qam(): callers select which parts
 * of the configuration sequence are executed.
 * NOTE(review): QAM_SET_OP_SPECTRUM uses an upper-case '0X' prefix —
 * harmless, but inconsistent with the other two.
 */
#define QAM_SET_OP_ALL 0x1
#define QAM_SET_OP_CONSTELLATION 0x2
#define QAM_SET_OP_SPECTRUM 0X4
/**
 * \fn int set_qam ()
 * \brief Set QAM demod.
 * \param demod: instance of demod.
 * \param channel: pointer to channel data.
 * \param tuner_freq_offset: tuner frequency offset, forwarded to set_frequency()
 *        when QAM_SET_OP_SPECTRUM (or _ALL) is requested.
 * \param op: bit mask of QAM_SET_OP_* flags gating the STEP 1..5 sequence below.
 * \return int. 0 on success, negative errno on failure.
 *
 * NOTE(review): for DRX_STANDARD_ITU_B this function overwrites
 * channel->symbolrate with the nominal annex-B rate — the caller's
 * struct is modified as a side effect.
 */
static int set_qam(struct drx_demod_instance *demod, struct drx_channel *channel, s32 tuner_freq_offset, u32 op) { struct i2c_device_addr *dev_addr = NULL; struct drxj_data *ext_attr = NULL; struct drx_common_attr *common_attr = NULL; int rc; u32 adc_frequency = 0; u32 iqm_rc_rate = 0; u16 cmd_result = 0; u16 lc_symbol_freq = 0; u16 iqm_rc_stretch = 0; u16 set_env_parameters = 0; u16 set_param_parameters[2] = { 0 }; struct drxjscu_cmd cmd_scu = { /* command */ 0, /* parameter_len */ 0, /* result_len */ 0, /* parameter */ NULL, /* result */ NULL };
/*
 * Per-annex/constellation FIR coefficient tables for the IQM CF block
 * (28 entries, re0..re27). DRXJ_16TO8() presumably splits each signed
 * 16-bit coefficient into two bytes for the block write — TODO confirm
 * against the DRXJ_16TO8 definition.
 */
const u8 qam_a_taps[] = { DRXJ_16TO8(-1), /* re0 */ DRXJ_16TO8(1), /* re1 */ DRXJ_16TO8(1), /* re2 */ DRXJ_16TO8(-1), /* re3 */ DRXJ_16TO8(-1), /* re4 */ DRXJ_16TO8(2), /* re5 */ DRXJ_16TO8(1), /* re6 */ DRXJ_16TO8(-2), /* re7 */ DRXJ_16TO8(0), /* re8 */ DRXJ_16TO8(3), /* re9 */ DRXJ_16TO8(-1), /* re10 */ DRXJ_16TO8(-3), /* re11 */ DRXJ_16TO8(4), /* re12 */ DRXJ_16TO8(1), /* re13 */ DRXJ_16TO8(-8), /* re14 */ DRXJ_16TO8(4), /* re15 */ DRXJ_16TO8(13), /* re16 */ DRXJ_16TO8(-13), /* re17 */ DRXJ_16TO8(-19), /* re18 */ DRXJ_16TO8(28), /* re19 */ DRXJ_16TO8(25), /* re20 */ DRXJ_16TO8(-53), /* re21 */ DRXJ_16TO8(-31), /* re22 */ DRXJ_16TO8(96), /* re23 */ DRXJ_16TO8(37), /* re24 */ DRXJ_16TO8(-190), /* re25 */ DRXJ_16TO8(-40), /* re26 */ DRXJ_16TO8(619) /* re27 */ };
const u8 qam_b64_taps[] = { DRXJ_16TO8(0), /* re0 */ DRXJ_16TO8(-2), /* re1 */ DRXJ_16TO8(1), /* re2 */ DRXJ_16TO8(2), /* re3 */ DRXJ_16TO8(-2), /* re4 */ DRXJ_16TO8(0), /* re5 */ DRXJ_16TO8(4), /* re6 */ DRXJ_16TO8(-2), /* re7 */ DRXJ_16TO8(-4), /* re8 */ DRXJ_16TO8(4), /* re9 */ DRXJ_16TO8(3), /* re10 */ DRXJ_16TO8(-6), /* re11 */ DRXJ_16TO8(0), /* re12 */ DRXJ_16TO8(6), /* re13 */ DRXJ_16TO8(-5), /* re14 */ DRXJ_16TO8(-3), /* re15 */ DRXJ_16TO8(11), /* re16 */ DRXJ_16TO8(-4), /* re17 */ DRXJ_16TO8(-19), /* re18 */ DRXJ_16TO8(19), /* re19 */ DRXJ_16TO8(28), /* re20 */ DRXJ_16TO8(-45), /* re21 */ DRXJ_16TO8(-36), /* re22 */ DRXJ_16TO8(90), /* re23 */ DRXJ_16TO8(42), /* re24 */ DRXJ_16TO8(-185), /* re25 */ DRXJ_16TO8(-46), /* re26 */ DRXJ_16TO8(614) /* re27 */ };
const u8 qam_b256_taps[] = { DRXJ_16TO8(-2), /* re0 */ DRXJ_16TO8(4), /* re1 */ DRXJ_16TO8(1), /* re2 */ DRXJ_16TO8(-4), /* re3 */ DRXJ_16TO8(0), /* re4 */ DRXJ_16TO8(4), /* re5 */ DRXJ_16TO8(-2), /* re6 */ DRXJ_16TO8(-4), /* re7 */ DRXJ_16TO8(5), /* re8 */ DRXJ_16TO8(2), /* re9 */ DRXJ_16TO8(-8), /* re10 */ DRXJ_16TO8(2), /* re11 */ DRXJ_16TO8(11), /* re12 */ DRXJ_16TO8(-8), /* re13 */ DRXJ_16TO8(-15), /* re14 */ DRXJ_16TO8(16), /* re15 */ DRXJ_16TO8(19), /* re16 */ DRXJ_16TO8(-27), /* re17 */ DRXJ_16TO8(-22), /* re18 */ DRXJ_16TO8(44), /* re19 */ DRXJ_16TO8(26), /* re20 */ DRXJ_16TO8(-69), /* re21 */ DRXJ_16TO8(-28), /* re22 */ DRXJ_16TO8(110), /* re23 */ DRXJ_16TO8(31), /* re24 */ DRXJ_16TO8(-201), /* re25 */ DRXJ_16TO8(-32), /* re26 */ DRXJ_16TO8(628) /* re27 */ };
const u8 qam_c_taps[] = { DRXJ_16TO8(-3), /* re0 */ DRXJ_16TO8(3), /* re1 */ DRXJ_16TO8(2), /* re2 */ DRXJ_16TO8(-4), /* re3 */ DRXJ_16TO8(0), /* re4 */ DRXJ_16TO8(4), /* re5 */ DRXJ_16TO8(-1), /* re6 */ DRXJ_16TO8(-4), /* re7 */ DRXJ_16TO8(3), /* re8 */ DRXJ_16TO8(3), /* re9 */ DRXJ_16TO8(-5), /* re10 */ DRXJ_16TO8(0), /* re11 */ DRXJ_16TO8(9), /* re12 */ DRXJ_16TO8(-4), /* re13 */ DRXJ_16TO8(-12), /* re14 */ DRXJ_16TO8(10), /* re15 */ DRXJ_16TO8(16), /* re16 */ DRXJ_16TO8(-21), /* re17 */ DRXJ_16TO8(-20), /* re18 */ DRXJ_16TO8(37), /* re19 */ DRXJ_16TO8(25), /* re20 */ DRXJ_16TO8(-62), /* re21 */ DRXJ_16TO8(-28), /* re22 */ DRXJ_16TO8(105), /* re23 */ DRXJ_16TO8(31), /* re24 */ DRXJ_16TO8(-197), /* re25 */ DRXJ_16TO8(-33), /* re26 */ DRXJ_16TO8(626) /* re27 */ };
dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; common_attr = (struct drx_common_attr *) demod->my_common_attr;
/*
 * Compute rate-converter ratio and loop-control symbol frequency.
 * ITU-B uses fixed precomputed constants (and forces the nominal
 * annex-B symbolrate into the caller's channel struct); other annexes
 * derive them from the ADC clock. frac28() presumably returns a
 * 28-bit fixed-point fraction — TODO confirm. A zero symbolrate is
 * rejected to avoid division by zero.
 */
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) { if (ext_attr->standard == DRX_STANDARD_ITU_B) { switch (channel->constellation) { case DRX_CONSTELLATION_QAM256: iqm_rc_rate = 0x00AE3562; lc_symbol_freq = QAM_LC_SYMBOL_FREQ_FREQ_QAM_B_256; channel->symbolrate = 5360537; iqm_rc_stretch = IQM_RC_STRETCH_QAM_B_256; break; case DRX_CONSTELLATION_QAM64: iqm_rc_rate = 0x00C05A0E; lc_symbol_freq = 409; channel->symbolrate = 5056941; iqm_rc_stretch = IQM_RC_STRETCH_QAM_B_64; break; default: return -EINVAL; } } else { adc_frequency = (common_attr->sys_clock_freq * 1000) / 3; if (channel->symbolrate == 0) { pr_err("error: channel symbolrate is zero!\n"); return -EIO; } iqm_rc_rate = (adc_frequency / channel->symbolrate) * (1 << 21) + (frac28 ((adc_frequency % channel->symbolrate), channel->symbolrate) >> 7) - (1 << 23); lc_symbol_freq = (u16) (frac28 (channel->symbolrate + (adc_frequency >> 13), adc_frequency) >> 16); if (lc_symbol_freq > 511) lc_symbol_freq = 511; iqm_rc_stretch = 21; }
/* Select SCU "set env"/"set param" arguments per annex (ITU-T J.83 A/B/C). */
if (ext_attr->standard == DRX_STANDARD_ITU_A) { set_env_parameters = QAM_TOP_ANNEX_A; /* annex */ set_param_parameters[0] = channel->constellation; /* constellation */ set_param_parameters[1] = DRX_INTERLEAVEMODE_I12_J17; /* interleave mode */ } else if (ext_attr->standard == DRX_STANDARD_ITU_B) { set_env_parameters = QAM_TOP_ANNEX_B; /* annex */ set_param_parameters[0] = channel->constellation; /* constellation */ set_param_parameters[1] = channel->interleavemode; /* interleave mode */ } else if (ext_attr->standard == DRX_STANDARD_ITU_C) { set_env_parameters = QAM_TOP_ANNEX_C; /* annex */ set_param_parameters[0] = channel->constellation; /* constellation */ set_param_parameters[1] = DRX_INTERLEAVEMODE_I12_J17; /* interleave mode */ } else { return -EINVAL; } }
if (op & QAM_SET_OP_ALL) { /* STEP 1: reset demodulator resets IQM, QAM and FEC HW blocks resets SCU variables */ /* stop all comm_exec */ rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_RESET; cmd_scu.parameter_len = 0; cmd_scu.result_len = 1; cmd_scu.parameter = NULL; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) { /* STEP 2: configure demodulator -set env -set params (resets IQM,QAM,FEC HW; initializes some SCU variables ) */ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV; cmd_scu.parameter_len = 1; cmd_scu.result_len = 1; cmd_scu.parameter = &set_env_parameters; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM; cmd_scu.parameter_len = 2; cmd_scu.result_len = 1; cmd_scu.parameter = set_param_parameters; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* set symbol rate */ rc = drxdap_fasi_write_reg32(dev_addr, IQM_RC_RATE_OFS_LO__A, iqm_rc_rate, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->iqm_rc_rate_ofs = iqm_rc_rate; rc = set_qam_measurement(demod, channel->constellation, channel->symbolrate); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* STEP 3: enable the system in a mode where the ADC provides valid signal setup constellation independent registers */ /* from qam_cmd.py script (qam_driver_b) */ /* TODO: remove re-writes of HW reset values */
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_SPECTRUM)) { rc = set_frequency(demod, channel, tuner_freq_offset); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) { rc = drxj_dap_write_reg16(dev_addr, QAM_LC_SYMBOL_FREQ__A, lc_symbol_freq, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_STRETCH__A, iqm_rc_stretch, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* Constellation-independent front-end/AGC bring-up (QAM_SET_OP_ALL only). */
if (op & QAM_SET_OP_ALL) { if (!ext_attr->has_lna) { rc = drxj_dap_write_reg16(dev_addr, IQM_AF_AMUX__A, 0x02, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SYMMETRIC__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_MIDTAP__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_OUT_ENA__A, IQM_CF_OUT_ENA_QAM__M, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_WR_RSV_0__A, 0x5f, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* scu temporary shut down agc */ rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SYNC_SEL__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_LEN__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_TH__A, 448, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_SNS_LEN__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PDREF__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_STDBY__A, 0x10, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PGA_GAIN__A, 11, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, IQM_CF_SCALE_SH__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_TIMEOUT__A, QAM_SY_TIMEOUT__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */
/* Sync watermarks: ITU-B keeps HW reset defaults; A/C pick per-constellation values. */
if (ext_attr->standard == DRX_STANDARD_ITU_B) { rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, QAM_SY_SYNC_LWM__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, QAM_SY_SYNC_AWM__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, QAM_SY_SYNC_HWM__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ } else { switch (channel->constellation) { case DRX_CONSTELLATION_QAM16: case DRX_CONSTELLATION_QAM64: case DRX_CONSTELLATION_QAM256: rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, 0x03, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, 0x04, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, QAM_SY_SYNC_HWM__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ break; case DRX_CONSTELLATION_QAM32: case DRX_CONSTELLATION_QAM128: rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_LWM__A, 0x03, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_AWM__A, 0x05, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_SY_SYNC_HWM__A, 0x06, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EIO; } /* switch */ }
rc = drxj_dap_write_reg16(dev_addr, QAM_LC_MODE__A, QAM_LC_MODE__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*! reset default val ! */ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_RATE_LIMIT__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_LPF_FACTORP__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_LPF_FACTORI__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_MODE__A, 7, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB0__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB1__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB2__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB3__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB4__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB5__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB6__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB8__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB9__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB10__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB12__A, 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB15__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB16__A, 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB20__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_QUAL_TAB25__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_FS_ADJ_SEL__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_RC_ADJ_SEL__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_ADJ_SEL__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_GPIO__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
/* No more resets of the IQM, current standard correctly set => now AGCs can be configured. */
/* turn on IQMAF. It has to be in front of setAgc**() */
rc = set_iqm_af(demod, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = adc_synchronization(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = init_agc(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_agc_if(demod, &(ext_attr->qam_if_agc_cfg), false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_agc_rf(demod, &(ext_attr->qam_rf_agc_cfg), false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } { /* TODO fix this, store a struct drxj_cfg_afe_gain structure in struct drxj_data instead of only the gain */ struct drxj_cfg_afe_gain qam_pga_cfg = { DRX_STANDARD_ITU_B, 0 }; qam_pga_cfg.gain = ext_attr->qam_pga_cfg; rc = ctrl_set_cfg_afe_gain(demod, &qam_pga_cfg); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } rc = ctrl_set_cfg_pre_saw(demod, &(ext_attr->qam_pre_saw_cfg)); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* Download the annex/constellation-specific FIR taps (same table for RE and IM). */
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) { if (ext_attr->standard == DRX_STANDARD_ITU_A) { rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_a_taps), ((u8 *)qam_a_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_a_taps), ((u8 *)qam_a_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } else if (ext_attr->standard == DRX_STANDARD_ITU_B) { switch (channel->constellation) { case DRX_CONSTELLATION_QAM64: rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_b64_taps), ((u8 *)qam_b64_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_b64_taps), ((u8 *)qam_b64_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_CONSTELLATION_QAM256: rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_b256_taps), ((u8 *)qam_b256_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_b256_taps), ((u8 *)qam_b256_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EIO; } } else if (ext_attr->standard == DRX_STANDARD_ITU_C) { rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(qam_c_taps), ((u8 *)qam_c_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(qam_c_taps), ((u8 *)qam_c_taps), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* STEP 4: constellation specific setup */
switch (channel->constellation) { case DRX_CONSTELLATION_QAM16: rc = set_qam16(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_CONSTELLATION_QAM32: rc = set_qam32(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_CONSTELLATION_QAM64: rc = set_qam64(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_CONSTELLATION_QAM128: rc = set_qam128(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_CONSTELLATION_QAM256: rc = set_qam256(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: return -EIO; } /* switch */ }
if ((op & QAM_SET_OP_ALL)) { rc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Mpeg output has to be in front of FEC active */ rc = set_mpegtei_handling(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = bit_reverse_mpeg_output(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_mpeg_start_width(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } { /* TODO: move to set_standard after hardware reset value problem is solved */ /* Configure initial MPEG output */ struct drx_cfg_mpeg_output 
cfg_mpeg_output; memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output)); cfg_mpeg_output.enable_mpeg_output = true; rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } }
if ((op & QAM_SET_OP_ALL) || (op & QAM_SET_OP_CONSTELLATION)) { /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */ cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_START; cmd_scu.parameter_len = 0; cmd_scu.result_len = 1; cmd_scu.parameter = NULL; cmd_scu.result = &cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* Finally release the IQM/QAM/FEC blocks into their active state. */
rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_COMM_EXEC__A, QAM_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; }
/*============================================================================*/
/* Forward declaration: used by the qam64auto()/qam256auto() lock loops below. */
static int ctrl_get_qam_sig_quality(struct drx_demod_instance *demod);
/*
 * qam_flip_spec() - mirror the received spectrum in-place.
 * Negates the IQM FS rate offset (two's complement), inverts the sign of
 * the imaginary DQ/FQ equalizer taps, and toggles ext_attr->pos_image.
 * Statement order is significant: the LC/EQU/ACQ control loops are frozen
 * first and only re-enabled at the end.
 * Returns 0 on success, negative errno on an I2C register access failure.
 */
static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *channel) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; struct drxj_data *ext_attr = demod->my_ext_attr; int rc; u32 iqm_fs_rate_ofs = 0; u32 iqm_fs_rate_lo = 0; u16 qam_ctl_ena = 0; u16 data = 0; u16 equ_mode = 0; u16 fsm_state = 0; int i = 0; int ofsofs = 0; /* Silence the controlling of lc, equ, and the acquisition state machine */ rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, &qam_ctl_ena, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, qam_ctl_ena & ~(SCU_RAM_QAM_CTL_ENA_ACQ__M | SCU_RAM_QAM_CTL_ENA_EQU__M | SCU_RAM_QAM_CTL_ENA_LC__M), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* freeze the frequency control loop */ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF1__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, &iqm_fs_rate_ofs, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_LO__A, &iqm_fs_rate_lo, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
/* Two's-complement negation mirrors the spectrum; then compensate twice the offset-of-offset so the carrier lands on the mirrored position. */
ofsofs = iqm_fs_rate_lo - iqm_fs_rate_ofs; iqm_fs_rate_ofs = ~iqm_fs_rate_ofs + 1; iqm_fs_rate_ofs -= 2 * ofsofs; /* freeze dq/fq updating */ rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } data = (data & 0xfff9); rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* lc_cp / _ci / _ca */ rc = drxj_dap_write_reg16(dev_addr, QAM_LC_CI__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_LC_EP__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_LA_FACTOR__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* flip the spec */ rc = drxdap_fasi_write_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, iqm_fs_rate_ofs, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs; ext_attr->pos_image = (ext_attr->pos_image) ? false : true; /* freeze dq/fq updating */ rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } equ_mode = data; data = (data & 0xfff9); rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
/* Negate the imaginary part of the 28 DQ and 24 FQ equalizer taps. */
for (i = 0; i < 28; i++) { rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), -data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } for (i = 0; i < 24; i++) { rc = drxj_dap_read_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), -data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } data = equ_mode; rc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE_TGT__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; }
/* Poll (at most 100 iterations) until the acquisition FSM reaches state 4. NOTE(review): no delay inside the loop — this is a tight I2C busy-wait, and the loop gives up silently on timeout. */
i = 0; while ((fsm_state != 4) && (i++ < 100)) { rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE__A, &fsm_state, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } }
/* NOTE(review): magic mask 0x0016 presumably re-enables the ACQ/EQU/LC bits cleared above — verify against the SCU_RAM_QAM_CTL_ENA_*__M definitions. */
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, (qam_ctl_ena | 0x0016), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } return 0; rw_error: return rc; }
/* Lock-progress states for the qam64auto()/qam256auto() acquisition state machines below. */
#define NO_LOCK 0x0
#define DEMOD_LOCKED 0x1
#define SYNC_FLIPPED 0x2
#define SPEC_MIRRORED 0x4
/** * \fn int qam64auto () * \brief auto do sync pattern switching and mirroring. 
* \param demod: instance of demod. * \param channel: pointer to channel data. * \param tuner_freq_offset: tuner frequency offset. * \param lock_status: pointer to lock status.
* \return int. 0 on success (lock_status tells whether lock was achieved), negative errno on I2C failure.
*
* Polls the demod, walking NO_LOCK -> DEMOD_LOCKED -> SYNC_FLIPPED ->
* SPEC_MIRRORED: first flip the sync pattern, then mirror the spectrum,
* until FEC lock, DRX_NEVER_LOCK, or the overall timeout expires.
* NOTE(review): tuner_freq_offset is accepted but never used here.
*/ static int qam64auto(struct drx_demod_instance *demod, struct drx_channel *channel, s32 tuner_freq_offset, enum drx_lock_status *lock_status) { struct drxj_data *ext_attr = demod->my_ext_attr; struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; struct drx39xxj_state *state = dev_addr->user_data; struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache; int rc; u32 lck_state = NO_LOCK; u32 start_time = 0; u32 d_locked_time = 0; u32 timeout_ofs = 0; u16 data = 0; /* external attributes for storing acquired channel constellation */ *lock_status = DRX_NOT_LOCKED; start_time = jiffies_to_msecs(jiffies); lck_state = NO_LOCK;
do { rc = ctrl_lock_status(demod, lock_status); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } switch (lck_state) {
/* CNR above the 20800 threshold counts as a credible QAM-64 demod lock. NOTE(review): svalue is presumably in units of 0.001 dB (i.e. 20.8 dB) — confirm against the DVB frontend stats convention. */
case NO_LOCK: if (*lock_status == DRXJ_DEMOD_LOCK) { rc = ctrl_get_qam_sig_quality(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (p->cnr.stat[0].svalue > 20800) { lck_state = DEMOD_LOCKED; /* some delay to see if fec_lock possible TODO find the right value */ timeout_ofs += DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME; /* see something, waiting longer */ d_locked_time = jiffies_to_msecs(jiffies); } } break;
case DEMOD_LOCKED: if ((*lock_status == DRXJ_DEMOD_LOCK) && /* still demod_lock in 150ms */ ((jiffies_to_msecs(jiffies) - d_locked_time) > DRXJ_QAM_FEC_LOCK_WAITTIME)) { rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data | 0x1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } lck_state = SYNC_FLIPPED; msleep(10); } break;
case SYNC_FLIPPED: if (*lock_status == DRXJ_DEMOD_LOCK) { if (channel->mirror == DRX_MIRROR_AUTO) { /* flip sync pattern back */ rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data & 0xFFFE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* flip spectrum */ ext_attr->mirror = DRX_MIRROR_YES; rc = qam_flip_spec(demod, channel); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } lck_state = SPEC_MIRRORED; /* reset timer TODO: still need 500ms? */ start_time = d_locked_time = jiffies_to_msecs(jiffies); timeout_ofs = 0; } else { /* no need to wait lock */ start_time = jiffies_to_msecs(jiffies) - DRXJ_QAM_MAX_WAITTIME - timeout_ofs; } } break;
case SPEC_MIRRORED: if ((*lock_status == DRXJ_DEMOD_LOCK) && /* still demod_lock in 150ms */ ((jiffies_to_msecs(jiffies) - d_locked_time) > DRXJ_QAM_FEC_LOCK_WAITTIME)) { rc = ctrl_get_qam_sig_quality(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (p->cnr.stat[0].svalue > 20800) { rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, &data, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr, QAM_SY_TIMEOUT__A, data | 0x1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* no need to wait lock */ start_time = jiffies_to_msecs(jiffies) - DRXJ_QAM_MAX_WAITTIME - timeout_ofs; } } break;
default: break; } msleep(10); } while ((*lock_status != DRX_LOCKED) && (*lock_status != DRX_NEVER_LOCK) && ((jiffies_to_msecs(jiffies) - start_time) < (DRXJ_QAM_MAX_WAITTIME + timeout_ofs)) ); /* Returning control to application ... */
return 0; rw_error: return rc; }
/** * \fn int qam256auto () * \brief auto do sync pattern switching and mirroring. * \param demod: instance of demod. * \param channel: pointer to channel data. * \param tuner_freq_offset: tuner frequency offset. * \param lock_status: pointer to lock status. * \return int. 
*/ static int qam256auto(struct drx_demod_instance *demod, struct drx_channel *channel, s32 tuner_freq_offset, enum drx_lock_status *lock_status) { struct drxj_data *ext_attr = demod->my_ext_attr; struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; struct drx39xxj_state *state = dev_addr->user_data; struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache; int rc; u32 lck_state = NO_LOCK; u32 start_time = 0; u32 d_locked_time = 0; u32 timeout_ofs = DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME; /* external attributes for storing acquired channel constellation */ *lock_status = DRX_NOT_LOCKED; start_time = jiffies_to_msecs(jiffies); lck_state = NO_LOCK;
/*
 * Lock loop for QAM-256: like qam64auto() but without the sync-pattern
 * flip step — only spectrum mirroring is attempted. CNR threshold is
 * 26800 (presumably 26.8 dB, svalue in 0.001 dB units — confirm).
 * NOTE(review): tuner_freq_offset is accepted but never used here.
 */
do { rc = ctrl_lock_status(demod, lock_status); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } switch (lck_state) { case NO_LOCK: if (*lock_status == DRXJ_DEMOD_LOCK) { rc = ctrl_get_qam_sig_quality(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (p->cnr.stat[0].svalue > 26800) { lck_state = DEMOD_LOCKED; timeout_ofs += DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME; /* see something, wait longer */ d_locked_time = jiffies_to_msecs(jiffies); } } break;
/* NOTE(review): timeout_ofs is u32, so assigning -DRXJ_QAM_MAX_WAITTIME / 2 relies on unsigned wraparound to make DRXJ_QAM_MAX_WAITTIME + timeout_ofs equal half the max wait — intentional, but fragile. */
case DEMOD_LOCKED: if (*lock_status == DRXJ_DEMOD_LOCK) { if ((channel->mirror == DRX_MIRROR_AUTO) && ((jiffies_to_msecs(jiffies) - d_locked_time) > DRXJ_QAM_FEC_LOCK_WAITTIME)) { ext_attr->mirror = DRX_MIRROR_YES; rc = qam_flip_spec(demod, channel); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } lck_state = SPEC_MIRRORED; /* reset timer TODO: still need 300ms? */ start_time = jiffies_to_msecs(jiffies); timeout_ofs = -DRXJ_QAM_MAX_WAITTIME / 2; } } break; case SPEC_MIRRORED: break; default: break; } msleep(10);
/* NOTE(review): exit test uses (*lock_status < DRX_LOCKED) while qam64auto() uses (!= DRX_LOCKED) — confirm the drx_lock_status enum ordering makes these equivalent. */
} while ((*lock_status < DRX_LOCKED) && (*lock_status != DRX_NEVER_LOCK) && ((jiffies_to_msecs(jiffies) - start_time) < (DRXJ_QAM_MAX_WAITTIME + timeout_ofs)));
return 0; rw_error: return rc; }
/** * \fn int set_qam_channel () * \brief Set QAM channel according to the requested constellation. 
* \param demod:   instance of demod.
* \param channel: pointer to channel data.
* \return int.
*/
static int set_qam_channel(struct drx_demod_instance *demod,
			   struct drx_channel *channel,
			   s32 tuner_freq_offset)
{
	struct drxj_data *ext_attr = NULL;
	int rc;
	enum drx_lock_status lock_status = DRX_NOT_LOCKED;
	bool auto_flag = false;	/* true once channel->constellation was overwritten for a scan */

	/* external attributes for storing acquired channel constellation */
	ext_attr = (struct drxj_data *) demod->my_ext_attr;

	/* set QAM channel constellation */
	switch (channel->constellation) {
	case DRX_CONSTELLATION_QAM16:
	case DRX_CONSTELLATION_QAM32:
	case DRX_CONSTELLATION_QAM128:
		/* not supported by this driver */
		return -EINVAL;
	case DRX_CONSTELLATION_QAM64:
	case DRX_CONSTELLATION_QAM256:
		/* explicit constellation: only valid for ITU-B */
		if (ext_attr->standard != DRX_STANDARD_ITU_B)
			return -EINVAL;
		ext_attr->constellation = channel->constellation;
		if (channel->mirror == DRX_MIRROR_AUTO)
			ext_attr->mirror = DRX_MIRROR_NO;
		else
			ext_attr->mirror = channel->mirror;
		rc = set_qam(demod, channel, tuner_freq_offset, QAM_SET_OP_ALL);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		/* wait for lock, auto-flipping the spectrum if needed */
		if (channel->constellation == DRX_CONSTELLATION_QAM64)
			rc = qam64auto(demod, channel, tuner_freq_offset,
				       &lock_status);
		else
			rc = qam256auto(demod, channel, tuner_freq_offset,
					&lock_status);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		break;
	case DRX_CONSTELLATION_AUTO:	/* for channel scan */
		if (ext_attr->standard == DRX_STANDARD_ITU_B) {
			u16 qam_ctl_ena = 0;

			auto_flag = true;

			/* try to lock default QAM constellation: QAM256 */
			channel->constellation = DRX_CONSTELLATION_QAM256;
			ext_attr->constellation = DRX_CONSTELLATION_QAM256;
			if (channel->mirror == DRX_MIRROR_AUTO)
				ext_attr->mirror = DRX_MIRROR_NO;
			else
				ext_attr->mirror = channel->mirror;
			rc = set_qam(demod, channel, tuner_freq_offset,
				     QAM_SET_OP_ALL);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = qam256auto(demod, channel, tuner_freq_offset,
					&lock_status);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}

			if (lock_status >= DRX_LOCKED) {
				channel->constellation = DRX_CONSTELLATION_AUTO;
				break;
			}

			/* QAM256 not locked. Try QAM64 constellation */
			channel->constellation = DRX_CONSTELLATION_QAM64;
			ext_attr->constellation = DRX_CONSTELLATION_QAM64;
			if (channel->mirror == DRX_MIRROR_AUTO)
				ext_attr->mirror = DRX_MIRROR_NO;
			else
				ext_attr->mirror = channel->mirror;

			/* Temporarily mask the SCU acquisition loop and force
			 * the QAM FSM target state while the new constellation
			 * is programmed; the saved enable mask is restored
			 * below. */
			rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr,
						 SCU_RAM_QAM_CTL_ENA__A,
						 &qam_ctl_ena, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_CTL_ENA__A,
						  qam_ctl_ena & ~SCU_RAM_QAM_CTL_ENA_ACQ__M, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_FSM_STATE_TGT__A,
						  0x2, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}

			/* force to rate hunting */
			rc = set_qam(demod, channel, tuner_freq_offset,
				     QAM_SET_OP_CONSTELLATION);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_CTL_ENA__A,
						  qam_ctl_ena, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = qam64auto(demod, channel, tuner_freq_offset,
				       &lock_status);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			channel->constellation = DRX_CONSTELLATION_AUTO;
		} else if (ext_attr->standard == DRX_STANDARD_ITU_C) {
			u16 qam_ctl_ena = 0;

			/* ITU-C scan only attempts QAM64 */
			channel->constellation = DRX_CONSTELLATION_QAM64;
			ext_attr->constellation = DRX_CONSTELLATION_QAM64;
			auto_flag = true;
			if (channel->mirror == DRX_MIRROR_AUTO)
				ext_attr->mirror = DRX_MIRROR_NO;
			else
				ext_attr->mirror = channel->mirror;

			/* same acquisition-mask / rate-hunting dance as the
			 * ITU-B QAM64 fallback above */
			rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr,
						 SCU_RAM_QAM_CTL_ENA__A,
						 &qam_ctl_ena, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_CTL_ENA__A,
						  qam_ctl_ena & ~SCU_RAM_QAM_CTL_ENA_ACQ__M, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_FSM_STATE_TGT__A,
						  0x2, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}

			/* force to rate hunting */
			rc = set_qam(demod, channel, tuner_freq_offset,
				     QAM_SET_OP_CONSTELLATION);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = drxj_dap_write_reg16(demod->my_i2c_dev_addr,
						  SCU_RAM_QAM_CTL_ENA__A,
						  qam_ctl_ena, 0);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			rc = qam64auto(demod, channel, tuner_freq_offset,
				       &lock_status);
			if (rc != 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
			channel->constellation = DRX_CONSTELLATION_AUTO;
		} else {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
rw_error:
	/* restore starting value */
	if (auto_flag)
		channel->constellation = DRX_CONSTELLATION_AUTO;
	return rc;
}

/*============================================================================*/

/**
* \fn static int get_qamrs_err_count(struct i2c_device_addr *dev_addr)
* \brief Get RS error count in QAM mode (used for post RS BER calculation)
* \return Error code
*
* precondition: measurement period & measurement prescale must be set
*
*/
static int get_qamrs_err_count(struct i2c_device_addr *dev_addr,
			       struct drxjrs_errors *rs_errors)
{
	int rc;
	u16 nr_bit_errors = 0,
	    nr_symbol_errors = 0,
	    nr_packet_errors = 0, nr_failures = 0, nr_snc_par_fail_count = 0;

	/* check arguments */
	if (dev_addr == NULL)
		return -EINVAL;

	/* all reported errors are received in the */
	/* most recently finished measurement period */
	/* no of pre RS bit errors */
	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &nr_bit_errors, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* no of symbol errors */
	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_SYMBOL_ERRORS__A, &nr_symbol_errors, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* no of packet errors */
	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_PACKET_ERRORS__A, &nr_packet_errors, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* no of failures to decode */
	rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_FAILURES__A, &nr_failures, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* no of post RS bit errors */
	rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_FAIL_COUNT__A, &nr_snc_par_fail_count, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* TODO: NOTE */
	/* These register values are fetched in non-atomic fashion */
	/* It is possible that the read values contain unrelated information */

	rs_errors->nr_bit_errors = nr_bit_errors & FEC_RS_NR_BIT_ERRORS__M;
	rs_errors->nr_symbol_errors = nr_symbol_errors & FEC_RS_NR_SYMBOL_ERRORS__M;
	rs_errors->nr_packet_errors = nr_packet_errors & FEC_RS_NR_PACKET_ERRORS__M;
	rs_errors->nr_failures = nr_failures & FEC_RS_NR_FAILURES__M;
	rs_errors->nr_snc_par_fail_count = nr_snc_par_fail_count & FEC_OC_SNC_FAIL_COUNT__M;

	return 0;
rw_error:
	return rc;
}

/*============================================================================*/

/**
* \fn int get_sig_strength()
* \brief Retrieve signal strength for VSB and QAM.
* \param demod Pointer to demod instance
* \param u16-t Pointer to signal strength data; range 0, .. , 100.
* \return int.
* \retval 0 sig_strength contains valid data.
* \retval -EINVAL sig_strength is NULL.
* \retval -EIO Erroneous data, sig_strength contains invalid data.
*/ #define DRXJ_AGC_TOP 0x2800 #define DRXJ_AGC_SNS 0x1600 #define DRXJ_RFAGC_MAX 0x3fff #define DRXJ_RFAGC_MIN 0x800 static int get_sig_strength(struct drx_demod_instance *demod, u16 *sig_strength) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; u16 rf_gain = 0; u16 if_gain = 0; u16 if_agc_sns = 0; u16 if_agc_top = 0; u16 rf_agc_max = 0; u16 rf_agc_min = 0; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_AGC_IF__A, &if_gain, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if_gain &= IQM_AF_AGC_IF__M; rc = drxj_dap_read_reg16(dev_addr, IQM_AF_AGC_RF__A, &rf_gain, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rf_gain &= IQM_AF_AGC_RF__M; if_agc_sns = DRXJ_AGC_SNS; if_agc_top = DRXJ_AGC_TOP; rf_agc_max = DRXJ_RFAGC_MAX; rf_agc_min = DRXJ_RFAGC_MIN; if (if_gain > if_agc_top) { if (rf_gain > rf_agc_max) *sig_strength = 100; else if (rf_gain > rf_agc_min) { if (rf_agc_max == rf_agc_min) { pr_err("error: rf_agc_max == rf_agc_min\n"); return -EIO; } *sig_strength = 75 + 25 * (rf_gain - rf_agc_min) / (rf_agc_max - rf_agc_min); } else *sig_strength = 75; } else if (if_gain > if_agc_sns) { if (if_agc_top == if_agc_sns) { pr_err("error: if_agc_top == if_agc_sns\n"); return -EIO; } *sig_strength = 20 + 55 * (if_gain - if_agc_sns) / (if_agc_top - if_agc_sns); } else { if (!if_agc_sns) { pr_err("error: if_agc_sns is zero!\n"); return -EIO; } *sig_strength = (20 * if_gain / if_agc_sns); } if (*sig_strength <= 7) *sig_strength = 0; return 0; rw_error: return rc; } /** * \fn int ctrl_get_qam_sig_quality() * \brief Retrieve QAM signal quality from device. * \param devmod Pointer to demodulator instance. * \param sig_quality Pointer to signal quality data. * \return int. * \retval 0 sig_quality contains valid data. * \retval -EINVAL sig_quality is NULL. * \retval -EIO Erroneous data, sig_quality contains invalid data. * Pre-condition: Device must be started and in lock. 
*/
static int ctrl_get_qam_sig_quality(struct drx_demod_instance *demod)
{
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	struct drxj_data *ext_attr = demod->my_ext_attr;
	struct drx39xxj_state *state = dev_addr->user_data;
	struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;

	struct drxjrs_errors measuredrs_errors = { 0, 0, 0, 0, 0 };
	enum drx_modulation constellation = ext_attr->constellation;
	int rc;
	u32 pre_bit_err_rs = 0;	/* pre RedSolomon Bit Error Rate */
	u32 post_bit_err_rs = 0;	/* post RedSolomon Bit Error Rate */
	u32 pkt_errs = 0;	/* no of packet errors in RS */
	u16 qam_sl_err_power = 0;	/* accumulated error between raw and sliced symbols */
	u16 qsym_err_vd = 0;	/* quadrature symbol errors in QAM_VD */
	u16 fec_oc_period = 0;	/* SNC sync failure measurement period */
	u16 fec_rs_prescale = 0;	/* ReedSolomon Measurement Prescale */
	u16 fec_rs_period = 0;	/* Value for corresponding I2C register */
	/* calculation constants */
	u32 rs_bit_cnt = 0;	/* RedSolomon Bit Count */
	u32 qam_sl_sig_power = 0;	/* used for MER, depends of QAM constellation */
	/* intermediate results */
	u32 e = 0;	/* exponent value used for QAM BER/SER */
	u32 m = 0;	/* mantisa value used for QAM BER/SER */
	u32 ber_cnt = 0;	/* BER count */
	/* signal quality info */
	u32 qam_sl_mer = 0;	/* QAM MER */
	u32 qam_pre_rs_ber = 0;	/* Pre RedSolomon BER */
	u32 qam_post_rs_ber = 0;	/* Post RedSolomon BER */
	u32 qam_vd_ser = 0;	/* ViterbiDecoder SER */
	u16 qam_vd_prescale = 0;	/* Viterbi Measurement Prescale */
	u16 qam_vd_period = 0;	/* Viterbi Measurement period */
	u32 vd_bit_cnt = 0;	/* ViterbiDecoder Bit Count */

	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;

	/* read the physical registers */
	/* Get the RS error data */
	rc = get_qamrs_err_count(dev_addr, &measuredrs_errors);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* get the register value needed for MER */
	rc = drxj_dap_read_reg16(dev_addr, QAM_SL_ERR_POWER__A, &qam_sl_err_power, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* get the register value needed for post RS BER */
	rc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_FAIL_PERIOD__A, &fec_oc_period, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* get constants needed for signal quality calculation */
	fec_rs_period = ext_attr->fec_rs_period;
	fec_rs_prescale = ext_attr->fec_rs_prescale;
	rs_bit_cnt = fec_rs_period * fec_rs_prescale * ext_attr->fec_rs_plen;
	qam_vd_period = ext_attr->qam_vd_period;
	qam_vd_prescale = ext_attr->qam_vd_prescale;
	vd_bit_cnt = qam_vd_period * qam_vd_prescale * ext_attr->fec_vd_plen;

	/* DRXJ_QAM_SL_SIG_POWER_QAMxxx * 4 */
	switch (constellation) {
	case DRX_CONSTELLATION_QAM16:
		qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM16 << 2;
		break;
	case DRX_CONSTELLATION_QAM32:
		qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM32 << 2;
		break;
	case DRX_CONSTELLATION_QAM64:
		qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM64 << 2;
		break;
	case DRX_CONSTELLATION_QAM128:
		qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM128 << 2;
		break;
	case DRX_CONSTELLATION_QAM256:
		qam_sl_sig_power = DRXJ_QAM_SL_SIG_POWER_QAM256 << 2;
		break;
	default:
		return -EIO;
	}

	/* ------------------------------ */
	/* MER Calculation                */
	/* ------------------------------ */
	/* MER is good if it is above 27.5 for QAM256 or 21.5 for QAM64 */

	/* 10.0*log10(qam_sl_sig_power * 4.0 / qam_sl_err_power); */
	if (qam_sl_err_power == 0)
		qam_sl_mer = 0;
	else
		qam_sl_mer = log1_times100(qam_sl_sig_power) - log1_times100((u32)qam_sl_err_power);

	/* ----------------------------------------- */
	/* Pre Viterbi Symbol Error Rate Calculation */
	/* ----------------------------------------- */
	/* pre viterbi SER is good if it is below 0.025 */

	/* get the register value */
	/* no of quadrature symbol errors */
	rc = drxj_dap_read_reg16(dev_addr, QAM_VD_NR_QSYM_ERRORS__A, &qsym_err_vd, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* Extract the Exponent and the Mantisa */
	/* of number of quadrature symbol errors */
	e = (qsym_err_vd & QAM_VD_NR_QSYM_ERRORS_EXP__M) >>
		QAM_VD_NR_QSYM_ERRORS_EXP__B;
	m = (qsym_err_vd & QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__M) >>
		QAM_VD_NR_SYMBOL_ERRORS_FIXED_MANT__B;

	/* saturate if the fixed-point count would overflow the scaling below */
	if ((m << e) >> 3 > 549752)
		qam_vd_ser = 500000 * vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
	else
		qam_vd_ser = m << ((e > 2) ? (e - 3) : e);

	/* --------------------------------------- */
	/* pre and post RedSolomon BER Calculation */
	/* --------------------------------------- */
	/* pre RS BER is good if it is below 3.5e-4 */

	/* get the register values */
	pre_bit_err_rs = (u32) measuredrs_errors.nr_bit_errors;
	pkt_errs = post_bit_err_rs = (u32) measuredrs_errors.nr_snc_par_fail_count;

	/* Extract the Exponent and the Mantisa of the */
	/* pre Reed-Solomon bit error count            */
	e = (pre_bit_err_rs & FEC_RS_NR_BIT_ERRORS_EXP__M) >>
		FEC_RS_NR_BIT_ERRORS_EXP__B;
	m = (pre_bit_err_rs & FEC_RS_NR_BIT_ERRORS_FIXED_MANT__M) >>
		FEC_RS_NR_BIT_ERRORS_FIXED_MANT__B;

	ber_cnt = m << e;

	/*qam_pre_rs_ber = frac_times1e6( ber_cnt, rs_bit_cnt ); */
	if (m > (rs_bit_cnt >> (e + 1)) || (rs_bit_cnt >> e) == 0)
		qam_pre_rs_ber = 500000 * rs_bit_cnt >> e;
	else
		qam_pre_rs_ber = ber_cnt;

	/* post RS BER = 1000000* (11.17 * FEC_OC_SNC_FAIL_COUNT__A) /  */
	/*               (1504.0 * FEC_OC_SNC_FAIL_PERIOD__A)  */
	/*
	   => c = (1000000*100*11.17)/1504 =
	   post RS BER = (( c* FEC_OC_SNC_FAIL_COUNT__A) /
	   (100 * FEC_OC_SNC_FAIL_PERIOD__A)
	   *100 and /100 is for more precision.
	   => (20 bits * 12 bits) /(16 bits * 7 bits)  => safe in 32 bits computation

	   Precision errors still possible.
	 */
	if (!fec_oc_period) {
		qam_post_rs_ber = 0xFFFFFFFF;
	} else {
		/* 'e' and 'm' are recycled here as scratch for the division */
		e = post_bit_err_rs * 742686;
		m = fec_oc_period * 100;
		qam_post_rs_ber = e / m;
	}

	/* fill signal quality data structure */
	p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
	p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
	p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
	p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
	p->block_error.stat[0].scale = FE_SCALE_COUNTER;
	p->cnr.stat[0].scale = FE_SCALE_DECIBEL;

	p->cnr.stat[0].svalue = ((u16) qam_sl_mer) * 100;
	/* NOTE(review): 'e' below was last written in the post-RS-BER branch
	 * above (or still holds the pre-RS exponent when fec_oc_period == 0);
	 * this looks like stale reuse rather than the Viterbi exponent -
	 * confirm against vendor reference code before changing. */
	if (ext_attr->standard == DRX_STANDARD_ITU_B) {
		p->pre_bit_error.stat[0].uvalue += qam_vd_ser;
		p->pre_bit_count.stat[0].uvalue += vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
	} else {
		p->pre_bit_error.stat[0].uvalue += qam_pre_rs_ber;
		p->pre_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
	}

	p->post_bit_error.stat[0].uvalue += qam_post_rs_ber;
	p->post_bit_count.stat[0].uvalue += rs_bit_cnt >> e;

	p->block_error.stat[0].uvalue += pkt_errs;

#ifdef DRXJ_SIGNAL_ACCUM_ERR
	/* NOTE(review): 'sig_quality' is not declared in this function; this
	 * block cannot compile when DRXJ_SIGNAL_ACCUM_ERR is defined. */
	rc = get_acc_pkt_err(demod, &sig_quality->packet_error);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
#endif

	return 0;
rw_error:
	/* on failure, mark every statistic as unavailable */
	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;

	return rc;
}
#endif /* #ifndef DRXJ_VSB_ONLY */

/*============================================================================*/
/*==                     END QAM DATAPATH FUNCTIONS                         ==*/
/*============================================================================*/

/*============================================================================*/
/*============================================================================*/
/*==                       ATV DATAPATH FUNCTIONS                           ==*/
/*============================================================================*/
/*============================================================================*/

/*
   Implementation notes.

   NTSC/FM AGCs

     Four AGCs are used for NTSC:
     (1) RF (used to attenuate the input signal in case of too much power)
     (2) IF (used to attenuate the input signal in case of too much power)
     (3) Video AGC (used to amplify the output signal in case the input is too low)
     (4) SIF AGC (used to amplify the output signal in case the input is too low)

     Video AGC is coupled to RF and IF. SIF AGC is not coupled. It is
     assumed that the coupling between Video AGC and the RF and IF AGCs
     also works in favor of the SIF AGC.

     Three AGCs are used for FM:
     (1) RF (used to attenuate the input signal in case of too much power)
     (2) IF (used to attenuate the input signal in case of too much power)
     (3) SIF AGC (used to amplify the output signal in case the input is too low)

     The SIF AGC is now coupled to the RF/IF AGCs.
     The SIF AGC is needed for both SIF output and the internal SIF signal
     to the AUD block.

     RF and IF AGCs DACs are part of AFE, Video and SIF AGC DACs are part
     of the ATV block. The AGC control algorithms are all implemented in
     microcode.

   ATV SETTINGS

     (Shadow settings will not be used for now, they will be implemented
      later on because of the schedule)

     Several HW/SCU "settings" can be used for ATV. The standard selection
     will reset most of these settings. To avoid that the end user
     application has to perform these settings each time the ATV or FM
     standard is selected, the driver will shadow these settings. This
     enables the end user to perform the settings only once after a
     drx_open(). The driver must write the shadow settings to HW/SCU in
     case:
	( setstandard FM/ATV) ||
	( settings have changed && FM/ATV standard is active)
     The shadow settings will be stored in the device specific data
     container. A set of flags will be defined to flag changes in shadow
     settings. A routine will be implemented to write all changed shadow
     settings to HW/SCU.

     The "settings" will consist of: AGC settings, filter settings etc.
     Disadvantage of use of shadow settings:
     Direct changes in HW/SCU registers will not be reflected in the
     shadow settings and these changes will be overwritten during a next
     update. This can happen during evaluation. This will not be a problem
     for normal customer usage.
 */

/* -------------------------------------------------------------------------- */

/**
* \fn int power_down_atv ()
* \brief Power down ATV.
* \param demod instance of demodulator
* \param standard either NTSC or FM (sub standard for ATV)
* \return int.
*
*  Stops and thus resets ATV and IQM block
*  SIF and CVBS ADC are powered down
*  Calls audio power down
*/
static int power_down_atv(struct drx_demod_instance *demod, enum drx_standard standard, bool primary)
{
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	/* SCU command block, filled in below for the ATV "demod stop". */
	struct drxjscu_cmd cmd_scu = { /* command      */ 0,
		/* parameter_len */ 0,
		/* result_len    */ 0,
		/* *parameter   */ NULL,
		/* *result      */ NULL
	};
	int rc;
	u16 cmd_result = 0;

	/* NOTE(review): the 'standard' parameter is not referenced in this
	 * body - the same power-down sequence runs for NTSC and FM. */

	/* ATV NTSC */

	/* Stop ATV SCU (will reset ATV and IQM hardware) */
	cmd_scu.command = SCU_RAM_COMMAND_STANDARD_ATV |
	    SCU_RAM_COMMAND_CMD_DEMOD_STOP;
	cmd_scu.parameter_len = 0;
	cmd_scu.result_len = 1;
	cmd_scu.parameter = NULL;
	cmd_scu.result = &cmd_result;
	rc = scu_command(dev_addr, &cmd_scu);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	/* Disable ATV outputs (ATV reset enables CVBS, undo this) */
	rc = drxj_dap_write_reg16(dev_addr, ATV_TOP_STDBY__A, (ATV_TOP_STDBY_SIF_STDBY_STANDBY & (~ATV_TOP_STDBY_CVBS_STDBY_A2_ACTIVE)), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	rc = drxj_dap_write_reg16(dev_addr, ATV_COMM_EXEC__A, ATV_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	if (primary) {
		/* primary input path: stop the whole IQM and power down the
		 * analog front-end ADC */
		rc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = set_iqm_af(demod, false);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	} else {
		/* secondary input path: stop the individual IQM sub-blocks */
		rc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
		rc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	}
	rc = power_down_aud(demod);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	return 0;
rw_error:
	return rc;
}

/*============================================================================*/

/**
* \brief Power down AUD.
* \param demod instance of demodulator
* \return int.
*
*/
static int power_down_aud(struct drx_demod_instance *demod)
{
	struct i2c_device_addr *dev_addr = NULL;
	struct drxj_data *ext_attr = NULL;
	int rc;

	dev_addr = (struct i2c_device_addr *)demod->my_i2c_dev_addr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;

	/* stop the audio block and record that audio is no longer active */
	rc = drxj_dap_write_reg16(dev_addr, AUD_COMM_EXEC__A, AUD_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	ext_attr->aud_data.audio_is_active = false;

	return 0;
rw_error:
	return rc;
}

/**
* \fn int set_orx_nsu_aox()
* \brief Configure OrxNsuAox for OOB
* \param demod instance of demodulator.
* \param active
* \return int.
*/
static int set_orx_nsu_aox(struct drx_demod_instance *demod, bool active)
{
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	int rc;
	u16 data = 0;
	/* Every NSU_AOX standby-control bit managed by this function. */
	const u16 stdby_bits = ORX_NSU_AOX_STDBY_W_STDBYADC_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYAMP_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYBIAS_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYPLL_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYPD_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYTAGC_IF_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYTAGC_RF_A2_ON
			     | ORX_NSU_AOX_STDBY_W_STDBYFLT_A2_ON;

	/* Read-modify-write the NSU_AOX standby register: set all blocks
	 * active or put them all in standby, leaving other bits untouched. */
	rc = drxj_dap_read_reg16(dev_addr, ORX_NSU_AOX_STDBY_W__A, &data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	if (active)
		data |= stdby_bits;
	else
		data &= ~stdby_bits;

	rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_STDBY_W__A, data, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	return 0;
rw_error:
	return rc;
}

/**
* \fn int ctrl_set_oob()
* \brief Set OOB channel to be used.
* \param demod instance of demodulator
* \param oob_param OOB parameters for channel setting.
* \frequency should be in KHz
* \return int.
*
* Accepts only. Returns error otherwise.
* Demapper value is written after scu_command START * because START command causes COMM_EXEC transition * from 0 to 1 which causes all registers to be * overwritten with initial value * */ /* Nyquist filter impulse response */ #define IMPULSE_COSINE_ALPHA_0_3 {-3, -4, -1, 6, 10, 7, -5, -20, -25, -10, 29, 79, 123, 140} /*sqrt raised-cosine filter with alpha=0.3 */ #define IMPULSE_COSINE_ALPHA_0_5 { 2, 0, -2, -2, 2, 5, 2, -10, -20, -14, 20, 74, 125, 145} /*sqrt raised-cosine filter with alpha=0.5 */ #define IMPULSE_COSINE_ALPHA_RO_0_5 { 0, 0, 1, 2, 3, 0, -7, -15, -16, 0, 34, 77, 114, 128} /*full raised-cosine filter with alpha=0.5 (receiver only) */ /* Coefficients for the nyquist fitler (total: 27 taps) */ #define NYQFILTERLEN 27 static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_param) { int rc; s32 freq = 0; /* KHz */ struct i2c_device_addr *dev_addr = NULL; struct drxj_data *ext_attr = NULL; u16 i = 0; bool mirror_freq_spect_oob = false; u16 trk_filter_value = 0; struct drxjscu_cmd scu_cmd; u16 set_param_parameters[3]; u16 cmd_result[2] = { 0, 0 }; s16 nyquist_coeffs[4][(NYQFILTERLEN + 1) / 2] = { IMPULSE_COSINE_ALPHA_0_3, /* Target Mode 0 */ IMPULSE_COSINE_ALPHA_0_3, /* Target Mode 1 */ IMPULSE_COSINE_ALPHA_0_5, /* Target Mode 2 */ IMPULSE_COSINE_ALPHA_RO_0_5 /* Target Mode 3 */ }; u8 mode_val[4] = { 2, 2, 0, 1 }; u8 pfi_coeffs[4][6] = { {DRXJ_16TO8(-92), DRXJ_16TO8(-108), DRXJ_16TO8(100)}, /* TARGET_MODE = 0: PFI_A = -23/32; PFI_B = -54/32; PFI_C = 25/32; fg = 0.5 MHz (Att=26dB) */ {DRXJ_16TO8(-64), DRXJ_16TO8(-80), DRXJ_16TO8(80)}, /* TARGET_MODE = 1: PFI_A = -16/32; PFI_B = -40/32; PFI_C = 20/32; fg = 1.0 MHz (Att=28dB) */ {DRXJ_16TO8(-80), DRXJ_16TO8(-98), DRXJ_16TO8(92)}, /* TARGET_MODE = 2, 3: PFI_A = -20/32; PFI_B = -49/32; PFI_C = 23/32; fg = 0.8 MHz (Att=25dB) */ {DRXJ_16TO8(-80), DRXJ_16TO8(-98), DRXJ_16TO8(92)} /* TARGET_MODE = 2, 3: PFI_A = -20/32; PFI_B = -49/32; PFI_C = 23/32; fg = 0.8 MHz (Att=25dB) */ }; u16 mode_index; 
dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; mirror_freq_spect_oob = ext_attr->mirror_freq_spect_oob; /* Check parameters */ if (oob_param == NULL) { /* power off oob module */ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB | SCU_RAM_COMMAND_CMD_DEMOD_STOP; scu_cmd.parameter_len = 0; scu_cmd.result_len = 1; scu_cmd.result = cmd_result; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_orx_nsu_aox(demod, false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->oob_power_on = false; return 0; } freq = oob_param->frequency; if ((freq < 70000) || (freq > 130000)) return -EIO; freq = (freq - 50000) / 50; { u16 index = 0; u16 remainder = 0; u16 *trk_filtercfg = ext_attr->oob_trk_filter_cfg; index = (u16) ((freq - 400) / 200); remainder = (u16) ((freq - 400) % 200); trk_filter_value = trk_filtercfg[index] - (trk_filtercfg[index] - trk_filtercfg[index + 1]) / 10 * remainder / 20; } /*********/ /* Stop */ /*********/ rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB | SCU_RAM_COMMAND_CMD_DEMOD_STOP; scu_cmd.parameter_len = 0; scu_cmd.result_len = 1; scu_cmd.result = cmd_result; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*********/ /* Reset */ /*********/ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB | SCU_RAM_COMMAND_CMD_DEMOD_RESET; scu_cmd.parameter_len = 0; scu_cmd.result_len = 1; scu_cmd.result = cmd_result; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /***********/ /* SET_ENV */ /***********/ /* set frequency, spectrum inversion and data rate */ scu_cmd.command = 
SCU_RAM_COMMAND_STANDARD_OOB | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV; scu_cmd.parameter_len = 3; /* 1-data rate;2-frequency */ switch (oob_param->standard) { case DRX_OOB_MODE_A: if ( /* signal is transmitted inverted */ ((oob_param->spectrum_inverted == true) && /* and tuner is not mirroring the signal */ (!mirror_freq_spect_oob)) | /* or */ /* signal is transmitted noninverted */ ((oob_param->spectrum_inverted == false) && /* and tuner is mirroring the signal */ (mirror_freq_spect_oob)) ) set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_INVSPEC; else set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_2048KBPS_REGSPEC; break; case DRX_OOB_MODE_B_GRADE_A: if ( /* signal is transmitted inverted */ ((oob_param->spectrum_inverted == true) && /* and tuner is not mirroring the signal */ (!mirror_freq_spect_oob)) | /* or */ /* signal is transmitted noninverted */ ((oob_param->spectrum_inverted == false) && /* and tuner is mirroring the signal */ (mirror_freq_spect_oob)) ) set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_INVSPEC; else set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_1544KBPS_REGSPEC; break; case DRX_OOB_MODE_B_GRADE_B: default: if ( /* signal is transmitted inverted */ ((oob_param->spectrum_inverted == true) && /* and tuner is not mirroring the signal */ (!mirror_freq_spect_oob)) | /* or */ /* signal is transmitted noninverted */ ((oob_param->spectrum_inverted == false) && /* and tuner is mirroring the signal */ (mirror_freq_spect_oob)) ) set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_INVSPEC; else set_param_parameters[0] = SCU_RAM_ORX_RF_RX_DATA_RATE_3088KBPS_REGSPEC; break; } set_param_parameters[1] = (u16) (freq & 0xFFFF); set_param_parameters[2] = trk_filter_value; scu_cmd.parameter = set_param_parameters; scu_cmd.result_len = 1; scu_cmd.result = cmd_result; mode_index = mode_val[(set_param_parameters[0] & 0xC0) >> 6]; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto 
rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0xFABA, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Write magic word to enable pdr reg write */ rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_OOB_CRX_CFG__A, OOB_CRX_DRIVE_STRENGTH << SIO_PDR_OOB_CRX_CFG_DRIVE__B | 0x03 << SIO_PDR_OOB_CRX_CFG_MODE__B, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_PDR_OOB_DRX_CFG__A, OOB_DRX_DRIVE_STRENGTH << SIO_PDR_OOB_DRX_CFG_DRIVE__B | 0x03 << SIO_PDR_OOB_DRX_CFG_MODE__B, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, 0x0000, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Write magic word to disable pdr reg write */ rc = drxj_dap_write_reg16(dev_addr, ORX_TOP_COMM_KEY__A, 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_AAG_LEN_W__A, 16000, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_AAG_THR_W__A, 40, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* ddc */ rc = drxj_dap_write_reg16(dev_addr, ORX_DDC_OFO_SET_W__A, ORX_DDC_OFO_SET_W__PRE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* nsu */ rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_LOPOW_W__A, ext_attr->oob_lo_pow, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* initialization for target mode */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TARGET_MODE__A, SCU_RAM_ORX_TARGET_MODE_2048KBPS_SQRT, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FREQ_GAIN_CORR__A, SCU_RAM_ORX_FREQ_GAIN_CORR_2048KBPS, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* Reset bits for timing and freq. 
recovery */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_CPH__A, 0x0001, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_CTI__A, 0x0002, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_KRN__A, 0x0004, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_RST_KRP__A, 0x0008, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* AGN_LOCK = {2048>>3, -2048, 8, -8, 0, 1}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_TH__A, 2048 >> 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_ONLOCK_TTH__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_UNLOCK_TTH__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_AGN_LOCK_MASK__A, 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* DGN_LOCK = {10, -2048, 8, -8, 0, 1<<1}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_TH__A, 10, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_ONLOCK_TTH__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_UNLOCK_TTH__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_DGN_LOCK_MASK__A, 1 << 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* FRQ_LOCK 
= {15,-2048, 8, -8, 0, 1<<2}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_TH__A, 17, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_ONLOCK_TTH__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_UNLOCK_TTH__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_FRQ_LOCK_MASK__A, 1 << 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* PHA_LOCK = {5000, -2048, 8, -8, 0, 1<<3}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_TH__A, 3000, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_ONLOCK_TTH__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_UNLOCK_TTH__A, (u16)(-8), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_PHA_LOCK_MASK__A, 1 << 3, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* TIM_LOCK = {300, -2048, 8, -8, 0, 1<<4}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_TH__A, 400, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_ONLOCK_TTH__A, 8, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_UNLOCK_TTH__A, (u16)(-8), 0); if (rc != 0) { 
pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_TIM_LOCK_MASK__A, 1 << 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* EQU_LOCK = {20, -2048, 8, -8, 0, 1<<5}; */ rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_TH__A, 20, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_TOTH__A, (u16)(-2048), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_ONLOCK_TTH__A, 4, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_UNLOCK_TTH__A, (u16)(-4), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_ORX_EQU_LOCK_MASK__A, 1 << 5, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* PRE-Filter coefficients (PFI) */ rc = drxdap_fasi_write_block(dev_addr, ORX_FWP_PFI_A_W__A, sizeof(pfi_coeffs[mode_index]), ((u8 *)pfi_coeffs[mode_index]), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_TOP_MDE_W__A, mode_index, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* NYQUIST-Filter coefficients (NYQ) */ for (i = 0; i < (NYQFILTERLEN + 1) / 2; i++) { rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_ADR_W__A, i, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_COF_RW__A, nyquist_coeffs[mode_index][i], 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } rc = drxj_dap_write_reg16(dev_addr, ORX_FWP_NYQ_ADR_W__A, 31, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /*********/ /* Start */ /*********/ scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB | SCU_RAM_COMMAND_CMD_DEMOD_START; 
scu_cmd.parameter_len = 0; scu_cmd.result_len = 1; scu_cmd.result = cmd_result; rc = scu_command(dev_addr, &scu_cmd); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_orx_nsu_aox(demod, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, ORX_NSU_AOX_STHR_W__A, ext_attr->oob_pre_saw, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->oob_power_on = true; return 0; rw_error: return rc; } /*============================================================================*/ /*== END OOB DATAPATH FUNCTIONS ==*/ /*============================================================================*/ /*============================================================================= ===== MC command related functions ========================================== ===========================================================================*/ /*============================================================================= ===== ctrl_set_channel() ========================================================== ===========================================================================*/ /** * \fn int ctrl_set_channel() * \brief Select a new transmission channel. * \param demod instance of demod. * \param channel Pointer to channel data. * \return int. * * In case the tuner module is not used and in case of NTSC/FM the pogrammer * must tune the tuner to the centre frequency of the NTSC/FM channel. 
* */ static int ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) { int rc; s32 tuner_freq_offset = 0; struct drxj_data *ext_attr = NULL; struct i2c_device_addr *dev_addr = NULL; enum drx_standard standard = DRX_STANDARD_UNKNOWN; #ifndef DRXJ_VSB_ONLY u32 min_symbol_rate = 0; u32 max_symbol_rate = 0; int bandwidth_temp = 0; int bandwidth = 0; #endif /*== check arguments ======================================================*/ if ((demod == NULL) || (channel == NULL)) return -EINVAL; dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; standard = ext_attr->standard; /* check valid standards */ switch (standard) { case DRX_STANDARD_8VSB: #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: #endif /* DRXJ_VSB_ONLY */ break; case DRX_STANDARD_UNKNOWN: default: return -EINVAL; } /* check bandwidth QAM annex B, NTSC and 8VSB */ if ((standard == DRX_STANDARD_ITU_B) || (standard == DRX_STANDARD_8VSB) || (standard == DRX_STANDARD_NTSC)) { switch (channel->bandwidth) { case DRX_BANDWIDTH_6MHZ: case DRX_BANDWIDTH_UNKNOWN: /* fall through */ channel->bandwidth = DRX_BANDWIDTH_6MHZ; break; case DRX_BANDWIDTH_8MHZ: /* fall through */ case DRX_BANDWIDTH_7MHZ: /* fall through */ default: return -EINVAL; } } /* For QAM annex A and annex C: -check symbolrate and constellation -derive bandwidth from symbolrate (input bandwidth is ignored) */ #ifndef DRXJ_VSB_ONLY if ((standard == DRX_STANDARD_ITU_A) || (standard == DRX_STANDARD_ITU_C)) { struct drxuio_cfg uio_cfg = { DRX_UIO1, DRX_UIO_MODE_FIRMWARE_SAW }; int bw_rolloff_factor = 0; bw_rolloff_factor = (standard == DRX_STANDARD_ITU_A) ? 
115 : 113; min_symbol_rate = DRXJ_QAM_SYMBOLRATE_MIN; max_symbol_rate = DRXJ_QAM_SYMBOLRATE_MAX; /* config SMA_TX pin to SAW switch mode */ rc = ctrl_set_uio_cfg(demod, &uio_cfg); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if (channel->symbolrate < min_symbol_rate || channel->symbolrate > max_symbol_rate) { return -EINVAL; } switch (channel->constellation) { case DRX_CONSTELLATION_QAM16: /* fall through */ case DRX_CONSTELLATION_QAM32: /* fall through */ case DRX_CONSTELLATION_QAM64: /* fall through */ case DRX_CONSTELLATION_QAM128: /* fall through */ case DRX_CONSTELLATION_QAM256: bandwidth_temp = channel->symbolrate * bw_rolloff_factor; bandwidth = bandwidth_temp / 100; if ((bandwidth_temp % 100) >= 50) bandwidth++; if (bandwidth <= 6100000) { channel->bandwidth = DRX_BANDWIDTH_6MHZ; } else if ((bandwidth > 6100000) && (bandwidth <= 7100000)) { channel->bandwidth = DRX_BANDWIDTH_7MHZ; } else if (bandwidth > 7100000) { channel->bandwidth = DRX_BANDWIDTH_8MHZ; } break; default: return -EINVAL; } } /* For QAM annex B: -check constellation */ if (standard == DRX_STANDARD_ITU_B) { switch (channel->constellation) { case DRX_CONSTELLATION_AUTO: case DRX_CONSTELLATION_QAM256: case DRX_CONSTELLATION_QAM64: break; default: return -EINVAL; } switch (channel->interleavemode) { case DRX_INTERLEAVEMODE_I128_J1: case DRX_INTERLEAVEMODE_I128_J1_V2: case DRX_INTERLEAVEMODE_I128_J2: case DRX_INTERLEAVEMODE_I64_J2: case DRX_INTERLEAVEMODE_I128_J3: case DRX_INTERLEAVEMODE_I32_J4: case DRX_INTERLEAVEMODE_I128_J4: case DRX_INTERLEAVEMODE_I16_J8: case DRX_INTERLEAVEMODE_I128_J5: case DRX_INTERLEAVEMODE_I8_J16: case DRX_INTERLEAVEMODE_I128_J6: case DRX_INTERLEAVEMODE_I128_J7: case DRX_INTERLEAVEMODE_I128_J8: case DRX_INTERLEAVEMODE_I12_J17: case DRX_INTERLEAVEMODE_I5_J4: case DRX_INTERLEAVEMODE_B52_M240: case DRX_INTERLEAVEMODE_B52_M720: case DRX_INTERLEAVEMODE_UNKNOWN: case DRX_INTERLEAVEMODE_AUTO: break; default: return -EINVAL; } } if ((ext_attr->uio_sma_tx_mode) == 
DRX_UIO_MODE_FIRMWARE_SAW) { /* SAW SW, user UIO is used for switchable SAW */ struct drxuio_data uio1 = { DRX_UIO1, false }; switch (channel->bandwidth) { case DRX_BANDWIDTH_8MHZ: uio1.value = true; break; case DRX_BANDWIDTH_7MHZ: uio1.value = false; break; case DRX_BANDWIDTH_6MHZ: uio1.value = false; break; case DRX_BANDWIDTH_UNKNOWN: default: return -EINVAL; } rc = ctrl_uio_write(demod, &uio1); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } #endif /* DRXJ_VSB_ONLY */ rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } tuner_freq_offset = 0; /*== Setup demod for specific standard ====================================*/ switch (standard) { case DRX_STANDARD_8VSB: if (channel->mirror == DRX_MIRROR_AUTO) ext_attr->mirror = DRX_MIRROR_NO; else ext_attr->mirror = channel->mirror; rc = set_vsb(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = set_frequency(demod, channel, tuner_freq_offset); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: /* fallthrough */ case DRX_STANDARD_ITU_B: /* fallthrough */ case DRX_STANDARD_ITU_C: rc = set_qam_channel(demod, channel, tuner_freq_offset); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; #endif case DRX_STANDARD_UNKNOWN: default: return -EIO; } /* flag the packet error counter reset */ ext_attr->reset_pkt_err_acc = true; return 0; rw_error: return rc; } /*============================================================================= ===== SigQuality() ========================================================== ===========================================================================*/ /** * \fn int ctrl_sig_quality() * \brief Retrieve signal quality form device. * \param devmod Pointer to demodulator instance. * \param sig_quality Pointer to signal quality data. * \return int. * \retval 0 sig_quality contains valid data. 
* \retval -EINVAL sig_quality is NULL. * \retval -EIO Erroneous data, sig_quality contains invalid data. */ static int ctrl_sig_quality(struct drx_demod_instance *demod, enum drx_lock_status lock_status) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; struct drxj_data *ext_attr = demod->my_ext_attr; struct drx39xxj_state *state = dev_addr->user_data; struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache; enum drx_standard standard = ext_attr->standard; int rc; u32 ber, cnt, err, pkt; u16 mer, strength = 0; rc = get_sig_strength(demod, &strength); if (rc < 0) { pr_err("error getting signal strength %d\n", rc); p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } else { p->strength.stat[0].scale = FE_SCALE_RELATIVE; p->strength.stat[0].uvalue = 65535UL * strength/ 100; } switch (standard) { case DRX_STANDARD_8VSB: #ifdef DRXJ_SIGNAL_ACCUM_ERR rc = get_acc_pkt_err(demod, &pkt); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } #endif if (lock_status != DRXJ_DEMOD_LOCK && lock_status != DRX_LOCKED) { p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } else { rc = get_vsb_post_rs_pck_err(dev_addr, &err, &pkt); if (rc != 0) { pr_err("error %d getting UCB\n", rc); p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } else { p->block_error.stat[0].scale = FE_SCALE_COUNTER; p->block_error.stat[0].uvalue += err; p->block_count.stat[0].scale = FE_SCALE_COUNTER; p->block_count.stat[0].uvalue += pkt; } /* PostViterbi is compute in steps of 10^(-6) */ rc = get_vs_bpre_viterbi_ber(dev_addr, &ber, &cnt); if (rc != 0) { pr_err("error %d getting pre-ber\n", rc); p->pre_bit_error.stat[0].scale = 
FE_SCALE_NOT_AVAILABLE; } else { p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER; p->pre_bit_error.stat[0].uvalue += ber; p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; p->pre_bit_count.stat[0].uvalue += cnt; } rc = get_vs_bpost_viterbi_ber(dev_addr, &ber, &cnt); if (rc != 0) { pr_err("error %d getting post-ber\n", rc); p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } else { p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; p->post_bit_error.stat[0].uvalue += ber; p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; p->post_bit_count.stat[0].uvalue += cnt; } rc = get_vsbmer(dev_addr, &mer); if (rc != 0) { pr_err("error %d getting MER\n", rc); p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } else { p->cnr.stat[0].svalue = mer * 100; p->cnr.stat[0].scale = FE_SCALE_DECIBEL; } } break; #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = ctrl_get_qam_sig_quality(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; #endif default: return -EIO; } return 0; rw_error: return rc; } /*============================================================================*/ /** * \fn int ctrl_lock_status() * \brief Retrieve lock status . * \param dev_addr Pointer to demodulator device address. * \param lock_stat Pointer to lock status structure. * \return int. 
* */ static int ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_stat) { enum drx_standard standard = DRX_STANDARD_UNKNOWN; struct drxj_data *ext_attr = NULL; struct i2c_device_addr *dev_addr = NULL; struct drxjscu_cmd cmd_scu = { /* command */ 0, /* parameter_len */ 0, /* result_len */ 0, /* *parameter */ NULL, /* *result */ NULL }; int rc; u16 cmd_result[2] = { 0, 0 }; u16 demod_lock = SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_DEMOD_LOCKED; /* check arguments */ if ((demod == NULL) || (lock_stat == NULL)) return -EINVAL; dev_addr = demod->my_i2c_dev_addr; ext_attr = (struct drxj_data *) demod->my_ext_attr; standard = ext_attr->standard; *lock_stat = DRX_NOT_LOCKED; /* define the SCU command code */ switch (standard) { case DRX_STANDARD_8VSB: cmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB | SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK; demod_lock |= 0x6; break; #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: cmd_scu.command = SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK; break; #endif case DRX_STANDARD_UNKNOWN: /* fallthrough */ default: return -EIO; } /* define the SCU command parameters and execute the command */ cmd_scu.parameter_len = 0; cmd_scu.result_len = 2; cmd_scu.parameter = NULL; cmd_scu.result = cmd_result; rc = scu_command(dev_addr, &cmd_scu); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } /* set the lock status */ if (cmd_scu.result[1] < demod_lock) { /* 0x0000 NOT LOCKED */ *lock_stat = DRX_NOT_LOCKED; } else if (cmd_scu.result[1] < SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_LOCKED) { *lock_stat = DRXJ_DEMOD_LOCK; } else if (cmd_scu.result[1] < SCU_RAM_PARAM_1_RES_DEMOD_GET_LOCK_NEVER_LOCK) { /* 0x8000 DEMOD + FEC LOCKED (system lock) */ *lock_stat = DRX_LOCKED; } else { /* 0xC000 NEVER LOCKED */ /* (system will never be able to lock to the signal) */ *lock_stat = DRX_NEVER_LOCK; } return 0; rw_error: return rc; } 
/*============================================================================*/ /** * \fn int ctrl_set_standard() * \brief Set modulation standard to be used. * \param standard Modulation standard. * \return int. * * Setup stuff for the desired demodulation standard. * Disable and power down the previous selected demodulation standard * */ static int ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) { struct drxj_data *ext_attr = NULL; int rc; enum drx_standard prev_standard; /* check arguments */ if ((standard == NULL) || (demod == NULL)) return -EINVAL; ext_attr = (struct drxj_data *) demod->my_ext_attr; prev_standard = ext_attr->standard; /* Stop and power down previous standard */ switch (prev_standard) { #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: /* fallthrough */ case DRX_STANDARD_ITU_B: /* fallthrough */ case DRX_STANDARD_ITU_C: rc = power_down_qam(demod, false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; #endif case DRX_STANDARD_8VSB: rc = power_down_vsb(demod, false); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; case DRX_STANDARD_AUTO: /* fallthrough */ default: return -EINVAL; } /* Initialize channel independent registers Power up new standard */ ext_attr->standard = *standard; switch (*standard) { #ifndef DRXJ_VSB_ONLY case DRX_STANDARD_ITU_A: /* fallthrough */ case DRX_STANDARD_ITU_B: /* fallthrough */ case DRX_STANDARD_ITU_C: do { u16 dummy; rc = drxj_dap_read_reg16(demod->my_i2c_dev_addr, SCU_RAM_VERSION_HI__A, &dummy, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } while (0); break; #endif case DRX_STANDARD_8VSB: rc = set_vsb_leak_n_gain(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; default: ext_attr->standard = DRX_STANDARD_UNKNOWN; return -EINVAL; break; } return 0; rw_error: /* Don't know what the standard is now ... 
try again */ ext_attr->standard = DRX_STANDARD_UNKNOWN; return rc; } /*============================================================================*/ static void drxj_reset_mode(struct drxj_data *ext_attr) { /* Initialize default AFE configuration for QAM */ if (ext_attr->has_lna) { /* IF AGC off, PGA active */ #ifndef DRXJ_VSB_ONLY ext_attr->qam_if_agc_cfg.standard = DRX_STANDARD_ITU_B; ext_attr->qam_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_OFF; ext_attr->qam_pga_cfg = 140 + (11 * 13); #endif ext_attr->vsb_if_agc_cfg.standard = DRX_STANDARD_8VSB; ext_attr->vsb_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_OFF; ext_attr->vsb_pga_cfg = 140 + (11 * 13); } else { /* IF AGC on, PGA not active */ #ifndef DRXJ_VSB_ONLY ext_attr->qam_if_agc_cfg.standard = DRX_STANDARD_ITU_B; ext_attr->qam_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO; ext_attr->qam_if_agc_cfg.min_output_level = 0; ext_attr->qam_if_agc_cfg.max_output_level = 0x7FFF; ext_attr->qam_if_agc_cfg.speed = 3; ext_attr->qam_if_agc_cfg.top = 1297; ext_attr->qam_pga_cfg = 140; #endif ext_attr->vsb_if_agc_cfg.standard = DRX_STANDARD_8VSB; ext_attr->vsb_if_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO; ext_attr->vsb_if_agc_cfg.min_output_level = 0; ext_attr->vsb_if_agc_cfg.max_output_level = 0x7FFF; ext_attr->vsb_if_agc_cfg.speed = 3; ext_attr->vsb_if_agc_cfg.top = 1024; ext_attr->vsb_pga_cfg = 140; } /* TODO: remove min_output_level and max_output_level for both QAM and VSB after */ /* mc has not used them */ #ifndef DRXJ_VSB_ONLY ext_attr->qam_rf_agc_cfg.standard = DRX_STANDARD_ITU_B; ext_attr->qam_rf_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO; ext_attr->qam_rf_agc_cfg.min_output_level = 0; ext_attr->qam_rf_agc_cfg.max_output_level = 0x7FFF; ext_attr->qam_rf_agc_cfg.speed = 3; ext_attr->qam_rf_agc_cfg.top = 9500; ext_attr->qam_rf_agc_cfg.cut_off_current = 4000; ext_attr->qam_pre_saw_cfg.standard = DRX_STANDARD_ITU_B; ext_attr->qam_pre_saw_cfg.reference = 0x07; ext_attr->qam_pre_saw_cfg.use_pre_saw = true; #endif /* Initialize default AFE 
configuration for VSB */ ext_attr->vsb_rf_agc_cfg.standard = DRX_STANDARD_8VSB; ext_attr->vsb_rf_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO; ext_attr->vsb_rf_agc_cfg.min_output_level = 0; ext_attr->vsb_rf_agc_cfg.max_output_level = 0x7FFF; ext_attr->vsb_rf_agc_cfg.speed = 3; ext_attr->vsb_rf_agc_cfg.top = 9500; ext_attr->vsb_rf_agc_cfg.cut_off_current = 4000; ext_attr->vsb_pre_saw_cfg.standard = DRX_STANDARD_8VSB; ext_attr->vsb_pre_saw_cfg.reference = 0x07; ext_attr->vsb_pre_saw_cfg.use_pre_saw = true; } /** * \fn int ctrl_power_mode() * \brief Set the power mode of the device to the specified power mode * \param demod Pointer to demodulator instance. * \param mode Pointer to new power mode. * \return int. * \retval 0 Success * \retval -EIO I2C error or other failure * \retval -EINVAL Invalid mode argument. * * */ static int ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) { struct drx_common_attr *common_attr = (struct drx_common_attr *) NULL; struct drxj_data *ext_attr = (struct drxj_data *) NULL; struct i2c_device_addr *dev_addr = (struct i2c_device_addr *)NULL; int rc; u16 sio_cc_pwd_mode = 0; common_attr = (struct drx_common_attr *) demod->my_common_attr; ext_attr = (struct drxj_data *) demod->my_ext_attr; dev_addr = demod->my_i2c_dev_addr; /* Check arguments */ if (mode == NULL) return -EINVAL; /* If already in requested power mode, do nothing */ if (common_attr->current_power_mode == *mode) return 0; switch (*mode) { case DRX_POWER_UP: case DRXJ_POWER_DOWN_MAIN_PATH: sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_NONE; break; case DRXJ_POWER_DOWN_CORE: sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_CLOCK; break; case DRXJ_POWER_DOWN_PLL: sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_PLL; break; case DRX_POWER_DOWN: sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_OSC; break; default: /* Unknow sleep mode */ return -EINVAL; break; } /* Check if device needs to be powered up */ if ((common_attr->current_power_mode != DRX_POWER_UP)) { rc = power_up_device(demod); if 
(rc != 0) { pr_err("error %d\n", rc); goto rw_error; } } if ((*mode == DRX_POWER_UP)) { /* Restore analog & pin configuration */ /* Initialize default AFE configuration for VSB */ drxj_reset_mode(ext_attr); } else { /* Power down to requested mode */ /* Backup some register settings */ /* Set pins with possible pull-ups connected to them in input mode */ /* Analog power down */ /* ADC power down */ /* Power down device */ /* stop all comm_exec */ /* Stop and power down previous standard */ switch (ext_attr->standard) { case DRX_STANDARD_ITU_A: case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = power_down_qam(demod, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_STANDARD_8VSB: rc = power_down_vsb(demod, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */ case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */ case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */ case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */ case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */ case DRX_STANDARD_NTSC: /* fallthrough */ case DRX_STANDARD_FM: rc = power_down_atv(demod, ext_attr->standard, true); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } break; case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; case DRX_STANDARD_AUTO: /* fallthrough */ default: return -EIO; } ext_attr->standard = DRX_STANDARD_UNKNOWN; } if (*mode != DRXJ_POWER_DOWN_MAIN_PATH) { rc = drxj_dap_write_reg16(dev_addr, SIO_CC_PWD_MODE__A, sio_cc_pwd_mode, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } rc = drxj_dap_write_reg16(dev_addr, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } if ((*mode != DRX_POWER_UP)) { /* Initialize HI, wakeup key especially before put IC to sleep */ rc = init_hi(demod); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; } ext_attr->hi_cfg_ctrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; rc = hi_cfg_command(demod); if (rc 
!= 0) {
				pr_err("error %d\n", rc);
				goto rw_error;
			}
		}
	}

	/* Remember the power mode we just entered */
	common_attr->current_power_mode = *mode;

	return 0;

rw_error:
	return rc;
}

/*============================================================================*/
/*== CTRL Set/Get Config related functions ===================================*/
/*============================================================================*/

/**
 * \fn int ctrl_set_cfg_pre_saw()
 * \brief Set Pre-saw reference.
 * \param demod demod instance
 * \param u16 *
 * \return int.
 *
 * Check arguments
 * Dispatch handling to standard specific function.
 *
 */
static int ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw *pre_saw)
{
	struct i2c_device_addr *dev_addr = NULL;
	struct drxj_data *ext_attr = NULL;
	int rc;

	dev_addr = demod->my_i2c_dev_addr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;

	/* check arguments: reference must fit in the IQM_AF_PDREF field */
	if ((pre_saw == NULL) || (pre_saw->reference > IQM_AF_PDREF__M)
	    ) {
		return -EINVAL;
	}

	/* Only write the hardware register if the standard the setting is
	   for is currently active (exact match, or both QAM / both ATV) */
	if ((ext_attr->standard == pre_saw->standard) ||
	    (DRXJ_ISQAMSTD(ext_attr->standard) &&
	     DRXJ_ISQAMSTD(pre_saw->standard)) ||
	    (DRXJ_ISATVSTD(ext_attr->standard) &&
	     DRXJ_ISATVSTD(pre_saw->standard))) {
		rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PDREF__A, pre_saw->reference, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	}

	/* Store pre-saw settings so they can be re-applied later */
	switch (pre_saw->standard) {
	case DRX_STANDARD_8VSB:
		ext_attr->vsb_pre_saw_cfg = *pre_saw;
		break;
#ifndef DRXJ_VSB_ONLY
	case DRX_STANDARD_ITU_A:	/* fallthrough */
	case DRX_STANDARD_ITU_B:	/* fallthrough */
	case DRX_STANDARD_ITU_C:
		ext_attr->qam_pre_saw_cfg = *pre_saw;
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;

rw_error:
	return rc;
}

/*============================================================================*/

/**
 * \fn int ctrl_set_cfg_afe_gain()
 * \brief Set AFE Gain.
 * \param demod demod instance
 * \param u16 *
 * \return int.
 *
 * Check arguments
 * Dispatch handling to standard specific function.
 *
 */
static int ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain *afe_gain)
{
	struct i2c_device_addr *dev_addr = NULL;
	struct drxj_data *ext_attr = NULL;
	int rc;
	u8 gain = 0;

	/* check arguments */
	if (afe_gain == NULL)
		return -EINVAL;

	dev_addr = demod->my_i2c_dev_addr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;

	/* Only the digital TV standards are supported here */
	switch (afe_gain->standard) {
	case DRX_STANDARD_8VSB:	/* fallthrough */
#ifndef DRXJ_VSB_ONLY
	case DRX_STANDARD_ITU_A:	/* fallthrough */
	case DRX_STANDARD_ITU_B:	/* fallthrough */
	case DRX_STANDARD_ITU_C:
#endif
		/* Do nothing */
		break;
	default:
		return -EINVAL;
	}

	/* TODO PGA gain is also written by microcode (at least by QAM and VSB)
	   So I (PJ) think interface requires choice between auto, user mode */

	/* Map the requested gain onto the 0..15 PGA register range:
	   register step is 13 units starting at 140 (reg = (gain-140)/13,
	   rounded via the +6), clamped at both ends. */
	if (afe_gain->gain >= 329)
		gain = 15;
	else if (afe_gain->gain <= 147)
		gain = 0;
	else
		gain = (afe_gain->gain - 140 + 6) / 13;

	/* Only touch the hardware if this standard is currently active */
	if (ext_attr->standard == afe_gain->standard) {
		rc = drxj_dap_write_reg16(dev_addr, IQM_AF_PGA_GAIN__A, gain, 0);
		if (rc != 0) {
			pr_err("error %d\n", rc);
			goto rw_error;
		}
	}

	/* Store AFE Gain settings (quantized back from the register value) */
	switch (afe_gain->standard) {
	case DRX_STANDARD_8VSB:
		ext_attr->vsb_pga_cfg = gain * 13 + 140;
		break;
#ifndef DRXJ_VSB_ONLY
	case DRX_STANDARD_ITU_A:	/* fallthrough */
	case DRX_STANDARD_ITU_B:	/* fallthrough */
	case DRX_STANDARD_ITU_C:
		ext_attr->qam_pga_cfg = gain * 13 + 140;
		break;
#endif
	default:
		return -EIO;
	}

	return 0;

rw_error:
	return rc;
}

/*============================================================================*/

/*=============================================================================
  ===== EXPORTED FUNCTIONS ====================================================*/
static int drx_ctrl_u_code(struct drx_demod_instance *demod,
		       struct drxu_code_info *mc_info,
		       enum drxu_code_action action);
static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state);

/**
 * \fn drxj_open()
 * \brief Open the demod instance, configure device,
configure drxdriver
 * \return Status_t Return status.
 *
 * drxj_open() can be called with a NULL ucode image => no ucode upload.
 * This means that drxj_open() must NOT contain SCU commands or, in general,
 * rely on SCU or AUD ucode to be present.
 *
 */
static int drxj_open(struct drx_demod_instance *demod)
{
	struct i2c_device_addr *dev_addr = NULL;
	struct drxj_data *ext_attr = NULL;
	struct drx_common_attr *common_attr = NULL;
	u32 driver_version = 0;
	struct drxu_code_info ucode_info;
	struct drx_cfg_mpeg_output cfg_mpeg_output;
	int rc;
	enum drx_power_mode power_mode = DRX_POWER_UP;

	/* Validate the instance and reject a second open.
	   (The my_ext_attr test here also makes the previously duplicated
	   "check arguments" test redundant, so it was dropped.) */
	if ((demod == NULL) ||
	    (demod->my_common_attr == NULL) ||
	    (demod->my_ext_attr == NULL) ||
	    (demod->my_i2c_dev_addr == NULL) ||
	    (demod->my_common_attr->is_opened)) {
		return -EINVAL;
	}

	dev_addr = demod->my_i2c_dev_addr;
	ext_attr = (struct drxj_data *) demod->my_ext_attr;
	common_attr = (struct drx_common_attr *) demod->my_common_attr;

	rc = ctrl_power_mode(demod, &power_mode);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	if (power_mode != DRX_POWER_UP) {
		rc = -EINVAL;
		pr_err("failed to powerup device\n");
		goto rw_error;
	}

	/* has to be in front of setIqmAf and setOrxNsuAox */
	rc = get_device_capabilities(demod);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/*
	 * Soft reset of sys- and osc-clockdomain
	 *
	 * HACK: On windows, it writes a 0x07 here, instead of just 0x03.
	 * As we didn't load the firmware here yet, we should do the same.
	 * Btw, this is coherent with DRX-K, where we send reset codes
	 * for modulation (OFTM, in DRX-k), SYS and OSC clock domains.
	 */
	rc = drxj_dap_write_reg16(dev_addr, SIO_CC_SOFT_RST__A, (0x04 | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	msleep(1);

	/* TODO first make sure that everything keeps working before enabling this */
	/* PowerDownAnalogBlocks() */
	rc = drxj_dap_write_reg16(dev_addr, ATV_TOP_STDBY__A, (~ATV_TOP_STDBY_CVBS_STDBY_A2_ACTIVE) | ATV_TOP_STDBY_SIF_STDBY_STANDBY, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	rc = set_iqm_af(demod, false);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = set_orx_nsu_aox(demod, false);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	rc = init_hi(demod);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* disable mpegoutput pins */
	memcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));
	cfg_mpeg_output.enable_mpeg_output = false;

	rc = ctrl_set_cfg_mpeg_output(demod, &cfg_mpeg_output);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Stop AUD Inform SetAudio it will need to do all setting */
	rc = power_down_aud(demod);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Stop SCU */
	rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_STOP, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Upload microcode */
	if (common_attr->microcode_file != NULL) {
		/* Dirty trick to use common ucode upload & verify,
		   pretend device is already open */
		common_attr->is_opened = true;
		ucode_info.mc_file = common_attr->microcode_file;

		if (DRX_ISPOWERDOWNMODE(demod->my_common_attr->current_power_mode)) {
			pr_err("Should powerup before loading the firmware.");
			/* FIX: fail via rw_error so is_opened is restored to
			   false; a bare return here left the instance marked
			   open and every later drxj_open() would fail. */
			rc = -EINVAL;
			goto rw_error;
		}

		rc = drx_ctrl_u_code(demod, &ucode_info, UCODE_UPLOAD);
		if (rc != 0) {
			pr_err("error %d while uploading the firmware\n", rc);
			goto rw_error;
		}
		if (common_attr->verify_microcode == true) {
			rc = drx_ctrl_u_code(demod, &ucode_info, UCODE_VERIFY);
			if (rc != 0) {
				pr_err("error %d while verifying the firmware\n", rc);
				goto rw_error;
			}
		}
		common_attr->is_opened = false;
	}

	/* Run SCU for a little while to initialize microcode version numbers */
	rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Initialize scan timeout */
	common_attr->scan_demod_lock_timeout = DRXJ_SCAN_TIMEOUT;
	common_attr->scan_desired_lock = DRX_LOCKED;

	drxj_reset_mode(ext_attr);
	ext_attr->standard = DRX_STANDARD_UNKNOWN;

	rc = smart_ant_init(demod);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Stamp driver version number in SCU data RAM in BCD code
	   Done to enable field application engineers to retrieve drxdriver version
	   via I2C from SCU RAM */
	driver_version = (VERSION_MAJOR / 100) % 10;
	driver_version <<= 4;
	driver_version += (VERSION_MAJOR / 10) % 10;
	driver_version <<= 4;
	driver_version += (VERSION_MAJOR % 10);
	driver_version <<= 4;
	driver_version += (VERSION_MINOR % 10);
	driver_version <<= 4;
	driver_version += (VERSION_PATCH / 1000) % 10;
	driver_version <<= 4;
	driver_version += (VERSION_PATCH / 100) % 10;
	driver_version <<= 4;
	driver_version += (VERSION_PATCH / 10) % 10;
	driver_version <<= 4;
	driver_version += (VERSION_PATCH % 10);
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_DRIVER_VER_HI__A, (u16)(driver_version >> 16), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}
	rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_DRIVER_VER_LO__A, (u16)(driver_version & 0xFFFF), 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	rc = ctrl_set_oob(demod, NULL);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* refresh the audio data structure with default */
	ext_attr->aud_data = drxj_default_aud_data_g;
	demod->my_common_attr->is_opened = true;
	drxj_set_lna_state(demod, false);
	return 0;

rw_error:
	common_attr->is_opened = false;
	return rc;
}
/*============================================================================*/
/**
 * \fn drxj_close()
 * \brief Close the demod instance, power down the device
 * \return Status_t Return status.
 *
 * Sequence: the device is first powered UP so its registers are reachable
 * again, the SCU is restarted so it can process commands, and only then is
 * the final power-down requested.  The "is opened" flag is cleared on both
 * the success and the error path so a later drxj_open() starts clean.
 */
static int drxj_close(struct drx_demod_instance *demod)
{
	struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
	int rc;
	enum drx_power_mode power_mode = DRX_POWER_UP;

	/* Refuse instances that were never opened or are only partially set up */
	if ((demod->my_common_attr == NULL) ||
	    (demod->my_ext_attr == NULL) ||
	    (demod->my_i2c_dev_addr == NULL) ||
	    (!demod->my_common_attr->is_opened)) {
		return -EINVAL;
	}

	/* power up */
	rc = ctrl_power_mode(demod, &power_mode);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	/* Let the SCU run so the power-down request below gets executed */
	rc = drxj_dap_write_reg16(dev_addr, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE, 0);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	power_mode = DRX_POWER_DOWN;
	rc = ctrl_power_mode(demod, &power_mode);
	if (rc != 0) {
		pr_err("error %d\n", rc);
		goto rw_error;
	}

	DRX_ATTR_ISOPENED(demod) = false;

	return 0;

rw_error:
	/* Mark closed even on failure so the instance is not left "opened" */
	DRX_ATTR_ISOPENED(demod) = false;

	return rc;
}

/*
 * Microcode related functions
 */

/**
 * drx_u_code_compute_crc - Compute CRC of block of microcode data.
 * @block_data: Pointer to microcode data.
 * @nr_words: Size of microcode block (number of 16 bits words).
 *
 * returns The computed CRC residue.
 */
static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
{
	u16 i = 0;
	u16 j = 0;
	u32 crc_word = 0;
	u32 carry = 0;

	/*
	 * CRC-16 over big-endian 16-bit words, polynomial 0x8005 kept in the
	 * upper half of a 32-bit accumulator.  Note the carry is the MSB left
	 * over from the PREVIOUS shift; it is tested before being refreshed.
	 */
	while (i < nr_words) {
		/* Fold the next big-endian word into the low half */
		crc_word |= (u32)be16_to_cpu(*(__be16 *)(block_data));
		for (j = 0; j < 16; j++) {
			crc_word <<= 1;
			if (carry != 0)
				crc_word ^= 0x80050000UL;
			carry = crc_word & 0x80000000UL;
		}
		i++;
		block_data += (sizeof(u16));
	}
	/* The residue lives in the upper 16 bits of the accumulator */
	return (u16)(crc_word >> 16);
}

/**
 * drx_check_firmware - checks if the loaded firmware is valid
 *
 * @demod: demod structure
 * @mc_data: pointer to the start of the firmware
 * @size: firmware size
 *
 * Walks every block header once before anything is written to the device:
 * each header must fit inside @size, and version-info aux blocks (flag bit
 * 0x8 set and DRX_ISMCVERTYPE matches) are copied into the demod's
 * microcode record.  Returns 0 on success, -EINVAL when the image is
 * truncated.
 */
static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
			  unsigned size)
{
	struct drxu_code_block_hdr block_hdr;
	int i;
	unsigned count = 2 * sizeof(u16);	/* skip magic word + block count */
	u32 mc_dev_type, mc_version, mc_base_version;
	u16 mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data + sizeof(u16)));

	/*
	 * Scan microcode blocks first for version info
	 * and firmware check
	 */

	/* Clear version block */
	DRX_ATTR_MCRECORD(demod).aux_type = 0;
	DRX_ATTR_MCRECORD(demod).mc_dev_type = 0;
	DRX_ATTR_MCRECORD(demod).mc_version = 0;
	DRX_ATTR_MCRECORD(demod).mc_base_version = 0;

	for (i = 0; i < mc_nr_of_blks; i++) {
		/* Header is one u32 address plus three u16 fields */
		if (count + 3 * sizeof(u16) + sizeof(u32) > size)
			goto eof;

		/* Process block header */
		block_hdr.addr = be32_to_cpu(*(__be32 *)(mc_data + count));
		count += sizeof(u32);
		block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data + count));
		count += sizeof(u16);
		block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data + count));
		count += sizeof(u16);
		block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data + count));
		count += sizeof(u16);
		/* count now points just past this header */
		pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
			 count, block_hdr.addr, block_hdr.size,
			 block_hdr.flags, block_hdr.CRC);

		if (block_hdr.flags & 0x8) {
			/* Aux block: its payload lives at block_hdr.addr,
			 * an offset into the image itself */
			u8 *auxblk = ((void *)mc_data) + block_hdr.addr;
			u16 auxtype;

			if (block_hdr.addr + sizeof(u16) > size)
				goto eof;

			auxtype = be16_to_cpu(*(__be16 *)(auxblk));

			/* Aux block. Check type */
			if (DRX_ISMCVERTYPE(auxtype)) {
				/* type word + dev_type, version, base_version */
				if (block_hdr.addr + 2 * sizeof(u16) +
				    2 * sizeof(u32) > size)
					goto eof;

				auxblk += sizeof(u16);
				mc_dev_type = be32_to_cpu(*(__be32 *)(auxblk));
				auxblk += sizeof(u32);
				mc_version = be32_to_cpu(*(__be32 *)(auxblk));
				auxblk += sizeof(u32);
				mc_base_version = be32_to_cpu(*(__be32 *)(auxblk));

				DRX_ATTR_MCRECORD(demod).aux_type = auxtype;
				DRX_ATTR_MCRECORD(demod).mc_dev_type = mc_dev_type;
				DRX_ATTR_MCRECORD(demod).mc_version = mc_version;
				DRX_ATTR_MCRECORD(demod).mc_base_version = mc_base_version;

				pr_info("Firmware dev %x, ver %x, base ver %x\n",
					mc_dev_type, mc_version, mc_base_version);
			}
		} else if (count + block_hdr.size * sizeof(u16) > size)
			goto eof;

		/* count always advances past the block payload, aux or not */
		count += block_hdr.size * sizeof(u16);
	}
	return 0;

eof:
	pr_err("Firmware is truncated at pos %u/%u\n", count, size);
	return -EINVAL;
}

/**
 * drx_ctrl_u_code - Handle microcode upload or verify.
 * @dev_addr: Address of device.
 * @mc_info: Pointer to information about microcode data.
 * @action: Either UCODE_UPLOAD or UCODE_VERIFY
 *
 * This function returns:
 * 0:
 * - In case of UCODE_UPLOAD: code is successfully uploaded.
 * - In case of UCODE_VERIFY: image on device is equal to
 *   image provided to this control function.
 * -EIO:
 * - In case of UCODE_UPLOAD: I2C error.
 * - In case of UCODE_VERIFY: I2C error or image on device
 *   is not equal to image provided to this control function.
 * -EINVAL:
 * - Invalid arguments.
* - Provided image is corrupt */ static int drx_ctrl_u_code(struct drx_demod_instance *demod, struct drxu_code_info *mc_info, enum drxu_code_action action) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; u16 i = 0; u16 mc_nr_of_blks = 0; u16 mc_magic_word = 0; const u8 *mc_data_init = NULL; u8 *mc_data = NULL; unsigned size; char *mc_file; /* Check arguments */ if (!mc_info || !mc_info->mc_file) return -EINVAL; mc_file = mc_info->mc_file; if (!demod->firmware) { const struct firmware *fw = NULL; rc = request_firmware(&fw, mc_file, demod->i2c->dev.parent); if (rc < 0) { pr_err("Couldn't read firmware %s\n", mc_file); return rc; } demod->firmware = fw; if (demod->firmware->size < 2 * sizeof(u16)) { rc = -EINVAL; pr_err("Firmware is too short!\n"); goto release; } pr_info("Firmware %s, size %zu\n", mc_file, demod->firmware->size); } mc_data_init = demod->firmware->data; size = demod->firmware->size; mc_data = (void *)mc_data_init; /* Check data */ mc_magic_word = be16_to_cpu(*(__be16 *)(mc_data)); mc_data += sizeof(u16); mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data)); mc_data += sizeof(u16); if ((mc_magic_word != DRX_UCODE_MAGIC_WORD) || (mc_nr_of_blks == 0)) { rc = -EINVAL; pr_err("Firmware magic word doesn't match\n"); goto release; } if (action == UCODE_UPLOAD) { rc = drx_check_firmware(demod, (u8 *)mc_data_init, size); if (rc) goto release; pr_info("Uploading firmware %s\n", mc_file); } else { pr_info("Verifying if firmware upload was ok.\n"); } /* Process microcode blocks */ for (i = 0; i < mc_nr_of_blks; i++) { struct drxu_code_block_hdr block_hdr; u16 mc_block_nr_bytes = 0; /* Process block header */ block_hdr.addr = be32_to_cpu(*(__be32 *)(mc_data)); mc_data += sizeof(u32); block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data)); mc_data += sizeof(u16); block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data)); mc_data += sizeof(u16); block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data)); mc_data += sizeof(u16); pr_debug("%u: addr %u, size %u, flags 
0x%04x, CRC 0x%04x\n", (unsigned)(mc_data - mc_data_init), block_hdr.addr, block_hdr.size, block_hdr.flags, block_hdr.CRC); /* Check block header on: - data larger than 64Kb - if CRC enabled check CRC */ if ((block_hdr.size > 0x7FFF) || (((block_hdr.flags & DRX_UCODE_CRC_FLAG) != 0) && (block_hdr.CRC != drx_u_code_compute_crc(mc_data, block_hdr.size))) ) { /* Wrong data ! */ rc = -EINVAL; pr_err("firmware CRC is wrong\n"); goto release; } if (!block_hdr.size) continue; mc_block_nr_bytes = block_hdr.size * ((u16) sizeof(u16)); /* Perform the desired action */ switch (action) { case UCODE_UPLOAD: /* Upload microcode */ if (drxdap_fasi_write_block(dev_addr, block_hdr.addr, mc_block_nr_bytes, mc_data, 0x0000)) { rc = -EIO; pr_err("error writing firmware at pos %u\n", (unsigned)(mc_data - mc_data_init)); goto release; } break; case UCODE_VERIFY: { /* Verify uploaded microcode */ int result = 0; u8 mc_data_buffer[DRX_UCODE_MAX_BUF_SIZE]; u32 bytes_to_comp = 0; u32 bytes_left = mc_block_nr_bytes; u32 curr_addr = block_hdr.addr; u8 *curr_ptr = mc_data; while (bytes_left != 0) { if (bytes_left > DRX_UCODE_MAX_BUF_SIZE) bytes_to_comp = DRX_UCODE_MAX_BUF_SIZE; else bytes_to_comp = bytes_left; if (drxdap_fasi_read_block(dev_addr, curr_addr, (u16)bytes_to_comp, (u8 *)mc_data_buffer, 0x0000)) { pr_err("error reading firmware at pos %u\n", (unsigned)(mc_data - mc_data_init)); return -EIO; } result = memcmp(curr_ptr, mc_data_buffer, bytes_to_comp); if (result) { pr_err("error verifying firmware at pos %u\n", (unsigned)(mc_data - mc_data_init)); return -EIO; } curr_addr += ((dr_xaddr_t)(bytes_to_comp / 2)); curr_ptr =&(curr_ptr[bytes_to_comp]); bytes_left -=((u32) bytes_to_comp); } break; } default: return -EINVAL; break; } mc_data += mc_block_nr_bytes; } return 0; release: release_firmware(demod->firmware); demod->firmware = NULL; return rc; } /* caller is expected to check if lna is supported before enabling */ static int drxj_set_lna_state(struct drx_demod_instance *demod, bool 
state) { struct drxuio_cfg uio_cfg; struct drxuio_data uio_data; int result; uio_cfg.uio = DRX_UIO1; uio_cfg.mode = DRX_UIO_MODE_READWRITE; /* Configure user-I/O #3: enable read/write */ result = ctrl_set_uio_cfg(demod, &uio_cfg); if (result) { pr_err("Failed to setup LNA GPIO!\n"); return result; } uio_data.uio = DRX_UIO1; uio_data.value = state; result = ctrl_uio_write(demod, &uio_data); if (result != 0) { pr_err("Failed to %sable LNA!\n", state ? "en" : "dis"); return result; } return 0; } /* * The Linux DVB Driver for Micronas DRX39xx family (drx3933j) * * Written by Devin Heitmueller <[email protected]> */ static int drx39xxj_set_powerstate(struct dvb_frontend *fe, int enable) { struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; int result; enum drx_power_mode power_mode; if (enable) power_mode = DRX_POWER_UP; else power_mode = DRX_POWER_DOWN; result = ctrl_power_mode(demod, &power_mode); if (result != 0) { pr_err("Power state change failed\n"); return 0; } return 0; } static int drx39xxj_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; int result; enum drx_lock_status lock_status; *status = 0; result = ctrl_lock_status(demod, &lock_status); if (result != 0) { pr_err("drx39xxj: could not get lock status!\n"); *status = 0; } switch (lock_status) { case DRX_NEVER_LOCK: *status = 0; pr_err("drx says NEVER_LOCK\n"); break; case DRX_NOT_LOCKED: *status = 0; break; case DRX_LOCK_STATE_1: case DRX_LOCK_STATE_2: case DRX_LOCK_STATE_3: case DRX_LOCK_STATE_4: case DRX_LOCK_STATE_5: case DRX_LOCK_STATE_6: case DRX_LOCK_STATE_7: case DRX_LOCK_STATE_8: case DRX_LOCK_STATE_9: *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC; break; case DRX_LOCKED: *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; break; default: pr_err("Lock state unknown %d\n", 
lock_status); } ctrl_sig_quality(demod, lock_status); return 0; } static int drx39xxj_read_ber(struct dvb_frontend *fe, u32 *ber) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; if (p->pre_bit_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { *ber = 0; return 0; } if (!p->pre_bit_count.stat[0].uvalue) { if (!p->pre_bit_error.stat[0].uvalue) *ber = 0; else *ber = 1000000; } else { *ber = frac_times1e6(p->pre_bit_error.stat[0].uvalue, p->pre_bit_count.stat[0].uvalue); } return 0; } static int drx39xxj_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; if (p->strength.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { *strength = 0; return 0; } *strength = p->strength.stat[0].uvalue; return 0; } static int drx39xxj_read_snr(struct dvb_frontend *fe, u16 *snr) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; u64 tmp64; if (p->cnr.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { *snr = 0; return 0; } tmp64 = p->cnr.stat[0].svalue; do_div(tmp64, 10); *snr = tmp64; return 0; } static int drx39xxj_read_ucblocks(struct dvb_frontend *fe, u32 *ucb) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; if (p->block_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { *ucb = 0; return 0; } *ucb = p->block_error.stat[0].uvalue; return 0; } static int drx39xxj_set_frontend(struct dvb_frontend *fe) { #ifdef DJH_DEBUG int i; #endif struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; enum drx_standard standard = DRX_STANDARD_8VSB; struct drx_channel channel; int result; static const struct drx_channel def_channel = { /* frequency */ 0, /* bandwidth */ DRX_BANDWIDTH_6MHZ, /* mirror */ DRX_MIRROR_NO, /* constellation */ DRX_CONSTELLATION_AUTO, /* hierarchy */ DRX_HIERARCHY_UNKNOWN, /* priority */ DRX_PRIORITY_UNKNOWN, /* coderate */ DRX_CODERATE_UNKNOWN, /* guard */ DRX_GUARD_UNKNOWN, 
/* fftmode */ DRX_FFTMODE_UNKNOWN, /* classification */ DRX_CLASSIFICATION_AUTO, /* symbolrate */ 5057000, /* interleavemode */ DRX_INTERLEAVEMODE_UNKNOWN, /* ldpc */ DRX_LDPC_UNKNOWN, /* carrier */ DRX_CARRIER_UNKNOWN, /* frame mode */ DRX_FRAMEMODE_UNKNOWN }; u32 constellation = DRX_CONSTELLATION_AUTO; /* Bring the demod out of sleep */ drx39xxj_set_powerstate(fe, 1); if (fe->ops.tuner_ops.set_params) { u32 int_freq; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* Set tuner to desired frequency and standard */ fe->ops.tuner_ops.set_params(fe); /* Use the tuner's IF */ if (fe->ops.tuner_ops.get_if_frequency) { fe->ops.tuner_ops.get_if_frequency(fe, &int_freq); demod->my_common_attr->intermediate_freq = int_freq / 1000; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } switch (p->delivery_system) { case SYS_ATSC: standard = DRX_STANDARD_8VSB; break; case SYS_DVBC_ANNEX_B: standard = DRX_STANDARD_ITU_B; switch (p->modulation) { case QAM_64: constellation = DRX_CONSTELLATION_QAM64; break; case QAM_256: constellation = DRX_CONSTELLATION_QAM256; break; default: constellation = DRX_CONSTELLATION_AUTO; break; } break; default: return -EINVAL; } /* Set the standard (will be powered up if necessary */ result = ctrl_set_standard(demod, &standard); if (result != 0) { pr_err("Failed to set standard! result=%02x\n", result); return -EINVAL; } /* set channel parameters */ channel = def_channel; channel.frequency = p->frequency / 1000; channel.bandwidth = DRX_BANDWIDTH_6MHZ; channel.constellation = constellation; /* program channel */ result = ctrl_set_channel(demod, &channel); if (result != 0) { pr_err("Failed to set channel!\n"); return -EINVAL; } /* Just for giggles, let's shut off the LNA again.... 
*/ drxj_set_lna_state(demod, false); /* After set_frontend, except for strength, stats aren't available */ p->strength.stat[0].scale = FE_SCALE_RELATIVE; return 0; } static int drx39xxj_sleep(struct dvb_frontend *fe) { /* power-down the demodulator */ return drx39xxj_set_powerstate(fe, 0); } static int drx39xxj_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; bool i2c_gate_state; int result; #ifdef DJH_DEBUG pr_debug("i2c gate call: enable=%d state=%d\n", enable, state->i2c_gate_open); #endif if (enable) i2c_gate_state = true; else i2c_gate_state = false; if (state->i2c_gate_open == enable) { /* We're already in the desired state */ return 0; } result = ctrl_i2c_bridge(demod, &i2c_gate_state); if (result != 0) { pr_err("drx39xxj: could not open i2c gate [%d]\n", result); dump_stack(); } else { state->i2c_gate_open = enable; } return 0; } static int drx39xxj_init(struct dvb_frontend *fe) { struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; int rc = 0; if (fe->exit == DVB_FE_DEVICE_RESUME) { /* so drxj_open() does what it needs to do */ demod->my_common_attr->is_opened = false; rc = drxj_open(demod); if (rc != 0) pr_err("drx39xxj_init(): DRX open failed rc=%d!\n", rc); } else drx39xxj_set_powerstate(fe, 1); return rc; } static int drx39xxj_set_lna(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; struct drxj_data *ext_attr = demod->my_ext_attr; if (c->lna) { if (!ext_attr->has_lna) { pr_err("LNA is not supported on this device!\n"); return -EINVAL; } } return drxj_set_lna_state(demod, c->lna); } static int drx39xxj_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void drx39xxj_release(struct 
dvb_frontend *fe) { struct drx39xxj_state *state = fe->demodulator_priv; struct drx_demod_instance *demod = state->demod; /* if device is removed don't access it */ if (fe->exit != DVB_FE_DEVICE_REMOVED) drxj_close(demod); kfree(demod->my_ext_attr); kfree(demod->my_common_attr); kfree(demod->my_i2c_dev_addr); release_firmware(demod->firmware); kfree(demod); kfree(state); } static const struct dvb_frontend_ops drx39xxj_ops; struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c) { struct drx39xxj_state *state = NULL; struct i2c_device_addr *demod_addr = NULL; struct drx_common_attr *demod_comm_attr = NULL; struct drxj_data *demod_ext_attr = NULL; struct drx_demod_instance *demod = NULL; struct dtv_frontend_properties *p; int result; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct drx39xxj_state), GFP_KERNEL); if (state == NULL) goto error; demod = kmalloc(sizeof(struct drx_demod_instance), GFP_KERNEL); if (demod == NULL) goto error; demod_addr = kmemdup(&drxj_default_addr_g, sizeof(struct i2c_device_addr), GFP_KERNEL); if (demod_addr == NULL) goto error; demod_comm_attr = kmemdup(&drxj_default_comm_attr_g, sizeof(struct drx_common_attr), GFP_KERNEL); if (demod_comm_attr == NULL) goto error; demod_ext_attr = kmemdup(&drxj_data_g, sizeof(struct drxj_data), GFP_KERNEL); if (demod_ext_attr == NULL) goto error; /* setup the state */ state->i2c = i2c; state->demod = demod; /* setup the demod data */ memcpy(demod, &drxj_default_demod_g, sizeof(struct drx_demod_instance)); demod->my_i2c_dev_addr = demod_addr; demod->my_common_attr = demod_comm_attr; demod->my_i2c_dev_addr->user_data = state; demod->my_common_attr->microcode_file = DRX39XX_MAIN_FIRMWARE; demod->my_common_attr->verify_microcode = true; demod->my_common_attr->intermediate_freq = 5000; demod->my_common_attr->current_power_mode = DRX_POWER_DOWN; demod->my_ext_attr = demod_ext_attr; ((struct drxj_data *)demod_ext_attr)->uio_sma_tx_mode = DRX_UIO_MODE_READWRITE; demod->i2c = i2c; 
result = drxj_open(demod); if (result != 0) { pr_err("DRX open failed! Aborting\n"); goto error; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &drx39xxj_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; /* Initialize stats - needed for DVBv5 stats to work */ p = &state->frontend.dtv_property_cache; p->strength.len = 1; p->pre_bit_count.len = 1; p->pre_bit_error.len = 1; p->post_bit_count.len = 1; p->post_bit_error.len = 1; p->block_count.len = 1; p->block_error.len = 1; p->cnr.len = 1; p->strength.stat[0].scale = FE_SCALE_RELATIVE; p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; return &state->frontend; error: kfree(demod_ext_attr); kfree(demod_comm_attr); kfree(demod_addr); kfree(demod); kfree(state); return NULL; } EXPORT_SYMBOL(drx39xxj_attach); static const struct dvb_frontend_ops drx39xxj_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Micronas DRX39xxj family Frontend", .frequency_stepsize = 62500, .frequency_min = 51000000, .frequency_max = 858000000, .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .init = drx39xxj_init, .i2c_gate_ctrl = drx39xxj_i2c_gate_ctrl, .sleep = drx39xxj_sleep, .set_frontend = drx39xxj_set_frontend, .get_tune_settings = drx39xxj_get_tune_settings, .read_status = drx39xxj_read_status, .read_ber = drx39xxj_read_ber, .read_signal_strength = drx39xxj_read_signal_strength, .read_snr = drx39xxj_read_snr, .read_ucblocks = drx39xxj_read_ucblocks, .release = drx39xxj_release, .set_lna = drx39xxj_set_lna, }; MODULE_DESCRIPTION("Micronas DRX39xxj Frontend"); MODULE_AUTHOR("Devin Heitmueller"); MODULE_LICENSE("GPL"); 
MODULE_FIRMWARE(DRX39XX_MAIN_FIRMWARE);
325337.c
#include <aos/aos.h>
#include <aos/aos_rpc.h>
#include <aos/aos_rpc_ump.h>
#include <aos/aos_rpc_types.h>
#include <aos/domain.h>

#include <ctype.h>

#include <rpc/server/ump.h>

#include "nameserver.h"

static struct rpc_ump_server server;

/* Response sizes: message header plus any fixed per-method payload. */
#define NAMESERVER_STATUS_RESPONSE_SIZE (sizeof(struct rpc_message) + 0)
#define NAMESERVER_REGISTER_RESPONSE_SIZE (NAMESERVER_STATUS_RESPONSE_SIZE)
#define NAMESERVER_DEREGISTER_RESPONSE_SIZE (NAMESERVER_STATUS_RESPONSE_SIZE)
#define NAMESERVER_LOOKUP_RESPONSE_SIZE (NAMESERVER_STATUS_RESPONSE_SIZE + sizeof(domainid_t))
#define NAMESERVER_ENUMERATE_RESPONSE_SIZE (NAMESERVER_STATUS_RESPONSE_SIZE)

/* One registered service: its name, the UMP channel on which the service
 * accepts "add client" requests, and the owning domain's PID. */
struct nameserver_entry {
    char name[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1];
    struct aos_rpc add_client_chan;
    domainid_t pid;
};

// djb2 hash. Source: http://www.cse.yorku.ca/~oz/hash.html
// NOTE(review): the hash is used directly as the table key, so two distinct
// names with the same hash collide (second registration is rejected).
static uint64_t hash_string(char *str)
{
    assert(str != NULL);

    uint64_t hash = 5381;
    int c;

    while ((c = *str++)) {
        hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
    }

    return hash;
}

/* Copy the fixed-size name field out of the message payload and ensure
 * NUL-termination. */
static void read_name(char dst[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1], struct rpc_message *msg)
{
    memset(dst, 0, AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1);
    memcpy(dst, msg->msg.payload, AOS_RPC_NAMESERVER_MAX_NAME_LENGTH);
}

/* Initialize a reply as an empty Status_Ok echo of the request's method. */
static void reply_init(struct rpc_message *msg, struct rpc_message *resp)
{
    resp->msg.method = msg->msg.method;
    resp->msg.status = Status_Ok;
    resp->msg.payload_length = 0;
    resp->cap = NULL_CAP;
}

/* Service names may only contain alphanumerics and '/'. */
static bool check_name_valid(char *name)
{
    for (char *ptr = name; *ptr != '\0'; ptr++) {
        if (!(isalnum(*ptr) || *ptr == '/')) {
            return false;
        }
    }

    return true;
}

/*
 * Register a new service under `name`.
 * `chan_frame_cap` is the frame backing the service's add-client channel;
 * `pid` is the registering domain. Fails if the name (hash) is already taken.
 */
errval_t nameserver_add_service(struct nameserver_state *ns_state, char *name, struct capref chan_frame_cap, domainid_t pid)
{
    errval_t err;

    assert(ns_state != NULL);
    assert(strlen(name) <= AOS_RPC_NAMESERVER_MAX_NAME_LENGTH);

    collections_hash_table *service_table = ns_state->service_table;
    assert(service_table != NULL);

    uint64_t hash = hash_string(name);

    struct nameserver_entry *entry = collections_hash_find(service_table, hash);
    if (entry != NULL) {
        debug_printf("Service '%s' already registered.\n", name);
        return LIB_ERR_NOT_IMPLEMENTED;
    }

    entry = calloc(1, sizeof(struct nameserver_entry));
    if (entry == NULL) {
        debug_printf("calloc() failed");
        return LIB_ERR_MALLOC_FAIL;
    }

    entry->pid = pid;
    strncpy(entry->name, name, AOS_RPC_NAMESERVER_MAX_NAME_LENGTH);

    err = aos_rpc_ump_init(&entry->add_client_chan, chan_frame_cap, false);
    if (err_is_fail(err)) {
        debug_printf("aos_rpc_ump_init() failed: %s", err_getstring(err));
        /* Fix: entry was leaked on this path before. */
        free(entry);
        return err;
    }

    collections_hash_insert(service_table, hash, entry);

    return SYS_ERR_OK;
}

/* Handle a register request: validate the name, then add the service.
 * On any failure the reply status is set to Status_Error. */
static void handle_register(struct rpc_message *msg, struct nameserver_state *ns_state, struct rpc_message *resp)
{
    errval_t err;

    assert(msg != NULL);
    assert(ns_state != NULL);

    char name[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1];
    read_name(name, msg);

    /* The PID follows the fixed-size name field in the payload */
    domainid_t pid;
    memcpy(&pid, &(msg->msg.payload[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH]), sizeof(domainid_t));

    if (!check_name_valid(name)) {
        debug_printf("Service name '%s' is not allowed.\n", name);
        resp->msg.status = Status_Error;
        return;
    }

    struct capref chan_frame_cap = msg->cap;

    err = nameserver_add_service(ns_state, name, chan_frame_cap, pid);
    if (err_is_fail(err)) {
        debug_printf("add_service() failed: %s", err_getstring(err));
        resp->msg.status = Status_Error;
        return;
    }
}

/* Handle a deregister request: remove (and free) the named entry.
 * NOTE(review): if a lookup for this entry is still pending
 * (rpc_add_client_request_pending points into it), deleting it here leaves
 * a dangling pointer — TODO confirm callers cannot interleave these. */
static void handle_deregister(struct rpc_message *msg, struct nameserver_state *ns_state, struct rpc_message *resp)
{
    assert(msg != NULL);
    assert(ns_state != NULL);

    char name[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1];
    read_name(name, msg);

    collections_hash_table *service_table = ns_state->service_table;
    assert(service_table != NULL);

    uint64_t hash = hash_string(name);

    struct nameserver_entry *entry = collections_hash_find(service_table, hash);
    if (entry == NULL) {
        debug_printf("Service '%s' not registered.\n", name);
        resp->msg.status = Status_Error;
        return;
    }

    collections_hash_delete(service_table, hash);
}

/* Send an empty Method_Ump_Add_Client request on the service's channel,
 * asking it to set up a channel for a new client. Uses a stack buffer:
 * aos_rpc_ump_send_message() does not retain the pointer. */
static errval_t send_add_client(struct aos_rpc *add_client_chan)
{
    errval_t err;

    uint8_t send_buf[sizeof(struct rpc_message)];
    struct rpc_message *send = (struct rpc_message *) &send_buf;

    send->msg.method = Method_Ump_Add_Client;
    send->msg.payload_length = 0;
    send->msg.status = Status_Ok;
    send->cap = NULL_CAP;

    err = aos_rpc_ump_send_message(add_client_chan, send);
    if (err_is_fail(err)) {
        debug_printf("aos_rpc_ump_send_message() failed: %s", err_getstring(err));
        return err;
    }

    return SYS_ERR_OK;
}

/*
 * Handle a lookup request. The reply is NOT sent here: an add-client request
 * is forwarded to the service, the pending state is recorded, and server
 * processing is paused until try_add_client_response() forwards the
 * service's answer back to the client on `rpc_resp`.
 */
static errval_t handle_lookup(struct rpc_message *msg, struct nameserver_state *ns_state, struct aos_rpc *rpc_resp)
{
    errval_t err;

    assert(msg != NULL);
    assert(ns_state != NULL);

    char name[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1];
    read_name(name, msg);

    collections_hash_table *service_table = ns_state->service_table;
    assert(service_table != NULL);

    uint64_t hash = hash_string(name);

    struct nameserver_entry *entry = collections_hash_find(service_table, hash);
    if (entry == NULL) {
        //debug_printf("Service '%s' not registered.\n", name);
        return LIB_ERR_NOT_IMPLEMENTED;
    }

    err = send_add_client(&entry->add_client_chan);
    if (err_is_fail(err)) {
        debug_printf("send_add_client() failed: %s", err_getstring(err));
        return err;
    }

    /* Only one lookup can be in flight: pause the server until the
     * service answers. */
    ns_state->add_client_pid_pending = entry->pid;
    ns_state->rpc_add_client_request_pending = &entry->add_client_chan;
    ns_state->rpc_add_client_response_pending = rpc_resp;

    rpc_ump_server_pause_processing(&server);

    return SYS_ERR_OK;
}

/* Poll for the service's add-client answer; if present, forward the new
 * client frame cap and the service PID to the waiting client and resume
 * normal server processing. */
static errval_t try_add_client_response(struct nameserver_state *ns_state)
{
    errval_t err = SYS_ERR_OK;

    struct rpc_message *recv = NULL;

    // Check if we are waiting for a response to forward
    if (ns_state->rpc_add_client_request_pending != NULL) {
        err = aos_rpc_ump_receive_non_block(ns_state->rpc_add_client_request_pending, &recv);
        if (err_is_fail(err)) {
            debug_printf("aos_rpc_ump_receive_non_block() failed: %s", err_getstring(err));
            goto cleanup;
        }

        if (recv != NULL) {
            assert(recv->msg.method == Method_Ump_Add_Client);
            assert(recv->msg.status == Status_Ok);
            assert(recv->msg.payload_length == 0);
            assert(!capref_is_null(recv->cap));

            struct capref client_frame_cap = recv->cap;

            /* Stack buffer: send_message does not retain the pointer */
            uint8_t recv_buf[NAMESERVER_LOOKUP_RESPONSE_SIZE];
            struct rpc_message *resp = (struct rpc_message *) &recv_buf;

            resp->msg.method = Method_Nameserver_Lookup;
            resp->msg.status = Status_Ok;
            resp->cap = client_frame_cap;
            resp->msg.payload_length = sizeof(domainid_t);
            memcpy(resp->msg.payload, &ns_state->add_client_pid_pending, sizeof(domainid_t));

            err = aos_rpc_ump_send_message(ns_state->rpc_add_client_response_pending, resp);
            if (err_is_fail(err)) {
                debug_printf("aos_rpc_ump_send_message() failed: %s", err_getstring(err));
                goto cleanup;
            }

            /* Lookup complete: clear pending state and resume serving */
            ns_state->add_client_pid_pending = 0;
            ns_state->rpc_add_client_request_pending = NULL;
            ns_state->rpc_add_client_response_pending = NULL;

            rpc_ump_server_start_processing(&server);
        }
    }

cleanup:
    if (recv != NULL) {
        free(recv);
    }

    return err;
}

/* Periodic-event trampoline for try_add_client_response(). */
static void add_client_response_periodic_event_func(void *arg)
{
    errval_t err;

    assert(arg != NULL);
    struct nameserver_state *ns_state = arg;

    err = try_add_client_response(ns_state);
    if (err_is_fail(err)) {
        debug_printf("Unhandled error in nameserver add_client_response_periodic_event_func()\n");
    }
}

static bool query_matches(char *query, char *name)
{
    // Check if name starts with query
    return strncmp(query, name, strlen(query)) == 0;
}

/*
 * Handle an enumerate request: reply with the count of matching service
 * names followed by the NUL-terminated names themselves. The response is
 * allocated here (size depends on the matches); *resp is set to NULL if
 * allocation fails, which the caller must check.
 */
static void handle_enumerate(struct rpc_message *msg, struct nameserver_state *ns_state, struct rpc_message **resp)
{
    int32_t ret;

    assert(msg != NULL);
    assert(ns_state != NULL);

    char query[AOS_RPC_NAMESERVER_MAX_NAME_LENGTH + 1];
    read_name(query, msg);

    collections_hash_table *service_table = ns_state->service_table;
    assert(service_table != NULL);

    // Count entries that match the received query
    size_t name_list_len = 0;
    size_t match_count = 0;

    __unused uint64_t key;
    struct nameserver_entry *entry;

    ret = collections_hash_traverse_start(service_table);
    assert(ret == 1);

    while ((entry = collections_hash_traverse_next(service_table, &key))) {
        if (query_matches(query, entry->name)) {
            size_t name_len = strlen(entry->name);
            name_list_len += name_len + 1; // Add one for null-byte
            match_count++;
        }
    }

    ret = collections_hash_traverse_end(service_table);
    assert(ret == 1);

    // Allocate buffer large enough to contain all matches
    size_t payload_length = sizeof(size_t) + name_list_len;
    *resp = malloc(NAMESERVER_ENUMERATE_RESPONSE_SIZE + payload_length);
    if (*resp == NULL) {
        /* Fix: reply_init() dereferenced a NULL pointer here before. */
        debug_printf("malloc() failed");
        return;
    }
    reply_init(msg, *resp);
    (*resp)->msg.payload_length = payload_length;

    // Write matches into response
    char * const payload_base = &((*resp)->msg.payload[0]);
    char *ptr = payload_base;

    memcpy(ptr, &match_count, sizeof(size_t));
    ptr += sizeof(size_t);

    ret = collections_hash_traverse_start(service_table);
    assert(ret == 1);

    while ((entry = collections_hash_traverse_next(service_table, &key))) {
        if (query_matches(query, entry->name)) {
            size_t name_len = strlen(entry->name);
            // Add one for null-byte
            memcpy(ptr, entry->name, name_len + 1);
            ptr += name_len + 1;
        }
    }

    assert(((void *) ptr) - ((void *) payload_base) == payload_length);

    ret = collections_hash_traverse_end(service_table);
    assert(ret == 1);
}

/*
 * Dispatch an incoming request to its handler and send the reply on `rpc`.
 * Lookup requests return without replying here (the reply is forwarded
 * later by try_add_client_response()).
 */
static void service_recv_cb(struct rpc_message *msg, void *callback_state, struct aos_rpc *rpc, void *server_state)
{
    errval_t err;

    struct nameserver_state *ns_state = server_state;
    struct rpc_message *resp = NULL;

    switch (msg->msg.method) {
    case Method_Nameserver_Register:
        resp = malloc(NAMESERVER_REGISTER_RESPONSE_SIZE);
        if (resp == NULL)
            break;
        reply_init(msg, resp);
        handle_register(msg, ns_state, resp);
        break;
    case Method_Nameserver_Deregister:
        resp = malloc(NAMESERVER_DEREGISTER_RESPONSE_SIZE);
        if (resp == NULL)
            break;
        reply_init(msg, resp);
        handle_deregister(msg, ns_state, resp);
        break;
    case Method_Nameserver_Lookup:
        err = handle_lookup(msg, ns_state, rpc);
        if (err_is_ok(err)) {
            // Do nothing since we are waiting for the add_client response
            return;
        }
        // Something went wrong before/while sending the add_client request,
        // return error immediately
        resp = malloc(NAMESERVER_LOOKUP_RESPONSE_SIZE);
        if (resp == NULL)
            break;
        reply_init(msg, resp);
        resp->msg.status = Status_Error;
        break;
    case Method_Nameserver_Enumerate:
        handle_enumerate(msg, ns_state, &resp);
        break;
    default:
        debug_printf("Unknown message type. Ignoring message.\n");
        resp = malloc(NAMESERVER_STATUS_RESPONSE_SIZE);
        if (resp == NULL)
            break;
        reply_init(msg, resp);
        resp->msg.status = Status_Error;
        break;
    }

    /* Fix: unchecked malloc() results were dereferenced before. Drop the
     * reply rather than crash when out of memory. */
    if (resp == NULL) {
        debug_printf("malloc() failed");
        return;
    }

    err = aos_rpc_ump_send_message(rpc, resp);
    if (err_is_fail(err)) {
        debug_printf("aos_rpc_ump_send_message() failed: %s", err_getstring(err));
    }

    /* Fix: resp was leaked on every request. aos_rpc_ump_send_message()
     * does not keep the pointer (other callers pass stack buffers). */
    free(resp);
}

errval_t nameserver_add_client(struct aos_rpc *rpc, coreid_t mpid)
{
    return rpc_ump_server_add_client(&server, rpc);
}

errval_t nameserver_serve_next(void)
{
    return rpc_ump_server_serve_next(&server);
}

/* Hash-table value destructor for nameserver_entry. */
static void free_nameserver_entry(void *ns_entry)
{
    struct nameserver_entry *entry = ns_entry;

    // TODO Free members of entry

    free(entry);
}

/* Periodic-event trampoline for serving the next pending request. */
static void serve_periodic_event_func(void *arg)
{
    errval_t err;

    err = nameserver_serve_next();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "in nameserver_ump_serve_next()");
        return;
    }
}

/*
 * Initialize the nameserver: create the service table, register the two
 * periodic events (serving requests and forwarding pending add-client
 * responses), and start the UMP server.
 */
errval_t nameserver_init(struct nameserver_state *server_state)
{
    errval_t err;

    memset(server_state, 0, sizeof(struct nameserver_state));
    collections_hash_create(&server_state->service_table, free_nameserver_entry);

    memset(&server_state->serve_periodic_ev, 0, sizeof(struct periodic_event));
    err = periodic_event_create(&server_state->serve_periodic_ev,
                                get_default_waitset(),
                                NAMESERVER_PERIODIC_SERVE_EVENT_US,
                                MKCLOSURE(serve_periodic_event_func, server_state));
    if (err_is_fail(err)) {
        debug_printf("periodic_event_create() failed: %s\n", err_getstring(err));
        return err;
    }

    memset(&server_state->add_client_response_periodic_ev, 0, sizeof(struct periodic_event));
    err = periodic_event_create(&server_state->add_client_response_periodic_ev,
                                get_default_waitset(),
                                NAMESERVER_PERIODIC_SERVE_EVENT_US,
                                MKCLOSURE(add_client_response_periodic_event_func, server_state));
    if (err_is_fail(err)) {
        debug_printf("periodic_event_create() failed: %s\n", err_getstring(err));
        return err;
    }

    err = rpc_ump_server_init(&server, service_recv_cb, NULL, NULL, server_state);
    if (err_is_fail(err)) {
        debug_printf("rpc_ump_server_init() failed: %s\n", err_getstring(err));
        return err_push(err, RPC_ERR_INITIALIZATION);
    }

    /* Fix: log message typo ("Namerserver") */
    debug_printf("Nameserver started.\n");

    return SYS_ERR_OK;
}
/* ===== file boundary: 910806.c — Conexant CX23885 PCIe bridge driver (separate translation unit concatenated below) ===== */
/* * Driver for the Conexant CX23885 PCIe bridge * * Copyright (c) 2006 Steven Toth <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. */ #include "cx23885.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/div64.h> #include <linux/firmware.h> #include "cimax2.h" #include "altera-ci.h" #include "cx23888-ir.h" #include "cx23885-ir.h" #include "cx23885-av.h" #include "cx23885-input.h" MODULE_DESCRIPTION("Driver for cx23885 based TV cards"); MODULE_AUTHOR("Steven Toth <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX23885_VERSION); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); #define dprintk(level, fmt, arg...)\ do { if (debug >= level)\ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##arg); \ } while (0) static unsigned int cx23885_devcount; #define NO_SYNC_LINE (-1U) /* FIXME, these allocations will change when * analog arrives. The be reviewed. 
* CX23887 Assumptions * 1 line = 16 bytes of CDT * cmds size = 80 * cdt size = 16 * linesize * iqsize = 64 * maxlines = 6 * * Address Space: * 0x00000000 0x00008fff FIFO clusters * 0x00010000 0x000104af Channel Management Data Structures * 0x000104b0 0x000104ff Free * 0x00010500 0x000108bf 15 channels * iqsize * 0x000108c0 0x000108ff Free * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables * 15 channels * (iqsize + (maxlines * linesize)) * 0x00010ea0 0x00010xxx Free */ static struct sram_channel cx23885_sram_channels[] = { [SRAM_CH01] = { .name = "VID A", .cmds_start = 0x10000, .ctrl_start = 0x10380, .cdt = 0x104c0, .fifo_start = 0x40, .fifo_size = 0x2800, .ptr1_reg = DMA1_PTR1, .ptr2_reg = DMA1_PTR2, .cnt1_reg = DMA1_CNT1, .cnt2_reg = DMA1_CNT2, }, [SRAM_CH02] = { .name = "ch2", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA2_PTR1, .ptr2_reg = DMA2_PTR2, .cnt1_reg = DMA2_CNT1, .cnt2_reg = DMA2_CNT2, }, [SRAM_CH03] = { .name = "TS1 B", .cmds_start = 0x100A0, .ctrl_start = 0x10400, .cdt = 0x10580, .fifo_start = 0x5000, .fifo_size = 0x1000, .ptr1_reg = DMA3_PTR1, .ptr2_reg = DMA3_PTR2, .cnt1_reg = DMA3_CNT1, .cnt2_reg = DMA3_CNT2, }, [SRAM_CH04] = { .name = "ch4", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA4_PTR1, .ptr2_reg = DMA4_PTR2, .cnt1_reg = DMA4_CNT1, .cnt2_reg = DMA4_CNT2, }, [SRAM_CH05] = { .name = "ch5", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA5_PTR1, .ptr2_reg = DMA5_PTR2, .cnt1_reg = DMA5_CNT1, .cnt2_reg = DMA5_CNT2, }, [SRAM_CH06] = { .name = "TS2 C", .cmds_start = 0x10140, .ctrl_start = 0x10440, .cdt = 0x105e0, .fifo_start = 0x6000, .fifo_size = 0x1000, .ptr1_reg = DMA5_PTR1, .ptr2_reg = DMA5_PTR2, .cnt1_reg = DMA5_CNT1, .cnt2_reg = DMA5_CNT2, }, [SRAM_CH07] = { .name = "TV Audio", .cmds_start = 0x10190, .ctrl_start = 0x10480, .cdt = 0x10a00, .fifo_start = 0x7000, 
.fifo_size = 0x1000, .ptr1_reg = DMA6_PTR1, .ptr2_reg = DMA6_PTR2, .cnt1_reg = DMA6_CNT1, .cnt2_reg = DMA6_CNT2, }, [SRAM_CH08] = { .name = "ch8", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA7_PTR1, .ptr2_reg = DMA7_PTR2, .cnt1_reg = DMA7_CNT1, .cnt2_reg = DMA7_CNT2, }, [SRAM_CH09] = { .name = "ch9", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA8_PTR1, .ptr2_reg = DMA8_PTR2, .cnt1_reg = DMA8_CNT1, .cnt2_reg = DMA8_CNT2, }, }; static struct sram_channel cx23887_sram_channels[] = { [SRAM_CH01] = { .name = "VID A", .cmds_start = 0x10000, .ctrl_start = 0x105b0, .cdt = 0x107b0, .fifo_start = 0x40, .fifo_size = 0x2800, .ptr1_reg = DMA1_PTR1, .ptr2_reg = DMA1_PTR2, .cnt1_reg = DMA1_CNT1, .cnt2_reg = DMA1_CNT2, }, [SRAM_CH02] = { .name = "VID A (VBI)", .cmds_start = 0x10050, .ctrl_start = 0x105F0, .cdt = 0x10810, .fifo_start = 0x3000, .fifo_size = 0x1000, .ptr1_reg = DMA2_PTR1, .ptr2_reg = DMA2_PTR2, .cnt1_reg = DMA2_CNT1, .cnt2_reg = DMA2_CNT2, }, [SRAM_CH03] = { .name = "TS1 B", .cmds_start = 0x100A0, .ctrl_start = 0x10630, .cdt = 0x10870, .fifo_start = 0x5000, .fifo_size = 0x1000, .ptr1_reg = DMA3_PTR1, .ptr2_reg = DMA3_PTR2, .cnt1_reg = DMA3_CNT1, .cnt2_reg = DMA3_CNT2, }, [SRAM_CH04] = { .name = "ch4", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA4_PTR1, .ptr2_reg = DMA4_PTR2, .cnt1_reg = DMA4_CNT1, .cnt2_reg = DMA4_CNT2, }, [SRAM_CH05] = { .name = "ch5", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA5_PTR1, .ptr2_reg = DMA5_PTR2, .cnt1_reg = DMA5_CNT1, .cnt2_reg = DMA5_CNT2, }, [SRAM_CH06] = { .name = "TS2 C", .cmds_start = 0x10140, .ctrl_start = 0x10670, .cdt = 0x108d0, .fifo_start = 0x6000, .fifo_size = 0x1000, .ptr1_reg = DMA5_PTR1, .ptr2_reg = DMA5_PTR2, .cnt1_reg = DMA5_CNT1, .cnt2_reg = DMA5_CNT2, }, [SRAM_CH07] = { .name = "TV Audio", 
.cmds_start = 0x10190, .ctrl_start = 0x106B0, .cdt = 0x10930, .fifo_start = 0x7000, .fifo_size = 0x1000, .ptr1_reg = DMA6_PTR1, .ptr2_reg = DMA6_PTR2, .cnt1_reg = DMA6_CNT1, .cnt2_reg = DMA6_CNT2, }, [SRAM_CH08] = { .name = "ch8", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA7_PTR1, .ptr2_reg = DMA7_PTR2, .cnt1_reg = DMA7_CNT1, .cnt2_reg = DMA7_CNT2, }, [SRAM_CH09] = { .name = "ch9", .cmds_start = 0x0, .ctrl_start = 0x0, .cdt = 0x0, .fifo_start = 0x0, .fifo_size = 0x0, .ptr1_reg = DMA8_PTR1, .ptr2_reg = DMA8_PTR2, .cnt1_reg = DMA8_CNT1, .cnt2_reg = DMA8_CNT2, }, }; static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask) { unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); dev->pci_irqmask |= mask; spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); } void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask) { unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); dev->pci_irqmask |= mask; cx_set(PCI_INT_MSK, mask); spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); } void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask) { u32 v; unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); v = mask & dev->pci_irqmask; if (v) cx_set(PCI_INT_MSK, v); spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); } static inline void cx23885_irq_enable_all(struct cx23885_dev *dev) { cx23885_irq_enable(dev, 0xffffffff); } void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask) { unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); cx_clear(PCI_INT_MSK, mask); spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); } static inline void cx23885_irq_disable_all(struct cx23885_dev *dev) { cx23885_irq_disable(dev, 0xffffffff); } void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask) { unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); dev->pci_irqmask &= ~mask; cx_clear(PCI_INT_MSK, mask); 
spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); } static u32 cx23885_irq_get_mask(struct cx23885_dev *dev) { u32 v; unsigned long flags; spin_lock_irqsave(&dev->pci_irqmask_lock, flags); v = cx_read(PCI_INT_MSK); spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags); return v; } static int cx23885_risc_decode(u32 risc) { static char *instr[16] = { [RISC_SYNC >> 28] = "sync", [RISC_WRITE >> 28] = "write", [RISC_WRITEC >> 28] = "writec", [RISC_READ >> 28] = "read", [RISC_READC >> 28] = "readc", [RISC_JUMP >> 28] = "jump", [RISC_SKIP >> 28] = "skip", [RISC_WRITERM >> 28] = "writerm", [RISC_WRITECM >> 28] = "writecm", [RISC_WRITECR >> 28] = "writecr", }; static int incr[16] = { [RISC_WRITE >> 28] = 3, [RISC_JUMP >> 28] = 3, [RISC_SKIP >> 28] = 1, [RISC_SYNC >> 28] = 1, [RISC_WRITERM >> 28] = 3, [RISC_WRITECM >> 28] = 3, [RISC_WRITECR >> 28] = 4, }; static char *bits[] = { "12", "13", "14", "resync", "cnt0", "cnt1", "18", "19", "20", "21", "22", "23", "irq1", "irq2", "eol", "sol", }; int i; printk(KERN_DEBUG "0x%08x [ %s", risc, instr[risc >> 28] ? instr[risc >> 28] : "INVALID"); for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--) if (risc & (1 << (i + 12))) pr_cont(" %s", bits[i]); pr_cont(" count=%d ]\n", risc & 0xfff); return incr[risc >> 28] ? 
incr[risc >> 28] : 1; } static void cx23885_wakeup(struct cx23885_tsport *port, struct cx23885_dmaqueue *q, u32 count) { struct cx23885_buffer *buf; if (list_empty(&q->active)) return; buf = list_entry(q->active.next, struct cx23885_buffer, queue); buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.sequence = q->count++; dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.vb2_buf.index, count, q->count); list_del(&buf->queue); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } int cx23885_sram_channel_setup(struct cx23885_dev *dev, struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { dprintk(1, "%s() Erasing channel [%s]\n", __func__, ch->name); cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } else { dprintk(1, "%s() Configuring channel [%s]\n", __func__, ch->name); } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 6) lines = 6; BUG_ON(lines < 2); cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET); cx_write(8 + 4, 12); cx_write(8 + 8, 0); /* write CDT */ for (i = 0; i < lines; i++) { dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i, ch->fifo_start + bpl*i); cx_write(cdt + 16*i, ch->fifo_start + bpl*i); cx_write(cdt + 16*i + 4, 0); cx_write(cdt + 16*i + 8, 0); cx_write(cdt + 16*i + 12, 0); } /* write CMDS */ if (ch->jumponly) cx_write(ch->cmds_start + 0, 8); else cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */ cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, (lines*16) >> 3); cx_write(ch->cmds_start + 16, ch->ctrl_start); if (ch->jumponly) cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2)); else cx_write(ch->cmds_start + 20, 64 >> 2); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, (lines*16) 
>> 3); cx_write(ch->cnt1_reg, (bpl >> 3) - 1); dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n", dev->bridge, ch->name, bpl, lines); return 0; } void cx23885_sram_channel_dump(struct cx23885_dev *dev, struct sram_channel *ch) { static char *name[] = { "init risc lo", "init risc hi", "cdt base", "cdt size", "iq base", "iq size", "risc pc lo", "risc pc hi", "iq wr ptr", "iq rd ptr", "cdt current", "pci target lo", "pci target hi", "line / byte", }; u32 risc; unsigned int i, j, n; pr_warn("%s: %s - dma channel status dump\n", dev->name, ch->name); for (i = 0; i < ARRAY_SIZE(name); i++) pr_warn("%s: cmds: %-15s: 0x%08x\n", dev->name, name[i], cx_read(ch->cmds_start + 4*i)); for (i = 0; i < 4; i++) { risc = cx_read(ch->cmds_start + 4 * (i + 14)); pr_warn("%s: risc%d: ", dev->name, i); cx23885_risc_decode(risc); } for (i = 0; i < (64 >> 2); i += n) { risc = cx_read(ch->ctrl_start + 4 * i); /* No consideration for bits 63-32 */ pr_warn("%s: (0x%08x) iq %x: ", dev->name, ch->ctrl_start + 4 * i, i); n = cx23885_risc_decode(risc); for (j = 1; j < n; j++) { risc = cx_read(ch->ctrl_start + 4 * (i + j)); pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n", dev->name, i+j, risc, j); } } pr_warn("%s: fifo: 0x%08x -> 0x%x\n", dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size); pr_warn("%s: ctrl: 0x%08x -> 0x%x\n", dev->name, ch->ctrl_start, ch->ctrl_start + 6*16); pr_warn("%s: ptr1_reg: 0x%08x\n", dev->name, cx_read(ch->ptr1_reg)); pr_warn("%s: ptr2_reg: 0x%08x\n", dev->name, cx_read(ch->ptr2_reg)); pr_warn("%s: cnt1_reg: 0x%08x\n", dev->name, cx_read(ch->cnt1_reg)); pr_warn("%s: cnt2_reg: 0x%08x\n", dev->name, cx_read(ch->cnt2_reg)); } static void cx23885_risc_disasm(struct cx23885_tsport *port, struct cx23885_riscmem *risc) { struct cx23885_dev *dev = port->dev; unsigned int i, j, n; pr_info("%s: risc disasm: %p [dma=0x%08lx]\n", dev->name, risc->cpu, (unsigned long)risc->dma); for (i = 0; i < (risc->size >> 2); i += n) { pr_info("%s: %04d: ", dev->name, i); n = 
cx23885_risc_decode(le32_to_cpu(risc->cpu[i])); for (j = 1; j < n; j++) pr_info("%s: %04d: 0x%08x [ arg #%d ]\n", dev->name, i + j, risc->cpu[i + j], j); if (risc->cpu[i] == cpu_to_le32(RISC_JUMP)) break; } } static void cx23885_shutdown(struct cx23885_dev *dev) { /* disable RISC controller */ cx_write(DEV_CNTRL2, 0); /* Disable all IR activity */ cx_write(IR_CNTRL_REG, 0); /* Disable Video A/B activity */ cx_write(VID_A_DMA_CTL, 0); cx_write(VID_B_DMA_CTL, 0); cx_write(VID_C_DMA_CTL, 0); /* Disable Audio activity */ cx_write(AUD_INT_DMA_CTL, 0); cx_write(AUD_EXT_DMA_CTL, 0); /* Disable Serial port */ cx_write(UART_CTL, 0); /* Disable Interrupts */ cx23885_irq_disable_all(dev); cx_write(VID_A_INT_MSK, 0); cx_write(VID_B_INT_MSK, 0); cx_write(VID_C_INT_MSK, 0); cx_write(AUDIO_INT_INT_MSK, 0); cx_write(AUDIO_EXT_INT_MSK, 0); } static void cx23885_reset(struct cx23885_dev *dev) { dprintk(1, "%s()\n", __func__); cx23885_shutdown(dev); cx_write(PCI_INT_STAT, 0xffffffff); cx_write(VID_A_INT_STAT, 0xffffffff); cx_write(VID_B_INT_STAT, 0xffffffff); cx_write(VID_C_INT_STAT, 0xffffffff); cx_write(AUDIO_INT_INT_STAT, 0xffffffff); cx_write(AUDIO_EXT_INT_STAT, 0xffffffff); cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000); cx_write(PAD_CTRL, 0x00500300); mdelay(100); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01], 720*4, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03], 188*4, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06], 188*4, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0); cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0); cx23885_gpio_setup(dev); } static int 
cx23885_pci_quirks(struct cx23885_dev *dev) { dprintk(1, "%s()\n", __func__); /* The cx23885 bridge has a weird bug which causes NMI to be asserted * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not * occur on the cx23887 bridge. */ if (dev->bridge == CX23885_BRIDGE_885) cx_clear(RDR_TLCTL0, 1 << 4); return 0; } static int get_resources(struct cx23885_dev *dev) { if (request_mem_region(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0), dev->name)) return 0; pr_err("%s: can't get MMIO memory @ 0x%llx\n", dev->name, (unsigned long long)pci_resource_start(dev->pci, 0)); return -EBUSY; } static int cx23885_init_tsport(struct cx23885_dev *dev, struct cx23885_tsport *port, int portno) { dprintk(1, "%s(portno=%d)\n", __func__, portno); /* Transport bus init dma queue - Common settings */ port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */ port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */ port->vld_misc_val = 0x0; port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4); spin_lock_init(&port->slock); port->dev = dev; port->nr = portno; INIT_LIST_HEAD(&port->mpegq.active); mutex_init(&port->frontends.lock); INIT_LIST_HEAD(&port->frontends.felist); port->frontends.active_fe_id = 0; /* This should be hardcoded allow a single frontend * attachment to this tsport, keeping the -dvb.c * code clean and safe. 
*/ if (!port->num_frontends) port->num_frontends = 1; switch (portno) { case 1: port->reg_gpcnt = VID_B_GPCNT; port->reg_gpcnt_ctl = VID_B_GPCNT_CTL; port->reg_dma_ctl = VID_B_DMA_CTL; port->reg_lngth = VID_B_LNGTH; port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL; port->reg_gen_ctrl = VID_B_GEN_CTL; port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS; port->reg_sop_status = VID_B_SOP_STATUS; port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT; port->reg_vld_misc = VID_B_VLD_MISC; port->reg_ts_clk_en = VID_B_TS_CLK_EN; port->reg_src_sel = VID_B_SRC_SEL; port->reg_ts_int_msk = VID_B_INT_MSK; port->reg_ts_int_stat = VID_B_INT_STAT; port->sram_chno = SRAM_CH03; /* VID_B */ port->pci_irqmask = 0x02; /* VID_B bit1 */ break; case 2: port->reg_gpcnt = VID_C_GPCNT; port->reg_gpcnt_ctl = VID_C_GPCNT_CTL; port->reg_dma_ctl = VID_C_DMA_CTL; port->reg_lngth = VID_C_LNGTH; port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL; port->reg_gen_ctrl = VID_C_GEN_CTL; port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS; port->reg_sop_status = VID_C_SOP_STATUS; port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT; port->reg_vld_misc = VID_C_VLD_MISC; port->reg_ts_clk_en = VID_C_TS_CLK_EN; port->reg_src_sel = 0; port->reg_ts_int_msk = VID_C_INT_MSK; port->reg_ts_int_stat = VID_C_INT_STAT; port->sram_chno = SRAM_CH06; /* VID_C */ port->pci_irqmask = 0x04; /* VID_C bit2 */ break; default: BUG(); } return 0; } static void cx23885_dev_checkrevision(struct cx23885_dev *dev) { switch (cx_read(RDR_CFG2) & 0xff) { case 0x00: /* cx23885 */ dev->hwrevision = 0xa0; break; case 0x01: /* CX23885-12Z */ dev->hwrevision = 0xa1; break; case 0x02: /* CX23885-13Z/14Z */ dev->hwrevision = 0xb0; break; case 0x03: if (dev->pci->device == 0x8880) { /* CX23888-21Z/22Z */ dev->hwrevision = 0xc0; } else { /* CX23885-14Z */ dev->hwrevision = 0xa4; } break; case 0x04: if (dev->pci->device == 0x8880) { /* CX23888-31Z */ dev->hwrevision = 0xd0; } else { /* CX23885-15Z, CX23888-31Z */ dev->hwrevision = 0xa5; } break; case 0x0e: /* CX23887-15Z */ 
dev->hwrevision = 0xc0; break; case 0x0f: /* CX23887-14Z */ dev->hwrevision = 0xb1; break; default: pr_err("%s() New hardware revision found 0x%x\n", __func__, dev->hwrevision); } if (dev->hwrevision) pr_info("%s() Hardware revision = 0x%02x\n", __func__, dev->hwrevision); else pr_err("%s() Hardware revision unknown 0x%x\n", __func__, dev->hwrevision); } /* Find the first v4l2_subdev member of the group id in hw */ struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw) { struct v4l2_subdev *result = NULL; struct v4l2_subdev *sd; spin_lock(&dev->v4l2_dev.lock); v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) { if (sd->grp_id == hw) { result = sd; break; } } spin_unlock(&dev->v4l2_dev.lock); return result; } static int cx23885_dev_setup(struct cx23885_dev *dev) { int i; spin_lock_init(&dev->pci_irqmask_lock); spin_lock_init(&dev->slock); mutex_init(&dev->lock); mutex_init(&dev->gpio_lock); atomic_inc(&dev->refcount); dev->nr = cx23885_devcount++; sprintf(dev->name, "cx23885[%d]", dev->nr); /* Configure the internal memory */ if (dev->pci->device == 0x8880) { /* Could be 887 or 888, assume a default */ dev->bridge = CX23885_BRIDGE_887; /* Apply a sensible clock frequency for the PCIe bridge */ dev->clk_freq = 25000000; dev->sram_channels = cx23887_sram_channels; } else if (dev->pci->device == 0x8852) { dev->bridge = CX23885_BRIDGE_885; /* Apply a sensible clock frequency for the PCIe bridge */ dev->clk_freq = 28000000; dev->sram_channels = cx23885_sram_channels; } else BUG(); dprintk(1, "%s() Memory configured for PCIe bridge type %d\n", __func__, dev->bridge); /* board config */ dev->board = UNSET; if (card[dev->nr] < cx23885_bcount) dev->board = card[dev->nr]; for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++) if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor && dev->pci->subsystem_device == cx23885_subids[i].subdevice) dev->board = cx23885_subids[i].card; if (UNSET == dev->board) { dev->board = CX23885_BOARD_UNKNOWN; 
cx23885_card_list(dev); } /* If the user specific a clk freq override, apply it */ if (cx23885_boards[dev->board].clk_freq > 0) dev->clk_freq = cx23885_boards[dev->board].clk_freq; if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE && dev->pci->subsystem_device == 0x7137) { /* Hauppauge ImpactVCBe device ID 0x7137 is populated * with an 888, and a 25Mhz crystal, instead of the * usual third overtone 50Mhz. The default clock rate must * be overridden so the cx25840 is properly configured */ dev->clk_freq = 25000000; } dev->pci_bus = dev->pci->bus->number; dev->pci_slot = PCI_SLOT(dev->pci->devfn); cx23885_irq_add(dev, 0x001f00); /* External Master 1 Bus */ dev->i2c_bus[0].nr = 0; dev->i2c_bus[0].dev = dev; dev->i2c_bus[0].reg_stat = I2C1_STAT; dev->i2c_bus[0].reg_ctrl = I2C1_CTRL; dev->i2c_bus[0].reg_addr = I2C1_ADDR; dev->i2c_bus[0].reg_rdata = I2C1_RDATA; dev->i2c_bus[0].reg_wdata = I2C1_WDATA; dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */ /* External Master 2 Bus */ dev->i2c_bus[1].nr = 1; dev->i2c_bus[1].dev = dev; dev->i2c_bus[1].reg_stat = I2C2_STAT; dev->i2c_bus[1].reg_ctrl = I2C2_CTRL; dev->i2c_bus[1].reg_addr = I2C2_ADDR; dev->i2c_bus[1].reg_rdata = I2C2_RDATA; dev->i2c_bus[1].reg_wdata = I2C2_WDATA; dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */ /* Internal Master 3 Bus */ dev->i2c_bus[2].nr = 2; dev->i2c_bus[2].dev = dev; dev->i2c_bus[2].reg_stat = I2C3_STAT; dev->i2c_bus[2].reg_ctrl = I2C3_CTRL; dev->i2c_bus[2].reg_addr = I2C3_ADDR; dev->i2c_bus[2].reg_rdata = I2C3_RDATA; dev->i2c_bus[2].reg_wdata = I2C3_WDATA; dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */ if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) || (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)) cx23885_init_tsport(dev, &dev->ts1, 1); if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) || (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)) cx23885_init_tsport(dev, &dev->ts2, 2); if (get_resources(dev) < 0) { pr_err("CORE %s No 
more PCIe resources for subsystem: %04x:%04x\n", dev->name, dev->pci->subsystem_vendor, dev->pci->subsystem_device); cx23885_devcount--; return -ENODEV; } /* PCIe stuff */ dev->lmmio = ioremap(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0)); dev->bmmio = (u8 __iomem *)dev->lmmio; pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n", dev->name, dev->pci->subsystem_vendor, dev->pci->subsystem_device, cx23885_boards[dev->board].name, dev->board, card[dev->nr] == dev->board ? "insmod option" : "autodetected"); cx23885_pci_quirks(dev); /* Assume some sensible defaults */ dev->tuner_type = cx23885_boards[dev->board].tuner_type; dev->tuner_addr = cx23885_boards[dev->board].tuner_addr; dev->tuner_bus = cx23885_boards[dev->board].tuner_bus; dev->radio_type = cx23885_boards[dev->board].radio_type; dev->radio_addr = cx23885_boards[dev->board].radio_addr; dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n", __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus); dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n", __func__, dev->radio_type, dev->radio_addr); /* The cx23417 encoder has GPIO's that need to be initialised * before DVB, so that demodulators and tuners are out of * reset before DVB uses them. */ if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) || (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)) cx23885_mc417_init(dev); /* init hardware */ cx23885_reset(dev); cx23885_i2c_register(&dev->i2c_bus[0]); cx23885_i2c_register(&dev->i2c_bus[1]); cx23885_i2c_register(&dev->i2c_bus[2]); cx23885_card_setup(dev); call_all(dev, core, s_power, 0); cx23885_ir_init(dev); if (dev->board == CX23885_BOARD_VIEWCAST_460E) { /* * GPIOs 9/8 are input detection bits for the breakout video * (gpio 8) and audio (gpio 9) cables. When they're attached, * this gpios are pulled high. Make sure these GPIOs are marked * as inputs. 
*/ cx23885_gpio_enable(dev, 0x300, 0); } if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) { if (cx23885_video_register(dev) < 0) { pr_err("%s() Failed to register analog video adapters on VID_A\n", __func__); } } if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) { if (cx23885_boards[dev->board].num_fds_portb) dev->ts1.num_frontends = cx23885_boards[dev->board].num_fds_portb; if (cx23885_dvb_register(&dev->ts1) < 0) { pr_err("%s() Failed to register dvb adapters on VID_B\n", __func__); } } else if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) { if (cx23885_417_register(dev) < 0) { pr_err("%s() Failed to register 417 on VID_B\n", __func__); } } if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) { if (cx23885_boards[dev->board].num_fds_portc) dev->ts2.num_frontends = cx23885_boards[dev->board].num_fds_portc; if (cx23885_dvb_register(&dev->ts2) < 0) { pr_err("%s() Failed to register dvb on VID_C\n", __func__); } } else if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) { if (cx23885_417_register(dev) < 0) { pr_err("%s() Failed to register 417 on VID_C\n", __func__); } } cx23885_dev_checkrevision(dev); /* disable MSI for NetUP cards, otherwise CI is not working */ if (cx23885_boards[dev->board].ci_type > 0) cx_clear(RDR_RDRCTL1, 1 << 8); switch (dev->board) { case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_TEVII_S471: cx_clear(RDR_RDRCTL1, 1 << 8); break; } return 0; } static void cx23885_dev_unregister(struct cx23885_dev *dev) { release_mem_region(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0)); if (!atomic_dec_and_test(&dev->refcount)) return; if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) cx23885_video_unregister(dev); if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) cx23885_dvb_unregister(&dev->ts1); if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) cx23885_417_unregister(dev); if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) 
cx23885_dvb_unregister(&dev->ts2); if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) cx23885_417_unregister(dev); cx23885_i2c_unregister(&dev->i2c_bus[2]); cx23885_i2c_unregister(&dev->i2c_bus[1]); cx23885_i2c_unregister(&dev->i2c_bus[0]); iounmap(dev->lmmio); } static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist, unsigned int offset, u32 sync_line, unsigned int bpl, unsigned int padding, unsigned int lines, unsigned int lpi, bool jump) { struct scatterlist *sg; unsigned int line, todo, sol; if (jump) { *(rp++) = cpu_to_le32(RISC_JUMP); *(rp++) = cpu_to_le32(0); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ } /* sync instruction */ if (sync_line != NO_SYNC_LINE) *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); /* scan lines */ sg = sglist; for (line = 0; line < lines; line++) { while (offset && offset >= sg_dma_len(sg)) { offset -= sg_dma_len(sg); sg = sg_next(sg); } if (lpi && line > 0 && !(line % lpi)) sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC; else sol = RISC_SOL; if (bpl <= sg_dma_len(sg)-offset) { /* fits into current chunk */ *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl); *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ offset += bpl; } else { /* scanline needs to be split */ todo = bpl; *(rp++) = cpu_to_le32(RISC_WRITE|sol| (sg_dma_len(sg)-offset)); *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ todo -= (sg_dma_len(sg)-offset); offset = 0; sg = sg_next(sg); while (todo > sg_dma_len(sg)) { *(rp++) = cpu_to_le32(RISC_WRITE| sg_dma_len(sg)); *(rp++) = cpu_to_le32(sg_dma_address(sg)); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ todo -= sg_dma_len(sg); sg = sg_next(sg); } *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo); *(rp++) = cpu_to_le32(sg_dma_address(sg)); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ offset += todo; } offset += padding; } return rp; } int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc, struct 
scatterlist *sglist, unsigned int top_offset, unsigned int bottom_offset, unsigned int bpl, unsigned int padding, unsigned int lines) { u32 instructions, fields; __le32 *rp; fields = 0; if (UNSET != top_offset) fields++; if (UNSET != bottom_offset) fields++; /* estimate risc mem: worst case is one write per page border + one write per scan line + syncs + jump (all 2 dwords). Padding can cause next bpl to start close to a page border. First DMA region may be smaller than PAGE_SIZE */ /* write and jump need and extra dword */ instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines); instructions += 5; risc->size = instructions * 12; risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); if (risc->cpu == NULL) return -ENOMEM; /* write risc instructions */ rp = risc->cpu; if (UNSET != top_offset) rp = cx23885_risc_field(rp, sglist, top_offset, 0, bpl, padding, lines, 0, true); if (UNSET != bottom_offset) rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200, bpl, padding, lines, 0, UNSET == top_offset); /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } int cx23885_risc_databuffer(struct pci_dev *pci, struct cx23885_riscmem *risc, struct scatterlist *sglist, unsigned int bpl, unsigned int lines, unsigned int lpi) { u32 instructions; __le32 *rp; /* estimate risc mem: worst case is one write per page border + one write per scan line + syncs + jump (all 2 dwords). Here there is no padding and no sync. 
First DMA region may be smaller than PAGE_SIZE */ /* Jump and write need an extra dword */ instructions = 1 + (bpl * lines) / PAGE_SIZE + lines; instructions += 4; risc->size = instructions * 12; risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); if (risc->cpu == NULL) return -ENOMEM; /* write risc instructions */ rp = risc->cpu; rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, lpi == 0); /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc, struct scatterlist *sglist, unsigned int top_offset, unsigned int bottom_offset, unsigned int bpl, unsigned int padding, unsigned int lines) { u32 instructions, fields; __le32 *rp; fields = 0; if (UNSET != top_offset) fields++; if (UNSET != bottom_offset) fields++; /* estimate risc mem: worst case is one write per page border + one write per scan line + syncs + jump (all 2 dwords). Padding can cause next bpl to start close to a page border. 
First DMA region may be smaller than PAGE_SIZE */ /* write and jump need and extra dword */ instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines); instructions += 5; risc->size = instructions * 12; risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); if (risc->cpu == NULL) return -ENOMEM; /* write risc instructions */ rp = risc->cpu; /* Sync to line 6, so US CC line 21 will appear in line '12' * in the userland vbi payload */ if (UNSET != top_offset) rp = cx23885_risc_field(rp, sglist, top_offset, 0, bpl, padding, lines, 0, true); if (UNSET != bottom_offset) rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200, bpl, padding, lines, 0, UNSET == top_offset); /* save pointer to jmp instruction address */ risc->jmp = rp; BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); return 0; } void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf) { struct cx23885_riscmem *risc = &buf->risc; BUG_ON(in_interrupt()); pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma); } static void cx23885_tsport_reg_dump(struct cx23885_tsport *port) { struct cx23885_dev *dev = port->dev; dprintk(1, "%s() Register Dump\n", __func__); dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__, cx_read(DEV_CNTRL2)); dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__, cx23885_irq_get_mask(dev)); dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__, cx_read(AUDIO_INT_INT_MSK)); dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__, cx_read(AUD_INT_DMA_CTL)); dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__, cx_read(AUDIO_EXT_INT_MSK)); dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__, cx_read(AUD_EXT_DMA_CTL)); dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__, cx_read(PAD_CTRL)); dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__, cx_read(ALT_PIN_OUT_SEL)); dprintk(1, "%s() GPIO2 0x%08X\n", __func__, cx_read(GPIO2)); dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__, port->reg_gpcnt, cx_read(port->reg_gpcnt)); 
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}

/*
 * Program the SRAM channel for this TS port, point the RISC engine at the
 * buffer's DMA program, configure pad/clock routing for the board's port
 * assignment (DVB vs. 417 encoder) and enable interrupts + DMA.
 * Returns 0 or -EINVAL when neither port B nor C is configured for DVB.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}

/* Disable TS interrupts and DMA for a port; undo encoder pad setup. */
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */

/*
 * Validate the vb2 plane is big enough for a full set of TS packets, set
 * the payload size and build the buffer's RISC DMA program.
 * Returns 0 or -EINVAL when the plane is too small.
 */
int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count,
				0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which
   is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* chain into the running program of the previous buffer */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

/* ----------------------------------------------------------- */

/* Return every buffer still on the active queue to vb2 with ERROR state. */
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

/* Stop DMA on the port and error-complete all queued buffers. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

/*
 * Interrupt service for the 417 encoder path.  On error bits the DMA is
 * stopped and the encoder re-checked; on RISCI1 the next buffer is completed.
 * Returns non-zero when the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
			dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port,
			       &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

/*
 * Interrupt service for a DVB TS port: dump state on error bits, complete
 * the next buffer on RISCI1, then ack the port's status register.
 * Returns non-zero when the interrupt was handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {
		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);
		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);
		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);
		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
	} else if (status & VID_BC_MSK_RISCI1) {
		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

/*
 * Top-level shared PCI interrupt handler: reads and decodes all bridge
 * status registers, then dispatches to the TS port, video, audio, CI and
 * IR sub-handlers as appropriate.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* nothing pending for us: shared IRQ, let the other owner have it */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {
		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);
		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);
		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);
		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);
		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);
		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);
		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);
		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);
		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);
		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);
		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);
		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);
		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);
		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		/* defer AV core servicing to process context */
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}

/* v4l2_device notify hook: routes IR RX/TX notifications to the IR core. */
static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd ==
		    dev->sd_ir)
			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}

/* Set up deferred-work handlers and install the subdev notify callback. */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}

static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}

/* Mask represents 32 different GPIOs, GPIO's are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder (with its own GPIOs) is present.  Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location.  Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}

/* Configure direction: asoutput non-zero drives the pins, zero makes inputs. */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}

/* PCI probe: allocate and register the device (continues below). */
static int cx23885_initdev(struct pci_dev *pci_dev, const
			   struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name,
		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	err = pci_set_dma_mask(pci_dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_ctrl;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq);
		goto fail_irq;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}

/* PCI remove: tear down in reverse order of cx23885_initdev(). */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}

static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor    = 0x14f1,
		.device    = 0x8852,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor    = 0x14f1,
		.device    = 0x8880,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);

static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};

static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n", CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);
998702.c
/****************************************************************************** * * Copyright 2015 Altera Corporation. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* ******************************************************************************/ /* * $Id: //acds/rel/17.0std/embedded/ip/hps/altera_hps/hwlib/src/hwmgr/soc_a10/alt_fpga_manager.c#1 $ */ #include <stdio.h> #include <inttypes.h> #include "alt_fpga_manager.h" #include <alt_printf.h> #include "socal/alt_fpgamgr.h" #include "socal/alt_fpgamgrdata.h" #include "socal/hps.h" #include "socal/socal.h" #if DEBUG_ALT_FPGA_MANAGER #define dprintf printf #else #define dprintf null_printf #endif /* * This is used in the FPGA reconfiguration streaming interface. Because FPGA * images are commonly stored on disk, the chunk size is that of the disk size. * We cannot choose too large a chunk size because the stack size is fairly * small. */ #define DISK_SECTOR_SIZE 8192 #define ISTREAM_CHUNK_SIZE DISK_SECTOR_SIZE /* * Structure that holds the internal global state. */ static struct { /* HPS CPU is in control of FPGA block. */ bool cpu_in_control; /* Reset assert active. */ bool reset_asserted; } g_fpgaState; /* * FPGA Data Type identifier enum */ typedef enum FPGA_DATA_TYPE_e { FPGA_DATA_FULL = 1, FPGA_DATA_LIST = 2, FPGA_DATA_STREAM = 3 } FPGA_DATA_TYPE_t; /* * FPGA Data, for Full Buffer, Buffer List, or IStream configuration */ typedef struct FPGA_DATA_s { FPGA_DATA_TYPE_t type; union { /* For FPGA_DATA_FULL */ struct { const void * buffer; size_t length; } full; /* For FPGA_DATA_LIST */ struct { const void ** buffer; const size_t * length; size_t count; } list; /* For FPGA_DATA_STREAM */ struct { alt_fpga_istream_t callback; void * context; } stream; } mode; #if ALT_FPGA_ENABLE_DMA_SUPPORT bool use_dma; ALT_DMA_CHANNEL_t dma_channel; #endif } FPGA_DATA_t; /*****/ /* * Helper function that polls for a certain FPGA state to become active. 
*/ static ALT_STATUS_CODE wait_for_fpga_status(ALT_FPGA_STATUS_t state, bool to_be_set, size_t tmo) { if (to_be_set) { while ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) & state) == 0) { --tmo; if (tmo == 0) { return ALT_E_TMO; } } } else { while ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) & state) != 0) { --tmo; if (tmo == 0) { return ALT_E_TMO; } } } return ALT_E_SUCCESS; } /*****/ ALT_STATUS_CODE alt_fpga_init(void) { g_fpgaState.cpu_in_control = false; g_fpgaState.reset_asserted = false; return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_fpga_uninit(void) { if (g_fpgaState.cpu_in_control) { alt_fpga_control_disable(); } return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_fpga_control_enable(ALT_FPGA_CFG_MODE_t mode) { if (!g_fpgaState.cpu_in_control) { uint32_t mask = 0; /* * Step 1: * Verify MSEL is 000 or 001. */ dprintf("FPGA[ctrl:en]: === Step 1 ===\n"); switch (alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) & (ALT_FPGAMGR_IMGCFG_STAT_F2S_MSEL0_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_MSEL1_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_MSEL2_SET_MSK)) { case 0: case ALT_FPGAMGR_IMGCFG_STAT_F2S_MSEL0_SET_MSK: break; default: dprintf("FPGA[1]: MSEL not set to 000 or 001.\n"); return ALT_E_ERROR; } dprintf("FPGA[ctrl:en]: === Step 2 ===\n"); switch (mode) { case ALT_FPGA_CFG_MODE_PP16_FAST_NOAES_NODC: mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX16) | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X1); break; case ALT_FPGA_CFG_MODE_PP16_FAST_AES_NODC: mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX16) | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X2); break; case ALT_FPGA_CFG_MODE_PP16_FAST_NOAES_DC: mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX16) | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X4); break; case ALT_FPGA_CFG_MODE_PP16_FAST_AES_DC: mask = 
                   ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX16)
                 | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X4);
            break;
        case ALT_FPGA_CFG_MODE_PP32_FAST_NOAES_NODC:
            mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX32)
                 | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X1);
            break;
        case ALT_FPGA_CFG_MODE_PP32_FAST_AES_NODC:
            mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX32)
                 | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X4);
            break;
        case ALT_FPGA_CFG_MODE_PP32_FAST_NOAES_DC:
            mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX32)
                 | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X8);
            break;
        case ALT_FPGA_CFG_MODE_PP32_FAST_AES_DC:
            mask = ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_E_PPX32)
                 | ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET(ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_E_X8);
            break;
        }

        alt_replbits_word(ALT_FPGAMGR_IMGCFG_CTL_02_ADDR,
                          ALT_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SET_MSK |
                          ALT_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SET_MSK,
                          mask);

        /*
         * Step 3:
         * Verify no other devices are interfering with programming.
         // Verify: F2S_NCONFIG_PIN = 1
         // Verify: F2S_NSTATUS_PIN = 1
         */
        dprintf("FPGA[ctrl:en]: === Step 3 ===\n");
        if ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) &
             (ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN_SET_MSK |
              ALT_FPGAMGR_IMGCFG_STAT_F2S_NCFG_PIN_SET_MSK)) !=
            (ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN_SET_MSK |
             ALT_FPGAMGR_IMGCFG_STAT_F2S_NCFG_PIN_SET_MSK))
        {
            dprintf("FPGA[3]: Error: F2S_NCONFIG_PIN != 1 or F2S_NSTATUS_PIN != 1.\n");
            return ALT_E_ERROR;
        }

        /*
         * Step 4:
         * Deassert signal drives before taking over those overrides.
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE        = 1
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST = 0
         // Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA    = 0
         // Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL     = 0
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG       = 1
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NSTAT_OE   = 0
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE = 0
         */
        dprintf("FPGA[ctrl:en]: === Step 4 ===\n");
        alt_replbits_word(ALT_FPGAMGR_IMGCFG_CTL_01_ADDR,
                          ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE_SET_MSK |
                          ALT_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST_SET_MSK,
                          ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE_SET_MSK);
        alt_replbits_word(ALT_FPGAMGR_IMGCFG_CTL_02_ADDR,
                          ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA_SET_MSK |
                          ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL_SET_MSK,
                          0);
        alt_replbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR,
                          ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK |
                          ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NSTAT_OE_SET_MSK |
                          ALT_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE_SET_MSK,
                          ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK);

        /*
         * Step 5:
         * Enable overrides for DATA / DCLK / NCONFIG.
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NEN_CFG  = 0
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NCFG = 0
         *
         * Disable overrides for NSTATUS / CONF_DONE.
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NSTAT   = 1
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_CONDONE = 1
         */
        dprintf("FPGA[ctrl:en]: === Step 5 ===\n");
        alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_01_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NEN_CFG_SET_MSK);
        alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NCFG_SET_MSK);
        alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NSTAT_SET_MSK |
                         ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_CONDONE_SET_MSK);

        /*
         * Step 6:
         * Drive chip select.
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE = 0
         */
        dprintf("FPGA[ctrl:en]: === Step 6 ===\n");
        alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_01_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE_SET_MSK);

        /*
         * Step 7:
         * Repeat step 3, just in case.
         */
        dprintf("FPGA[ctrl:en]: === Step 7 ===\n");
        if ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) &
             (ALT_FPGA_STATUS_F2S_NSTATUS_PIN |
              ALT_FPGA_STATUS_F2S_NCONFIG_PIN)) !=
            (ALT_FPGA_STATUS_F2S_NSTATUS_PIN |
             ALT_FPGA_STATUS_F2S_NCONFIG_PIN))
        {
            dprintf("FPGA[7]: Error: F2S_NCONFIG_PIN != 1 or F2S_NSTATUS_PIN != 1.\n");
            return ALT_E_ERROR;
        }

        /*
         * Mark state as in control.
         */
        g_fpgaState.cpu_in_control = true;
    }

    return ALT_E_SUCCESS;
}

/*
 * Return the FPGA configuration interface to external control, releasing
 * any outstanding reset first.  No-op if the HPS was not in control.
 */
ALT_STATUS_CODE alt_fpga_control_disable(void)
{
    if (g_fpgaState.cpu_in_control)
    {
        if (g_fpgaState.reset_asserted)
        {
            /*
             * Borrowed from Step 8b. Disable reset of FPGA.
             */
            dprintf("FPGA[ctrl:dis]: === Step 8b ===\n");
            alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR,
                             ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK);
            g_fpgaState.reset_asserted = false;
        }

        /*
         * Step 15:
         * Disable chip select.
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE = 1
         */
        dprintf("FPGA[ctrl:dis]: === Step 15 ===\n");
        alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_01_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE_SET_MSK);

        /*
         * Step 16:
         * Disable overrides for DATA / DCLK / NCONFIG
         // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NEN_CFG  = 1
         // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NCFG = 1
         */
        dprintf("FPGA[ctrl:dis]: === Step 16 ===\n");
        alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_01_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NEN_CFG_SET_MSK);
        alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR,
                         ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NCFG_SET_MSK);

        /*
         * Mark state as not in control.
         */
        g_fpgaState.cpu_in_control = false;
    }

    return ALT_E_SUCCESS;
}

/* Report whether the HPS currently owns the FPGA configuration interface. */
bool alt_fpga_control_is_enabled(void)
{
    return g_fpgaState.cpu_in_control;
}

ALT_STATUS_CODE alt_fpga_reset_assert(void)
{
    if (!alt_fpga_control_is_enabled())
    {
        /* HPS not in control. */
        return ALT_E_FPGA_NO_SOC_CTRL;
    }
    else if (g_fpgaState.reset_asserted)
    {
        /* Reset already asserted. */
        return ALT_E_SUCCESS;
    }
    else
    {
        g_fpgaState.reset_asserted = true;

        /*
         * Borrowed from FPGA configuration, step 8a.
*/ dprintf("FPGA[rst:en]: === Step 8a ===\n"); alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR, ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK); return wait_for_fpga_status(ALT_FPGA_STATUS_F2S_NSTATUS_PIN, false, 1000); } } ALT_STATUS_CODE alt_fpga_reset_deassert(void) { if (!alt_fpga_control_is_enabled()) { /* HPS not in control. */ return ALT_E_FPGA_NO_SOC_CTRL; } else if (!g_fpgaState.reset_asserted) { /* Reset already unasserted. */ return ALT_E_SUCCESS; } else { g_fpgaState.reset_asserted = false; /* * Borrowed from FPGA configuration, step 8b. */ dprintf("FPGA[rst:dis]: === Step 8b ===\n"); alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR, ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK); return wait_for_fpga_status(ALT_FPGA_STATUS_F2S_NSTATUS_PIN, true, 1000); } } uint32_t alt_fpga_status_get(void) { return alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR); } /* * Helper function which handles writing data to the AXI bus. */ static ALT_STATUS_CODE alt_fpga_internal_writeaxi(const void * bufferv, uint32_t length #if ALT_FPGA_ENABLE_DMA_SUPPORT , bool use_dma, ALT_DMA_CHANNEL_t dma_channel #endif ) { const char * buffer = bufferv; ALT_STATUS_CODE status = ALT_E_SUCCESS; const uint32_t * buffer_end_32 = (const uint32_t *) (buffer + (length & ~0x3)); #if ALT_FPGA_ENABLE_DMA_SUPPORT if (use_dma) { /* A10 DMA support for FPGA as a peripheral is not implemented. */ dprintf("FPGA[AXI]: DMA support not implemented.\n"); status = ALT_E_ERROR; } else #endif { const uint32_t * buffer_32 = (const uint32_t *) buffer; /* Write out as many complete 32-bit chunks. */ while (length >= sizeof(uint32_t)) { alt_write_word(ALT_FPGAMGRDATA_ADDR, *buffer_32); ++buffer_32; length -= sizeof(uint32_t); } } /* Write out remaining non 32-bit aligned chunk. 
*/ if ((status == ALT_E_SUCCESS) && (length & 0x3)) { dprintf("FPGA[AXI]: PIO unaligned data ...\n"); switch (length & 0x3) { case 3: alt_write_word(ALT_FPGAMGRDATA_ADDR, *buffer_end_32 & 0x00ffffff); break; case 2: alt_write_word(ALT_FPGAMGRDATA_ADDR, *buffer_end_32 & 0x0000ffff); break; case 1: alt_write_word(ALT_FPGAMGRDATA_ADDR, *buffer_end_32 & 0x000000ff); break; default: /* This will never happen. */ break; } } return status; } static ALT_STATUS_CODE alt_fpga_internal_configure_idata(FPGA_DATA_t * fpga_data) { ALT_STATUS_CODE status = ALT_E_SUCCESS; /* * Step 10: * Send POF / SOF data * Program in ALT_FPGAMGRDATA_ADDR * Optionally read and confirm LT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 1, else restart from step 1. */ if (status == ALT_E_SUCCESS) { /* * This is the largest configuration image possible for the largest Arria 10 * SoC device with some generous padding added. * Experimental values: * - Compressed : 15 MiB. * - Uncompressed : 31 MiB. */ uint32_t data_limit = 36 * 1024 * 1024; dprintf("FPGA[cfg]: === Step 10 ===\n"); /* Print this here or else istream will cause this to be printed many times. */ dprintf("FPGA[AXI]: PIO aligned data ...\n"); if (fpga_data->type == FPGA_DATA_FULL) { if (fpga_data->mode.full.length > data_limit) { status = ALT_E_FPGA_CFG; } else { status = alt_fpga_internal_writeaxi(fpga_data->mode.full.buffer, fpga_data->mode.full.length #if ALT_FPGA_ENABLE_DMA_SUPPORT , fpga_data->use_dma, fpga_data->dma_channel #endif ); } } else if (fpga_data->type == FPGA_DATA_LIST) { /* Flag if processing has encountered an uint32_t unaligned segment. If so, that must be the last segment. 
*/ bool unaligned_segment = false; size_t i; for (i = 0; i < fpga_data->mode.list.count; ++i) { const void * buffer = fpga_data->mode.list.buffer[i]; size_t length = fpga_data->mode.list.length[i]; if (length > data_limit) { dprintf("FPGA[10][list]: Data limit breached; infinite loop or invalid image likely.\n"); status = ALT_E_FPGA_CFG; } else if (unaligned_segment) { dprintf("FPGA[10][list]: Previous segment unaligned; RBF segment corruption likely.\n"); status = ALT_E_FPGA_CFG; } else { status = alt_fpga_internal_writeaxi(buffer, length #if ALT_FPGA_ENABLE_DMA_SUPPORT , fpga_data->use_dma, fpga_data->dma_channel #endif ); data_limit -= length; if (length & (sizeof(uint32_t) - 1)) { unaligned_segment = true; } } if (status != ALT_E_SUCCESS) { break; } } } else if (fpga_data->type == FPGA_DATA_STREAM) { /* Flag if processing has encountered an uint32_t unaligned segment. If so, that must be the last segment. */ bool unaligned_segment = false; uint32_t buffer[ISTREAM_CHUNK_SIZE / sizeof(uint32_t)]; int32_t cb_status = 0; /* Callback status */ do { cb_status = fpga_data->mode.stream.callback(buffer, sizeof(buffer), fpga_data->mode.stream.context); if (cb_status > sizeof(buffer)) { /* Callback data overflows buffer space. */ status = ALT_E_FPGA_CFG_STM; } else if (cb_status < 0) { /* A problem occurred when streaming data from the source. */ status = ALT_E_FPGA_CFG_STM; } else if (cb_status == 0) { /* End of IStream data. */ break; } else if (cb_status > data_limit) { /* Limit hit for the largest permissible data stream. */ status = ALT_E_FPGA_CFG_STM; } else if (unaligned_segment) { dprintf("FPGA[10][stream]: Previous segment unaligned; previous segment invalid.\n"); status = ALT_E_FPGA_CFG; } else { /* Copy in configuration data. 
*/ status = alt_fpga_internal_writeaxi(buffer, cb_status #if ALT_FPGA_ENABLE_DMA_SUPPORT , fpga_data->use_dma, fpga_data->dma_channel #endif ); data_limit -= cb_status; if (cb_status & (sizeof(uint32_t) - 1)) { unaligned_segment = true; } } if (status != ALT_E_SUCCESS) { break; } } while (cb_status > 0); } else { dprintf("FPGA[10]: Invalid programming request type.\n"); status = ALT_E_ERROR; } } /* * Step 11: * Wait for ConfigCompletion // Wait: ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN = 1 or ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 0. // If ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 0: goto step 1. // If ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN = 1: configuration passed. yay! */ if (status == ALT_E_SUCCESS) { int i = 10000; dprintf("FPGA[cfg]: === Step 11 === (starting i = %d).\n", i); do { uint32_t imgcfg = alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR); if (!(imgcfg & ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN_SET_MSK)) { dprintf("FPGA[11]: Error: F2S_NSTAT_PIN = 0.\n"); status = ALT_E_ERROR; break; } else if (imgcfg & ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN_SET_MSK) { /* yay! */ break; } } while (--i); if (i == 0) { dprintf("FPGA[11]: Timeout waiting for config completion result.\n"); status = ALT_E_TMO; } else { dprintf("FPGA[11]: i = %d.\n", i); } } /* * Step 12: * Write dclkcnt = 0xf. // Write: ALT_FPGAMGR_DCLKCNT_ADDR = 0xf. // Wait: ALT_FPGAMGR_DCLKSTAT_ADDR = 1. */ if (status == ALT_E_SUCCESS) { int i = 10000; dprintf("FPGA[cfg]: === Step 12 === (starting i = %d).\n", i); /* Clear the DCLKSTAT.dcntdone before starting. 
*/ if (alt_read_word(ALT_FPGAMGR_DCLKSTAT_ADDR)) { alt_write_word(ALT_FPGAMGR_DCLKSTAT_ADDR, ALT_FPGAMGR_DCLKSTAT_DCNTDONE_SET_MSK); } alt_write_word(ALT_FPGAMGR_DCLKCNT_ADDR, 0xf); /* Now poll until DCLKSTAT.dcntdone = 1*/ while ((alt_read_word(ALT_FPGAMGR_DCLKSTAT_ADDR) & ALT_FPGAMGR_DCLKSTAT_DCNTDONE_SET_MSK) == 0) { if (!--i) { dprintf("FPGA[12]: Timeout waiting for DCLKSTAT.DCNTDONE.\n"); status = ALT_E_TMO; break; } } if (i != 0) { /* Cleanup DCLKSTAT.dcntdone status. */ alt_write_word(ALT_FPGAMGR_DCLKSTAT_ADDR, ALT_FPGAMGR_DCLKSTAT_DCNTDONE_SET_MSK); dprintf("FPGA[12]: i = %d.\n", i); } } /* * Step 13: * Wait for initialization sequence to complete. // Wait: ALT_FPGAMGR_IMGCFG_STAT_F2S_USERMOD = 1 */ if (status == ALT_E_SUCCESS) { dprintf("FPGA[cfg]: === Step 13 ===\n"); status = wait_for_fpga_status(ALT_FPGA_STATUS_F2S_USERMODE, true, 10000); if (status != ALT_E_SUCCESS) { dprintf("FPGA[13]: Timeout waiting for F2S_USERMOD = 1.\n"); } } return status; } static ALT_STATUS_CODE alt_fpga_internal_configure(FPGA_DATA_t * fpga_data) { ALT_STATUS_CODE status = ALT_E_SUCCESS; /* HPS should be on control before configuration. */ if (g_fpgaState.cpu_in_control == false) { dprintf("FPGA[cfg]: precondition not met: CPU not in control.\n"); return ALT_E_FPGA_NO_SOC_CTRL; } /* FPGA should not be in reset before attempting configuration. */ if (g_fpgaState.reset_asserted) { dprintf("FPGA[cfg]: precondition not met: FPGA reset asserted.\n"); return ALT_E_ERROR; } /* * Step 1: * Verify MSEL is 000 or 001. */ dprintf("FPGA[cfg]: === Step 1 === (skipped due to precondition)\n"); /* * Step 2: * Determine the CFGWIDTH and CDRATIO from the programming mode and write to HW. */ dprintf("FPGA[cfg]: === Step 2 === (skipped due to precondition)\n"); /* * Step 3: * Verify no other devices are interfering with programming. */ dprintf("FPGA[cfg]: === Step 3 === (skipped due to precondition)\n"); /* * Step 4: * Deassert signal drives before taking over those overrides. 
*/ dprintf("FPGA[cfg]: === Step 4 === (skipped due to precondition)\n"); /* * Step 5: * Enable overrides for DATA / DCLK / NCONFIG. * Disable overrides for NSTATUS / CONF_DONE. */ dprintf("FPGA[cfg]: === Step 5 === (skipped due to precondition)\n"); /* * Step 6: * Drive chip select. */ dprintf("FPGA[cfg]: === Step 6 === (skipped due to precondition)\n"); /* * Step 7: * Repeat step 3, just in case. */ dprintf("FPGA[cfg]: === Step 7 === (skipped due to precondition)\n"); /* * Step 8: * Reset the configuration. // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG = 0 // Wait: ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 0 * // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG = 1 // Wait: ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 1 * // Verify: ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN = 0 // Verify: ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_OE = 1 */ dprintf("FPGA[cfg]: === Step 8 ===\n"); alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR, ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK); status = wait_for_fpga_status(ALT_FPGA_STATUS_F2S_NSTATUS_PIN, false, 100000); /* Handle any error conditions after reset request has been withdrawn. This is to avoid bailing out with reset assert requested. */ alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_00_ADDR, ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NCFG_SET_MSK); /* This is the error handler from above, step 8a. */ if (status != ALT_E_SUCCESS) { dprintf("FPGA[8]: Error: Timeout waiting for F2S_NSTAT_PIN = 0.\n"); return ALT_E_FPGA_CFG; } status = wait_for_fpga_status(ALT_FPGA_STATUS_F2S_NSTATUS_PIN, true, 100000); if (status != ALT_E_SUCCESS) { dprintf("FPGA[8]: Error: Timeout waiting for F2S_NSTAT_PIN = 1.\n"); return ALT_E_FPGA_CFG; } if ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) & (ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_OE_SET_MSK)) != ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_OE_SET_MSK) { dprintf("FPGA[8]: Error: F2S_CONDONE_PIN != 0 or F2S_CONDONE_OE != 1.\n"); return ALT_E_ERROR; } /* * Step 9: * Enable DCLK and DATA path. 
// Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA = 1 // Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL = 1 */ dprintf("FPGA[cfg]: === Step 9 ===\n"); alt_setbits_word(ALT_FPGAMGR_IMGCFG_CTL_02_ADDR, ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA_SET_MSK | ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL_SET_MSK); /* * Helper function for Steps 10 - 13. */ status = alt_fpga_internal_configure_idata(fpga_data); /* * Step 14: * Stop DATA and DCLK path. // Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA = 0 // Write: ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL = 0 */ dprintf("FPGA[cfg]: === Step 14 ===\n"); alt_clrbits_word(ALT_FPGAMGR_IMGCFG_CTL_02_ADDR, ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_DATA_SET_MSK | ALT_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTL_SET_MSK); /* * Step 15: * Disable chip select. // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NCE = 1 */ dprintf("FPGA[cfg]: === Step 15 === (skipped due to post condition)\n"); /* * Step 16: * Disable overrides for DATA / DCLK / NCONFIG // Write: ALT_FPGAMGR_IMGCFG_CTL_01_S2F_NEN_CFG = 1 // Write: ALT_FPGAMGR_IMGCFG_CTL_00_S2F_NEN_NCFG = 1 */ dprintf("FPGA[cfg]: === Step 16 === (skipped due to post condition)\n"); /* * Step 17: * Final check. 
// Verify: ALT_FPGAMGR_IMGCFG_STAT_F2S_USERMOD = 1 // Verify: ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN = 1 // Verify: ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN = 1 */ if (status == ALT_E_SUCCESS) { dprintf("FPGA[cfg]: === Step 17 ===\n"); if ((alt_read_word(ALT_FPGAMGR_IMGCFG_STAT_ADDR) & (ALT_FPGAMGR_IMGCFG_STAT_F2S_USERMOD_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN_SET_MSK)) != (ALT_FPGAMGR_IMGCFG_STAT_F2S_USERMOD_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_NSTAT_PIN_SET_MSK | ALT_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN_SET_MSK)) { dprintf("FPGA[17]: Error: F2S_USERMOD != 1 or F2S_NSTAT_PIN != 1 or F2S_CONDONE_PIN != 1.\n"); return ALT_E_ERROR; } } return status; } ALT_STATUS_CODE alt_fpga_configure(const void * buf, size_t len) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_FULL; fpga_data.mode.full.buffer = buf; fpga_data.mode.full.length = len; #if ALT_FPGA_ENABLE_DMA_SUPPORT fpga_data.use_dma = false; #endif return alt_fpga_internal_configure(&fpga_data); } ALT_STATUS_CODE alt_fpga_configure_list(const void ** buf_list, const size_t * len_list, size_t list_count) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_LIST; fpga_data.mode.list.buffer = buf_list; fpga_data.mode.list.length = len_list; fpga_data.mode.list.count = list_count; #if ALT_FPGA_ENABLE_DMA_SUPPORT fpga_data.use_dma = false; #endif return alt_fpga_internal_configure(&fpga_data); } #if ALT_FPGA_ENABLE_DMA_SUPPORT ALT_STATUS_CODE alt_fpga_configure_dma(const void * buf, size_t len, ALT_DMA_CHANNEL_t dma_channel) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_FULL; fpga_data.mode.full.buffer = buf; fpga_data.mode.full.length = len; fpga_data.use_dma = true; fpga_data.dma_channel = dma_channel; return alt_fpga_internal_configure(&fpga_data); } ALT_STATUS_CODE alt_fpga_configure_list_dma(const void ** buf_list, const size_t * len_list, size_t list_count, ALT_DMA_CHANNEL_t dma_channel) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_LIST; 
fpga_data.mode.list.buffer = buf_list; fpga_data.mode.list.length = len_list; fpga_data.mode.list.count = list_count; fpga_data.use_dma = true; fpga_data.dma_channel = dma_channel; return alt_fpga_internal_configure(&fpga_data); } #endif ALT_STATUS_CODE alt_fpga_istream_configure(alt_fpga_istream_t cfg_stream, void * user_data) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_STREAM; fpga_data.mode.stream.callback = cfg_stream; fpga_data.mode.stream.context = user_data; #if ALT_FPGA_ENABLE_DMA_SUPPORT fpga_data.use_dma = false; #endif return alt_fpga_internal_configure(&fpga_data); } #if ALT_FPGA_ENABLE_DMA_SUPPORT ALT_STATUS_CODE alt_fpga_istream_configure_dma(alt_fpga_istream_t cfg_stream, void * user_data, ALT_DMA_CHANNEL_t dma_channel) { FPGA_DATA_t fpga_data; fpga_data.type = FPGA_DATA_STREAM; fpga_data.mode.stream.callback = cfg_stream; fpga_data.mode.stream.context = user_data; fpga_data.use_dma = true; fpga_data.dma_channel = dma_channel; return alt_fpga_internal_configure(&fpga_data); } #endif ALT_STATUS_CODE alt_fpga_int_enable(uint32_t mask) { /* Writing 0 will cause the interrupt to be unmasked. */ alt_clrbits_word(ALT_FPGAMGR_INTR_MSK_ADDR, mask); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_fpga_int_disable(uint32_t mask) { /* Writing 1 will cause the interrupt to be masked. */ alt_setbits_word(ALT_FPGAMGR_INTR_MSK_ADDR, mask); return ALT_E_SUCCESS; } uint32_t alt_fpga_int_get(void) { return alt_read_word(ALT_FPGAMGR_INTR_MSKED_STAT_ADDR); } ALT_STATUS_CODE alt_fpga_int_clear(uint32_t mask) { alt_write_word(ALT_FPGAMGR_INTR_MSKED_STAT_ADDR, mask); return ALT_E_SUCCESS; } uint32_t alt_fpga_gpi_read(uint32_t mask) { if (mask == 0) { return 0; } return alt_read_word(ALT_FPGAMGR_GPI_ADDR) & mask; } ALT_STATUS_CODE alt_fpga_gpo_write(uint32_t mask, uint32_t value) { if (mask != 0) { alt_replbits_word(ALT_FPGAMGR_GPO_ADDR, mask, value); } return ALT_E_SUCCESS; }
302471.c
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/namei.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/random.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/quotaops.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "xattr.h" #include "acl.h" #include <trace/events/f2fs.h> static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); nid_t ino; struct inode *inode; bool nid_free = false; int xattr_size = 0; int err; inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); f2fs_lock_op(sbi); if (!f2fs_alloc_nid(sbi, &ino)) { f2fs_unlock_op(sbi); err = -ENOSPC; goto fail; } f2fs_unlock_op(sbi); nid_free = true; inode_init_owner(inode, dir, mode); inode->i_ino = ino; inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); F2FS_I(inode)->i_crtime = inode->i_mtime; inode->i_generation = prandom_u32(); if (S_ISDIR(inode->i_mode)) F2FS_I(inode)->i_current_depth = 1; err = insert_inode_locked(inode); if (err) { err = -EINVAL; goto fail; } if (f2fs_sb_has_project_quota(sbi) && (F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL)) F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid; else F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns, F2FS_DEF_PROJID); err = dquot_initialize(inode); if (err) goto fail_drop; set_inode_flag(inode, FI_NEW_INODE); /* If the directory encrypted, then we should encrypt the inode. 
*/ if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) && f2fs_may_encrypt(inode)) f2fs_set_encrypted_inode(inode); if (f2fs_sb_has_extra_attr(sbi)) { set_inode_flag(inode, FI_EXTRA_ATTR); F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE; } if (test_opt(sbi, INLINE_XATTR)) set_inode_flag(inode, FI_INLINE_XATTR); if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) set_inode_flag(inode, FI_INLINE_DATA); if (f2fs_may_inline_dentry(inode)) set_inode_flag(inode, FI_INLINE_DENTRY); if (f2fs_sb_has_flexible_inline_xattr(sbi)) { f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); if (f2fs_has_inline_xattr(inode)) xattr_size = F2FS_OPTION(sbi).inline_xattr_size; /* Otherwise, will be 0 */ } else if (f2fs_has_inline_xattr(inode) || f2fs_has_inline_dentry(inode)) { xattr_size = DEFAULT_INLINE_XATTR_ADDRS; } F2FS_I(inode)->i_inline_xattr_size = xattr_size; f2fs_init_extent_tree(inode, NULL); stat_inc_inline_xattr(inode); stat_inc_inline_inode(inode); stat_inc_inline_dir(inode); F2FS_I(inode)->i_flags = f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED); if (S_ISDIR(inode->i_mode)) F2FS_I(inode)->i_flags |= F2FS_INDEX_FL; if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); f2fs_set_inode_flags(inode); trace_f2fs_new_inode(inode, 0); return inode; fail: trace_f2fs_new_inode(inode, err); make_bad_inode(inode); if (nid_free) set_inode_flag(inode, FI_FREE_NID); iput(inode); return ERR_PTR(err); fail_drop: trace_f2fs_new_inode(inode, err); dquot_drop(inode); inode->i_flags |= S_NOQUOTA; if (nid_free) set_inode_flag(inode, FI_FREE_NID); clear_nlink(inode); unlock_new_inode(inode); iput(inode); return ERR_PTR(err); } static inline int is_extension_exist(const unsigned char *s, const char *sub) { size_t slen = strlen(s); size_t sublen = strlen(sub); int i; /* * filename format of multimedia file should be defined as: * "filename + '.' + extension + (optional: '.' + temp extension)". 
*/ if (slen < sublen + 2) return 0; for (i = 1; i < slen - sublen; i++) { if (s[i] != '.') continue; if (!strncasecmp(s + i + 1, sub, sublen)) return 1; } return 0; } /* * Set multimedia files as cold files for hot/cold data separation */ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode, const unsigned char *name) { __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; int i, cold_count, hot_count; down_read(&sbi->sb_lock); cold_count = le32_to_cpu(sbi->raw_super->extension_count); hot_count = sbi->raw_super->hot_ext_count; for (i = 0; i < cold_count + hot_count; i++) { if (is_extension_exist(name, extlist[i])) break; } up_read(&sbi->sb_lock); if (i == cold_count + hot_count) return; if (i < cold_count) file_set_cold(inode); else file_set_hot(inode); } int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set) { __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; int cold_count = le32_to_cpu(sbi->raw_super->extension_count); int hot_count = sbi->raw_super->hot_ext_count; int total_count = cold_count + hot_count; int start, count; int i; if (set) { if (total_count == F2FS_MAX_EXTENSION) return -EINVAL; } else { if (!hot && !cold_count) return -EINVAL; if (hot && !hot_count) return -EINVAL; } if (hot) { start = cold_count; count = total_count; } else { start = 0; count = cold_count; } for (i = start; i < count; i++) { if (strcmp(name, extlist[i])) continue; if (set) return -EINVAL; memcpy(extlist[i], extlist[i + 1], F2FS_EXTENSION_LEN * (total_count - i - 1)); memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN); if (hot) sbi->raw_super->hot_ext_count = hot_count - 1; else sbi->raw_super->extension_count = cpu_to_le32(cold_count - 1); return 0; } if (!set) return -EINVAL; if (hot) { memcpy(extlist[count], name, strlen(name)); sbi->raw_super->hot_ext_count = hot_count + 1; } else { char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN]; memcpy(buf, 
&extlist[cold_count], F2FS_EXTENSION_LEN * hot_count); memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN); memcpy(extlist[cold_count], name, strlen(name)); memcpy(&extlist[cold_count + 1], buf, F2FS_EXTENSION_LEN * hot_count); sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1); } return 0; } static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; nid_t ino = 0; int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; err = dquot_initialize(dir); if (err) return err; inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); if (!test_opt(sbi, DISABLE_EXT_IDENTIFY)) set_file_temperature(sbi, inode, dentry->d_name.name); inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; ino = inode->i_ino; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); f2fs_alloc_nid_done(sbi, ino); d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); f2fs_balance_fs(sbi, true); return 0; out: f2fs_handle_failed_inode(inode); return err; } static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); struct f2fs_sb_info *sbi = F2FS_I_SB(dir); int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; err = fscrypt_prepare_link(old_dentry, dir, dentry); if (err) return err; if (is_inode_flag_set(dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(dir)->i_projid, F2FS_I(old_dentry->d_inode)->i_projid))) return -EXDEV; err = dquot_initialize(dir); if (err) return err; f2fs_balance_fs(sbi, true); inode->i_ctime = current_time(inode); ihold(inode); set_inode_flag(inode, FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; 
f2fs_unlock_op(sbi); d_instantiate(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out: clear_inode_flag(inode, FI_INC_LINK); iput(inode); f2fs_unlock_op(sbi); return err; } struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); struct page *page; unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page); if (!ino) { if (IS_ERR(page)) return ERR_CAST(page); return ERR_PTR(-ENOENT); } return d_obtain_alias(f2fs_iget(child->d_sb, ino)); } static int __recover_dot_dentries(struct inode *dir, nid_t pino) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct qstr dot = QSTR_INIT(".", 1); struct qstr dotdot = QSTR_INIT("..", 2); struct f2fs_dir_entry *de; struct page *page; int err = 0; if (f2fs_readonly(sbi->sb)) { f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", dir->i_ino, pino); return 0; } err = dquot_initialize(dir); if (err) return err; f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); de = f2fs_find_entry(dir, &dot, &page); if (de) { f2fs_put_page(page, 0); } else if (IS_ERR(page)) { err = PTR_ERR(page); goto out; } else { err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); if (err) goto out; } de = f2fs_find_entry(dir, &dotdot, &page); if (de) f2fs_put_page(page, 0); else if (IS_ERR(page)) err = PTR_ERR(page); else err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR); out: if (!err) clear_inode_flag(dir, FI_INLINE_DOTS); f2fs_unlock_op(sbi); return err; } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; struct dentry *new; nid_t ino = -1; int err = 0; unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); struct fscrypt_name fname; trace_f2fs_lookup_start(dir, dentry, flags); if (dentry->d_name.len > F2FS_NAME_LEN) { err = -ENAMETOOLONG; goto out; } err = fscrypt_prepare_lookup(dir, dentry, &fname); if (err == 
-ENOENT) goto out_splice; if (err) goto out; de = __f2fs_find_entry(dir, &fname, &page); fscrypt_free_filename(&fname); if (!de) { if (IS_ERR(page)) { err = PTR_ERR(page); goto out; } goto out_splice; } ino = le32_to_cpu(de->ino); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { err = __recover_dot_dentries(dir, root_ino); if (err) goto out_iput; } if (f2fs_has_inline_dots(inode)) { err = __recover_dot_dentries(inode, dir->i_ino); if (err) goto out_iput; } if (IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { f2fs_warn(F2FS_I_SB(inode), "Inconsistent encryption contexts: %lu/%lu", dir->i_ino, inode->i_ino); err = -EPERM; goto out_iput; } out_splice: new = d_splice_alias(inode, dentry); err = PTR_ERR_OR_ZERO(new); trace_f2fs_lookup_end(dir, dentry, ino, err); return new; out_iput: iput(inode); out: trace_f2fs_lookup_end(dir, dentry, ino, err); return ERR_PTR(err); } static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; trace_f2fs_unlink_enter(dir, dentry); if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = dquot_initialize(dir); if (err) return err; err = dquot_initialize(inode); if (err) return err; de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { if (IS_ERR(page)) err = PTR_ERR(page); goto fail; } f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); err = f2fs_acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); f2fs_put_page(page, 0); goto fail; } f2fs_delete_entry(de, page, dir, inode); f2fs_unlock_op(sbi); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); fail: trace_f2fs_unlink_exit(inode, err); return err; } static const char *f2fs_get_link(struct dentry *dentry, struct inode *inode, struct 
delayed_call *done) { const char *link = page_get_link(dentry, inode, done); if (!IS_ERR(link) && !*link) { /* this is broken symlink case */ do_delayed_call(done); clear_delayed_call(done); link = ERR_PTR(-ENOENT); } return link; } static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; size_t len = strlen(symname); struct fscrypt_str disk_link; int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize, &disk_link); if (err) return err; err = dquot_initialize(dir); if (err) return err; inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); if (IS_ENCRYPTED(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; else inode->i_op = &f2fs_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &f2fs_dblock_aops; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out_f2fs_handle_failed_inode; f2fs_unlock_op(sbi); f2fs_alloc_nid_done(sbi, inode->i_ino); err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link); if (err) goto err_out; err = page_symlink(inode, disk_link.name, disk_link.len); err_out: d_instantiate_new(dentry, inode); /* * Let's flush symlink data in order to avoid broken symlink as much as * possible. Nevertheless, fsyncing is the best way, but there is no * way to get a file descriptor in order to flush that. * * Note that, it needs to do dir->fsync to make this recoverable. * If the symlink path is stored into inline_data, there is no * performance regression. 
*/ if (!err) { filemap_write_and_wait_range(inode->i_mapping, 0, disk_link.len - 1); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); } else { f2fs_unlink(dir, dentry); } f2fs_balance_fs(sbi, true); goto out_free_encrypted_link; out_f2fs_handle_failed_inode: f2fs_handle_failed_inode(inode); out_free_encrypted_link: if (disk_link.name != (unsigned char *)symname) kvfree(disk_link.name); return err; } static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = dquot_initialize(dir); if (err) return err; inode = f2fs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; inode_nohighmem(inode); set_inode_flag(inode, FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out_fail; f2fs_unlock_op(sbi); f2fs_alloc_nid_done(sbi, inode->i_ino); d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); f2fs_balance_fs(sbi, true); return 0; out_fail: clear_inode_flag(inode, FI_INC_LINK); f2fs_handle_failed_inode(inode); return err; } static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; } static int f2fs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err = 0; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; err = dquot_initialize(dir); if (err) return err; inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; f2fs_lock_op(sbi); err = 
f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); f2fs_alloc_nid_done(sbi, inode->i_ino); d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); f2fs_balance_fs(sbi, true); return 0; out: f2fs_handle_failed_inode(inode); return err; } static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode, struct inode **whiteout) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; err = dquot_initialize(dir); if (err) return err; inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); if (whiteout) { init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); inode->i_op = &f2fs_special_inode_operations; } else { inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; } f2fs_lock_op(sbi); err = f2fs_acquire_orphan_inode(sbi); if (err) goto out; err = f2fs_do_tmpfile(inode, dir); if (err) goto release_out; /* * add this non-linked tmpfile to orphan list, in this way we could * remove all unused data of tmpfile after abnormal power-off. */ f2fs_add_orphan_inode(inode); f2fs_alloc_nid_done(sbi, inode->i_ino); if (whiteout) { f2fs_i_links_write(inode, false); *whiteout = inode; } else { d_tmpfile(dentry, inode); } /* link_count was changed by d_tmpfile as well. 
*/ f2fs_unlock_op(sbi); unlock_new_inode(inode); f2fs_balance_fs(sbi, true); return 0; release_out: f2fs_release_orphan_inode(sbi); out: f2fs_handle_failed_inode(inode); return err; } static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); if (unlikely(f2fs_cp_error(sbi))) return -EIO; if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) { int err = fscrypt_get_encryption_info(dir); if (err) return err; } return __f2fs_tmpfile(dir, dentry, mode, NULL); } static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout) { if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) return -EIO; return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout); } static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct inode *whiteout = NULL; struct page *old_dir_page; struct page *old_page, *new_page = NULL; struct f2fs_dir_entry *old_dir_entry = NULL; struct f2fs_dir_entry *old_entry; struct f2fs_dir_entry *new_entry; bool is_old_inline = f2fs_has_inline_dentry(old_dir); int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(new_dir)->i_projid, F2FS_I(old_dentry->d_inode)->i_projid))) return -EXDEV; err = dquot_initialize(old_dir); if (err) goto out; err = dquot_initialize(new_dir); if (err) goto out; if (new_inode) { err = dquot_initialize(new_inode); if (err) goto out; } err = -ENOENT; old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) err = PTR_ERR(old_page); goto out; } if (S_ISDIR(old_inode->i_mode)) { old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if 
(!old_dir_entry) { if (IS_ERR(old_dir_page)) err = PTR_ERR(old_dir_page); goto out_old; } } if (flags & RENAME_WHITEOUT) { err = f2fs_create_whiteout(old_dir, &whiteout); if (err) goto out_dir; } if (new_inode) { err = -ENOTEMPTY; if (old_dir_entry && !f2fs_empty_dir(new_inode)) goto out_whiteout; err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) { if (IS_ERR(new_page)) err = PTR_ERR(new_page); goto out_whiteout; } f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); err = f2fs_acquire_orphan_inode(sbi); if (err) goto put_out_dir; f2fs_set_link(new_dir, new_entry, new_page, old_inode); new_inode->i_ctime = current_time(new_inode); down_write(&F2FS_I(new_inode)->i_sem); if (old_dir_entry) f2fs_i_links_write(new_inode, false); f2fs_i_links_write(new_inode, false); up_write(&F2FS_I(new_inode)->i_sem); if (!new_inode->i_nlink) f2fs_add_orphan_inode(new_inode); else f2fs_release_orphan_inode(sbi); } else { f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); err = f2fs_add_link(new_dentry, old_inode); if (err) { f2fs_unlock_op(sbi); goto out_whiteout; } if (old_dir_entry) f2fs_i_links_write(new_dir, true); /* * old entry and new entry can locate in the same inline * dentry in inode, when attaching new entry in inline dentry, * it could force inline dentry conversion, after that, * old_entry and old_page will point to wrong address, in * order to avoid this, let's do the check and update here. 
*/ if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) { f2fs_put_page(old_page, 0); old_page = NULL; old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { err = -ENOENT; if (IS_ERR(old_page)) err = PTR_ERR(old_page); f2fs_unlock_op(sbi); goto out_whiteout; } } } down_write(&F2FS_I(old_inode)->i_sem); if (!old_dir_entry || whiteout) file_lost_pino(old_inode); else F2FS_I(old_inode)->i_pino = new_dir->i_ino; up_write(&F2FS_I(old_inode)->i_sem); old_inode->i_ctime = current_time(old_inode); f2fs_mark_inode_dirty_sync(old_inode, false); f2fs_delete_entry(old_entry, old_page, old_dir, NULL); if (whiteout) { whiteout->i_state |= I_LINKABLE; set_inode_flag(whiteout, FI_INC_LINK); err = f2fs_add_link(old_dentry, whiteout); if (err) goto put_out_dir; whiteout->i_state &= ~I_LINKABLE; iput(whiteout); } if (old_dir_entry) { if (old_dir != new_dir && !whiteout) f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); else f2fs_put_page(old_dir_page, 0); f2fs_i_links_write(old_dir, false); } if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) { f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); if (S_ISDIR(old_inode->i_mode)) f2fs_add_ino_entry(sbi, old_inode->i_ino, TRANS_DIR_INO); } f2fs_unlock_op(sbi); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); f2fs_update_time(sbi, REQ_TIME); return 0; put_out_dir: f2fs_unlock_op(sbi); if (new_page) f2fs_put_page(new_page, 0); out_whiteout: if (whiteout) iput(whiteout); out_dir: if (old_dir_entry) f2fs_put_page(old_dir_page, 0); out_old: f2fs_put_page(old_page, 0); out: return err; } static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct page *old_dir_page, *new_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry 
*old_dir_entry = NULL, *new_dir_entry = NULL; struct f2fs_dir_entry *old_entry, *new_entry; int old_nlink = 0, new_nlink = 0; int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; err = f2fs_is_checkpoint_ready(sbi); if (err) return err; if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(new_dir)->i_projid, F2FS_I(old_dentry->d_inode)->i_projid)) || (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(old_dir)->i_projid, F2FS_I(new_dentry->d_inode)->i_projid))) return -EXDEV; err = dquot_initialize(old_dir); if (err) goto out; err = dquot_initialize(new_dir); if (err) goto out; err = -ENOENT; old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) err = PTR_ERR(old_page); goto out; } new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) { if (IS_ERR(new_page)) err = PTR_ERR(new_page); goto out_old; } /* prepare for updating ".." directory entry info later */ if (old_dir != new_dir) { if (S_ISDIR(old_inode->i_mode)) { old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if (!old_dir_entry) { if (IS_ERR(old_dir_page)) err = PTR_ERR(old_dir_page); goto out_new; } } if (S_ISDIR(new_inode->i_mode)) { new_dir_entry = f2fs_parent_dir(new_inode, &new_dir_page); if (!new_dir_entry) { if (IS_ERR(new_dir_page)) err = PTR_ERR(new_dir_page); goto out_old_dir; } } } /* * If cross rename between file and directory those are not * in the same directory, we will inc nlink of file's parent * later, so we should check upper boundary of its nlink. */ if ((!old_dir_entry || !new_dir_entry) && old_dir_entry != new_dir_entry) { old_nlink = old_dir_entry ? -1 : 1; new_nlink = -old_nlink; err = -EMLINK; if ((old_nlink > 0 && old_dir->i_nlink >= F2FS_LINK_MAX) || (new_nlink > 0 && new_dir->i_nlink >= F2FS_LINK_MAX)) goto out_new_dir; } f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); /* update ".." 
directory entry info of old dentry */ if (old_dir_entry) f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); /* update ".." directory entry info of new dentry */ if (new_dir_entry) f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir); /* update directory entry info of old dir inode */ f2fs_set_link(old_dir, old_entry, old_page, new_inode); down_write(&F2FS_I(old_inode)->i_sem); file_lost_pino(old_inode); up_write(&F2FS_I(old_inode)->i_sem); old_dir->i_ctime = current_time(old_dir); if (old_nlink) { down_write(&F2FS_I(old_dir)->i_sem); f2fs_i_links_write(old_dir, old_nlink > 0); up_write(&F2FS_I(old_dir)->i_sem); } f2fs_mark_inode_dirty_sync(old_dir, false); /* update directory entry info of new dir inode */ f2fs_set_link(new_dir, new_entry, new_page, old_inode); down_write(&F2FS_I(new_inode)->i_sem); file_lost_pino(new_inode); up_write(&F2FS_I(new_inode)->i_sem); new_dir->i_ctime = current_time(new_dir); if (new_nlink) { down_write(&F2FS_I(new_dir)->i_sem); f2fs_i_links_write(new_dir, new_nlink > 0); up_write(&F2FS_I(new_dir)->i_sem); } f2fs_mark_inode_dirty_sync(new_dir, false); if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) { f2fs_add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO); f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); } f2fs_unlock_op(sbi); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); f2fs_update_time(sbi, REQ_TIME); return 0; out_new_dir: if (new_dir_entry) { f2fs_put_page(new_dir_page, 0); } out_old_dir: if (old_dir_entry) { f2fs_put_page(old_dir_page, 0); } out_new: f2fs_put_page(new_page, 0); out_old: f2fs_put_page(old_page, 0); out: return err; } static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int err; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (err) return err; if 
(flags & RENAME_EXCHANGE) { return f2fs_cross_rename(old_dir, old_dentry, new_dir, new_dentry); } /* * VFS has already handled the new dentry existence case, * here, we just deal with "RENAME_NOREPLACE" as regular rename. */ return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } static const char *f2fs_encrypted_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct page *page; const char *target; if (!dentry) return ERR_PTR(-ECHILD); page = read_mapping_page(inode->i_mapping, 0, NULL); if (IS_ERR(page)) return ERR_CAST(page); target = fscrypt_get_symlink(inode, page_address(page), inode->i_sb->s_blocksize, done); put_page(page); return target; } const struct inode_operations f2fs_encrypted_symlink_inode_operations = { .get_link = f2fs_encrypted_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, #endif }; const struct inode_operations f2fs_dir_inode_operations = { .create = f2fs_create, .lookup = f2fs_lookup, .link = f2fs_link, .unlink = f2fs_unlink, .symlink = f2fs_symlink, .mkdir = f2fs_mkdir, .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename = f2fs_rename2, .tmpfile = f2fs_tmpfile, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, #endif }; const struct inode_operations f2fs_symlink_inode_operations = { .get_link = f2fs_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, #endif }; const struct inode_operations f2fs_special_inode_operations = { .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, #endif };
142623.c
#include "utfcmn.h"

#define LENHDR 12

void uopen ( int *iret )
/************************************************************************
 * uopen								*
 *									*
 * This subroutine opens a file and loads the header for a		*
 * Universal Transmission Format (UTF) file.				*
 *									*
 * uopen ( iret )							*
 *									*
 * Output parameters:							*
 *	*iret		int		Return Code			*
 **									*
 * Log:									*
 * E. Safford/GSC	11/96	Initial Coding				*
 * S. Jacobs/NCEP	 8/97	Clean up header and comments		*
 * M. Linda/GSC		 9/97	Changed a key word in the prologue	*
 * S. Jacobs/NCEP	 9/97	Changed the time stamp for the product	*
 * S. Jacobs/NCEP	 2/98	Changed the minutes in the time stamp	*
 ***********************************************************************/
{
	unsigned char	hdr[LENHDR];
	int		itime, ierr;
/*---------------------------------------------------------------------*/
	*iret = G_NORMAL;

/*
 *	If no file name is specified, return with an error.
 *	(filnam is a global declared in utfcmn.h.)
 */
	if ( strlen ( filnam ) == (size_t)0 ) {
	    *iret = G_NOUTFL;
	    return;
	}

/*
 *	If the open fails, return immediately. The global file handle
 *	flun and the open flag opnfil record the result for later calls.
 */
	flun = cfl_wopn ( filnam, &ierr );
	if ( ierr != 0 ) {
	    *iret = G_NOUTFL;
	    opnfil = G_FALSE;
	    return;
	}

/*
 *	Mark file as opened.
 */
	opnfil = G_TRUE;

/*
 *	The header is 12 bytes long and the storage scheme is as follows:
 *
 *	hdr[0]		--> header marker (0xc1)
 *	hdr[1]		--> projection indicator value
 *	hdr[2], hdr[3]	--> geography scale value
 *	hdr[4], hdr[5]	--> imax (max width) value
 *	hdr[6], hdr[7]	--> jmax (max height) value
 *	hdr[8]		--> all 5 bits of the day starting at the high
 *			    order and then 3 of the 4 bits of the month
 *	hdr[9]		--> the remaining 1 bit of the month (at high
 *			    order) all 7 bits of the year
 *	hdr[10]		--> the first 8 bits of the time in hundreds of
 *			    hours
 *	hdr[11]		--> the remaining 4 bits of the time (at high
 *			    order) all 4 bits of the pdc value
 *			    (presently 0)
 */
	hdr[0]  = 0xc1;
	hdr[1]  = kmap & BMASK;
	/* 16-bit big-endian fields: high byte first, then low byte. */
	hdr[2]  = (kgscl >> 8) & BMASK;
	hdr[3]  = kgscl & BMASK;
	hdr[4]  = (kxsize >> 8) & BMASK;
	hdr[5]  = kxsize & BMASK;
	hdr[6]  = (kysize >> 8) & BMASK;
	hdr[7]  = kysize & BMASK;
	/* Pack day (5 bits) and month (4 bits) across the byte boundary. */
	hdr[8]  = ( (kdd << 3) & 0xf8 ) | ( (kmm >> 1) & 0x07 );
	hdr[9]  = ( ( (kmm & 0x01) << 7 ) & 0x80 ) | (kyy & 0x7f);
	/* Time is HHNN packed as hours*100+minutes, 12 bits total. */
	itime   = khh * 100 + knn;
	hdr[10] = (itime >> 4) & BMASK;
	hdr[11] = (itime << 4) & 0xf0;

/*
 *	Write the header array to the buffer.
 */
	uwrbuf ( hdr, LENHDR, iret );
}
165165.c
/* : mem handling stuff * * 2/11/99 JC * - from im_open.c and callback.c * - malloc tracking stuff added * 11/3/01 JC * - im_strncpy() added * 20/4/01 JC * - im_(v)snprintf() added * 6/7/05 * - more tracking for DEBUGM * 20/10/06 * - return NULL for size <= 0 * 11/5/06 * - abort() on malloc() failure with DEBUG * 20/10/09 * - gtkdoc comment * 6/11/09 * - im_malloc()/im_free() now call g_try_malloc()/g_free() ... removes * confusion over whether to use im_free() or g_free() for things like * im_header_string() */ /* This file is part of VIPS. VIPS is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk */ #ifdef HAVE_CONFIG_H #include <config.h> #endif /*HAVE_CONFIG_H*/ #include <vips/intl.h> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <assert.h> #include <vips/vips.h> #include <vips/thread.h> #ifdef WITH_DMALLOC #include <dmalloc.h> #endif /*WITH_DMALLOC*/ /** * SECTION: memory * @short_description: memory utilities * @stability: Stable * @include: vips/vips.h * * Simple memory allocation utilities. These functions and macros help * allocate and free memory. Most of VIPS uses them, though some parts use * the g_malloc() system instead, confusingly. 
* * If you compile with %DEBUGM it will track allocations for you, though * valgrind or dmalloc are better solutions. */ /* Define for simple malloc tracking ... better to use dmalloc if you can. #define DEBUGM */ /* g_assert( 0 ) on memory errors. #define DEBUG */ #ifdef DEBUG # warning DEBUG on in libsrc/iofuncs/memory.c #endif /*DEBUG*/ /* Track total alloc/total free here for debugging. */ #ifdef DEBUGM static size_t int total_mem_alloc = 0; static unsigned int total_allocs = 0; static size_t int high_water_mark = 0; static GMutex *malloc_mutex = NULL; static GSList *malloc_list = NULL; static const int trace_freq = 100; /* Msg every this many malloc/free */ static int next_trace = 0; #endif /*DEBUGM*/ /** * IM_NEW: * @IM: allocate memory local to @IM, or %NULL for no auto-free * @T: type of thing to allocate * * Returns: A pointer of type @T *, or %NULL on error. */ /** * IM_ARRAY: * @IM: allocate memory local to @IM, or %NULL for no auto-free * @N: number of @T 's to allocate * @T: type of thing to allocate * * Returns: A pointer of type @T *, or %NULL on error. */ /** * im_free: * @s: memory to free * * VIPS free function. VIPS tries to use this instead of free(). It always * returns zero, so it can be used as a callback handler. * * Only use it to free * memory that was previously allocated with im_malloc() with a %NULL first * argument. 
* * Returns: 0 */ int im_free( void *s ) { #ifdef DEBUGM { size_t size; s = (void *) ((char*)s - 16); size = *((size_t*)s); g_mutex_lock( malloc_mutex ); assert( g_slist_find( malloc_list, s ) ); malloc_list = g_slist_remove( malloc_list, s ); assert( !g_slist_find( malloc_list, s ) ); malloc_list = g_slist_remove( malloc_list, s ); assert( total_allocs > 0 ); total_mem_alloc -= size; total_allocs -= 1; next_trace += 1; if( next_trace > trace_freq ) { printf( "im_free: %d, %d allocs, total %.3gM, " "high water %.3gM\n", size, total_allocs, total_mem_alloc / (1024.0 * 1024.0), high_water_mark / (1024.0 * 1024.0) ); next_trace = 0; } g_mutex_unlock( malloc_mutex ); } #endif /*DEBUGM*/ #ifdef DEBUG if( !s ) g_assert( 0 ); #endif /*DEBUG*/ g_free( s ); return( 0 ); } /** * im_malloc: * @im: allocate memory local to this #IMAGE, or %NULL * @size: number of bytes to allocate * * Malloc local to @im, that is, the memory will be automatically * freed for you when the image is closed. If @im is %NULL, you need to free * the memory explicitly with im_free(). * If allocation fails im_malloc() returns %NULL and * sets an error message. * * If two threads try to allocate local to the same @im at the same time, you * can get heap corruption. * * Returns: a pointer to the allocated memory, or %NULL on error. */ void * im_malloc( IMAGE *im, size_t size ) { void *buf; #ifdef DEBUGM /* Assume the first im_malloc() is single-threaded. */ if( !malloc_mutex ) malloc_mutex = g_mutex_new(); #endif /*DEBUGM*/ #ifdef DEBUGM /* If debugging mallocs, need an extra sizeof(uint) bytes to track * size of this block. Ask for an extra 16 to make sure we don't break * alignment rules. 
*/ size += 16; #endif /*DEBUGM*/ if( !(buf = g_try_malloc( size )) ) { #ifdef DEBUG g_assert( 0 ); #endif /*DEBUG*/ im_error( "im_malloc", _( "out of memory --- size == %dMB" ), (int) (size / (1024.0*1024.0)) ); im_warn( "im_malloc", _( "out of memory --- size == %dMB" ), (int) (size / (1024.0*1024.0)) ); return( NULL ); } #ifdef DEBUGM /* Record number alloced. */ g_mutex_lock( malloc_mutex ); assert( !g_slist_find( malloc_list, buf ) ); malloc_list = g_slist_prepend( malloc_list, buf ); *((size_t*)buf) = size; buf = (void *) ((char*)buf + 16); total_mem_alloc += size; if( total_mem_alloc > high_water_mark ) high_water_mark = total_mem_alloc; total_allocs += 1; next_trace += 1; if( next_trace > trace_freq ) { printf( "im_malloc: %d, %d allocs, total %.3gM, " "high water %.3gM\n", size, total_allocs, total_mem_alloc / (1024.0 * 1024.0), high_water_mark / (1024.0 * 1024.0) ); next_trace = 0; } g_mutex_unlock( malloc_mutex ); /* Handy to breakpoint on this printf() for catching large mallocs(). */ if( size > 1000000 ) printf( "woah! big!\n" ); #endif /*DEBUGM*/ if( im && im_add_close_callback( im, (im_callback_fn) im_free, buf, NULL ) ) { im_free( buf ); return( NULL ); } return( buf ); }
819873.c
#include "pagai_assert.h"

typedef unsigned long size_t;
extern void *calloc (size_t __nmemb, size_t __size);

typedef double data;

/* Allocate a zeroed 10-element array, walk it pointer-by-pointer while
 * counting the steps, and assert that exactly 10 elements were visited.
 */
int main()
{
	int count = 0;
	data *begin = calloc(10, sizeof(data));
	data *end = begin + 10;

	data *cursor = begin;
	while (cursor < end) {
		count++;
		cursor++;
	}

	assert(count == 10);
}
523449.c
/*
 * OSTimer device simulation in PKUnity SoC
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or any later version.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/ptimer.h"
#include "qemu/module.h"
#include "qemu/log.h"
#include "qom/object.h"

#undef DEBUG_PUV3
#include "hw/unicore32/puv3.h"

#define TYPE_PUV3_OST "puv3_ost"
OBJECT_DECLARE_SIMPLE_TYPE(PUV3OSTState, PUV3_OST)

/* puv3 ostimer implementation: one match register (OSMR0) driven by a
 * down-counting ptimer; reaching the match raises the IRQ when enabled.
 */
struct PUV3OSTState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;
    ptimer_state *ptimer;

    uint32_t reg_OSMR0;  /* match register 0 */
    uint32_t reg_OSCR;   /* counter register (snapshot, see tick) */
    uint32_t reg_OSSR;   /* status register: match hit */
    uint32_t reg_OIER;   /* interrupt enable register */
};

/* Guest read of an OST register. The counter (0x10) is reconstructed as
 * match value minus the ptimer's remaining count, since the ptimer counts
 * down while OSCR conceptually counts up.
 */
static uint64_t puv3_ost_read(void *opaque, hwaddr offset, unsigned size)
{
    PUV3OSTState *s = opaque;
    uint32_t ret = 0;

    switch (offset) {
    case 0x10: /* Counter Register */
        ret = s->reg_OSMR0 - (uint32_t)ptimer_get_count(s->ptimer);
        break;
    case 0x14: /* Status Register */
        ret = s->reg_OSSR;
        break;
    case 0x1c: /* Interrupt Enable Register */
        ret = s->reg_OIER;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad read offset 0x%"HWADDR_PRIx"\n",
                      __func__, offset);
    }
    DPRINTF("offset 0x%x, value 0x%x\n", offset, ret);
    return ret;
}

/* Guest write of an OST register. Writing the match register reloads the
 * ptimer with the distance from the current counter to the new match value
 * (wrapping modulo 2^32 when the match is behind the counter) and restarts
 * it; ptimer changes must sit inside a begin/commit transaction.
 */
static void puv3_ost_write(void *opaque, hwaddr offset, uint64_t value, unsigned size)
{
    PUV3OSTState *s = opaque;

    DPRINTF("offset 0x%x, value 0x%x\n", offset, value);
    switch (offset) {
    case 0x00: /* Match Register 0 */
        ptimer_transaction_begin(s->ptimer);
        s->reg_OSMR0 = value;
        if (s->reg_OSMR0 > s->reg_OSCR) {
            ptimer_set_count(s->ptimer, s->reg_OSMR0 - s->reg_OSCR);
        } else {
            /* match already passed: wrap around the full 32-bit range */
            ptimer_set_count(s->ptimer, s->reg_OSMR0 + (0xffffffff - s->reg_OSCR));
        }
        ptimer_run(s->ptimer, 2);
        ptimer_transaction_commit(s->ptimer);
        break;
    case 0x14: /* Status Register */
        /* guests may only write 0 here, to acknowledge the match */
        assert(value == 0);
        if (s->reg_OSSR) {
            s->reg_OSSR = value;
            qemu_irq_lower(s->irq);
        }
        break;
    case 0x1c: /* Interrupt Enable Register */
        s->reg_OIER = value;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad write offset 0x%"HWADDR_PRIx"\n",
                      __func__, offset);
    }
}

static const MemoryRegionOps puv3_ost_ops = {
    .read = puv3_ost_read,
    .write = puv3_ost_write,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* ptimer callback: the counter reached the match value. Latch OSCR at the
 * match point and raise the IRQ if interrupts are enabled.
 */
static void puv3_ost_tick(void *opaque)
{
    PUV3OSTState *s = opaque;

    DPRINTF("ost hit when ptimer counter from 0x%x to 0x%x!\n",
            s->reg_OSCR, s->reg_OSMR0);

    s->reg_OSCR = s->reg_OSMR0;
    if (s->reg_OIER) {
        s->reg_OSSR = 1;
        qemu_irq_raise(s->irq);
    }
}

/* Realize: reset register state, create the 50 MHz ptimer and expose the
 * MMIO region and IRQ line on the sysbus device.
 */
static void puv3_ost_realize(DeviceState *dev, Error **errp)
{
    PUV3OSTState *s = PUV3_OST(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    s->reg_OIER = 0;
    s->reg_OSSR = 0;
    s->reg_OSMR0 = 0;
    s->reg_OSCR = 0;

    sysbus_init_irq(sbd, &s->irq);

    s->ptimer = ptimer_init(puv3_ost_tick, s, PTIMER_POLICY_DEFAULT);
    ptimer_transaction_begin(s->ptimer);
    ptimer_set_freq(s->ptimer, 50 * 1000 * 1000);
    ptimer_transaction_commit(s->ptimer);

    memory_region_init_io(&s->iomem, OBJECT(s), &puv3_ost_ops, s, "puv3_ost",
                          PUV3_REGS_OFFSET);
    sysbus_init_mmio(sbd, &s->iomem);
}

static void puv3_ost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = puv3_ost_realize;
}

static const TypeInfo puv3_ost_info = {
    .name = TYPE_PUV3_OST,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PUV3OSTState),
    .class_init = puv3_ost_class_init,
};

static void puv3_ost_register_type(void)
{
    type_register_static(&puv3_ost_info);
}

type_init(puv3_ost_register_type)
54439.c
/* * NDR data marshalling * * Copyright 2002 Greg Turner * Copyright 2003-2006 CodeWeavers * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA * * TODO: * - String structs * - Byte count pointers * - transmit_as/represent as * - Multi-dimensional arrays * - Conversion functions (NdrConvert) * - Checks for integer addition overflow in user marshall functions */ #include <assert.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <limits.h> #define NONAMELESSUNION #include "windef.h" #include "winbase.h" #include "winerror.h" #include "ndr_misc.h" #include "rpcndr.h" #include "ndrtypes.h" #include "wine/unicode.h" #include "wine/rpcfc.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(ole); #if defined(__i386__) # define LITTLE_ENDIAN_UINT32_WRITE(pchar, uint32) \ (*((UINT32 *)(pchar)) = (uint32)) # define LITTLE_ENDIAN_UINT32_READ(pchar) \ (*((UINT32 *)(pchar))) #else /* these would work for i386 too, but less efficient */ # define LITTLE_ENDIAN_UINT32_WRITE(pchar, uint32) \ (*(pchar) = LOBYTE(LOWORD(uint32)), \ *((pchar)+1) = HIBYTE(LOWORD(uint32)), \ *((pchar)+2) = LOBYTE(HIWORD(uint32)), \ *((pchar)+3) = HIBYTE(HIWORD(uint32))) # define LITTLE_ENDIAN_UINT32_READ(pchar) \ (MAKELONG( \ MAKEWORD(*(pchar), *((pchar)+1)), \ MAKEWORD(*((pchar)+2), *((pchar)+3)))) #endif #define 
BIG_ENDIAN_UINT32_WRITE(pchar, uint32) \ (*((pchar)+3) = LOBYTE(LOWORD(uint32)), \ *((pchar)+2) = HIBYTE(LOWORD(uint32)), \ *((pchar)+1) = LOBYTE(HIWORD(uint32)), \ *(pchar) = HIBYTE(HIWORD(uint32))) #define BIG_ENDIAN_UINT32_READ(pchar) \ (MAKELONG( \ MAKEWORD(*((pchar)+3), *((pchar)+2)), \ MAKEWORD(*((pchar)+1), *(pchar)))) #ifdef NDR_LOCAL_IS_BIG_ENDIAN # define NDR_LOCAL_UINT32_WRITE(pchar, uint32) \ BIG_ENDIAN_UINT32_WRITE(pchar, uint32) # define NDR_LOCAL_UINT32_READ(pchar) \ BIG_ENDIAN_UINT32_READ(pchar) #else # define NDR_LOCAL_UINT32_WRITE(pchar, uint32) \ LITTLE_ENDIAN_UINT32_WRITE(pchar, uint32) # define NDR_LOCAL_UINT32_READ(pchar) \ LITTLE_ENDIAN_UINT32_READ(pchar) #endif static inline void align_length( ULONG *len, unsigned int align ) { *len = (*len + align - 1) & ~(align - 1); } static inline void align_pointer( unsigned char **ptr, unsigned int align ) { ULONG_PTR mask = align - 1; *ptr = (unsigned char *)(((ULONG_PTR)*ptr + mask) & ~mask); } static inline void align_pointer_clear( unsigned char **ptr, unsigned int align ) { ULONG_PTR mask = align - 1; memset( *ptr, 0, (align - (ULONG_PTR)*ptr) & mask ); *ptr = (unsigned char *)(((ULONG_PTR)*ptr + mask) & ~mask); } #define STD_OVERFLOW_CHECK(_Msg) do { \ TRACE("buffer=%d/%d\n", (ULONG)(_Msg->Buffer - (unsigned char *)_Msg->RpcMsg->Buffer), _Msg->BufferLength); \ if (_Msg->Buffer > (unsigned char *)_Msg->RpcMsg->Buffer + _Msg->BufferLength) \ ERR("buffer overflow %d bytes\n", (ULONG)(_Msg->Buffer - ((unsigned char *)_Msg->RpcMsg->Buffer + _Msg->BufferLength))); \ } while (0) #define NDR_POINTER_ID_BASE 0x20000 #define NDR_POINTER_ID(pStubMsg) (NDR_POINTER_ID_BASE + ((pStubMsg)->UniquePtrCount++) * 4) #define NDR_TABLE_SIZE 128 #define NDR_TABLE_MASK 127 #define NDRSContextFromValue(user_context) (NDR_SCONTEXT)((char *)(user_context) - (char *)NDRSContextValue((NDR_SCONTEXT)NULL)) static unsigned char *WINAPI NdrBaseTypeMarshall(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static unsigned 
char *WINAPI NdrBaseTypeUnmarshall(PMIDL_STUB_MESSAGE, unsigned char **, PFORMAT_STRING, unsigned char); static void WINAPI NdrBaseTypeBufferSize(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static void WINAPI NdrBaseTypeFree(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static ULONG WINAPI NdrBaseTypeMemorySize(PMIDL_STUB_MESSAGE, PFORMAT_STRING); static unsigned char *WINAPI NdrContextHandleMarshall(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static void WINAPI NdrContextHandleBufferSize(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static unsigned char *WINAPI NdrContextHandleUnmarshall(PMIDL_STUB_MESSAGE, unsigned char **, PFORMAT_STRING, unsigned char); static unsigned char *WINAPI NdrRangeMarshall(PMIDL_STUB_MESSAGE,unsigned char *, PFORMAT_STRING); static void WINAPI NdrRangeBufferSize(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static ULONG WINAPI NdrRangeMemorySize(PMIDL_STUB_MESSAGE, PFORMAT_STRING); static void WINAPI NdrRangeFree(PMIDL_STUB_MESSAGE, unsigned char *, PFORMAT_STRING); static ULONG WINAPI NdrByteCountPointerMemorySize(PMIDL_STUB_MESSAGE, PFORMAT_STRING); static unsigned char * ComplexBufferSize(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, PFORMAT_STRING pPointer); static unsigned char * ComplexMarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, PFORMAT_STRING pPointer); static unsigned char * ComplexUnmarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, PFORMAT_STRING pPointer, unsigned char fMustAlloc); static ULONG ComplexStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat, PFORMAT_STRING pPointer); static unsigned char * ComplexFree(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, PFORMAT_STRING pPointer); const NDR_MARSHALL NdrMarshaller[NDR_TABLE_SIZE] = { 0, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, 
NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall, /* 0x10 */ NdrBaseTypeMarshall, /* 0x11 */ NdrPointerMarshall, NdrPointerMarshall, NdrPointerMarshall, NdrPointerMarshall, /* 0x15 */ NdrSimpleStructMarshall, NdrSimpleStructMarshall, NdrConformantStructMarshall, NdrConformantStructMarshall, NdrConformantVaryingStructMarshall, NdrComplexStructMarshall, /* 0x1b */ NdrConformantArrayMarshall, NdrConformantVaryingArrayMarshall, NdrFixedArrayMarshall, NdrFixedArrayMarshall, NdrVaryingArrayMarshall, NdrVaryingArrayMarshall, NdrComplexArrayMarshall, /* 0x22 */ NdrConformantStringMarshall, 0, 0, NdrConformantStringMarshall, NdrNonConformantStringMarshall, 0, 0, 0, /* 0x2a */ NdrEncapsulatedUnionMarshall, NdrNonEncapsulatedUnionMarshall, NdrByteCountPointerMarshall, NdrXmitOrRepAsMarshall, NdrXmitOrRepAsMarshall, /* 0x2f */ NdrInterfacePointerMarshall, /* 0x30 */ NdrContextHandleMarshall, /* 0xb1 */ 0, 0, 0, NdrUserMarshalMarshall, 0, 0, /* 0xb7 */ NdrRangeMarshall, NdrBaseTypeMarshall, NdrBaseTypeMarshall }; const NDR_UNMARSHALL NdrUnmarshaller[NDR_TABLE_SIZE] = { 0, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall, /* 0x10 */ NdrBaseTypeUnmarshall, /* 0x11 */ NdrPointerUnmarshall, NdrPointerUnmarshall, NdrPointerUnmarshall, NdrPointerUnmarshall, /* 0x15 */ NdrSimpleStructUnmarshall, NdrSimpleStructUnmarshall, NdrConformantStructUnmarshall, NdrConformantStructUnmarshall, NdrConformantVaryingStructUnmarshall, NdrComplexStructUnmarshall, /* 0x1b */ NdrConformantArrayUnmarshall, 
NdrConformantVaryingArrayUnmarshall, NdrFixedArrayUnmarshall, NdrFixedArrayUnmarshall, NdrVaryingArrayUnmarshall, NdrVaryingArrayUnmarshall, NdrComplexArrayUnmarshall, /* 0x22 */ NdrConformantStringUnmarshall, 0, 0, NdrConformantStringUnmarshall, NdrNonConformantStringUnmarshall, 0, 0, 0, /* 0x2a */ NdrEncapsulatedUnionUnmarshall, NdrNonEncapsulatedUnionUnmarshall, NdrByteCountPointerUnmarshall, NdrXmitOrRepAsUnmarshall, NdrXmitOrRepAsUnmarshall, /* 0x2f */ NdrInterfacePointerUnmarshall, /* 0x30 */ NdrContextHandleUnmarshall, /* 0xb1 */ 0, 0, 0, NdrUserMarshalUnmarshall, 0, 0, /* 0xb7 */ NdrRangeUnmarshall, NdrBaseTypeUnmarshall, NdrBaseTypeUnmarshall }; const NDR_BUFFERSIZE NdrBufferSizer[NDR_TABLE_SIZE] = { 0, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize, /* 0x10 */ NdrBaseTypeBufferSize, /* 0x11 */ NdrPointerBufferSize, NdrPointerBufferSize, NdrPointerBufferSize, NdrPointerBufferSize, /* 0x15 */ NdrSimpleStructBufferSize, NdrSimpleStructBufferSize, NdrConformantStructBufferSize, NdrConformantStructBufferSize, NdrConformantVaryingStructBufferSize, NdrComplexStructBufferSize, /* 0x1b */ NdrConformantArrayBufferSize, NdrConformantVaryingArrayBufferSize, NdrFixedArrayBufferSize, NdrFixedArrayBufferSize, NdrVaryingArrayBufferSize, NdrVaryingArrayBufferSize, NdrComplexArrayBufferSize, /* 0x22 */ NdrConformantStringBufferSize, 0, 0, NdrConformantStringBufferSize, NdrNonConformantStringBufferSize, 0, 0, 0, /* 0x2a */ NdrEncapsulatedUnionBufferSize, NdrNonEncapsulatedUnionBufferSize, NdrByteCountPointerBufferSize, NdrXmitOrRepAsBufferSize, NdrXmitOrRepAsBufferSize, /* 0x2f */ NdrInterfacePointerBufferSize, /* 0x30 */ NdrContextHandleBufferSize, /* 0xb1 */ 0, 0, 0, 
NdrUserMarshalBufferSize, 0, 0, /* 0xb7 */ NdrRangeBufferSize, NdrBaseTypeBufferSize, NdrBaseTypeBufferSize }; const NDR_MEMORYSIZE NdrMemorySizer[NDR_TABLE_SIZE] = { 0, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize, /* 0x10 */ NdrBaseTypeMemorySize, /* 0x11 */ NdrPointerMemorySize, NdrPointerMemorySize, NdrPointerMemorySize, NdrPointerMemorySize, /* 0x15 */ NdrSimpleStructMemorySize, NdrSimpleStructMemorySize, NdrConformantStructMemorySize, NdrConformantStructMemorySize, NdrConformantVaryingStructMemorySize, NdrComplexStructMemorySize, /* 0x1b */ NdrConformantArrayMemorySize, NdrConformantVaryingArrayMemorySize, NdrFixedArrayMemorySize, NdrFixedArrayMemorySize, NdrVaryingArrayMemorySize, NdrVaryingArrayMemorySize, NdrComplexArrayMemorySize, /* 0x22 */ NdrConformantStringMemorySize, 0, 0, NdrConformantStringMemorySize, NdrNonConformantStringMemorySize, 0, 0, 0, /* 0x2a */ NdrEncapsulatedUnionMemorySize, NdrNonEncapsulatedUnionMemorySize, NdrByteCountPointerMemorySize, NdrXmitOrRepAsMemorySize, NdrXmitOrRepAsMemorySize, /* 0x2f */ NdrInterfacePointerMemorySize, /* 0x30 */ 0, /* 0xb1 */ 0, 0, 0, NdrUserMarshalMemorySize, 0, 0, /* 0xb7 */ NdrRangeMemorySize, NdrBaseTypeMemorySize, NdrBaseTypeMemorySize }; const NDR_FREE NdrFreer[NDR_TABLE_SIZE] = { 0, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, NdrBaseTypeFree, /* 0x10 */ NdrBaseTypeFree, /* 0x11 */ NdrPointerFree, NdrPointerFree, NdrPointerFree, NdrPointerFree, /* 0x15 */ NdrSimpleStructFree, NdrSimpleStructFree, 
NdrConformantStructFree, NdrConformantStructFree, NdrConformantVaryingStructFree, NdrComplexStructFree, /* 0x1b */ NdrConformantArrayFree, NdrConformantVaryingArrayFree, NdrFixedArrayFree, NdrFixedArrayFree, NdrVaryingArrayFree, NdrVaryingArrayFree, NdrComplexArrayFree, /* 0x22 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x2a */ NdrEncapsulatedUnionFree, NdrNonEncapsulatedUnionFree, 0, NdrXmitOrRepAsFree, NdrXmitOrRepAsFree, /* 0x2f */ NdrInterfacePointerFree, /* 0x30 */ 0, /* 0xb1 */ 0, 0, 0, NdrUserMarshalFree, 0, 0, /* 0xb7 */ NdrRangeFree, NdrBaseTypeFree, NdrBaseTypeFree }; typedef struct _NDR_MEMORY_LIST { ULONG magic; ULONG size; ULONG reserved; struct _NDR_MEMORY_LIST *next; } NDR_MEMORY_LIST; #define MEML_MAGIC ('M' << 24 | 'E' << 16 | 'M' << 8 | 'L') /*********************************************************************** * NdrAllocate [RPCRT4.@] * * Allocates a block of memory using pStubMsg->pfnAllocate. * * PARAMS * pStubMsg [I/O] MIDL_STUB_MESSAGE structure. * len [I] Size of memory block to allocate. * * RETURNS * The memory block of size len that was allocated. * * NOTES * The memory block is always 8-byte aligned. * If the function is unable to allocate memory an RPC_X_NO_MEMORY * exception is raised. 
*/ void * WINAPI NdrAllocate(MIDL_STUB_MESSAGE *pStubMsg, SIZE_T len) { SIZE_T aligned_len; SIZE_T adjusted_len; void *p; NDR_MEMORY_LIST *mem_list; aligned_len = (len + 7) & ~7; adjusted_len = aligned_len + sizeof(NDR_MEMORY_LIST); /* check for overflow */ if (adjusted_len < len) { ERR("overflow of adjusted_len %ld, len %ld\n", adjusted_len, len); RpcRaiseException(RPC_X_BAD_STUB_DATA); } p = pStubMsg->pfnAllocate(adjusted_len); if (!p) RpcRaiseException(RPC_X_NO_MEMORY); mem_list = (NDR_MEMORY_LIST *)((char *)p + aligned_len); mem_list->magic = MEML_MAGIC; mem_list->size = aligned_len; mem_list->reserved = 0; mem_list->next = pStubMsg->pMemoryList; pStubMsg->pMemoryList = mem_list; TRACE("-- %p\n", p); return p; } static void NdrFree(MIDL_STUB_MESSAGE *pStubMsg, unsigned char *Pointer) { TRACE("(%p, %p)\n", pStubMsg, Pointer); pStubMsg->pfnFree(Pointer); } static inline BOOL IsConformanceOrVariancePresent(PFORMAT_STRING pFormat) { return (*(const ULONG *)pFormat != -1); } static inline PFORMAT_STRING SkipConformance(const PMIDL_STUB_MESSAGE pStubMsg, const PFORMAT_STRING pFormat) { return pStubMsg->fHasNewCorrDesc ? 
pFormat + 6 : pFormat + 4; } static PFORMAT_STRING ReadConformance(MIDL_STUB_MESSAGE *pStubMsg, PFORMAT_STRING pFormat) { align_pointer(&pStubMsg->Buffer, 4); if (pStubMsg->Buffer + 4 > pStubMsg->BufferEnd) RpcRaiseException(RPC_X_BAD_STUB_DATA); pStubMsg->MaxCount = NDR_LOCAL_UINT32_READ(pStubMsg->Buffer); pStubMsg->Buffer += 4; TRACE("unmarshalled conformance is %ld\n", pStubMsg->MaxCount); return SkipConformance(pStubMsg, pFormat); } static inline PFORMAT_STRING ReadVariance(MIDL_STUB_MESSAGE *pStubMsg, PFORMAT_STRING pFormat, ULONG MaxValue) { if (pFormat && !IsConformanceOrVariancePresent(pFormat)) { pStubMsg->Offset = 0; pStubMsg->ActualCount = pStubMsg->MaxCount; goto done; } align_pointer(&pStubMsg->Buffer, 4); if (pStubMsg->Buffer + 8 > pStubMsg->BufferEnd) RpcRaiseException(RPC_X_BAD_STUB_DATA); pStubMsg->Offset = NDR_LOCAL_UINT32_READ(pStubMsg->Buffer); pStubMsg->Buffer += 4; TRACE("offset is %d\n", pStubMsg->Offset); pStubMsg->ActualCount = NDR_LOCAL_UINT32_READ(pStubMsg->Buffer); pStubMsg->Buffer += 4; TRACE("variance is %d\n", pStubMsg->ActualCount); if ((pStubMsg->ActualCount > MaxValue) || (pStubMsg->ActualCount + pStubMsg->Offset > MaxValue)) { ERR("invalid array bound(s): ActualCount = %d, Offset = %d, MaxValue = %d\n", pStubMsg->ActualCount, pStubMsg->Offset, MaxValue); RpcRaiseException(RPC_S_INVALID_BOUND); return NULL; } done: return SkipConformance(pStubMsg, pFormat); } /* writes the conformance value to the buffer */ static inline void WriteConformance(MIDL_STUB_MESSAGE *pStubMsg) { align_pointer_clear(&pStubMsg->Buffer, 4); if (pStubMsg->Buffer + 4 > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength) RpcRaiseException(RPC_X_BAD_STUB_DATA); NDR_LOCAL_UINT32_WRITE(pStubMsg->Buffer, pStubMsg->MaxCount); pStubMsg->Buffer += 4; } /* writes the variance values to the buffer */ static inline void WriteVariance(MIDL_STUB_MESSAGE *pStubMsg) { align_pointer_clear(&pStubMsg->Buffer, 4); if (pStubMsg->Buffer + 8 > (unsigned char 
*)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength) RpcRaiseException(RPC_X_BAD_STUB_DATA); NDR_LOCAL_UINT32_WRITE(pStubMsg->Buffer, pStubMsg->Offset); pStubMsg->Buffer += 4; NDR_LOCAL_UINT32_WRITE(pStubMsg->Buffer, pStubMsg->ActualCount); pStubMsg->Buffer += 4; } /* requests buffer space for the conformance value */ static inline void SizeConformance(MIDL_STUB_MESSAGE *pStubMsg) { align_length(&pStubMsg->BufferLength, 4); if (pStubMsg->BufferLength + 4 < pStubMsg->BufferLength) RpcRaiseException(RPC_X_BAD_STUB_DATA); pStubMsg->BufferLength += 4; } /* requests buffer space for the variance values */ static inline void SizeVariance(MIDL_STUB_MESSAGE *pStubMsg) { align_length(&pStubMsg->BufferLength, 4); if (pStubMsg->BufferLength + 8 < pStubMsg->BufferLength) RpcRaiseException(RPC_X_BAD_STUB_DATA); pStubMsg->BufferLength += 8; } PFORMAT_STRING ComputeConformanceOrVariance( MIDL_STUB_MESSAGE *pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, ULONG_PTR def, ULONG_PTR *pCount) { BYTE dtype = pFormat[0] & 0xf; short ofs = *(const short *)&pFormat[2]; LPVOID ptr = NULL; ULONG_PTR data = 0; if (!IsConformanceOrVariancePresent(pFormat)) { /* null descriptor */ *pCount = def; goto finish_conf; } switch (pFormat[0] & 0xf0) { case RPC_FC_NORMAL_CONFORMANCE: TRACE("normal conformance, ofs=%d\n", ofs); ptr = pMemory; break; case RPC_FC_POINTER_CONFORMANCE: TRACE("pointer conformance, ofs=%d\n", ofs); ptr = pStubMsg->Memory; break; case RPC_FC_TOP_LEVEL_CONFORMANCE: TRACE("toplevel conformance, ofs=%d\n", ofs); if (pStubMsg->StackTop) { ptr = pStubMsg->StackTop; } else { /* -Os mode, *pCount is already set */ goto finish_conf; } break; case RPC_FC_CONSTANT_CONFORMANCE: data = ofs | ((DWORD)pFormat[1] << 16); TRACE("constant conformance, val=%ld\n", data); *pCount = data; goto finish_conf; case RPC_FC_TOP_LEVEL_MULTID_CONFORMANCE: FIXME("toplevel multidimensional conformance, ofs=%d\n", ofs); if (pStubMsg->StackTop) { ptr = pStubMsg->StackTop; } else { /* ? 
*/ goto done_conf_grab; } break; default: FIXME("unknown conformance type %x, expect crash.\n", pFormat[0] & 0xf0); goto finish_conf; } switch (pFormat[1]) { case RPC_FC_DEREFERENCE: ptr = *(LPVOID*)((char *)ptr + ofs); break; case RPC_FC_CALLBACK: { unsigned char *old_stack_top = pStubMsg->StackTop; ULONG_PTR max_count, old_max_count = pStubMsg->MaxCount; pStubMsg->StackTop = ptr; /* ofs is index into StubDesc->apfnExprEval */ TRACE("callback conformance into apfnExprEval[%d]\n", ofs); pStubMsg->StubDesc->apfnExprEval[ofs](pStubMsg); pStubMsg->StackTop = old_stack_top; /* the callback function always stores the computed value in MaxCount */ max_count = pStubMsg->MaxCount; pStubMsg->MaxCount = old_max_count; *pCount = max_count; goto finish_conf; } default: ptr = (char *)ptr + ofs; break; } switch (dtype) { case RPC_FC_LONG: case RPC_FC_ULONG: data = *(DWORD*)ptr; break; case RPC_FC_SHORT: data = *(SHORT*)ptr; break; case RPC_FC_USHORT: data = *(USHORT*)ptr; break; case RPC_FC_CHAR: case RPC_FC_SMALL: data = *(CHAR*)ptr; break; case RPC_FC_BYTE: case RPC_FC_USMALL: data = *(UCHAR*)ptr; break; case RPC_FC_HYPER: data = *(ULONGLONG *)ptr; break; default: FIXME("unknown conformance data type %x\n", dtype); goto done_conf_grab; } TRACE("dereferenced data type %x at %p, got %ld\n", dtype, ptr, data); done_conf_grab: switch (pFormat[1]) { case RPC_FC_DEREFERENCE: /* already handled */ case 0: /* no op */ *pCount = data; break; case RPC_FC_ADD_1: *pCount = data + 1; break; case RPC_FC_SUB_1: *pCount = data - 1; break; case RPC_FC_MULT_2: *pCount = data * 2; break; case RPC_FC_DIV_2: *pCount = data / 2; break; default: FIXME("unknown conformance op %d\n", pFormat[1]); goto finish_conf; } finish_conf: TRACE("resulting conformance is %ld\n", *pCount); return SkipConformance(pStubMsg, pFormat); } static inline PFORMAT_STRING SkipVariance(PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat) { return SkipConformance( pStubMsg, pFormat ); } /* multiply two numbers together, 
raising an RPC_S_INVALID_BOUND exception if
 * the result overflows 32-bits */
static inline ULONG safe_multiply(ULONG a, ULONG b)
{
    /* widen to 64-bit so the product cannot wrap before the check */
    ULONGLONG ret = (ULONGLONG)a * b;
    if (ret > 0xffffffff)
    {
        RpcRaiseException(RPC_S_INVALID_BOUND);
        return 0;
    }
    return ret;
}

/* advances the buffer cursor by size bytes, raising RPC_X_BAD_STUB_DATA if
 * that would step past the end of the message buffer */
static inline void safe_buffer_increment(MIDL_STUB_MESSAGE *pStubMsg, ULONG size)
{
    if ((pStubMsg->Buffer + size < pStubMsg->Buffer) || /* integer overflow of pStubMsg->Buffer */
        (pStubMsg->Buffer + size > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength))
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    pStubMsg->Buffer += size;
}

/* grows the computed buffer length by size bytes, raising
 * RPC_X_BAD_STUB_DATA on 32-bit wrap-around of the running total */
static inline void safe_buffer_length_increment(MIDL_STUB_MESSAGE *pStubMsg, ULONG size)
{
    if (pStubMsg->BufferLength + size < pStubMsg->BufferLength) /* integer overflow of pStubMsg->BufferSize */
    {
        ERR("buffer length overflow - BufferLength = %u, size = %u\n",
            pStubMsg->BufferLength, size);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
    pStubMsg->BufferLength += size;
}

/* copies data from the buffer, checking that there is enough data in the buffer
 * to do so */
static inline void safe_copy_from_buffer(MIDL_STUB_MESSAGE *pStubMsg, void *p, ULONG size)
{
    if ((pStubMsg->Buffer + size < pStubMsg->Buffer) || /* integer overflow of pStubMsg->Buffer */
        (pStubMsg->Buffer + size > pStubMsg->BufferEnd))
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p, size = %u\n",
            pStubMsg->Buffer, pStubMsg->BufferEnd, size);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
    /* overlapping copy would indicate a caller bug; memcpy requires
     * non-overlapping regions */
    if (p == pStubMsg->Buffer)
        ERR("pointer is the same as the buffer\n");
    memcpy(p, pStubMsg->Buffer, size);
    pStubMsg->Buffer += size;
}

/* copies data to the buffer, checking that there is enough space to do so */
static inline void safe_copy_to_buffer(MIDL_STUB_MESSAGE *pStubMsg, const void *p, ULONG size)
{
    if ((pStubMsg->Buffer + size < pStubMsg->Buffer) || /* integer overflow of pStubMsg->Buffer */
        (pStubMsg->Buffer + size > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength))
    {
        ERR("buffer overflow - Buffer = %p,
BufferEnd = %p, size = %u\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength,
            size);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
    memcpy(pStubMsg->Buffer, p, size);
    pStubMsg->Buffer += size;
}

/* verify that string data sitting in the buffer is valid and safe to
 * unmarshall */
static void validate_string_data(MIDL_STUB_MESSAGE *pStubMsg, ULONG bufsize, ULONG esize)
{
    ULONG i;

    /* verify the buffer is safe to access */
    if ((pStubMsg->Buffer + bufsize < pStubMsg->Buffer) ||
        (pStubMsg->Buffer + bufsize > pStubMsg->BufferEnd))
    {
        ERR("bufsize 0x%x exceeded buffer end %p of buffer %p\n", bufsize,
            pStubMsg->BufferEnd, pStubMsg->Buffer);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    /* strings must always have null terminating bytes */
    if (bufsize < esize)
    {
        ERR("invalid string length of %d\n", bufsize / esize);
        RpcRaiseException(RPC_S_INVALID_BOUND);
    }

    /* the last element (esize bytes) must be entirely zero */
    for (i = bufsize - esize; i < bufsize; i++)
        if (pStubMsg->Buffer[i] != 0)
        {
            ERR("string not null-terminated at byte position %d, data is 0x%x\n",
                i, pStubMsg->Buffer[i]);
            RpcRaiseException(RPC_S_INVALID_BOUND);
        }
}

/* trace helper: print the pointer attribute flags from a format string */
static inline void dump_pointer_attr(unsigned char attr)
{
    if (attr & RPC_FC_P_ALLOCALLNODES)
        TRACE(" RPC_FC_P_ALLOCALLNODES");
    if (attr & RPC_FC_P_DONTFREE)
        TRACE(" RPC_FC_P_DONTFREE");
    if (attr & RPC_FC_P_ONSTACK)
        TRACE(" RPC_FC_P_ONSTACK");
    if (attr & RPC_FC_P_SIMPLEPOINTER)
        TRACE(" RPC_FC_P_SIMPLEPOINTER");
    if (attr & RPC_FC_P_DEREF)
        TRACE(" RPC_FC_P_DEREF");
    TRACE("\n");
}

/***********************************************************************
 *           PointerMarshall [internal]
 *
 * Marshals a single pointer described by pFormat: writes the wire pointer
 * representation (nothing for ref pointers, a 4-byte pointer id for
 * unique/object/full pointers) at Buffer, then dispatches to the pointee's
 * marshaller for the data itself when needed.
 */
static void PointerMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                            unsigned char *Buffer,
                            unsigned char *Pointer,
                            PFORMAT_STRING pFormat)
{
    unsigned type = pFormat[0], attr = pFormat[1];
    PFORMAT_STRING desc;
    NDR_MARSHALL m;
    ULONG pointer_id;
    BOOL pointer_needs_marshaling;

    TRACE("(%p,%p,%p,%p)\n", pStubMsg, Buffer, Pointer, pFormat);
    TRACE("type=0x%x, attr=", type); dump_pointer_attr(attr);
    pFormat += 2;
    /* simple pointers embed the pointee description inline; otherwise the
     * format string holds a relative offset to it */
    if (attr & RPC_FC_P_SIMPLEPOINTER) desc = pFormat;
    else desc = pFormat + *(const SHORT*)pFormat;

    switch (type) {
    case RPC_FC_RP: /* ref pointer (always non-null) */
        if (!Pointer)
        {
            ERR("NULL ref pointer is not allowed\n");
            RpcRaiseException(RPC_X_NULL_REF_POINTER);
        }
        pointer_needs_marshaling = TRUE;
        break;
    case RPC_FC_UP: /* unique pointer */
    case RPC_FC_OP: /* object pointer - same as unique here */
        if (Pointer)
            pointer_needs_marshaling = TRUE;
        else
            pointer_needs_marshaling = FALSE;
        /* non-null pointers get a fresh wire id; null marshals as 0 */
        pointer_id = Pointer ? NDR_POINTER_ID(pStubMsg) : 0;
        TRACE("writing 0x%08x to buffer\n", pointer_id);
        NDR_LOCAL_UINT32_WRITE(Buffer, pointer_id);
        break;
    case RPC_FC_FP:
        /* full pointers share one id per distinct pointer; only the first
         * occurrence marshals the pointee */
        pointer_needs_marshaling = !NdrFullPointerQueryPointer(
            pStubMsg->FullPtrXlatTables, Pointer, 1, &pointer_id);
        TRACE("writing 0x%08x to buffer\n", pointer_id);
        NDR_LOCAL_UINT32_WRITE(Buffer, pointer_id);
        break;
    default:
        FIXME("unhandled ptr type=%02x\n", type);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
        return;
    }

    TRACE("calling marshaller for type 0x%x\n", (int)*desc);

    if (pointer_needs_marshaling) {
        if (attr & RPC_FC_P_DEREF) {
            Pointer = *(unsigned char**)Pointer;
            TRACE("deref => %p\n", Pointer);
        }
        m = NdrMarshaller[*desc & NDR_TABLE_MASK];
        if (m) m(pStubMsg, Pointer, desc);
        else FIXME("no marshaller for data type=%02x\n", *desc);
    }

    STD_OVERFLOW_CHECK(pStubMsg);
}

/***********************************************************************
 *           PointerUnmarshall [internal]
 *
 * Counterpart of PointerMarshall: reads the wire pointer representation at
 * Buffer and unmarshals the pointee into *pPointer, reusing pSrcPointer's
 * memory where possible so [in,out] parameters round-trip correctly.
 */
static void PointerUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                              unsigned char *Buffer,
                              unsigned char **pPointer,
                              unsigned char *pSrcPointer,
                              PFORMAT_STRING pFormat,
                              unsigned char fMustAlloc)
{
    unsigned type = pFormat[0], attr = pFormat[1];
    PFORMAT_STRING desc;
    NDR_UNMARSHALL m;
    DWORD pointer_id = 0;
    BOOL pointer_needs_unmarshaling;

    TRACE("(%p,%p,%p,%p,%p,%d)\n", pStubMsg, Buffer, pPointer, pSrcPointer, pFormat, fMustAlloc);
    TRACE("type=0x%x, attr=", type); dump_pointer_attr(attr);
    pFormat += 2;
    if (attr & RPC_FC_P_SIMPLEPOINTER) desc = pFormat;
    else desc = pFormat +
*(const SHORT*)pFormat; switch (type) { case RPC_FC_RP: /* ref pointer (always non-null) */ pointer_needs_unmarshaling = TRUE; break; case RPC_FC_UP: /* unique pointer */ pointer_id = NDR_LOCAL_UINT32_READ(Buffer); TRACE("pointer_id is 0x%08x\n", pointer_id); if (pointer_id) pointer_needs_unmarshaling = TRUE; else { *pPointer = NULL; pointer_needs_unmarshaling = FALSE; } break; case RPC_FC_OP: /* object pointer - we must free data before overwriting it */ pointer_id = NDR_LOCAL_UINT32_READ(Buffer); TRACE("pointer_id is 0x%08x\n", pointer_id); if (!fMustAlloc && pSrcPointer) { FIXME("free object pointer %p\n", pSrcPointer); fMustAlloc = TRUE; } if (pointer_id) pointer_needs_unmarshaling = TRUE; else { *pPointer = NULL; pointer_needs_unmarshaling = FALSE; } break; case RPC_FC_FP: pointer_id = NDR_LOCAL_UINT32_READ(Buffer); TRACE("pointer_id is 0x%08x\n", pointer_id); pointer_needs_unmarshaling = !NdrFullPointerQueryRefId( pStubMsg->FullPtrXlatTables, pointer_id, 1, (void **)pPointer); break; default: FIXME("unhandled ptr type=%02x\n", type); RpcRaiseException(RPC_X_BAD_STUB_DATA); return; } if (pointer_needs_unmarshaling) { unsigned char **current_ptr = pPointer; if (pStubMsg->IsClient) { TRACE("client\n"); /* if we aren't forcing allocation of memory then try to use the existing * (source) pointer to unmarshall the data into so that [in,out] * parameters behave correctly. it doesn't matter if the parameter is * [out] only since in that case the pointer will be NULL. 
we force * allocation when the source pointer is NULL here instead of in the type * unmarshalling routine for the benefit of the deref code below */ if (!fMustAlloc) { if (pSrcPointer) { TRACE("setting *pPointer to %p\n", pSrcPointer); *pPointer = pSrcPointer; } else fMustAlloc = TRUE; } } else { TRACE("server\n"); /* the memory in a stub is never initialised, so we have to work out here * whether we have to initialise it so we can use the optimisation of * setting the pointer to the buffer, if possible, or set fMustAlloc to * TRUE. */ if (attr & RPC_FC_P_DEREF) { fMustAlloc = TRUE; } else { *current_ptr = NULL; } } if (attr & RPC_FC_P_ALLOCALLNODES) FIXME("RPC_FC_P_ALLOCALLNODES not implemented\n"); if (attr & RPC_FC_P_DEREF) { if (fMustAlloc) { unsigned char *base_ptr_val = NdrAllocate(pStubMsg, sizeof(void *)); *pPointer = base_ptr_val; current_ptr = (unsigned char **)base_ptr_val; } else current_ptr = *(unsigned char***)current_ptr; TRACE("deref => %p\n", current_ptr); if (!fMustAlloc && !*current_ptr) fMustAlloc = TRUE; } m = NdrUnmarshaller[*desc & NDR_TABLE_MASK]; if (m) m(pStubMsg, current_ptr, desc, fMustAlloc); else FIXME("no unmarshaller for data type=%02x\n", *desc); if (type == RPC_FC_FP) NdrFullPointerInsertRefId(pStubMsg->FullPtrXlatTables, pointer_id, *pPointer); } TRACE("pointer=%p\n", *pPointer); } /*********************************************************************** * PointerBufferSize [internal] */ static void PointerBufferSize(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *Pointer, PFORMAT_STRING pFormat) { unsigned type = pFormat[0], attr = pFormat[1]; PFORMAT_STRING desc; NDR_BUFFERSIZE m; BOOL pointer_needs_sizing; ULONG pointer_id; TRACE("(%p,%p,%p)\n", pStubMsg, Pointer, pFormat); TRACE("type=0x%x, attr=", type); dump_pointer_attr(attr); pFormat += 2; if (attr & RPC_FC_P_SIMPLEPOINTER) desc = pFormat; else desc = pFormat + *(const SHORT*)pFormat; switch (type) { case RPC_FC_RP: /* ref pointer (always non-null) */ if (!Pointer) { ERR("NULL 
ref pointer is not allowed\n"); RpcRaiseException(RPC_X_NULL_REF_POINTER); } break; case RPC_FC_OP: case RPC_FC_UP: /* NULL pointer has no further representation */ if (!Pointer) return; break; case RPC_FC_FP: pointer_needs_sizing = !NdrFullPointerQueryPointer( pStubMsg->FullPtrXlatTables, Pointer, 0, &pointer_id); if (!pointer_needs_sizing) return; break; default: FIXME("unhandled ptr type=%02x\n", type); RpcRaiseException(RPC_X_BAD_STUB_DATA); return; } if (attr & RPC_FC_P_DEREF) { Pointer = *(unsigned char**)Pointer; TRACE("deref => %p\n", Pointer); } m = NdrBufferSizer[*desc & NDR_TABLE_MASK]; if (m) m(pStubMsg, Pointer, desc); else FIXME("no buffersizer for data type=%02x\n", *desc); } /*********************************************************************** * PointerMemorySize [internal] */ static ULONG PointerMemorySize(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *Buffer, PFORMAT_STRING pFormat) { unsigned type = pFormat[0], attr = pFormat[1]; PFORMAT_STRING desc; NDR_MEMORYSIZE m; DWORD pointer_id = 0; BOOL pointer_needs_sizing; TRACE("(%p,%p,%p)\n", pStubMsg, Buffer, pFormat); TRACE("type=0x%x, attr=", type); dump_pointer_attr(attr); pFormat += 2; if (attr & RPC_FC_P_SIMPLEPOINTER) desc = pFormat; else desc = pFormat + *(const SHORT*)pFormat; switch (type) { case RPC_FC_RP: /* ref pointer (always non-null) */ pointer_needs_sizing = TRUE; break; case RPC_FC_UP: /* unique pointer */ case RPC_FC_OP: /* object pointer - we must free data before overwriting it */ pointer_id = NDR_LOCAL_UINT32_READ(Buffer); TRACE("pointer_id is 0x%08x\n", pointer_id); if (pointer_id) pointer_needs_sizing = TRUE; else pointer_needs_sizing = FALSE; break; case RPC_FC_FP: { void *pointer; pointer_id = NDR_LOCAL_UINT32_READ(Buffer); TRACE("pointer_id is 0x%08x\n", pointer_id); pointer_needs_sizing = !NdrFullPointerQueryRefId( pStubMsg->FullPtrXlatTables, pointer_id, 1, &pointer); break; } default: FIXME("unhandled ptr type=%02x\n", type); RpcRaiseException(RPC_X_BAD_STUB_DATA); return 
0; } if (attr & RPC_FC_P_DEREF) { align_length(&pStubMsg->MemorySize, sizeof(void*)); pStubMsg->MemorySize += sizeof(void*); TRACE("deref\n"); } if (pointer_needs_sizing) { m = NdrMemorySizer[*desc & NDR_TABLE_MASK]; if (m) m(pStubMsg, desc); else FIXME("no memorysizer for data type=%02x\n", *desc); } return pStubMsg->MemorySize; } /*********************************************************************** * PointerFree [internal] */ static void PointerFree(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *Pointer, PFORMAT_STRING pFormat) { unsigned type = pFormat[0], attr = pFormat[1]; PFORMAT_STRING desc; NDR_FREE m; unsigned char *current_pointer = Pointer; TRACE("(%p,%p,%p)\n", pStubMsg, Pointer, pFormat); TRACE("type=0x%x, attr=", type); dump_pointer_attr(attr); if (attr & RPC_FC_P_DONTFREE) return; pFormat += 2; if (attr & RPC_FC_P_SIMPLEPOINTER) desc = pFormat; else desc = pFormat + *(const SHORT*)pFormat; if (!Pointer) return; if (type == RPC_FC_FP) { int pointer_needs_freeing = NdrFullPointerFree( pStubMsg->FullPtrXlatTables, Pointer); if (!pointer_needs_freeing) return; } if (attr & RPC_FC_P_DEREF) { current_pointer = *(unsigned char**)Pointer; TRACE("deref => %p\n", current_pointer); } m = NdrFreer[*desc & NDR_TABLE_MASK]; if (m) m(pStubMsg, current_pointer, desc); /* this check stops us from trying to free buffer memory. we don't have to * worry about clients, since they won't call this function. * we don't have to check for the buffer being reallocated because * BufferStart and BufferEnd won't be reset when allocating memory for * sending the response. 
we don't have to check for the new buffer here as * it won't be used a type memory, only for buffer memory */ if (Pointer >= pStubMsg->BufferStart && Pointer <= pStubMsg->BufferEnd) goto notfree; if (attr & RPC_FC_P_ONSTACK) { TRACE("not freeing stack ptr %p\n", Pointer); return; } TRACE("freeing %p\n", Pointer); NdrFree(pStubMsg, Pointer); return; notfree: TRACE("not freeing %p\n", Pointer); } /*********************************************************************** * EmbeddedPointerMarshall */ static unsigned char * EmbeddedPointerMarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat) { unsigned char *Mark = pStubMsg->BufferMark; unsigned rep, count, stride; unsigned i; unsigned char *saved_buffer = NULL; TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat); if (*pFormat != RPC_FC_PP) return NULL; pFormat += 2; if (pStubMsg->PointerBufferMark) { saved_buffer = pStubMsg->Buffer; pStubMsg->Buffer = pStubMsg->PointerBufferMark; pStubMsg->PointerBufferMark = NULL; } while (pFormat[0] != RPC_FC_END) { switch (pFormat[0]) { default: FIXME("unknown repeat type %d; assuming no repeat\n", pFormat[0]); /* fallthrough */ case RPC_FC_NO_REPEAT: rep = 1; stride = 0; count = 1; pFormat += 2; break; case RPC_FC_FIXED_REPEAT: rep = *(const WORD*)&pFormat[2]; stride = *(const WORD*)&pFormat[4]; count = *(const WORD*)&pFormat[8]; pFormat += 10; break; case RPC_FC_VARIABLE_REPEAT: rep = (pFormat[1] == RPC_FC_VARIABLE_OFFSET) ? 
pStubMsg->ActualCount : pStubMsg->MaxCount; stride = *(const WORD*)&pFormat[2]; count = *(const WORD*)&pFormat[6]; pFormat += 8; break; } for (i = 0; i < rep; i++) { PFORMAT_STRING info = pFormat; unsigned char *membase = pMemory + (i * stride); unsigned char *bufbase = Mark + (i * stride); unsigned u; for (u=0; u<count; u++,info+=8) { unsigned char *memptr = membase + *(const SHORT*)&info[0]; unsigned char *bufptr = bufbase + *(const SHORT*)&info[2]; unsigned char *saved_memory = pStubMsg->Memory; pStubMsg->Memory = membase; PointerMarshall(pStubMsg, bufptr, *(unsigned char**)memptr, info+4); pStubMsg->Memory = saved_memory; } } pFormat += 8 * count; } if (saved_buffer) { pStubMsg->PointerBufferMark = pStubMsg->Buffer; pStubMsg->Buffer = saved_buffer; } STD_OVERFLOW_CHECK(pStubMsg); return NULL; } /*********************************************************************** * EmbeddedPointerUnmarshall */ static unsigned char * EmbeddedPointerUnmarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pDstBuffer, unsigned char *pSrcMemoryPtrs, PFORMAT_STRING pFormat, unsigned char fMustAlloc) { unsigned char *Mark = pStubMsg->BufferMark; unsigned rep, count, stride; unsigned i; unsigned char *saved_buffer = NULL; TRACE("(%p,%p,%p,%p,%d)\n", pStubMsg, pDstBuffer, pSrcMemoryPtrs, pFormat, fMustAlloc); if (*pFormat != RPC_FC_PP) return NULL; pFormat += 2; if (pStubMsg->PointerBufferMark) { saved_buffer = pStubMsg->Buffer; pStubMsg->Buffer = pStubMsg->PointerBufferMark; pStubMsg->PointerBufferMark = NULL; } while (pFormat[0] != RPC_FC_END) { TRACE("pFormat[0] = 0x%x\n", pFormat[0]); switch (pFormat[0]) { default: FIXME("unknown repeat type %d; assuming no repeat\n", pFormat[0]); /* fallthrough */ case RPC_FC_NO_REPEAT: rep = 1; stride = 0; count = 1; pFormat += 2; break; case RPC_FC_FIXED_REPEAT: rep = *(const WORD*)&pFormat[2]; stride = *(const WORD*)&pFormat[4]; count = *(const WORD*)&pFormat[8]; pFormat += 10; break; case RPC_FC_VARIABLE_REPEAT: rep = (pFormat[1] == 
RPC_FC_VARIABLE_OFFSET) ? pStubMsg->ActualCount : pStubMsg->MaxCount; stride = *(const WORD*)&pFormat[2]; count = *(const WORD*)&pFormat[6]; pFormat += 8; break; } for (i = 0; i < rep; i++) { PFORMAT_STRING info = pFormat; unsigned char *bufdstbase = pDstBuffer + (i * stride); unsigned char *memsrcbase = pSrcMemoryPtrs + (i * stride); unsigned char *bufbase = Mark + (i * stride); unsigned u; for (u=0; u<count; u++,info+=8) { unsigned char **bufdstptr = (unsigned char **)(bufdstbase + *(const SHORT*)&info[2]); unsigned char **memsrcptr = (unsigned char **)(memsrcbase + *(const SHORT*)&info[0]); unsigned char *bufptr = bufbase + *(const SHORT*)&info[2]; PointerUnmarshall(pStubMsg, bufptr, bufdstptr, *memsrcptr, info+4, fMustAlloc); } } pFormat += 8 * count; } if (saved_buffer) { pStubMsg->PointerBufferMark = pStubMsg->Buffer; pStubMsg->Buffer = saved_buffer; } return NULL; } /*********************************************************************** * EmbeddedPointerBufferSize */ static void EmbeddedPointerBufferSize(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat) { unsigned rep, count, stride; unsigned i; ULONG saved_buffer_length = 0; TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat); if (pStubMsg->IgnoreEmbeddedPointers) return; if (*pFormat != RPC_FC_PP) return; pFormat += 2; if (pStubMsg->PointerLength) { saved_buffer_length = pStubMsg->BufferLength; pStubMsg->BufferLength = pStubMsg->PointerLength; pStubMsg->PointerLength = 0; } while (pFormat[0] != RPC_FC_END) { switch (pFormat[0]) { default: FIXME("unknown repeat type %d; assuming no repeat\n", pFormat[0]); /* fallthrough */ case RPC_FC_NO_REPEAT: rep = 1; stride = 0; count = 1; pFormat += 2; break; case RPC_FC_FIXED_REPEAT: rep = *(const WORD*)&pFormat[2]; stride = *(const WORD*)&pFormat[4]; count = *(const WORD*)&pFormat[8]; pFormat += 10; break; case RPC_FC_VARIABLE_REPEAT: rep = (pFormat[1] == RPC_FC_VARIABLE_OFFSET) ? 
pStubMsg->ActualCount : pStubMsg->MaxCount; stride = *(const WORD*)&pFormat[2]; count = *(const WORD*)&pFormat[6]; pFormat += 8; break; } for (i = 0; i < rep; i++) { PFORMAT_STRING info = pFormat; unsigned char *membase = pMemory + (i * stride); unsigned u; for (u=0; u<count; u++,info+=8) { unsigned char *memptr = membase + *(const SHORT*)&info[0]; unsigned char *saved_memory = pStubMsg->Memory; pStubMsg->Memory = membase; PointerBufferSize(pStubMsg, *(unsigned char**)memptr, info+4); pStubMsg->Memory = saved_memory; } } pFormat += 8 * count; } if (saved_buffer_length) { pStubMsg->PointerLength = pStubMsg->BufferLength; pStubMsg->BufferLength = saved_buffer_length; } } /*********************************************************************** * EmbeddedPointerMemorySize [internal] */ static ULONG EmbeddedPointerMemorySize(PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat) { unsigned char *Mark = pStubMsg->BufferMark; unsigned rep, count, stride; unsigned i; unsigned char *saved_buffer = NULL; TRACE("(%p,%p)\n", pStubMsg, pFormat); if (pStubMsg->IgnoreEmbeddedPointers) return 0; if (pStubMsg->PointerBufferMark) { saved_buffer = pStubMsg->Buffer; pStubMsg->Buffer = pStubMsg->PointerBufferMark; pStubMsg->PointerBufferMark = NULL; } if (*pFormat != RPC_FC_PP) return 0; pFormat += 2; while (pFormat[0] != RPC_FC_END) { switch (pFormat[0]) { default: FIXME("unknown repeat type %d; assuming no repeat\n", pFormat[0]); /* fallthrough */ case RPC_FC_NO_REPEAT: rep = 1; stride = 0; count = 1; pFormat += 2; break; case RPC_FC_FIXED_REPEAT: rep = *(const WORD*)&pFormat[2]; stride = *(const WORD*)&pFormat[4]; count = *(const WORD*)&pFormat[8]; pFormat += 10; break; case RPC_FC_VARIABLE_REPEAT: rep = (pFormat[1] == RPC_FC_VARIABLE_OFFSET) ? 
pStubMsg->ActualCount : pStubMsg->MaxCount; stride = *(const WORD*)&pFormat[2]; count = *(const WORD*)&pFormat[6]; pFormat += 8; break; } for (i = 0; i < rep; i++) { PFORMAT_STRING info = pFormat; unsigned char *bufbase = Mark + (i * stride); unsigned u; for (u=0; u<count; u++,info+=8) { unsigned char *bufptr = bufbase + *(const SHORT*)&info[2]; PointerMemorySize(pStubMsg, bufptr, info+4); } } pFormat += 8 * count; } if (saved_buffer) { pStubMsg->PointerBufferMark = pStubMsg->Buffer; pStubMsg->Buffer = saved_buffer; } return 0; } /*********************************************************************** * EmbeddedPointerFree [internal] */ static void EmbeddedPointerFree(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat) { unsigned rep, count, stride; unsigned i; TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat); if (*pFormat != RPC_FC_PP) return; pFormat += 2; while (pFormat[0] != RPC_FC_END) { switch (pFormat[0]) { default: FIXME("unknown repeat type %d; assuming no repeat\n", pFormat[0]); /* fallthrough */ case RPC_FC_NO_REPEAT: rep = 1; stride = 0; count = 1; pFormat += 2; break; case RPC_FC_FIXED_REPEAT: rep = *(const WORD*)&pFormat[2]; stride = *(const WORD*)&pFormat[4]; count = *(const WORD*)&pFormat[8]; pFormat += 10; break; case RPC_FC_VARIABLE_REPEAT: rep = (pFormat[1] == RPC_FC_VARIABLE_OFFSET) ? 
pStubMsg->ActualCount : pStubMsg->MaxCount; stride = *(const WORD*)&pFormat[2]; count = *(const WORD*)&pFormat[6]; pFormat += 8; break; } for (i = 0; i < rep; i++) { PFORMAT_STRING info = pFormat; unsigned char *membase = pMemory + (i * stride); unsigned u; for (u=0; u<count; u++,info+=8) { unsigned char *memptr = membase + *(const SHORT*)&info[0]; unsigned char *saved_memory = pStubMsg->Memory; pStubMsg->Memory = membase; PointerFree(pStubMsg, *(unsigned char**)memptr, info+4); pStubMsg->Memory = saved_memory; } } pFormat += 8 * count; } } /*********************************************************************** * NdrPointerMarshall [RPCRT4.@] */ unsigned char * WINAPI NdrPointerMarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat) { unsigned char *Buffer; TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat); /* Increment the buffer here instead of in PointerMarshall, * as that is used by embedded pointers which already handle the incrementing * the buffer, and shouldn't write any additional pointer data to the wire */ if (*pFormat != RPC_FC_RP) { align_pointer_clear(&pStubMsg->Buffer, 4); Buffer = pStubMsg->Buffer; safe_buffer_increment(pStubMsg, 4); } else Buffer = pStubMsg->Buffer; PointerMarshall(pStubMsg, Buffer, pMemory, pFormat); return NULL; } /*********************************************************************** * NdrPointerUnmarshall [RPCRT4.@] */ unsigned char * WINAPI NdrPointerUnmarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char **ppMemory, PFORMAT_STRING pFormat, unsigned char fMustAlloc) { unsigned char *Buffer; TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc); if (*pFormat == RPC_FC_RP) { Buffer = pStubMsg->Buffer; /* Do the NULL ref pointer check here because embedded pointers can be * NULL if the type the pointer is embedded in was allocated rather than * being passed in by the client */ if (pStubMsg->IsClient && !*ppMemory) { ERR("NULL ref pointer is not allowed\n"); 
            RpcRaiseException(RPC_X_NULL_REF_POINTER);
        }
    }
    else
    {
        /* Increment the buffer here instead of in PointerUnmarshall,
         * as that is used by embedded pointers which already handle the incrementing
         * the buffer, and shouldn't read any additional pointer data from the
         * buffer */
        align_pointer(&pStubMsg->Buffer, 4);
        Buffer = pStubMsg->Buffer;
        safe_buffer_increment(pStubMsg, 4);
    }

    PointerUnmarshall(pStubMsg, Buffer, ppMemory, *ppMemory, pFormat, fMustAlloc);

    return NULL;
}

/***********************************************************************
 *           NdrPointerBufferSize [RPCRT4.@]
 *
 * Adds the wire size of a top-level pointer to pStubMsg->BufferLength.
 */
void WINAPI NdrPointerBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                 unsigned char *pMemory,
                                 PFORMAT_STRING pFormat)
{
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

    /* Increment the buffer length here instead of in PointerBufferSize,
     * as that is used by embedded pointers which already handle the buffer
     * length, and shouldn't write anything more to the wire */
    if (*pFormat != RPC_FC_RP)
    {
        /* non-ref pointers carry a 4-byte referent id */
        align_length(&pStubMsg->BufferLength, 4);
        safe_buffer_length_increment(pStubMsg, 4);
    }

    PointerBufferSize(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrPointerMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled in-memory size of a top-level pointer,
 * accumulating into (and returning) pStubMsg->MemorySize.
 */
ULONG WINAPI NdrPointerMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                  PFORMAT_STRING pFormat)
{
    unsigned char *Buffer = pStubMsg->Buffer;
    if (*pFormat != RPC_FC_RP)
    {
        align_pointer(&pStubMsg->Buffer, 4);
        safe_buffer_increment(pStubMsg, 4);
    }
    align_length(&pStubMsg->MemorySize, sizeof(void *));
    return PointerMemorySize(pStubMsg, Buffer, pFormat);
}

/***********************************************************************
 *           NdrPointerFree [RPCRT4.@]
 *
 * Frees the memory a top-level pointer refers to (delegates to PointerFree).
 */
void WINAPI NdrPointerFree(PMIDL_STUB_MESSAGE pStubMsg,
                           unsigned char *pMemory,
                           PFORMAT_STRING pFormat)
{
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
    PointerFree(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrSimpleTypeMarshall [RPCRT4.@]
 *
 * Marshals a single base type (FormatChar selects which).
 */
void WINAPI NdrSimpleTypeMarshall( PMIDL_STUB_MESSAGE pStubMsg, unsigned char* pMemory,
                                   unsigned char FormatChar )
{
    NdrBaseTypeMarshall(pStubMsg, pMemory, &FormatChar);
}

/***********************************************************************
 *           NdrSimpleTypeUnmarshall [RPCRT4.@]
 *
 * Unmarshall a base type.
 *
 * NOTES
 *  Doesn't check that the buffer is long enough before copying, so the caller
 *  should do this.
 */
void WINAPI NdrSimpleTypeUnmarshall( PMIDL_STUB_MESSAGE pStubMsg, unsigned char* pMemory,
                                     unsigned char FormatChar )
{
/* align the buffer to the type's size, copy one value out, and advance */
#define BASE_TYPE_UNMARSHALL(type) \
        align_pointer(&pStubMsg->Buffer, sizeof(type)); \
	TRACE("pMemory: %p\n", pMemory); \
	*(type *)pMemory = *(type *)pStubMsg->Buffer; \
        pStubMsg->Buffer += sizeof(type);

    switch(FormatChar)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        BASE_TYPE_UNMARSHALL(UCHAR);
        TRACE("value: 0x%02x\n", *pMemory);
        break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
        BASE_TYPE_UNMARSHALL(USHORT);
        TRACE("value: 0x%04x\n", *(USHORT *)pMemory);
        break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ERROR_STATUS_T:
    case RPC_FC_ENUM32:
        BASE_TYPE_UNMARSHALL(ULONG);
        TRACE("value: 0x%08x\n", *(ULONG *)pMemory);
        break;
    case RPC_FC_FLOAT:
        BASE_TYPE_UNMARSHALL(float);
        TRACE("value: %f\n", *(float *)pMemory);
        break;
    case RPC_FC_DOUBLE:
        BASE_TYPE_UNMARSHALL(double);
        TRACE("value: %f\n", *(double *)pMemory);
        break;
    case RPC_FC_HYPER:
        BASE_TYPE_UNMARSHALL(ULONGLONG);
        TRACE("value: %s\n", wine_dbgstr_longlong(*(ULONGLONG *)pMemory));
        break;
    case RPC_FC_ENUM16:
        align_pointer(&pStubMsg->Buffer, sizeof(USHORT));
        TRACE("pMemory: %p\n", pMemory);
        /* 16-bits on the wire, but int in memory */
        *(UINT *)pMemory = *(USHORT *)pStubMsg->Buffer;
        pStubMsg->Buffer += sizeof(USHORT);
        TRACE("value: 0x%08x\n", *(UINT *)pMemory);
        break;
    case RPC_FC_INT3264:
        align_pointer(&pStubMsg->Buffer, sizeof(INT));
        /* 32-bits on the wire, but int_ptr in memory */
        *(INT_PTR *)pMemory = *(INT *)pStubMsg->Buffer;
        pStubMsg->Buffer +=
                           sizeof(INT);
        TRACE("value: 0x%08lx\n", *(INT_PTR *)pMemory);
        break;
    case RPC_FC_UINT3264:
        align_pointer(&pStubMsg->Buffer, sizeof(UINT));
        /* 32-bits on the wire, but int_ptr in memory */
        *(UINT_PTR *)pMemory = *(UINT *)pStubMsg->Buffer;
        pStubMsg->Buffer += sizeof(UINT);
        TRACE("value: 0x%08lx\n", *(UINT_PTR *)pMemory);
        break;
    case RPC_FC_IGNORE:
        break;
    default:
        FIXME("Unhandled base type: 0x%02x\n", FormatChar);
    }
#undef BASE_TYPE_UNMARSHALL
}

/***********************************************************************
 *           NdrSimpleStructMarshall [RPCRT4.@]
 *
 * Marshals a simple (fixed-size, flat) structure; format layout:
 * pFormat[1] = alignment - 1, pFormat[2..3] = struct size.
 */
unsigned char * WINAPI NdrSimpleStructMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                               unsigned char *pMemory,
                                               PFORMAT_STRING pFormat)
{
    unsigned size = *(const WORD*)(pFormat+2);
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

    align_pointer_clear(&pStubMsg->Buffer, pFormat[1] + 1);

    pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_copy_to_buffer(pStubMsg, pMemory, size);

    /* RPC_FC_PSTRUCT (and others) carry a trailing pointer layout */
    if (pFormat[0] != RPC_FC_STRUCT)
        EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat+4);

    return NULL;
}

/***********************************************************************
 *           NdrSimpleStructUnmarshall [RPCRT4.@]
 *
 * Unmarshals a simple structure; may point *ppMemory straight into the
 * RPC buffer on the server side to avoid a copy.
 */
unsigned char * WINAPI NdrSimpleStructUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                 unsigned char **ppMemory,
                                                 PFORMAT_STRING pFormat,
                                                 unsigned char fMustAlloc)
{
    unsigned size = *(const WORD*)(pFormat+2);
    unsigned char *saved_buffer;
    TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

    align_pointer(&pStubMsg->Buffer, pFormat[1] + 1);

    if (fMustAlloc)
        *ppMemory = NdrAllocate(pStubMsg, size);
    else
    {
        if (!pStubMsg->IsClient && !*ppMemory)
            /* for servers, we just point straight into the RPC buffer */
            *ppMemory = pStubMsg->Buffer;
    }

    saved_buffer = pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_buffer_increment(pStubMsg, size);
    if (pFormat[0] == RPC_FC_PSTRUCT)
        EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat+4, fMustAlloc);

    TRACE("copying %p to %p\n", saved_buffer, *ppMemory);
    if (*ppMemory != saved_buffer)
        memcpy(*ppMemory, saved_buffer, size);

    return NULL;
}

/***********************************************************************
 *           NdrSimpleStructBufferSize [RPCRT4.@]
 *
 * Adds a simple structure's wire size to pStubMsg->BufferLength.
 */
void WINAPI NdrSimpleStructBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                      unsigned char *pMemory,
                                      PFORMAT_STRING pFormat)
{
    unsigned size = *(const WORD*)(pFormat+2);
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

    align_length(&pStubMsg->BufferLength, pFormat[1] + 1);

    safe_buffer_length_increment(pStubMsg, size);
    if (pFormat[0] != RPC_FC_STRUCT)
        EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat+4);
}

/***********************************************************************
 *           NdrSimpleStructMemorySize [RPCRT4.@]
 *
 * Accumulates a simple structure's in-memory size into pStubMsg->MemorySize.
 */
ULONG WINAPI NdrSimpleStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                       PFORMAT_STRING pFormat)
{
    unsigned short size = *(const WORD *)(pFormat+2);

    TRACE("(%p,%p)\n", pStubMsg, pFormat);

    align_pointer(&pStubMsg->Buffer, pFormat[1] + 1);
    pStubMsg->MemorySize += size;
    safe_buffer_increment(pStubMsg, size);

    if (pFormat[0] != RPC_FC_STRUCT)
        EmbeddedPointerMemorySize(pStubMsg, pFormat+4);
    return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrSimpleStructFree [RPCRT4.@]
 *
 * Frees any pointees embedded in a simple structure.
 */
void WINAPI NdrSimpleStructFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
    if (pFormat[0] != RPC_FC_STRUCT)
        EmbeddedPointerFree(pStubMsg, pMemory, pFormat+4);
}

/* Array helpers */

/* Computes the conformance (and variance where applicable) of an array and
 * adds the size of the on-wire conformance value to BufferLength. */
static inline void array_compute_and_size_conformance(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    DWORD count;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        ComputeConformance(pStubMsg, pMemory, pFormat+4, 0);
        SizeConformance(pStubMsg);
        break;
    case RPC_FC_CVARRAY:
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat + 4, 0);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
        SizeConformance(pStubMsg);
        break;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
        {
            TRACE("string=%s\n", debugstr_a((const char *)pMemory));
            /* conformance counts characters including the terminating NUL */
            pStubMsg->ActualCount = strlen((const char *)pMemory)+1;
        }
        else
        {
            TRACE("string=%s\n", debugstr_w((LPCWSTR)pMemory));
            pStubMsg->ActualCount = strlenW((LPCWSTR)pMemory)+1;
        }

        if (pFormat[1] == RPC_FC_STRING_SIZED)
            pFormat = ComputeConformance(pStubMsg, pMemory, pFormat + 2, 0);
        else
            pStubMsg->MaxCount = pStubMsg->ActualCount;

        SizeConformance(pStubMsg);
        break;
    case RPC_FC_BOGUS_ARRAY:
        count = *(const WORD *)(pFormat + 2);
        pFormat += 4;
        if (IsConformanceOrVariancePresent(pFormat)) SizeConformance(pStubMsg);
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, count);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, pStubMsg->MaxCount);
        break;
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/* Adds an array's element data (and variance, where applicable) to
 * BufferLength; conformance has already been sized by
 * array_compute_and_size_conformance. */
static inline void array_buffer_size(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory,
    PFORMAT_STRING pFormat, unsigned char fHasPointers)
{
    DWORD i, size;
    DWORD esize;
    unsigned char alignment;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        pFormat = SkipConformance(pStubMsg, pFormat + 4);

        align_length(&pStubMsg->BufferLength, alignment);

        size = safe_multiply(esize, pStubMsg->MaxCount);
        /* conformance value plus array */
        safe_buffer_length_increment(pStubMsg, size);

        if (fHasPointers)
            EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
        break;
    case RPC_FC_CVARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        pFormat = SkipConformance(pStubMsg, pFormat + 4);
        pFormat = SkipVariance(pStubMsg, pFormat);

        SizeVariance(pStubMsg);

        align_length(&pStubMsg->BufferLength, alignment);

        size = safe_multiply(esize, pStubMsg->ActualCount);
        safe_buffer_length_increment(pStubMsg, size);

        if (fHasPointers)
            EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
        break;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
            esize = 1;
        else
            esize = 2;

        SizeVariance(pStubMsg);

        size = safe_multiply(esize, pStubMsg->ActualCount);
        safe_buffer_length_increment(pStubMsg, size);
        break;
    case RPC_FC_BOGUS_ARRAY:
        alignment = pFormat[1] + 1;
        pFormat = SkipConformance(pStubMsg, pFormat + 4);
        if (IsConformanceOrVariancePresent(pFormat)) SizeVariance(pStubMsg);
        pFormat = SkipVariance(pStubMsg, pFormat);

        align_length(&pStubMsg->BufferLength, alignment);

        /* complex elements are sized one at a time */
        size = pStubMsg->ActualCount;
        for (i = 0; i < size; i++)
            pMemory = ComplexBufferSize(pStubMsg, pMemory, pFormat, NULL);
        break;
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/* Computes an array's conformance/variance and writes the conformance
 * value to the wire. */
static inline void array_compute_and_write_conformance(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    ULONG def;
    BOOL conformance_present;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        ComputeConformance(pStubMsg, pMemory, pFormat+4, 0);
        WriteConformance(pStubMsg);
        break;
    case RPC_FC_CVARRAY:
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat + 4, 0);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
        WriteConformance(pStubMsg);
        break;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
        {
            TRACE("string=%s\n", debugstr_a((const char *)pMemory));
            pStubMsg->ActualCount = strlen((const char *)pMemory)+1;
        }
        else
        {
            TRACE("string=%s\n", debugstr_w((LPCWSTR)pMemory));
            pStubMsg->ActualCount = strlenW((LPCWSTR)pMemory)+1;
        }
        if (pFormat[1] == RPC_FC_STRING_SIZED)
            pFormat = ComputeConformance(pStubMsg, pMemory, pFormat + 2, 0);
        else
            pStubMsg->MaxCount = pStubMsg->ActualCount;
        pStubMsg->Offset = 0;
        WriteConformance(pStubMsg);
        break;
    case RPC_FC_BOGUS_ARRAY:
        def = *(const WORD *)(pFormat + 2);
        pFormat += 4;
        conformance_present = IsConformanceOrVariancePresent(pFormat);
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, def);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, pStubMsg->MaxCount);
        if (conformance_present) WriteConformance(pStubMsg);
        break;
    default:
        ERR("unknown array format 0x%x\n", fc);
RpcRaiseException(RPC_X_BAD_STUB_DATA); } } static inline void array_write_variance_and_marshall( unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, PFORMAT_STRING pFormat, unsigned char fHasPointers) { DWORD i, size; DWORD esize; unsigned char alignment; switch (fc) { case RPC_FC_CARRAY: esize = *(const WORD*)(pFormat+2); alignment = pFormat[1] + 1; pFormat = SkipConformance(pStubMsg, pFormat + 4); align_pointer_clear(&pStubMsg->Buffer, alignment); size = safe_multiply(esize, pStubMsg->MaxCount); if (fHasPointers) pStubMsg->BufferMark = pStubMsg->Buffer; safe_copy_to_buffer(pStubMsg, pMemory, size); if (fHasPointers) EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat); break; case RPC_FC_CVARRAY: esize = *(const WORD*)(pFormat+2); alignment = pFormat[1] + 1; pFormat = SkipConformance(pStubMsg, pFormat + 4); pFormat = SkipVariance(pStubMsg, pFormat); WriteVariance(pStubMsg); align_pointer_clear(&pStubMsg->Buffer, alignment); size = safe_multiply(esize, pStubMsg->ActualCount); if (fHasPointers) pStubMsg->BufferMark = pStubMsg->Buffer; safe_copy_to_buffer(pStubMsg, pMemory + pStubMsg->Offset, size); if (fHasPointers) EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat); break; case RPC_FC_C_CSTRING: case RPC_FC_C_WSTRING: if (fc == RPC_FC_C_CSTRING) esize = 1; else esize = 2; WriteVariance(pStubMsg); size = safe_multiply(esize, pStubMsg->ActualCount); safe_copy_to_buffer(pStubMsg, pMemory, size); /* the string itself */ break; case RPC_FC_BOGUS_ARRAY: alignment = pFormat[1] + 1; pFormat = SkipConformance(pStubMsg, pFormat + 4); if (IsConformanceOrVariancePresent(pFormat)) WriteVariance(pStubMsg); pFormat = SkipVariance(pStubMsg, pFormat); align_pointer_clear(&pStubMsg->Buffer, alignment); size = pStubMsg->ActualCount; for (i = 0; i < size; i++) pMemory = ComplexMarshall(pStubMsg, pMemory, pFormat, NULL); break; default: ERR("unknown array format 0x%x\n", fc); RpcRaiseException(RPC_X_BAD_STUB_DATA); } } static inline ULONG array_read_conformance( 
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat)
{
    DWORD def, esize;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        esize = *(const WORD*)(pFormat+2);
        pFormat = ReadConformance(pStubMsg, pFormat+4);
        return safe_multiply(esize, pStubMsg->MaxCount);
    case RPC_FC_CVARRAY:
        esize = *(const WORD*)(pFormat+2);
        pFormat = ReadConformance(pStubMsg, pFormat+4);
        return safe_multiply(esize, pStubMsg->MaxCount);
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
            esize = 1;
        else
            esize = 2;

        if (pFormat[1] == RPC_FC_STRING_SIZED)
            ReadConformance(pStubMsg, pFormat + 2);
        else
            ReadConformance(pStubMsg, NULL);
        return safe_multiply(esize, pStubMsg->MaxCount);
    case RPC_FC_BOGUS_ARRAY:
        def = *(const WORD *)(pFormat + 2);
        pFormat += 4;
        if (IsConformanceOrVariancePresent(pFormat)) pFormat = ReadConformance(pStubMsg, pFormat);
        else
        {
            /* no conformance on the wire: use the fixed count from the format string */
            pStubMsg->MaxCount = def;
            pFormat = SkipConformance( pStubMsg, pFormat );
        }
        pFormat = SkipVariance( pStubMsg, pFormat );

        esize = ComplexStructSize(pStubMsg, pFormat);
        return safe_multiply(pStubMsg->MaxCount, esize);
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/* Reads an array's variance (where applicable) and unmarshals its element
 * data.  Returns the number of buffer bytes consumed by the element data. */
static inline ULONG array_read_variance_and_unmarshall(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, unsigned char **ppMemory,
    PFORMAT_STRING pFormat, unsigned char fMustAlloc,
    unsigned char fUseBufferMemoryServer, unsigned char fUnmarshall)
{
    ULONG bufsize, memsize;
    WORD esize;
    unsigned char alignment;
    unsigned char *saved_buffer, *pMemory;
    ULONG i, offset, count;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        bufsize = memsize = safe_multiply(esize, pStubMsg->MaxCount);

        pFormat = SkipConformance(pStubMsg, pFormat + 4);

        align_pointer(&pStubMsg->Buffer, alignment);

        if (fUnmarshall)
        {
            if (fMustAlloc)
                *ppMemory = NdrAllocate(pStubMsg, memsize);
            else
            {
                if (fUseBufferMemoryServer && !pStubMsg->IsClient && !*ppMemory)
                    /* for servers, we just point straight into the RPC buffer */
                    *ppMemory = pStubMsg->Buffer;
            }

            saved_buffer = pStubMsg->Buffer;
            safe_buffer_increment(pStubMsg, bufsize);

            pStubMsg->BufferMark = saved_buffer;
            EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat, fMustAlloc);

            TRACE("copying %p to %p\n", saved_buffer, *ppMemory);
            if (*ppMemory != saved_buffer)
                memcpy(*ppMemory, saved_buffer, bufsize);
        }
        return bufsize;
    case RPC_FC_CVARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        pFormat = SkipConformance(pStubMsg, pFormat + 4);

        pFormat = ReadVariance(pStubMsg, pFormat, pStubMsg->MaxCount);

        align_pointer(&pStubMsg->Buffer, alignment);

        bufsize = safe_multiply(esize, pStubMsg->ActualCount);
        memsize = safe_multiply(esize, pStubMsg->MaxCount);

        if (fUnmarshall)
        {
            offset = pStubMsg->Offset;

            if (!fMustAlloc && !*ppMemory)
                fMustAlloc = TRUE;
            if (fMustAlloc)
                *ppMemory = NdrAllocate(pStubMsg, memsize);
            saved_buffer = pStubMsg->Buffer;
            safe_buffer_increment(pStubMsg, bufsize);

            pStubMsg->BufferMark = saved_buffer;
            EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat,
                                      fMustAlloc);

            /* copy the varying portion to its offset within the full array */
            memcpy(*ppMemory + offset, saved_buffer, bufsize);
        }
        return bufsize;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
            esize = 1;
        else
            esize = 2;

        ReadVariance(pStubMsg, NULL, pStubMsg->MaxCount);

        if (pFormat[1] != RPC_FC_STRING_SIZED && (pStubMsg->MaxCount != pStubMsg->ActualCount))
        {
            ERR("buffer size %d must equal memory size %ld for non-sized conformant strings\n",
                pStubMsg->ActualCount, pStubMsg->MaxCount);
            RpcRaiseException(RPC_S_INVALID_BOUND);
        }
        if (pStubMsg->Offset)
        {
            ERR("conformant strings can't have Offset (%d)\n", pStubMsg->Offset);
            RpcRaiseException(RPC_S_INVALID_BOUND);
        }

        memsize = safe_multiply(esize, pStubMsg->MaxCount);
        bufsize = safe_multiply(esize, pStubMsg->ActualCount);

        validate_string_data(pStubMsg, bufsize, esize);

        if (fUnmarshall)
        {
            if (fMustAlloc)
                *ppMemory = NdrAllocate(pStubMsg, memsize);
            else
            {
                if (fUseBufferMemoryServer && !pStubMsg->IsClient &&
                    !*ppMemory && (pStubMsg->MaxCount == pStubMsg->ActualCount))
                    /* if the data in the RPC buffer is big enough, we just point
                     * straight into it */
                    *ppMemory = pStubMsg->Buffer;
                else if (!*ppMemory)
                    *ppMemory = NdrAllocate(pStubMsg, memsize);
            }

            if (*ppMemory == pStubMsg->Buffer)
                safe_buffer_increment(pStubMsg, bufsize);
            else
                safe_copy_from_buffer(pStubMsg, *ppMemory, bufsize);

            if (*pFormat == RPC_FC_C_CSTRING)
                TRACE("string=%s\n", debugstr_a((char*)*ppMemory));
            else
                TRACE("string=%s\n", debugstr_w((LPWSTR)*ppMemory));
        }
        return bufsize;
    case RPC_FC_BOGUS_ARRAY:
        alignment = pFormat[1] + 1;
        pFormat = SkipConformance(pStubMsg, pFormat + 4);
        pFormat = ReadVariance(pStubMsg, pFormat, pStubMsg->MaxCount);

        esize = ComplexStructSize(pStubMsg, pFormat);
        memsize = safe_multiply(esize, pStubMsg->MaxCount);

        assert( fUnmarshall );

        if (!fMustAlloc && !*ppMemory)
            fMustAlloc = TRUE;
        if (fMustAlloc)
            *ppMemory = NdrAllocate(pStubMsg, memsize);

        align_pointer(&pStubMsg->Buffer, alignment);
        saved_buffer = pStubMsg->Buffer;

        pMemory = *ppMemory;
        count = pStubMsg->ActualCount;
        for (i = 0; i < count; i++)
            pMemory = ComplexUnmarshall(pStubMsg, pMemory, pFormat, NULL, fMustAlloc);
        return pStubMsg->Buffer - saved_buffer;
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/* Reads an array's variance (where applicable), skips its wire data and
 * accumulates its in-memory size into pStubMsg->MemorySize. */
static inline void array_memory_size(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat,
    unsigned char fHasPointers)
{
    ULONG i, count, SavedMemorySize;
    ULONG bufsize, memsize;
    DWORD esize;
    unsigned char alignment;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        pFormat = SkipConformance(pStubMsg, pFormat + 4);

        bufsize = memsize = safe_multiply(esize, pStubMsg->MaxCount);
        pStubMsg->MemorySize += memsize;

        align_pointer(&pStubMsg->Buffer, alignment);
        if (fHasPointers)
            pStubMsg->BufferMark = pStubMsg->Buffer;
        safe_buffer_increment(pStubMsg, bufsize);

        if (fHasPointers)
            EmbeddedPointerMemorySize(pStubMsg, pFormat);
        break;
    case
         RPC_FC_CVARRAY:
        esize = *(const WORD*)(pFormat+2);
        alignment = pFormat[1] + 1;

        pFormat = SkipConformance(pStubMsg, pFormat + 4);

        pFormat = ReadVariance(pStubMsg, pFormat, pStubMsg->MaxCount);

        bufsize = safe_multiply(esize, pStubMsg->ActualCount);
        memsize = safe_multiply(esize, pStubMsg->MaxCount);
        pStubMsg->MemorySize += memsize;

        align_pointer(&pStubMsg->Buffer, alignment);
        if (fHasPointers)
            pStubMsg->BufferMark = pStubMsg->Buffer;
        safe_buffer_increment(pStubMsg, bufsize);

        if (fHasPointers)
            EmbeddedPointerMemorySize(pStubMsg, pFormat);
        break;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        if (fc == RPC_FC_C_CSTRING)
            esize = 1;
        else
            esize = 2;

        ReadVariance(pStubMsg, NULL, pStubMsg->MaxCount);

        if (pFormat[1] != RPC_FC_STRING_SIZED && (pStubMsg->MaxCount != pStubMsg->ActualCount))
        {
            ERR("buffer size %d must equal memory size %ld for non-sized conformant strings\n",
                pStubMsg->ActualCount, pStubMsg->MaxCount);
            RpcRaiseException(RPC_S_INVALID_BOUND);
        }
        if (pStubMsg->Offset)
        {
            ERR("conformant strings can't have Offset (%d)\n", pStubMsg->Offset);
            RpcRaiseException(RPC_S_INVALID_BOUND);
        }

        memsize = safe_multiply(esize, pStubMsg->MaxCount);
        bufsize = safe_multiply(esize, pStubMsg->ActualCount);

        validate_string_data(pStubMsg, bufsize, esize);

        safe_buffer_increment(pStubMsg, bufsize);
        pStubMsg->MemorySize += memsize;
        break;
    case RPC_FC_BOGUS_ARRAY:
        alignment = pFormat[1] + 1;
        pFormat = SkipConformance(pStubMsg, pFormat + 4);
        pFormat = ReadVariance(pStubMsg, pFormat, pStubMsg->MaxCount);

        align_pointer(&pStubMsg->Buffer, alignment);

        SavedMemorySize = pStubMsg->MemorySize;

        esize = ComplexStructSize(pStubMsg, pFormat);
        memsize = safe_multiply(pStubMsg->MaxCount, esize);

        /* walk the elements to advance the buffer, then override the running
         * total with the computed full-array memory size */
        count = pStubMsg->ActualCount;
        for (i = 0; i < count; i++)
            ComplexStructMemorySize(pStubMsg, pFormat, NULL);

        pStubMsg->MemorySize = SavedMemorySize + memsize;
        break;
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/* Frees all pointees embedded in an array's elements; the array storage
 * itself is not freed here. */
static inline void array_free(
    unsigned char fc, PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory, PFORMAT_STRING pFormat, unsigned char fHasPointers)
{
    DWORD i, count;

    switch (fc)
    {
    case RPC_FC_CARRAY:
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat+4, 0);
        if (fHasPointers)
            EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
        break;
    case RPC_FC_CVARRAY:
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat+4, 0);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
        if (fHasPointers)
            EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
        break;
    case RPC_FC_C_CSTRING:
    case RPC_FC_C_WSTRING:
        /* No embedded pointers so nothing to do */
        break;
    case RPC_FC_BOGUS_ARRAY:
        count = *(const WORD *)(pFormat + 2);
        pFormat = ComputeConformance(pStubMsg, pMemory, pFormat + 4, count);
        pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, pStubMsg->MaxCount);

        count = pStubMsg->ActualCount;
        for (i = 0; i < count; i++)
            pMemory = ComplexFree(pStubMsg, pMemory, pFormat, NULL);
        break;
    default:
        ERR("unknown array format 0x%x\n", fc);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
}

/*
 * NdrConformantString:
 *
 * What MS calls a ConformantString is, in DCE terminology,
 * a Varying-Conformant String.
 * [
 *   maxlen: DWORD (max # of CHARTYPE characters, inclusive of '\0')
 *   offset: DWORD (actual string data begins at (offset) CHARTYPE's
 *           into unmarshalled string)
 *   length: DWORD (# of CHARTYPE characters, inclusive of '\0')
 *   [
 *     data: CHARTYPE[maxlen]
 *   ]
 * ], where CHARTYPE is the appropriate character type (specified externally)
 *
 */

/***********************************************************************
 *           NdrConformantStringMarshall [RPCRT4.@]
 *
 * Writes a conformant varying string to the wire: conformance (maxlen),
 * then variance (offset/length) and the character data.  Only byte
 * (RPC_FC_C_CSTRING) and wide (RPC_FC_C_WSTRING) strings are supported.
 */
unsigned char *WINAPI NdrConformantStringMarshall(MIDL_STUB_MESSAGE *pStubMsg,
  unsigned char *pszMessage, PFORMAT_STRING pFormat)
{
  TRACE("(pStubMsg == ^%p, pszMessage == ^%p, pFormat == ^%p)\n", pStubMsg, pszMessage, pFormat);

  if (pFormat[0] != RPC_FC_C_CSTRING && pFormat[0] != RPC_FC_C_WSTRING)
  {
    ERR("Unhandled string type: %#x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* allow compiler to optimise inline function by passing constant into
   * these functions */
  if (pFormat[0] == RPC_FC_C_CSTRING)
  {
    array_compute_and_write_conformance(RPC_FC_C_CSTRING, pStubMsg, pszMessage,
                                        pFormat);
    array_write_variance_and_marshall(RPC_FC_C_CSTRING, pStubMsg, pszMessage,
                                      pFormat, TRUE /* fHasPointers */);
  }
  else
  {
    array_compute_and_write_conformance(RPC_FC_C_WSTRING, pStubMsg, pszMessage,
                                        pFormat);
    array_write_variance_and_marshall(RPC_FC_C_WSTRING, pStubMsg, pszMessage,
                                      pFormat, TRUE /* fHasPointers */);
  }

  return NULL;
}

/***********************************************************************
 *           NdrConformantStringBufferSize [RPCRT4.@]
 *
 * Accumulates into pStubMsg->BufferLength the space needed to marshal a
 * conformant varying string (conformance + variance + character data).
 */
void WINAPI NdrConformantStringBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
  unsigned char* pMemory, PFORMAT_STRING pFormat)
{
  TRACE("(pStubMsg == ^%p, pMemory == ^%p, pFormat == ^%p)\n", pStubMsg, pMemory, pFormat);

  if (pFormat[0] != RPC_FC_C_CSTRING && pFormat[0] != RPC_FC_C_WSTRING)
  {
    ERR("Unhandled string type: %#x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* allow compiler to optimise inline function by passing constant into
   * these functions */
  if (pFormat[0] == RPC_FC_C_CSTRING)
  {
    array_compute_and_size_conformance(RPC_FC_C_CSTRING, pStubMsg, pMemory,
                                       pFormat);
    array_buffer_size(RPC_FC_C_CSTRING, pStubMsg, pMemory, pFormat,
                      TRUE /* fHasPointers */);
  }
  else
  {
    array_compute_and_size_conformance(RPC_FC_C_WSTRING, pStubMsg, pMemory,
                                       pFormat);
    array_buffer_size(RPC_FC_C_WSTRING, pStubMsg, pMemory, pFormat,
                      TRUE /* fHasPointers */);
  }
}

/************************************************************************
 *           NdrConformantStringMemorySize [RPCRT4.@]
 *
 * Reads conformance from the buffer and accumulates the unmarshalled
 * memory size into pStubMsg->MemorySize, which is returned.
 */
ULONG WINAPI NdrConformantStringMemorySize( PMIDL_STUB_MESSAGE pStubMsg,
  PFORMAT_STRING pFormat )
{
  TRACE("(pStubMsg == ^%p, pFormat == ^%p)\n", pStubMsg, pFormat);

  if (pFormat[0] != RPC_FC_C_CSTRING && pFormat[0] != RPC_FC_C_WSTRING)
  {
    ERR("Unhandled string type: %#x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* allow compiler to optimise inline function by passing constant into
   * these functions */
  if (pFormat[0] == RPC_FC_C_CSTRING)
  {
    array_read_conformance(RPC_FC_C_CSTRING, pStubMsg, pFormat);
    array_memory_size(RPC_FC_C_CSTRING, pStubMsg, pFormat,
                      TRUE /* fHasPointers */);
  }
  else
  {
    array_read_conformance(RPC_FC_C_WSTRING, pStubMsg, pFormat);
    array_memory_size(RPC_FC_C_WSTRING, pStubMsg, pFormat,
                      TRUE /* fHasPointers */);
  }

  return pStubMsg->MemorySize;
}

/************************************************************************
 *           NdrConformantStringUnmarshall [RPCRT4.@]
 *
 * Reads conformance and variance from the buffer and unmarshals the
 * string into *ppMemory (allocating if fMustAlloc or *ppMemory is NULL).
 */
unsigned char *WINAPI NdrConformantStringUnmarshall( PMIDL_STUB_MESSAGE pStubMsg,
  unsigned char** ppMemory, PFORMAT_STRING pFormat, unsigned char fMustAlloc )
{
  TRACE("(pStubMsg == ^%p, *pMemory == ^%p, pFormat == ^%p, fMustAlloc == %u)\n",
    pStubMsg, *ppMemory, pFormat, fMustAlloc);

  if (pFormat[0] != RPC_FC_C_CSTRING && pFormat[0] != RPC_FC_C_WSTRING)
  {
    ERR("Unhandled string type: %#x\n", *pFormat);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* allow compiler to optimise inline function by passing constant into
   * these functions */
  if (pFormat[0] == RPC_FC_C_CSTRING)
  {
    array_read_conformance(RPC_FC_C_CSTRING, pStubMsg, pFormat);
    array_read_variance_and_unmarshall(RPC_FC_C_CSTRING, pStubMsg, ppMemory,
                                       pFormat, fMustAlloc,
                                       TRUE /* fUseBufferMemoryServer */,
                                       TRUE /* fUnmarshall */);
  }
  else
  {
    array_read_conformance(RPC_FC_C_WSTRING, pStubMsg, pFormat);
    array_read_variance_and_unmarshall(RPC_FC_C_WSTRING, pStubMsg, ppMemory,
                                       pFormat, fMustAlloc,
                                       TRUE /* fUseBufferMemoryServer */,
                                       TRUE /* fUnmarshall */);
  }

  return NULL;
}

/***********************************************************************
 *           NdrNonConformantStringMarshall [RPCRT4.@]
 *
 * Marshals a fixed-max-size (non-conformant) varying string: variance
 * (offset is always 0) followed by the used characters including '\0'.
 * The max size comes from the format string, not from the data.
 */
unsigned char *  WINAPI NdrNonConformantStringMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
  ULONG esize, size, maxsize;

  TRACE("(pStubMsg == ^%p, pMemory == ^%p, pFormat == ^%p)\n", pStubMsg, pMemory, pFormat);

  /* declared maximum character count, from the format string */
  maxsize = *(const USHORT *)&pFormat[2];

  if (*pFormat == RPC_FC_CSTRING)
  {
    ULONG i = 0;
    const char *str = (const char *)pMemory;
    /* bounded strlen: never scans past the declared maximum */
    while (i < maxsize && str[i]) i++;
    TRACE("string=%s\n", debugstr_an(str, i));
    pStubMsg->ActualCount = i + 1; /* include the terminator */
    esize = 1;
  }
  else if (*pFormat == RPC_FC_WSTRING)
  {
    ULONG i = 0;
    const WCHAR *str = (const WCHAR *)pMemory;
    while (i < maxsize && str[i]) i++;
    TRACE("string=%s\n", debugstr_wn(str, i));
    pStubMsg->ActualCount = i + 1; /* include the terminator */
    esize = 2;
  }
  else
  {
    ERR("Unhandled string type: %#x\n", *pFormat);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  pStubMsg->Offset = 0;
  WriteVariance(pStubMsg);

  size = safe_multiply(esize, pStubMsg->ActualCount);
  safe_copy_to_buffer(pStubMsg, pMemory, size); /* the string itself */

  return NULL;
}

/***********************************************************************
 *           NdrNonConformantStringUnmarshall [RPCRT4.@]
 *
 * Unmarshals a non-conformant varying string.  ReadVariance bounds
 * ActualCount against the declared maximum, so the copy below cannot
 * overrun the maxsize-sized allocation.
 */
unsigned char *  WINAPI NdrNonConformantStringUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
  ULONG bufsize, memsize, esize, maxsize;

  TRACE("(pStubMsg == ^%p, *pMemory == ^%p, pFormat == ^%p, fMustAlloc == %u)\n",
    pStubMsg, *ppMemory, pFormat, fMustAlloc);

  maxsize = *(const USHORT *)&pFormat[2];

  ReadVariance(pStubMsg, NULL, maxsize);
  if (pStubMsg->Offset)
  {
    ERR("non-conformant strings can't have Offset (%d)\n", pStubMsg->Offset);
    RpcRaiseException(RPC_S_INVALID_BOUND);
  }

  if (*pFormat == RPC_FC_CSTRING) esize = 1;
  else if (*pFormat == RPC_FC_WSTRING) esize = 2;
  else
  {
    ERR("Unhandled string type: %#x\n", *pFormat);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* allocate for the full declared size, copy only the used portion */
  memsize = esize * maxsize;
  bufsize = safe_multiply(esize, pStubMsg->ActualCount);

  validate_string_data(pStubMsg, bufsize, esize);

  if (!fMustAlloc && !*ppMemory)
    fMustAlloc = TRUE;
  if (fMustAlloc)
    *ppMemory = NdrAllocate(pStubMsg, memsize);

  safe_copy_from_buffer(pStubMsg, *ppMemory, bufsize);

  if (*pFormat == RPC_FC_CSTRING)
  {
    TRACE("string=%s\n", debugstr_an((char*)*ppMemory, pStubMsg->ActualCount));
  }
  else if (*pFormat == RPC_FC_WSTRING)
  {
    TRACE("string=%s\n", debugstr_wn((LPWSTR)*ppMemory, pStubMsg->ActualCount));
  }

  return NULL;
}

/***********************************************************************
 *           NdrNonConformantStringBufferSize [RPCRT4.@]
 *
 * Accumulates the buffer space needed for a non-conformant varying
 * string: variance header plus the used characters (incl. '\0').
 */
void WINAPI NdrNonConformantStringBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
  ULONG esize, maxsize;

  TRACE("(pStubMsg == ^%p, pMemory == ^%p, pFormat == ^%p)\n", pStubMsg, pMemory, pFormat);

  maxsize = *(const USHORT *)&pFormat[2];

  SizeVariance(pStubMsg);

  if (*pFormat == RPC_FC_CSTRING)
  {
    ULONG i = 0;
    const char *str = (const char *)pMemory;
    while (i < maxsize && str[i]) i++;
    TRACE("string=%s\n", debugstr_an(str, i));
    pStubMsg->ActualCount = i + 1;
    esize = 1;
  }
  else if (*pFormat == RPC_FC_WSTRING)
  {
    ULONG i = 0;
    const WCHAR *str = (const WCHAR *)pMemory;
    while (i < maxsize && str[i]) i++;
    TRACE("string=%s\n", debugstr_wn(str, i));
    pStubMsg->ActualCount = i + 1;
    esize = 2;
  }
  else
  {
    ERR("Unhandled string type: %#x\n", *pFormat);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  safe_buffer_length_increment(pStubMsg, safe_multiply(esize, pStubMsg->ActualCount));
}
/***********************************************************************
 *           NdrNonConformantStringMemorySize [RPCRT4.@]
 *
 * Sizes the memory needed for a non-conformant varying string and
 * advances the buffer pointer past the string data.
 */
ULONG WINAPI NdrNonConformantStringMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
  ULONG bufsize, memsize, esize, maxsize;

  TRACE("(pStubMsg == ^%p, pFormat == ^%p)\n", pStubMsg, pFormat);

  maxsize = *(const USHORT *)&pFormat[2];

  ReadVariance(pStubMsg, NULL, maxsize);

  if (pStubMsg->Offset)
  {
    ERR("non-conformant strings can't have Offset (%d)\n", pStubMsg->Offset);
    RpcRaiseException(RPC_S_INVALID_BOUND);
  }

  if (*pFormat == RPC_FC_CSTRING) esize = 1;
  else if (*pFormat == RPC_FC_WSTRING) esize = 2;
  else
  {
    ERR("Unhandled string type: %#x\n", *pFormat);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  /* memory size is the full declared max; only the used part is in the buffer */
  memsize = esize * maxsize;
  bufsize = safe_multiply(esize, pStubMsg->ActualCount);

  validate_string_data(pStubMsg, bufsize, esize);

  safe_buffer_increment(pStubMsg, bufsize);
  pStubMsg->MemorySize += memsize;

  return pStubMsg->MemorySize;
}

/* Complex types */

#include "pshpack1.h"
/* Layout of an RPC_FC_RANGE format-string entry (byte-packed). */
typedef struct
{
    unsigned char type;
    unsigned char flags_type; /* flags in upper nibble, type in lower nibble */
    ULONG low_value;
    ULONG high_value;
} NDR_RANGE;
#include "poppack.h"

/* Returns the in-memory size of a type embedded in a complex struct,
 * determined from its format-string description. */
static ULONG EmbeddedComplexSize(MIDL_STUB_MESSAGE *pStubMsg,
                                 PFORMAT_STRING pFormat)
{
  switch (*pFormat) {
  case RPC_FC_STRUCT:
  case RPC_FC_PSTRUCT:
  case RPC_FC_CSTRUCT:
  case RPC_FC_BOGUS_STRUCT:
  case RPC_FC_SMFARRAY:
  case RPC_FC_SMVARRAY:
  case RPC_FC_CSTRING:
    /* memory size is stored directly in the format string */
    return *(const WORD*)&pFormat[2];
  case RPC_FC_USER_MARSHAL:
    return *(const WORD*)&pFormat[4];
  case RPC_FC_RANGE: {
    /* a range is just its underlying base type, size-wise */
    switch (((const NDR_RANGE *)pFormat)->flags_type & 0xf) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      return sizeof(UCHAR);
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      return sizeof(USHORT);
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
      return sizeof(ULONG);
    case RPC_FC_FLOAT:
      return sizeof(float);
    case RPC_FC_DOUBLE:
      return sizeof(double);
    case RPC_FC_HYPER:
      return sizeof(ULONGLONG);
    case RPC_FC_ENUM16:
      return sizeof(UINT);
    default:
      ERR("unknown type 0x%x\n", ((const NDR_RANGE *)pFormat)->flags_type & 0xf);
      RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
  }
  case RPC_FC_NON_ENCAPSULATED_UNION:
    pFormat += 2;
    pFormat = SkipConformance(pStubMsg, pFormat);
    pFormat += *(const SHORT*)pFormat; /* follow offset to the union arms */
    return *(const SHORT*)pFormat;
  case RPC_FC_IP:
    return sizeof(void *);
  case RPC_FC_WSTRING:
    return *(const WORD*)&pFormat[2] * 2;
  default:
    FIXME("unhandled embedded type %02x\n", *pFormat);
  }
  return 0;
}

/* Dispatches to the registered memory-sizer for an embedded type;
 * returns 0 if no sizer is registered. */
static ULONG EmbeddedComplexMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                       PFORMAT_STRING pFormat)
{
  NDR_MEMORYSIZE m = NdrMemorySizer[*pFormat & NDR_TABLE_MASK];

  if (!m)
  {
    FIXME("no memorysizer for data type=%02x\n", *pFormat);
    return 0;
  }

  return m(pStubMsg, pFormat);
}

/* Walks a complex-struct member format string and marshals each member
 * of pMemory into the buffer.  pPointer points at the out-of-line
 * pointer descriptions used by RPC_FC_POINTER entries.  Returns the
 * memory pointer advanced past all marshalled members. */
static unsigned char * ComplexMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                       unsigned char *pMemory,
                                       PFORMAT_STRING pFormat,
                                       PFORMAT_STRING pPointer)
{
  PFORMAT_STRING desc;
  NDR_MARSHALL m;
  ULONG size;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      TRACE("byte=%d <= %p\n", *(WORD*)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, 1);
      pMemory += 1;
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      TRACE("short=%d <= %p\n", *(WORD*)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, 2);
      pMemory += 2;
      break;
    case RPC_FC_ENUM16:
    {
      /* 4 bytes in memory, 2 bytes on the wire */
      USHORT val = *(DWORD *)pMemory;
      TRACE("enum16=%d <= %p\n", *(DWORD*)pMemory, pMemory);
      if (32767 < *(DWORD*)pMemory)
        RpcRaiseException(RPC_X_ENUM_VALUE_OUT_OF_RANGE);
      safe_copy_to_buffer(pStubMsg, &val, 2);
      pMemory += 4;
      break;
    }
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
      TRACE("long=%d <= %p\n", *(DWORD*)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, 4);
      pMemory += 4;
      break;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
    {
      /* pointer-sized in memory, 4 bytes on the wire */
      UINT val = *(UINT_PTR *)pMemory;
      TRACE("int3264=%ld <= %p\n", *(UINT_PTR *)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, &val, sizeof(UINT));
      pMemory += sizeof(UINT_PTR);
      break;
    }
    case RPC_FC_FLOAT:
      TRACE("float=%f <= %p\n", *(float*)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, sizeof(float));
      pMemory += sizeof(float);
      break;
    case RPC_FC_HYPER:
      TRACE("longlong=%s <= %p\n", wine_dbgstr_longlong(*(ULONGLONG*)pMemory), pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, 8);
      pMemory += 8;
      break;
    case RPC_FC_DOUBLE:
      TRACE("double=%f <= %p\n", *(double*)pMemory, pMemory);
      safe_copy_to_buffer(pStubMsg, pMemory, sizeof(double));
      pMemory += sizeof(double);
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
    {
      unsigned char *saved_buffer;
      BOOL pointer_buffer_mark_set = FALSE;
      TRACE("pointer=%p <= %p\n", *(unsigned char**)pMemory, pMemory);
      TRACE("pStubMsg->Buffer before %p\n", pStubMsg->Buffer);
      /* RPC_FC_POINTER means "description lives in the pointer table";
       * otherwise the description is inline in pFormat */
      if (*pFormat != RPC_FC_POINTER)
        pPointer = pFormat;
      if (*pPointer != RPC_FC_RP)
        align_pointer_clear(&pStubMsg->Buffer, 4);
      saved_buffer = pStubMsg->Buffer;
      if (pStubMsg->PointerBufferMark)
      {
        /* pointees are marshalled after the flat struct data */
        pStubMsg->Buffer = pStubMsg->PointerBufferMark;
        pStubMsg->PointerBufferMark = NULL;
        pointer_buffer_mark_set = TRUE;
      }
      else if (*pPointer != RPC_FC_RP)
        safe_buffer_increment(pStubMsg, 4); /* for pointer ID */

      PointerMarshall(pStubMsg, saved_buffer, *(unsigned char**)pMemory, pPointer);
      if (pointer_buffer_mark_set)
      {
        STD_OVERFLOW_CHECK(pStubMsg);
        pStubMsg->PointerBufferMark = pStubMsg->Buffer;
        pStubMsg->Buffer = saved_buffer;
        if (*pPointer != RPC_FC_RP)
          safe_buffer_increment(pStubMsg, 4); /* for pointer ID */
      }
      TRACE("pStubMsg->Buffer after %p\n", pStubMsg->Buffer);
      if (*pFormat == RPC_FC_POINTER)
        pPointer += 4;
      else
        pFormat += 4;
      pMemory += sizeof(void *);
      break;
    }
    case RPC_FC_ALIGNM2:
      align_pointer(&pMemory, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_pointer(&pMemory, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_pointer(&pMemory, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      /* skip trailing/internal struct padding; not sent on the wire */
      pMemory += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      pMemory += pFormat[1]; /* memory padding before the member */
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size = EmbeddedComplexSize(pStubMsg, desc);
      TRACE("embedded complex (size=%d) <= %p\n", size, pMemory);
      m = NdrMarshaller[*desc & NDR_TABLE_MASK];
      if (m)
      {
        /* for some reason interface pointers aren't generated as
         * RPC_FC_POINTER, but instead as RPC_FC_EMBEDDED_COMPLEX, yet
         * they still need the derefencing treatment that pointers are
         * given */
        if (*desc == RPC_FC_IP)
          m(pStubMsg, *(unsigned char **)pMemory, desc);
        else
          m(pStubMsg, pMemory, desc);
      }
      else FIXME("no marshaller for embedded type %02x\n", *desc);
      pMemory += size;
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format 0x%02x\n", *pFormat);
    }
    pFormat++;
  }

  return pMemory;
}

/* Mirror of ComplexMarshall for unmarshalling: walks the member format
 * string and fills pMemory from the buffer.  Returns the memory pointer
 * advanced past all unmarshalled members. */
static unsigned char * ComplexUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                         unsigned char *pMemory,
                                         PFORMAT_STRING pFormat,
                                         PFORMAT_STRING pPointer,
                                         unsigned char fMustAlloc)
{
  PFORMAT_STRING desc;
  NDR_UNMARSHALL m;
  ULONG size;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      safe_copy_from_buffer(pStubMsg, pMemory, 1);
      TRACE("byte=%d => %p\n", *(WORD*)pMemory, pMemory);
      pMemory += 1;
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      safe_copy_from_buffer(pStubMsg, pMemory, 2);
      TRACE("short=%d => %p\n", *(WORD*)pMemory, pMemory);
      pMemory += 2;
      break;
    case RPC_FC_ENUM16:
    {
      /* 2 bytes on the wire widen to 4 bytes in memory */
      WORD val;
      safe_copy_from_buffer(pStubMsg, &val, 2);
      *(DWORD*)pMemory = val;
      TRACE("enum16=%d => %p\n", *(DWORD*)pMemory, pMemory);
      if (32767 < *(DWORD*)pMemory)
        RpcRaiseException(RPC_X_ENUM_VALUE_OUT_OF_RANGE);
      pMemory += 4;
      break;
    }
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
      safe_copy_from_buffer(pStubMsg, pMemory, 4);
      TRACE("long=%d => %p\n", *(DWORD*)pMemory, pMemory);
      pMemory += 4;
      break;
    case RPC_FC_INT3264:
    {
      /* 4 bytes on the wire, sign-extended to pointer size in memory */
      INT val;
      safe_copy_from_buffer(pStubMsg, &val, 4);
      *(INT_PTR *)pMemory = val;
      TRACE("int3264=%ld => %p\n", *(INT_PTR*)pMemory, pMemory);
      pMemory += sizeof(INT_PTR);
      break;
    }
    case RPC_FC_UINT3264:
    {
      /* 4 bytes on the wire, zero-extended to pointer size in memory */
      UINT val;
      safe_copy_from_buffer(pStubMsg, &val, 4);
      *(UINT_PTR *)pMemory = val;
      TRACE("uint3264=%ld => %p\n", *(UINT_PTR*)pMemory, pMemory);
      pMemory += sizeof(UINT_PTR);
      break;
    }
    case RPC_FC_FLOAT:
      safe_copy_from_buffer(pStubMsg, pMemory, sizeof(float));
      TRACE("float=%f => %p\n", *(float*)pMemory, pMemory);
      pMemory += sizeof(float);
      break;
    case RPC_FC_HYPER:
      safe_copy_from_buffer(pStubMsg, pMemory, 8);
      TRACE("longlong=%s => %p\n", wine_dbgstr_longlong(*(ULONGLONG*)pMemory), pMemory);
      pMemory += 8;
      break;
    case RPC_FC_DOUBLE:
      safe_copy_from_buffer(pStubMsg, pMemory, sizeof(double));
      TRACE("double=%f => %p\n", *(double*)pMemory, pMemory);
      pMemory += sizeof(double);
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
    {
      unsigned char *saved_buffer;
      BOOL pointer_buffer_mark_set = FALSE;
      TRACE("pointer => %p\n", pMemory);
      if (*pFormat != RPC_FC_POINTER)
        pPointer = pFormat;
      if (*pPointer != RPC_FC_RP)
        align_pointer(&pStubMsg->Buffer, 4);
      saved_buffer = pStubMsg->Buffer;
      if (pStubMsg->PointerBufferMark)
      {
        /* pointees were marshalled after the flat struct data */
        pStubMsg->Buffer = pStubMsg->PointerBufferMark;
        pStubMsg->PointerBufferMark = NULL;
        pointer_buffer_mark_set = TRUE;
      }
      else if (*pPointer != RPC_FC_RP)
        safe_buffer_increment(pStubMsg, 4); /* for pointer ID */

      PointerUnmarshall(pStubMsg, saved_buffer, (unsigned char**)pMemory, *(unsigned char**)pMemory, pPointer, fMustAlloc);
      if (pointer_buffer_mark_set)
      {
        STD_OVERFLOW_CHECK(pStubMsg);
        pStubMsg->PointerBufferMark = pStubMsg->Buffer;
        pStubMsg->Buffer = saved_buffer;
        if (*pPointer != RPC_FC_RP)
          safe_buffer_increment(pStubMsg, 4); /* for pointer ID */
      }
      if (*pFormat == RPC_FC_POINTER)
        pPointer += 4;
      else
        pFormat += 4;
      pMemory += sizeof(void *);
      break;
    }
    case RPC_FC_ALIGNM2:
      align_pointer_clear(&pMemory, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_pointer_clear(&pMemory, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_pointer_clear(&pMemory, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      /* zero the padding so the output memory is deterministic */
      memset(pMemory, 0, *pFormat - RPC_FC_STRUCTPAD1 + 1);
      pMemory += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      pMemory += pFormat[1];
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size = EmbeddedComplexSize(pStubMsg, desc);
      TRACE("embedded complex (size=%d) => %p\n", size, pMemory);
      if (fMustAlloc)
        /* we can't pass fMustAlloc=TRUE into the marshaller for this type
         * since the type is part of the memory block that is encompassed by
         * the whole complex type. Memory is forced to allocate when pointers
         * are set to NULL, so we emulate that part of fMustAlloc=TRUE by
         * clearing the memory we pass in to the unmarshaller */
        memset(pMemory, 0, size);
      m = NdrUnmarshaller[*desc & NDR_TABLE_MASK];
      if (m)
      {
        /* for some reason interface pointers aren't generated as
         * RPC_FC_POINTER, but instead as RPC_FC_EMBEDDED_COMPLEX, yet
         * they still need the derefencing treatment that pointers are
         * given */
        if (*desc == RPC_FC_IP)
          m(pStubMsg, (unsigned char **)pMemory, desc, FALSE);
        else
          m(pStubMsg, &pMemory, desc, FALSE);
      }
      else FIXME("no unmarshaller for embedded type %02x\n", *desc);
      pMemory += size;
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format %d\n", *pFormat);
    }
    pFormat++;
  }

  return pMemory;
}

/* Walks a complex-struct member format string and accumulates the
 * buffer space each member needs into pStubMsg->BufferLength.
 * Returns the memory pointer advanced past all sized members. */
static unsigned char * ComplexBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                         unsigned char *pMemory,
                                         PFORMAT_STRING pFormat,
                                         PFORMAT_STRING pPointer)
{
  PFORMAT_STRING desc;
  NDR_BUFFERSIZE m;
  ULONG size;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      safe_buffer_length_increment(pStubMsg, 1);
      pMemory += 1;
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      safe_buffer_length_increment(pStubMsg, 2);
      pMemory += 2;
      break;
    case RPC_FC_ENUM16:
      /* 2 bytes on the wire, 4 in memory */
      safe_buffer_length_increment(pStubMsg, 2);
      pMemory += 4;
      break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
    case RPC_FC_FLOAT:
      safe_buffer_length_increment(pStubMsg, 4);
      pMemory += 4;
      break;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
      /* 4 bytes on the wire, pointer-sized in memory */
      safe_buffer_length_increment(pStubMsg, 4);
      pMemory += sizeof(INT_PTR);
      break;
    case RPC_FC_HYPER:
    case RPC_FC_DOUBLE:
      safe_buffer_length_increment(pStubMsg, 8);
      pMemory += 8;
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
      if (*pFormat != RPC_FC_POINTER)
        pPointer = pFormat;
      if (!pStubMsg->IgnoreEmbeddedPointers)
      {
        /* size the pointee into the separate pointer-length accumulator */
        int saved_buffer_length = pStubMsg->BufferLength;
        pStubMsg->BufferLength = pStubMsg->PointerLength;
        pStubMsg->PointerLength = 0;
        if(!pStubMsg->BufferLength)
          ERR("BufferLength == 0??\n");
        PointerBufferSize(pStubMsg, *(unsigned char**)pMemory, pPointer);
        pStubMsg->PointerLength = pStubMsg->BufferLength;
        pStubMsg->BufferLength = saved_buffer_length;
      }
      if (*pPointer != RPC_FC_RP)
      {
        align_length(&pStubMsg->BufferLength, 4);
        safe_buffer_length_increment(pStubMsg, 4);
      }
      if (*pFormat == RPC_FC_POINTER)
        pPointer += 4;
      else
        pFormat += 4;
      pMemory += sizeof(void*);
      break;
    case RPC_FC_ALIGNM2:
      align_pointer(&pMemory, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_pointer(&pMemory, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_pointer(&pMemory, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      pMemory += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      pMemory += pFormat[1];
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size = EmbeddedComplexSize(pStubMsg, desc);
      m = NdrBufferSizer[*desc & NDR_TABLE_MASK];
      if (m)
      {
        /* for some reason interface pointers aren't generated as
         * RPC_FC_POINTER, but instead as RPC_FC_EMBEDDED_COMPLEX, yet
         * they still need the derefencing treatment that pointers are
         * given */
        if (*desc == RPC_FC_IP)
          m(pStubMsg, *(unsigned char **)pMemory, desc);
        else
          m(pStubMsg, pMemory, desc);
      }
      else FIXME("no buffersizer for embedded type %02x\n", *desc);
      pMemory += size;
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format 0x%02x\n", *pFormat);
    }
    pFormat++;
  }

  return pMemory;
}

/* Walks a complex-struct member format string and frees any pointees
 * owned by the struct.  Returns the memory pointer advanced past all
 * visited members. */
static unsigned char * ComplexFree(PMIDL_STUB_MESSAGE pStubMsg,
                                   unsigned char *pMemory,
                                   PFORMAT_STRING pFormat,
                                   PFORMAT_STRING pPointer)
{
  PFORMAT_STRING desc;
  NDR_FREE m;
  ULONG size;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      pMemory += 1;
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      pMemory += 2;
      break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM16:
    case RPC_FC_ENUM32:
    case RPC_FC_FLOAT:
      pMemory += 4;
      break;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
      pMemory += sizeof(INT_PTR);
      break;
    case RPC_FC_HYPER:
    case RPC_FC_DOUBLE:
      pMemory += 8;
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
      if (*pFormat != RPC_FC_POINTER)
        pPointer = pFormat;
      NdrPointerFree(pStubMsg, *(unsigned char**)pMemory, pPointer);
      if (*pFormat == RPC_FC_POINTER)
        pPointer += 4;
      else
        pFormat += 4;
      pMemory += sizeof(void *);
      break;
    case RPC_FC_ALIGNM2:
      align_pointer(&pMemory, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_pointer(&pMemory, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_pointer(&pMemory, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      pMemory += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      pMemory += pFormat[1];
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size = EmbeddedComplexSize(pStubMsg, desc);
      m = NdrFreer[*desc & NDR_TABLE_MASK];
      if (m)
      {
        /* for some reason interface pointers aren't generated as
         * RPC_FC_POINTER, but instead as RPC_FC_EMBEDDED_COMPLEX, yet
         * they still need the derefencing treatment that pointers are
         * given */
        if (*desc == RPC_FC_IP)
          m(pStubMsg, *(unsigned char **)pMemory, desc);
        else
          m(pStubMsg, pMemory, desc);
      }
      pMemory += size;
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format 0x%02x\n", *pFormat);
    }
    pFormat++;
  }

  return pMemory;
}

/* Walks a complex-struct member format string, advancing the buffer
 * pointer past each member's wire data and returning the accumulated
 * in-memory size of the flat part. */
static ULONG ComplexStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                     PFORMAT_STRING pFormat,
                                     PFORMAT_STRING pPointer)
{
  PFORMAT_STRING desc;
  ULONG size = 0;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      size += 1;
      safe_buffer_increment(pStubMsg, 1);
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      size += 2;
      safe_buffer_increment(pStubMsg, 2);
      break;
    case RPC_FC_ENUM16:
      size += 4;
      safe_buffer_increment(pStubMsg, 2);
      break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
    case RPC_FC_FLOAT:
      size += 4;
      safe_buffer_increment(pStubMsg, 4);
      break;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
      size += sizeof(INT_PTR);
      safe_buffer_increment(pStubMsg, 4);
      break;
    case RPC_FC_HYPER:
    case RPC_FC_DOUBLE:
      size += 8;
      safe_buffer_increment(pStubMsg, 8);
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
    {
      unsigned char *saved_buffer;
      BOOL pointer_buffer_mark_set = FALSE;
      if (*pFormat != RPC_FC_POINTER)
        pPointer = pFormat;
      if (*pPointer != RPC_FC_RP)
        align_pointer(&pStubMsg->Buffer, 4);
      saved_buffer = pStubMsg->Buffer;
      if (pStubMsg->PointerBufferMark)
      {
        pStubMsg->Buffer = pStubMsg->PointerBufferMark;
        pStubMsg->PointerBufferMark = NULL;
        pointer_buffer_mark_set = TRUE;
      }
      else if (*pPointer != RPC_FC_RP)
        safe_buffer_increment(pStubMsg, 4); /* for pointer ID */

      if (!pStubMsg->IgnoreEmbeddedPointers)
        PointerMemorySize(pStubMsg, saved_buffer, pPointer);
      if (pointer_buffer_mark_set)
      {
        STD_OVERFLOW_CHECK(pStubMsg);
        pStubMsg->PointerBufferMark = pStubMsg->Buffer;
        pStubMsg->Buffer = saved_buffer;
        if (*pPointer != RPC_FC_RP)
          safe_buffer_increment(pStubMsg, 4); /* for pointer ID */
      }
      if (*pFormat == RPC_FC_POINTER)
        pPointer += 4;
      else
        pFormat += 4;
      size += sizeof(void *);
      break;
    }
    case RPC_FC_ALIGNM2:
      align_length(&size, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_length(&size, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_length(&size, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      size += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      size += pFormat[1];
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size += EmbeddedComplexMemorySize(pStubMsg, desc);
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format 0x%02x\n", *pFormat);
    }
    pFormat++;
  }

  return size;
}

/* Computes the in-memory size of a complex struct's flat part from its
 * member format string alone, without touching the buffer. */
ULONG ComplexStructSize(PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat)
{
  PFORMAT_STRING desc;
  ULONG size = 0;

  while (*pFormat != RPC_FC_END) {
    switch (*pFormat) {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
      size += 1;
      break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
      size += 2;
      break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM16:
    case RPC_FC_ENUM32:
    case RPC_FC_FLOAT:
      size += 4;
      break;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
      size += sizeof(INT_PTR);
      break;
    case RPC_FC_HYPER:
    case RPC_FC_DOUBLE:
      size += 8;
      break;
    case RPC_FC_RP:
    case RPC_FC_UP:
    case RPC_FC_OP:
    case RPC_FC_FP:
    case RPC_FC_POINTER:
      size += sizeof(void *);
      if (*pFormat != RPC_FC_POINTER)
        pFormat += 4; /* skip the inline pointer description */
      break;
    case RPC_FC_ALIGNM2:
      align_length(&size, 2);
      break;
    case RPC_FC_ALIGNM4:
      align_length(&size, 4);
      break;
    case RPC_FC_ALIGNM8:
      align_length(&size, 8);
      break;
    case RPC_FC_STRUCTPAD1:
    case RPC_FC_STRUCTPAD2:
    case RPC_FC_STRUCTPAD3:
    case RPC_FC_STRUCTPAD4:
    case RPC_FC_STRUCTPAD5:
    case RPC_FC_STRUCTPAD6:
    case RPC_FC_STRUCTPAD7:
      size += *pFormat - RPC_FC_STRUCTPAD1 + 1;
      break;
    case RPC_FC_EMBEDDED_COMPLEX:
      size += pFormat[1];
      pFormat += 2;
      desc = pFormat + *(const SHORT*)pFormat;
      size += EmbeddedComplexSize(pStubMsg, desc);
      pFormat += 2;
      continue;
    case RPC_FC_PAD:
      break;
    default:
      FIXME("unhandled format 0x%02x\n", *pFormat);
    }
    pFormat++;
  }

  return size;
}

/***********************************************************************
 *           NdrComplexStructMarshall [RPCRT4.@]
 *
 * Marshals a "bogus" (complex) struct: conformance for a trailing
 * conformant array (if any), the flat members, then the array data,
 * with all embedded pointees placed after the flat data via
 * PointerBufferMark.
 */
unsigned char * WINAPI NdrComplexStructMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                unsigned char *pMemory,
                                                PFORMAT_STRING pFormat)
{
  PFORMAT_STRING conf_array = NULL;
  PFORMAT_STRING pointer_desc = NULL;
  unsigned char *OldMemory = pStubMsg->Memory;
  BOOL pointer_buffer_mark_set = FALSE;
  ULONG count = 0;
  ULONG max_count = 0;
  ULONG offset = 0;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  if (!pStubMsg->PointerBufferMark)
  {
    int saved_ignore_embedded = pStubMsg->IgnoreEmbeddedPointers;
    /* save buffer length */
    ULONG saved_buffer_length = pStubMsg->BufferLength;

    /* get the buffer pointer after complex array data, but before
     * pointer data */
    pStubMsg->BufferLength = pStubMsg->Buffer - (unsigned char *)pStubMsg->RpcMsg->Buffer;
    pStubMsg->IgnoreEmbeddedPointers = 1;
    NdrComplexStructBufferSize(pStubMsg, pMemory, pFormat);
    pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

    /* save it for use by embedded pointer code later */
    pStubMsg->PointerBufferMark = (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength;
    TRACE("difference = 0x%x\n", (ULONG)(pStubMsg->PointerBufferMark - pStubMsg->Buffer));
    pointer_buffer_mark_set = TRUE;

    /* restore the original buffer length */
    pStubMsg->BufferLength = saved_buffer_length;
  }

  align_pointer_clear(&pStubMsg->Buffer, pFormat[1] + 1);

  pFormat += 4;
  if (*(const SHORT*)pFormat) conf_array = pFormat + *(const SHORT*)pFormat;
  pFormat += 2;
  if (*(const WORD*)pFormat) pointer_desc = pFormat + *(const WORD*)pFormat;
  pFormat += 2;

  pStubMsg->Memory = pMemory;

  if (conf_array)
  {
    ULONG struct_size = ComplexStructSize(pStubMsg, pFormat);
    array_compute_and_write_conformance(conf_array[0], pStubMsg,
                                        pMemory + struct_size, conf_array);
    /* these could be changed in ComplexMarshall so save them for later */
    max_count = pStubMsg->MaxCount;
    count = pStubMsg->ActualCount;
    offset = pStubMsg->Offset;
  }

  pMemory = ComplexMarshall(pStubMsg, pMemory, pFormat, pointer_desc);

  if (conf_array)
  {
    pStubMsg->MaxCount = max_count;
    pStubMsg->ActualCount = count;
    pStubMsg->Offset = offset;
    array_write_variance_and_marshall(conf_array[0], pStubMsg, pMemory,
                                      conf_array, TRUE /* fHasPointers */);
  }

  pStubMsg->Memory = OldMemory;

  if (pointer_buffer_mark_set)
  {
    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
    pStubMsg->PointerBufferMark = NULL;
  }

  STD_OVERFLOW_CHECK(pStubMsg);

  return NULL;
}

/***********************************************************************
 *           NdrComplexStructUnmarshall [RPCRT4.@]
 *
 * Unmarshals a complex struct: reads the conformance for a trailing
 * conformant array (if any), allocates flat size + array size in one
 * block, then unmarshals members and array data.
 */
unsigned char * WINAPI NdrComplexStructUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                  unsigned char **ppMemory,
                                                  PFORMAT_STRING pFormat,
                                                  unsigned char fMustAlloc)
{
  unsigned size = *(const WORD*)(pFormat+2);
  PFORMAT_STRING conf_array = NULL;
  PFORMAT_STRING pointer_desc = NULL;
  unsigned char *pMemory;
  BOOL pointer_buffer_mark_set = FALSE;
  ULONG count = 0;
  ULONG max_count = 0;
  ULONG offset = 0;
  ULONG array_size = 0;

  TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

  if (!pStubMsg->PointerBufferMark)
  {
    int saved_ignore_embedded = pStubMsg->IgnoreEmbeddedPointers;
    /* save buffer pointer */
    unsigned char *saved_buffer = pStubMsg->Buffer;

    /* get the buffer pointer after complex array data, but before
     * pointer data */
    pStubMsg->IgnoreEmbeddedPointers = 1;
    NdrComplexStructMemorySize(pStubMsg, pFormat);
    pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

    /* save it for use by embedded pointer code later */
    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
    TRACE("difference = 0x%x\n", (ULONG)(pStubMsg->PointerBufferMark - saved_buffer));
    pointer_buffer_mark_set = TRUE;

    /* restore the original buffer */
    pStubMsg->Buffer = saved_buffer;
  }

  align_pointer(&pStubMsg->Buffer, pFormat[1] + 1);

  pFormat += 4;
  if (*(const SHORT*)pFormat) conf_array = pFormat + *(const SHORT*)pFormat;
  pFormat += 2;
  if (*(const WORD*)pFormat) pointer_desc = pFormat + *(const WORD*)pFormat;
  pFormat += 2;

  if (conf_array)
  {
    array_size = array_read_conformance(conf_array[0], pStubMsg, conf_array);
    size += array_size;

    /* these could be changed in ComplexMarshall so save them for later */
    max_count = pStubMsg->MaxCount;
    count = pStubMsg->ActualCount;
    offset = pStubMsg->Offset;
  }

  if (!fMustAlloc && !*ppMemory)
    fMustAlloc = TRUE;
  if (fMustAlloc)
    *ppMemory = NdrAllocate(pStubMsg, size);

  pMemory = ComplexUnmarshall(pStubMsg, *ppMemory, pFormat, pointer_desc, fMustAlloc);

  if (conf_array)
  {
    pStubMsg->MaxCount = max_count;
    pStubMsg->ActualCount = count;
    pStubMsg->Offset = offset;
    if (fMustAlloc)
      memset(pMemory, 0, array_size);
    array_read_variance_and_unmarshall(conf_array[0], pStubMsg, &pMemory,
                                       conf_array, FALSE,
                                       FALSE /* fUseBufferMemoryServer */,
                                       TRUE /* fUnmarshall */);
  }

  if (pointer_buffer_mark_set)
  {
    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
    pStubMsg->PointerBufferMark = NULL;
  }

  return NULL;
}

/***********************************************************************
 *           NdrComplexStructBufferSize [RPCRT4.@]
 *
 * Sizes a complex struct, keeping embedded-pointee sizes in the
 * separate PointerLength accumulator so flat data and pointees can be
 * laid out contiguously at marshal time.
 */
void WINAPI NdrComplexStructBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                       unsigned char *pMemory,
                                       PFORMAT_STRING pFormat)
{
  PFORMAT_STRING conf_array = NULL;
  PFORMAT_STRING pointer_desc = NULL;
  unsigned char *OldMemory = pStubMsg->Memory;
  int pointer_length_set = 0;
  ULONG count = 0;
  ULONG max_count = 0;
  ULONG offset = 0;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  align_length(&pStubMsg->BufferLength, pFormat[1] + 1);

  if(!pStubMsg->IgnoreEmbeddedPointers && !pStubMsg->PointerLength)
  {
    int saved_ignore_embedded = pStubMsg->IgnoreEmbeddedPointers;
    ULONG saved_buffer_length = pStubMsg->BufferLength;

    /* get the buffer length after complex struct data, but before
     * pointer data */
    pStubMsg->IgnoreEmbeddedPointers = 1;
    NdrComplexStructBufferSize(pStubMsg, pMemory, pFormat);
    pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

    /* save it for use by embedded pointer code later */
    pStubMsg->PointerLength = pStubMsg->BufferLength;
    pointer_length_set = 1;
    TRACE("difference = 0x%x\n", pStubMsg->PointerLength - saved_buffer_length);

    /* restore the original buffer length */
    pStubMsg->BufferLength = saved_buffer_length;
  }

  pFormat += 4;
  if (*(const SHORT*)pFormat) conf_array = pFormat + *(const SHORT*)pFormat;
  pFormat += 2;
  if (*(const WORD*)pFormat) pointer_desc = pFormat + *(const WORD*)pFormat;
  pFormat += 2;

  pStubMsg->Memory = pMemory;

  if (conf_array)
  {
    ULONG struct_size = ComplexStructSize(pStubMsg, pFormat);
    array_compute_and_size_conformance(conf_array[0], pStubMsg, pMemory + struct_size,
                                       conf_array);

    /* these could be changed in ComplexMarshall so save them for later */
    max_count = pStubMsg->MaxCount;
    count = pStubMsg->ActualCount;
    offset = pStubMsg->Offset;
  }

  pMemory = ComplexBufferSize(pStubMsg, pMemory, pFormat, pointer_desc);

  if (conf_array)
  {
    pStubMsg->MaxCount = max_count;
    pStubMsg->ActualCount = count;
    pStubMsg->Offset = offset;
    array_buffer_size(conf_array[0], pStubMsg, pMemory, conf_array,
                      TRUE /* fHasPointers */);
  }

  pStubMsg->Memory = OldMemory;

  if(pointer_length_set)
  {
    pStubMsg->BufferLength = pStubMsg->PointerLength;
    pStubMsg->PointerLength = 0;
  }
}

/***********************************************************************
 *           NdrComplexStructMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of a complex struct and
 * advances the buffer past its wire representation.
 */
ULONG WINAPI NdrComplexStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                        PFORMAT_STRING pFormat)
{
  unsigned size = *(const WORD*)(pFormat+2);
  PFORMAT_STRING conf_array = NULL;
  PFORMAT_STRING pointer_desc = NULL;
  ULONG count = 0;
  ULONG max_count = 0;
  ULONG offset = 0;

  TRACE("(%p,%p)\n", pStubMsg, pFormat);

  align_pointer(&pStubMsg->Buffer, pFormat[1] + 1);

  pFormat += 4;
  if (*(const SHORT*)pFormat) conf_array = pFormat + *(const SHORT*)pFormat;
  pFormat += 2;
  if (*(const WORD*)pFormat) pointer_desc = pFormat + *(const WORD*)pFormat;
  pFormat += 2;

  if (conf_array)
  {
    array_read_conformance(conf_array[0], pStubMsg, conf_array);

    /* these could be changed in ComplexStructMemorySize so save them for
     * later */
    max_count = pStubMsg->MaxCount;
    count = pStubMsg->ActualCount;
    offset = pStubMsg->Offset;
  }

  ComplexStructMemorySize(pStubMsg, pFormat, pointer_desc);

  if (conf_array)
  {
    pStubMsg->MaxCount = max_count;
    pStubMsg->ActualCount = count;
    pStubMsg->Offset = offset;
    array_memory_size(conf_array[0], pStubMsg, conf_array,
                      TRUE /* fHasPointers */);
  }

  return size;
}

/***********************************************************************
 *           NdrComplexStructFree [RPCRT4.@]
 *
 * Frees pointees owned by a complex struct's members and its trailing
 * conformant array (if any).  The struct memory itself is not freed.
 */
void WINAPI NdrComplexStructFree(PMIDL_STUB_MESSAGE pStubMsg,
                                 unsigned char *pMemory,
                                 PFORMAT_STRING pFormat)
{
  PFORMAT_STRING conf_array = NULL;
  PFORMAT_STRING pointer_desc = NULL;
  unsigned char *OldMemory = pStubMsg->Memory;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  pFormat += 4;
  if (*(const SHORT*)pFormat) conf_array = pFormat + *(const SHORT*)pFormat;
  pFormat += 2;
  if (*(const WORD*)pFormat) pointer_desc = pFormat + *(const WORD*)pFormat;
  pFormat += 2;

  pStubMsg->Memory = pMemory;

  pMemory = ComplexFree(pStubMsg, pMemory, pFormat, pointer_desc);

  if (conf_array)
    array_free(conf_array[0], pStubMsg, pMemory, conf_array,
               TRUE /* fHasPointers */);

  pStubMsg->Memory = OldMemory;
}

/***********************************************************************
 *           NdrConformantArrayMarshall [RPCRT4.@]
 *
 * Marshals a conformant array (RPC_FC_CARRAY): conformance value
 * followed by the array data and any embedded pointees.
 */
unsigned char * WINAPI NdrConformantArrayMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                  unsigned char *pMemory,
                                                  PFORMAT_STRING pFormat)
{
  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  if (pFormat[0] != RPC_FC_CARRAY)
  {
    ERR("invalid format = 0x%x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  array_compute_and_write_conformance(RPC_FC_CARRAY, pStubMsg, pMemory,
                                      pFormat);
  array_write_variance_and_marshall(RPC_FC_CARRAY, pStubMsg, pMemory,
                                    pFormat, TRUE /* fHasPointers */);

  return NULL;
}
/***********************************************************************
 *           NdrConformantArrayUnmarshall [RPCRT4.@]
 *
 * Unmarshals a conformant array: reads the conformance count from the
 * wire, then the variance and element data.
 */
unsigned char * WINAPI NdrConformantArrayUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                    unsigned char **ppMemory,
                                                    PFORMAT_STRING pFormat,
                                                    unsigned char fMustAlloc)
{
  TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);
  if (pFormat[0] != RPC_FC_CARRAY)
  {
    ERR("invalid format = 0x%x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  array_read_conformance(RPC_FC_CARRAY, pStubMsg, pFormat);
  array_read_variance_and_unmarshall(RPC_FC_CARRAY, pStubMsg, ppMemory,
                                     pFormat, fMustAlloc,
                                     TRUE /* fUseBufferMemoryServer */,
                                     TRUE /* fUnmarshall */);

  return NULL;
}

/***********************************************************************
 *           NdrConformantArrayBufferSize [RPCRT4.@]
 *
 * Accumulates into BufferLength the wire size of a conformant array.
 */
void WINAPI NdrConformantArrayBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                         unsigned char *pMemory,
                                         PFORMAT_STRING pFormat)
{
  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
  if (pFormat[0] != RPC_FC_CARRAY)
  {
    ERR("invalid format = 0x%x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  array_compute_and_size_conformance(RPC_FC_CARRAY, pStubMsg, pMemory,
                                     pFormat);
  array_buffer_size(RPC_FC_CARRAY, pStubMsg, pMemory, pFormat,
                    TRUE /* fHasPointers */);
}

/***********************************************************************
 *           NdrConformantArrayMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of a conformant array and
 * advances the buffer past it; returns the accumulated MemorySize.
 */
ULONG WINAPI NdrConformantArrayMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                          PFORMAT_STRING pFormat)
{
  TRACE("(%p,%p)\n", pStubMsg, pFormat);
  if (pFormat[0] != RPC_FC_CARRAY)
  {
    ERR("invalid format = 0x%x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  array_read_conformance(RPC_FC_CARRAY, pStubMsg, pFormat);
  array_memory_size(RPC_FC_CARRAY, pStubMsg, pFormat,
                    TRUE /* fHasPointers */);

  return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrConformantArrayFree [RPCRT4.@]
 *
 * Frees memory referenced by embedded pointers of a conformant array.
 */
void WINAPI NdrConformantArrayFree(PMIDL_STUB_MESSAGE pStubMsg,
                                   unsigned char
*pMemory,
                                   PFORMAT_STRING pFormat)
{
  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
  if (pFormat[0] != RPC_FC_CARRAY)
  {
    ERR("invalid format = 0x%x\n", pFormat[0]);
    RpcRaiseException(RPC_X_BAD_STUB_DATA);
  }

  array_free(RPC_FC_CARRAY, pStubMsg, pMemory, pFormat,
             TRUE /* fHasPointers */);
}

/***********************************************************************
 *           NdrConformantVaryingArrayMarshall [RPCRT4.@]
 *
 * Marshals a conformant varying array (RPC_FC_CVARRAY): conformance,
 * then variance (offset/actual count) and the selected elements.
 */
unsigned char* WINAPI NdrConformantVaryingArrayMarshall( PMIDL_STUB_MESSAGE pStubMsg,
                                                         unsigned char* pMemory,
                                                         PFORMAT_STRING pFormat )
{
    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if (pFormat[0] != RPC_FC_CVARRAY)
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    array_compute_and_write_conformance(RPC_FC_CVARRAY, pStubMsg, pMemory,
                                        pFormat);
    array_write_variance_and_marshall(RPC_FC_CVARRAY, pStubMsg, pMemory,
                                      pFormat, TRUE /* fHasPointers */);

    return NULL;
}

/***********************************************************************
 *           NdrConformantVaryingArrayUnmarshall [RPCRT4.@]
 *
 * Unmarshals a conformant varying array from the wire.
 */
unsigned char* WINAPI NdrConformantVaryingArrayUnmarshall( PMIDL_STUB_MESSAGE pStubMsg,
                                                           unsigned char** ppMemory,
                                                           PFORMAT_STRING pFormat,
                                                           unsigned char fMustAlloc )
{
    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

    if (pFormat[0] != RPC_FC_CVARRAY)
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    array_read_conformance(RPC_FC_CVARRAY, pStubMsg, pFormat);
    array_read_variance_and_unmarshall(RPC_FC_CVARRAY, pStubMsg, ppMemory,
                                       pFormat, fMustAlloc,
                                       TRUE /* fUseBufferMemoryServer */,
                                       TRUE /* fUnmarshall */);

    return NULL;
}

/***********************************************************************
 *           NdrConformantVaryingArrayFree [RPCRT4.@]
 *
 * Frees memory referenced by a conformant varying array's pointers.
 */
void WINAPI NdrConformantVaryingArrayFree( PMIDL_STUB_MESSAGE pStubMsg,
                                           unsigned char* pMemory,
                                           PFORMAT_STRING pFormat )
{
    TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

    if (pFormat[0] != RPC_FC_CVARRAY)
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    array_free(RPC_FC_CVARRAY, pStubMsg, pMemory, pFormat,
               TRUE /* fHasPointers */);
}

/***********************************************************************
 *           NdrConformantVaryingArrayBufferSize [RPCRT4.@]
 *
 * Accumulates the wire size of a conformant varying array.
 */
void WINAPI NdrConformantVaryingArrayBufferSize( PMIDL_STUB_MESSAGE pStubMsg,
                                                 unsigned char* pMemory,
                                                 PFORMAT_STRING pFormat )
{
    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if (pFormat[0] != RPC_FC_CVARRAY)
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    array_compute_and_size_conformance(RPC_FC_CVARRAY, pStubMsg, pMemory,
                                       pFormat);
    array_buffer_size(RPC_FC_CVARRAY, pStubMsg, pMemory, pFormat,
                      TRUE /* fHasPointers */);
}

/***********************************************************************
 *           NdrConformantVaryingArrayMemorySize [RPCRT4.@]
 *
 * Computes the memory size of a conformant varying array and advances
 * the buffer; returns the accumulated MemorySize.
 */
ULONG WINAPI NdrConformantVaryingArrayMemorySize( PMIDL_STUB_MESSAGE pStubMsg,
                                                  PFORMAT_STRING pFormat )
{
    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    if (pFormat[0] != RPC_FC_CVARRAY)
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return pStubMsg->MemorySize;
    }

    array_read_conformance(RPC_FC_CVARRAY, pStubMsg, pFormat);
    array_memory_size(RPC_FC_CVARRAY, pStubMsg, pFormat,
                      TRUE /* fHasPointers */);

    return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrComplexArrayMarshall [RPCRT4.@]
 *
 * Marshals a complex (bogus) array.  Flat data and embedded pointer
 * data are written to separate buffer regions; PointerBufferMark marks
 * where the pointer data begins.
 */
unsigned char * WINAPI NdrComplexArrayMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                               unsigned char *pMemory,
                                               PFORMAT_STRING pFormat)
{
  BOOL pointer_buffer_mark_set = FALSE;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  if (pFormat[0] != RPC_FC_BOGUS_ARRAY)
  {
      ERR("invalid format type %x\n", pFormat[0]);
      RpcRaiseException(RPC_S_INTERNAL_ERROR);
      return NULL;
  }

  if (!pStubMsg->PointerBufferMark)
  {
    /* save buffer fields that may be changed by buffer sizer functions
     * and that may be needed later on */
    int
saved_ignore_embedded = pStubMsg->IgnoreEmbeddedPointers;
    ULONG saved_buffer_length = pStubMsg->BufferLength;
    ULONG_PTR saved_max_count = pStubMsg->MaxCount;
    ULONG saved_offset = pStubMsg->Offset;
    ULONG saved_actual_count = pStubMsg->ActualCount;

    /* get the buffer pointer after complex array data, but before
     * pointer data */
    pStubMsg->BufferLength = pStubMsg->Buffer - (unsigned char *)pStubMsg->RpcMsg->Buffer;
    pStubMsg->IgnoreEmbeddedPointers = 1;
    /* size only the flat data to locate the start of the pointer region */
    NdrComplexArrayBufferSize(pStubMsg, pMemory, pFormat);
    pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

    /* save it for use by embedded pointer code later */
    pStubMsg->PointerBufferMark = (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength;
    TRACE("difference = 0x%x\n", (ULONG)(pStubMsg->Buffer - (unsigned char *)pStubMsg->RpcMsg->Buffer));
    pointer_buffer_mark_set = TRUE;

    /* restore fields */
    pStubMsg->ActualCount = saved_actual_count;
    pStubMsg->Offset = saved_offset;
    pStubMsg->MaxCount = saved_max_count;
    pStubMsg->BufferLength = saved_buffer_length;
  }

  array_compute_and_write_conformance(RPC_FC_BOGUS_ARRAY, pStubMsg,
                                      pMemory, pFormat);
  array_write_variance_and_marshall(RPC_FC_BOGUS_ARRAY, pStubMsg,
                                    pMemory, pFormat,
                                    TRUE /* fHasPointers */);

  STD_OVERFLOW_CHECK(pStubMsg);

  if (pointer_buffer_mark_set)
  {
    /* leave the buffer pointing past the pointer data we wrote */
    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
    pStubMsg->PointerBufferMark = NULL;
  }

  return NULL;
}

/***********************************************************************
 *           NdrComplexArrayUnmarshall [RPCRT4.@]
 *
 * Unmarshals a complex (bogus) array, locating the embedded pointer
 * region by first sizing the flat data with pointers ignored.
 */
unsigned char * WINAPI NdrComplexArrayUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                 unsigned char **ppMemory,
                                                 PFORMAT_STRING pFormat,
                                                 unsigned char fMustAlloc)
{
  unsigned char *saved_buffer;
  BOOL pointer_buffer_mark_set = FALSE;
  int saved_ignore_embedded;

  TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

  if (pFormat[0] != RPC_FC_BOGUS_ARRAY)
  {
      ERR("invalid format type %x\n", pFormat[0]);
      RpcRaiseException(RPC_S_INTERNAL_ERROR);
      return NULL;
  }

  saved_ignore_embedded =
pStubMsg->IgnoreEmbeddedPointers;
  /* save buffer pointer */
  saved_buffer = pStubMsg->Buffer;
  /* get the buffer pointer after complex array data, but before
   * pointer data */
  pStubMsg->IgnoreEmbeddedPointers = 1;
  pStubMsg->MemorySize = 0;
  NdrComplexArrayMemorySize(pStubMsg, pFormat);
  pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

  TRACE("difference = 0x%x\n", (ULONG)(pStubMsg->Buffer - saved_buffer));
  if (!pStubMsg->PointerBufferMark)
  {
    /* save it for use by embedded pointer code later */
    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
    pointer_buffer_mark_set = TRUE;
  }
  /* restore the original buffer */
  pStubMsg->Buffer = saved_buffer;

  array_read_conformance(RPC_FC_BOGUS_ARRAY, pStubMsg, pFormat);
  array_read_variance_and_unmarshall(RPC_FC_BOGUS_ARRAY, pStubMsg, ppMemory,
                                     pFormat, fMustAlloc,
                                     TRUE /* fUseBufferMemoryServer */,
                                     TRUE /* fUnmarshall */);

  if (pointer_buffer_mark_set)
  {
    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
    pStubMsg->PointerBufferMark = NULL;
  }

  return NULL;
}

/***********************************************************************
 *           NdrComplexArrayBufferSize [RPCRT4.@]
 *
 * Accumulates the wire size of a complex array.  Calls itself once
 * with IgnoreEmbeddedPointers set to compute PointerLength, the split
 * between flat data and pointer data.
 */
void WINAPI NdrComplexArrayBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                      unsigned char *pMemory,
                                      PFORMAT_STRING pFormat)
{
  int pointer_length_set = 0;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  if (pFormat[0] != RPC_FC_BOGUS_ARRAY)
  {
      ERR("invalid format type %x\n", pFormat[0]);
      RpcRaiseException(RPC_S_INTERNAL_ERROR);
      return;
  }

  if (!pStubMsg->IgnoreEmbeddedPointers && !pStubMsg->PointerLength)
  {
    /* save buffer fields that may be changed by buffer sizer functions
     * and that may be needed later on */
    int saved_ignore_embedded = pStubMsg->IgnoreEmbeddedPointers;
    ULONG saved_buffer_length = pStubMsg->BufferLength;
    ULONG_PTR saved_max_count = pStubMsg->MaxCount;
    ULONG saved_offset = pStubMsg->Offset;
    ULONG saved_actual_count = pStubMsg->ActualCount;

    /* get the buffer pointer after complex array data, but before
     * pointer data */
    pStubMsg->IgnoreEmbeddedPointers = 1;
NdrComplexArrayBufferSize(pStubMsg, pMemory, pFormat);
    pStubMsg->IgnoreEmbeddedPointers = saved_ignore_embedded;

    /* save it for use by embedded pointer code later */
    pStubMsg->PointerLength = pStubMsg->BufferLength;
    pointer_length_set = 1;

    /* restore fields */
    pStubMsg->ActualCount = saved_actual_count;
    pStubMsg->Offset = saved_offset;
    pStubMsg->MaxCount = saved_max_count;
    pStubMsg->BufferLength = saved_buffer_length;
  }

  array_compute_and_size_conformance(RPC_FC_BOGUS_ARRAY, pStubMsg, pMemory, pFormat);
  array_buffer_size(RPC_FC_BOGUS_ARRAY, pStubMsg, pMemory, pFormat,
                    TRUE /* fHasPointers */);

  if (pointer_length_set)
  {
    pStubMsg->BufferLength = pStubMsg->PointerLength;
    pStubMsg->PointerLength = 0;
  }
}

/***********************************************************************
 *           NdrComplexArrayMemorySize [RPCRT4.@]
 *
 * Computes the memory size of a complex array and advances the buffer.
 */
ULONG WINAPI NdrComplexArrayMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                       PFORMAT_STRING pFormat)
{
  TRACE("(%p,%p)\n", pStubMsg, pFormat);

  if (pFormat[0] != RPC_FC_BOGUS_ARRAY)
  {
      ERR("invalid format type %x\n", pFormat[0]);
      RpcRaiseException(RPC_S_INTERNAL_ERROR);
      return 0;
  }

  array_read_conformance(RPC_FC_BOGUS_ARRAY, pStubMsg, pFormat);
  array_memory_size(RPC_FC_BOGUS_ARRAY, pStubMsg, pFormat,
                    TRUE /* fHasPointers */);

  return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrComplexArrayFree [RPCRT4.@]
 *
 * Frees each element of a complex array via the per-member ComplexFree
 * walker, after computing the element count from conformance/variance.
 */
void WINAPI NdrComplexArrayFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
  ULONG i, count, def;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);

  if (pFormat[0] != RPC_FC_BOGUS_ARRAY)
  {
      ERR("invalid format type %x\n", pFormat[0]);
      RpcRaiseException(RPC_S_INTERNAL_ERROR);
      return;
  }

  def = *(const WORD*)&pFormat[2];
  pFormat += 4;

  pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, def);
  TRACE("conformance = %ld\n", pStubMsg->MaxCount);

  pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, pStubMsg->MaxCount);
  TRACE("variance = %d\n", pStubMsg->ActualCount);

  count =
pStubMsg->ActualCount;
  for (i = 0; i < count; i++)
    pMemory = ComplexFree(pStubMsg, pMemory, pFormat, NULL);
}

/* Fills in a USER_MARSHAL_CB callback record passed (via its Flags
 * member) to user-supplied wire_marshal routines. */
static void UserMarshalCB(PMIDL_STUB_MESSAGE pStubMsg,
                          USER_MARSHAL_CB_TYPE cbtype, PFORMAT_STRING pFormat,
                          USER_MARSHAL_CB *umcb)
{
  umcb->Flags = MAKELONG(pStubMsg->dwDestContext,
                         pStubMsg->RpcMsg->DataRepresentation);
  umcb->pStubMsg = pStubMsg;
  umcb->pReserve = NULL;
  umcb->Signature = USER_MARSHAL_CB_SIGNATURE;
  umcb->CBType = cbtype;
  umcb->pFormat = pFormat;
  umcb->pTypeFormat = NULL /* FIXME */;
}

/* "User" tag written before the pointer data of an embedded
 * user-marshalled pointer */
#define USER_MARSHAL_PTR_PREFIX \
        ( (DWORD)'U'         | ( (DWORD)'s' << 8 ) | \
        ( (DWORD)'e' << 16 ) | ( (DWORD)'r' << 24 ) )

/***********************************************************************
 *           NdrUserMarshalMarshall [RPCRT4.@]
 *
 * Marshals a [wire_marshal]/[user_marshal] type by calling the
 * application's pfnMarshall quadruple entry.
 */
unsigned char * WINAPI NdrUserMarshalMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                              unsigned char *pMemory,
                                              PFORMAT_STRING pFormat)
{
  unsigned flags = pFormat[1];
  unsigned index = *(const WORD*)&pFormat[2];
  unsigned char *saved_buffer = NULL;
  USER_MARSHAL_CB umcb;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
  TRACE("index=%d\n", index);

  UserMarshalCB(pStubMsg, USER_MARSHAL_CB_MARSHALL, pFormat, &umcb);

  if (flags & USER_MARSHAL_POINTER)
  {
    /* embedded pointer: write the "User" prefix, then marshal the data
     * into the pointer region if one is active */
    align_pointer_clear(&pStubMsg->Buffer, 4);
    NDR_LOCAL_UINT32_WRITE(pStubMsg->Buffer, USER_MARSHAL_PTR_PREFIX);
    pStubMsg->Buffer += 4;
    if (pStubMsg->PointerBufferMark)
    {
      saved_buffer = pStubMsg->Buffer;
      pStubMsg->Buffer = pStubMsg->PointerBufferMark;
      pStubMsg->PointerBufferMark = NULL;
    }
    align_pointer_clear(&pStubMsg->Buffer, 8);
  }
  else
    /* low nibble of flags holds (alignment - 1) */
    align_pointer_clear(&pStubMsg->Buffer, (flags & 0xf) + 1);

  pStubMsg->Buffer =
    pStubMsg->StubDesc->aUserMarshalQuadruple[index].pfnMarshall(
      &umcb.Flags, pStubMsg->Buffer, pMemory);

  if (saved_buffer)
  {
    STD_OVERFLOW_CHECK(pStubMsg);
    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
    pStubMsg->Buffer = saved_buffer;
  }

  STD_OVERFLOW_CHECK(pStubMsg);

  return NULL;
}

/***********************************************************************
 *           NdrUserMarshalUnmarshall [RPCRT4.@]
 */
unsigned char * WINAPI NdrUserMarshalUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                unsigned char **ppMemory,
                                                PFORMAT_STRING pFormat,
                                                unsigned char fMustAlloc)
{
  /* Unmarshals a [wire_marshal]/[user_marshal] type by calling the
   * application's pfnUnmarshall quadruple entry. */
  unsigned flags = pFormat[1];
  unsigned index = *(const WORD*)&pFormat[2];
  DWORD memsize = *(const WORD*)&pFormat[4];
  unsigned char *saved_buffer = NULL;
  USER_MARSHAL_CB umcb;

  TRACE("(%p,%p,%p,%d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);
  TRACE("index=%d\n", index);

  UserMarshalCB(pStubMsg, USER_MARSHAL_CB_UNMARSHALL, pFormat, &umcb);

  if (flags & USER_MARSHAL_POINTER)
  {
    align_pointer(&pStubMsg->Buffer, 4);
    /* skip pointer prefix */
    pStubMsg->Buffer += 4;
    if (pStubMsg->PointerBufferMark)
    {
      saved_buffer = pStubMsg->Buffer;
      pStubMsg->Buffer = pStubMsg->PointerBufferMark;
      pStubMsg->PointerBufferMark = NULL;
    }
    align_pointer(&pStubMsg->Buffer, 8);
  }
  else
    align_pointer(&pStubMsg->Buffer, (flags & 0xf) + 1);

  if (!fMustAlloc && !*ppMemory)
    fMustAlloc = TRUE;
  if (fMustAlloc)
  {
    *ppMemory = NdrAllocate(pStubMsg, memsize);
    memset(*ppMemory, 0, memsize);
  }

  pStubMsg->Buffer =
    pStubMsg->StubDesc->aUserMarshalQuadruple[index].pfnUnmarshall(
      &umcb.Flags, pStubMsg->Buffer, *ppMemory);

  if (saved_buffer)
  {
    STD_OVERFLOW_CHECK(pStubMsg);
    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
    pStubMsg->Buffer = saved_buffer;
  }

  return NULL;
}

/***********************************************************************
 *           NdrUserMarshalBufferSize [RPCRT4.@]
 *
 * Accumulates the wire size of a user-marshalled type.  Uses the fixed
 * size from the format string when present, otherwise asks the
 * application's pfnBufferSize.
 */
void WINAPI NdrUserMarshalBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                     unsigned char *pMemory,
                                     PFORMAT_STRING pFormat)
{
  unsigned flags = pFormat[1];
  unsigned index = *(const WORD*)&pFormat[2];
  DWORD bufsize = *(const WORD*)&pFormat[6];
  USER_MARSHAL_CB umcb;
  ULONG saved_buffer_length = 0;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
  TRACE("index=%d\n", index);

  UserMarshalCB(pStubMsg, USER_MARSHAL_CB_BUFFER_SIZE, pFormat, &umcb);

  if (flags & USER_MARSHAL_POINTER)
  {
    align_length(&pStubMsg->BufferLength, 4);
    /* skip pointer prefix */
    safe_buffer_length_increment(pStubMsg, 4);
    if
(pStubMsg->IgnoreEmbeddedPointers)
      return;
    if (pStubMsg->PointerLength)
    {
      /* size the pointer data region instead of the flat region */
      saved_buffer_length = pStubMsg->BufferLength;
      pStubMsg->BufferLength = pStubMsg->PointerLength;
      pStubMsg->PointerLength = 0;
    }
    align_length(&pStubMsg->BufferLength, 8);
  }
  else
    align_length(&pStubMsg->BufferLength, (flags & 0xf) + 1);

  if (bufsize)
  {
    TRACE("size=%d\n", bufsize);
    safe_buffer_length_increment(pStubMsg, bufsize);
  }
  else
    pStubMsg->BufferLength =
        pStubMsg->StubDesc->aUserMarshalQuadruple[index].pfnBufferSize(
          &umcb.Flags, pStubMsg->BufferLength, pMemory);

  if (saved_buffer_length)
  {
    pStubMsg->PointerLength = pStubMsg->BufferLength;
    pStubMsg->BufferLength = saved_buffer_length;
  }
}

/***********************************************************************
 *           NdrUserMarshalMemorySize [RPCRT4.@]
 *
 * Adds the type's memory size (from the format string) to MemorySize
 * and skips its wire representation in the buffer.
 */
ULONG WINAPI NdrUserMarshalMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                      PFORMAT_STRING pFormat)
{
  unsigned flags = pFormat[1];
  unsigned index = *(const WORD*)&pFormat[2];
  DWORD memsize = *(const WORD*)&pFormat[4];
  DWORD bufsize = *(const WORD*)&pFormat[6];

  TRACE("(%p,%p)\n", pStubMsg, pFormat);
  TRACE("index=%d\n", index);

  pStubMsg->MemorySize += memsize;

  if (flags & USER_MARSHAL_POINTER)
  {
    align_pointer(&pStubMsg->Buffer, 4);
    /* skip pointer prefix */
    pStubMsg->Buffer += 4;
    if (pStubMsg->IgnoreEmbeddedPointers)
      return pStubMsg->MemorySize;
    align_pointer(&pStubMsg->Buffer, 8);
  }
  else
    align_pointer(&pStubMsg->Buffer, (flags & 0xf) + 1);

  if (!bufsize)
    FIXME("not implemented for varying buffer size\n");

  pStubMsg->Buffer += bufsize;

  return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrUserMarshalFree [RPCRT4.@]
 *
 * Frees a user-marshalled type via the application's pfnFree entry.
 */
void WINAPI NdrUserMarshalFree(PMIDL_STUB_MESSAGE pStubMsg,
                               unsigned char *pMemory,
                               PFORMAT_STRING pFormat)
{
/*  unsigned flags = pFormat[1]; */
  unsigned index = *(const WORD*)&pFormat[2];
  USER_MARSHAL_CB umcb;

  TRACE("(%p,%p,%p)\n", pStubMsg, pMemory, pFormat);
  TRACE("index=%d\n", index);

  UserMarshalCB(pStubMsg, USER_MARSHAL_CB_FREE,
pFormat, &umcb);

  pStubMsg->StubDesc->aUserMarshalQuadruple[index].pfnFree(
    &umcb.Flags, pMemory);
}

/***********************************************************************
 *           NdrGetUserMarshalInfo [RPCRT4.@]
 *
 * Fills an NDR_USER_MARSHAL_INFO (level 1 only) from the callback
 * record embedded around the Flags pointer handed to user routines.
 */
RPC_STATUS RPC_ENTRY NdrGetUserMarshalInfo(ULONG *flags, ULONG level,
                                           NDR_USER_MARSHAL_INFO *umi)
{
    /* recover the containing callback record from the Flags pointer */
    USER_MARSHAL_CB *umcb = CONTAINING_RECORD(flags, USER_MARSHAL_CB, Flags);

    TRACE("(%p,%u,%p)\n", flags, level, umi);

    if (level != 1)
        return RPC_S_INVALID_ARG;

    memset(&umi->u1.Level1, 0, sizeof(umi->u1.Level1));
    umi->InformationLevel = level;

    if (umcb->Signature != USER_MARSHAL_CB_SIGNATURE)
        return RPC_S_INVALID_ARG;

    umi->u1.Level1.pfnAllocate = umcb->pStubMsg->pfnAllocate;
    umi->u1.Level1.pfnFree = umcb->pStubMsg->pfnFree;
    umi->u1.Level1.pRpcChannelBuffer = umcb->pStubMsg->pRpcChannelBuffer;

    switch (umcb->CBType)
    {
    case USER_MARSHAL_CB_MARSHALL:
    case USER_MARSHAL_CB_UNMARSHALL:
    {
        RPC_MESSAGE *msg = umcb->pStubMsg->RpcMsg;
        unsigned char *buffer_start = msg->Buffer;
        unsigned char *buffer_end =
            (unsigned char *)msg->Buffer + msg->BufferLength;

        if (umcb->pStubMsg->Buffer < buffer_start ||
            umcb->pStubMsg->Buffer > buffer_end)
            return RPC_X_INVALID_BUFFER;

        umi->u1.Level1.Buffer = umcb->pStubMsg->Buffer;
        umi->u1.Level1.BufferSize = buffer_end - umcb->pStubMsg->Buffer;
        break;
    }
    case USER_MARSHAL_CB_BUFFER_SIZE:
    case USER_MARSHAL_CB_FREE:
        break;
    default:
        WARN("unrecognised CBType %d\n", umcb->CBType);
    }

    return RPC_S_OK;
}

/***********************************************************************
 *           NdrClearOutParameters [RPCRT4.@]
 */
void WINAPI NdrClearOutParameters(PMIDL_STUB_MESSAGE pStubMsg,
                                  PFORMAT_STRING pFormat,
                                  void *ArgAddr)
{
  FIXME("(%p,%p,%p): stub\n", pStubMsg, pFormat, ArgAddr);
}

/***********************************************************************
 *           NdrConvert [RPCRT4.@]
 */
void WINAPI NdrConvert( PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat )
{
  FIXME("(pStubMsg == ^%p, pFormat == ^%p): stub.\n", pStubMsg, pFormat);
  /* FIXME: since this stub doesn't
do any converting, the proper behavior is to raise an exception */
}

/***********************************************************************
 *           NdrConvert2 [RPCRT4.@]
 */
void WINAPI NdrConvert2( PMIDL_STUB_MESSAGE pStubMsg, PFORMAT_STRING pFormat, LONG NumberParams )
{
  FIXME("(pStubMsg == ^%p, pFormat == ^%p, NumberParams == %d): stub.\n",
    pStubMsg, pFormat, NumberParams);
  /* FIXME: since this stub doesn't do any converting, the proper behavior
     is to raise an exception */
}

#include "pshpack1.h"
/* Wire layout of the RPC_FC_CSTRUCT / RPC_FC_CPSTRUCT / RPC_FC_CVSTRUCT
 * format-string header; offset_to_array_description is relative to its
 * own position in the format string. */
typedef struct _NDR_CSTRUCT_FORMAT
{
    unsigned char type;
    unsigned char alignment;
    unsigned short memory_size;
    short offset_to_array_description;
} NDR_CSTRUCT_FORMAT, NDR_CVSTRUCT_FORMAT;
#include "poppack.h"

/***********************************************************************
 *           NdrConformantStructMarshall [RPCRT4.@]
 *
 * Marshals a struct with a trailing conformant array: conformance,
 * then the flat struct plus array data in one copy, then embedded
 * pointers (RPC_FC_CPSTRUCT only).
 */
unsigned char *  WINAPI NdrConformantStructMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_CSTRUCT_FORMAT *pCStructFormat = (const NDR_CSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCArrayFormat;
    ULONG esize, bufsize;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    pFormat += sizeof(NDR_CSTRUCT_FORMAT);
    if ((pCStructFormat->type != RPC_FC_CPSTRUCT) && (pCStructFormat->type != RPC_FC_CSTRUCT))
    {
        ERR("invalid format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    pCArrayFormat = (const unsigned char *)&pCStructFormat->offset_to_array_description +
        pCStructFormat->offset_to_array_description;
    if (*pCArrayFormat != RPC_FC_CARRAY)
    {
        /* NOTE(review): prints the struct type, not *pCArrayFormat —
         * looks like a copy/paste slip in the diagnostic only */
        ERR("invalid array format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }
    esize = *(const WORD*)(pCArrayFormat+2);

    ComputeConformance(pStubMsg, pMemory + pCStructFormat->memory_size,
                       pCArrayFormat + 4, 0);

    WriteConformance(pStubMsg);

    align_pointer_clear(&pStubMsg->Buffer, pCStructFormat->alignment + 1);

    TRACE("memory_size = %d\n", pCStructFormat->memory_size);

    bufsize = safe_multiply(esize,
pStubMsg->MaxCount);
    if (pCStructFormat->memory_size + bufsize < pCStructFormat->memory_size) /* integer overflow */
    {
        ERR("integer overflow of memory_size %u with bufsize %u\n",
            pCStructFormat->memory_size, bufsize);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }
    /* copy constant sized part of struct */
    pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_copy_to_buffer(pStubMsg, pMemory, pCStructFormat->memory_size + bufsize);

    if (pCStructFormat->type == RPC_FC_CPSTRUCT)
        EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat);

    return NULL;
}

/***********************************************************************
 *           NdrConformantStructUnmarshall [RPCRT4.@]
 *
 * Unmarshals a conformant struct; on the server side without forced
 * allocation the memory may point straight into the RPC buffer.
 */
unsigned char *  WINAPI NdrConformantStructUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    const NDR_CSTRUCT_FORMAT *pCStructFormat = (const NDR_CSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCArrayFormat;
    ULONG esize, bufsize;
    unsigned char *saved_buffer;

    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

    pFormat += sizeof(NDR_CSTRUCT_FORMAT);
    if ((pCStructFormat->type != RPC_FC_CPSTRUCT) && (pCStructFormat->type != RPC_FC_CSTRUCT))
    {
        ERR("invalid format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }
    pCArrayFormat = (const unsigned char *)&pCStructFormat->offset_to_array_description +
        pCStructFormat->offset_to_array_description;
    if (*pCArrayFormat != RPC_FC_CARRAY)
    {
        /* NOTE(review): same diagnostic slip as in Marshall above */
        ERR("invalid array format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }
    esize = *(const WORD*)(pCArrayFormat+2);

    pCArrayFormat = ReadConformance(pStubMsg, pCArrayFormat + 4);

    align_pointer(&pStubMsg->Buffer, pCStructFormat->alignment + 1);

    TRACE("memory_size = %d\n", pCStructFormat->memory_size);

    bufsize = safe_multiply(esize, pStubMsg->MaxCount);
    if (pCStructFormat->memory_size + bufsize < pCStructFormat->memory_size) /* integer overflow */
    {
        ERR("integer overflow of memory_size %u with bufsize %u\n",
            pCStructFormat->memory_size, bufsize);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    if (fMustAlloc)
    {
        SIZE_T size = pCStructFormat->memory_size + bufsize;
        *ppMemory = NdrAllocate(pStubMsg, size);
    }
    else
    {
        if (!pStubMsg->IsClient && !*ppMemory)
            /* for servers, we just point straight into the RPC buffer */
            *ppMemory = pStubMsg->Buffer;
    }

    saved_buffer = pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_buffer_increment(pStubMsg, pCStructFormat->memory_size + bufsize);
    if (pCStructFormat->type == RPC_FC_CPSTRUCT)
        EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat, fMustAlloc);

    TRACE("copying %p to %p\n", saved_buffer, *ppMemory);

    /* skip the self-copy when pointing into the buffer */
    if (*ppMemory != saved_buffer)
        memcpy(*ppMemory, saved_buffer, pCStructFormat->memory_size + bufsize);

    return NULL;
}

/***********************************************************************
 *           NdrConformantStructBufferSize [RPCRT4.@]
 *
 * Accumulates the wire size of a conformant struct.
 */
void WINAPI NdrConformantStructBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_CSTRUCT_FORMAT * pCStructFormat = (const NDR_CSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCArrayFormat;
    ULONG esize;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    pFormat += sizeof(NDR_CSTRUCT_FORMAT);
    if ((pCStructFormat->type != RPC_FC_CPSTRUCT) && (pCStructFormat->type != RPC_FC_CSTRUCT))
    {
        ERR("invalid format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }
    pCArrayFormat = (const unsigned char *)&pCStructFormat->offset_to_array_description +
        pCStructFormat->offset_to_array_description;
    if (*pCArrayFormat != RPC_FC_CARRAY)
    {
        /* NOTE(review): same diagnostic slip as in Marshall above */
        ERR("invalid array format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }
    esize = *(const WORD*)(pCArrayFormat+2);

    pCArrayFormat = ComputeConformance(pStubMsg, pMemory + pCStructFormat->memory_size, pCArrayFormat+4, 0);
    SizeConformance(pStubMsg);

    align_length(&pStubMsg->BufferLength, pCStructFormat->alignment + 1);

    TRACE("memory_size = %d\n",
pCStructFormat->memory_size);

    safe_buffer_length_increment(pStubMsg, pCStructFormat->memory_size);
    safe_buffer_length_increment(pStubMsg, safe_multiply(pStubMsg->MaxCount, esize));

    if (pCStructFormat->type == RPC_FC_CPSTRUCT)
        EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrConformantStructMemorySize [RPCRT4.@]
 *
 * Unimplemented stub; always returns 0.
 */
ULONG WINAPI NdrConformantStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
    return 0;
}

/***********************************************************************
 *           NdrConformantStructFree [RPCRT4.@]
 *
 * Frees memory referenced by embedded pointers of a conformant struct
 * (RPC_FC_CPSTRUCT); plain RPC_FC_CSTRUCT has nothing to free.
 */
void WINAPI NdrConformantStructFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_CSTRUCT_FORMAT *pCStructFormat = (const NDR_CSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCArrayFormat;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    pFormat += sizeof(NDR_CSTRUCT_FORMAT);
    if ((pCStructFormat->type != RPC_FC_CPSTRUCT) && (pCStructFormat->type != RPC_FC_CSTRUCT))
    {
        ERR("invalid format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    pCArrayFormat = (const unsigned char *)&pCStructFormat->offset_to_array_description +
        pCStructFormat->offset_to_array_description;
    if (*pCArrayFormat != RPC_FC_CARRAY)
    {
        /* NOTE(review): same diagnostic slip as in Marshall above */
        ERR("invalid array format type %x\n", pCStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    ComputeConformance(pStubMsg, pMemory + pCStructFormat->memory_size,
                       pCArrayFormat + 4, 0);

    TRACE("memory_size = %d\n", pCStructFormat->memory_size);

    /* copy constant sized part of struct */
    pStubMsg->BufferMark = pStubMsg->Buffer;

    if (pCStructFormat->type == RPC_FC_CPSTRUCT)
        EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrConformantVaryingStructMarshall [RPCRT4.@]
 */
unsigned char *  WINAPI NdrConformantVaryingStructMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char
*pMemory, PFORMAT_STRING pFormat) { const NDR_CVSTRUCT_FORMAT *pCVStructFormat = (const NDR_CVSTRUCT_FORMAT *)pFormat; PFORMAT_STRING pCVArrayFormat; TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat); pFormat += sizeof(NDR_CVSTRUCT_FORMAT); if (pCVStructFormat->type != RPC_FC_CVSTRUCT) { ERR("invalid format type %x\n", pCVStructFormat->type); RpcRaiseException(RPC_S_INTERNAL_ERROR); return NULL; } pCVArrayFormat = (const unsigned char *)&pCVStructFormat->offset_to_array_description + pCVStructFormat->offset_to_array_description; array_compute_and_write_conformance(*pCVArrayFormat, pStubMsg, pMemory + pCVStructFormat->memory_size, pCVArrayFormat); align_pointer_clear(&pStubMsg->Buffer, pCVStructFormat->alignment + 1); TRACE("memory_size = %d\n", pCVStructFormat->memory_size); /* write constant sized part */ pStubMsg->BufferMark = pStubMsg->Buffer; safe_copy_to_buffer(pStubMsg, pMemory, pCVStructFormat->memory_size); array_write_variance_and_marshall(*pCVArrayFormat, pStubMsg, pMemory + pCVStructFormat->memory_size, pCVArrayFormat, FALSE /* fHasPointers */); EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat); return NULL; } /*********************************************************************** * NdrConformantVaryingStructUnmarshall [RPCRT4.@] */ unsigned char * WINAPI NdrConformantVaryingStructUnmarshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char **ppMemory, PFORMAT_STRING pFormat, unsigned char fMustAlloc) { const NDR_CVSTRUCT_FORMAT *pCVStructFormat = (const NDR_CVSTRUCT_FORMAT *)pFormat; PFORMAT_STRING pCVArrayFormat; ULONG memsize, bufsize; unsigned char *saved_buffer, *saved_array_buffer; ULONG offset; unsigned char *array_memory; TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc); pFormat += sizeof(NDR_CVSTRUCT_FORMAT); if (pCVStructFormat->type != RPC_FC_CVSTRUCT) { ERR("invalid format type %x\n", pCVStructFormat->type); RpcRaiseException(RPC_S_INTERNAL_ERROR); return NULL; } pCVArrayFormat = (const unsigned char 
*)&pCVStructFormat->offset_to_array_description + pCVStructFormat->offset_to_array_description; memsize = array_read_conformance(*pCVArrayFormat, pStubMsg, pCVArrayFormat); align_pointer(&pStubMsg->Buffer, pCVStructFormat->alignment + 1); TRACE("memory_size = %d\n", pCVStructFormat->memory_size); /* work out how much memory to allocate if we need to do so */ if (!fMustAlloc && !*ppMemory) fMustAlloc = TRUE; if (fMustAlloc) { SIZE_T size = pCVStructFormat->memory_size + memsize; *ppMemory = NdrAllocate(pStubMsg, size); } /* mark the start of the constant data */ saved_buffer = pStubMsg->BufferMark = pStubMsg->Buffer; safe_buffer_increment(pStubMsg, pCVStructFormat->memory_size); array_memory = *ppMemory + pCVStructFormat->memory_size; bufsize = array_read_variance_and_unmarshall(*pCVArrayFormat, pStubMsg, &array_memory, pCVArrayFormat, FALSE /* fMustAlloc */, FALSE /* fUseServerBufferMemory */, FALSE /* fUnmarshall */); /* save offset in case unmarshalling pointers changes it */ offset = pStubMsg->Offset; /* mark the start of the array data */ saved_array_buffer = pStubMsg->Buffer; safe_buffer_increment(pStubMsg, bufsize); EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat, fMustAlloc); /* copy the constant data */ memcpy(*ppMemory, saved_buffer, pCVStructFormat->memory_size); /* copy the array data */ TRACE("copying %p to %p\n", saved_array_buffer, *ppMemory + pCVStructFormat->memory_size); memcpy(*ppMemory + pCVStructFormat->memory_size + offset, saved_array_buffer, bufsize); if (*pCVArrayFormat == RPC_FC_C_CSTRING) TRACE("string=%s\n", debugstr_a((char *)(*ppMemory + pCVStructFormat->memory_size))); else if (*pCVArrayFormat == RPC_FC_C_WSTRING) TRACE("string=%s\n", debugstr_w((WCHAR *)(*ppMemory + pCVStructFormat->memory_size))); return NULL; } /*********************************************************************** * NdrConformantVaryingStructBufferSize [RPCRT4.@] */ void WINAPI NdrConformantVaryingStructBufferSize(PMIDL_STUB_MESSAGE pStubMsg, 
unsigned char *pMemory,
                                                 PFORMAT_STRING pFormat)
{
    const NDR_CVSTRUCT_FORMAT *pCVStructFormat = (const NDR_CVSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCVArrayFormat;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    pFormat += sizeof(NDR_CVSTRUCT_FORMAT);
    if (pCVStructFormat->type != RPC_FC_CVSTRUCT)
    {
        ERR("invalid format type %x\n", pCVStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    /* the array description lives at a self-relative offset from this field */
    pCVArrayFormat = (const unsigned char *)&pCVStructFormat->offset_to_array_description +
        pCVStructFormat->offset_to_array_description;

    /* size the conformance for the trailing array, which is stored right
     * after the fixed part of the structure in memory */
    array_compute_and_size_conformance(*pCVArrayFormat, pStubMsg,
                                       pMemory + pCVStructFormat->memory_size,
                                       pCVArrayFormat);

    align_length(&pStubMsg->BufferLength, pCVStructFormat->alignment + 1);

    TRACE("memory_size = %d\n", pCVStructFormat->memory_size);

    safe_buffer_length_increment(pStubMsg, pCVStructFormat->memory_size);
    array_buffer_size(*pCVArrayFormat, pStubMsg,
                      pMemory + pCVStructFormat->memory_size, pCVArrayFormat,
                      FALSE /* fHasPointers */);

    EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrConformantVaryingStructMemorySize [RPCRT4.@]
 *
 * Computes (and consumes from the buffer) the unmarshalled memory size
 * of a conformant varying structure.
 */
ULONG WINAPI NdrConformantVaryingStructMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                                  PFORMAT_STRING pFormat)
{
    const NDR_CVSTRUCT_FORMAT *pCVStructFormat = (const NDR_CVSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCVArrayFormat;

    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    pFormat += sizeof(NDR_CVSTRUCT_FORMAT);
    if (pCVStructFormat->type != RPC_FC_CVSTRUCT)
    {
        ERR("invalid format type %x\n", pCVStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return 0;
    }

    pCVArrayFormat = (const unsigned char *)&pCVStructFormat->offset_to_array_description +
        pCVStructFormat->offset_to_array_description;
    array_read_conformance(*pCVArrayFormat, pStubMsg, pCVArrayFormat);

    align_pointer(&pStubMsg->Buffer, pCVStructFormat->alignment + 1);

    TRACE("memory_size = %d\n", pCVStructFormat->memory_size);

    safe_buffer_increment(pStubMsg,
pCVStructFormat->memory_size);
    array_memory_size(*pCVArrayFormat, pStubMsg, pCVArrayFormat,
                      FALSE /* fHasPointers */);

    pStubMsg->MemorySize += pCVStructFormat->memory_size;
    EmbeddedPointerMemorySize(pStubMsg, pFormat);

    return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrConformantVaryingStructFree [RPCRT4.@]
 *
 * Frees memory referenced by a conformant varying structure (embedded
 * pointers and the trailing array's contents).
 */
void WINAPI NdrConformantVaryingStructFree(PMIDL_STUB_MESSAGE pStubMsg,
                                           unsigned char *pMemory,
                                           PFORMAT_STRING pFormat)
{
    const NDR_CVSTRUCT_FORMAT *pCVStructFormat = (const NDR_CVSTRUCT_FORMAT *)pFormat;
    PFORMAT_STRING pCVArrayFormat;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    pFormat += sizeof(NDR_CVSTRUCT_FORMAT);
    if (pCVStructFormat->type != RPC_FC_CVSTRUCT)
    {
        ERR("invalid format type %x\n", pCVStructFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    /* the array description lives at a self-relative offset from this field */
    pCVArrayFormat = (const unsigned char *)&pCVStructFormat->offset_to_array_description +
        pCVStructFormat->offset_to_array_description;
    array_free(*pCVArrayFormat, pStubMsg,
               pMemory + pCVStructFormat->memory_size, pCVArrayFormat,
               FALSE /* fHasPointers */);

    TRACE("memory_size = %d\n", pCVStructFormat->memory_size);

    EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
}

#include "pshpack1.h"
/* fixed-array format descriptors, packed to match the type format string:
 * SMFARRAY uses a 16-bit total size, LGFARRAY a 32-bit one */
typedef struct
{
    unsigned char type;
    unsigned char alignment;
    unsigned short total_size;
} NDR_SMFARRAY_FORMAT;

typedef struct
{
    unsigned char type;
    unsigned char alignment;
    ULONG total_size;
} NDR_LGFARRAY_FORMAT;
#include "poppack.h"

/***********************************************************************
 *           NdrFixedArrayMarshall [RPCRT4.@]
 *
 * Marshals a fixed-size array (RPC_FC_SMFARRAY or RPC_FC_LGFARRAY).
 */
unsigned char *  WINAPI NdrFixedArrayMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_SMFARRAY_FORMAT *pSmFArrayFormat = (const NDR_SMFARRAY_FORMAT *)pFormat;
    ULONG total_size;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pSmFArrayFormat->type != RPC_FC_SMFARRAY) &&
        (pSmFArrayFormat->type != RPC_FC_LGFARRAY))
    {
        ERR("invalid format type %x\n",
pSmFArrayFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    align_pointer_clear(&pStubMsg->Buffer, pSmFArrayFormat->alignment + 1);

    /* total_size is 16-bit for SMFARRAY, 32-bit for LGFARRAY */
    if (pSmFArrayFormat->type == RPC_FC_SMFARRAY)
    {
        total_size = pSmFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pSmFArrayFormat + 1);
    }
    else
    {
        const NDR_LGFARRAY_FORMAT *pLgFArrayFormat = (const NDR_LGFARRAY_FORMAT *)pFormat;
        total_size = pLgFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pLgFArrayFormat + 1);
    }

    pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_copy_to_buffer(pStubMsg, pMemory, total_size);

    pFormat = EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat);

    return NULL;
}

/***********************************************************************
 *           NdrFixedArrayUnmarshall [RPCRT4.@]
 *
 * Unmarshals a fixed-size array; on the server side, when no memory is
 * supplied, it points directly into the RPC buffer instead of copying.
 */
unsigned char *  WINAPI NdrFixedArrayUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    const NDR_SMFARRAY_FORMAT *pSmFArrayFormat = (const NDR_SMFARRAY_FORMAT *)pFormat;
    ULONG total_size;
    unsigned char *saved_buffer;

    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

    if ((pSmFArrayFormat->type != RPC_FC_SMFARRAY) &&
        (pSmFArrayFormat->type != RPC_FC_LGFARRAY))
    {
        ERR("invalid format type %x\n", pSmFArrayFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    align_pointer(&pStubMsg->Buffer, pSmFArrayFormat->alignment + 1);

    if (pSmFArrayFormat->type == RPC_FC_SMFARRAY)
    {
        total_size = pSmFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pSmFArrayFormat + 1);
    }
    else
    {
        const NDR_LGFARRAY_FORMAT *pLgFArrayFormat = (const NDR_LGFARRAY_FORMAT *)pFormat;
        total_size = pLgFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pLgFArrayFormat + 1);
    }

    if (fMustAlloc)
        *ppMemory = NdrAllocate(pStubMsg, total_size);
    else
    {
        if (!pStubMsg->IsClient && !*ppMemory)
            /* for servers, we just point straight into the RPC buffer */
            *ppMemory = pStubMsg->Buffer;
    }

    saved_buffer = pStubMsg->BufferMark = pStubMsg->Buffer;
safe_buffer_increment(pStubMsg, total_size);
    pFormat = EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat, fMustAlloc);

    TRACE("copying %p to %p\n", saved_buffer, *ppMemory);
    /* no copy needed when *ppMemory aliases the RPC buffer (server case) */
    if (*ppMemory != saved_buffer)
        memcpy(*ppMemory, saved_buffer, total_size);

    return NULL;
}

/***********************************************************************
 *           NdrFixedArrayBufferSize [RPCRT4.@]
 *
 * Adds the wire size of a fixed-size array to BufferLength.
 */
void WINAPI NdrFixedArrayBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_SMFARRAY_FORMAT *pSmFArrayFormat = (const NDR_SMFARRAY_FORMAT *)pFormat;
    ULONG total_size;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pSmFArrayFormat->type != RPC_FC_SMFARRAY) &&
        (pSmFArrayFormat->type != RPC_FC_LGFARRAY))
    {
        ERR("invalid format type %x\n", pSmFArrayFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    align_length(&pStubMsg->BufferLength, pSmFArrayFormat->alignment + 1);

    /* total_size is 16-bit for SMFARRAY, 32-bit for LGFARRAY */
    if (pSmFArrayFormat->type == RPC_FC_SMFARRAY)
    {
        total_size = pSmFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pSmFArrayFormat + 1);
    }
    else
    {
        const NDR_LGFARRAY_FORMAT *pLgFArrayFormat = (const NDR_LGFARRAY_FORMAT *)pFormat;
        total_size = pLgFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pLgFArrayFormat + 1);
    }
    safe_buffer_length_increment(pStubMsg, total_size);

    EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrFixedArrayMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of a fixed-size array and
 * advances the buffer past its wire representation.
 */
ULONG WINAPI NdrFixedArrayMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    const NDR_SMFARRAY_FORMAT *pSmFArrayFormat = (const NDR_SMFARRAY_FORMAT *)pFormat;
    ULONG total_size;

    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    if ((pSmFArrayFormat->type != RPC_FC_SMFARRAY) &&
        (pSmFArrayFormat->type != RPC_FC_LGFARRAY))
    {
        ERR("invalid format type %x\n", pSmFArrayFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return 0;
    }

    align_pointer(&pStubMsg->Buffer, pSmFArrayFormat->alignment +
1);

    if (pSmFArrayFormat->type == RPC_FC_SMFARRAY)
    {
        total_size = pSmFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pSmFArrayFormat + 1);
    }
    else
    {
        const NDR_LGFARRAY_FORMAT *pLgFArrayFormat = (const NDR_LGFARRAY_FORMAT *)pFormat;
        total_size = pLgFArrayFormat->total_size;
        pFormat = (const unsigned char *)(pLgFArrayFormat + 1);
    }
    pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_buffer_increment(pStubMsg, total_size);
    pStubMsg->MemorySize += total_size;

    EmbeddedPointerMemorySize(pStubMsg, pFormat);

    return total_size;
}

/***********************************************************************
 *           NdrFixedArrayFree [RPCRT4.@]
 *
 * Frees embedded pointers referenced from a fixed-size array.
 */
void WINAPI NdrFixedArrayFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    const NDR_SMFARRAY_FORMAT *pSmFArrayFormat = (const NDR_SMFARRAY_FORMAT *)pFormat;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pSmFArrayFormat->type != RPC_FC_SMFARRAY) &&
        (pSmFArrayFormat->type != RPC_FC_LGFARRAY))
    {
        ERR("invalid format type %x\n", pSmFArrayFormat->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    /* skip the descriptor; only the embedded-pointer description is needed */
    if (pSmFArrayFormat->type == RPC_FC_SMFARRAY)
        pFormat = (const unsigned char *)(pSmFArrayFormat + 1);
    else
    {
        const NDR_LGFARRAY_FORMAT *pLgFArrayFormat = (const NDR_LGFARRAY_FORMAT *)pFormat;
        pFormat = (const unsigned char *)(pLgFArrayFormat + 1);
    }

    EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrVaryingArrayMarshall [RPCRT4.@]
 *
 * Marshals a varying array (RPC_FC_SMVARRAY or RPC_FC_LGVARRAY): writes
 * the variance (Offset/ActualCount) followed by the selected elements.
 */
unsigned char *  WINAPI NdrVaryingArrayMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char alignment;
    DWORD elements, esize;
    ULONG bufsize;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pFormat[0] != RPC_FC_SMVARRAY) &&
        (pFormat[0] != RPC_FC_LGVARRAY))
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    alignment = pFormat[1] + 1;

    if (pFormat[0] == RPC_FC_SMVARRAY)
    {
        pFormat += 2;
/* skip total_size, read the declared element count */
        pFormat += sizeof(WORD);
        elements = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
    }
    else
    {
        pFormat += 2;
        pFormat += sizeof(DWORD);
        elements = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
    }

    esize = *(const WORD*)pFormat;
    pFormat += sizeof(WORD);

    pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
    /* reject variance that would read past the declared array bounds */
    if ((pStubMsg->ActualCount > elements) ||
        (pStubMsg->ActualCount + pStubMsg->Offset > elements))
    {
        RpcRaiseException(RPC_S_INVALID_BOUND);
        return NULL;
    }

    WriteVariance(pStubMsg);

    align_pointer_clear(&pStubMsg->Buffer, alignment);

    bufsize = safe_multiply(esize, pStubMsg->ActualCount);
    pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_copy_to_buffer(pStubMsg, pMemory + pStubMsg->Offset, bufsize);

    EmbeddedPointerMarshall(pStubMsg, pMemory, pFormat);

    return NULL;
}

/***********************************************************************
 *           NdrVaryingArrayUnmarshall [RPCRT4.@]
 *
 * Unmarshals a varying array: reads the variance, then copies the
 * transmitted elements into the destination at the received offset.
 */
unsigned char *  WINAPI NdrVaryingArrayUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    unsigned char alignment;
    DWORD size, elements, esize;
    ULONG bufsize;
    unsigned char *saved_buffer;
    ULONG offset;

    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);

    if ((pFormat[0] != RPC_FC_SMVARRAY) &&
        (pFormat[0] != RPC_FC_LGVARRAY))
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    alignment = pFormat[1] + 1;

    if (pFormat[0] == RPC_FC_SMVARRAY)
    {
        pFormat += 2;
        size = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
        elements = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
    }
    else
    {
        pFormat += 2;
        size = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
        elements = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
    }

    esize = *(const WORD*)pFormat;
    pFormat += sizeof(WORD);

    pFormat = ReadVariance(pStubMsg, pFormat, elements);

    align_pointer(&pStubMsg->Buffer, alignment);

    bufsize = safe_multiply(esize, pStubMsg->ActualCount);
    /* save offset in case unmarshalling embedded pointers changes it */
    offset = pStubMsg->Offset;

    if (!fMustAlloc &&
!*ppMemory)
        fMustAlloc = TRUE;
    if (fMustAlloc)
        *ppMemory = NdrAllocate(pStubMsg, size);
    saved_buffer = pStubMsg->BufferMark = pStubMsg->Buffer;
    safe_buffer_increment(pStubMsg, bufsize);

    EmbeddedPointerUnmarshall(pStubMsg, saved_buffer, *ppMemory, pFormat, fMustAlloc);

    /* place the transmitted elements at the wire-specified offset */
    memcpy(*ppMemory + offset, saved_buffer, bufsize);

    return NULL;
}

/***********************************************************************
 *           NdrVaryingArrayBufferSize [RPCRT4.@]
 *
 * Adds the wire size of a varying array (variance + selected elements)
 * to BufferLength.
 */
void WINAPI NdrVaryingArrayBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char alignment;
    DWORD elements, esize;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pFormat[0] != RPC_FC_SMVARRAY) &&
        (pFormat[0] != RPC_FC_LGVARRAY))
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    alignment = pFormat[1] + 1;

    if (pFormat[0] == RPC_FC_SMVARRAY)
    {
        pFormat += 2;
        /* skip total_size, read the declared element count */
        pFormat += sizeof(WORD);
        elements = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
    }
    else
    {
        pFormat += 2;
        pFormat += sizeof(DWORD);
        elements = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
    }

    esize = *(const WORD*)pFormat;
    pFormat += sizeof(WORD);

    pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
    /* reject variance that would read past the declared array bounds */
    if ((pStubMsg->ActualCount > elements) ||
        (pStubMsg->ActualCount + pStubMsg->Offset > elements))
    {
        RpcRaiseException(RPC_S_INVALID_BOUND);
        return;
    }

    SizeVariance(pStubMsg);

    align_length(&pStubMsg->BufferLength, alignment);

    safe_buffer_length_increment(pStubMsg, safe_multiply(esize, pStubMsg->ActualCount));

    EmbeddedPointerBufferSize(pStubMsg, pMemory, pFormat);
}

/***********************************************************************
 *           NdrVaryingArrayMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of a varying array and advances
 * the buffer past its wire representation.
 */
ULONG WINAPI NdrVaryingArrayMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    unsigned char alignment;
    DWORD size, elements, esize;

    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    if ((pFormat[0] != RPC_FC_SMVARRAY) &&
        (pFormat[0] != RPC_FC_LGVARRAY))
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return 0;
    }

    alignment = pFormat[1] + 1;

    if (pFormat[0] == RPC_FC_SMVARRAY)
    {
        pFormat += 2;
        size = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
        elements = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
    }
    else
    {
        pFormat += 2;
        size = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
        elements = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
    }

    esize = *(const WORD*)pFormat;
    pFormat += sizeof(WORD);

    pFormat = ReadVariance(pStubMsg, pFormat, elements);

    align_pointer(&pStubMsg->Buffer, alignment);
    safe_buffer_increment(pStubMsg, safe_multiply(esize, pStubMsg->ActualCount));
    pStubMsg->MemorySize += size;

    EmbeddedPointerMemorySize(pStubMsg, pFormat);

    return pStubMsg->MemorySize;
}

/***********************************************************************
 *           NdrVaryingArrayFree [RPCRT4.@]
 *
 * Frees embedded pointers referenced from a varying array.
 */
void WINAPI NdrVaryingArrayFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    DWORD elements;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);

    if ((pFormat[0] != RPC_FC_SMVARRAY) &&
        (pFormat[0] != RPC_FC_LGVARRAY))
    {
        ERR("invalid format type %x\n", pFormat[0]);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return;
    }

    if (pFormat[0] == RPC_FC_SMVARRAY)
    {
        pFormat += 2;
        pFormat += sizeof(WORD);
        elements = *(const WORD*)pFormat;
        pFormat += sizeof(WORD);
    }
    else
    {
        pFormat += 2;
        pFormat += sizeof(DWORD);
        elements = *(const DWORD*)pFormat;
        pFormat += sizeof(DWORD);
    }

    /* skip the element size field */
    pFormat += sizeof(WORD);

    pFormat = ComputeVariance(pStubMsg, pMemory, pFormat, 0);
    if ((pStubMsg->ActualCount > elements) ||
        (pStubMsg->ActualCount + pStubMsg->Offset > elements))
    {
        RpcRaiseException(RPC_S_INVALID_BOUND);
        return;
    }

    EmbeddedPointerFree(pStubMsg, pMemory, pFormat);
}

/* Read a union discriminant of base type fc out of memory at pMemory. */
static ULONG get_discriminant(unsigned char fc, const unsigned char *pMemory)
{
    switch (fc)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        return *pMemory;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
    case
RPC_FC_ENUM16:
        return *(const USHORT *)pMemory;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
        return *(const ULONG *)pMemory;
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
        return *(const ULONG_PTR *)pMemory;
    default:
        FIXME("Unhandled base type: 0x%02x\n", fc);
        return 0;
    }
}

/* Walk the union's arm table and return a pointer to the format
 * description of the arm matching discriminant, the default arm's
 * description, or NULL for an empty default case (raises
 * RPC_S_INVALID_TAG when there is no match and no default). */
static PFORMAT_STRING get_arm_offset_from_union_arm_selector(PMIDL_STUB_MESSAGE pStubMsg,
                                                             ULONG discriminant,
                                                             PFORMAT_STRING pFormat)
{
    unsigned short num_arms, arm, type;

    /* low 12 bits hold the arm count */
    num_arms = *(const SHORT*)pFormat & 0x0fff;
    pFormat += 2;
    for(arm = 0; arm < num_arms; arm++)
    {
        if(discriminant == *(const ULONG*)pFormat)
        {
            pFormat += 4;
            break;
        }
        pFormat += 6;
    }

    type = *(const unsigned short*)pFormat;
    TRACE("type %04x\n", type);
    if(arm == num_arms) /* default arm extras */
    {
        if(type == 0xffff)
        {
            ERR("no arm for 0x%x and no default case\n", discriminant);
            RpcRaiseException(RPC_S_INVALID_TAG);
            return NULL;
        }
        if(type == 0)
        {
            TRACE("falling back to empty default case for 0x%x\n", discriminant);
            return NULL;
        }
    }
    return pFormat;
}

/* Marshal the union arm selected by discriminant.  Simple types (arm
 * type 0x8000 | fc) go through NdrBaseTypeMarshall; pointer arms get
 * special handling of the deferred pointer buffer. */
static unsigned char *union_arm_marshall(PMIDL_STUB_MESSAGE pStubMsg, unsigned char *pMemory, ULONG discriminant, PFORMAT_STRING pFormat)
{
    unsigned short type;

    /* skip the union's memory size field */
    pFormat += 2;

    pFormat = get_arm_offset_from_union_arm_selector(pStubMsg, discriminant, pFormat);
    if(!pFormat)
        return NULL;

    type = *(const unsigned short*)pFormat;
    if((type & 0xff00) == 0x8000)
    {
        unsigned char basetype = LOBYTE(type);
        return NdrBaseTypeMarshall(pStubMsg, pMemory, &basetype);
    }
    else
    {
        PFORMAT_STRING desc = pFormat + *(const SHORT*)pFormat;
        NDR_MARSHALL m = NdrMarshaller[*desc & NDR_TABLE_MASK];
        if (m)
        {
            unsigned char *saved_buffer = NULL;
            BOOL pointer_buffer_mark_set = FALSE;
            switch(*desc)
            {
            case RPC_FC_RP:
            case RPC_FC_UP:
            case RPC_FC_OP:
            case RPC_FC_FP:
                align_pointer_clear(&pStubMsg->Buffer, 4);
                saved_buffer = pStubMsg->Buffer;
                if (pStubMsg->PointerBufferMark)
                {
                    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
                    pStubMsg->PointerBufferMark = NULL;
                    pointer_buffer_mark_set = TRUE;
                }
                else
safe_buffer_increment(pStubMsg, 4); /* for pointer ID */

                PointerMarshall(pStubMsg, saved_buffer, *(unsigned char **)pMemory, desc);
                if (pointer_buffer_mark_set)
                {
                    STD_OVERFLOW_CHECK(pStubMsg);
                    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
                    if (saved_buffer + 4 > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
                    {
                        ERR("buffer overflow - saved_buffer = %p, BufferEnd = %p\n",
                            saved_buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
                        RpcRaiseException(RPC_X_BAD_STUB_DATA);
                    }
                    pStubMsg->Buffer = saved_buffer + 4;
                }
                break;
            default:
                m(pStubMsg, pMemory, desc);
            }
        }
        else FIXME("no marshaller for embedded type %02x\n", *desc);
    }
    return NULL;
}

/* Unmarshal the union arm selected by discriminant into *ppMemory.
 * Simple types go through NdrBaseTypeUnmarshall; pointer arms get
 * special handling of the deferred pointer buffer. */
static unsigned char *union_arm_unmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                ULONG discriminant,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    unsigned short type;

    /* skip the union's memory size field */
    pFormat += 2;

    pFormat = get_arm_offset_from_union_arm_selector(pStubMsg, discriminant, pFormat);
    if(!pFormat)
        return NULL;

    type = *(const unsigned short*)pFormat;
    if((type & 0xff00) == 0x8000)
    {
        unsigned char basetype = LOBYTE(type);
        return NdrBaseTypeUnmarshall(pStubMsg, ppMemory, &basetype, FALSE);
    }
    else
    {
        PFORMAT_STRING desc = pFormat + *(const SHORT*)pFormat;
        NDR_UNMARSHALL m = NdrUnmarshaller[*desc & NDR_TABLE_MASK];
        if (m)
        {
            unsigned char *saved_buffer = NULL;
            BOOL pointer_buffer_mark_set = FALSE;
            switch(*desc)
            {
            case RPC_FC_RP:
            case RPC_FC_UP:
            case RPC_FC_OP:
            case RPC_FC_FP:
                align_pointer(&pStubMsg->Buffer, 4);
                saved_buffer = pStubMsg->Buffer;
                if (pStubMsg->PointerBufferMark)
                {
                    pStubMsg->Buffer = pStubMsg->PointerBufferMark;
                    pStubMsg->PointerBufferMark = NULL;
                    pointer_buffer_mark_set = TRUE;
                }
                else
                    pStubMsg->Buffer += 4; /* for pointer ID */

                if (saved_buffer + 4 > pStubMsg->BufferEnd)
                {
                    ERR("buffer overflow - saved_buffer = %p, BufferEnd = %p\n",
                        saved_buffer, pStubMsg->BufferEnd);
                    RpcRaiseException(RPC_X_BAD_STUB_DATA);
                }

                PointerUnmarshall(pStubMsg, saved_buffer, *(unsigned char ***)ppMemory,
**(unsigned char ***)ppMemory, desc, fMustAlloc);
                if (pointer_buffer_mark_set)
                {
                    STD_OVERFLOW_CHECK(pStubMsg);
                    pStubMsg->PointerBufferMark = pStubMsg->Buffer;
                    pStubMsg->Buffer = saved_buffer + 4;
                }
                break;
            default:
                m(pStubMsg, ppMemory, desc, fMustAlloc);
            }
        }
        else FIXME("no marshaller for embedded type %02x\n", *desc);
    }
    return NULL;
}

/* Add the buffer size of the union arm selected by discriminant;
 * pointer arms account for their pointee via the deferred pointer
 * length (PointerLength) bookkeeping. */
static void union_arm_buffer_size(PMIDL_STUB_MESSAGE pStubMsg,
                                  unsigned char *pMemory,
                                  ULONG discriminant,
                                  PFORMAT_STRING pFormat)
{
    unsigned short type;

    /* skip the union's memory size field */
    pFormat += 2;

    pFormat = get_arm_offset_from_union_arm_selector(pStubMsg, discriminant, pFormat);
    if(!pFormat)
        return;

    type = *(const unsigned short*)pFormat;
    if((type & 0xff00) == 0x8000)
    {
        unsigned char basetype = LOBYTE(type);
        NdrBaseTypeBufferSize(pStubMsg, pMemory, &basetype);
    }
    else
    {
        PFORMAT_STRING desc = pFormat + *(const SHORT*)pFormat;
        NDR_BUFFERSIZE m = NdrBufferSizer[*desc & NDR_TABLE_MASK];
        if (m)
        {
            switch(*desc)
            {
            case RPC_FC_RP:
            case RPC_FC_UP:
            case RPC_FC_OP:
            case RPC_FC_FP:
                align_length(&pStubMsg->BufferLength, 4);
                safe_buffer_length_increment(pStubMsg, 4); /* for pointer ID */
                if (!pStubMsg->IgnoreEmbeddedPointers)
                {
                    int saved_buffer_length = pStubMsg->BufferLength;
                    pStubMsg->BufferLength = pStubMsg->PointerLength;
                    pStubMsg->PointerLength = 0;
                    if(!pStubMsg->BufferLength)
                        ERR("BufferLength == 0??\n");
                    PointerBufferSize(pStubMsg, *(unsigned char **)pMemory, desc);
                    pStubMsg->PointerLength = pStubMsg->BufferLength;
                    pStubMsg->BufferLength = saved_buffer_length;
                }
                break;
            default:
                m(pStubMsg, pMemory, desc);
            }
        }
        else FIXME("no buffersizer for embedded type %02x\n", *desc);
    }
}

/* Compute the memory size of the union arm selected by discriminant.
 * Also advances pStubMsg->Memory past the union. */
static ULONG union_arm_memory_size(PMIDL_STUB_MESSAGE pStubMsg,
                                   ULONG discriminant,
                                   PFORMAT_STRING pFormat)
{
    unsigned short type, size;

    size = *(const unsigned short*)pFormat;
    pStubMsg->Memory += size;
    pFormat += 2;

    pFormat = get_arm_offset_from_union_arm_selector(pStubMsg, discriminant, pFormat);
    if(!pFormat)
        return 0;

    type = *(const unsigned short*)pFormat;
    if((type & 0xff00) == 0x8000)
    {
        return
NdrBaseTypeMemorySize(pStubMsg, pFormat);
    }
    else
    {
        PFORMAT_STRING desc = pFormat + *(const SHORT*)pFormat;
        NDR_MEMORYSIZE m = NdrMemorySizer[*desc & NDR_TABLE_MASK];
        unsigned char *saved_buffer;
        if (m)
        {
            switch(*desc)
            {
            case RPC_FC_RP:
            case RPC_FC_UP:
            case RPC_FC_OP:
            case RPC_FC_FP:
                align_pointer(&pStubMsg->Buffer, 4);
                saved_buffer = pStubMsg->Buffer;
                safe_buffer_increment(pStubMsg, 4);
                align_length(&pStubMsg->MemorySize, sizeof(void *));
                pStubMsg->MemorySize += sizeof(void *);
                if (!pStubMsg->IgnoreEmbeddedPointers)
                    /* NOTE(review): passes pFormat here where the other
                     * union_arm_* helpers pass desc — confirm intended */
                    PointerMemorySize(pStubMsg, saved_buffer, pFormat);
                break;
            default:
                return m(pStubMsg, desc);
            }
        }
        else FIXME("no marshaller for embedded type %02x\n", *desc);
    }

    TRACE("size %d\n", size);
    return size;
}

/* Free memory referenced by the union arm selected by discriminant;
 * only non-simple arms (pointers, structures, ...) need freeing. */
static void union_arm_free(PMIDL_STUB_MESSAGE pStubMsg,
                           unsigned char *pMemory,
                           ULONG discriminant,
                           PFORMAT_STRING pFormat)
{
    unsigned short type;

    /* skip the union's memory size field */
    pFormat += 2;

    pFormat = get_arm_offset_from_union_arm_selector(pStubMsg, discriminant, pFormat);
    if(!pFormat)
        return;

    type = *(const unsigned short*)pFormat;
    if((type & 0xff00) != 0x8000)
    {
        PFORMAT_STRING desc = pFormat + *(const SHORT*)pFormat;
        NDR_FREE m = NdrFreer[*desc & NDR_TABLE_MASK];
        if (m)
        {
            switch(*desc)
            {
            case RPC_FC_RP:
            case RPC_FC_UP:
            case RPC_FC_OP:
            case RPC_FC_FP:
                PointerFree(pStubMsg, *(unsigned char **)pMemory, desc);
                break;
            default:
                m(pStubMsg, pMemory, desc);
            }
        }
    }
}

/***********************************************************************
 *           NdrEncapsulatedUnionMarshall [RPCRT4.@]
 *
 * Marshals an encapsulated union: the discriminant is stored inline in
 * memory before the union arm, at an offset given by the format's
 * increment nibble.
 */
unsigned char *  WINAPI NdrEncapsulatedUnionMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;
    unsigned char increment;
    ULONG switch_value;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;

    /* low nibble = discriminant base type, high nibble = arm offset */
    switch_type = *pFormat & 0xf;
    increment = (*pFormat & 0xf0) >> 4;
    pFormat++;

    align_pointer_clear(&pStubMsg->Buffer, increment);

    switch_value = get_discriminant(switch_type, pMemory);
    TRACE("got switch value 0x%x\n", switch_value);
NdrBaseTypeMarshall(pStubMsg, pMemory, &switch_type);
    pMemory += increment;

    return union_arm_marshall(pStubMsg, pMemory, switch_value, pFormat);
}

/***********************************************************************
 *           NdrEncapsulatedUnionUnmarshall [RPCRT4.@]
 *
 * Unmarshals an encapsulated union: reads the discriminant from the
 * buffer, then unmarshals the selected arm just past it in memory.
 */
unsigned char *  WINAPI NdrEncapsulatedUnionUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    unsigned char switch_type;
    unsigned char increment;
    ULONG switch_value;
    unsigned short size;
    unsigned char *pMemoryArm;

    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);
    pFormat++;

    /* low nibble = discriminant base type, high nibble = arm offset */
    switch_type = *pFormat & 0xf;
    increment = (*pFormat & 0xf0) >> 4;
    pFormat++;

    align_pointer(&pStubMsg->Buffer, increment);
    switch_value = get_discriminant(switch_type, pStubMsg->Buffer);
    TRACE("got switch value 0x%x\n", switch_value);

    size = *(const unsigned short*)pFormat + increment;
    if (!fMustAlloc && !*ppMemory)
        fMustAlloc = TRUE;
    if (fMustAlloc)
        *ppMemory = NdrAllocate(pStubMsg, size);

    /* we can't pass fMustAlloc=TRUE into the marshaller for the arm
     * since the arm is part of the memory block that is encompassed by
     * the whole union. Memory is forced to allocate when pointers
     * are set to NULL, so we emulate that part of fMustAlloc=TRUE by
     * clearing the memory we pass in to the unmarshaller */
    if (fMustAlloc)
        memset(*ppMemory, 0, size);

    NdrBaseTypeUnmarshall(pStubMsg, ppMemory, &switch_type, FALSE);
    pMemoryArm = *ppMemory + increment;

    return union_arm_unmarshall(pStubMsg, &pMemoryArm, switch_value, pFormat, FALSE);
}

/***********************************************************************
 *           NdrEncapsulatedUnionBufferSize [RPCRT4.@]
 *
 * Adds the wire size of an encapsulated union (discriminant + arm) to
 * BufferLength.
 */
void WINAPI NdrEncapsulatedUnionBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;
    unsigned char increment;
    ULONG switch_value;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;

    switch_type = *pFormat & 0xf;
    increment = (*pFormat & 0xf0) >> 4;
    pFormat++;

    align_length(&pStubMsg->BufferLength, increment);
    switch_value = get_discriminant(switch_type, pMemory);
    TRACE("got switch value 0x%x\n", switch_value);

    /* Add discriminant size */
    NdrBaseTypeBufferSize(pStubMsg, (unsigned char *)&switch_value, &switch_type);
    pMemory += increment;

    union_arm_buffer_size(pStubMsg, pMemory, switch_value, pFormat);
}

/***********************************************************************
 *           NdrEncapsulatedUnionMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of an encapsulated union.
 * NOTE(review): unlike the Marshall/BufferSize variants, this reads the
 * switch byte without first skipping the leading format type byte —
 * confirm against the format-string layout.
 */
ULONG WINAPI NdrEncapsulatedUnionMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;
    unsigned char increment;
    ULONG switch_value;

    switch_type = *pFormat & 0xf;
    increment = (*pFormat & 0xf0) >> 4;
    pFormat++;

    align_pointer(&pStubMsg->Buffer, increment);
    switch_value = get_discriminant(switch_type, pStubMsg->Buffer);
    TRACE("got switch value 0x%x\n", switch_value);

    pStubMsg->Memory += increment;

    return increment + union_arm_memory_size(pStubMsg, switch_value,
                                             pFormat + *(const SHORT*)pFormat);
}

/***********************************************************************
 *           NdrEncapsulatedUnionFree [RPCRT4.@]
 *
 * Frees memory referenced by the selected arm of an encapsulated union.
 */
void WINAPI
NdrEncapsulatedUnionFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;
    unsigned char increment;
    ULONG switch_value;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;

    /* low nibble = discriminant base type, high nibble = arm offset */
    switch_type = *pFormat & 0xf;
    increment = (*pFormat & 0xf0) >> 4;
    pFormat++;

    switch_value = get_discriminant(switch_type, pMemory);
    TRACE("got switch value 0x%x\n", switch_value);

    pMemory += increment;

    union_arm_free(pStubMsg, pMemory, switch_value, pFormat);
}

/***********************************************************************
 *           NdrNonEncapsulatedUnionMarshall [RPCRT4.@]
 *
 * Marshals a non-encapsulated union: the discriminant is computed from
 * a correlation descriptor (ComputeConformance leaves it in MaxCount)
 * and marshalled separately from the union memory.
 */
unsigned char *  WINAPI NdrNonEncapsulatedUnionMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;

    switch_type = *pFormat;
    pFormat++;

    pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, 0);
    TRACE("got switch value 0x%lx\n", pStubMsg->MaxCount);
    /* Marshall discriminant */
    NdrBaseTypeMarshall(pStubMsg, (unsigned char *)&pStubMsg->MaxCount, &switch_type);

    return union_arm_marshall(pStubMsg, pMemory, pStubMsg->MaxCount,
                              pFormat + *(const SHORT*)pFormat);
}

/* Read a non-encapsulated union's discriminant from the buffer and
 * advance *ppFormat past the switch type byte and the (unneeded)
 * correlation descriptor. */
static LONG unmarshall_discriminant(PMIDL_STUB_MESSAGE pStubMsg,
                                    PFORMAT_STRING *ppFormat)
{
    LONG discriminant = 0;

    switch(**ppFormat)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
    {
        UCHAR d;
        safe_copy_from_buffer(pStubMsg, &d, sizeof(d));
        discriminant = d;
        break;
    }
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
    case RPC_FC_ENUM16:
    {
        USHORT d;
        align_pointer(&pStubMsg->Buffer, sizeof(USHORT));
        safe_copy_from_buffer(pStubMsg, &d, sizeof(d));
        discriminant = d;
        break;
    }
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    {
        ULONG d;
        align_pointer(&pStubMsg->Buffer, sizeof(ULONG));
        safe_copy_from_buffer(pStubMsg, &d, sizeof(d));
        discriminant = d;
        break;
    }
    default:
        FIXME("Unhandled base type: 0x%02x\n", **ppFormat);
    }
    (*ppFormat)++;

    *ppFormat =
SkipConformance(pStubMsg, *ppFormat);
    return discriminant;
}

/**********************************************************************
 *           NdrNonEncapsulatedUnionUnmarshall [RPCRT4.@]
 *
 * Unmarshals a non-encapsulated union: the discriminant is read from
 * the wire first, then the selected arm is unmarshalled.
 */
unsigned char *  WINAPI NdrNonEncapsulatedUnionUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    LONG discriminant;
    unsigned short size;

    TRACE("(%p, %p, %p, %d)\n", pStubMsg, ppMemory, pFormat, fMustAlloc);
    pFormat++;

    /* Unmarshall discriminant */
    discriminant = unmarshall_discriminant(pStubMsg, &pFormat);
    TRACE("unmarshalled discriminant %x\n", discriminant);

    /* jump to the arm table via the self-relative offset */
    pFormat += *(const SHORT*)pFormat;

    size = *(const unsigned short*)pFormat;

    if (!fMustAlloc && !*ppMemory)
        fMustAlloc = TRUE;
    if (fMustAlloc)
        *ppMemory = NdrAllocate(pStubMsg, size);

    /* we can't pass fMustAlloc=TRUE into the marshaller for the arm
     * since the arm is part of the memory block that is encompassed by
     * the whole union. Memory is forced to allocate when pointers
     * are set to NULL, so we emulate that part of fMustAlloc=TRUE by
     * clearing the memory we pass in to the unmarshaller */
    if (fMustAlloc)
        memset(*ppMemory, 0, size);

    return union_arm_unmarshall(pStubMsg, ppMemory, discriminant, pFormat, FALSE);
}

/***********************************************************************
 *           NdrNonEncapsulatedUnionBufferSize [RPCRT4.@]
 *
 * Adds the wire size of a non-encapsulated union (discriminant + arm)
 * to BufferLength.
 */
void WINAPI NdrNonEncapsulatedUnionBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    unsigned char switch_type;

    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;

    switch_type = *pFormat;
    pFormat++;

    pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, 0);
    TRACE("got switch value 0x%lx\n", pStubMsg->MaxCount);
    /* Add discriminant size */
    NdrBaseTypeBufferSize(pStubMsg, (unsigned char *)&pStubMsg->MaxCount, &switch_type);

    union_arm_buffer_size(pStubMsg, pMemory, pStubMsg->MaxCount,
                          pFormat + *(const SHORT*)pFormat);
}
/***********************************************************************
 *           NdrNonEncapsulatedUnionMemorySize [RPCRT4.@]
 *
 * Computes the unmarshalled memory size of a non-encapsulated union,
 * consuming the discriminant from the buffer.
 */
ULONG WINAPI NdrNonEncapsulatedUnionMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    ULONG discriminant;

    pFormat++;
    /* Unmarshall discriminant */
    discriminant = unmarshall_discriminant(pStubMsg, &pFormat);
    TRACE("unmarshalled discriminant 0x%x\n", discriminant);

    return union_arm_memory_size(pStubMsg, discriminant,
                                 pFormat + *(const SHORT*)pFormat);
}

/***********************************************************************
 *           NdrNonEncapsulatedUnionFree [RPCRT4.@]
 *
 * Frees memory referenced by the selected arm of a non-encapsulated
 * union; the discriminant is recomputed via ComputeConformance.
 */
void WINAPI NdrNonEncapsulatedUnionFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    TRACE("(%p, %p, %p)\n", pStubMsg, pMemory, pFormat);
    pFormat++;
    pFormat++;

    pFormat = ComputeConformance(pStubMsg, pMemory, pFormat, 0);
    TRACE("got switch value 0x%lx\n", pStubMsg->MaxCount);

    union_arm_free(pStubMsg, pMemory, pStubMsg->MaxCount,
                   pFormat + *(const SHORT*)pFormat);
}

/***********************************************************************
 *           NdrByteCountPointerMarshall [RPCRT4.@]
 *
 * Unimplemented stub.
 */
unsigned char *  WINAPI NdrByteCountPointerMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
    return NULL;
}

/***********************************************************************
 *           NdrByteCountPointerUnmarshall [RPCRT4.@]
 *
 * Unimplemented stub.
 */
unsigned char *  WINAPI NdrByteCountPointerUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    FIXME("stub\n");
    return NULL;
}

/***********************************************************************
 *           NdrByteCountPointerBufferSize [RPCRT4.@]
 *
 * Unimplemented stub.
 */
void WINAPI NdrByteCountPointerBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
}

/***********************************************************************
 *           NdrByteCountPointerMemorySize [internal]
 */
static ULONG WINAPI
NdrByteCountPointerMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
    return 0;
}

/***********************************************************************
 *           NdrByteCountPointerFree [RPCRT4.@]
 *
 * Unimplemented stub.
 */
void WINAPI NdrByteCountPointerFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
}

/***********************************************************************
 *           NdrXmitOrRepAsMarshall [RPCRT4.@]
 *
 * Unimplemented stub.
 */
unsigned char *  WINAPI NdrXmitOrRepAsMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
    return NULL;
}

/***********************************************************************
 *           NdrXmitOrRepAsUnmarshall [RPCRT4.@]
 *
 * Unimplemented stub.
 */
unsigned char *  WINAPI NdrXmitOrRepAsUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char **ppMemory,
                                PFORMAT_STRING pFormat,
                                unsigned char fMustAlloc)
{
    FIXME("stub\n");
    return NULL;
}

/***********************************************************************
 *           NdrXmitOrRepAsBufferSize [RPCRT4.@]
 *
 * Unimplemented stub.
 */
void WINAPI NdrXmitOrRepAsBufferSize(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
}

/***********************************************************************
 *           NdrXmitOrRepAsMemorySize [RPCRT4.@]
 *
 * Unimplemented stub.
 */
ULONG WINAPI NdrXmitOrRepAsMemorySize(PMIDL_STUB_MESSAGE pStubMsg,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
    return 0;
}

/***********************************************************************
 *           NdrXmitOrRepAsFree [RPCRT4.@]
 *
 * Unimplemented stub.
 */
void WINAPI NdrXmitOrRepAsFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
    FIXME("stub\n");
}

/***********************************************************************
 *           NdrRangeMarshall [internal]
 *
 * Marshals an RPC_FC_RANGE-restricted value as its underlying base type
 * (range checking is only performed on unmarshal).
 */
static unsigned char *WINAPI NdrRangeMarshall(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    const NDR_RANGE *pRange = (const NDR_RANGE *)pFormat;
    unsigned char base_type;

    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    if (pRange->type != RPC_FC_RANGE)
    {
        ERR("invalid format type %x\n", pRange->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }

    /* low nibble of flags_type is the underlying base type */
    base_type = pRange->flags_type & 0xf;

    return NdrBaseTypeMarshall(pStubMsg, pMemory, &base_type);
}

/***********************************************************************
 *           NdrRangeUnmarshall [RPCRT4.@]
 *
 * Unmarshals an RPC_FC_RANGE value, raising RPC_S_INVALID_BOUND when
 * the wire value lies outside [low_value, high_value].
 */
unsigned char *WINAPI NdrRangeUnmarshall(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char **ppMemory,
    PFORMAT_STRING pFormat,
    unsigned char fMustAlloc)
{
    const NDR_RANGE *pRange = (const NDR_RANGE *)pFormat;
    unsigned char base_type;

    TRACE("pStubMsg: %p, ppMemory: %p, type: 0x%02x, fMustAlloc: %s\n",
          pStubMsg, ppMemory, *pFormat, fMustAlloc ? "true" : "false");

    if (pRange->type != RPC_FC_RANGE)
    {
        ERR("invalid format type %x\n", pRange->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return NULL;
    }
    base_type = pRange->flags_type & 0xf;

    TRACE("base_type = 0x%02x, low_value = %d, high_value = %d\n",
        base_type, pRange->low_value, pRange->high_value);

/* Bounds-checked unmarshal of one value: allocate if needed, validate
 * the buffer has sizeof(wire_type) bytes left, range-check, then copy. */
#define RANGE_UNMARSHALL(mem_type, wire_type, format_spec) \
    do \
    { \
        align_pointer(&pStubMsg->Buffer, sizeof(wire_type)); \
        if (!fMustAlloc && !*ppMemory) \
            fMustAlloc = TRUE; \
        if (fMustAlloc) \
            *ppMemory = NdrAllocate(pStubMsg, sizeof(mem_type)); \
        if (pStubMsg->Buffer + sizeof(wire_type) > pStubMsg->BufferEnd) \
        { \
            ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n", \
                pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength); \
            RpcRaiseException(RPC_X_BAD_STUB_DATA); \
        } \
        if ((*(wire_type *)pStubMsg->Buffer < (mem_type)pRange->low_value) || \
            (*(wire_type *)pStubMsg->Buffer > (mem_type)pRange->high_value)) \
        { \
            ERR("value exceeded bounds: " format_spec ", low: " format_spec ", high: " format_spec "\n", \
                *(wire_type *)pStubMsg->Buffer, (mem_type)pRange->low_value, \
                (mem_type)pRange->high_value); \
            RpcRaiseException(RPC_S_INVALID_BOUND); \
            return NULL; \
        } \
        TRACE("*ppMemory: %p\n", *ppMemory); \
        **(mem_type **)ppMemory = *(wire_type *)pStubMsg->Buffer; \
        pStubMsg->Buffer += sizeof(wire_type); \
    } while (0)

    switch(base_type)
    {
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
        /* NOTE(review): signed cases use (UCHAR, "%d") while the
         * unsigned cases below use (CHAR, "%u") — confirm intended */
        RANGE_UNMARSHALL(UCHAR, UCHAR, "%d");
        TRACE("value: 0x%02x\n", **ppMemory);
        break;
    case RPC_FC_BYTE:
    case RPC_FC_USMALL:
        RANGE_UNMARSHALL(CHAR, CHAR, "%u");
        TRACE("value: 0x%02x\n", **ppMemory);
        break;
    case RPC_FC_WCHAR: /* FIXME: valid? */
    case RPC_FC_USHORT:
        RANGE_UNMARSHALL(USHORT, USHORT, "%u");
        TRACE("value: 0x%04x\n", **(USHORT **)ppMemory);
        break;
    case RPC_FC_SHORT:
        RANGE_UNMARSHALL(SHORT, SHORT, "%d");
        TRACE("value: 0x%04x\n", **(USHORT **)ppMemory);
        break;
    case RPC_FC_LONG:
    case RPC_FC_ENUM32:
        RANGE_UNMARSHALL(LONG, LONG, "%d");
        TRACE("value: 0x%08x\n", **(ULONG **)ppMemory);
        break;
    case RPC_FC_ULONG:
        RANGE_UNMARSHALL(ULONG, ULONG, "%u");
        TRACE("value: 0x%08x\n", **(ULONG **)ppMemory);
        break;
    case RPC_FC_ENUM16:
        /* enum16 is 16 bits on the wire but an int in memory */
        RANGE_UNMARSHALL(UINT, USHORT, "%u");
        TRACE("value: 0x%08x\n", **(UINT **)ppMemory);
        break;
    case RPC_FC_FLOAT:
    case RPC_FC_DOUBLE:
    case RPC_FC_HYPER:
    default:
        ERR("invalid range base type: 0x%02x\n", base_type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
    }

    return NULL;
}

/***********************************************************************
 *           NdrRangeBufferSize [internal]
 *
 * Adds the wire size of a range-restricted value (its base type) to
 * BufferLength.
 */
static void WINAPI NdrRangeBufferSize(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    const NDR_RANGE *pRange = (const NDR_RANGE *)pFormat;
    unsigned char base_type;

    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    if (pRange->type != RPC_FC_RANGE)
    {
        ERR("invalid format type %x\n", pRange->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
    }
    base_type = pRange->flags_type & 0xf;

    NdrBaseTypeBufferSize(pStubMsg, pMemory, &base_type);
}

/***********************************************************************
 *           NdrRangeMemorySize [internal]
 */
static ULONG WINAPI NdrRangeMemorySize(
    PMIDL_STUB_MESSAGE pStubMsg,
    PFORMAT_STRING pFormat)
{
    const NDR_RANGE
*pRange = (const NDR_RANGE *)pFormat;
    unsigned char base_type;

    if (pRange->type != RPC_FC_RANGE)
    {
        ERR("invalid format type %x\n", pRange->type);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
        return 0;
    }
    base_type = pRange->flags_type & 0xf;

    /* memory size of a range type is that of its underlying base type */
    return NdrBaseTypeMemorySize(pStubMsg, &base_type);
}

/***********************************************************************
 *           NdrRangeFree [internal]
 */
static void WINAPI NdrRangeFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
   TRACE("pStubMsg %p pMemory %p type 0x%02x\n", pStubMsg, pMemory, *pFormat);

   /* nothing to do */
}

/***********************************************************************
 *           NdrBaseTypeMarshall [internal]
 *
 * Marshals one simple (base) type value: align the buffer to the wire
 * size of the type, then copy the bytes.  RPC_FC_ENUM16 is truncated to
 * 16 bits on the wire; RPC_FC_INT3264/UINT3264 are sent as 32 bits.
 */
static unsigned char *WINAPI NdrBaseTypeMarshall(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    switch(*pFormat)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(UCHAR));
        TRACE("value: 0x%02x\n", *pMemory);
        break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
        align_pointer_clear(&pStubMsg->Buffer, sizeof(USHORT));
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(USHORT));
        TRACE("value: 0x%04x\n", *(USHORT *)pMemory);
        break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ERROR_STATUS_T:
    case RPC_FC_ENUM32:
        align_pointer_clear(&pStubMsg->Buffer, sizeof(ULONG));
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(ULONG));
        TRACE("value: 0x%08x\n", *(ULONG *)pMemory);
        break;
    case RPC_FC_FLOAT:
        align_pointer_clear(&pStubMsg->Buffer, sizeof(float));
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(float));
        break;
    case RPC_FC_DOUBLE:
        align_pointer_clear(&pStubMsg->Buffer, sizeof(double));
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(double));
        break;
    case RPC_FC_HYPER:
        align_pointer_clear(&pStubMsg->Buffer, sizeof(ULONGLONG));
        safe_copy_to_buffer(pStubMsg, pMemory, sizeof(ULONGLONG));
        TRACE("value: %s\n", wine_dbgstr_longlong(*(ULONGLONG*)pMemory));
        break;
    case RPC_FC_ENUM16:
    {
        USHORT val = *(UINT *)pMemory;
        /* only 16-bits on the wire, so do a sanity check */
        if (*(UINT *)pMemory > SHRT_MAX)
            RpcRaiseException(RPC_X_ENUM_VALUE_OUT_OF_RANGE);
        align_pointer_clear(&pStubMsg->Buffer, sizeof(USHORT));
        safe_copy_to_buffer(pStubMsg, &val, sizeof(val));
        TRACE("value: 0x%04x\n", *(UINT *)pMemory);
        break;
    }
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
    {
        /* pointer-sized in memory, 32 bits on the wire */
        UINT val = *(UINT_PTR *)pMemory;
        align_pointer_clear(&pStubMsg->Buffer, sizeof(UINT));
        safe_copy_to_buffer(pStubMsg, &val, sizeof(val));
        break;
    }
    case RPC_FC_IGNORE:
        break;
    default:
        FIXME("Unhandled base type: 0x%02x\n", *pFormat);
    }

    /* FIXME: what is the correct return value? */
    return NULL;
}

/***********************************************************************
 *           NdrBaseTypeUnmarshall [internal]
 *
 * Unmarshals one simple (base) type value.  On the server side, when no
 * allocation is required, the value may point straight into the receive
 * buffer instead of being copied.
 */
static unsigned char *WINAPI NdrBaseTypeUnmarshall(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char **ppMemory,
    PFORMAT_STRING pFormat,
    unsigned char fMustAlloc)
{
    TRACE("pStubMsg: %p, ppMemory: %p, type: 0x%02x, fMustAlloc: %s\n",
        pStubMsg, ppMemory, *pFormat, fMustAlloc ? "true" : "false");

/* align, then either alias the buffer (server, no existing memory) or
 * allocate/copy into caller-provided memory. */
#define BASE_TYPE_UNMARSHALL(type) do { \
        align_pointer(&pStubMsg->Buffer, sizeof(type)); \
        if (!fMustAlloc && !pStubMsg->IsClient && !*ppMemory) \
        { \
            *ppMemory = pStubMsg->Buffer; \
            TRACE("*ppMemory: %p\n", *ppMemory); \
            safe_buffer_increment(pStubMsg, sizeof(type)); \
        } \
        else \
        { \
            if (fMustAlloc) \
                *ppMemory = NdrAllocate(pStubMsg, sizeof(type)); \
            TRACE("*ppMemory: %p\n", *ppMemory); \
            safe_copy_from_buffer(pStubMsg, *ppMemory, sizeof(type)); \
        } \
    } while (0)

    switch(*pFormat)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        BASE_TYPE_UNMARSHALL(UCHAR);
        TRACE("value: 0x%02x\n", **ppMemory);
        break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
        BASE_TYPE_UNMARSHALL(USHORT);
        TRACE("value: 0x%04x\n", **(USHORT **)ppMemory);
        break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ERROR_STATUS_T:
    case RPC_FC_ENUM32:
        BASE_TYPE_UNMARSHALL(ULONG);
        TRACE("value: 0x%08x\n", **(ULONG **)ppMemory);
        break;
    case RPC_FC_FLOAT:
        BASE_TYPE_UNMARSHALL(float);
        TRACE("value: %f\n", **(float **)ppMemory);
        break;
    case RPC_FC_DOUBLE:
        BASE_TYPE_UNMARSHALL(double);
        TRACE("value: %f\n", **(double **)ppMemory);
        break;
    case RPC_FC_HYPER:
        BASE_TYPE_UNMARSHALL(ULONGLONG);
        TRACE("value: %s\n", wine_dbgstr_longlong(**(ULONGLONG **)ppMemory));
        break;
    case RPC_FC_ENUM16:
    {
        USHORT val;
        align_pointer(&pStubMsg->Buffer, sizeof(USHORT));
        if (!fMustAlloc && !*ppMemory)
            fMustAlloc = TRUE;
        if (fMustAlloc)
            *ppMemory = NdrAllocate(pStubMsg, sizeof(UINT));
        safe_copy_from_buffer(pStubMsg, &val, sizeof(USHORT));
        /* 16-bits on the wire, but int in memory */
        **(UINT **)ppMemory = val;
        TRACE("value: 0x%08x\n", **(UINT **)ppMemory);
        break;
    }
    case RPC_FC_INT3264:
        if (sizeof(INT_PTR) == sizeof(INT)) BASE_TYPE_UNMARSHALL(INT);
        else
        {
            /* 32 bits on the wire widened to a 64-bit INT_PTR in memory */
            INT val;
            align_pointer(&pStubMsg->Buffer, sizeof(INT));
            if (!fMustAlloc && !*ppMemory)
                fMustAlloc = TRUE;
            if (fMustAlloc)
                *ppMemory = NdrAllocate(pStubMsg, sizeof(INT_PTR));
            safe_copy_from_buffer(pStubMsg, &val, sizeof(INT));
**(INT_PTR **)ppMemory = val;
            TRACE("value: 0x%08lx\n", **(INT_PTR **)ppMemory);
        }
        break;
    case RPC_FC_UINT3264:
        if (sizeof(UINT_PTR) == sizeof(UINT)) BASE_TYPE_UNMARSHALL(UINT);
        else
        {
            /* 32 bits on the wire widened to a 64-bit UINT_PTR in memory */
            UINT val;
            align_pointer(&pStubMsg->Buffer, sizeof(UINT));
            if (!fMustAlloc && !*ppMemory)
                fMustAlloc = TRUE;
            if (fMustAlloc)
                *ppMemory = NdrAllocate(pStubMsg, sizeof(UINT_PTR));
            safe_copy_from_buffer(pStubMsg, &val, sizeof(UINT));
            **(UINT_PTR **)ppMemory = val;
            TRACE("value: 0x%08lx\n", **(UINT_PTR **)ppMemory);
        }
        break;
    case RPC_FC_IGNORE:
        break;
    default:
        FIXME("Unhandled base type: 0x%02x\n", *pFormat);
    }
#undef BASE_TYPE_UNMARSHALL

    /* FIXME: what is the correct return value? */
    return NULL;
}

/***********************************************************************
 *           NdrBaseTypeBufferSize [internal]
 *
 * Accounts for the wire size (including alignment padding) of one
 * simple type in pStubMsg->BufferLength.
 */
static void WINAPI NdrBaseTypeBufferSize(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    switch(*pFormat)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        safe_buffer_length_increment(pStubMsg, sizeof(UCHAR));
        break;
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
    case RPC_FC_ENUM16:
        align_length(&pStubMsg->BufferLength, sizeof(USHORT));
        safe_buffer_length_increment(pStubMsg, sizeof(USHORT));
        break;
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
        align_length(&pStubMsg->BufferLength, sizeof(ULONG));
        safe_buffer_length_increment(pStubMsg, sizeof(ULONG));
        break;
    case RPC_FC_FLOAT:
        align_length(&pStubMsg->BufferLength, sizeof(float));
        safe_buffer_length_increment(pStubMsg, sizeof(float));
        break;
    case RPC_FC_DOUBLE:
        align_length(&pStubMsg->BufferLength, sizeof(double));
        safe_buffer_length_increment(pStubMsg, sizeof(double));
        break;
    case RPC_FC_HYPER:
        align_length(&pStubMsg->BufferLength, sizeof(ULONGLONG));
        safe_buffer_length_increment(pStubMsg, sizeof(ULONGLONG));
        break;
    case RPC_FC_ERROR_STATUS_T:
        align_length(&pStubMsg->BufferLength, sizeof(error_status_t));
        safe_buffer_length_increment(pStubMsg, sizeof(error_status_t));
        break;
    case RPC_FC_IGNORE:
        break;
    default:
        FIXME("Unhandled base type: 0x%02x\n", *pFormat);
    }
}

/***********************************************************************
 *           NdrBaseTypeMemorySize [internal]
 *
 * Advances the buffer pointer over one wire value and accumulates the
 * corresponding in-memory size into pStubMsg->MemorySize.  Note that
 * wire size and memory size differ for ENUM16 and INT3264/UINT3264.
 */
static ULONG WINAPI NdrBaseTypeMemorySize(
    PMIDL_STUB_MESSAGE pStubMsg,
    PFORMAT_STRING pFormat)
{
    TRACE("pStubMsg %p, type 0x%02x\n", pStubMsg, *pFormat);

    switch(*pFormat)
    {
    case RPC_FC_BYTE:
    case RPC_FC_CHAR:
    case RPC_FC_SMALL:
    case RPC_FC_USMALL:
        safe_buffer_increment(pStubMsg, sizeof(UCHAR));
        pStubMsg->MemorySize += sizeof(UCHAR);
        return sizeof(UCHAR);
    case RPC_FC_WCHAR:
    case RPC_FC_SHORT:
    case RPC_FC_USHORT:
        align_pointer(&pStubMsg->Buffer, sizeof(USHORT));
        safe_buffer_increment(pStubMsg, sizeof(USHORT));
        align_length(&pStubMsg->MemorySize, sizeof(USHORT));
        pStubMsg->MemorySize += sizeof(USHORT);
        return sizeof(USHORT);
    case RPC_FC_LONG:
    case RPC_FC_ULONG:
    case RPC_FC_ENUM32:
        align_pointer(&pStubMsg->Buffer, sizeof(ULONG));
        safe_buffer_increment(pStubMsg, sizeof(ULONG));
        align_length(&pStubMsg->MemorySize, sizeof(ULONG));
        pStubMsg->MemorySize += sizeof(ULONG);
        return sizeof(ULONG);
    case RPC_FC_FLOAT:
        align_pointer(&pStubMsg->Buffer, sizeof(float));
        safe_buffer_increment(pStubMsg, sizeof(float));
        align_length(&pStubMsg->MemorySize, sizeof(float));
        pStubMsg->MemorySize += sizeof(float);
        return sizeof(float);
    case RPC_FC_DOUBLE:
        align_pointer(&pStubMsg->Buffer, sizeof(double));
        safe_buffer_increment(pStubMsg, sizeof(double));
        align_length(&pStubMsg->MemorySize, sizeof(double));
        pStubMsg->MemorySize += sizeof(double);
        return sizeof(double);
    case RPC_FC_HYPER:
        align_pointer(&pStubMsg->Buffer, sizeof(ULONGLONG));
        safe_buffer_increment(pStubMsg, sizeof(ULONGLONG));
        align_length(&pStubMsg->MemorySize, sizeof(ULONGLONG));
        pStubMsg->MemorySize += sizeof(ULONGLONG);
        return sizeof(ULONGLONG);
    case RPC_FC_ERROR_STATUS_T:
        align_pointer(&pStubMsg->Buffer, sizeof(error_status_t));
        safe_buffer_increment(pStubMsg, sizeof(error_status_t));
        align_length(&pStubMsg->MemorySize, sizeof(error_status_t));
        pStubMsg->MemorySize += sizeof(error_status_t);
        return sizeof(error_status_t);
    case RPC_FC_ENUM16:
        /* 16 bits on the wire, int-sized in memory */
        align_pointer(&pStubMsg->Buffer, sizeof(USHORT));
        safe_buffer_increment(pStubMsg, sizeof(USHORT));
        align_length(&pStubMsg->MemorySize, sizeof(UINT));
        pStubMsg->MemorySize += sizeof(UINT);
        return sizeof(UINT);
    case RPC_FC_INT3264:
    case RPC_FC_UINT3264:
        /* 32 bits on the wire, pointer-sized in memory */
        align_pointer(&pStubMsg->Buffer, sizeof(UINT));
        safe_buffer_increment(pStubMsg, sizeof(UINT));
        align_length(&pStubMsg->MemorySize, sizeof(UINT_PTR));
        pStubMsg->MemorySize += sizeof(UINT_PTR);
        return sizeof(UINT_PTR);
    case RPC_FC_IGNORE:
        /* nothing on the wire; memory still holds a pointer slot */
        align_length(&pStubMsg->MemorySize, sizeof(void *));
        pStubMsg->MemorySize += sizeof(void *);
        return sizeof(void *);
    default:
        FIXME("Unhandled base type: 0x%02x\n", *pFormat);
       return 0;
    }
}

/***********************************************************************
 *           NdrBaseTypeFree [internal]
 */
static void WINAPI NdrBaseTypeFree(PMIDL_STUB_MESSAGE pStubMsg,
                                unsigned char *pMemory,
                                PFORMAT_STRING pFormat)
{
   TRACE("pStubMsg %p pMemory %p type 0x%02x\n", pStubMsg, pMemory, *pFormat);

   /* nothing to do */
}

/***********************************************************************
 *           NdrContextHandleBufferSize [internal]
 *
 * Context handles occupy a fixed cbNDRContext bytes on the wire,
 * 4-byte aligned.
 */
static void WINAPI NdrContextHandleBufferSize(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    if (*pFormat != RPC_FC_BIND_CONTEXT)
    {
        ERR("invalid format type %x\n", *pFormat);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
    }
    align_length(&pStubMsg->BufferLength, 4);
    safe_buffer_length_increment(pStubMsg, cbNDRContext);
}

/***********************************************************************
 *           NdrContextHandleMarshall [internal]
 */
static unsigned char *WINAPI NdrContextHandleMarshall(
PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char *pMemory,
    PFORMAT_STRING pFormat)
{
    TRACE("pStubMsg %p, pMemory %p, type 0x%02x\n", pStubMsg, pMemory, *pFormat);

    if (*pFormat != RPC_FC_BIND_CONTEXT)
    {
        ERR("invalid format type %x\n", *pFormat);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
    }
    TRACE("flags: 0x%02x\n", pFormat[1]);

    if (pStubMsg->IsClient)
    {
        /* the handle may be passed directly or via a pointer */
        if (pFormat[1] & HANDLE_PARAM_IS_VIA_PTR)
            NdrClientContextMarshall(pStubMsg, *(NDR_CCONTEXT **)pMemory, FALSE);
        else
            NdrClientContextMarshall(pStubMsg, pMemory, FALSE);
    }
    else
    {
        /* server side: look up the context and its rundown routine by
         * the index stored in the format string */
        NDR_SCONTEXT ctxt = NDRSContextFromValue(pMemory);
        NDR_RUNDOWN rundown = pStubMsg->StubDesc->apfnNdrRundownRoutines[pFormat[2]];
        NdrServerContextNewMarshall(pStubMsg, ctxt, rundown, pFormat);
    }

    return NULL;
}

/***********************************************************************
 *           NdrContextHandleUnmarshall [internal]
 */
static unsigned char *WINAPI NdrContextHandleUnmarshall(
    PMIDL_STUB_MESSAGE pStubMsg,
    unsigned char **ppMemory,
    PFORMAT_STRING pFormat,
    unsigned char fMustAlloc)
{
    TRACE("pStubMsg %p, ppMemory %p, pFormat %p, fMustAlloc %s\n",
          pStubMsg, ppMemory, pFormat, fMustAlloc ? "TRUE": "FALSE");

    if (*pFormat != RPC_FC_BIND_CONTEXT)
    {
        ERR("invalid format type %x\n", *pFormat);
        RpcRaiseException(RPC_S_INTERNAL_ERROR);
    }
    TRACE("flags: 0x%02x\n", pFormat[1]);

    if (pStubMsg->IsClient)
    {
        /* [out]-only or [ret] param */
        if ((pFormat[1] & (HANDLE_PARAM_IS_IN|HANDLE_PARAM_IS_OUT)) == HANDLE_PARAM_IS_OUT)
            **(NDR_CCONTEXT **)ppMemory = NULL;
        NdrClientContextUnmarshall(pStubMsg, *(NDR_CCONTEXT **)ppMemory, pStubMsg->RpcMsg->Handle);
    }
    else
    {
        NDR_SCONTEXT ctxt;
        ctxt = NdrServerContextNewUnmarshall(pStubMsg, pFormat);
        if (pFormat[1] & HANDLE_PARAM_IS_VIA_PTR)
            *(void **)ppMemory = NDRSContextValue(ctxt);
        else
            *(void **)ppMemory = *NDRSContextValue(ctxt);
    }

    return NULL;
}

/***********************************************************************
 *           NdrClientContextMarshall [RPCRT4.@]
 */
void WINAPI NdrClientContextMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                     NDR_CCONTEXT ContextHandle,
                                     int fCheck)
{
    TRACE("(%p, %p, %d)\n", pStubMsg, ContextHandle, fCheck);

    align_pointer_clear(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    /* FIXME: what does fCheck do? */
    NDRCContextMarshall(ContextHandle,
                        pStubMsg->Buffer);

    pStubMsg->Buffer += cbNDRContext;
}

/***********************************************************************
 *           NdrClientContextUnmarshall [RPCRT4.@]
 */
void WINAPI NdrClientContextUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                       NDR_CCONTEXT * pContextHandle,
                                       RPC_BINDING_HANDLE BindHandle)
{
    TRACE("(%p, %p, %p)\n", pStubMsg, pContextHandle, BindHandle);

    align_pointer(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > pStubMsg->BufferEnd)
        RpcRaiseException(RPC_X_BAD_STUB_DATA);

    NDRCContextUnmarshall(pContextHandle,
                          BindHandle,
                          pStubMsg->Buffer,
                          pStubMsg->RpcMsg->DataRepresentation);

    pStubMsg->Buffer += cbNDRContext;
}

/* Marshals a server-side context handle with the default flags. */
void WINAPI NdrServerContextMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                     NDR_SCONTEXT ContextHandle,
                                     NDR_RUNDOWN RundownRoutine )
{
    TRACE("(%p, %p, %p)\n", pStubMsg, ContextHandle, RundownRoutine);

    align_pointer(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    NDRSContextMarshall2(pStubMsg->RpcMsg->Handle, ContextHandle,
                         pStubMsg->Buffer, RundownRoutine, NULL,
                         RPC_CONTEXT_HANDLE_DEFAULT_FLAGS);
    pStubMsg->Buffer += cbNDRContext;
}

/* Unmarshals a server-side context handle with the default flags. */
NDR_SCONTEXT WINAPI NdrServerContextUnmarshall(PMIDL_STUB_MESSAGE pStubMsg)
{
    NDR_SCONTEXT ContextHandle;

    TRACE("(%p)\n", pStubMsg);

    align_pointer(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    ContextHandle = NDRSContextUnmarshall2(pStubMsg->RpcMsg->Handle,
                                           pStubMsg->Buffer,
                                           pStubMsg->RpcMsg->DataRepresentation,
                                           NULL, RPC_CONTEXT_HANDLE_DEFAULT_FLAGS);
pStubMsg->Buffer += cbNDRContext;

    return ContextHandle;
}

void WINAPI NdrContextHandleSize(PMIDL_STUB_MESSAGE pStubMsg,
                                 unsigned char* pMemory,
                                 PFORMAT_STRING pFormat)
{
    FIXME("(%p, %p, %p): stub\n", pStubMsg, pMemory, pFormat);
}

/* Creates a fresh server context handle, honouring the serialization
 * and strict-context flags from the format string. */
NDR_SCONTEXT WINAPI NdrContextHandleInitialize(PMIDL_STUB_MESSAGE pStubMsg,
                                               PFORMAT_STRING pFormat)
{
    RPC_SYNTAX_IDENTIFIER *if_id = NULL;
    ULONG flags = RPC_CONTEXT_HANDLE_DEFAULT_FLAGS;

    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    if (pFormat[1] & NDR_CONTEXT_HANDLE_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_SERIALIZE;
    if (pFormat[1] & NDR_CONTEXT_HANDLE_NO_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_DONT_SERIALIZE;
    if (pFormat[1] & NDR_STRICT_CONTEXT_HANDLE)
    {
        /* strict handles are bound to this interface's identity */
        RPC_SERVER_INTERFACE *sif = pStubMsg->StubDesc->RpcInterfaceInformation;
        if_id = &sif->InterfaceId;
    }

    /* passing a NULL buffer makes NDRSContextUnmarshall2 create a new handle */
    return NDRSContextUnmarshall2(pStubMsg->RpcMsg->Handle, NULL,
                                  pStubMsg->RpcMsg->DataRepresentation, if_id,
                                  flags);
}

/* Marshals a server context handle using the per-handle flags from the
 * format string (new-style, flag-aware variant). */
void WINAPI NdrServerContextNewMarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                        NDR_SCONTEXT ContextHandle,
                                        NDR_RUNDOWN RundownRoutine,
                                        PFORMAT_STRING pFormat)
{
    RPC_SYNTAX_IDENTIFIER *if_id = NULL;
    ULONG flags = RPC_CONTEXT_HANDLE_DEFAULT_FLAGS;

    TRACE("(%p, %p, %p, %p)\n", pStubMsg, ContextHandle, RundownRoutine, pFormat);

    align_pointer(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    if (pFormat[1] & NDR_CONTEXT_HANDLE_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_SERIALIZE;
    if (pFormat[1] & NDR_CONTEXT_HANDLE_NO_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_DONT_SERIALIZE;
    if (pFormat[1] & NDR_STRICT_CONTEXT_HANDLE)
    {
        RPC_SERVER_INTERFACE *sif = pStubMsg->StubDesc->RpcInterfaceInformation;
        if_id = &sif->InterfaceId;
    }

    NDRSContextMarshall2(pStubMsg->RpcMsg->Handle, ContextHandle,
                          pStubMsg->Buffer, RundownRoutine, if_id, flags);
    pStubMsg->Buffer += cbNDRContext;
}

/* Unmarshals a server context handle using the per-handle flags from
 * the format string (new-style, flag-aware variant). */
NDR_SCONTEXT WINAPI NdrServerContextNewUnmarshall(PMIDL_STUB_MESSAGE pStubMsg,
                                                  PFORMAT_STRING pFormat)
{
    NDR_SCONTEXT ContextHandle;
    RPC_SYNTAX_IDENTIFIER *if_id = NULL;
    ULONG flags = RPC_CONTEXT_HANDLE_DEFAULT_FLAGS;

    TRACE("(%p, %p)\n", pStubMsg, pFormat);

    align_pointer(&pStubMsg->Buffer, 4);

    if (pStubMsg->Buffer + cbNDRContext > (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength)
    {
        ERR("buffer overflow - Buffer = %p, BufferEnd = %p\n",
            pStubMsg->Buffer, (unsigned char *)pStubMsg->RpcMsg->Buffer + pStubMsg->BufferLength);
        RpcRaiseException(RPC_X_BAD_STUB_DATA);
    }

    if (pFormat[1] & NDR_CONTEXT_HANDLE_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_SERIALIZE;
    if (pFormat[1] & NDR_CONTEXT_HANDLE_NO_SERIALIZE)
        flags |= RPC_CONTEXT_HANDLE_DONT_SERIALIZE;
    if (pFormat[1] & NDR_STRICT_CONTEXT_HANDLE)
    {
        RPC_SERVER_INTERFACE *sif = pStubMsg->StubDesc->RpcInterfaceInformation;
        if_id = &sif->InterfaceId;
    }

    ContextHandle = NDRSContextUnmarshall2(pStubMsg->RpcMsg->Handle,
                                           pStubMsg->Buffer,
                                           pStubMsg->RpcMsg->DataRepresentation,
                                           if_id, flags);
    pStubMsg->Buffer += cbNDRContext;

    return ContextHandle;
}

/***********************************************************************
 *           NdrCorrelationInitialize [RPCRT4.@]
 *
 * Initializes correlation validity checking.
 *
 * PARAMS
 *  pStubMsg   [I] MIDL_STUB_MESSAGE used during unmarshalling.
 *  pMemory    [I] Pointer to memory to use as a cache.
 *  CacheSize  [I] Size of the memory pointed to by pMemory.
 *  Flags      [I] Reserved. Set to zero.
 *
 * RETURNS
 *  Nothing.
 */
void WINAPI NdrCorrelationInitialize(PMIDL_STUB_MESSAGE pStubMsg, void *pMemory, ULONG CacheSize, ULONG Flags)
{
    FIXME("(%p, %p, %d, 0x%x): stub\n", pStubMsg, pMemory, CacheSize, Flags);
    pStubMsg->fHasNewCorrDesc = TRUE;
}

/***********************************************************************
 *           NdrCorrelationPass [RPCRT4.@]
 *
 * Performs correlation validity checking.
 *
 * PARAMS
 *  pStubMsg   [I] MIDL_STUB_MESSAGE used during unmarshalling.
 *
 * RETURNS
 *  Nothing.
 */
void WINAPI NdrCorrelationPass(PMIDL_STUB_MESSAGE pStubMsg)
{
    FIXME("(%p): stub\n", pStubMsg);
}

/***********************************************************************
 *           NdrCorrelationFree [RPCRT4.@]
 *
 * Frees any resources used while unmarshalling parameters that need
 * correlation validity checking.
 *
 * PARAMS
 *  pStubMsg   [I] MIDL_STUB_MESSAGE used during unmarshalling.
 *
 * RETURNS
 *  Nothing.
 */
void WINAPI NdrCorrelationFree(PMIDL_STUB_MESSAGE pStubMsg)
{
    FIXME("(%p): stub\n", pStubMsg);
}
891777.c
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* _ _ * _ __ ___ ___ __| | ___ ___| | mod_ssl * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL * | | | | | | (_) | (_| | \__ \__ \ | * |_| |_| |_|\___/ \__,_|___|___/___/_| * |_____| * ssl_engine_io.c * I/O Functions */ /* ``MY HACK: This universe. Just one little problem: core keeps dumping.'' -- Unknown */ #include "ssl_private.h" #include "mod_ssl.h" #include "apr_date.h" /* _________________________________________________________________ ** ** I/O Hooks ** _________________________________________________________________ */ /* This file is designed to be the bridge between OpenSSL and httpd. * However, we really don't expect anyone (let alone ourselves) to * remember what is in this file. So, first, a quick overview. * * In this file, you will find: * - ssl_io_filter_input (Apache input filter) * - ssl_io_filter_output (Apache output filter) * * - bio_filter_in_* (OpenSSL input filter) * - bio_filter_out_* (OpenSSL output filter) * * The input chain is roughly: * * ssl_io_filter_input->ssl_io_input_read->SSL_read->... 
 * ...->bio_filter_in_read->ap_get_brigade/next-httpd-filter
 *
 * In mortal terminology, we do the following:
 * - Receive a request for data to the SSL input filter
 * - Call a helper function once we know we should perform a read
 * - Call OpenSSL's SSL_read()
 * - SSL_read() will then call bio_filter_in_read
 * - bio_filter_in_read will then try to fetch data from the next httpd filter
 * - bio_filter_in_read will flatten that data and return it to SSL_read
 * - SSL_read will then decrypt the data
 * - ssl_io_input_read will then receive decrypted data as a char* and
 *   ensure that there were no read errors
 * - The char* is placed in a brigade and returned
 *
 * Since connection-level input filters in httpd need to be able to
 * handle AP_MODE_GETLINE calls (namely identifying LF-terminated strings),
 * we provide ssl_io_input_getline, which handles this special case.
 *
 * Due to AP_MODE_GETLINE and AP_MODE_SPECULATIVE, we may sometimes have
 * 'leftover' decoded data which must be setaside for the next read. That
 * is currently handled by the char_buffer_{read|write} functions. So,
 * ssl_io_input_read may be able to fulfill reads without invoking
 * SSL_read().
 *
 * Note that the filter context of ssl_io_filter_input and bio_filter_in_*
 * are shared as bio_filter_in_ctx_t.
 *
 * Note that the filter is by choice limited to reading at most
 * AP_IOBUFSIZE (8192 bytes) per call.
 *
 */

/* this custom BIO allows us to hook SSL_write directly into
 * an apr_bucket_brigade and use transient buckets with the SSL
 * malloc-ed buffer, rather than copying into a mem BIO.
 * also allows us to pass the brigade as data is being written
 * rather than buffering up the entire response in the mem BIO.
 *
 * when SSL needs to flush (e.g. SSL_accept()), it will call BIO_flush()
 * which will trigger a call to bio_filter_out_ctrl() -> bio_filter_out_flush().
 * so we only need to flush the output ourselves if we receive an
 * EOS or FLUSH bucket.
this was not possible with the mem BIO where we
 * had to flush all over the place not really knowing when it was required
 * to do so. */

typedef struct {
    SSL                *pssl;
    BIO                *pbioRead;
    BIO                *pbioWrite;
    ap_filter_t        *pInputFilter;
    ap_filter_t        *pOutputFilter;
    SSLConnRec         *config;
} ssl_filter_ctx_t;

typedef struct {
    ssl_filter_ctx_t *filter_ctx;
    conn_rec *c;
    apr_bucket_brigade *bb;    /* Brigade used as a buffer. */
    apr_status_t rc;
} bio_filter_out_ctx_t;

/* Allocates a new output-BIO context on the connection pool. */
static bio_filter_out_ctx_t *bio_filter_out_ctx_new(ssl_filter_ctx_t *filter_ctx,
                                                    conn_rec *c)
{
    bio_filter_out_ctx_t *outctx = apr_palloc(c->pool, sizeof(*outctx));

    outctx->filter_ctx = filter_ctx;
    outctx->c = c;
    outctx->bb = apr_brigade_create(c->pool, c->bucket_alloc);

    return outctx;
}

/* Pass an output brigade down the filter stack; returns 1 on success
 * or -1 on failure. */
static int bio_filter_out_pass(bio_filter_out_ctx_t *outctx)
{
    AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(outctx->bb));

    outctx->rc = ap_pass_brigade(outctx->filter_ctx->pOutputFilter->next,
                                 outctx->bb);
    /* Fail if the connection was reset: */
    if (outctx->rc == APR_SUCCESS && outctx->c->aborted) {
        outctx->rc = APR_ECONNRESET;
    }
    return (outctx->rc == APR_SUCCESS) ? 1 : -1;
}

/* Send a FLUSH bucket down the output filter stack; returns 1 on
 * success, -1 on failure. */
static int bio_filter_out_flush(BIO *bio)
{
    bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);
    apr_bucket *e;

    AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(outctx->bb));

    e = apr_bucket_flush_create(outctx->bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(outctx->bb, e);

    return bio_filter_out_pass(outctx);
}

/* BIO create callback shared by the in and out filter BIOs. */
static int bio_filter_create(BIO *bio)
{
    bio->shutdown = 1;
    bio->init = 1;
    bio->num = -1;
    bio->ptr = NULL;

    return 1;
}

/* BIO destroy callback; the context lives in an APR pool. */
static int bio_filter_destroy(BIO *bio)
{
    if (bio == NULL) {
        return 0;
    }

    /* nothing to free here.
     * apache will destroy the bucket brigade for us */
    return 1;
}

static int bio_filter_out_read(BIO *bio, char *out, int outl)
{
    /* this is never called */
    return -1;
}

/* Output BIO write callback: wraps SSL's output bytes in a transient
 * bucket and pushes it straight down the httpd filter chain. */
static int bio_filter_out_write(BIO *bio, const char *in, int inl)
{
    bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);
    apr_bucket *e;

    /* Abort early if the client has initiated a renegotiation. */
    if (outctx->filter_ctx->config->reneg_state == RENEG_ABORT) {
        outctx->rc = APR_ECONNABORTED;
        return -1;
    }

    /* when handshaking we'll have a small number of bytes.
     * max size SSL will pass us here is about 16k.
     * (16413 bytes to be exact)
     */
    BIO_clear_retry_flags(bio);

    /* Use a transient bucket for the output data - any downstream
     * filter must setaside if necessary. */
    e = apr_bucket_transient_create(in, inl, outctx->bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(outctx->bb, e);

    if (bio_filter_out_pass(outctx) < 0) {
        return -1;
    }

    return inl;
}

/* Output BIO control callback; BIO_CTRL_FLUSH is the interesting case. */
static long bio_filter_out_ctrl(BIO *bio, int cmd, long num, void *ptr)
{
    long ret = 1;
    bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)(bio->ptr);

    switch (cmd) {
    case BIO_CTRL_RESET:
    case BIO_CTRL_EOF:
    case BIO_C_SET_BUF_MEM_EOF_RETURN:
        ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, outctx->c,
                      "output bio: unhandled control %d", cmd);
        ret = 0;
        break;
    case BIO_CTRL_WPENDING:
    case BIO_CTRL_PENDING:
    case BIO_CTRL_INFO:
        ret = 0;
        break;
    case BIO_CTRL_GET_CLOSE:
        ret = (long)bio->shutdown;
        break;
      case BIO_CTRL_SET_CLOSE:
        bio->shutdown = (int)num;
        break;
      case BIO_CTRL_FLUSH:
        ret = bio_filter_out_flush(bio);
        break;
      case BIO_CTRL_DUP:
        ret = 1;
        break;
        /* N/A */
      case BIO_C_SET_BUF_MEM:
      case BIO_C_GET_BUF_MEM_PTR:
        /* we don't care */
      case BIO_CTRL_PUSH:
      case BIO_CTRL_POP:
      default:
        ret = 0;
        break;
    }

    return ret;
}

static int bio_filter_out_gets(BIO *bio, char *buf, int size)
{
    /* this is never called */
    return -1;
}

static int bio_filter_out_puts(BIO *bio, const char *str)
{
    /* this is never called */
    return -1;
}

static BIO_METHOD bio_filter_out_method = {
    BIO_TYPE_MEM,
    "APR output filter",
    bio_filter_out_write,
    bio_filter_out_read,     /* read is never called */
    bio_filter_out_puts,     /* puts is never called */
    bio_filter_out_gets,     /* gets is never called */
    bio_filter_out_ctrl,
    bio_filter_create,
    bio_filter_destroy,
    NULL
};

typedef struct {
    int length;
    char *value;
} char_buffer_t;

typedef struct {
    SSL *ssl;
    BIO *bio_out;
    ap_filter_t *f;
    apr_status_t rc;
    ap_input_mode_t mode;
    apr_read_type_e block;
    apr_bucket_brigade *bb;
    char_buffer_t cbuf;
    apr_pool_t *pool;
    char buffer[AP_IOBUFSIZE];
    ssl_filter_ctx_t *filter_ctx;
    int npn_finished;  /* 1 if NPN has finished, 0 otherwise */
} bio_filter_in_ctx_t;

/*
 * this char_buffer api might seem silly, but we don't need to copy
 * any of this data and we need to remember the length.
 */

/* Copy up to INL bytes from the char_buffer BUFFER into IN.  Note
 * that due to the strange way this API is designed/used, the
 * char_buffer object is used to cache a segment of inctx->buffer, and
 * then this function called to copy (part of) that segment to the
 * beginning of inctx->buffer.  So the segments to copy cannot be
 * presumed to be non-overlapping, and memmove must be used. */
static int char_buffer_read(char_buffer_t *buffer, char *in, int inl)
{
    if (!buffer->length) {
        return 0;
    }

    if (buffer->length > inl) {
        /* we have enough to fill the caller's buffer */
        memmove(in, buffer->value, inl);
        buffer->value += inl;
        buffer->length -= inl;
    }
    else {
        /* swallow remainder of the buffer */
        memmove(in, buffer->value, buffer->length);

        inl = buffer->length;
        buffer->value = NULL;
        buffer->length = 0;
    }

    return inl;
}

/* Caches a pointer/length pair; no copy is made (see comment above). */
static int char_buffer_write(char_buffer_t *buffer, char *in, int inl)
{
    buffer->value = in;
    buffer->length = inl;

    return inl;
}

/* This function will read from a brigade and discard the read buckets as it
 * proceeds.  It will read at most *len bytes.
 */
static apr_status_t brigade_consume(apr_bucket_brigade *bb,
                                    apr_read_type_e block,
                                    char *c, apr_size_t *len)
{
    apr_size_t actual = 0;
    apr_status_t status = APR_SUCCESS;

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *b = APR_BRIGADE_FIRST(bb);
        const char *str;
        apr_size_t str_len;
        apr_size_t consume;

        /* Justin points out this is an http-ism that might
         * not fit if brigade_consume is added to APR.  Perhaps
         * apr_bucket_read(eos_bucket) should return APR_EOF?
         * Then this becomes mainline instead of a one-off.
         */
        if (APR_BUCKET_IS_EOS(b)) {
            status = APR_EOF;
            break;
        }

        /* The reason I'm not offering brigade_consume yet
         * across to apr-util is that the following call
         * illustrates how borked that API really is.  For
         * this sort of case (caller provided buffer) it
         * would be much more trivial for apr_bucket_consume
         * to do all the work that follows, based on the
         * particular characteristics of the bucket we are
         * consuming here.
         */
        status = apr_bucket_read(b, &str, &str_len, block);

        if (status != APR_SUCCESS) {
            if (APR_STATUS_IS_EOF(status)) {
                /* This stream bucket was consumed */
                apr_bucket_delete(b);
                continue;
            }
            break;
        }

        if (str_len > 0) {
            /* Do not block once some data has been consumed */
            block = APR_NONBLOCK_READ;

            /* Assure we don't overflow. */
            consume = (str_len + actual > *len) ? *len - actual : str_len;

            memcpy(c, str, consume);

            c += consume;
            actual += consume;

            if (consume >= b->length) {
                /* This physical bucket was consumed */
                apr_bucket_delete(b);
            }
            else {
                /* Only part of this physical bucket was consumed */
                b->start += consume;
                b->length -= consume;
            }
        }
        else if (b->length == 0) {
            apr_bucket_delete(b);
        }

        /* This could probably be actual == *len, but be safe from stray
         * photons. */
        if (actual >= *len) {
            break;
        }
    }

    *len = actual;
    return status;
}

/*
 * this is the function called by SSL_read()
 */
static int bio_filter_in_read(BIO *bio, char *in, int inlen)
{
    apr_size_t inl = inlen;
    bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)(bio->ptr);
    apr_read_type_e block = inctx->block;

    inctx->rc = APR_SUCCESS;

    /* OpenSSL catches this case, so should we. */
    if (!in)
        return 0;

    /* Abort early if the client has initiated a renegotiation. */
    if (inctx->filter_ctx->config->reneg_state == RENEG_ABORT) {
        inctx->rc = APR_ECONNABORTED;
        return -1;
    }

    /* In theory, OpenSSL should flush as necessary, but it is known
     * not to do so correctly in some cases; see PR 46952.
     *
     * Historically, this flush call was performed only for an SSLv2
     * connection or for a proxy connection.  Calling _out_flush
     * should be very cheap in cases where it is unnecessary (and no
     * output is buffered) so the performance impact of doing it
     * unconditionally should be minimal.
     */
    if (bio_filter_out_flush(inctx->bio_out) < 0) {
        bio_filter_out_ctx_t *outctx = inctx->bio_out->ptr;
        inctx->rc = outctx->rc;
        return -1;
    }

    BIO_clear_retry_flags(bio);

    if (!inctx->bb) {
        inctx->rc = APR_EOF;
        return -1;
    }

    if (APR_BRIGADE_EMPTY(inctx->bb)) {

        inctx->rc = ap_get_brigade(inctx->f->next, inctx->bb,
                                   AP_MODE_READBYTES, block, inl);

        /* If the read returns EAGAIN or success with an empty
         * brigade, return an error after setting the retry flag;
         * SSL_read() will then return -1, and SSL_get_error() will
         * indicate SSL_ERROR_WANT_READ.
*/ if (APR_STATUS_IS_EAGAIN(inctx->rc) || APR_STATUS_IS_EINTR(inctx->rc) || (inctx->rc == APR_SUCCESS && APR_BRIGADE_EMPTY(inctx->bb))) { BIO_set_retry_read(bio); return -1; } if (inctx->rc != APR_SUCCESS) { /* Unexpected errors discard the brigade */ apr_brigade_cleanup(inctx->bb); inctx->bb = NULL; return -1; } } inctx->rc = brigade_consume(inctx->bb, block, in, &inl); if (inctx->rc == APR_SUCCESS) { return (int)inl; } if (APR_STATUS_IS_EAGAIN(inctx->rc) || APR_STATUS_IS_EINTR(inctx->rc)) { BIO_set_retry_read(bio); return (int)inl; } /* Unexpected errors and APR_EOF clean out the brigade. * Subsequent calls will return APR_EOF. */ apr_brigade_cleanup(inctx->bb); inctx->bb = NULL; if (APR_STATUS_IS_EOF(inctx->rc) && inl) { /* Provide the results of this read pass, * without resetting the BIO retry_read flag */ return (int)inl; } return -1; } static BIO_METHOD bio_filter_in_method = { BIO_TYPE_MEM, "APR input filter", NULL, /* write is never called */ bio_filter_in_read, NULL, /* puts is never called */ NULL, /* gets is never called */ NULL, /* ctrl is never called */ bio_filter_create, bio_filter_destroy, NULL }; static apr_status_t ssl_io_input_read(bio_filter_in_ctx_t *inctx, char *buf, apr_size_t *len) { apr_size_t wanted = *len; apr_size_t bytes = 0; int rc; *len = 0; /* If we have something leftover from last time, try that first. */ if ((bytes = char_buffer_read(&inctx->cbuf, buf, wanted))) { *len = bytes; if (inctx->mode == AP_MODE_SPECULATIVE) { /* We want to rollback this read. */ if (inctx->cbuf.length > 0) { inctx->cbuf.value -= bytes; inctx->cbuf.length += bytes; } else { char_buffer_write(&inctx->cbuf, buf, (int)bytes); } return APR_SUCCESS; } /* This could probably be *len == wanted, but be safe from stray * photons. 
*/ if (*len >= wanted) { return APR_SUCCESS; } if (inctx->mode == AP_MODE_GETLINE) { if (memchr(buf, APR_ASCII_LF, *len)) { return APR_SUCCESS; } } else { /* Down to a nonblock pattern as we have some data already */ inctx->block = APR_NONBLOCK_READ; } } while (1) { if (!inctx->filter_ctx->pssl) { /* Ensure a non-zero error code is returned */ if (inctx->rc == APR_SUCCESS) { inctx->rc = APR_EGENERAL; } break; } /* SSL_read may not read because we haven't taken enough data * from the stack. This is where we want to consider all of * the blocking and SPECULATIVE semantics */ rc = SSL_read(inctx->filter_ctx->pssl, buf + bytes, wanted - bytes); if (rc > 0) { *len += rc; if (inctx->mode == AP_MODE_SPECULATIVE) { /* We want to rollback this read. */ char_buffer_write(&inctx->cbuf, buf, rc); } return inctx->rc; } else if (rc == 0) { /* If EAGAIN, we will loop given a blocking read, * otherwise consider ourselves at EOF. */ if (APR_STATUS_IS_EAGAIN(inctx->rc) || APR_STATUS_IS_EINTR(inctx->rc)) { /* Already read something, return APR_SUCCESS instead. * On win32 in particular, but perhaps on other kernels, * a blocking call isn't 'always' blocking. */ if (*len > 0) { inctx->rc = APR_SUCCESS; break; } if (inctx->block == APR_NONBLOCK_READ) { break; } } else { if (*len > 0) { inctx->rc = APR_SUCCESS; } else { inctx->rc = APR_EOF; } break; } } else /* (rc < 0) */ { int ssl_err = SSL_get_error(inctx->filter_ctx->pssl, rc); conn_rec *c = (conn_rec*)SSL_get_app_data(inctx->filter_ctx->pssl); if (ssl_err == SSL_ERROR_WANT_READ) { /* * If OpenSSL wants to read more, and we were nonblocking, * report as an EAGAIN. Otherwise loop, pulling more * data from network filter. * * (This is usually the case when the client forces an SSL * renegotiation which is handled implicitly by OpenSSL.) */ inctx->rc = APR_EAGAIN; if (*len > 0) { inctx->rc = APR_SUCCESS; break; } if (inctx->block == APR_NONBLOCK_READ) { break; } continue; /* Blocking and nothing yet? Try again. 
*/ } else if (ssl_err == SSL_ERROR_SYSCALL) { if (APR_STATUS_IS_EAGAIN(inctx->rc) || APR_STATUS_IS_EINTR(inctx->rc)) { /* Already read something, return APR_SUCCESS instead. */ if (*len > 0) { inctx->rc = APR_SUCCESS; break; } if (inctx->block == APR_NONBLOCK_READ) { break; } continue; /* Blocking and nothing yet? Try again. */ } else { ap_log_cerror(APLOG_MARK, APLOG_INFO, inctx->rc, c, APLOGNO(01991) "SSL input filter read failed."); } } else /* if (ssl_err == SSL_ERROR_SSL) */ { /* * Log SSL errors and any unexpected conditions. */ ap_log_cerror(APLOG_MARK, APLOG_INFO, inctx->rc, c, APLOGNO(01992) "SSL library error %d reading data", ssl_err); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, mySrvFromConn(c)); } if (inctx->rc == APR_SUCCESS) { inctx->rc = APR_EGENERAL; } break; } } return inctx->rc; } /* Read a line of input from the SSL input layer into buffer BUF of * length *LEN; updating *len to reflect the length of the line * including the LF character. */ static apr_status_t ssl_io_input_getline(bio_filter_in_ctx_t *inctx, char *buf, apr_size_t *len) { const char *pos = NULL; apr_status_t status; apr_size_t tmplen = *len, buflen = *len, offset = 0; *len = 0; /* * in most cases we get all the headers on the first SSL_read. * however, in certain cases SSL_read will only get a partial * chunk of the headers, so we try to read until LF is seen. 
*/ while (tmplen > 0) { status = ssl_io_input_read(inctx, buf + offset, &tmplen); if (status != APR_SUCCESS) { if (APR_STATUS_IS_EAGAIN(status) && (*len > 0)) { /* Save the part of the line we already got */ char_buffer_write(&inctx->cbuf, buf, *len); } return status; } *len += tmplen; if ((pos = memchr(buf, APR_ASCII_LF, *len))) { break; } offset += tmplen; tmplen = buflen - offset; } if (pos) { char *value; int length; apr_size_t bytes = pos - buf; bytes += 1; value = buf + bytes; length = *len - bytes; char_buffer_write(&inctx->cbuf, value, length); *len = bytes; } return APR_SUCCESS; } static apr_status_t ssl_filter_write(ap_filter_t *f, const char *data, apr_size_t len) { ssl_filter_ctx_t *filter_ctx = f->ctx; bio_filter_out_ctx_t *outctx; int res; /* write SSL */ if (filter_ctx->pssl == NULL) { return APR_EGENERAL; } outctx = (bio_filter_out_ctx_t *)filter_ctx->pbioWrite->ptr; res = SSL_write(filter_ctx->pssl, (unsigned char *)data, len); if (res < 0) { int ssl_err = SSL_get_error(filter_ctx->pssl, res); conn_rec *c = (conn_rec*)SSL_get_app_data(outctx->filter_ctx->pssl); if (ssl_err == SSL_ERROR_WANT_WRITE) { /* * If OpenSSL wants to write more, and we were nonblocking, * report as an EAGAIN. Otherwise loop, pushing more * data at the network filter. * * (This is usually the case when the client forces an SSL * renegotiation which is handled implicitly by OpenSSL.) 
*/ outctx->rc = APR_EAGAIN; } else if (ssl_err == SSL_ERROR_SYSCALL) { ap_log_cerror(APLOG_MARK, APLOG_INFO, outctx->rc, c, APLOGNO(01993) "SSL output filter write failed."); } else /* if (ssl_err == SSL_ERROR_SSL) */ { /* * Log SSL errors */ ap_log_cerror(APLOG_MARK, APLOG_INFO, outctx->rc, c, APLOGNO(01994) "SSL library error %d writing data", ssl_err); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, mySrvFromConn(c)); } if (outctx->rc == APR_SUCCESS) { outctx->rc = APR_EGENERAL; } } else if ((apr_size_t)res != len) { conn_rec *c = f->c; char *reason = "reason unknown"; /* XXX: probably a better way to determine this */ if (SSL_total_renegotiations(filter_ctx->pssl)) { reason = "likely due to failed renegotiation"; } ap_log_cerror(APLOG_MARK, APLOG_INFO, outctx->rc, c, APLOGNO(01995) "failed to write %" APR_SSIZE_T_FMT " of %" APR_SIZE_T_FMT " bytes (%s)", len - (apr_size_t)res, len, reason); outctx->rc = APR_EGENERAL; } return outctx->rc; } /* Just use a simple request. Any request will work for this, because * we use a flag in the conn_rec->conn_vector now. The fake request just * gets the request back to the Apache core so that a response can be sent. * Since we use an HTTP/1.x request, we also have to inject the empty line * that terminates the headers, or the core will read more data from the * socket. */ #define HTTP_ON_HTTPS_PORT \ "GET / HTTP/1.0" CRLF #define HTTP_ON_HTTPS_PORT_BUCKET(alloc) \ apr_bucket_immortal_create(HTTP_ON_HTTPS_PORT, \ sizeof(HTTP_ON_HTTPS_PORT) - 1, \ alloc) /* Custom apr_status_t error code, used when a plain HTTP request is * recevied on an SSL port. */ #define MODSSL_ERROR_HTTP_ON_HTTPS (APR_OS_START_USERERR + 0) /* Custom apr_status_t error code, used when the proxy cannot * establish an outgoing SSL connection. 
*/ #define MODSSL_ERROR_BAD_GATEWAY (APR_OS_START_USERERR + 1) static void ssl_io_filter_disable(SSLConnRec *sslconn, ap_filter_t *f) { bio_filter_in_ctx_t *inctx = f->ctx; SSL_free(inctx->ssl); sslconn->ssl = NULL; inctx->ssl = NULL; inctx->filter_ctx->pssl = NULL; } static apr_status_t ssl_io_filter_error(ap_filter_t *f, apr_bucket_brigade *bb, apr_status_t status) { SSLConnRec *sslconn = myConnConfig(f->c); apr_bucket *bucket; int send_eos = 1; switch (status) { case MODSSL_ERROR_HTTP_ON_HTTPS: /* log the situation */ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, f->c, APLOGNO(01996) "SSL handshake failed: HTTP spoken on HTTPS port; " "trying to send HTML error page"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, sslconn->server); sslconn->non_ssl_request = NON_SSL_SEND_HDR_SEP; ssl_io_filter_disable(sslconn, f); /* fake the request line */ bucket = HTTP_ON_HTTPS_PORT_BUCKET(f->c->bucket_alloc); send_eos = 0; break; case MODSSL_ERROR_BAD_GATEWAY: bucket = ap_bucket_error_create(HTTP_BAD_REQUEST, NULL, f->c->pool, f->c->bucket_alloc); ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, f->c, APLOGNO(01997) "SSL handshake failed: sending 502"); break; default: return status; } APR_BRIGADE_INSERT_TAIL(bb, bucket); if (send_eos) { bucket = apr_bucket_eos_create(f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, bucket); } return APR_SUCCESS; } static const char ssl_io_filter[] = "SSL/TLS Filter"; static const char ssl_io_buffer[] = "SSL/TLS Buffer"; static const char ssl_io_coalesce[] = "SSL/TLS Coalescing Filter"; /* * Close the SSL part of the socket connection * (called immediately _before_ the socket is closed) * or called with */ static void ssl_filter_io_shutdown(ssl_filter_ctx_t *filter_ctx, conn_rec *c, int abortive) { SSL *ssl = filter_ctx->pssl; const char *type = ""; SSLConnRec *sslconn = myConnConfig(c); int shutdown_type; int loglevel = APLOG_DEBUG; const char *logno; if (!ssl) { return; } /* * Now close the SSL layer of the connection. 
We've to take * the TLSv1 standard into account here: * * | 7.2.1. Closure alerts * | * | The client and the server must share knowledge that the connection is * | ending in order to avoid a truncation attack. Either party may * | initiate the exchange of closing messages. * | * | close_notify * | This message notifies the recipient that the sender will not send * | any more messages on this connection. The session becomes * | unresumable if any connection is terminated without proper * | close_notify messages with level equal to warning. * | * | Either party may initiate a close by sending a close_notify alert. * | Any data received after a closure alert is ignored. * | * | Each party is required to send a close_notify alert before closing * | the write side of the connection. It is required that the other party * | respond with a close_notify alert of its own and close down the * | connection immediately, discarding any pending writes. It is not * | required for the initiator of the close to wait for the responding * | close_notify alert before closing the read side of the connection. * * This means we've to send a close notify message, but haven't to wait * for the close notify of the client. Actually we cannot wait for the * close notify of the client because some clients (including Netscape * 4.x) don't send one, so we would hang. */ /* * exchange close notify messages, but allow the user * to force the type of handshake via SetEnvIf directive */ if (abortive) { shutdown_type = SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN; type = "abortive"; logno = APLOGNO(01998); loglevel = APLOG_INFO; } else switch (sslconn->shutdown_type) { case SSL_SHUTDOWN_TYPE_UNCLEAN: /* perform no close notify handshake at all (violates the SSL/TLS standard!) 
*/ shutdown_type = SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN; type = "unclean"; logno = APLOGNO(01999); break; case SSL_SHUTDOWN_TYPE_ACCURATE: /* send close notify and wait for clients close notify (standard compliant, but usually causes connection hangs) */ shutdown_type = 0; type = "accurate"; logno = APLOGNO(02000); break; default: /* * case SSL_SHUTDOWN_TYPE_UNSET: * case SSL_SHUTDOWN_TYPE_STANDARD: */ /* send close notify, but don't wait for clients close notify (standard compliant and safe, so it's the DEFAULT!) */ shutdown_type = SSL_RECEIVED_SHUTDOWN; type = "standard"; logno = APLOGNO(02001); break; } SSL_set_shutdown(ssl, shutdown_type); SSL_smart_shutdown(ssl); /* and finally log the fact that we've closed the connection */ if (APLOG_CS_IS_LEVEL(c, mySrvFromConn(c), loglevel)) { ap_log_cserror(APLOG_MARK, loglevel, 0, c, mySrvFromConn(c), "%sConnection closed to child %ld with %s shutdown " "(server %s)", logno, c->id, type, ssl_util_vhostid(c->pool, mySrvFromConn(c))); } /* deallocate the SSL connection */ if (sslconn->client_cert) { X509_free(sslconn->client_cert); sslconn->client_cert = NULL; } SSL_free(ssl); sslconn->ssl = NULL; filter_ctx->pssl = NULL; /* so filters know we've been shutdown */ if (abortive) { /* prevent any further I/O */ c->aborted = 1; } } static apr_status_t ssl_io_filter_cleanup(void *data) { ssl_filter_ctx_t *filter_ctx = data; if (filter_ctx->pssl) { conn_rec *c = (conn_rec *)SSL_get_app_data(filter_ctx->pssl); SSLConnRec *sslconn = myConnConfig(c); SSL_free(filter_ctx->pssl); sslconn->ssl = filter_ctx->pssl = NULL; } return APR_SUCCESS; } /* * The hook is NOT registered with ap_hook_process_connection. Instead, it is * called manually from the churn () before it tries to read any data. * There is some problem if I accept conn_rec *. Still investigating.. * Adv. if conn_rec * can be accepted is we can hook this function using the * ap_hook_process_connection hook. 
*/ /* Perform the SSL handshake (whether in client or server mode), if * necessary, for the given connection. */ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) { conn_rec *c = (conn_rec *)SSL_get_app_data(filter_ctx->pssl); SSLConnRec *sslconn = myConnConfig(c); SSLSrvConfigRec *sc; X509 *cert; int n; int ssl_err; long verify_result; server_rec *server; if (SSL_is_init_finished(filter_ctx->pssl)) { return APR_SUCCESS; } server = sslconn->server; if (sslconn->is_proxy) { #ifndef OPENSSL_NO_TLSEXT apr_ipsubnet_t *ip; #endif const char *hostname_note = apr_table_get(c->notes, "proxy-request-hostname"); sc = mySrvConfig(server); #ifndef OPENSSL_NO_TLSEXT /* * Enable SNI for backend requests. Make sure we don't do it for * pure SSLv3 connections, and also prevent IP addresses * from being included in the SNI extension. (OpenSSL would simply * pass them on, but RFC 6066 is quite clear on this: "Literal * IPv4 and IPv6 addresses are not permitted".) */ if (hostname_note && sc->proxy->protocol != SSL_PROTOCOL_SSLV3 && apr_ipsubnet_create(&ip, hostname_note, NULL, c->pool) != APR_SUCCESS) { if (SSL_set_tlsext_host_name(filter_ctx->pssl, hostname_note)) { ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, "SNI extension for SSL Proxy request set to '%s'", hostname_note); } else { ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(02002) "Failed to set SNI extension for SSL Proxy " "request to '%s'", hostname_note); ssl_log_ssl_error(SSLLOG_MARK, APLOG_WARNING, server); } } #endif if ((n = SSL_connect(filter_ctx->pssl)) <= 0) { ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02003) "SSL Proxy connect failed"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); /* ensure that the SSL structures etc are freed, etc: */ ssl_filter_io_shutdown(filter_ctx, c, 1); apr_table_setn(c->notes, "SSL_connect_rv", "err"); return MODSSL_ERROR_BAD_GATEWAY; } if (sc->proxy_ssl_check_peer_expire != SSL_ENABLED_FALSE) { cert = 
SSL_get_peer_certificate(filter_ctx->pssl); if (!cert || (X509_cmp_current_time( X509_get_notBefore(cert)) >= 0) || (X509_cmp_current_time( X509_get_notAfter(cert)) <= 0)) { ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02004) "SSL Proxy: Peer certificate is expired"); if (cert) { X509_free(cert); } /* ensure that the SSL structures etc are freed, etc: */ ssl_filter_io_shutdown(filter_ctx, c, 1); apr_table_setn(c->notes, "SSL_connect_rv", "err"); return HTTP_BAD_GATEWAY; } X509_free(cert); } if ((sc->proxy_ssl_check_peer_cn != SSL_ENABLED_FALSE) && hostname_note) { const char *hostname; hostname = ssl_var_lookup(NULL, server, c, NULL, "SSL_CLIENT_S_DN_CN"); apr_table_unset(c->notes, "proxy-request-hostname"); if (strcasecmp(hostname, hostname_note)) { ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02005) "SSL Proxy: Peer certificate CN mismatch:" " Certificate CN: %s Requested hostname: %s", hostname, hostname_note); /* ensure that the SSL structures etc are freed, etc: */ ssl_filter_io_shutdown(filter_ctx, c, 1); apr_table_setn(c->notes, "SSL_connect_rv", "err"); return HTTP_BAD_GATEWAY; } } apr_table_setn(c->notes, "SSL_connect_rv", "ok"); return APR_SUCCESS; } if ((n = SSL_accept(filter_ctx->pssl)) <= 0) { bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *) (filter_ctx->pbioRead->ptr); bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *) (filter_ctx->pbioWrite->ptr); apr_status_t rc = inctx->rc ? inctx->rc : outctx->rc ; ssl_err = SSL_get_error(filter_ctx->pssl, n); if (ssl_err == SSL_ERROR_ZERO_RETURN) { /* * The case where the connection was closed before any data * was transferred. That's not a real error and can occur * sporadically with some clients. */ ap_log_cerror(APLOG_MARK, APLOG_INFO, rc, c, APLOGNO(02006) "SSL handshake stopped: connection was closed"); } else if (ssl_err == SSL_ERROR_WANT_READ) { /* * This is in addition to what was present earlier. It is * borrowed from openssl_state_machine.c [mod_tls]. * TBD. 
*/ outctx->rc = APR_EAGAIN; return APR_EAGAIN; } else if (ERR_GET_LIB(ERR_peek_error()) == ERR_LIB_SSL && ERR_GET_REASON(ERR_peek_error()) == SSL_R_HTTP_REQUEST) { /* * The case where OpenSSL has recognized a HTTP request: * This means the client speaks plain HTTP on our HTTPS port. * ssl_io_filter_error will disable the ssl filters when it * sees this status code. */ return MODSSL_ERROR_HTTP_ON_HTTPS; } else if (ssl_err == SSL_ERROR_SYSCALL) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rc, c, APLOGNO(02007) "SSL handshake interrupted by system " "[Hint: Stop button pressed in browser?!]"); } else /* if (ssl_err == SSL_ERROR_SSL) */ { /* * Log SSL errors and any unexpected conditions. */ ap_log_cerror(APLOG_MARK, APLOG_INFO, rc, c, APLOGNO(02008) "SSL library error %d in handshake " "(server %s)", ssl_err, ssl_util_vhostid(c->pool, server)); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); } if (inctx->rc == APR_SUCCESS) { inctx->rc = APR_EGENERAL; } ssl_filter_io_shutdown(filter_ctx, c, 1); return inctx->rc; } sc = mySrvConfig(sslconn->server); /* * Check for failed client authentication */ verify_result = SSL_get_verify_result(filter_ctx->pssl); if ((verify_result != X509_V_OK) || sslconn->verify_error) { if (ssl_verify_error_is_optional(verify_result) && (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA)) { /* leaving this log message as an error for the moment, * according to the mod_ssl docs: * "level optional_no_ca is actually against the idea * of authentication (but can be used to establish * SSL test pages, etc.)" * optional_no_ca doesn't appear to work as advertised * in 1.x */ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02009) "SSL client authentication failed, " "accepting certificate based on " "\"SSLVerifyClient optional_no_ca\" " "configuration"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); } else { const char *error = sslconn->verify_error ? 
sslconn->verify_error : X509_verify_cert_error_string(verify_result); ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02010) "SSL client authentication failed: %s", error ? error : "unknown"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); ssl_filter_io_shutdown(filter_ctx, c, 1); return APR_ECONNABORTED; } } /* * Remember the peer certificate's DN */ if ((cert = SSL_get_peer_certificate(filter_ctx->pssl))) { if (sslconn->client_cert) { X509_free(sslconn->client_cert); } sslconn->client_cert = cert; sslconn->client_dn = NULL; } /* * Make really sure that when a peer certificate * is required we really got one... (be paranoid) */ if ((sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE) && !sslconn->client_cert) { ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02011) "No acceptable peer certificate available"); ssl_filter_io_shutdown(filter_ctx, c, 1); return APR_ECONNABORTED; } return APR_SUCCESS; } static apr_status_t ssl_io_filter_input(ap_filter_t *f, apr_bucket_brigade *bb, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes) { apr_status_t status; bio_filter_in_ctx_t *inctx = f->ctx; const char *start = inctx->buffer; /* start of block to return */ apr_size_t len = sizeof(inctx->buffer); /* length of block to return */ int is_init = (mode == AP_MODE_INIT); if (f->c->aborted) { /* XXX: Ok, if we aborted, we ARE at the EOS. We also have * aborted. This 'double protection' is probably redundant, * but also effective against just about anything. 
*/ apr_bucket *bucket = apr_bucket_eos_create(f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, bucket); return APR_ECONNABORTED; } if (!inctx->ssl) { SSLConnRec *sslconn = myConnConfig(f->c); if (sslconn->non_ssl_request == NON_SSL_SEND_HDR_SEP) { apr_bucket *bucket = apr_bucket_immortal_create(CRLF, 2, f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, bucket); sslconn->non_ssl_request = NON_SSL_SET_ERROR_MSG; return APR_SUCCESS; } return ap_get_brigade(f->next, bb, mode, block, readbytes); } /* XXX: we don't currently support anything other than these modes. */ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE && mode != AP_MODE_SPECULATIVE && mode != AP_MODE_INIT) { return APR_ENOTIMPL; } inctx->mode = mode; inctx->block = block; /* XXX: we could actually move ssl_io_filter_handshake to an * ap_hook_process_connection but would still need to call it for * AP_MODE_INIT for protocols that may upgrade the connection * rather than have SSLEngine On configured. */ if ((status = ssl_io_filter_handshake(inctx->filter_ctx)) != APR_SUCCESS) { return ssl_io_filter_error(f, bb, status); } if (is_init) { /* protocol module needs to handshake before sending * data to client (e.g. NNTP or FTP) */ return APR_SUCCESS; } if (inctx->mode == AP_MODE_READBYTES || inctx->mode == AP_MODE_SPECULATIVE) { /* Protected from truncation, readbytes < MAX_SIZE_T * FIXME: No, it's *not* protected. -- jre */ if (readbytes < len) { len = (apr_size_t)readbytes; } status = ssl_io_input_read(inctx, inctx->buffer, &len); } else if (inctx->mode == AP_MODE_GETLINE) { const char *pos; /* Satisfy the read directly out of the buffer if possible; * invoking ssl_io_input_getline will mean the entire buffer * is copied once (unnecessarily) for each GETLINE call. */ if (inctx->cbuf.length && (pos = memchr(inctx->cbuf.value, APR_ASCII_LF, inctx->cbuf.length)) != NULL) { start = inctx->cbuf.value; len = 1 + pos - start; /* +1 to include LF */ /* Buffer contents now consumed. 
*/ inctx->cbuf.value += len; inctx->cbuf.length -= len; status = APR_SUCCESS; } else { /* Otherwise fall back to the hard way. */ status = ssl_io_input_getline(inctx, inctx->buffer, &len); } } else { /* We have no idea what you are talking about, so return an error. */ status = APR_ENOTIMPL; } /* It is possible for mod_ssl's BIO to be used outside of the * direct control of mod_ssl's input or output filter -- notably, * when mod_ssl initiates a renegotiation. Switching the BIO mode * back to "blocking" here ensures such operations don't fail with * SSL_ERROR_WANT_READ. */ inctx->block = APR_BLOCK_READ; /* Handle custom errors. */ if (status != APR_SUCCESS) { return ssl_io_filter_error(f, bb, status); } /* Create a transient bucket out of the decrypted data. */ if (len > 0) { apr_bucket *bucket = apr_bucket_transient_create(start, len, f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, bucket); } #ifdef HAVE_TLS_NPN /* By this point, Next Protocol Negotiation (NPN) should be completed (if * our version of OpenSSL supports it). If we haven't already, find out * which protocol was decided upon and inform other modules by calling * npn_proto_negotiated_hook. */ if (!inctx->npn_finished) { const unsigned char *next_proto = NULL; unsigned next_proto_len = 0; SSL_get0_next_proto_negotiated( inctx->ssl, &next_proto, &next_proto_len); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, f->c, "SSL NPN negotiated protocol: '%s'", apr_pstrmemdup(f->c->pool, (const char*)next_proto, next_proto_len)); modssl_run_npn_proto_negotiated_hook( f->c, (const char*)next_proto, next_proto_len); inctx->npn_finished = 1; } #endif return APR_SUCCESS; } /* ssl_io_filter_output() produces one SSL/TLS message per bucket * passed down the output filter stack. This results in a high * overhead (network packets) for any output comprising many small * buckets. 
SSI page applied through the HTTP chunk filter, for * example, may produce many brigades containing small buckets - * [chunk-size CRLF] [chunk-data] [CRLF]. * * The coalescing filter merges many small buckets into larger buckets * where possible, allowing the SSL I/O output filter to handle them * more efficiently. */ #define COALESCE_BYTES (2048) struct coalesce_ctx { char buffer[COALESCE_BYTES]; apr_size_t bytes; /* number of bytes of buffer used. */ }; static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, apr_bucket_brigade *bb) { apr_bucket *e, *last = NULL; apr_size_t bytes = 0; struct coalesce_ctx *ctx = f->ctx; unsigned count = 0; /* The brigade consists of zero-or-more small data buckets which * can be coalesced (the prefix), followed by the remainder of the * brigade. * * Find the last bucket - if any - of that prefix. count gives * the number of buckets in the prefix. The "prefix" must contain * only data buckets with known length, and must be of a total * size which fits into the buffer. * * N.B.: The process here could be repeated throughout the brigade * (coalesce any run of consecutive data buckets) but this would * add significant complexity, particularly to memory * management. */ for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_METADATA(e) && e->length != (apr_size_t)-1 && e->length < COALESCE_BYTES && (bytes + e->length) < COALESCE_BYTES && (ctx == NULL || bytes + ctx->bytes + e->length < COALESCE_BYTES); e = APR_BUCKET_NEXT(e)) { last = e; if (e->length) count++; /* don't count zero-length buckets */ bytes += e->length; } /* Coalesce the prefix, if: * a) more than one bucket is found to coalesce, or * b) the brigade contains only a single data bucket, or * c) */ if (bytes > 0 && (count > 1 || (count == 1 && APR_BUCKET_NEXT(last) == APR_BRIGADE_SENTINEL(bb)))) { /* If coalescing some bytes, ensure a context has been * created. 
*/ if (!ctx) { f->ctx = ctx = apr_palloc(f->c->pool, sizeof *ctx); ctx->bytes = 0; } ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, "coalesce: have %" APR_SIZE_T_FMT " bytes, " "adding %" APR_SIZE_T_FMT " more", ctx->bytes, bytes); /* Iterate through the prefix segment. For non-fatal errors * in this loop it is safe to break out and fall back to the * normal path of sending the buffer + remaining buckets in * brigade. */ e = APR_BRIGADE_FIRST(bb); while (e != last) { apr_size_t len; const char *data; apr_bucket *next; if (APR_BUCKET_IS_METADATA(e) || e->length == (apr_size_t)-1) { ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(02012) "unexpected bucket type during coalesce"); break; /* non-fatal error; break out */ } if (e->length) { apr_status_t rv; /* A blocking read should be fine here for a * known-length data bucket, rather than the usual * non-block/flush/block. */ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ); if (rv) { ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(02013) "coalesce failed to read from data bucket"); return AP_FILTER_ERROR; } /* Be paranoid. */ if (len > sizeof ctx->buffer || (len + ctx->bytes > sizeof ctx->buffer)) { ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(02014) "unexpected coalesced bucket data length"); break; /* non-fatal error; break out */ } memcpy(ctx->buffer + ctx->bytes, data, len); ctx->bytes += len; } next = APR_BUCKET_NEXT(e); apr_bucket_delete(e); e = next; } } if (APR_BRIGADE_EMPTY(bb)) { /* If the brigade is now empty, our work here is done. */ return APR_SUCCESS; } /* If anything remains in the brigade, it must now be passed down * the filter stack, first prepending anything that has been * coalesced. 
*/ if (ctx && ctx->bytes) { apr_bucket *e; ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, "coalesce: passing on %" APR_SIZE_T_FMT " bytes", ctx->bytes); e = apr_bucket_transient_create(ctx->buffer, ctx->bytes, bb->bucket_alloc); APR_BRIGADE_INSERT_HEAD(bb, e); ctx->bytes = 0; /* buffer now emptied. */ } return ap_pass_brigade(f->next, bb); } static apr_status_t ssl_io_filter_output(ap_filter_t *f, apr_bucket_brigade *bb) { apr_status_t status = APR_SUCCESS; ssl_filter_ctx_t *filter_ctx = f->ctx; bio_filter_in_ctx_t *inctx; bio_filter_out_ctx_t *outctx; apr_read_type_e rblock = APR_NONBLOCK_READ; if (f->c->aborted) { apr_brigade_cleanup(bb); return APR_ECONNABORTED; } if (!filter_ctx->pssl) { /* ssl_filter_io_shutdown was called */ return ap_pass_brigade(f->next, bb); } inctx = (bio_filter_in_ctx_t *)filter_ctx->pbioRead->ptr; outctx = (bio_filter_out_ctx_t *)filter_ctx->pbioWrite->ptr; /* When we are the writer, we must initialize the inctx * mode so that we block for any required ssl input, because * output filtering is always nonblocking. */ inctx->mode = AP_MODE_READBYTES; inctx->block = APR_BLOCK_READ; if ((status = ssl_io_filter_handshake(filter_ctx)) != APR_SUCCESS) { return ssl_io_filter_error(f, bb, status); } while (!APR_BRIGADE_EMPTY(bb)) { apr_bucket *bucket = APR_BRIGADE_FIRST(bb); /* If it is a flush or EOS, we need to pass this down. * These types do not require translation by OpenSSL. */ if (APR_BUCKET_IS_EOS(bucket) || APR_BUCKET_IS_FLUSH(bucket)) { if (bio_filter_out_flush(filter_ctx->pbioWrite) < 0) { status = outctx->rc; break; } if (APR_BUCKET_IS_EOS(bucket)) { /* * By definition, nothing can come after EOS. * which also means we can pass the rest of this brigade * without creating a new one since it only contains the * EOS bucket. */ if ((status = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) { return status; } break; } else { /* bio_filter_out_flush() already passed down a flush bucket * if there was any data to be flushed. 
*/ apr_bucket_delete(bucket); } } else if (AP_BUCKET_IS_EOC(bucket)) { /* The EOC bucket indicates connection closure, so SSL * shutdown must now be performed. */ ssl_filter_io_shutdown(filter_ctx, f->c, 0); if ((status = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) { return status; } break; } else { /* filter output */ const char *data; apr_size_t len; status = apr_bucket_read(bucket, &data, &len, rblock); if (APR_STATUS_IS_EAGAIN(status)) { /* No data available: flush... */ if (bio_filter_out_flush(filter_ctx->pbioWrite) < 0) { status = outctx->rc; break; } rblock = APR_BLOCK_READ; continue; /* and try again with a blocking read. */ } rblock = APR_NONBLOCK_READ; if (!APR_STATUS_IS_EOF(status) && (status != APR_SUCCESS)) { break; } status = ssl_filter_write(f, data, len); apr_bucket_delete(bucket); if (status != APR_SUCCESS) { break; } } } return status; } struct modssl_buffer_ctx { apr_bucket_brigade *bb; }; int ssl_io_buffer_fill(request_rec *r, apr_size_t maxlen) { conn_rec *c = r->connection; struct modssl_buffer_ctx *ctx; apr_bucket_brigade *tempb; apr_off_t total = 0; /* total length buffered */ int eos = 0; /* non-zero once EOS is seen */ /* Create the context which will be passed to the input filter; * containing a setaside pool and a brigade which constrain the * lifetime of the buffered data. */ ctx = apr_palloc(r->pool, sizeof *ctx); ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc); /* ... and a temporary brigade. */ tempb = apr_brigade_create(r->pool, c->bucket_alloc); ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "filling buffer, max size " "%" APR_SIZE_T_FMT " bytes", maxlen); do { apr_status_t rv; apr_bucket *e, *next; /* The request body is read from the protocol-level input * filters; the buffering filter will reinject it from that * level, allowing content/resource filters to run later, if * necessary. 
*/ rv = ap_get_brigade(r->proto_input_filters, tempb, AP_MODE_READBYTES, APR_BLOCK_READ, 8192); if (rv) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02015) "could not read request body for SSL buffer"); return HTTP_INTERNAL_SERVER_ERROR; } /* Iterate through the returned brigade: setaside each bucket * into the context's pool and move it into the brigade. */ for (e = APR_BRIGADE_FIRST(tempb); e != APR_BRIGADE_SENTINEL(tempb) && !eos; e = next) { const char *data; apr_size_t len; next = APR_BUCKET_NEXT(e); if (APR_BUCKET_IS_EOS(e)) { eos = 1; } else if (!APR_BUCKET_IS_METADATA(e)) { rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02016) "could not read bucket for SSL buffer"); return HTTP_INTERNAL_SERVER_ERROR; } total += len; } rv = apr_bucket_setaside(e, r->pool); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02017) "could not setaside bucket for SSL buffer"); return HTTP_INTERNAL_SERVER_ERROR; } APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(ctx->bb, e); } ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "total of %" APR_OFF_T_FMT " bytes in buffer, eos=%d", total, eos); /* Fail if this exceeds the maximum buffer size. */ if (total > maxlen) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02018) "request body exceeds maximum size (%" APR_SIZE_T_FMT ") for SSL buffer", maxlen); return HTTP_REQUEST_ENTITY_TOO_LARGE; } } while (!eos); apr_brigade_destroy(tempb); /* After consuming all protocol-level input, remove all protocol-level * filters. It should strictly only be necessary to remove filters * at exactly ftype == AP_FTYPE_PROTOCOL, since this filter will * precede all > AP_FTYPE_PROTOCOL anyway. */ while (r->proto_input_filters->frec->ftype < AP_FTYPE_CONNECTION) { ap_remove_input_filter(r->proto_input_filters); } /* Insert the filter which will supply the buffered content. 
*/ ap_add_input_filter(ssl_io_buffer, ctx, r, c); return 0; } /* This input filter supplies the buffered request body to the caller * from the brigade stored in f->ctx. Note that the placement of this * filter in the filter stack is important; it must be the first * r->proto_input_filter; lower-typed filters will not be preserved * across internal redirects (see PR 43738). */ static apr_status_t ssl_io_filter_buffer(ap_filter_t *f, apr_bucket_brigade *bb, ap_input_mode_t mode, apr_read_type_e block, apr_off_t bytes) { struct modssl_buffer_ctx *ctx = f->ctx; apr_status_t rv; ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, "read from buffered SSL brigade, mode %d, " "%" APR_OFF_T_FMT " bytes", mode, bytes); if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) { return APR_ENOTIMPL; } if (APR_BRIGADE_EMPTY(ctx->bb)) { /* Suprisingly (and perhaps, wrongly), the request body can be * pulled from the input filter stack more than once; a * handler may read it, and ap_discard_request_body() will * attempt to do so again after *every* request. So input * filters must be prepared to give up an EOS if invoked after * initially reading the request. The HTTP_IN filter does this * with its ->eos_sent flag. */ APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(f->c->bucket_alloc)); return APR_SUCCESS; } if (mode == AP_MODE_READBYTES) { apr_bucket *e; /* Partition the buffered brigade. */ rv = apr_brigade_partition(ctx->bb, bytes, &e); if (rv && rv != APR_INCOMPLETE) { ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(02019) "could not partition buffered SSL brigade"); ap_remove_input_filter(f); return rv; } /* If the buffered brigade contains less then the requested * length, just pass it all back. 
*/ if (rv == APR_INCOMPLETE) { APR_BRIGADE_CONCAT(bb, ctx->bb); } else { apr_bucket *d = APR_BRIGADE_FIRST(ctx->bb); e = APR_BUCKET_PREV(e); /* Unsplice the partitioned segment and move it into the * passed-in brigade; no convenient way to do this with * the APR_BRIGADE_* macros. */ APR_RING_UNSPLICE(d, e, link); APR_RING_SPLICE_HEAD(&bb->list, d, e, apr_bucket, link); APR_BRIGADE_CHECK_CONSISTENCY(bb); APR_BRIGADE_CHECK_CONSISTENCY(ctx->bb); } } else { /* Split a line into the passed-in brigade. */ rv = apr_brigade_split_line(bb, ctx->bb, block, bytes); if (rv) { ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(02020) "could not split line from buffered SSL brigade"); ap_remove_input_filter(f); return rv; } } if (APR_BRIGADE_EMPTY(ctx->bb)) { apr_bucket *e = APR_BRIGADE_LAST(bb); /* Ensure that the brigade is terminated by an EOS if the * buffered request body has been entirely consumed. */ if (e == APR_BRIGADE_SENTINEL(bb) || !APR_BUCKET_IS_EOS(e)) { e = apr_bucket_eos_create(f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, e); } ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, "buffered SSL brigade exhausted"); /* Note that the filter must *not* be removed here; it may be * invoked again, see comment above. */ } return APR_SUCCESS; } /* The request_rec pointer is passed in here only to ensure that the * filter chain is modified correctly when doing a TLS upgrade. It * must *not* be used otherwise. 
*/
/* Create the SSL input-side context, add the connection input filter,
 * and build the read BIO that lets OpenSSL pull ciphertext from the
 * filter stack.  The request_rec is forwarded to ap_add_input_filter()
 * only for correct chain placement during a TLS upgrade (see the
 * comment above); it is not used otherwise. */
static void ssl_io_input_add_filter(ssl_filter_ctx_t *filter_ctx, conn_rec *c,
                                    request_rec *r, SSL *ssl)
{
    bio_filter_in_ctx_t *inctx;

    inctx = apr_palloc(c->pool, sizeof(*inctx));

    filter_ctx->pInputFilter = ap_add_input_filter(ssl_io_filter, inctx, r, c);

    /* Wire the input context into the read BIO so the BIO callbacks
     * can reach the filter state. */
    filter_ctx->pbioRead = BIO_new(&bio_filter_in_method);
    filter_ctx->pbioRead->ptr = (void *)inctx;

    /* Initial state: blocking byte-mode reads, empty char buffer,
     * fresh brigade allocated from the connection pool. */
    inctx->ssl = ssl;
    inctx->bio_out = filter_ctx->pbioWrite;
    inctx->f = filter_ctx->pInputFilter;
    inctx->rc = APR_SUCCESS;
    inctx->mode = AP_MODE_READBYTES;
    inctx->cbuf.length = 0;
    inctx->bb = apr_brigade_create(c->pool, c->bucket_alloc);
    inctx->block = APR_BLOCK_READ;
    inctx->pool = c->pool;
    inctx->filter_ctx = filter_ctx;
    inctx->npn_finished = 0;
}

/* The request_rec pointer is passed in here only to ensure that the
 * filter chain is modified correctly when doing a TLS upgrade. It
 * must *not* be used otherwise.
 *
 * Sets up the complete SSL I/O plumbing for a connection: the
 * coalescing output filter, the SSL output filter with its write BIO,
 * the input filter (via ssl_io_input_add_filter), and a pool cleanup.
 * Finally hands both BIOs to OpenSSL with SSL_set_bio(). */
void ssl_io_filter_init(conn_rec *c, request_rec *r, SSL *ssl)
{
    ssl_filter_ctx_t *filter_ctx;

    filter_ctx = apr_palloc(c->pool, sizeof(ssl_filter_ctx_t));

    filter_ctx->config = myConnConfig(c);

    /* The coalescing filter sits below the SSL output filter and
     * carries no context of its own at registration time. */
    ap_add_output_filter(ssl_io_coalesce, NULL, r, c);

    filter_ctx->pOutputFilter = ap_add_output_filter(ssl_io_filter,
                                                     filter_ctx, r, c);

    filter_ctx->pbioWrite = BIO_new(&bio_filter_out_method);
    filter_ctx->pbioWrite->ptr = (void *)bio_filter_out_ctx_new(filter_ctx, c);

    /* We insert a clogging input filter. Let the core know. */
    c->clogging_input_filters = 1;

    ssl_io_input_add_filter(filter_ctx, c, r, ssl);

    SSL_set_bio(ssl, filter_ctx->pbioRead, filter_ctx->pbioWrite);
    filter_ctx->pssl = ssl;

    /* Ensure the SSL state is torn down when the connection pool is
     * destroyed. */
    apr_pool_cleanup_register(c->pool, (void*)filter_ctx,
                              ssl_io_filter_cleanup, apr_pool_cleanup_null);

    /* Only install the (expensive) BIO data-dump callback when TRACE4
     * logging is actually enabled for this connection. */
    if (APLOG_CS_IS_LEVEL(c, mySrvFromConn(c), APLOG_TRACE4)) {
        BIO_set_callback(SSL_get_rbio(ssl), ssl_io_data_cb);
        BIO_set_callback_arg(SSL_get_rbio(ssl), (void *)ssl);
    }

    return;
}

/* Register the mod_ssl connection filters with the core.  The SSL
 * input/output filters run at CONNECTION+5; the coalescing output
 * filter runs just below them at CONNECTION+4 so it sees plaintext
 * before encryption; the buffering input filter is a PROTOCOL-level
 * filter (see ssl_io_filter_buffer). */
void ssl_io_filter_register(apr_pool_t *p)
{
    ap_register_input_filter  (ssl_io_filter, ssl_io_filter_input,  NULL, AP_FTYPE_CONNECTION + 5);

    ap_register_output_filter (ssl_io_coalesce, ssl_io_filter_coalesce, NULL, AP_FTYPE_CONNECTION + 4);
    ap_register_output_filter (ssl_io_filter, ssl_io_filter_output, NULL, AP_FTYPE_CONNECTION + 5);

    ap_register_input_filter  (ssl_io_buffer, ssl_io_filter_buffer, NULL, AP_FTYPE_PROTOCOL);

    return;
}

/*  _________________________________________________________________
**
**  I/O Data Debugging
**  _________________________________________________________________
*/

#define DUMP_WIDTH 16

/* Log a hex+ASCII dump of the buffer s[0..len) at TRACE7, DUMP_WIDTH
 * bytes per row.  Trailing spaces/NULs are trimmed first and reported
 * as a <SPACES/NULS> summary line. */
static void ssl_io_data_dump(server_rec *srvr, const char *s, long len)
{
    char buf[256];
    char tmp[64];
    int i, j, rows, trunc;
    unsigned char ch;

    /* Strip trailing blanks/NULs; remember how many were dropped. */
    trunc = 0;
    for(; (len > 0) && ((s[len-1] == ' ') || (s[len-1] == '\0')); len--)
        trunc++;
    rows = (len / DUMP_WIDTH);
    if ((rows * DUMP_WIDTH) < len)
        rows++;
    ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, srvr,
            "+-------------------------------------------------------------------------+");
    for(i = 0 ; i< rows; i++) {
#if APR_CHARSET_EBCDIC
        /* Pre-translate this row so the printable column shows EBCDIC
         * text rather than raw ASCII wire bytes. */
        char ebcdic_text[DUMP_WIDTH];
        j = DUMP_WIDTH;
        if ((i * DUMP_WIDTH + j) > len)
            j = len % DUMP_WIDTH;
        if (j == 0)
            j = DUMP_WIDTH;
        memcpy(ebcdic_text,(char *)(s) + i * DUMP_WIDTH, j);
        ap_xlate_proto_from_ascii(ebcdic_text, j);
#endif /* APR_CHARSET_EBCDIC */
        apr_snprintf(tmp, sizeof(tmp), "| %04x: ", i * DUMP_WIDTH);
        apr_cpystrn(buf, tmp, sizeof(buf));
        /* Hex column: two hex digits per byte, '-' separator after the
         * eighth byte of the row. */
        for (j = 0; j < DUMP_WIDTH; j++) {
            if (((i * DUMP_WIDTH) + j) >= len)
                apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
            else {
                ch = ((unsigned char)*((char *)(s) + i * DUMP_WIDTH + j)) & 0xff;
                apr_snprintf(tmp, sizeof(tmp), "%02x%c", ch , j==7 ? '-' : ' ');
                apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf));
            }
        }
        apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
        /* Printable column: byte as a character, '.' for anything
         * outside the printable range. */
        for (j = 0; j < DUMP_WIDTH; j++) {
            if (((i * DUMP_WIDTH) + j) >= len)
                apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf));
            else {
                ch = ((unsigned char)*((char *)(s) + i * DUMP_WIDTH + j)) & 0xff;
#if APR_CHARSET_EBCDIC
                apr_snprintf(tmp, sizeof(tmp), "%c", (ch >= 0x20 && ch <= 0x7F) ? ebcdic_text[j] : '.');
#else /* APR_CHARSET_EBCDIC */
                apr_snprintf(tmp, sizeof(tmp), "%c", ((ch >= ' ') && (ch <= '~')) ? ch : '.');
#endif /* APR_CHARSET_EBCDIC */
                apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf));
            }
        }
        apr_cpystrn(buf+strlen(buf), " |", sizeof(buf)-strlen(buf));
        ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, srvr, "%s", buf);
    }
    if (trunc > 0)
        ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, srvr,
                "| %04ld - <SPACES/NULS>", len + trunc);
    ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, srvr,
            "+-------------------------------------------------------------------------+");
    return;
}

/* OpenSSL BIO callback installed by ssl_io_filter_init at TRACE4.
 * Logs every completed BIO read/write (the *|BIO_CB_RETURN phases) and,
 * at TRACE7, dumps the transferred bytes via ssl_io_data_dump.
 * Returns rc unchanged so the callback never alters BIO behavior. */
long ssl_io_data_cb(BIO *bio, int cmd,
                    const char *argp, int argi, long argl, long rc)
{
    SSL *ssl;
    conn_rec *c;
    server_rec *s;

    /* Bail out quietly if the callback argument or app data was never
     * set up for this BIO/SSL. */
    if ((ssl = (SSL *)BIO_get_callback_arg(bio)) == NULL)
        return rc;
    if ((c = (conn_rec *)SSL_get_app_data(ssl)) == NULL)
        return rc;
    s = mySrvFromConn(c);

    if (   cmd == (BIO_CB_WRITE|BIO_CB_RETURN)
        || cmd == (BIO_CB_READ |BIO_CB_RETURN) ) {
        if (rc >= 0) {
            ap_log_cserror(APLOG_MARK, APLOG_TRACE4, 0, c, s,
                    "%s: %s %ld/%d bytes %s BIO#%pp [mem: %pp] %s",
                    SSL_LIBRARY_NAME,
                    (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
                    rc, argi,
                    (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"),
                    bio, argp,
                    (argp != NULL ? "(BIO dump follows)" : "(Oops, no memory buffer?)"));
            if ((argp != NULL) && APLOG_CS_IS_LEVEL(c, s, APLOG_TRACE7))
                ssl_io_data_dump(s, argp, rc);
        }
        else {
            /* Negative rc: the underlying I/O failed or would block. */
            ap_log_cserror(APLOG_MARK, APLOG_TRACE4, 0, c, s,
                    "%s: I/O error, %d bytes expected to %s on BIO#%pp [mem: %pp]",
                    SSL_LIBRARY_NAME, argi,
                    (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
                    bio, argp);
        }
    }
    return rc;
}
465938.c
#include "ntt287x.h"

/* Forward NTT of an 857-coefficient polynomial into the length-1722
 * padded domain: Good's permutation (41*7*6 split), then the
 * rader-41 and rader-7 sub-transforms.  fpad is transformed in place
 * after the masking step. */
void ntt1722_857(int* fpad, int* f){
    good_mask_41_7_6_857(fpad, f);
    ntt41_rader(fpad, fpad);
    ntt7_rader(fpad);
}

/* Pointwise base multiplication in the 1722 domain: 6x6 schoolbook
 * products over the 287 residue blocks.  NOTE(review): unlike the
 * other basemul_* wrappers this one appears to accumulate into fpad
 * rather than a separate output — confirm against polymul_6x6_287. */
void basemul_1722(int* fpad, int* gpad){
    polymul_6x6_287(fpad, gpad);
}

/* Inverse of ntt1722_857: inverse rader-7, inverse rader-41, then the
 * inverse Good's permutation back to the 1722-coefficient result h. */
void intt1722(int* h, int* fpad){
    intt7_rader(fpad);
    intt41_rader(fpad, fpad);
    inv_good_mask_41_7_6_1722(h, fpad);
}

/* Forward NTT for a 256-coefficient input into the 574 (41*7*2)
 * padded domain. */
void ntt574_256(int* fpad, int* f){
    good_mask_41_7_2_256(fpad, f);
    ntt41_rader_574(fpad, fpad);
    ntt7_rader_574(fpad);
}

/* Same as ntt574_256 but using the alternate ("x") masking variant. */
void ntt574_256x(int* fpad, int* f){
    good_mask_41_7_2_256x(fpad, f);
    ntt41_rader_574(fpad, fpad);
    ntt7_rader_574(fpad);
}

/* Pointwise base multiplication in the 574 domain (2x2 products over
 * 287 residue blocks): h = f * g blockwise. */
void basemul_574(int* h, int* f, int* g){
    polymul_2x2_287(h, f, g);
}

/* Inverse NTT from the 574 domain with final reduction to a degree-512
 * result: inverse sub-transforms, inverse Good's permutation into a
 * 287-element scratch buffer, then the length-512 unmasking step. */
void intt574_512(int* h, int* fpad){
    int tmp[287];
    intt7_rader_574(fpad);
    intt41_rader_574(fpad, fpad);
    inv_good_mask_41_7_2_574(tmp, fpad);
    imask512_574(h, tmp);
}

/* Same as intt574_512 but with the alternate ("x") unmasking variant. */
void intt574_512x(int* h, int* fpad){
    int tmp[287];
    intt7_rader_574(fpad);
    intt41_rader_574(fpad, fpad);
    inv_good_mask_41_7_2_574(tmp, fpad);
    imask512x_574(h, tmp);
}

/* Forward NTT for a 512-coefficient input into the 1148 (41*7*4)
 * padded domain. */
void ntt1148_512(int* fpad, int* f){
    good_mask_41_7_4_512(fpad, f);
    ntt41_rader_1148(fpad, fpad);
    ntt7_rader_1148(fpad);
}

/* Same as ntt1148_512 but using the alternate ("x") masking variant. */
void ntt1148_512x(int* fpad, int* f){
    good_mask_41_7_4_512x(fpad, f);
    ntt41_rader_1148(fpad, fpad);
    ntt7_rader_1148(fpad);
}

/* Pointwise base multiplication in the 1148 domain (4x4 products over
 * 287 residue blocks): h = f * g blockwise. */
void basemul_1148(int* h, int* f, int* g){
    polymul_4x4_287(h, f, g);
}

/* Inverse NTT from the 1148 domain with final reduction to a
 * degree-1024 result, mirroring intt574_512. */
void intt1148_1024(int* h, int* fpad){
    int tmp[574];
    intt7_rader_1148(fpad);
    intt41_rader_1148(fpad, fpad);
    inv_good_mask_41_7_4_1148(tmp, fpad);
    imask1024_1148(h, tmp);
}

/* Same as intt1148_1024 but with the alternate ("x") unmasking variant. */
void intt1148_1024x(int* h, int* fpad){
    int tmp[574];
    intt7_rader_1148(fpad);
    intt41_rader_1148(fpad, fpad);
    inv_good_mask_41_7_4_1148(tmp, fpad);
    imask1024x_1148(h, tmp);
}
978244.c
/** ****************************************************************************** * File Name : SPI.c * Description : This file provides code for the configuration * of the SPI instances. ****************************************************************************** * * COPYRIGHT(c) 2016 STMicroelectronics * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  *
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "spi.h"

#include "gpio.h"

/* USER CODE BEGIN 0 */

/* USER CODE END 0 */

/* Handle for the SPI3 peripheral, shared with the rest of the
 * application. */
SPI_HandleTypeDef hspi3;

/* SPI3 init function */
/* Configure SPI3 as a soft-NSS master on a single half-duplex data
 * line with a 4-bit frame and MSB-first ordering, CRC disabled.
 * NOTE(review): the 4-bit/half-duplex setup plus the tgt_swclk/
 * tgt_swdio pin names suggest this SPI is driving an SWD target
 * interface rather than a conventional SPI bus — confirm with the
 * board design before "fixing" the unusual DataSize. */
void MX_SPI3_Init(void)
{

  hspi3.Instance = SPI3;
  hspi3.Init.Mode = SPI_MODE_MASTER;
  hspi3.Init.Direction = SPI_DIRECTION_1LINE;
  hspi3.Init.DataSize = SPI_DATASIZE_4BIT;
  hspi3.Init.CLKPolarity = SPI_POLARITY_LOW;
  hspi3.Init.CLKPhase = SPI_PHASE_1EDGE;
  hspi3.Init.NSS = SPI_NSS_SOFT;
  hspi3.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_2;
  hspi3.Init.FirstBit = SPI_FIRSTBIT_MSB;
  hspi3.Init.TIMode = SPI_TIMODE_DISABLE;
  hspi3.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE;
  hspi3.Init.CRCPolynomial = 7;
  hspi3.Init.CRCLength = SPI_CRC_LENGTH_DATASIZE;
  hspi3.Init.NSSPMode = SPI_NSS_PULSE_ENABLE;
  if (HAL_SPI_Init(&hspi3) != HAL_OK)
  {
    Error_Handler();
  }

}

/* HAL MSP hook: enable the SPI3 clock and put the two data pins into
 * AF6 push-pull mode.  Called by HAL_SPI_Init().
 * NOTE(review): GPIO_InitStruct is not zero-initialized before use;
 * CubeMX-generated code relies on every consumed field being set
 * below — keep that invariant if fields are ever added. */
void HAL_SPI_MspInit(SPI_HandleTypeDef* spiHandle)
{

  GPIO_InitTypeDef GPIO_InitStruct;
  if(spiHandle->Instance==SPI3)
  {
  /* USER CODE BEGIN SPI3_MspInit 0 */

  /* USER CODE END SPI3_MspInit 0 */
    /* Peripheral clock enable */
    __HAL_RCC_SPI3_CLK_ENABLE();

    /**SPI3 GPIO Configuration
    PC10     ------> SPI3_SCK
    PC12     ------> SPI3_MOSI
    */
    GPIO_InitStruct.Pin = tgt_swclk_Pin|tgt_swdio_Pin;
    GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
    GPIO_InitStruct.Pull = GPIO_NOPULL;
    GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH;
    GPIO_InitStruct.Alternate = GPIO_AF6_SPI3;
    HAL_GPIO_Init(GPIOC, &GPIO_InitStruct);

  /* USER CODE BEGIN SPI3_MspInit 1 */

  /* USER CODE END SPI3_MspInit 1 */
  }
}

/* HAL MSP hook: reverse of HAL_SPI_MspInit — gate off the SPI3 clock
 * and return the pins to their reset state.  Called by
 * HAL_SPI_DeInit(). */
void HAL_SPI_MspDeInit(SPI_HandleTypeDef* spiHandle)
{

  if(spiHandle->Instance==SPI3)
  {
  /* USER CODE BEGIN SPI3_MspDeInit 0 */

  /* USER CODE END SPI3_MspDeInit 0 */
    /* Peripheral clock disable */
    __HAL_RCC_SPI3_CLK_DISABLE();

    /**SPI3 GPIO Configuration
    PC10     ------> SPI3_SCK
    PC12     ------> SPI3_MOSI
    */
    HAL_GPIO_DeInit(GPIOC, tgt_swclk_Pin|tgt_swdio_Pin);

  /* USER CODE BEGIN SPI3_MspDeInit 1 */

  /* USER CODE END SPI3_MspDeInit 1 */
  }
}

/* USER CODE BEGIN 1 */

/* USER CODE END 1 */

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
185423.c
#include <emmintrin.h> #include <stdlib.h> #include "system.h" #include "support.h" #include "game.h" #include "models.h" #include "draw.h" #include "lowgfx.h" #include "matrix.h" int r_resy; int r_pitch; U32 *r_canvas; static Real eye_x=0, eye_y=0; #define HORZ_VIEW_RANGE 90 //#define HORZ_VIEW_RANGE (W_WIDTH*2) static const float view_scale = r_resx / HORZ_VIEW_RANGE; //world unit -> pixel ///static struct { S16 r[4], g[4], b[4]; } r_canvas16[MAX_RESY][r_resx/4]; typedef struct { float r, g, b; } Color; #define DRAW_CLOUDS 1 #define NUM_CLOUD_BLOBS (50*12) static GfxBlob cloud_blobs[NUM_CLOUD_BLOBS]; static void generate_one_cloud( GfxBlob out[], unsigned num_blobs ) { const Real max_rotation = REALF( RADIANS( 360 ) ); const DReal min_step = REALF( 2.0 ); const DReal max_step = REALF( 5.0 ); const Real tau = REALF( 2 * PI ); Real r = REALF( 10.0 / 32.0 ) * num_blobs; unsigned blob = 0; Vec2 p; Real a; const unsigned g = 220; /* 220 + prng_next() % 32; */ p.x = prng_next() % REALF( W_WIDTH ); p.y = REALF( W_WATER_LEVEL - W_HEIGHT + 5 ); a = prng_next() % tau; for( blob=0; blob<num_blobs; blob++ ) { DReal step_length = min_step + prng_next() % ( max_step - min_step ); Vec2 dir = v_sincos( a = ( a + prng_next() % max_rotation - max_rotation / 2 + tau ) % tau ); out[blob].mode = BLOB_FUZZY; out[blob].color = RGBA_32( g, g, g, 40 ); out[blob].x = p.x; out[blob].y = p.y; out[blob].scale_x = r; out[blob].scale_y = r; dir.y >>= 1; p = v_addmul( p, dir, step_length ); r -= prng_next() % REALF( 0.5 ); } } void generate_clouds( void ) { if ( ! DRAW_CLOUDS ) return; #if 1 GfxBlob *blobs = cloud_blobs; unsigned blobs_left = NUM_CLOUD_BLOBS; do { /* s: when value is small there will be lots of small and fragmented clouds when value is large there will be only few clouds but they are BIG! 
*/ unsigned s = MIN( blobs_left, 30 + prng_next() % 20 ); generate_one_cloud( blobs, s ); blobs += s; blobs_left -= s; } while( blobs_left ); #else generate_one_cloud( cloud_blobs, NUM_CLOUD_BLOBS ); #endif } static Color lerp( Color a, Color b, float t ) { Color c; c.r = a.r + ( b.r - a.r ) * t; c.g = a.g + ( b.g - a.g ) * t; c.b = a.b + ( b.b - a.b ) * t; return c; } typedef U32 Pixel; Pixel AlphaBlendPixels(Pixel p1, Pixel p2) { static const int AMASK = 0xFF000000; static const int RBMASK = 0x00FF00FF; static const int GMASK = 0x0000FF00; static const int AGMASK = AMASK | GMASK; static const int ONEALPHA = 0x01000000; unsigned int a = (p2 & AMASK) >> 24; unsigned int na = 255 - a; unsigned int rb = ((na * (p1 & RBMASK)) + (a * (p2 & RBMASK))) >> 8; unsigned int ag = (na * ((p1 & AGMASK) >> 8)) + (a * (ONEALPHA | ((p2 & GMASK) >> 8))); return ((rb & RBMASK) | (ag & AGMASK)); } static Color unpack( U32 x ) { Color c; c.r = x >> 16 & 0xff; c.g = x >> 8 & 0xff; c.b = x & 0xff; return c; } static U32 pack( Color c ) { float t=255; int r = c.r*t, g = c.g*t, b = c.b*t; return r << 16 | g << 8 | b; } static U32 gray( float t ) { Color c = {t,t,t}; return pack( c ); } static void hline( unsigned y, U32 c ) { if ( y < (unsigned) r_resy ) { U32 *dst = r_canvas + r_pitch / 4 * y; for( int x=0; x<r_resx; ++x ) dst[x] = c; } } static void vline( unsigned x, U32 c ) { if ( x < (unsigned) r_resx ) { for( int y=0; y<r_resy; ++y ) r_canvas[r_pitch / 4 * y + x] = c; } } Real get_rel_x( Real x, Real x0 ) { Real c = x - x0, half = REALF( W_WIDTH*0.5f ), w = REALF( W_WIDTH ); if ( c < -half ) c += w; else if ( c > half ) c -= w; return c; } Real get_draw_x( Real x ) { return get_rel_x( x, eye_x ); } Real get_draw_y( Real y ) { return y - eye_y; } static void hline_w( float w, U32 c ) { int y = r_resy / 2 + ( w - REALTOF( eye_y ) ) * view_scale; hline( y, c ); } static void vline_w( float w, U32 c ) { float p = REALTOF( get_draw_x( REALF( w ) ) ); int x = r_resx / 2 + p * view_scale; 
vline( x, c ); } static void draw_point( U32 x, U32 y, U32 c ) { if ( x < r_resx && y < r_resy ) r_canvas[r_pitch / 4 * y + x] = c; } static void draw_bg( void ) { Color c0 = {0.694, 0.784, 0.804}; Color c1 = {0.400, 0.435, 0.569}; float t0 = -50; float t1 = 70; float view_h = r_resy / view_scale; float t = ( REALTOF( eye_y ) - view_h * 0.5f - t0 ) / ( t1 - t0 ); float dt = 1.0 / (( t1 - t0 ) * view_scale ); U32 *dst = r_canvas; int skip = r_pitch >> 2; for( int y=0; y<r_resy; ++y ) { float q = CLIP( t, 0, 1 ); U32 c = pack( lerp( c0, c1, q ) ); for( int x=0; x<r_resx; ++x ) dst[x] = c; t += dt; dst += skip; } if ( 0 ) { hline_w( t0, pack( c1 ) ); hline_w( t1, pack( c0 ) ); } } void draw_water( void ) { int u_prec = 14; Real tmp = REALF( W_WIDTH ) + eye_x - REALF( HORZ_VIEW_RANGE/2 ); U32 u0 = (tmp % REALF( W_WIDTH ) << u_prec)/REALF(WATER_ELEM_SPACING); U32 dudx = (1u<<u_prec) / ( WATER_ELEM_SPACING * view_scale ); U32 *dst = r_canvas; int skip = r_pitch >> 2; Real y = eye_y - REALF( r_resy / view_scale * 0.5f ) - REALF( W_WATER_LEVEL ); Real dy = REALF( 1.0f / view_scale ); for( int ln=0; ln<r_resy; ++ln ) { U32 u = u0; for( int x=0; x<r_resx; ++x, u+=dudx ) { U32 m = WATER_RESOL; U32 i = u >> u_prec; U32 j = i%m; Real *z = WORLD.water.z; #if 1 // triangle shaped waves DReal f = REAL_FRACT_PART( u >> u_prec - REAL_FRACT_BITS ); Real l = z[j]; Real r = z[(i+1u) % m]; Real h = l + (( r - l ) * f >> REAL_FRACT_BITS ); #else Real h = z[j]; #endif if ( y > h ) { dst[x] = dst[x] >> 1 & 0x7f7f7f; } } y += dy; dst += skip; } } static int object_is_visible( Real x ) { return abs( get_rel_x( x, eye_x ) ) < REALF( HORZ_VIEW_RANGE/2+MAX_THING_BOUND_R ); } static S32 current_mvp[16]; int vertex_data_dim = 3; void set_mvp_matrix_f( const float m[16] ) { for( int i=0; i<16; ++i ) current_mvp[i] = REALF( m[i] ); } void flip_mvp_matrix_z( void ) { S32 *m = current_mvp; m[8] = -m[8]; m[9] = -m[9]; m[10] = -m[10]; } static void transform_vertex( S32 out[2], S32 const in[3], S32 
const m[16] ) { int i, p = REAL_FRACT_BITS; S32 x = in[0], y = in[1], z = vertex_data_dim == 2 ? 0 : in[2]; for( i=0; i<2; ++i ) out[i] = ( m[0+i]*x + m[4+i]*y + m[8+i]*z >> p ) + m[12+i]; } static const S32 *get_transformed_vertex( const S32 in[3] ) { #define CACHE_SIZE 4 static S32 const *cache_p[CACHE_SIZE] = {0}; static S32 cache_v[CACHE_SIZE][2]; static int wr = 0; S32 *out; #if 1 for( int i=0; i<CACHE_SIZE; ++i ) { if ( cache_p[i] == in ) return cache_v[i]; } #endif cache_p[wr] = in; out = cache_v[wr]; wr = ( wr + 1 ) % CACHE_SIZE; transform_vertex( out, in, current_mvp ); return out; #undef CACHE_SIZE } static S32 edge_setup( S32 u[2], const S32 a[2], const S32 b[2], S32 x0, S32 y0, int p ) { S32 ux, uy; u[0] = ( ux = b[1] - a[1] ); u[1] = ( uy = a[0] - b[0] ); return (x0-a[0])*ux + (y0-a[1])*uy >> p; } void draw_triangle( const S32 v0[2], const S32 v1[2], const S32 v2[2], U32 color ) { const int p = REAL_FRACT_BITS; const int skip = r_pitch / 4; S32 u0[2], u1[2], u2[2]; S32 d0, d1, d2; S32 x0, y0, x1, y1, x, y; U32 *dst; x0 = MIN( MIN( v0[0], v1[0] ), v2[0] ) >> p; y0 = MIN( MIN( v0[1], v1[1] ), v2[1] ) >> p; x1 = ( MAX( MAX( v0[0], v1[0] ), v2[0] ) >> p ) + 1; y1 = ( MAX( MAX( v0[1], v1[1] ), v2[1] ) >> p ) + 1; x0 = MAX( x0, 0 ); y0 = MAX( y0, 0 ); x1 = MIN( x1, r_resx ); y1 = MIN( y1, r_resy ); dst = r_canvas + y0 * skip; S32 x0p = x0 << p, y0p = y0 << p; d0 = edge_setup( u0, v0, v1, x0p, y0p, p ); d1 = edge_setup( u1, v1, v2, x0p, y0p, p ); d2 = edge_setup( u2, v2, v0, x0p, y0p, p ); for( y=y0; y<y1; ++y ) { S32 d0_x0=d0, d1_x0=d1, d2_x0=d2; for( x=x0; x<x1; ++x ) { int s0 = d0 > 0; int s1 = d1 > 0; int s2 = d2 > 0; if ( s0 == s1 && s1 == s2 ) { dst[x] = color; } d0 += u0[0]; d1 += u1[0]; d2 += u2[0]; } d0 = d0_x0 + u0[1]; d1 = d1_x0 + u1[1]; d2 = d2_x0 + u2[1]; dst += skip; } return; draw_point( x0, y0, 0xFF ); draw_point( x1-1, y0, 0xFF ); draw_point( x0, y1-1, 0xFF ); draw_point( x1-1, y1-1, 0xFF ); draw_point( v0[0]>>p, v0[1]>>p, 0xFF00 ); 
draw_point( v1[0]>>p, v1[1]>>p, 0xD600 ); draw_point( v2[0]>>p, v2[1]>>p, 0x8000 ); } void draw_triangle_t( const S32 v0[3], const S32 v1[3], const S32 v2[3], U32 color ) { const S32 *a, *b, *c; a = get_transformed_vertex( v0 ); b = get_transformed_vertex( v1 ); c = get_transformed_vertex( v2 ); draw_triangle( a, b, c, color ); } void draw_triangles( const S32 verts[], const U8 idx[], int n_idx, U32 color ) { int i = 0; int s = vertex_data_dim; do { draw_triangle_t( verts + s*idx[i], verts + s*idx[i+1], verts + s*idx[i+2], color ); i += 3; } while ( i < n_idx ); } void draw_quads( const S32 verts[], const U8 idx[], int n_idx, U32 color ) { int i = 0; int s = vertex_data_dim; do { const S32 *a, *b, *c, *d; a = verts + s * idx[i]; b = verts + s * idx[i+1]; c = verts + s * idx[i+2]; d = verts + s * idx[i+3]; draw_triangle_t( a, b, c, color ); draw_triangle_t( a, c, d, color ); i += 4; } while ( i < n_idx ); } // fractional bits in circle coordinates (cx,cy,r0,r) #define CIRCLE_FB 2 // r0: interior radius // r1: exterior radius // max_alpha: 256 corresponds to 1.0 void draw_circle( S32 cx, S32 cy, S32 r0, S32 r, U32 color, U32 max_alpha ) { r = MAX( r, 1 ); r0 = MIN( r0, r-1 ); //prevent division by zero #define p CIRCLE_FB int x0, y0, x1, y1, x, y; // x0,y0,x1,y1: integer bounding box of pixels to be processed x0 = cx - r >> p; x0 = MAX( x0, 0 ); x0 &= ~3; // align to 16 byte boundary (aka 4 pixels) x1 = cx + r + (1<<p) >> p; x1 = x0 + ( x1 - x0 + 7 & ~7 ); // pad width to multiple of 8 x1 = MIN( x1, r_resx ); if ( x1 == r_resx ) { // since inner loop writes 8 pixels, the last 4 pixels may overflow to the next scanline: // better fix: drop this and add extra padding to scanlines x0 = x1 - ( x1 - x0 + 7 & ~7 ); } y0 = cy - r >> p; y0 = MAX( y0, 0 ); y1 = cy + r + (1<<p) >> p; y1 = MIN( y1, r_resy ); int skip = r_pitch >> 2; U32 *dst = r_canvas + y0 * skip; S32 half = 1<<p-1; S32 dx0s = ( x0 << p ) + half - cx; S32 dy0s = ( y0 << p ) + half - cy; static const S16 off[] 
= {0<<p,1<<p,2<<p,3<<p,4<<p,5<<p,6<<p,7<<p}; __m128i a0, b, dx0, dy, rr; __m128i zero = _mm_setzero_si128(); __m128i allset = _mm_cmpeq_epi16( allset, allset ); rr = _mm_set1_epi16( r*r + half >> p ); int a_bits = 6; __m128i a_edge, a_mul; U32 tmp = (U32)(r*r - r0*r0); a_edge = _mm_set1_epi16( r0*r0 >> p ); a_mul = tmp == 0 ? zero : _mm_set1_epi16( (max_alpha<<2*p+a_bits) / tmp ); __m128i one = _mm_srli_epi16( allset, 15 ); __m128i dy_inc = _mm_slli_epi16( one, p ), //1<<p dx_inc = _mm_slli_epi16( one, 3+p ); //8<<p __m128i b_inc = dy_inc, //1<<p f_inc = _mm_slli_epi16( one, 6+p ); //64<<p // (dx0,dy): distance to centre dx0 = _mm_add_epi16( _mm_set1_epi16( dx0s ), _mm_load_si128( (void*) off ) ); dy = _mm_set1_epi16( dy0s ); // (a0,b): squared distance to centre a0 = _mm_srli_epi16( _mm_mullo_epi16( dx0, dx0 ), p ); b = _mm_srli_epi16( _mm_mullo_epi16( dy, dy ), p ); __m128i colx; // color as 16bit colx = _mm_set_epi64x( 0, (U64) color | (U64) color << 32 ); colx = _mm_unpacklo_epi8( colx, zero ); for( y=y0; y<y1; ++y ) { __m128i f = _mm_add_epi16( a0, b ), dx = dx0; for( x=x0; x<x1; x+=8 ) { __m128i m = _mm_cmplt_epi16( f, rr ); int mi = _mm_movemask_epi8( m ); if ( mi ) { __m128i alpha; alpha = _mm_sub_epi16( rr, _mm_max_epi16( f, a_edge ) ); alpha = _mm_mullo_epi16( alpha, a_mul ); alpha = _mm_srli_epi16( alpha, p+a_bits ); //alpha = _mm_max_epi16( alpha, zero ); for( int j=0; j<2; ++j ) { __m128i c0, c1, c2, c3, c4, c6, c7, M; void *mem = dst + x + 4*j; __m128i a2, al, ah; a2 = _mm_unpacklo_epi16( alpha, alpha ); al = _mm_unpacklo_epi32( a2, a2 ); ah = _mm_unpackhi_epi32( a2, a2 ); c0 = _mm_load_si128( mem ); // rgb.rgb.rgb.rgb. 
c1 = _mm_unpacklo_epi8( c0, zero ); // first two pixels as 16bit c2 = _mm_add_epi16( _mm_mullo_epi16( _mm_sub_epi16( colx, c1 ), al ), _mm_slli_epi16( c1, 8 ) ); c3 = _mm_unpackhi_epi8( c0, zero ); // last two pixels as 16bit c4 = _mm_add_epi16( _mm_mullo_epi16( _mm_sub_epi16( colx, c3 ), ah ), _mm_slli_epi16( c3, 8 ) ); c6 = _mm_packus_epi16( _mm_srli_epi16( c2, 8 ), _mm_srli_epi16( c4, 8 ) ); M = _mm_unpacklo_epi16( m, m ); c7 = _mm_or_si128( _mm_and_si128( M, c6 ), _mm_andnot_si128( M, c0 ) ); _mm_store_si128( mem, c7 ); m = _mm_srli_si128( m, 8 ); alpha = _mm_srli_si128( alpha, 8 ); } } f = _mm_add_epi16( f, f_inc ); f = _mm_add_epi16( f, _mm_slli_epi16( dx, 4 ) ); dx = _mm_add_epi16( dx, dx_inc ); } b = _mm_add_epi16( b, b_inc ); b = _mm_add_epi16( b, _mm_add_epi16( dy, dy ) ); dy = _mm_add_epi16( dy, dy_inc ); dst += skip; } #undef p } void draw_blob( const GfxBlob blob[1] ) { int p = CIRCLE_FB; int s = REAL_FRACT_BITS - p; S32 f = REALF( view_scale ); S32 x = (r_resx/2<<p) + ( REAL_MUL( blob->x, f ) >> s ); S32 y = (r_resy/2<<p) + ( REAL_MUL( blob->y, f ) >> s ); S32 r1 = REAL_MUL( blob->scale_x, f ) >> s + 1; //+1 because blob->scale_x is the diameter S32 r0 = blob->mode == BLOB_FUZZY ? 0 : r1 - ( 1 << p - 1 ); int ri = ( r1 >> p ) + 1; int xi = abs( ( x >> p ) - r_resx/2 ) - ri; int yi = abs( ( y >> p ) - r_resy/2 ) - ri; if ( xi > r_resx/2 || yi > r_resy/2 ) return; draw_circle( x, y, r0, r1, blob->color, 256 ); } static int cmp_blob( const void *a0, const void *b0 ) { const GfxBlob *a=a0, *b=b0; S32 dx = abs( b->x - a->x ); S32 dy = abs( b->y - a->y ); S32 d = MAX( dx, dy ); S32 ar = a->scale_x; S32 br = b->scale_x; if ( d < (ar+br>>1) ) return 0; // don't sort overlapping blobs return a->y < b->y ? 
/* Tail of cmp_blob() — the comparator's head lies before this chunk and is
 * left untouched. Presumably orders blobs by y for the qsort in draw_blobs();
 * TODO confirm against the full definition. */
-1 : ( b->y > a->y ); }

/* Draw a batch of blobs in one pass.
 * Blobs are sorted first so draw_blob() touches memory in a predictable
 * order (see comment below). */
void draw_blobs( unsigned num_blobs, GfxBlob blobs[] )
{
    // cache coherency improvement
    qsort( blobs, num_blobs, sizeof(blobs[0]), cmp_blob );
    for( unsigned i=0; i<num_blobs; ++i )
        draw_blob( blobs+i );
}

/* Build the GfxBlob used to render a particle Thing (water splash / smoke).
 * Size, grayscale color and fade-out alpha depend on the particle type and
 * age. Returns the blob by value; does not modify the Thing. */
static GfxBlob get_particle_blob( Thing *thing )
{
    /* Width/height pairs indexed by ParticleType (2 entries per type). */
    static const Real PARTICLE_SIZE[] = {
        REALF(0.5), REALF(0.5), /* water1 */
        REALF(1.4), REALF(0.75), /* water2*/
        REALF(1), REALF(1), /* smoke (size should be random) */
    };
    ParticleType type = thing->data.pt.type;
    U8 g, a;
    Real w, h;
    GfxBlob blob;
    /* 8-bit grayscale values for particles:
       0xf4 ... water1
       0xf4 ... water2
       0x32 ... smoke */
    /* Per-type byte extracted from a packed constant: type*8 bit shift. */
    g = 0x32f4f4 >> ( type << 3 );
    #if ENABLE_BLEND
    /* Fade out */
    /* Alpha ramps from 0xFF down to ~0 as age approaches MAX_PARTICLE_TIME. */
    a = 0xFF - ( thing->age << 8 ) / ( REALF( MAX_PARTICLE_TIME ) + 1 );
    #else
    a = g;
    #endif
    w = PARTICLE_SIZE[type << 1];
    h = PARTICLE_SIZE[(type << 1) + 1];
    blob.mode = BLOB_FUZZY;
    blob.color = RGBA_32( g, g, g, a );
    blob.x = get_draw_x( thing->phys.pos.x );
    blob.y = get_draw_y( thing->phys.pos.y );
    blob.scale_x = w;
    blob.scale_y = h;
    if ( type == PT_SMOKE ) {
        /* Smoke grows with age instead of using the static size table. */
        blob.scale_y = blob.scale_x = \
            REALF( 1.0 ) + REALF( 0.75 ) * thing->age / (unsigned)( MAX_PARTICLE_TIME * GAME_TICKS_PER_SEC );
        /* NOTE(review): mode was already set to BLOB_FUZZY above; this
         * reassignment is redundant but harmless. */
        blob.mode = BLOB_FUZZY;
    }
    return blob;
}

/* Debug helper: draw colored points along the X (blue 0xFF), Y (green
 * 0xFF00) and Z (red 0xFF0000) axes of the transform 'mat', 'g' points per
 * axis spanning 'scale' world units. Origin is drawn in blue. */
static void draw_xyz_axis( float mat[16], float scale, int g )
{
    float org[3];
    scale /= (float) g;
    m_mult_v3( org, mat, 0, 0, 0 );
    draw_point( org[0], org[1], 0xFF );
    for( int i=0; i<g; ++i ) {
        float t = scale * ( i + 1 );
        float a[3], b[3], c[3];
        m_mult_v3( a, mat, t, 0, 0 );
        m_mult_v3( b, mat, 0, t, 0 );
        m_mult_v3( c, mat, 0, 0, t );
        draw_point( c[0], c[1], 0xFF0000 );
        draw_point( b[0], b[1], 0xFF00 );
        draw_point( a[0], a[1], 0xFF );
    }
}

/* Render all foreground world objects: iterates WORLD.things, culls those
 * outside the horizontal view range, pushes a model for each thing type and
 * collects point/particle blobs into a local batch drawn at the end.
 * Optionally (DRAW_CLOUDS) also draws the visible cloud blobs. */
static void render_world_fg( void )
{
    #define MAX_BLOBS 32000
    GfxBlob blobs[MAX_BLOBS];
    unsigned num_blobs = 0;
    unsigned n;
    for( n=0; n<WORLD.num_things; n++ )
    {
        Thing *t = WORLD.things + n;
        Real x = get_draw_x( t->phys.pos.x );
        Real y = get_draw_y( t->phys.pos.y );
        Real yaw = -t->angle;
        Real roll = 0;
        ModelID mdl = BAD_MODEL_ID;
        int is_heli = 0;

        /* Horizontal cull: skip things entirely outside the view range. */
        if ( abs( x ) > REALF( HORZ_VIEW_RANGE*0.5+MAX_THING_BOUND_R ) )
        {
            continue;
        }

        switch( t->type )
        {
            case T_AIRCRAFT:
                roll = yaw + REALF( PI );
                mdl = ( is_heli = t->data.ac.is_heli ) ? M_HELI_BODY : M_AIRCRAFT;
                break;
            case T_GUNSHIP:
                mdl = M_GUNSHIP;
                break;
            case T_AAGUN:
                /* Gun base is drawn as a black blob; barrel as a model. */
                if ( num_blobs < MAX_BLOBS )
                {
                    GfxBlob b;
                    /* NOTE(review): b.mode is left uninitialized here —
                     * presumably draw_blob tolerates it or 0 is a valid
                     * mode; TODO confirm against GfxBlob/draw_blob. */
                    b.color = RGBA_32(0,0,0,255);
                    b.x = x;
                    b.y = y;
                    b.scale_x = b.scale_y = REALF( 1.5 );
                    blobs[num_blobs++] = b;
                }
                mdl = M_AAGUN_BARREL;
                break;
            case T_RADAR:
                /* Radar dish spins with age; yaw tracks the parent mount. */
                roll = t->age;
                yaw = REALF( -PI / 2 );
                mdl = M_RADAR;
                if ( t->parent )
                    yaw += t->parent->angle;
                break;
            case T_BATTLESHIP:
                mdl = M_BATTLESHIP;
                break;
            case T_PROJECTILE:
                if ( num_blobs < MAX_BLOBS )
                {
                    GfxBlob b;
                    /* NOTE(review): b.mode also uninitialized here (see
                     * T_AAGUN above). */
                    b.color = RGBA_32(0,0,0,255);
                    b.x = x;
                    b.y = y;
                    b.scale_x = b.scale_y = REALF( 0.5 );
                    blobs[num_blobs++] = b;
                }
                break;
            case T_PARTICLE:
                if ( num_blobs < MAX_BLOBS )
                {
                    blobs[num_blobs++] = get_particle_blob( t );
                }
                break;
            case T_DEBRIS:
                mdl = M_DEBRIS;
                roll = 2 * yaw + REALF( PI/3 );
                break;
            default:
                break;
        }

        if ( mdl != BAD_MODEL_ID )
        {
            /* Matrix stack: translate -> yaw -> roll, then push the model. */
            ms_push();
            ms_translate_r( x, y, 0 );
            {
                ms_push();
                ms_rotate( 2, REALTOF( yaw ) );
                {
                    ms_push();
                    {
                        ms_rotate( 0, REALTOF( roll ) );
                        push_model_mat( mdl );
                        //draw_xyz_axis( ms_cur, 3, 4 );
                    }
                    ms_pop();
                    /* Exhaust flame for fixed-wing aircraft under throttle. */
                    if ( t->type == T_AIRCRAFT && !is_heli && t->data.ac.throttle_on )
                        push_model_mat( M_AIRCRAFT_FLAME );
                }
                ms_pop();
                if ( is_heli )
                {
                    ms_rotate( 2, REALTOF( yaw ) );
                    ms_rotate( 0, REALTOF( roll ) );
                    /* The main rotor */
                    ms_push();
                    {
                        ms_translate( 0.2, 0.1, 0 );
                        ms_rotate( 1, REALTOF( 16 * t->age % REALF( 2 * PI ) ) );
                        push_model_mat( M_HELI_ROTOR );
                    }
                    ms_pop();
                    /* Small rotor in the rear */
                    ms_translate( -1.2, 0.2, 0 );
                    ms_rotate( 2, REALTOF( -16 * t->age % REALF( 2 * PI ) ) );
                    ms_scale( 0.5, 0.5, 0.5 );
                    ms_rotate( 0, -PI/2 );
                    push_model_mat( M_HELI_ROTOR );
                }
            }
            ms_pop();
        }
    }
    ASSERT( num_blobs < MAX_BLOBS );
    flush_models();
    draw_blobs( num_blobs, blobs );
    #if DRAW_CLOUDS
    /* Reuse the local blob batch for visible cloud blobs. */
    num_blobs=0;
    for( int i=0; i<NUM_CLOUD_BLOBS; ++i )
    {
        GfxBlob *b = cloud_blobs + i;
        Real x = get_draw_x( b->x );
        if ( abs(x) < REALF( HORZ_VIEW_RANGE/2.0 ) + (b->scale_x>>1) )
        {
            GfxBlob b1 = *b;
            b1.x = x;
            b1.y = get_draw_y( b->y );
            blobs[num_blobs++] = b1;
            ASSERT( num_blobs <= MAX_BLOBS );
        }
    }
    draw_blobs( num_blobs, blobs );
    #endif
}

/* Top-level frame render: update the eye position from the player, draw
 * background/water, set up the world->pixel matrix and render the world.
 * Several if(0)/if(1) blocks are debug overlays toggled by editing the
 * condition. */
void render( void )
{
    if ( WORLD.player )
    {
        eye_x = WORLD.player->phys.pos.x;
        eye_y = WORLD.player->phys.pos.y;
    }

    draw_bg();
    draw_water();

    if ( 0 )
    {
        /* Debug: crosshair at the eye position. */
        hline_w( REALTOF( eye_y ), 0xe0e0e0 );
        vline_w( REALTOF( eye_x ), 0xe0e0e0 );
    }
    if ( 1 )
    {
        /* World-origin axes. */
        hline_w( 0, 0x808080 );
        vline_w( 0, 0x808080 );
    }
    if ( 0 )
    {
        /* Debug: water/altitude limit lines. */
        //hline_w( W_WATER_LEVEL, 0xFF0000 );
        hline_w( W_WATER_LEVEL + W_WATER_DEPTH, 0x7F0000 );
        hline_w( W_WATER_DEATH_LEVEL, 0x1F003F );
        hline_w( MAX_AIRCRAFT_ALTITUDE, 0x1f0000 );
    }

    ms_push();
    ms_translate( r_resx*0.5f, r_resy*0.5f, 0 );
    ms_scale( view_scale, view_scale, 1 );
    // ms_cur is now the modelview projection matrix (world coords -> pixels)
    render_world_fg();
    if ( 0 )
    {
        /* Debug: project a few world points through ms_cur. */
        float a[3], b[3], c[3];
        ms_push();
        ms_translate_r( get_draw_x(0), get_draw_y(0), 0 );
        m_mult_v3( a, ms_cur, 0, 0, 0 );
        m_mult_v3( b, ms_cur, 3, 0, 0 );
        m_mult_v3( c, ms_cur, 0, 3, 0 );
        ms_pop();
        draw_point( a[0], a[1], 0xFF );
        draw_point( b[0], b[1], 0xFF );
        draw_point( c[0], c[1], 0xFF );
    }
    ms_pop();

    if ( 0 )
    {
        /* Debug: animated circle for the fixed-point circle rasterizer. */
        static float a = 0;
        float
            x = 50.5f + cosf( a ) * 40,
            y = 50.5f + sinf( a ) * 20,
            r = 20.0f + cosf( 0.3f * a ) * 15.0f,
            p = 1<<CIRCLE_FB;
        hline( y, ~0 );
        vline( x, ~0 );
        draw_circle( x*p, y*p, 0, r*p, ~0, 128 );
        a += PI/300.0f;
    }

    if ( 0 )
    {
        /* Debug: one textured triangle with an identity MVP. */
        float k = view_scale * 20;
        S32 verts[][3] = {
            #define R3(x,y,z) {REALF(r_resx/2+x*k),REALF(r_resy/2+y*k),REALF(z)}
            R3( 0.2, 0.7, 0 ),
            R3( 0.8, 0.3, 0 ),
            R3( 0.1, 0.15, 0 )
        };
        float m[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1};
        set_mvp_matrix_f( m );
        draw_triangle_t( verts[0], verts[1], verts[2], 0 );
    }
}

/* Print a labeled 4x4 column-major matrix in scientific notation. */
static void pr_mat( const char *s, float m[16] )
{
    printf( "%s", s );
    for( int i=0; i<4; ++i )
        printf( "%9.2e %9.2e %9.2e %9.2e\n",
            m[i], m[i+4], m[i+8], m[i+12] );
}

/* Manual sanity check for m_mult: scale*translate vs translate*scale should
 * differ (matrix multiplication is not commutative). Output is inspected by
 * eye, not asserted. */
void test_mmul( void )
{
    float t[16], s[16], a[16], b[16];
    m_translate( t, 1, 1, 1 );
    m_scale( s, 2, 2, 2 );
    m_mult( a, s, t );
    m_mult( b, t, s );
    pr_mat( "SxT=\n", a ); // all values 2
    pr_mat( "TxS=\n", b ); // contains both 2 and 1 values
}
505087.c
/* Unit test suite for wintrust asn functions
 *
 * Copyright 2007 Juan Lang
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 *
 */
#include <stdarg.h>
#include "windef.h"
#include "winbase.h"
#include "winerror.h"
#include "wincrypt.h"
#include "wintrust.h"
#include "wine/test.h"

/* crypt32 entry points resolved at runtime in START_TEST; may be NULL on
 * platforms that do not export them. */
static BOOL (WINAPI *pCryptDecodeObjectEx)(DWORD,LPCSTR,const BYTE*,DWORD,DWORD,PCRYPT_DECODE_PARA,void*,DWORD*);
static BOOL (WINAPI *pCryptEncodeObjectEx)(DWORD,LPCSTR,const void*,DWORD,PCRYPT_ENCODE_PARA,void*,DWORD*);

/* DER encodings of SPC_FINANCIAL_CRITERIA { FALSE, FALSE } / { TRUE, TRUE }. */
static const BYTE falseCriteria[] = { 0x30,0x06,0x01,0x01,0x00,0x01,0x01,0x00 };
static const BYTE trueCriteria[] = { 0x30,0x06,0x01,0x01,0xff,0x01,0x01,0xff };

/* Encode SPC_FINANCIAL_CRITERIA and compare against the known DER bytes. */
static void test_encodeSPCFinancialCriteria(void)
{
    BOOL ret;
    DWORD size = 0;
    LPBYTE buf;
    SPC_FINANCIAL_CRITERIA criteria = { FALSE, FALSE };

    if (!pCryptEncodeObjectEx)
    {
        skip("CryptEncodeObjectEx() is not available. Skipping the encodeFinancialCriteria tests\n");
        return;
    }
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_FINANCIAL_CRITERIA_STRUCT,
     &criteria, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(falseCriteria), "Unexpected size %d\n", size);
        ok(!memcmp(buf, falseCriteria, size), "Unexpected value\n");
        LocalFree(buf);
    }
    criteria.fFinancialInfoAvailable = criteria.fMeetsCriteria = TRUE;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_FINANCIAL_CRITERIA_STRUCT,
     &criteria, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(trueCriteria), "Unexpected size %d\n", size);
        ok(!memcmp(buf, trueCriteria, size), "Unexpected value\n");
        LocalFree(buf);
    }
}

/* Decode the known DER bytes back into SPC_FINANCIAL_CRITERIA and check
 * both boolean fields round-trip. Decodes into a stack struct (no ALLOC
 * flag). */
static void test_decodeSPCFinancialCriteria(void)
{
    BOOL ret;
    SPC_FINANCIAL_CRITERIA criteria;
    DWORD size = sizeof(criteria);

    if (!pCryptDecodeObjectEx)
    {
        skip("CryptDecodeObjectEx() is not available. Skipping the decodeSPCFinancialCriteria tests\n");
        return;
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_FINANCIAL_CRITERIA_STRUCT,
     falseCriteria, sizeof(falseCriteria), 0, NULL, &criteria, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(!criteria.fFinancialInfoAvailable, "expected FALSE\n");
        ok(!criteria.fMeetsCriteria, "expected FALSE\n");
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_FINANCIAL_CRITERIA_STRUCT,
     trueCriteria, sizeof(trueCriteria), 0, NULL, &criteria, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(criteria.fFinancialInfoAvailable, "expected TRUE\n");
        ok(criteria.fMeetsCriteria, "expected TRUE\n");
    }
}

/* Test fixtures for SPC_LINK: an ASCII URL, a URL containing characters
 * invalid in an IA5 string, and the expected DER for each link choice. */
static WCHAR url[] = { 'h','t','t','p',':','/','/','w','i','n','e','h','q','.',
 'o','r','g',0 };
static const WCHAR nihongoURL[] = { 'h','t','t','p',':','/','/',0x226f,
 0x575b, 0 };
static const BYTE emptyURLSPCLink[] = { 0x80,0x00 };
static const BYTE urlSPCLink[] = {
0x80,0x11,0x68,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x69,0x6e,0x65,0x68,0x71,
0x2e,0x6f,0x72,0x67};
static const BYTE fileSPCLink[] = {
0xa2,0x14,0x80,0x12,0x00,0x68,0x00,0x74,0x00,0x74,0x00,0x70,0x00,0x3a,0x00,
0x2f,0x00,0x2f,0x22,0x6f,0x57,0x5b };
static const BYTE emptyMonikerSPCLink[] = {
0xa1,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x04,0x00 };
static BYTE data[] = { 0xba, 0xad, 0xf0, 0x0d };
static const BYTE monikerSPCLink[] = {
0xa1,0x18,0x04,0x10,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,
0xea,0xea,0xea,0xea,0xea,0x04,0x04,0xba,0xad,0xf0,0x0d };

/* Exercise SPC_LINK encoding for every dwLinkChoice: missing choice
 * (E_INVALIDARG), URL (empty, invalid IA5, valid), file, and moniker
 * (empty and with serialized data). */
static void test_encodeSPCLink(void)
{
    BOOL ret;
    DWORD size = 0;
    LPBYTE buf;
    SPC_LINK link = { 0 };

    if (!pCryptEncodeObjectEx)
    {
        skip("CryptEncodeObjectEx() is not available. Skipping the encodeSPCLink tests\n");
        return;
    }
    SetLastError(0xdeadbeef);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(!ret && GetLastError() == E_INVALIDARG,
     "Expected E_INVALIDARG, got %08x\n", GetLastError());
    link.dwLinkChoice = SPC_URL_LINK_CHOICE;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptyURLSPCLink), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptyURLSPCLink, size), "Unexpected value\n");
        LocalFree(buf);
    }
    /* With an invalid char: */
    U(link).pwszUrl = (LPWSTR)nihongoURL;
    size = 1;
    SetLastError(0xdeadbeef);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(!ret && (GetLastError() == CRYPT_E_INVALID_IA5_STRING ||
     GetLastError() == OSS_BAD_PTR /* Win9x */),
     "Expected CRYPT_E_INVALID_IA5_STRING, got %08x\n", GetLastError());
    /* Unlike the crypt32 string encoding routines, size is not set to the
     * index of the first invalid character.
     */
    ok(size == 0, "Expected size 0, got %d\n", size);
    U(link).pwszUrl = url;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(urlSPCLink), "Unexpected size %d\n", size);
        ok(!memcmp(buf, urlSPCLink, size), "Unexpected value\n");
        LocalFree(buf);
    }
    /* File links are BMP (UTF-16BE) strings, so the invalid-IA5 URL is
     * acceptable here. */
    link.dwLinkChoice = SPC_FILE_LINK_CHOICE;
    U(link).pwszFile = (LPWSTR)nihongoURL;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(fileSPCLink), "Unexpected size %d\n", size);
        ok(!memcmp(buf, fileSPCLink, size), "Unexpected value\n");
        LocalFree(buf);
    }
    link.dwLinkChoice = SPC_MONIKER_LINK_CHOICE;
    memset(&U(link).Moniker, 0, sizeof(U(link).Moniker));
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptyMonikerSPCLink), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptyMonikerSPCLink, size), "Unexpected value\n");
        LocalFree(buf);
    }
    memset(&U(link).Moniker.ClassId, 0xea, sizeof(U(link).Moniker.ClassId));
    U(link).Moniker.SerializedData.pbData = data;
    U(link).Moniker.SerializedData.cbData = sizeof(data);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT, &link,
     CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(monikerSPCLink), "Unexpected size %d\n", size);
        ok(!memcmp(buf, monikerSPCLink, size), "Unexpected value\n");
        LocalFree(buf);
    }
}

/* Moniker encoding whose ClassId octet string is 17 bytes (one too many) —
 * must fail to decode. */
static const BYTE badMonikerSPCLink[] = {
0xa1,0x19,0x04,0x11,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,0xea,
0xea,0xea,0xea,0xea,0xea,0xea,0x04,0x04,0xba,0xad,0xf0,0x0d };

/* Decode each SPC_LINK fixture and check the resulting choice and payload;
 * finishes with the malformed moniker, which must fail with
 * CRYPT_E_BAD_ENCODE (or OSS_DATA_ERROR on Win9x). */
static void test_decodeSPCLink(void)
{
    BOOL ret;
    LPBYTE buf = NULL;
    DWORD size = 0;
    SPC_LINK *link;

    if (!pCryptDecodeObjectEx)
    {
        skip("CryptDecodeObjectEx() is not available. Skipping the decodeSPCLink tests\n");
        return;
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     emptyURLSPCLink, sizeof(emptyURLSPCLink), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        link = (SPC_LINK *)buf;
        ok(link->dwLinkChoice == SPC_URL_LINK_CHOICE,
         "Expected SPC_URL_LINK_CHOICE, got %d\n", link->dwLinkChoice);
        ok(lstrlenW(U(*link).pwszUrl) == 0, "Expected empty string\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     urlSPCLink, sizeof(urlSPCLink), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        link = (SPC_LINK *)buf;
        ok(link->dwLinkChoice == SPC_URL_LINK_CHOICE,
         "Expected SPC_URL_LINK_CHOICE, got %d\n", link->dwLinkChoice);
        ok(!lstrcmpW(U(*link).pwszUrl, url), "Unexpected URL\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     fileSPCLink, sizeof(fileSPCLink), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        link = (SPC_LINK *)buf;
        ok(link->dwLinkChoice == SPC_FILE_LINK_CHOICE,
         "Expected SPC_FILE_LINK_CHOICE, got %d\n", link->dwLinkChoice);
        ok(!lstrcmpW(U(*link).pwszFile, nihongoURL), "Unexpected file\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     emptyMonikerSPCLink, sizeof(emptyMonikerSPCLink),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        SPC_SERIALIZED_OBJECT emptyMoniker = { { 0 } };

        link = (SPC_LINK *)buf;
        ok(link->dwLinkChoice == SPC_MONIKER_LINK_CHOICE,
         "Expected SPC_MONIKER_LINK_CHOICE, got %d\n", link->dwLinkChoice);
        ok(!memcmp(&U(*link).Moniker.ClassId, &emptyMoniker.ClassId,
         sizeof(emptyMoniker.ClassId)), "Unexpected value\n");
        ok(U(*link).Moniker.SerializedData.cbData == 0,
         "Expected no serialized data\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     monikerSPCLink, sizeof(monikerSPCLink), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        SPC_UUID id;

        link = (SPC_LINK *)buf;
        ok(link->dwLinkChoice == SPC_MONIKER_LINK_CHOICE,
         "Expected SPC_MONIKER_LINK_CHOICE, got %d\n", link->dwLinkChoice);
        memset(&id, 0xea, sizeof(id));
        ok(!memcmp(&U(*link).Moniker.ClassId, &id, sizeof(id)),
         "Unexpected value\n");
        ok(U(*link).Moniker.SerializedData.cbData == sizeof(data),
         "Unexpected data size %d\n",
         U(*link).Moniker.SerializedData.cbData);
        ok(!memcmp(U(*link).Moniker.SerializedData.pbData, data,
         sizeof(data)), "Unexpected value\n");
        LocalFree(buf);
    }
    SetLastError(0xdeadbeef);
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_LINK_STRUCT,
     badMonikerSPCLink, sizeof(badMonikerSPCLink), CRYPT_DECODE_ALLOC_FLAG,
     NULL, &buf, &size);
    ok(!ret && (GetLastError() == CRYPT_E_BAD_ENCODE ||
     GetLastError() == OSS_DATA_ERROR /* Win9x */),
     "Expected CRYPT_E_BAD_ENCODE, got %08x\n", GetLastError());
}

/* Fixtures for SPC_PE_IMAGE_DATA: a one-byte flags bit string and DER for
 * each combination of flags / file link.
 * NOTE(review): moreFlagsPEImage is declared but never referenced in this
 * chunk. */
static const BYTE emptySequence[] = { 0x30,0x00 };
static BYTE flags[] = { 1 };
static const BYTE onlyFlagsPEImage[] = { 0x30,0x04,0x03,0x02,0x00,0x01 };
static const BYTE moreFlagsPEImage[] = {
0x30,0x06,0x03,0x04,0x04,0xff,0x80,0x10 };
static const BYTE onlyEmptyFilePEImage[] = {
0x30,0x06,0xa0,0x04,0xa2,0x02,0x80,0x00 };
static const BYTE flagsAndEmptyFilePEImage[] = {
0x30,0x0a,0x03,0x02,0x00,0x01,0xa0,0x04,0xa2,0x02,0x80,0x00 };
static const BYTE flagsAndFilePEImage[] = {
0x30,0x1c,0x03,0x02,0x00,0x01,0xa0,0x16,0xa2,0x14,0x80,0x12,0x00,0x68,0x00,
0x74,0x00,0x74,0x00,0x70,0x00,0x3a,0x00,0x2f,0x00,0x2f,0x22,0x6f,0x57,0x5b };

/* Encode SPC_PE_IMAGE_DATA with every combination of flags and file link,
 * including an invalid (choice-less) link, which must fail. */
static void test_encodeSPCPEImage(void)
{
    BOOL ret;
    DWORD size = 0;
    LPBYTE buf;
    SPC_PE_IMAGE_DATA imageData = { { 0 } };
    SPC_LINK link = { 0 };

    if (!pCryptEncodeObjectEx)
    {
        skip("CryptEncodeObjectEx() is not available. Skipping the encodeSPCPEImage tests\n");
        return;
    }
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptySequence), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptySequence, sizeof(emptySequence)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    /* With an invalid link: */
    imageData.pFile = &link;
    SetLastError(0xdeadbeef);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(!ret && GetLastError () == E_INVALIDARG,
     "Expected E_INVALIDARG, got %08x\n", GetLastError());
    /* With just unused bits field set: */
    imageData.pFile = NULL;
    imageData.Flags.cUnusedBits = 1;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptySequence), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptySequence, sizeof(emptySequence)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    /* With flags set: */
    imageData.Flags.cUnusedBits = 0;
    imageData.Flags.pbData = flags;
    imageData.Flags.cbData = sizeof(flags);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    if (!ret && GetLastError() == OSS_TOO_LONG)
    {
        skip("SPC_PE_IMAGE_DATA_STRUCT not supported\n");
        return;
    }
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(onlyFlagsPEImage), "Unexpected size %d\n", size);
        ok(!memcmp(buf, onlyFlagsPEImage, sizeof(onlyFlagsPEImage)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    /* With just an empty file: */
    imageData.Flags.cbData = 0;
    link.dwLinkChoice = SPC_FILE_LINK_CHOICE;
    imageData.pFile = &link;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(onlyEmptyFilePEImage), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, onlyEmptyFilePEImage, sizeof(onlyEmptyFilePEImage)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    /* With flags and an empty file: */
    imageData.Flags.pbData = flags;
    imageData.Flags.cbData = sizeof(flags);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(flagsAndEmptyFilePEImage), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, flagsAndEmptyFilePEImage,
         sizeof(flagsAndEmptyFilePEImage)), "Unexpected value\n");
        LocalFree(buf);
    }
    /* Finally, a non-empty file: */
    U(link).pwszFile = (LPWSTR)nihongoURL;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     &imageData, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(flagsAndFilePEImage), "Unexpected size %d\n", size);
        ok(!memcmp(buf, flagsAndFilePEImage, sizeof(flagsAndFilePEImage)),
         "Unexpected value\n");
        LocalFree(buf);
    }
}

/* Decode each SPC_PE_IMAGE_DATA fixture and verify flags/file round-trip. */
static void test_decodeSPCPEImage(void)
{
    static const WCHAR emptyString[] = { 0 };
    BOOL ret;
    LPBYTE buf = NULL;
    DWORD size = 0;
    SPC_PE_IMAGE_DATA *imageData;

    if (!pCryptDecodeObjectEx)
    {
        skip("CryptDecodeObjectEx() is not available. Skipping the decodeSPCPEImage tests\n");
        return;
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     emptySequence, sizeof(emptySequence), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        imageData = (SPC_PE_IMAGE_DATA *)buf;
        ok(imageData->Flags.cbData == 0, "Expected empty flags, got %d\n",
         imageData->Flags.cbData);
        ok(imageData->pFile == NULL, "Expected no file\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     onlyFlagsPEImage, sizeof(onlyFlagsPEImage), CRYPT_DECODE_ALLOC_FLAG,
     NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        imageData = (SPC_PE_IMAGE_DATA *)buf;
        ok(imageData->Flags.cbData == sizeof(flags),
         "Unexpected flags size %d\n", imageData->Flags.cbData);
        if (imageData->Flags.cbData)
            ok(!memcmp(imageData->Flags.pbData, flags, sizeof(flags)),
             "Unexpected flags\n");
        ok(imageData->pFile == NULL, "Expected no file\n");
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     onlyEmptyFilePEImage, sizeof(onlyEmptyFilePEImage),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        imageData = (SPC_PE_IMAGE_DATA *)buf;
        ok(imageData->Flags.cbData == 0, "Expected empty flags, got %d\n",
         imageData->Flags.cbData);
        ok(imageData->pFile != NULL, "Expected a file\n");
        if (imageData->pFile)
        {
            ok(imageData->pFile->dwLinkChoice == SPC_FILE_LINK_CHOICE,
             "Expected SPC_FILE_LINK_CHOICE, got %d\n",
             imageData->pFile->dwLinkChoice);
            ok(!lstrcmpW(U(*imageData->pFile).pwszFile, emptyString),
             "Unexpected file\n");
        }
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     flagsAndEmptyFilePEImage, sizeof(flagsAndEmptyFilePEImage),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        imageData = (SPC_PE_IMAGE_DATA *)buf;
        ok(imageData->Flags.cbData == sizeof(flags),
         "Unexpected flags size %d\n", imageData->Flags.cbData);
        if (imageData->Flags.cbData)
            ok(!memcmp(imageData->Flags.pbData, flags, sizeof(flags)),
             "Unexpected flags\n");
        ok(imageData->pFile != NULL, "Expected a file\n");
        if (imageData->pFile)
        {
            ok(imageData->pFile->dwLinkChoice == SPC_FILE_LINK_CHOICE,
             "Expected SPC_FILE_LINK_CHOICE, got %d\n",
             imageData->pFile->dwLinkChoice);
            ok(!lstrcmpW(U(*imageData->pFile).pwszFile, emptyString),
             "Unexpected file\n");
        }
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_PE_IMAGE_DATA_STRUCT,
     flagsAndFilePEImage, sizeof(flagsAndFilePEImage),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        imageData = (SPC_PE_IMAGE_DATA *)buf;
        ok(imageData->Flags.cbData == sizeof(flags),
         "Unexpected flags size %d\n", imageData->Flags.cbData);
        if (imageData->Flags.cbData)
            ok(!memcmp(imageData->Flags.pbData, flags, sizeof(flags)),
             "Unexpected flags\n");
        ok(imageData->pFile != NULL, "Expected a file\n");
        if (imageData->pFile)
        {
            ok(imageData->pFile->dwLinkChoice == SPC_FILE_LINK_CHOICE,
             "Expected SPC_FILE_LINK_CHOICE, got %d\n",
             imageData->pFile->dwLinkChoice);
            ok(!lstrcmpW(U(*imageData->pFile).pwszFile, nihongoURL),
             "Unexpected file\n");
        }
        LocalFree(buf);
    }
}

/* Fixtures for CAT_MEMBERINFO: a non-GUID tag string, a well-formed GUID
 * string, and the expected DER (BMP string + version INTEGER) for each. */
static WCHAR foo[] = { 'f','o','o',0 };
static WCHAR guidStr[] = { '{','8','b','c','9','6','b','0','0','-',
 '8','d','a','1','-','1','1','c','f','-','8','7','3','6','-','0','0',
 'a','a','0','0','a','4','8','5','e','b','}',0 };
static const BYTE emptyCatMemberInfo[] = {
 0x30,0x05,0x1e,0x00,0x02,0x01,0x00 };
static const BYTE catMemberInfoWithSillyGuid[] = {
 0x30,0x0b,0x1e,0x06,0x00,0x66,0x00,0x6f,0x00,0x6f,0x02,0x01,0x00 };
static const BYTE catMemberInfoWithGuid[] = {
0x30,0x51,0x1e,0x4c,0x00,0x7b,0x00,0x38,0x00,0x62,0x00,0x63,0x00,0x39,0x00,0x36,
0x00,0x62,0x00,0x30,0x00,0x30,0x00,0x2d,0x00,0x38,0x00,0x64,0x00,0x61,0x00,0x31,
0x00,0x2d,0x00,0x31,0x00,0x31,0x00,0x63,0x00,0x66,0x00,0x2d,0x00,0x38,0x00,0x37,
0x00,0x33,0x00,0x36,0x00,0x2d,0x00,0x30,0x00,0x30,0x00,0x61,0x00,0x61,0x00,0x30,
0x00,0x30,0x00,0x61,0x00,0x34,0x00,0x38,0x00,0x35,0x00,0x65,0x00,0x62,0x00,0x7d,
0x02,0x01,0x00 };

/* Encode CAT_MEMBERINFO with no tag, a non-GUID tag (accepted as-is), and a
 * GUID tag; compare against the known DER. */
static void test_encodeCatMemberInfo(void)
{
    CAT_MEMBERINFO info;
    BOOL ret;
    DWORD size = 0;
    LPBYTE buf;

    memset(&info, 0, sizeof(info));
    if (!pCryptEncodeObjectEx)
    {
        skip("CryptEncodeObjectEx() is not available. Skipping the encodeCatMemberInfo tests\n");
        return;
    }
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptyCatMemberInfo), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptyCatMemberInfo, sizeof(emptyCatMemberInfo)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    info.pwszSubjGuid = foo;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(catMemberInfoWithSillyGuid), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, catMemberInfoWithSillyGuid,
         sizeof(catMemberInfoWithSillyGuid)), "Unexpected value\n");
        LocalFree(buf);
    }
    info.pwszSubjGuid = guidStr;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(catMemberInfoWithGuid), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, catMemberInfoWithGuid, sizeof(catMemberInfoWithGuid)),
         "Unexpected value\n");
        LocalFree(buf);
    }
}

/* Decode each CAT_MEMBERINFO fixture and check tag string and version. */
static void test_decodeCatMemberInfo(void)
{
    BOOL ret;
    LPBYTE buf;
    DWORD size;
    CAT_MEMBERINFO *info;

    if (!pCryptDecodeObjectEx)
    {
        skip("CryptDecodeObjectEx() is not available. Skipping the decodeCatMemberInfo tests\n");
        return;
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     emptyCatMemberInfo, sizeof(emptyCatMemberInfo),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        info = (CAT_MEMBERINFO *)buf;
        ok(!info->pwszSubjGuid || !info->pwszSubjGuid[0],
         "expected empty pwszSubjGuid\n");
        ok(info->dwCertVersion == 0, "expected dwCertVersion == 0, got %d\n",
         info->dwCertVersion);
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     catMemberInfoWithSillyGuid, sizeof(catMemberInfoWithSillyGuid),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        info = (CAT_MEMBERINFO *)buf;
        ok(info->pwszSubjGuid && !lstrcmpW(info->pwszSubjGuid, foo),
         "unexpected pwszSubjGuid\n");
        ok(info->dwCertVersion == 0, "expected dwCertVersion == 0, got %d\n",
         info->dwCertVersion);
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_MEMBERINFO_STRUCT,
     catMemberInfoWithGuid, sizeof(catMemberInfoWithGuid),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        info = (CAT_MEMBERINFO *)buf;
        ok(info->pwszSubjGuid && !lstrcmpW(info->pwszSubjGuid, guidStr),
         "unexpected pwszSubjGuid\n");
        ok(info->dwCertVersion == 0, "expected dwCertVersion == 0, got %d\n",
         info->dwCertVersion);
        LocalFree(buf);
    }
}

/* Fixtures for CAT_NAMEVALUE: DER for empty struct, with a tag, with flags,
 * and with an octet-string value. */
static const BYTE emptyCatNameValue[] = {
 0x30,0x07,0x1e,0x00,0x02,0x01,0x00,0x04,0x00 };
static const BYTE catNameValueWithTag[] = {
 0x30,0x0d,0x1e,0x06,0x00,0x66,0x00,0x6f,0x00,0x6f,0x02,0x01,0x00,0x04,0x00 };
static const BYTE catNameValueWithFlags[] = {
 0x30,0x0a,0x1e,0x00,0x02,0x04,0xf0,0x0d,0xd0,0x0d,0x04,0x00 };
static const BYTE catNameValueWithValue[] = {
 0x30,0x0b,0x1e,0x00,0x02,0x01,0x00,0x04,0x04,0x01,0x02,0x03,0x04 };
static BYTE aVal[] = { 1,2,3,4 };

/* Encode CAT_NAMEVALUE with each field populated in turn.
 * NOTE(review): unlike the earlier tests, this one does not guard against
 * pCryptEncodeObjectEx being NULL, so it would crash where the export is
 * missing; also the local 'foo' shadows the file-scope 'foo' (identical
 * contents). */
static void test_encodeCatNameValue(void)
{
    static WCHAR foo[] = { 'f','o','o',0 };
    BOOL ret;
    LPBYTE buf;
    DWORD size;
    CAT_NAMEVALUE value;

    memset(&value, 0, sizeof(value));
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     &value, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptyCatNameValue), "Unexpected size %d\n", size);
        ok(!memcmp(buf, emptyCatNameValue, sizeof(emptyCatNameValue)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    value.pwszTag = foo;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     &value, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(catNameValueWithTag), "Unexpected size %d\n", size);
        ok(!memcmp(buf, catNameValueWithTag, sizeof(catNameValueWithTag)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    value.pwszTag = NULL;
    value.fdwFlags = 0xf00dd00d;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     &value, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(catNameValueWithFlags), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, catNameValueWithFlags, sizeof(catNameValueWithFlags)),
         "Unexpected value\n");
        LocalFree(buf);
    }
    value.fdwFlags = 0;
    value.Value.cbData = sizeof(aVal);
    value.Value.pbData = aVal;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     &value, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(catNameValueWithValue), "Unexpected size %d\n",
         size);
        ok(!memcmp(buf, catNameValueWithValue, sizeof(catNameValueWithValue)),
         "Unexpected value\n");
        LocalFree(buf);
    }
}

/* Decode each CAT_NAMEVALUE fixture and check tag/flags/value round-trip.
 * NOTE(review): no NULL guard on pCryptDecodeObjectEx here either. */
static void test_decodeCatNameValue(void)
{
    BOOL ret;
    LPBYTE buf;
    DWORD size;
    CAT_NAMEVALUE *value;

    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     emptyCatNameValue, sizeof(emptyCatNameValue), CRYPT_DECODE_ALLOC_FLAG,
     NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        value = (CAT_NAMEVALUE *)buf;
        ok(!value->pwszTag || !value->pwszTag[0], "expected empty pwszTag\n");
        ok(value->fdwFlags == 0, "expected fdwFlags == 0, got %08x\n",
         value->fdwFlags);
        ok(value->Value.cbData == 0, "expected 0-length value, got %d\n",
         value->Value.cbData);
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     catNameValueWithTag, sizeof(catNameValueWithTag),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        value = (CAT_NAMEVALUE *)buf;
        ok(value->pwszTag && !lstrcmpW(value->pwszTag, foo),
         "unexpected pwszTag\n");
        ok(value->fdwFlags == 0, "expected fdwFlags == 0, got %08x\n",
         value->fdwFlags);
        ok(value->Value.cbData == 0, "expected 0-length value, got %d\n",
         value->Value.cbData);
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     catNameValueWithFlags, sizeof(catNameValueWithFlags),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        value = (CAT_NAMEVALUE *)buf;
        ok(!value->pwszTag || !value->pwszTag[0], "expected empty pwszTag\n");
        ok(value->fdwFlags == 0xf00dd00d,
         "expected fdwFlags == 0xf00dd00d, got %08x\n", value->fdwFlags);
        ok(value->Value.cbData == 0, "expected 0-length value, got %d\n",
         value->Value.cbData);
        LocalFree(buf);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, CAT_NAMEVALUE_STRUCT,
     catNameValueWithValue, sizeof(catNameValueWithValue),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        value = (CAT_NAMEVALUE *)buf;
        ok(!value->pwszTag || !value->pwszTag[0], "expected empty pwszTag\n");
        ok(value->fdwFlags == 0, "expected fdwFlags == 0, got %08x\n",
         value->fdwFlags);
        ok(value->Value.cbData == sizeof(aVal), "unexpected size %d\n",
         value->Value.cbData);
        ok(!memcmp(value->Value.pbData, aVal, value->Value.cbData),
         "unexpected value\n");
        LocalFree(buf);
    }
}

/* Fixtures for SPC_SP_OPUS_INFO: program name (BMP string in [0]), more-info
 * URL ([1]) and publisher-info URL ([2]). */
static const WCHAR progName[] = { 'A',' ','p','r','o','g','r','a','m',0 };
static const BYTE spOpusInfoWithProgramName[] = {
0x30,0x16,0xa0,0x14,0x80,0x12,0x00,0x41,0x00,0x20,0x00,0x70,0x00,0x72,0x00,0x6f,
0x00,0x67,0x00,0x72,0x00,0x61,0x00,0x6d };
static WCHAR winehq[] = { 'h','t','t','p',':','/','/','w','i','n','e','h','q',
 '.','o','r','g','/',0 };
static const BYTE spOpusInfoWithMoreInfo[] = {
0x30,0x16,0xa1,0x14,0x80,0x12,0x68,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x69,0x6e,
0x65,0x68,0x71,0x2e,0x6f,0x72,0x67,0x2f };
static const BYTE spOpusInfoWithPublisherInfo[] = {
0x30,0x16,0xa2,0x14,0x80,0x12,0x68,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x69,0x6e,
0x65,0x68,0x71,0x2e,0x6f,0x72,0x67,0x2f };

/* Encode SPC_SP_OPUS_INFO empty, with a program name, with an invalid
 * (choice-less) link, and with more-info/publisher-info URLs.
 * NOTE(review): no NULL guard on pCryptEncodeObjectEx. */
static void test_encodeSpOpusInfo(void)
{
    BOOL ret;
    LPBYTE buf;
    DWORD size;
    SPC_SP_OPUS_INFO info;
    SPC_LINK moreInfo;

    memset(&info, 0, sizeof(info));
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(emptySequence), "unexpected size %d\n", size);
        ok(!memcmp(buf, emptySequence, size), "unexpected value\n");
        LocalFree(buf);
    }
    info.pwszProgramName = progName;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(spOpusInfoWithProgramName), "unexpected size %d\n",
         size);
        ok(!memcmp(buf, spOpusInfoWithProgramName, size),
         "unexpected value\n");
        LocalFree(buf);
    }
    info.pwszProgramName = NULL;
    memset(&moreInfo, 0, sizeof(moreInfo));
    info.pMoreInfo = &moreInfo;
    SetLastError(0xdeadbeef);
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(!ret && GetLastError() == E_INVALIDARG,
     "expected E_INVALIDARG, got %08x\n", GetLastError());
    moreInfo.dwLinkChoice = SPC_URL_LINK_CHOICE;
    moreInfo.pwszUrl = winehq;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(spOpusInfoWithMoreInfo), "unexpected size %d\n",
         size);
        ok(!memcmp(buf, spOpusInfoWithMoreInfo, size), "unexpected value\n");
        LocalFree(buf);
    }
    info.pMoreInfo = NULL;
    info.pPublisherInfo = &moreInfo;
    ret = pCryptEncodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     &info, CRYPT_ENCODE_ALLOC_FLAG, NULL, &buf, &size);
    ok(ret, "CryptEncodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(size == sizeof(spOpusInfoWithPublisherInfo),
         "unexpected size %d\n", size);
        ok(!memcmp(buf, spOpusInfoWithPublisherInfo, size),
         "unexpected value\n");
        LocalFree(buf);
    }
}

/* Decode each SPC_SP_OPUS_INFO fixture; all four decodes are todo_wine,
 * i.e. expected to fail on Wine at the time this test was written.
 * NOTE(review): no NULL guard on pCryptDecodeObjectEx. */
static void test_decodeSpOpusInfo(void)
{
    BOOL ret;
    DWORD size;
    SPC_SP_OPUS_INFO *info;

    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     emptySequence, sizeof(emptySequence), CRYPT_DECODE_ALLOC_FLAG, NULL,
     &info, &size);
    todo_wine
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(!info->pwszProgramName, "expected NULL\n");
        ok(!info->pMoreInfo, "expected NULL\n");
        ok(!info->pPublisherInfo, "expected NULL\n");
        LocalFree(info);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     spOpusInfoWithProgramName, sizeof(spOpusInfoWithProgramName),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &info, &size);
    todo_wine
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(info->pwszProgramName && !lstrcmpW(info->pwszProgramName,
         progName), "unexpected program name\n");
        ok(!info->pMoreInfo, "expected NULL\n");
        ok(!info->pPublisherInfo, "expected NULL\n");
        LocalFree(info);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     spOpusInfoWithMoreInfo, sizeof(spOpusInfoWithMoreInfo),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &info, &size);
    todo_wine
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(!info->pwszProgramName, "expected NULL\n");
        ok(info->pMoreInfo != NULL, "expected a value for pMoreInfo\n");
        if (info->pMoreInfo)
        {
            ok(info->pMoreInfo->dwLinkChoice == SPC_URL_LINK_CHOICE,
             "unexpected link choice %d\n", info->pMoreInfo->dwLinkChoice);
            ok(!lstrcmpW(info->pMoreInfo->pwszUrl, winehq),
             "unexpected link value\n");
        }
        ok(!info->pPublisherInfo, "expected NULL\n");
        LocalFree(info);
    }
    ret = pCryptDecodeObjectEx(X509_ASN_ENCODING, SPC_SP_OPUS_INFO_STRUCT,
     spOpusInfoWithPublisherInfo, sizeof(spOpusInfoWithPublisherInfo),
     CRYPT_DECODE_ALLOC_FLAG, NULL, &info, &size);
    todo_wine
    ok(ret, "CryptDecodeObjectEx failed: %08x\n", GetLastError());
    if (ret)
    {
        ok(!info->pwszProgramName, "expected NULL\n");
        ok(!info->pMoreInfo, "expected NULL\n");
        ok(info->pPublisherInfo != NULL,
         "expected a value for pPublisherInfo\n");
        if (info->pPublisherInfo)
        {
            ok(info->pPublisherInfo->dwLinkChoice == SPC_URL_LINK_CHOICE,
             "unexpected link choice %d\n",
             info->pPublisherInfo->dwLinkChoice);
            ok(!lstrcmpW(info->pPublisherInfo->pwszUrl, winehq),
             "unexpected link value\n");
        }
        LocalFree(info);
    }
}

/* Test entry point: resolve the crypt32 exports at runtime (they may be
 * absent on old platforms) and run every encode/decode test. */
START_TEST(asn)
{
    HMODULE hCrypt32 = LoadLibraryA("crypt32.dll");
    pCryptDecodeObjectEx = (void*)GetProcAddress(hCrypt32,
     "CryptDecodeObjectEx");
    pCryptEncodeObjectEx = (void*)GetProcAddress(hCrypt32,
     "CryptEncodeObjectEx");

    test_encodeSPCFinancialCriteria();
    test_decodeSPCFinancialCriteria();
    test_encodeSPCLink();
    test_decodeSPCLink();
    test_encodeSPCPEImage();
    test_decodeSPCPEImage();
    test_encodeCatMemberInfo();
    test_decodeCatMemberInfo();
    test_encodeCatNameValue();
    test_decodeCatNameValue();
    test_encodeSpOpusInfo();
    test_decodeSpOpusInfo();

    FreeLibrary(hCrypt32);
}
282888.c
/** * Copyright (c) 2016 - 2020, Nordic Semiconductor ASA * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form, except as embedded into a Nordic * Semiconductor ASA integrated circuit in a product or a software update for * such product, must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * * 3. Neither the name of Nordic Semiconductor ASA nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. This software, with or without modification, must only be used with a * Nordic Semiconductor ASA integrated circuit. * * 5. Any software provided in binary form under this license must not be reverse * engineered, decompiled, modified and/or disassembled. * * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include "sdk_common.h" #if NRF_MODULE_ENABLED(NRF_LOG) && NRF_MODULE_ENABLED(NRF_LOG_BACKEND_FLASH) #include "nrf_log_backend_flash.h" #include "nrf_log_str_formatter.h" #include "nrf_fstorage_nvmc.h" #include "nrf_log.h" #include "nrf_atomic.h" #include "nrf_queue.h" #include "app_error.h" #include <stdbool.h> #if (NRF_LOG_BACKEND_FLASHLOG_ENABLED == 0) && (NRF_LOG_BACKEND_CRASHLOG_ENABLED == 0) #error "No flash backend enabled." #endif /** @brief Maximum logger message payload (arguments or data in hexdump) which can be stored. */ #define FLASH_LOG_MAX_PAYLOAD_SIZE (NRF_LOG_BACKEND_FLASH_SER_BUFFER_SIZE - sizeof(nrf_log_header_t)) /** @brief Size of serialization buffer in words. */ #define FLASH_LOG_SER_BUFFER_WORDS (NRF_LOG_BACKEND_FLASH_SER_BUFFER_SIZE/sizeof(uint32_t)) /** @brief Length of logger header. */ #define LOG_HEADER_LEN (sizeof(nrf_log_header_t)) /** @brief Length of logger header given in 32 bit words. */ #define LOG_HEADER_LEN_WORDS (LOG_HEADER_LEN/sizeof(uint32_t)) /** @brief Maximum possible length of standard log message. */ #define STD_LOG_MSG_MAX_LEN (LOG_HEADER_LEN + NRF_LOG_MAX_NUM_OF_ARGS*sizeof(uint32_t)) /* Buffer must be multiple of 4. */ STATIC_ASSERT((NRF_LOG_BACKEND_FLASH_SER_BUFFER_SIZE % sizeof(uint32_t)) == 0); /* Buffer must fit standard log message. */ STATIC_ASSERT(NRF_LOG_BACKEND_FLASH_SER_BUFFER_SIZE >= STD_LOG_MSG_MAX_LEN); /** @brief Flash page size in bytes. */ #define CODE_PAGE_SIZE 4096 /** @brief Start address of the area dedicated for flash log. */ #define FLASH_LOG_START_ADDR (NRF_LOG_BACKEND_FLASH_START_PAGE * CODE_PAGE_SIZE) /** @brief End address of the area dedicated for flash log. */ #define FLASH_LOG_END_ADDR (FLASH_LOG_START_ADDR + (NRF_LOG_BACKEND_PAGES * CODE_PAGE_SIZE) - 1) /** @brief Size of the area dedicated for flash log. */ #define FLASH_LOG_SIZE (NRF_LOG_BACKEND_PAGES * CODE_PAGE_SIZE) /** @brief Start address determined in runtime. 
 *
 * If configuration indicates that flash log should be placed after application.
 *
 */
#if defined ( __CC_ARM )
#define RUNTIME_START_ADDR \
_Pragma("diag_suppress 170") \
((NRF_LOG_BACKEND_FLASH_START_PAGE == 0) ? \
(CODE_PAGE_SIZE*CEIL_DIV((uint32_t)CODE_END, CODE_PAGE_SIZE)) : FLASH_LOG_START_ADDR) \
_Pragma("diag_default 170")
#else
#define RUNTIME_START_ADDR ((NRF_LOG_BACKEND_FLASH_START_PAGE == 0) ? \
(CODE_PAGE_SIZE*CEIL_DIV((uint32_t)CODE_END, CODE_PAGE_SIZE)) : FLASH_LOG_START_ADDR)
#endif

/* Forward declaration: fstorage delivers write/erase completion events here. */
static void fstorage_evt_handler(nrf_fstorage_evt_t * p_evt);

/** @brief Message queue for run time flash log. */
#if NRF_LOG_BACKEND_FLASHLOG_ENABLED
NRF_QUEUE_DEF(nrf_log_entry_t *,
              m_flashlog_queue,
              NRF_LOG_BACKEND_FLASHLOG_QUEUE_SIZE,
              NRF_QUEUE_MODE_NO_OVERFLOW);
static const nrf_queue_t * mp_flashlog_queue = &m_flashlog_queue;
#else
static const nrf_queue_t * mp_flashlog_queue = NULL;
#endif

/** @brief Message FIFO for crash log. */
#if NRF_LOG_BACKEND_CRASHLOG_ENABLED
NRF_QUEUE_DEF(nrf_log_entry_t *,
              m_crashlog_queue,
              NRF_LOG_BACKEND_CRASHLOG_FIFO_SIZE,
              NRF_QUEUE_MODE_NO_OVERFLOW);
static const nrf_queue_t * mp_crashlog_queue = &m_crashlog_queue;
#else
static const nrf_queue_t * mp_crashlog_queue = NULL;
#endif

/** @brief Fstorage instance used for flash log. */
NRF_FSTORAGE_DEF(nrf_fstorage_t m_log_flash_fstorage) =
{
    /* Set a handler for fstorage events. */
    .evt_handler = fstorage_evt_handler,
    .start_addr  = FLASH_LOG_START_ADDR,
    .end_addr    = FLASH_LOG_END_ADDR,
};

/** @brief Flash log state. */
typedef enum
{
    LOG_BACKEND_FLASH_ACTIVE,   /**< Flash backend is active. */
    LOG_BACKEND_FLASH_INACTIVE, /**< Flash backend is inactive. All incoming requests are skipped. */
    LOG_BACKEND_FLASH_IN_PANIC, /**< Flash backend is in panic mode. Incoming messages are written to flash in synchronous mode. */
} log_backend_flash_state_t;

static log_backend_flash_state_t m_state;    /**< Flash logger backend state.
 */
static nrf_atomic_flag_t m_busy_flag;        /**< Flag indicating if module performs flash writing. */
static uint32_t m_flash_buf[FLASH_LOG_SER_BUFFER_WORDS]; /**< Buffer used for serializing messages. */
static uint32_t m_curr_addr;                 /**< Address of free spot in the storage area. */
static size_t m_curr_len;                    /**< Length of current message being written. */
static uint32_t m_dropped;                   /**< Number of dropped messages. */

/** @brief Log message string injected when entering panic mode. */
static const char crashlog_str[] = "-----------CRASHLOG------------\r\n";

/** @brief Function saturates input to maximum possible length and rounds up value to be multiple
 *         of word size.
 *
 * @param length Length value.
 *
 * @return Modified input length.
 */
static uint32_t saturate_align_length(uint32_t length)
{
    length = (length > FLASH_LOG_MAX_PAYLOAD_SIZE) ? FLASH_LOG_MAX_PAYLOAD_SIZE : length; //saturate
    length = CEIL_DIV(length, sizeof(uint32_t))*sizeof(uint32_t);
    return length;
}

/**
 * @brief Function for copying logger message to the buffer.
 *
 * @param[in]     p_msg Logger message.
 * @param[out]    p_buf Output buffer where serialized message is placed.
 * @param[in,out] p_len Buffer size as input, length of prepared data as output.
 *
 * @return True if message fits into the buffer, false otherwise
 */
static bool msg_to_buf(nrf_log_entry_t * p_msg, uint8_t * p_buf, size_t * p_len)
{
    uint32_t data_len;
    nrf_log_header_t header = {0};
    size_t memobj_offset = HEADER_SIZE*sizeof(uint32_t);

    /* Header is copied first, payload (args or hexdump bytes) follows it. */
    nrf_memobj_read(p_msg, &header, HEADER_SIZE*sizeof(uint32_t), 0);
    memcpy(p_buf, &header, sizeof(nrf_log_header_t));
    p_buf += sizeof(nrf_log_header_t);

    switch (header.base.generic.type)
    {
        case HEADER_TYPE_STD:
        {
            data_len = header.base.std.nargs * sizeof(uint32_t);
            break;
        }
        case HEADER_TYPE_HEXDUMP:
        {
            data_len = saturate_align_length(header.base.hexdump.len);
            break;
        }
        default:
            *p_len = 0;
            return false;
    }
    nrf_memobj_read(p_msg, p_buf, data_len, memobj_offset);

    if (*p_len >= sizeof(nrf_log_header_t) + data_len)
    {
        *p_len = sizeof(nrf_log_header_t) + data_len;
        return true;
    }
    else
    {
        return false;
    }
}

/**
 * @brief Function for getting logger message stored in flash.
 *
 * @param[in]  p_buf     Pointer to the location where message is stored.
 * @param[out] pp_header Pointer to the log message header.
 * @param[out] pp_data   Pointer to the log message data (arguments or data in case of hexdump).
 * @param[out] p_len     Total length (header + payload) of the entry, in bytes.
 *
 * @return True if message was successfully fetched, false otherwise.
 */
static bool msg_from_buf(uint32_t * p_buf,
                         nrf_log_header_t * * pp_header,
                         uint8_t * * pp_data,
                         uint32_t * p_len)
{
    *pp_header = (nrf_log_header_t *)p_buf;
    *pp_data = (uint8_t *)&p_buf[LOG_HEADER_LEN_WORDS];

    uint32_t data_len;
    switch ((*pp_header)->base.generic.type)
    {
        case HEADER_TYPE_STD:
        {
            data_len = ((*pp_header)->base.std.nargs)*sizeof(uint32_t);
            break;
        }
        case HEADER_TYPE_HEXDUMP:
        {
            data_len = saturate_align_length((*pp_header)->base.hexdump.len);
            break;
        }
        default:
            /* Erased flash (0xFF...) or corrupted entry: treat as end of log. */
            return false;
    }

    *p_len = LOG_HEADER_LEN + data_len;
    return true;
}

/**
 * @brief Function for processing log message queue.
 *
 * If writing to flash is synchronous then function drains the queue and writes all messages to flash.
 * If writing to flash is asynchronous then function starts single write operation. In asynchronous mode
 * function is called when new message is put into the queue and from the flash operation callback.
 *
 * Function detects the situation that flash module reports attempt to write outside dedicated area.
 * In that case flash backend stops writing any new messages.
 *
 * @param p_queue           Queue with log messages.
 * @param fstorage_blocking If true it indicates that flash operations are blocking, event handler is not used.
 */
static void log_msg_queue_process(nrf_queue_t const * p_queue, bool fstorage_blocking)
{
    nrf_log_entry_t * p_msg;
    bool busy = false;
    while (nrf_queue_pop(p_queue, &p_msg) == NRF_SUCCESS)
    {
        ret_code_t err_code;
        m_curr_len = sizeof(m_flash_buf);
        if (!msg_to_buf(p_msg, (uint8_t *)m_flash_buf, &m_curr_len))
        {
            /* Message could not be serialized; release it and move on. */
            nrf_memobj_put(p_msg);
            continue;
        }

        err_code = nrf_fstorage_write(&m_log_flash_fstorage, m_curr_addr, m_flash_buf, m_curr_len, p_msg);

        if (err_code == NRF_SUCCESS)
        {
            if (fstorage_blocking)
            {
                m_curr_addr += m_curr_len;
                nrf_memobj_put(p_msg);
            }
            else
            {
                /* Async mode: one write in flight; the event handler resumes
                 * draining the queue and releases p_msg. */
                busy = true;
                break;
            }
        }
        else if (!fstorage_blocking && (err_code == NRF_ERROR_NO_MEM))
        {
            // fstorage queue got full. Drop entry.
            nrf_memobj_put(p_msg);
            m_dropped++;
            break;
        }
        else if (err_code == NRF_ERROR_INVALID_ADDR)
        {
            // Trying to write outside the area, flash log is full. Skip any new writes.
            nrf_memobj_put(p_msg);
            m_state = LOG_BACKEND_FLASH_INACTIVE;
        }
        else
        {
            ASSERT(false);
        }
    }

    if (!busy)
    {
        UNUSED_RETURN_VALUE(nrf_atomic_flag_clear(&m_busy_flag));
    }
}

/** @brief Drop (free and count) a single oldest message from the given queue. */
static void queue_element_drop(nrf_queue_t const * p_queue)
{
    nrf_log_entry_t * p_msg;
    if (nrf_queue_pop(p_queue, &p_msg) == NRF_SUCCESS)
    {
        m_dropped++;
        nrf_memobj_put(p_msg);
    }
}

/** @brief Fstorage event handler: advances the write address on successful
 *         writes, continues queue processing, and re-activates the backend
 *         after a completed erase of the log area. */
static void fstorage_evt_handler(nrf_fstorage_evt_t * p_evt)
{
    if (m_state == LOG_BACKEND_FLASH_ACTIVE)
    {
        switch (p_evt->id)
        {
            case NRF_FSTORAGE_EVT_WRITE_RESULT:
            {
                if (p_evt->result == NRF_SUCCESS)
                {
                    m_curr_addr += m_curr_len;
                    m_curr_len = 0;
                    log_msg_queue_process(mp_flashlog_queue, false);
                }
                else
                {
                    m_dropped++;
                }

                /* p_param carries the message that was just written. */
                if (p_evt->p_param)
                {
                    nrf_memobj_put((nrf_log_entry_t *)p_evt->p_param);
                }
                break;
            }
            default:
                break;
        }
    }
    else if ((m_state == LOG_BACKEND_FLASH_INACTIVE) &&
             (p_evt->id == NRF_FSTORAGE_EVT_ERASE_RESULT) &&
             (p_evt->addr == RUNTIME_START_ADDR))
    {
        m_state = LOG_BACKEND_FLASH_ACTIVE;
    }
}

/**
 * @brief Function for enqueueing new message.
 *
 * If queue is full then the oldest message is freed.
 *
 * @param p_queue Queue.
 * @param p_msg   Message.
 *
 * @return Number of dropped messages
 */
static uint32_t message_enqueue(nrf_queue_t const * p_queue, nrf_log_entry_t * p_msg)
{
    uint32_t dropped = 0;

    //flag was set, busy so enqueue message
    while (nrf_queue_push(p_queue, &p_msg) != NRF_SUCCESS)
    {
        nrf_log_entry_t * p_old_msg;
        if (nrf_queue_pop(p_queue, &p_old_msg) == NRF_SUCCESS)
        {
            nrf_memobj_put(p_old_msg);
            dropped++;
        }
    }

    return dropped;
}

/** @brief Backend 'put' for the run-time flash log: enqueue the message and
 *         kick off async processing if no write is currently in flight. */
void nrf_log_backend_flashlog_put(nrf_log_backend_t const * p_backend, nrf_log_entry_t * p_msg)
{
    if (m_state == LOG_BACKEND_FLASH_ACTIVE)
    {
        nrf_memobj_get(p_msg);

        m_dropped += message_enqueue(mp_flashlog_queue, p_msg);

        if (nrf_atomic_flag_set_fetch(&m_busy_flag) == 0)
        {
            log_msg_queue_process(mp_flashlog_queue, false);
        }
    }
}

/** @brief Backend 'put' for the crash log: buffer messages in RAM; they are
 *         flushed to flash synchronously once panic mode is entered. */
void nrf_log_backend_crashlog_put(nrf_log_backend_t const * p_backend, nrf_log_entry_t * p_msg)
{
    if (m_state != LOG_BACKEND_FLASH_INACTIVE)
    {
        nrf_memobj_get(p_msg);
        UNUSED_RETURN_VALUE(message_enqueue(mp_crashlog_queue, p_msg));
    }

    if (m_state == LOG_BACKEND_FLASH_IN_PANIC)
    {
        log_msg_queue_process(mp_crashlog_queue, true);
    }
}

void nrf_log_backend_flashlog_flush(nrf_log_backend_t const * p_backend)
{
    queue_element_drop(mp_flashlog_queue);
}

void nrf_log_backend_crashlog_flush(nrf_log_backend_t const * p_backend)
{
    queue_element_drop(mp_crashlog_queue);
}

void nrf_log_backend_flashlog_panic_set(nrf_log_backend_t const * p_backend)
{
    /* Empty */
}

/**
 * @brief Function for injecting log message which will indicate start of crash log.
 */
static void crashlog_marker_inject(void)
{
    nrf_log_header_t crashlog_marker_hdr = {
        .base = {
            .std = {
                .type     = HEADER_TYPE_STD,
                .severity = NRF_LOG_SEVERITY_INFO_RAW,
                .nargs    = 0,
                .addr     = (uint32_t)crashlog_str & STD_ADDR_MASK
            }
        },
        .module_id = 0,
        .timestamp = 0,
    };
    m_flash_buf[0] = crashlog_marker_hdr.base.raw;
    m_flash_buf[1] = crashlog_marker_hdr.module_id;
    m_flash_buf[2] = crashlog_marker_hdr.timestamp;

    (void)nrf_fstorage_write(&m_log_flash_fstorage, m_curr_addr, m_flash_buf, LOG_HEADER_LEN, NULL);
    m_curr_addr += LOG_HEADER_LEN;
}

/** @brief Panic handler: re-initialize fstorage with the blocking NVMC API
 *         and synchronously flush pending flash-log and crash-log messages. */
void nrf_log_backend_crashlog_panic_set(nrf_log_backend_t const * p_backend)
{
    if (nrf_fstorage_init(&m_log_flash_fstorage, &nrf_fstorage_nvmc, NULL) == NRF_SUCCESS)
    {
        m_state = LOG_BACKEND_FLASH_IN_PANIC;

        /* In case of Softdevice MWU may protect access to NVMC. */
        NVIC_DisableIRQ(MWU_IRQn);

        log_msg_queue_process(mp_flashlog_queue, true);
        crashlog_marker_inject();
        log_msg_queue_process(mp_crashlog_queue, true);
    }
    else
    {
        m_state = LOG_BACKEND_FLASH_INACTIVE;
    }
}

/**
 * @brief Function for determining first empty location in area dedicated for flash logger backend.
 */
static uint32_t empty_addr_get(void)
{
    uint32_t token = 0;
    nrf_log_header_t * p_dummy_header;
    uint8_t * p_dummy_data;

    /* Walk existing entries; the token ends up just past the last valid one. */
    while(nrf_log_backend_flash_next_entry_get(&token, &p_dummy_header, &p_dummy_data) == NRF_SUCCESS)
    {
    }

    return token;
}

ret_code_t nrf_log_backend_flash_init(nrf_fstorage_api_t const * p_fs_api)
{
    ret_code_t err_code;
    uint32_t start_addr = RUNTIME_START_ADDR;
    uint32_t end_addr = start_addr + FLASH_LOG_SIZE - 1;

    m_log_flash_fstorage.start_addr = start_addr;
    m_log_flash_fstorage.end_addr = end_addr;

    err_code = nrf_fstorage_init(&m_log_flash_fstorage, p_fs_api, NULL);
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }

    m_curr_addr = empty_addr_get();
    m_state = LOG_BACKEND_FLASH_ACTIVE;

    return err_code;
}

ret_code_t nrf_log_backend_flash_next_entry_get(uint32_t * p_token,
                                                nrf_log_header_t * * pp_header,
                                                uint8_t * * pp_data)
{
    uint32_t * p_addr = p_token;
    uint32_t len;

    /* Token value 0 means "start iteration from the beginning". */
    *p_addr = (*p_addr == 0) ? RUNTIME_START_ADDR : *p_addr;

    if (nrf_fstorage_rmap(&m_log_flash_fstorage, *p_addr) == NULL)
    {
        //Supports only memories which can be mapped for reading.
        return NRF_ERROR_NOT_SUPPORTED;
    }

    if (msg_from_buf((uint32_t *)*p_addr, pp_header, pp_data, &len))
    {
        *p_addr += len;
        return NRF_SUCCESS;
    }
    else
    {
        return NRF_ERROR_NOT_FOUND;
    }
}

ret_code_t nrf_log_backend_flash_erase(void)
{
    ret_code_t err_code;

    /* Backend goes inactive until the erase completes (see event handler). */
    m_state = LOG_BACKEND_FLASH_INACTIVE;
    err_code = nrf_fstorage_erase(&m_log_flash_fstorage, RUNTIME_START_ADDR, NRF_LOG_BACKEND_PAGES, NULL);

    m_curr_addr = RUNTIME_START_ADDR;

    return err_code;
}

/* Backend API vtables exposed to the logger frontend. */
#if NRF_LOG_BACKEND_FLASHLOG_ENABLED
const nrf_log_backend_api_t nrf_log_backend_flashlog_api = {
    .put       = nrf_log_backend_flashlog_put,
    .flush     = nrf_log_backend_flashlog_flush,
    .panic_set = nrf_log_backend_flashlog_panic_set,
};
#endif

#if NRF_LOG_BACKEND_CRASHLOG_ENABLED
const nrf_log_backend_api_t nrf_log_backend_crashlog_api = {
    .put       = nrf_log_backend_crashlog_put,
    .flush     = nrf_log_backend_crashlog_flush,
    .panic_set = nrf_log_backend_crashlog_panic_set,
};
#endif

#if NRF_LOG_BACKEND_FLASH_CLI_CMDS
#include "nrf_cli.h"

static uint8_t m_buffer[64];
static nrf_cli_t const * mp_cli;

static void cli_tx(void const * p_context, char const * p_buffer, size_t len);

static nrf_fprintf_ctx_t m_fprintf_ctx = {
    .p_io_buffer = (char *)m_buffer,
    .io_buffer_size = sizeof(m_buffer)-1,
    .io_buffer_cnt = 0,
    .auto_flush = true,
    .p_user_ctx = &mp_cli,
    .fwrite = cli_tx
};

/** @brief CLI command: erase the flash log area. */
static void flashlog_clear_cmd(nrf_cli_t const * p_cli, size_t argc, char ** argv)
{
    if (nrf_cli_help_requested(p_cli))
    {
        nrf_cli_help_print(p_cli, NULL, 0);
    }

    UNUSED_RETURN_VALUE(nrf_log_backend_flash_erase());
}

#include "nrf_delay.h"
/** @brief fprintf sink: NUL-terminate the chunk in place and forward to CLI. */
static void cli_tx(void const * p_context, char const * p_buffer, size_t len)
{
    nrf_cli_t * * pp_cli = (nrf_cli_t * *)p_context;
    char * p_strbuf = (char *)&p_buffer[len];
    *p_strbuf = '\0';
    nrf_cli_fprintf((nrf_cli_t const *)*pp_cli, NRF_CLI_DEFAULT, p_buffer);
    // nrf_delay_ms(10);
}

/** @brief Format and print a single stored log entry through the CLI. */
static void entry_process(nrf_cli_t const * p_cli, nrf_log_header_t * p_header, uint8_t * p_data)
{
    mp_cli = p_cli;

    nrf_log_str_formatter_entry_params_t params =
    {
        .timestamp = p_header->timestamp,
        .module_id = p_header->module_id,
        .use_colors = 0,
    };

    switch (p_header->base.generic.type)
    {
        case HEADER_TYPE_STD:
        {
            params.severity = (nrf_log_severity_t)p_header->base.std.severity;
            nrf_log_std_entry_process((const char *)((uint32_t)p_header->base.std.addr),
                                      (uint32_t *)p_data,
                                      p_header->base.std.nargs,
                                      &params,
                                      &m_fprintf_ctx);
            break;
        }
        case HEADER_TYPE_HEXDUMP:
        {
            params.severity = (nrf_log_severity_t)p_header->base.hexdump.severity;
            nrf_log_hexdump_entry_process(p_data,
                                          p_header->base.hexdump.len,
                                          &params,
                                          &m_fprintf_ctx);
            break;
        }
        default:
            ASSERT(0);
    }
}

/** @brief CLI command: iterate and print all log entries stored in flash. */
static void flashlog_read_cmd(nrf_cli_t const * p_cli, size_t argc, char ** argv)
{
    if (nrf_cli_help_requested(p_cli))
    {
        nrf_cli_help_print(p_cli, NULL, 0);
    }

    uint32_t token = 0;
    uint8_t * p_data = NULL;
    bool empty = true;
    nrf_log_header_t * p_header;

    while (1)
    {
        if (nrf_log_backend_flash_next_entry_get(&token, &p_header, &p_data) == NRF_SUCCESS)
        {
            entry_process(p_cli, p_header, p_data);
            empty = false;
        }
        else
        {
            break;
        }
    }

    if (empty)
    {
        nrf_cli_fprintf(p_cli, NRF_CLI_ERROR, "Flash log empty\r\n");
    }
}

/** @brief CLI command: print location, usage and drop statistics. */
static void flashlog_status_cmd(nrf_cli_t const * p_cli, size_t argc, char ** argv)
{
    if (nrf_cli_help_requested(p_cli))
    {
        nrf_cli_help_print(p_cli, NULL, 0);
    }

    nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "Flash log status:\r\n");
    nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\t\t- Location (address: 0x%08X, length: %d)\r\n",
                    RUNTIME_START_ADDR, FLASH_LOG_SIZE);
    nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\t\t- Current usage:%d%% (%d of %d bytes used)\r\n",
                    100ul * (m_curr_addr - RUNTIME_START_ADDR)/FLASH_LOG_SIZE,
                    m_curr_addr - RUNTIME_START_ADDR,
                    FLASH_LOG_SIZE);
    nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\t\t- Dropped logs: %d\r\n", m_dropped);
}

NRF_CLI_CREATE_STATIC_SUBCMD_SET(m_flashlog_cmd)
{
    NRF_CLI_CMD(clear, NULL, "Remove logs", flashlog_clear_cmd),
    NRF_CLI_CMD(read, NULL, "Read stored logs", flashlog_read_cmd),
    NRF_CLI_CMD(status, NULL, "Flash log status", flashlog_status_cmd),
    NRF_CLI_SUBCMD_SET_END
};

NRF_CLI_CMD_REGISTER(flashlog, &m_flashlog_cmd, "Commands for reading logs stored in non-volatile memory", NULL);

#endif //NRF_LOG_BACKEND_FLASH_CLI_CMDS

#endif //NRF_MODULE_ENABLED(NRF_LOG) && NRF_MODULE_ENABLED(NRF_LOG_BACKEND_FLASH)
762593.c
/******************************************************************************
 * The MIT License
 *
 * Copyright (c) 2010 Perry Hung.
 * Copyright (c) 2011, 2012 LeafLabs, LLC.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *****************************************************************************/

/**
 * @file wirish/syscalls.c
 * @brief newlib stubs
 *
 * Low level system routines used by newlib for basic I/O and memory
 * allocation. You can override most of these.
 */

#include <libmaple/libmaple.h>

#include <sys/stat.h>
#include <errno.h>
#include <stddef.h>

/* If CONFIG_HEAP_START (or CONFIG_HEAP_END) isn't defined, then
 * assume _lm_heap_start (resp. _lm_heap_end) is appropriately set by
 * the linker */
#ifndef CONFIG_HEAP_START
extern char _lm_heap_start;
#define CONFIG_HEAP_START               ((void *)&_lm_heap_start)
#endif
#ifndef CONFIG_HEAP_END
extern char _lm_heap_end;
#define CONFIG_HEAP_END                 ((void *)&_lm_heap_end)
#endif

/*
 * _sbrk -- Increment the program break.
 *
 * Get incr bytes more RAM (for use by the heap). malloc() and
 * friends call this function behind the scenes.
 *
 * Returns the previous break on success, (void *)-1 with errno set
 * to ENOMEM when the request would leave [heap_start, heap_end).
 */
void *_sbrk(int incr) {
    static void * pbreak = NULL; /* current program break */
    void * ret;

    if (pbreak == NULL) {
        pbreak = CONFIG_HEAP_START;
    }

    /* Use char* for the pointer arithmetic: arithmetic on void* is a
     * GCC extension, not standard C. Both bounds are checked so that
     * negative increments (heap shrink) can't move below heap start. */
    if (((char *)CONFIG_HEAP_END - (char *)pbreak < incr) ||
        ((char *)pbreak - (char *)CONFIG_HEAP_START < -incr)) {
        errno = ENOMEM;
        return (void *)-1;
    }
    ret = pbreak;
    pbreak = (char *)pbreak + incr;
    return ret;
}

/* File-descriptor stubs: there is no filesystem; every "file" behaves
 * like an always-open character device. */
__weak int _open(const char *path __attribute__((unused)),
                 int flags __attribute__((unused)), ...) {
    return 1;
}

__weak int _close(int fd __attribute__((unused))) {
    return 0;
}

/* Report everything as a character device so newlib uses unbuffered I/O. */
__weak int _fstat(int fd __attribute__((unused)), struct stat *st) {
    st->st_mode = S_IFCHR;
    return 0;
}

__weak int _isatty(int fd __attribute__((unused))) {
    return 1;
}

__weak int isatty(int fd __attribute__((unused))) {
    return 1;
}

/* Seeking is meaningless on a character device. */
__weak int _lseek(int fd __attribute__((unused)),
                  off_t pos __attribute__((unused)),
                  int whence __attribute__((unused))) {
    return -1;
}

/* Default console input: override to hook up a real device. */
__weak unsigned char getch(void) {
    return 0;
}

/* Reads exactly one character per call regardless of cnt. */
__weak int _read(int fd __attribute__((unused)),
                 char *buf,
                 size_t cnt __attribute__((unused))) {
    *buf = getch();

    return 1;
}

/* Default console output: override to hook up a real device. */
__weak void putch(unsigned char c __attribute__((unused))) {
}

/*
 * cgets -- read a line with minimal editing (backspace) and echo.
 *
 * The buffer is zero-filled first, so the result is always
 * NUL-terminated; at most bufsize-1 characters are stored.
 */
__weak void cgets(char *s, int bufsize) {
    char *p;
    int c;
    int i;

    for (i = 0; i < bufsize; i++) {
        *(s+i) = 0;
    } // memset(s, 0, bufsize);

    p = s;

    for (p = s; p < s + bufsize-1;) {
        c = getch();
        switch (c) {
        case '\r' :
        case '\n' :
            putch('\r');
            putch('\n');
            *p = '\n';
            return;

        case '\b' :
            if (p > s) {
                /* The erased slot is re-zeroed; the next character
                 * overwrites the one being deleted. */
                *p-- = 0;
                putch('\b');
                putch(' ');
                putch('\b');
            }
            break;

        default :
            putch(c);
            *p++ = c;
            break;
        }
    }
    return;
}

/* Write cnt bytes to the console via putch().
 * Note: the loop counter is size_t to match cnt; a signed int would
 * trigger a signed/unsigned comparison and misbehave for huge counts. */
__weak int _write(int fd __attribute__((unused)),
                  const char *buf,
                  size_t cnt) {
    size_t i;

    for (i = 0; i < cnt; i++)
        putch(buf[i]);

    return cnt;
}

/* Override fgets() in newlib with a version that does line editing */
__weak char *fgets(char *s, int bufsize, void *f __attribute__((unused))) {
    cgets(s, bufsize);
    return s;
}

/* There is nowhere to exit to on bare metal: spin forever. */
__weak void _exit(int exitcode __attribute__((unused))) {
    while (1)
        ;
}
856577.c
#include "custom-shell-surface.h"

#include "gtk-wayland.h"
#include "gdk-window-hack.h"

#include <gtk/gtk.h>
#include <gdk/gdk.h>
#include <gdk/gdkwayland.h>

/* Key under which the shell surface is attached to its GtkWindow via
 * g_object_set_data_full(); the destroy notify owns the surface. */
static const char *custom_shell_surface_key = "wayland_custom_shell_surface";

struct _CustomShellSurfacePrivate
{
    GtkWindow *gtk_window; // the window this surface is attached to (not owned)
};

/* GDestroyNotify for the object data: lets the subclass clean up, then
 * frees the private struct and the surface itself. */
static void
custom_shell_surface_on_window_destroy (CustomShellSurface *self)
{
    self->virtual->finalize (self);
    g_free (self->private);
    g_free (self);
}

/* "realize" handler: take over surface management from GDK before the
 * window is mapped, so GDK never gives the wl_surface a toplevel role. */
static void
custom_shell_surface_on_window_realize (GtkWidget *widget, CustomShellSurface *self)
{
    g_return_if_fail (GTK_WIDGET (self->private->gtk_window) == widget);

    GdkWindow *gdk_window = gtk_widget_get_window (GTK_WIDGET (self->private->gtk_window));
    g_return_if_fail (gdk_window);

    gdk_window_hack_init (gdk_window);

    gdk_wayland_window_set_use_custom_surface (gdk_window);
}

/* "map" handler: assign the subclass-specific role to the wl_surface and
 * commit, then roundtrip so the compositor has configured it before GTK
 * continues. */
static void
custom_shell_surface_on_window_map (GtkWidget *widget, CustomShellSurface *self)
{
    g_return_if_fail (GTK_WIDGET (self->private->gtk_window) == widget);

    GdkWindow *gdk_window = gtk_widget_get_window (GTK_WIDGET (self->private->gtk_window));
    g_return_if_fail (gdk_window);

    struct wl_surface *wl_surface = gdk_wayland_window_get_wl_surface (gdk_window);
    g_return_if_fail (wl_surface);

    // In some cases (observed when a mate panel has an image background) GDK will attach a buffer just after creating
    // the surface (see the implementation of gdk_wayland_window_show() for details). Giving the surface a role with a
    // buffer attached is a protocol violation, so we attach a null buffer. GDK hasn't commited the buffer it may have
    // attached, so we don't need to commit.
    wl_surface_attach (wl_surface, NULL, 0, 0);

    self->virtual->map (self, wl_surface);

    wl_surface_commit (wl_surface);
    wl_display_roundtrip (gdk_wayland_display_get_wl_display (gdk_display_get_default ()));
}

void
custom_shell_surface_init (CustomShellSurface *self, GtkWindow *gtk_window)
{
    g_assert (self->virtual); // Subclass should have set this up first

    self->private = g_new0 (CustomShellSurfacePrivate, 1);
    self->private->gtk_window = gtk_window;

    g_return_if_fail (gtk_window);
    // The surface must be installed before the first map.
    g_return_if_fail (!gtk_widget_get_mapped (GTK_WIDGET (gtk_window)));

    g_object_set_data_full (G_OBJECT (gtk_window),
                            custom_shell_surface_key,
                            self,
                            (GDestroyNotify) custom_shell_surface_on_window_destroy);
    g_signal_connect (gtk_window, "realize", G_CALLBACK (custom_shell_surface_on_window_realize), self);
    g_signal_connect (gtk_window, "map", G_CALLBACK (custom_shell_surface_on_window_map), self);

    if (gtk_widget_get_realized (GTK_WIDGET (gtk_window)))
    {
        // We must be in the process of realizing now
        custom_shell_surface_on_window_realize (GTK_WIDGET (gtk_window), self);
    }
}

CustomShellSurface *
gtk_window_get_custom_shell_surface (GtkWindow *gtk_window)
{
    if (!gtk_window)
        return NULL;

    return g_object_get_data (G_OBJECT (gtk_window), custom_shell_surface_key);
}

GtkWindow *
custom_shell_surface_get_gtk_window (CustomShellSurface *self)
{
    g_return_val_if_fail (self, NULL);

    return self->private->gtk_window;
}

void
custom_shell_surface_get_window_geom (CustomShellSurface *self, GdkRectangle *geom)
{
    g_return_if_fail (self);

    // TODO: Store the actual window geometry used
    *geom = gtk_wayland_get_logical_geom (self->private->gtk_window);
}

/* Commit the window's wl_surface if it exists; no-op at every earlier
 * stage of the window lifecycle. */
void
custom_shell_surface_needs_commit (CustomShellSurface *self)
{
    if (!self->private->gtk_window)
        return;

    GdkWindow *gdk_window = gtk_widget_get_window (GTK_WIDGET (self->private->gtk_window));
    if (!gdk_window)
        return;

    struct wl_surface *wl_surface = gdk_wayland_window_get_wl_surface (gdk_window);
    if (!wl_surface)
        return;

    wl_surface_commit (wl_surface);
}

/* Hide and re-show the window so the surface is unmapped and mapped
 * again, re-running the role assignment in the "map" handler. */
void
custom_shell_surface_remap (CustomShellSurface *self)
{
    GtkWidget *window_widget = GTK_WIDGET (self->private->gtk_window);

    g_return_if_fail (window_widget);

    gtk_widget_hide (window_widget);
    gtk_widget_show (window_widget);
}
871231.c
/* * Copyright 2016-2020 NXP * * SPDX-License-Identifier: BSD-3-Clause */ #include "fsl_uart_sdma.h" /******************************************************************************* * Definitions ******************************************************************************/ /* Component ID definition, used by tools. */ #ifndef FSL_COMPONENT_ID #define FSL_COMPONENT_ID "platform.drivers.iuart_sdma" #endif /*<! Structure definition for uart_sdma_private_handle_t. The structure is private. */ typedef struct _uart_sdma_private_handle { UART_Type *base; uart_sdma_handle_t *handle; } uart_sdma_private_handle_t; /* UART SDMA transfer handle. */ enum _uart_sdma_tansfer_states { kUART_TxIdle, /* TX idle. */ kUART_TxBusy, /* TX busy. */ kUART_RxIdle, /* RX idle. */ kUART_RxBusy /* RX busy. */ }; /******************************************************************************* * Variables ******************************************************************************/ /*<! Private handle only used for internally. */ static UART_Type *const s_uartSdmaBases[] = UART_BASE_PTRS; static uart_sdma_private_handle_t s_sdmaPrivateHandle[ARRAY_SIZE(s_uartSdmaBases)]; /******************************************************************************* * Prototypes ******************************************************************************/ /*! * @brief UART SDMA send finished callback function. * * This function is called when UART SDMA send finished. It disables the UART * TX SDMA request and sends @ref kStatus_UART_TxIdle to UART callback. * * @param handle The SDMA handle. * @param param Callback function parameter. */ static void UART_SendSDMACallback(sdma_handle_t *handle, void *param, bool transferDone, uint32_t tcds); /*! * @brief UART SDMA receive finished callback function. * * This function is called when UART SDMA receive finished. It disables the UART * RX SDMA request and sends @ref kStatus_UART_RxIdle to UART callback. * * @param handle The SDMA handle. 
* @param param Callback function parameter. */ static void UART_ReceiveSDMACallback(sdma_handle_t *handle, void *param, bool transferDone, uint32_t tcds); /******************************************************************************* * Code ******************************************************************************/ static void UART_SendSDMACallback(sdma_handle_t *handle, void *param, bool transferDone, uint32_t tcds) { assert(param != NULL); uart_sdma_private_handle_t *uartPrivateHandle = (uart_sdma_private_handle_t *)param; if (transferDone) { UART_TransferAbortSendSDMA(uartPrivateHandle->base, uartPrivateHandle->handle); /* Wait for transmission complete */ while (0U == (uartPrivateHandle->base->USR2 & UART_USR2_TXDC_MASK)) { } if (uartPrivateHandle->handle->callback != NULL) { uartPrivateHandle->handle->callback(uartPrivateHandle->base, uartPrivateHandle->handle, kStatus_UART_TxIdle, uartPrivateHandle->handle->userData); } } } static void UART_ReceiveSDMACallback(sdma_handle_t *handle, void *param, bool transferDone, uint32_t tcds) { assert(param != NULL); uart_sdma_private_handle_t *uartPrivateHandle = (uart_sdma_private_handle_t *)param; if (transferDone) { /* Disable transfer. */ UART_TransferAbortReceiveSDMA(uartPrivateHandle->base, uartPrivateHandle->handle); if (uartPrivateHandle->handle->callback != NULL) { uartPrivateHandle->handle->callback(uartPrivateHandle->base, uartPrivateHandle->handle, kStatus_UART_RxIdle, uartPrivateHandle->handle->userData); } } } /*! * brief Initializes the UART handle which is used in transactional functions. * param base UART peripheral base address. * param handle Pointer to the uart_sdma_handle_t structure. * param callback UART callback, NULL means no callback. * param userData User callback function data. * param rxSdmaHandle User-requested DMA handle for RX DMA transfer. * param txSdmaHandle User-requested DMA handle for TX DMA transfer. * param eventSourceTx Eventsource for TX DMA transfer. 
* param eventSourceRx Eventsource for RX DMA transfer.
 */
void UART_TransferCreateHandleSDMA(UART_Type *base,
                                   uart_sdma_handle_t *handle,
                                   uart_sdma_transfer_callback_t callback,
                                   void *userData,
                                   sdma_handle_t *txSdmaHandle,
                                   sdma_handle_t *rxSdmaHandle,
                                   uint32_t eventSourceTx,
                                   uint32_t eventSourceRx)
{
    assert(handle != NULL);

    uint32_t instance                 = UART_GetInstance(base);
    uart_sdma_private_handle_t *priv  = &s_sdmaPrivateHandle[instance];

    /* Start from a clean handle with both directions idle. */
    (void)memset(handle, 0, sizeof(*handle));
    handle->rxState = (uint8_t)kUART_RxIdle;
    handle->txState = (uint8_t)kUART_TxIdle;

    handle->callback     = callback;
    handle->userData     = userData;
    handle->txSdmaHandle = txSdmaHandle;
    handle->rxSdmaHandle = rxSdmaHandle;

    /* The private handle links the SDMA callbacks back to this UART/handle pair. */
    priv->base   = base;
    priv->handle = handle;

    /* Configure TX direction, if the caller supplied a DMA handle. */
    if (txSdmaHandle != NULL)
    {
        txSdmaHandle->eventSource = eventSourceTx;
        SDMA_SetCallback(txSdmaHandle, UART_SendSDMACallback, priv);
    }
    /* Configure RX direction, if the caller supplied a DMA handle. */
    if (rxSdmaHandle != NULL)
    {
        rxSdmaHandle->eventSource = eventSourceRx;
        SDMA_SetCallback(rxSdmaHandle, UART_ReceiveSDMACallback, priv);
    }
}

/*!
 * brief Sends data using sDMA.
 *
 * This function sends data using sDMA. This is a non-blocking function, which returns
 * right away. When all data is sent, the send callback function is called.
 *
 * param base UART peripheral base address.
 * param handle UART handle pointer.
 * param xfer UART sDMA transfer structure. See #uart_transfer_t.
 * retval kStatus_Success if succeeded; otherwise failed.
 * retval kStatus_UART_TxBusy Previous transfer ongoing.
 * retval kStatus_InvalidArgument Invalid argument.
*/
status_t UART_SendSDMA(UART_Type *base, uart_sdma_handle_t *handle, uart_transfer_t *xfer)
{
    assert(handle != NULL);
    assert(handle->txSdmaHandle != NULL);
    assert(xfer != NULL);
    assert(xfer->data != NULL);
    assert(xfer->dataSize != 0U);

    sdma_transfer_config_t xferConfig = {0U};
    sdma_peripheral_t perType         = kSDMA_PeripheralTypeUART;

    /* Reject overlapping sends: the previous TX must finish first. */
    if (handle->txState == (uint8_t)kUART_TxBusy)
    {
        return kStatus_UART_TxBusy;
    }

    handle->txState       = (uint8_t)kUART_TxBusy;
    handle->txDataSizeAll = xfer->dataSize;

#if defined(FSL_FEATURE_SOC_SPBA_COUNT) && (FSL_FEATURE_SOC_SPBA_COUNT > 0)
    /* UART instances inside the SPBA need the SPBA-specific peripheral type. */
    if (SDMA_IsPeripheralInSPBA((uint32_t)base))
    {
        perType = kSDMA_PeripheralTypeUART_SP;
    }
#endif /* FSL_FEATURE_SOC_SPBA_COUNT */

    /* Memory -> UTXD, one byte at a time. */
    SDMA_PrepareTransfer(&xferConfig, (uint32_t)xfer->data, (uint32_t) & (base->UTXD), sizeof(uint8_t),
                         sizeof(uint8_t), sizeof(uint8_t), (uint32_t)xfer->dataSize,
                         handle->txSdmaHandle->eventSource, perType, kSDMA_MemoryToPeripheral);

    /* Submit, start, then let the UART raise TX DMA requests. */
    SDMA_SubmitTransfer(handle->txSdmaHandle, &xferConfig);
    SDMA_StartTransfer(handle->txSdmaHandle);
    UART_EnableTxDMA(base, true);
    return kStatus_Success;
}

/*!
 * brief Receives data using sDMA.
 *
 * This function receives data using sDMA. This is a non-blocking function, which returns
 * right away. When all data is received, the receive callback function is called.
 *
 * param base UART peripheral base address.
 * param handle Pointer to the uart_sdma_handle_t structure.
 * param xfer UART sDMA transfer structure. See #uart_transfer_t.
 * retval kStatus_Success if succeeded; otherwise failed.
 * retval kStatus_UART_RxBusy Previous transfer ongoing.
 * retval kStatus_InvalidArgument Invalid argument.
*/ status_t UART_ReceiveSDMA(UART_Type *base, uart_sdma_handle_t *handle, uart_transfer_t *xfer) { assert(handle != NULL); assert(handle->rxSdmaHandle != NULL); assert(xfer != NULL); assert(xfer->data != NULL); assert(xfer->dataSize != 0U); sdma_transfer_config_t xferConfig = {0U}; status_t status; sdma_peripheral_t perType = kSDMA_PeripheralTypeUART; /* If previous RX not finished. */ if ((uint8_t)kUART_RxBusy == handle->rxState) { status = kStatus_UART_RxBusy; } else { handle->rxState = (uint8_t)kUART_RxBusy; handle->rxDataSizeAll = xfer->dataSize; #if defined(FSL_FEATURE_SOC_SPBA_COUNT) && (FSL_FEATURE_SOC_SPBA_COUNT > 0) bool isSpba = SDMA_IsPeripheralInSPBA((uint32_t)base); /* Judge if the instance is located in SPBA */ if (isSpba) { perType = kSDMA_PeripheralTypeUART_SP; } #endif /* FSL_FEATURE_SOC_SPBA_COUNT */ /* Prepare transfer. */ SDMA_PrepareTransfer(&xferConfig, (uint32_t) & (base->URXD), (uint32_t)xfer->data, sizeof(uint8_t), sizeof(uint8_t), sizeof(uint8_t), (uint32_t)xfer->dataSize, handle->rxSdmaHandle->eventSource, perType, kSDMA_PeripheralToMemory); /* Submit transfer. */ SDMA_SubmitTransfer(handle->rxSdmaHandle, &xferConfig); SDMA_StartTransfer(handle->rxSdmaHandle); /* Enable UART RX SDMA. */ UART_EnableRxDMA(base, true); status = kStatus_Success; } return status; } /*! * brief Aborts the sent data using sDMA. * * This function aborts sent data using sDMA. * * param base UART peripheral base address. * param handle Pointer to the uart_sdma_handle_t structure. */ void UART_TransferAbortSendSDMA(UART_Type *base, uart_sdma_handle_t *handle) { assert(handle != NULL); assert(handle->txSdmaHandle != NULL); /* Disable UART TX SDMA. */ UART_EnableTxDMA(base, false); /* Stop transfer. */ SDMA_AbortTransfer(handle->txSdmaHandle); handle->txState = (uint8_t)kUART_TxIdle; } /*! * brief Aborts the receive data using sDMA. * * This function aborts receive data using sDMA. * * param base UART peripheral base address. 
* param handle Pointer to the uart_sdma_handle_t structure.
 */
void UART_TransferAbortReceiveSDMA(UART_Type *base, uart_sdma_handle_t *handle)
{
    assert(handle != NULL);
    assert(handle->rxSdmaHandle != NULL);

    /* Stop the UART from raising further RX DMA requests, then tear down
     * the DMA channel and mark the direction idle. */
    UART_EnableRxDMA(base, false);
    SDMA_AbortTransfer(handle->rxSdmaHandle);
    handle->rxState = (uint8_t)kUART_RxIdle;
}
93639.c
/*
** EPITECH PROJECT, 2020
** PSU_minishell2_2019
** File description:
** fonction_system
*/

#include "../include/my.h"

/* Print a diagnostic for the errno left behind by a failed execve(),
** prefixed with the command name (avv[0]). */
void errno_messages(char **avv)
{
    if (errno == EACCES) {
        my_putstr(avv[0]);
        my_putstr(": Permission denied.\n");
    }
    if (errno == ENOEXEC) {
        my_putstr(avv[0]);
        my_putstr(": Exec format error. Wrong Architecture.\n");
    }
    if (errno == ENOENT) {
        my_putstr(avv[0]);
        my_putstr(": Command not found.\n");
    }
}

/* Try to execute avv using the candidate path pos->tab[pos->y].
** Returns 1 when every PATH entry has been exhausted without a match,
** 0 otherwise.
** NOTE(review): the concatenation order (avv[0] + "/" + pos->path) looks
** inverted for a dir+cmd lookup; presumably my_strcat/tab layout accounts
** for it — verify against initialized_value2()/my_getenv_sys(). */
int suite(char **avv, char **env, pos_t *pos)
{
    pid_t pid;
    int status = 0;

    pos->path = pos->tab[pos->y];
    pos->total = my_strcat(my_strcat(avv[0], "/"), pos->path);
    if (access(pos->total, X_OK) == -1)
        pos->mult++;
    if (access(pos->total, X_OK) == 0) {
        if ((pid = fork()) == 0) {
            execve(pos->total, avv, env);
            /* BUGFIX: on execve failure the child previously fell back
            ** into the shell loop; report the error and leave. */
            errno_messages(avv);
            _exit(1);
        }
        waitpid(pid, &status, 0);
        pos->action_man++;
        return (0);
    }
    if (pos->mult == pos->total_db + 1) {
        my_putstr(avv[0]);
        my_putstr(": Command not found.\n");
        return (1);
    }
    return (0);
}

/* Return 0 when a PATH variable exists in env, 1 otherwise.
** NOTE(review): tab_e()'s result is never freed — leak; freeing needs
** knowledge of tab_e's allocation, so only flagged here. */
int check_path_exist(char **env)
{
    int i = 0;
    char **tabe = tab_e(env);

    while (tabe[i]) {
        if (my_strcmp(tabe[i], "PATH") == 0)
            return (0);
        i++;
    }
    return (1);
}

/* Run avv directly when it is an absolute ("/...") or relative ("./...")
** path. Returns 1 when the command was handled here, 0 otherwise.
** BUGFIX: the original called wait(pid) — passing a pid_t where wait()
** expects an int* (undefined behavior) — and the failing child ran
** kill(pid, SIGCHLD) with pid == 0, signalling the whole process group,
** then exit(0). Use waitpid() and _exit(1) instead. */
int execve_slash(char **avv, char **env)
{
    pid_t pid;
    int status = 0;

    if (avv[0][0] == '/' || avv[0][0] == '.') {
        if ((pid = fork()) == 0) {
            execve(avv[0], avv, env);
            errno_messages(avv);
            _exit(1);
        }
        waitpid(pid, &status, 0);
        return (1);
    }
    return (0);
}

/* Entry point: dispatch a command either as a direct path (execve_slash)
** or by walking the PATH entries (suite). Always returns pos. */
pos_t *fsystem(pos_t *pos, char **env, char **avv)
{
    if (execve_slash(avv, env) == 1)
        return (pos);
    while (pos->action_man == 0) {
        initialized_value2(pos);
        if (check_path_exist(env) == 1 ||
            my_getenv_sys(env, "PATH", pos) == 1) {
            my_putstr(avv[0]);
            my_putstr(": Command not found.\n");
            return (pos);
        }
        if (suite(avv, env, pos) == 1)
            return (pos);
    }
    return (pos);
}
244900.c
#include <bitcoin/feerate.h>
#include <bitcoin/script.h>
#include <ccan/asort/asort.h>
#include <ccan/mem/mem.h>
#include <ccan/tal/str/str.h>
#include <common/htlc_tx.h>
#include <common/initial_commit_tx.h>
#include <common/keyset.h>
#include <common/lease_rates.h>
#include <common/memleak.h>
#include <common/overflows.h>
#include <common/peer_billboard.h>
#include <common/status.h>
#include <common/subdaemon.h>
#include <common/type_to_string.h>
#include <hsmd/hsmd_wiregen.h>
#include <onchaind/onchain_types.h>
#include <onchaind/onchaind_wiregen.h>
#include <unistd.h>
#include <wire/wire_sync.h>
#include "onchain_types_names_gen.h"

/* stdin == requests */
#define REQ_FD STDIN_FILENO
#define HSM_FD 3

/* NOTE(review): double-evaluation macro — do not call with side-effecting
 * arguments. */
#define max(a, b) ((a) > (b) ? (a) : (b))

/* Required in various places: keys for commitment transaction. */
static const struct keyset *keyset;

/* IFF it's their commitment tx: HSM can't derive their per-commitment point! */
static const struct pubkey *remote_per_commitment_point;

/* The commitment number we're dealing with (if not mutual close) */
static u64 commit_num;

/* The feerate for the transaction spending our delayed output. */
static u32 delayed_to_us_feerate;

/* The feerate for transactions spending HTLC outputs. */
static u32 htlc_feerate;

/* The feerate for transactions spending from revoked transactions. */
static u32 penalty_feerate;

/* Min and max feerates we ever used */
static u32 min_possible_feerate, max_possible_feerate;

/* The dust limit to use when we generate transactions. */
static struct amount_sat dust_limit;

/* The CSV delays for each side. */
static u32 to_self_delay[NUM_SIDES];

/* Where we send money to (our wallet) */
static struct pubkey our_wallet_pubkey;

/* Their revocation secret (only if they cheated). */
static const struct secret *remote_per_commitment_secret;

/* one value is useful for a few witness scripts */
static const u8 ONE = 0x1;

/* When to tell master about HTLCs which are missing/timed out */
static u32 reasonable_depth;

/* The messages to send at that depth. */
static u8 **missing_htlc_msgs;

/* Our recorded channel balance at 'chain time' */
static struct amount_msat our_msat;

/* Needed for anchor outputs */
static struct pubkey funding_pubkey[NUM_SIDES];

/* At what commit number does option_static_remotekey apply? */
static u64 static_remotekey_start[NUM_SIDES];

/* Does option_anchor_outputs apply to this commitment tx? */
static bool option_anchor_outputs;

/* The minimum relay feerate acceptable to the fullnode. */
static u32 min_relay_feerate;

/* If we broadcast a tx, or need a delay to resolve the output. */
struct proposed_resolution {
	/* This can be NULL if our proposal is to simply ignore it after depth */
	const struct bitcoin_tx *tx;
	/* Non-zero if this is CSV-delayed. */
	u32 depth_required;
	enum tx_type tx_type;
};

/* How it actually got resolved. */
struct resolution {
	struct bitcoin_txid txid;
	unsigned int depth;
	enum tx_type tx_type;
};

/* One on-chain output we are watching, together with our plan for it
 * (proposal) and, once settled, how it actually resolved (resolved). */
struct tracked_output {
	enum tx_type tx_type;
	struct bitcoin_txid txid;
	u32 tx_blockheight;
	/* FIXME: Convert all depths to blocknums, then just get new blk msgs */
	u32 depth;
	u32 outnum;
	struct amount_sat sat;
	enum output_type output_type;

	/* If it is an HTLC, this is set, wscript is non-NULL. */
	struct htlc_stub htlc;
	const u8 *wscript;

	/* If it's an HTLC off our unilateral, this is their sig for htlc_tx */
	const struct bitcoin_signature *remote_htlc_sig;

	/* Our proposed solution (if any) */
	struct proposed_resolution *proposal;

	/* If it is resolved. */
	struct resolution *resolved;

	/* stashed so we can pass it along to the coin ledger */
	struct sha256 payment_hash;
};

/* helper to compare output script with our tal'd script */
static bool wally_tx_output_scripteq(const struct wally_tx_output *out,
				     const u8 *script)
{
	return memeq(out->script, out->script_len,
		     script, tal_bytelen(script));
}

/* The feerate for the HTLC txs (which we grind) are the same as the
 * feerate for the main tx.  However, there may be dust HTLCs which
 * were added to the fee, so we can only estimate a maximum feerate */
static void trim_maximum_feerate(struct amount_sat funding,
				 const struct tx_parts *commitment)
{
	size_t weight;
	struct amount_sat fee = funding;

	/* FIXME: This doesn't work for elements? */
	if (chainparams->is_elements)
		return;

	weight = bitcoin_tx_core_weight(tal_count(commitment->inputs),
					tal_count(commitment->outputs));

	/* BOLT #3:
	 * ## Commitment Transaction
	 *...
	 *   * `txin[0]` script bytes: 0
	 *   * `txin[0]` witness: `0 <signature_for_pubkey1> <signature_for_pubkey2>`
	 */
	/* Account for witness (1 byte count + 1 empty + sig + sig) */
	assert(tal_count(commitment->inputs) == 1);
	weight += bitcoin_tx_input_weight(false, 1 + 1 + 2 * bitcoin_tx_input_sig_weight());

	for (size_t i = 0; i < tal_count(commitment->outputs); i++) {
		struct amount_asset amt;
		weight += bitcoin_tx_output_weight(commitment->outputs[i]->script_len);

		amt = wally_tx_output_get_amount(commitment->outputs[i]);
		if (!amount_asset_is_main(&amt))
			continue;
		if (!amount_sat_sub(&fee, fee, amount_asset_to_sat(&amt))) {
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Unable to subtract fee");
		}
	}

	status_debug("reducing max_possible_feerate from %u...",
		     max_possible_feerate);
	/* This is naive, but simple. */
	while (amount_sat_greater(amount_tx_fee(max_possible_feerate, weight),
				  fee))
		max_possible_feerate--;
	status_debug("... to %u", max_possible_feerate);
}

/* Forward a coin movement to the master over REQ_FD, freeing it if taken. */
static void send_coin_mvt(struct chain_coin_mvt *mvt TAKES)
{
	wire_sync_write(REQ_FD,
			take(towire_onchaind_notify_coin_mvt(NULL, mvt)));

	if (taken(mvt))
		tal_free(mvt);
}

static void record_their_successful_cheat(const struct bitcoin_txid *txid,
					  u32 blockheight,
					  struct tracked_output *out)
{
	struct chain_coin_mvt *mvt;
	/* They successfully spent a delayed_to_them output
	 * that we were expecting to revoke */
	mvt = new_coin_penalty_sat(NULL, NULL, txid, &out->txid,
				   out->outnum, blockheight, out->sat);

	send_coin_mvt(take(mvt));
}

static void record_htlc_fulfilled(const struct bitcoin_txid *txid,
				  struct tracked_output *out,
				  u32 blockheight,
				  bool we_fulfilled)
{
	struct chain_coin_mvt *mvt;

	/* we're recording the *deposit* of a utxo which contained channel
	 * funds (htlc).
	 *
	 * since we really don't know if this was a 'routed' or 'destination'
	 * htlc here, we record it as a 'deposit/withdrawal' type */
	mvt = new_coin_onchain_htlc_sat(NULL, NULL, txid, &out->txid,
					out->outnum, out->payment_hash,
					blockheight, out->sat, we_fulfilled);

	send_coin_mvt(take(mvt));
}

static void update_ledger_chain_fees_msat(const struct bitcoin_txid *txid,
					  u32 blockheight,
					  struct amount_msat fees)
{
	send_coin_mvt(take(new_coin_chain_fees(NULL, NULL, txid,
					       blockheight, fees)));
}

static void update_ledger_chain_fees(const struct bitcoin_txid *txid,
				     u32 blockheight,
				     struct amount_sat fees)
{
	struct chain_coin_mvt *mvt;
	mvt = new_coin_chain_fees_sat(NULL, NULL, txid, blockheight, fees);

	send_coin_mvt(take(mvt));
}

/* Log the fees paid on this transaction as 'chain fees'.
note that
 * you *cannot* pass a chaintopology-originated tx to this method,
 * as they don't have input amounts populated */
static struct amount_sat record_chain_fees_tx(const struct bitcoin_txid *txid,
					      const struct bitcoin_tx *tx,
					      u32 blockheight)
{
	struct amount_sat fees;
	fees = bitcoin_tx_compute_fee(tx);
	status_debug("recording chain fees for tx %s",
		     type_to_string(tmpctx, struct bitcoin_txid, txid));
	update_ledger_chain_fees(txid, blockheight, fees);
	return fees;
}

/* Saturating-checked addition; any overflow is a fatal internal error. */
static void add_amt(struct amount_sat *sum, struct amount_sat amt)
{
	if (!amount_sat_add(sum, *sum, amt))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to add %s to %s",
			      type_to_string(tmpctx, struct amount_sat,
					     &amt),
			      type_to_string(tmpctx, struct amount_sat,
					     sum));
}

static void record_mutual_closure(const struct bitcoin_txid *txid,
				  u32 blockheight,
				  struct amount_sat our_out,
				  int output_num)
{
	struct amount_msat chain_fees, output_msat;

	/* First figure out 'fees' we paid on this will include
	 *   - 'residue' that can't fit onchain (< 1 sat)
	 *   - trimmed output, if our balance is < dust_limit
	 *   - fees paid for getting this tx mined
	 */
	if (!amount_sat_to_msat(&output_msat, our_out))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to convert %s to msat",
			      type_to_string(tmpctx, struct amount_sat,
					     &our_out));

	if (!amount_msat_sub(&chain_fees, our_msat, output_msat))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to subtract %s from %s",
			      type_to_string(tmpctx, struct amount_msat,
					     &output_msat),
			      type_to_string(tmpctx, struct amount_msat,
					     &our_msat));

	if (!amount_msat_eq(AMOUNT_MSAT(0), chain_fees))
		update_ledger_chain_fees_msat(txid, blockheight, chain_fees);

	/* If we have no output, we exit early */
	if (amount_msat_eq(AMOUNT_MSAT(0), output_msat))
		return;

	assert(output_num > -1);
	/* Otherwise, we record the channel withdrawal */
	send_coin_mvt(take(new_coin_withdrawal(NULL, NULL, txid, txid,
					       output_num, blockheight,
					       output_msat)));
}

static void record_chain_fees_unilateral(const struct bitcoin_txid *txid,
					 u32 blockheight,
					 struct amount_sat funding,
					 struct amount_sat their_outs,
					 struct amount_sat our_outs)
{
	struct amount_msat trimmed;
	status_debug("chain_movements...recording chain fees for unilateral."
		     " our msat balance %s, funding %s,"
		     " their_outs %s, our outs %s",
		     type_to_string(tmpctx, struct amount_msat, &our_msat),
		     type_to_string(tmpctx, struct amount_sat, &funding),
		     type_to_string(tmpctx, struct amount_sat, &their_outs),
		     type_to_string(tmpctx, struct amount_sat, &our_outs));

	/* It's possible they published a commitment tx that
	 * paid us an htlc before we updated our balance. It's also
	 * possible that they fulfilled an htlc, but we'll just write
	 * that down in the chain fees :/ */
	if (!amount_msat_greater_eq_sat(our_msat, our_outs)) {
		struct amount_msat missing;

		if (!amount_sat_sub_msat(&missing, our_outs, our_msat))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "unable to subtract %s from %s",
				      type_to_string(tmpctx, struct amount_msat,
						     &our_msat),
				      type_to_string(tmpctx, struct amount_sat,
						     &our_outs));

		/* Log the difference and update our_msat */
		send_coin_mvt(take(new_coin_journal_entry(NULL, NULL, txid,
							  NULL, 0, blockheight,
							  missing, true)));
		if (!amount_msat_add(&our_msat, our_msat, missing))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "unable to add %s to %s",
				      type_to_string(tmpctx, struct amount_msat,
						     &missing),
				      type_to_string(tmpctx, struct amount_msat,
						     &our_msat));
	}

	/* we need to figure out what we paid in fees, total.
	 * this encompasses the actual chain fees + any trimmed outputs */
	if (!amount_msat_sub_sat(&trimmed, our_msat, our_outs))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to subtract %s from %s",
			      type_to_string(tmpctx, struct amount_sat,
					     &our_outs),
			      type_to_string(tmpctx, struct amount_msat,
					     &our_msat));

	status_debug("logging 'chain fees' for unilateral (trimmed) %s",
		     type_to_string(tmpctx, struct amount_msat, &trimmed));
	update_ledger_chain_fees_msat(txid, blockheight, trimmed);
}

static void record_coin_loss(const struct bitcoin_txid *txid,
			     u32 blockheight,
			     struct tracked_output *out)
{
	struct chain_coin_mvt *mvt;
	/* We don't for sure know that it's a 'penalty'
	 * but we write it as that anyway... */
	mvt = new_coin_penalty_sat(NULL, NULL, txid, &out->txid,
				   out->outnum, blockheight, out->sat);

	send_coin_mvt(take(mvt));
}

static void record_channel_withdrawal_minus_fees(const struct bitcoin_txid *tx_txid,
						 struct tracked_output *out,
						 u32 blockheight,
						 struct amount_sat fees)
{
	struct amount_sat emitted_amt;

	if (!amount_sat_sub(&emitted_amt, out->sat, fees))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to subtract %s from %s",
			      type_to_string(tmpctx, struct amount_sat,
					     &fees),
			      type_to_string(tmpctx, struct amount_sat,
					     &out->sat));

	send_coin_mvt(take(new_coin_withdrawal_sat(
					NULL, NULL, tx_txid, &out->txid,
					out->outnum, blockheight,
					emitted_amt)));
}

static void record_channel_withdrawal(const struct bitcoin_txid *tx_txid,
				      u32 blockheight,
				      struct tracked_output *out)
{
	record_channel_withdrawal_minus_fees(tx_txid, out, blockheight,
					     AMOUNT_SAT(0));
}

static bool is_our_htlc_tx(struct tracked_output *out)
{
	return out->resolved &&
		(out->resolved->tx_type == OUR_HTLC_TIMEOUT_TX ||
		 out->resolved->tx_type == OUR_HTLC_SUCCESS_TX);
}

static bool is_channel_deposit(struct tracked_output *out)
{
	return out->resolved &&
		(out->resolved->tx_type == THEIR_HTLC_FULFILL_TO_US ||
		 out->resolved->tx_type == OUR_HTLC_SUCCESS_TX);
}

static void record_coin_movements(struct tracked_output *out,
				  u32 blockheight,
				  const struct bitcoin_tx *tx,
				  const struct bitcoin_txid *txid)
{
	struct amount_sat fees;
	/* there is a case where we've fulfilled an htlc onchain,
	 * in which case we log a deposit to the channel */
	if (is_channel_deposit(out))
		record_htlc_fulfilled(txid, out, blockheight, true);

	/* record fees paid for the tx here */
	/* FIXME: for now, every resolution generates its own tx,
	 * this will need to be updated if we switch to batching */
	fees = record_chain_fees_tx(txid, tx, blockheight);

	/* we don't record a channel withdrawal until we get to
	 * the 'exit' utxo, which for local commitment htlc txs
	 * is the child htlc_tx's output */
	if (!is_our_htlc_tx(out))
		record_channel_withdrawal_minus_fees(txid, out, blockheight,
						     fees);
}

/* We vary feerate until signature they offered matches. */
static bool grind_htlc_tx_fee(struct amount_sat *fee,
			      struct bitcoin_tx *tx,
			      const struct bitcoin_signature *remotesig,
			      const u8 *wscript,
			      u64 weight)
{
	struct amount_sat prev_fee = AMOUNT_SAT(UINT64_MAX), input_amt;
	input_amt = psbt_input_get_amount(tx->psbt, 0);

	for (u64 i = min_possible_feerate; i <= max_possible_feerate; i++) {
		/* BOLT #3:
		 *
		 * The fee for an HTLC-timeout transaction:
		 * - If `option_anchors_zero_fee_htlc_tx` applies:
		 *   1. MUST BE 0.
		 * - Otherwise, MUST BE calculated to match:
		 *   1. Multiply `feerate_per_kw` by 663
		 *      (666 if `option_anchor_outputs` applies)
		 *      and divide by 1000 (rounding down).
		 *
		 * The fee for an HTLC-success transaction:
		 * - If `option_anchors_zero_fee_htlc_tx` applies:
		 *   1. MUST BE 0.
		 * - MUST BE calculated to match:
		 *   1. Multiply `feerate_per_kw` by 703
		 *      (706 if `option_anchor_outputs` applies)
		 *      and divide by 1000 (rounding down).
		 */
		struct amount_sat out;

		*fee = amount_tx_fee(i, weight);

		/* Minor optimization: don't check same fee twice */
		if (amount_sat_eq(*fee, prev_fee))
			continue;

		prev_fee = *fee;
		if (!amount_sat_sub(&out, input_amt, *fee))
			break;

		bitcoin_tx_output_set_amount(tx, 0, out);
		bitcoin_tx_finalize(tx);
		if (!check_tx_sig(tx, 0, NULL, wscript,
				  &keyset->other_htlc_key, remotesig))
			continue;

		status_debug("grind feerate_per_kw for %"PRIu64" = %"PRIu64,
			     weight, i);
		return true;
	}
	return false;
}

/* NOTE(review): `fee` is a static cache shared across calls (the grind is
 * expensive); `amount` does not need to be static but is declared in the
 * same static declaration. */
static bool set_htlc_timeout_fee(struct bitcoin_tx *tx,
				 const struct bitcoin_signature *remotesig,
				 const u8 *wscript)
{
	static struct amount_sat amount, fee = AMOUNT_SAT_INIT(UINT64_MAX);
	struct amount_asset asset = bitcoin_tx_output_get_amount(tx, 0);
	size_t weight;

	/* BOLT #3:
	 *
	 * The fee for an HTLC-timeout transaction:
	 * - If `option_anchors_zero_fee_htlc_tx` applies:
	 *   1. MUST BE 0.
	 * - Otherwise, MUST BE calculated to match:
	 *   1. Multiply `feerate_per_kw` by 663 (666 if `option_anchor_outputs`
	 *      applies) and divide by 1000 (rounding down).
	 */
	if (option_anchor_outputs)
		weight = 666;
	else
		weight = 663;
	weight = elements_add_overhead(weight, tx->wtx->num_inputs,
				       tx->wtx->num_outputs);

	assert(amount_asset_is_main(&asset));
	amount = amount_asset_to_sat(&asset);
	if (amount_sat_eq(fee, AMOUNT_SAT(UINT64_MAX))) {
		struct amount_sat grindfee;
		if (grind_htlc_tx_fee(&grindfee, tx, remotesig, wscript, weight)) {
			/* Cache this for next time */
			fee = grindfee;
			return true;
		}
		return false;
	}

	if (!amount_sat_sub(&amount, amount, fee))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Cannot deduct htlc-timeout fee %s from tx %s",
			      type_to_string(tmpctx, struct amount_sat, &fee),
			      type_to_string(tmpctx, struct bitcoin_tx, tx));

	bitcoin_tx_output_set_amount(tx, 0, amount);
	bitcoin_tx_finalize(tx);
	return check_tx_sig(tx, 0, NULL, wscript,
			    &keyset->other_htlc_key, remotesig);
}

static void set_htlc_success_fee(struct bitcoin_tx *tx,
				 const struct bitcoin_signature *remotesig,
				 const u8 *wscript)
{
	static struct amount_sat amt, fee = AMOUNT_SAT_INIT(UINT64_MAX);
	struct amount_asset asset;
	size_t weight;

	/* BOLT #3:
	 *
	 * The fee for an HTLC-success transaction:
	 * - If `option_anchors_zero_fee_htlc_tx` applies:
	 *   1. MUST BE 0.
	 * - MUST BE calculated to match:
	 *   1. Multiply `feerate_per_kw` by 703 (706 if `option_anchor_outputs`
	 *      applies) and divide by 1000 (rounding down).
	 */
	if (option_anchor_outputs)
		weight = 706;
	else
		weight = 703;
	weight = elements_add_overhead(weight, tx->wtx->num_inputs,
				       tx->wtx->num_outputs);

	if (amount_sat_eq(fee, AMOUNT_SAT(UINT64_MAX))) {
		if (!grind_htlc_tx_fee(&fee, tx, remotesig, wscript, weight))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "htlc_success_fee can't be found "
				      "for tx %s, signature %s, wscript %s",
				      type_to_string(tmpctx, struct bitcoin_tx,
						     tx),
				      type_to_string(tmpctx,
						     struct bitcoin_signature,
						     remotesig),
				      tal_hex(tmpctx, wscript));
		return;
	}

	asset = bitcoin_tx_output_get_amount(tx, 0);
	assert(amount_asset_is_main(&asset));
	amt = amount_asset_to_sat(&asset);
	if (!amount_sat_sub(&amt, amt, fee))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Cannot deduct htlc-success fee %s from tx %s",
			      type_to_string(tmpctx, struct amount_sat, &fee),
			      type_to_string(tmpctx, struct bitcoin_tx, tx));
	bitcoin_tx_output_set_amount(tx, 0, amt);
	bitcoin_tx_finalize(tx);

	if (check_tx_sig(tx, 0, NULL, wscript,
			 &keyset->other_htlc_key, remotesig))
		return;

	status_failed(STATUS_FAIL_INTERNAL_ERROR,
		      "htlc_success_fee %s failed sigcheck "
		      " for tx %s, signature %s, wscript %s",
		      type_to_string(tmpctx, struct amount_sat, &fee),
		      type_to_string(tmpctx, struct bitcoin_tx, tx),
		      type_to_string(tmpctx, struct bitcoin_signature, remotesig),
		      tal_hex(tmpctx, wscript));
}

/* Map an enum tx_type to its generated name table entry. */
static const char *tx_type_name(enum tx_type tx_type)
{
	size_t i;

	for (i = 0; enum_tx_type_names[i].name; i++)
		if (enum_tx_type_names[i].v == tx_type)
			return enum_tx_type_names[i].name;
	return "unknown";
}

/* Map an enum output_type to its generated name table entry. */
static const char *output_type_name(enum output_type output_type)
{
	size_t i;

	for (i = 0; enum_output_type_names[i].name; i++)
		if (enum_output_type_names[i].v == output_type)
			return enum_output_type_names[i].name;
	return "unknown";
}

static u8 *delayed_payment_to_us(const tal_t *ctx,
				 struct bitcoin_tx *tx,
				 const u8 *wscript)
{
	return towire_hsmd_sign_delayed_payment_to_us(ctx, commit_num,
						      tx, wscript);
}

static u8 *remote_htlc_to_us(const tal_t *ctx,
			     struct bitcoin_tx *tx,
			     const u8 *wscript)
{
	return towire_hsmd_sign_remote_htlc_to_us(ctx,
						  remote_per_commitment_point,
						  tx, wscript,
						  option_anchor_outputs);
}

static u8 *penalty_to_us(const tal_t *ctx,
			 struct bitcoin_tx *tx,
			 const u8 *wscript)
{
	return towire_hsmd_sign_penalty_to_us(ctx, remote_per_commitment_secret,
					      tx, wscript);
}

/*
 * This covers:
 * 1. to-us output spend (`<local_delayedsig> 0`)
 * 2. the their-commitment, our HTLC timeout case (`<remotehtlcsig> 0`),
 * 3. the their-commitment, our HTLC redeem case (`<remotehtlcsig>
 *    <payment_preimage>`)
 * 4. the their-revoked-commitment, to-local (`<revocation_sig> 1`)
 * 5. the their-revoked-commitment, htlc (`<revocation_sig> <revocationkey>`)
 *
 * Overrides *tx_type if it all turns to dust.
 */
static struct bitcoin_tx *tx_to_us(const tal_t *ctx,
				   u8 *(*hsm_sign_msg)(const tal_t *ctx,
						       struct bitcoin_tx *tx,
						       const u8 *wscript),
				   struct tracked_output *out,
				   u32 to_self_delay,
				   u32 locktime,
				   const void *elem, size_t elemsize,
				   const u8 *wscript,
				   enum tx_type *tx_type,
				   u32 feerate)
{
	struct bitcoin_tx *tx;
	struct amount_sat fee, min_out, amt;
	struct bitcoin_signature sig;
	size_t weight;
	u8 *msg;
	u8 **witness;

	tx = bitcoin_tx(ctx, chainparams, 1, 1, locktime);
	bitcoin_tx_add_input(tx, &out->txid, out->outnum, to_self_delay,
			     NULL, out->sat, NULL, wscript);

	bitcoin_tx_add_output(
	    tx, scriptpubkey_p2wpkh(tx, &our_wallet_pubkey), NULL, out->sat);

	/* Worst-case sig is 73 bytes */
	weight = bitcoin_tx_weight(tx) + 1 + 3 + 73 + 0 + tal_count(wscript);
	weight = elements_add_overhead(weight, 1, 1);
	fee = amount_tx_fee(feerate, weight);

	/* Result is trivial?  Spend with small feerate, but don't wait
	 * around for it as it might not confirm. */
	if (!amount_sat_add(&min_out, dust_limit, fee))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Cannot add dust_limit %s and fee %s",
			      type_to_string(tmpctx, struct amount_sat, &dust_limit),
			      type_to_string(tmpctx, struct amount_sat, &fee));

	if (amount_sat_less(out->sat, min_out)) {
		/* FIXME: We should use SIGHASH_NONE so others can take it */
		fee = amount_tx_fee(feerate_floor(), weight);
		status_unusual("TX %s amount %s too small to"
			       " pay reasonable fee, using minimal fee"
			       " and ignoring",
			       tx_type_name(*tx_type),
			       type_to_string(tmpctx, struct amount_sat, &out->sat));
		*tx_type = IGNORING_TINY_PAYMENT;
	}

	/* This can only happen if feerate_floor() is still too high; shouldn't
	 * happen! */
	if (!amount_sat_sub(&amt, out->sat, fee)) {
		amt = dust_limit;
		status_broken("TX %s can't afford minimal feerate"
			      "; setting output to %s",
			      tx_type_name(*tx_type),
			      type_to_string(tmpctx, struct amount_sat,
					     &amt));
	}
	bitcoin_tx_output_set_amount(tx, 0, amt);
	bitcoin_tx_finalize(tx);

	if (!wire_sync_write(HSM_FD, take(hsm_sign_msg(NULL, tx, wscript))))
		status_failed(STATUS_FAIL_HSM_IO, "Writing sign request to hsm");
	msg = wire_sync_read(tmpctx, HSM_FD);
	if (!msg || !fromwire_hsmd_sign_tx_reply(msg, &sig)) {
		status_failed(STATUS_FAIL_HSM_IO,
			      "Reading sign_tx_reply: %s",
			      tal_hex(tmpctx, msg));
	}

	witness = bitcoin_witness_sig_and_element(tx, &sig, elem,
						  elemsize, wscript);
	bitcoin_tx_input_set_witness(tx, 0, take(witness));

	return tx;
}

/** replace_penalty_tx_to_us
 *
 * @brief creates a replacement TX for
 * a given penalty tx.
 *
 * @param ctx - the context to allocate
 * off of.
 * @param hsm_sign_msg - function to construct
 * the signing message to HSM.
 * @param penalty_tx - the original
 * penalty transaction.
 * @param output_amount - the output
 * amount to use instead of the
 * original penalty transaction.
 * If this amount is below the dust
 * limit, the output is replaced with
 * an `OP_RETURN` instead.
 *
 * @return the signed transaction.
 */
static struct bitcoin_tx *
replace_penalty_tx_to_us(const tal_t *ctx,
			 u8 *(*hsm_sign_msg)(const tal_t *ctx,
					     struct bitcoin_tx *tx,
					     const u8 *wscript),
			 const struct bitcoin_tx *penalty_tx,
			 struct amount_sat output_amount)
{
	struct bitcoin_tx *tx;

	/* The penalty tx input.  */
	const struct wally_tx_input *input;
	/* Specs of the penalty tx input.  */
	struct bitcoin_txid input_txid;
	u32 input_outnum;
	u8 *input_wscript;
	u8 *input_element;
	struct amount_sat input_amount;

	/* Signature from the HSM.  */
	u8 *msg;
	struct bitcoin_signature sig;
	/* Witness we generate from the signature and other data.  */
	u8 **witness;

	/* Get the single input of the penalty tx.  */
	input = &penalty_tx->wtx->inputs[0];
	/* Extract the input-side data.  */
	bitcoin_tx_input_get_txid(penalty_tx, 0, &input_txid);
	input_outnum = input->index;
	/* NOTE(review): witness layout [sig, element, wscript] is assumed
	 * here (items[1] = element, items[2] = wscript) — this matches the
	 * witness built by bitcoin_witness_sig_and_element(). */
	input_wscript = tal_dup_arr(tmpctx, u8,
				    input->witness->items[2].witness,
				    input->witness->items[2].witness_len,
				    0);
	input_element = tal_dup_arr(tmpctx, u8,
				    input->witness->items[1].witness,
				    input->witness->items[1].witness_len,
				    0);
	input_amount = psbt_input_get_amount(penalty_tx->psbt, 0);

	/* Create the replacement.  */
	tx = bitcoin_tx(ctx, chainparams, 1, 1, /*locktime*/ 0);

	/* Reconstruct the input.  */
	bitcoin_tx_add_input(tx, &input_txid, input_outnum,
			     BITCOIN_TX_RBF_SEQUENCE, NULL,
			     input_amount, NULL, input_wscript);

	/* Reconstruct the output with a smaller amount.  */
	if (amount_sat_greater(output_amount, dust_limit))
		bitcoin_tx_add_output(tx,
				      scriptpubkey_p2wpkh(tx,
							  &our_wallet_pubkey),
				      NULL,
				      output_amount);
	else
		bitcoin_tx_add_output(tx,
				      scriptpubkey_opreturn_padded(tx),
				      NULL,
				      AMOUNT_SAT(0));

	/* Finalize the transaction.  */
	bitcoin_tx_finalize(tx);

	/* Ask HSM to sign it. */
	if (!wire_sync_write(HSM_FD, take(hsm_sign_msg(NULL, tx,
						       input_wscript))))
		status_failed(STATUS_FAIL_HSM_IO,
			      "While feebumping penalty: writing sign request to hsm");
	msg = wire_sync_read(tmpctx, HSM_FD);
	if (!msg || !fromwire_hsmd_sign_tx_reply(msg, &sig))
		status_failed(STATUS_FAIL_HSM_IO,
			      "While feebumping penalty: reading sign_tx_reply: %s",
			      tal_hex(tmpctx, msg));

	/* Install the witness with the signature.  */
	witness = bitcoin_witness_sig_and_element(tx, &sig,
						  input_element,
						  tal_bytelen(input_element),
						  input_wscript);
	bitcoin_tx_input_set_witness(tx, 0, take(witness));

	return tx;
}

/** min_rbf_bump
 *
 * @brief computes the minimum RBF bump required by
 * BIP125, given an index.
 *
 * @desc BIP125 requires that an replacement transaction
 * pay, not just the fee of the evicted transactions,
 * but also the minimum relay fee for itself.
 * This function assumes that previous RBF attempts
 * paid exactly the return value for that attempt, on
 * top of the initial transaction fee.
 * It can serve as a baseline for other functions that
 * compute a suggested fee: get whichever is higher,
 * the fee this function suggests, or your own unique
 * function.
 *
 * This function is provided as a common function that
 * all RBF-bump computations can use.
 *
 * @param weight - the weight of the transaction you
 * are RBFing.
 * @param index - 0 makes no sense, 1 means this is
 * the first RBF attempt, 2 means this is the 2nd
 * RBF attempt, etc.
 *
 * @return the suggested total fee.
 */
static struct amount_sat min_rbf_bump(size_t weight,
				      size_t index)
{
	struct amount_sat min_relay_fee;
	struct amount_sat min_rbf_bump;

	/* Compute the minimum relay fee for a transaction of the given
	 * weight.  */
	min_relay_fee = amount_tx_fee(min_relay_feerate, weight);

	/* For every RBF attempt, we add the min-relay-fee.
	 * Or in other words, we multiply the min-relay-fee by the
	 * index number of the attempt.
	 */
	if (mul_overflows_u64(index, min_relay_fee.satoshis)) /* Raw: multiplication.
*/ min_rbf_bump = AMOUNT_SAT(UINT64_MAX); else min_rbf_bump.satoshis = index * min_relay_fee.satoshis; /* Raw: multiplication. */ return min_rbf_bump; } /** compute_penalty_output_amount * * @brief computes the appropriate output amount for a * penalty transaction that spends a theft transaction * that is already of a specific depth. * * @param initial_amount - the outout amount of the first * penalty transaction. * @param depth - the current depth of the theft * transaction. * @param max_depth - the maximum depth of the theft * transaction, after which the theft transaction will * succeed. * @param weight - the weight of the first penalty * transaction, in Sipa. */ static struct amount_sat compute_penalty_output_amount(struct amount_sat initial_amount, u32 depth, u32 max_depth, size_t weight) { struct amount_sat max_output_amount; struct amount_sat output_amount; struct amount_sat deducted_amount; assert(depth <= max_depth); assert(depth > 0); /* The difference between initial_amount, and the fee suggested * by min_rbf_bump, is the largest allowed output amount. * * depth = 1 is the first attempt, thus maps to the 0th RBF * (i.e. the initial attempt that is not RBFed itself). * We actually start to replace at depth = 2, so we use * depth - 1 as the index for min_rbf_bump. */ if (!amount_sat_sub(&max_output_amount, initial_amount, min_rbf_bump(weight, depth - 1))) /* If min_rbf_bump is larger than the initial_amount, * we should just donate the whole output as fee, * meaning we get 0 output amount. */ return AMOUNT_SAT(0); /* Map the depth / max_depth into a number between 0->1. */ double x = (double) depth / (double) max_depth; /* Get the cube of the above position, resulting in a graph * where the y is close to 0 up to less than halfway through, * then quickly rises up to 1 as depth nears the max depth. */ double y = x * x * x; /* Scale according to the initial_amount. */ deducted_amount.satoshis = (u64) (y * initial_amount.satoshis); /* Raw: multiplication. 
*/ /* output_amount = initial_amount - deducted_amount. */ if (!amount_sat_sub(&output_amount, initial_amount, deducted_amount)) /* If underflow, force to 0. */ output_amount = AMOUNT_SAT(0); /* If output exceeds max, return max. */ if (amount_sat_less(max_output_amount, output_amount)) return max_output_amount; return output_amount; } static void hsm_sign_local_htlc_tx(struct bitcoin_tx *tx, const u8 *wscript, struct bitcoin_signature *sig) { u8 *msg = towire_hsmd_sign_local_htlc_tx(NULL, commit_num, tx, wscript, option_anchor_outputs); if (!wire_sync_write(HSM_FD, take(msg))) status_failed(STATUS_FAIL_HSM_IO, "Writing sign_local_htlc_tx to hsm"); msg = wire_sync_read(tmpctx, HSM_FD); if (!msg || !fromwire_hsmd_sign_tx_reply(msg, sig)) status_failed(STATUS_FAIL_HSM_IO, "Reading sign_local_htlc_tx: %s", tal_hex(tmpctx, msg)); } static void hsm_get_per_commitment_point(struct pubkey *per_commitment_point) { u8 *msg = towire_hsmd_get_per_commitment_point(NULL, commit_num); struct secret *unused; if (!wire_sync_write(HSM_FD, take(msg))) status_failed(STATUS_FAIL_HSM_IO, "Writing sign_htlc_tx to hsm"); msg = wire_sync_read(tmpctx, HSM_FD); if (!msg || !fromwire_hsmd_get_per_commitment_point_reply(tmpctx, msg, per_commitment_point, &unused)) status_failed(STATUS_FAIL_HSM_IO, "Reading hsm_get_per_commitment_point_reply: %s", tal_hex(tmpctx, msg)); } static struct tracked_output * new_tracked_output(struct tracked_output ***outs, const struct bitcoin_txid *txid, u32 tx_blockheight, enum tx_type tx_type, u32 outnum, struct amount_sat sat, enum output_type output_type, const struct htlc_stub *htlc, const u8 *wscript, const struct bitcoin_signature *remote_htlc_sig TAKES) { struct tracked_output *out = tal(*outs, struct tracked_output); status_debug("Tracking output %u of %s: %s/%s", outnum, type_to_string(tmpctx, struct bitcoin_txid, txid), tx_type_name(tx_type), output_type_name(output_type)); out->tx_type = tx_type; out->txid = *txid; out->tx_blockheight = tx_blockheight; 
out->depth = 0; out->outnum = outnum; out->sat = sat; out->output_type = output_type; out->proposal = NULL; out->resolved = NULL; if (htlc) out->htlc = *htlc; out->wscript = tal_steal(out, wscript); if (remote_htlc_sig) out->remote_htlc_sig = tal_dup(out, struct bitcoin_signature, remote_htlc_sig); else out->remote_htlc_sig = NULL; tal_arr_expand(outs, out); return out; } static void ignore_output(struct tracked_output *out) { status_debug("Ignoring output %u of %s: %s/%s", out->outnum, type_to_string(tmpctx, struct bitcoin_txid, &out->txid), tx_type_name(out->tx_type), output_type_name(out->output_type)); out->resolved = tal(out, struct resolution); out->resolved->txid = out->txid; out->resolved->depth = 0; out->resolved->tx_type = SELF; } static enum wallet_tx_type onchain_txtype_to_wallet_txtype(enum tx_type t) { switch (t) { case FUNDING_TRANSACTION: return TX_CHANNEL_FUNDING; case MUTUAL_CLOSE: return TX_CHANNEL_CLOSE; case OUR_UNILATERAL: return TX_CHANNEL_UNILATERAL; case THEIR_HTLC_FULFILL_TO_US: case OUR_HTLC_SUCCESS_TX: return TX_CHANNEL_HTLC_SUCCESS; case OUR_HTLC_TIMEOUT_TO_US: case OUR_HTLC_TIMEOUT_TX: return TX_CHANNEL_HTLC_TIMEOUT; case OUR_DELAYED_RETURN_TO_WALLET: case SELF: return TX_CHANNEL_SWEEP; case OUR_PENALTY_TX: return TX_CHANNEL_PENALTY; case THEIR_DELAYED_CHEAT: return TX_CHANNEL_CHEAT | TX_THEIRS; case THEIR_UNILATERAL: case UNKNOWN_UNILATERAL: case THEIR_REVOKED_UNILATERAL: return TX_CHANNEL_UNILATERAL | TX_THEIRS; case THEIR_HTLC_TIMEOUT_TO_THEM: return TX_CHANNEL_HTLC_TIMEOUT | TX_THEIRS; case OUR_HTLC_FULFILL_TO_THEM: return TX_CHANNEL_HTLC_SUCCESS | TX_THEIRS; case IGNORING_TINY_PAYMENT: case UNKNOWN_TXTYPE: return TX_UNKNOWN; } abort(); } /** proposal_is_rbfable * * @brief returns true if the given proposal * would be RBFed if the output it is tracking * increases in depth without being spent. 
 */
static bool proposal_is_rbfable(const struct proposed_resolution *proposal)
{
	/* Future onchain resolutions, such as anchored commitments, might
	 * want to RBF as well.
	 */
	return proposal->tx_type == OUR_PENALTY_TX;
}

/** proposal_should_rbf
 *
 * @brief the given output just increased its depth,
 * so the proposal for it should be RBFed and
 * rebroadcast.
 *
 * @desc precondition: the given output must have an
 * rbfable proposal as per `proposal_is_rbfable`.
 */
static void proposal_should_rbf(struct tracked_output *out, bool is_replay)
{
	struct bitcoin_tx *tx = NULL;
	u32 depth;

	assert(out->proposal);
	assert(proposal_is_rbfable(out->proposal));

	depth = out->depth;

	/* Do not RBF at depth 1.
	 *
	 * Since we react to *onchain* events, whatever proposal we made,
	 * the output for that proposal is already at depth 1.
	 *
	 * Since our initial proposal was broadcasted with the output at
	 * depth 1, we should not RBF until a new block arrives, which is
	 * at depth 2.
	 */
	if (depth <= 1)
		return;

	if (out->proposal->tx_type == OUR_PENALTY_TX) {
		/* Deadline for the penalty: the thief wins once their
		 * to_self delay expires.
		 * NOTE(review): max_depth comes from to_self_delay[REMOTE]
		 * but the status_debug below prints to_self_delay[LOCAL]
		 * (and raw depth, not the clamped my_depth) — the log may
		 * not match the values actually used; confirm which side's
		 * delay applies here. */
		u32 max_depth = to_self_delay[REMOTE];
		u32 my_depth = depth;
		size_t weight = bitcoin_tx_weight(out->proposal->tx);
		struct amount_sat initial_amount;
		struct amount_sat new_amount;

		/* Clamp: the last usable depth is max_depth - 1, and we
		 * never let my_depth exceed max_depth. */
		if (max_depth >= 1)
			max_depth -= 1;
		if (my_depth >= max_depth)
			my_depth = max_depth;

		bitcoin_tx_output_get_amount_sat(out->proposal->tx, 0,
						 &initial_amount);

		/* Compute the new output amount for the RBF. */
		new_amount = compute_penalty_output_amount(initial_amount,
							   my_depth, max_depth,
							   weight);
		assert(amount_sat_less_eq(new_amount, initial_amount));
		/* Recreate the penalty tx. */
		tx = replace_penalty_tx_to_us(tmpctx,
					      &penalty_to_us,
					      out->proposal->tx, new_amount);

		status_debug("Created RBF OUR_PENALTY_TX with output %s "
			     "(originally %s) for depth %"PRIu32"/%"PRIu32".",
			     type_to_string(tmpctx, struct amount_sat,
					    &new_amount),
			     type_to_string(tmpctx, struct amount_sat,
					    &initial_amount),
			     depth, to_self_delay[LOCAL]);
	}
	/* Add other RBF-able proposals here.  */

	/* Broadcast the replacement transaction (is_rbf=true tells the
	 * master this replaces a previous broadcast). */
	if (tx) {
		enum wallet_tx_type wtt;

		status_debug("Broadcasting RBF %s (%s) to resolve %s/%s "
			     "depth=%"PRIu32"",
			     tx_type_name(out->proposal->tx_type),
			     type_to_string(tmpctx, struct bitcoin_tx, tx),
			     tx_type_name(out->tx_type),
			     output_type_name(out->output_type),
			     depth);

		wtt = onchain_txtype_to_wallet_txtype(out->proposal->tx_type);
		wire_sync_write(REQ_FD,
				take(towire_onchaind_broadcast_tx(NULL, tx,
								  wtt, true)));
	}
}

/* The output's proposal has reached its required depth: broadcast the
 * proposed tx (or resolve by ignoring if there is no tx). */
static void proposal_meets_depth(struct tracked_output *out, bool is_replay)
{
	bool is_rbf = false;

	/* If we simply wanted to ignore it after some depth */
	if (!out->proposal->tx) {
		ignore_output(out);
		return;
	}

	status_debug("Broadcasting %s (%s) to resolve %s/%s",
		     tx_type_name(out->proposal->tx_type),
		     type_to_string(tmpctx, struct bitcoin_tx,
				    out->proposal->tx),
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type));

	if (out->proposal)
		/* Our own penalty transactions are going to be RBFed. */
		is_rbf = proposal_is_rbfable(out->proposal);

	wire_sync_write(
	    REQ_FD,
	    take(towire_onchaind_broadcast_tx(
		NULL, out->proposal->tx,
		onchain_txtype_to_wallet_txtype(out->proposal->tx_type),
		is_rbf)));

	/* Don't wait for this if we're ignoring the tiny payment. */
	if (out->proposal->tx_type == IGNORING_TINY_PAYMENT) {
		struct bitcoin_txid txid;
		struct amount_sat fees;

		ignore_output(out);
		if (!is_replay) {
			/* log the coin movements here, since we're not
			 * going to wait til we hear about it */
			bitcoin_txid(out->proposal->tx, &txid);
			fees = record_chain_fees_tx(&txid, out->proposal->tx,
						    0);
			record_channel_withdrawal_minus_fees(&txid, out, 0,
							     fees);
		}
	}

	/* Otherwise we will get a callback when it's in a block. */
}

/* Record how we intend to resolve this output: by broadcasting tx (or by
 * ignoring, when tx is NULL) once it reaches depth_required.  Depth 0
 * means act immediately. */
static void propose_resolution(struct tracked_output *out,
			       const struct bitcoin_tx *tx,
			       unsigned int depth_required,
			       enum tx_type tx_type,
			       bool is_replay)
{
	status_debug("Propose handling %s/%s by %s (%s) after %u blocks",
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type),
		     tx_type_name(tx_type),
		     tx ? type_to_string(tmpctx, struct bitcoin_tx, tx):"IGNORING",
		     depth_required);

	out->proposal = tal(out, struct proposed_resolution);
	out->proposal->tx = tal_steal(out->proposal, tx);
	out->proposal->depth_required = depth_required;
	out->proposal->tx_type = tx_type;

	if (depth_required == 0)
		proposal_meets_depth(out, is_replay);
}

/* Like propose_resolution, but expressed as an absolute block height
 * instead of a relative depth. */
static void propose_resolution_at_block(struct tracked_output *out,
					const struct bitcoin_tx *tx,
					unsigned int block_required,
					enum tx_type tx_type,
					bool is_replay)
{
	u32 depth;

	/* Expiry could be in the past! */
	if (block_required < out->tx_blockheight)
		depth = 0;
	else /* Note that out->tx_blockheight is already at depth 1 */
		depth = block_required - out->tx_blockheight + 1;
	propose_resolution(out, tx, depth, tx_type, is_replay);
}

/* True if e parses as a DER-encoded bitcoin signature. */
static bool is_valid_sig(const u8 *e)
{
	struct bitcoin_signature sig;
	return signature_from_der(e, tal_count(e), &sig);
}

/* We ignore things which look like signatures. */
static bool input_similar(const struct wally_tx_input *i1,
			  const struct wally_tx_input *i2)
{
	u8 *s1, *s2;

	if (!memeq(i1->txhash, WALLY_TXHASH_LEN, i2->txhash, WALLY_TXHASH_LEN))
		return false;

	if (i1->index != i2->index)
		return false;

	if (!scripteq(i1->script, i2->script))
		return false;

	if (i1->sequence != i2->sequence)
		return false;

	if (i1->witness->num_items != i2->witness->num_items)
		return false;

	for (size_t i = 0; i < i1->witness->num_items; i++) {
		/* Need to wrap these in `tal_arr`s since the primitives
		 * expect to be able to call tal_bytelen on them */
		s1 = tal_dup_arr(tmpctx, u8, i1->witness->items[i].witness,
				 i1->witness->items[i].witness_len, 0);
		s2 = tal_dup_arr(tmpctx, u8, i2->witness->items[i].witness,
				 i2->witness->items[i].witness_len, 0);

		if (scripteq(s1, s2))
			continue;

		/* Witness items differ, but if both are valid signatures
		 * we treat them as "similar" (signatures change when the
		 * tx is re-signed at a different feerate). */
		if (is_valid_sig(s1) && is_valid_sig(s2))
			continue;
		return false;
	}

	return true;
}

/* This simple case: true if this was resolved by our proposal.
 */
static bool resolved_by_proposal(struct tracked_output *out,
				 const struct tx_parts *tx_parts)
{
	/* If there's no TX associated, it's not us. */
	if (!out->proposal->tx)
		return false;

	/* Our proposal can change as feerates change. Input
	 * comparison (ignoring signatures) works pretty well. */
	if (tal_count(tx_parts->inputs) != out->proposal->tx->wtx->num_inputs)
		return false;

	for (size_t i = 0; i < tal_count(tx_parts->inputs); i++) {
		if (!input_similar(tx_parts->inputs[i],
				   &out->proposal->tx->wtx->inputs[i]))
			return false;
	}

	out->resolved = tal(out, struct resolution);
	out->resolved->txid = tx_parts->txid;
	status_debug("Resolved %s/%s by our proposal %s (%s)",
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type),
		     tx_type_name(out->proposal->tx_type),
		     type_to_string(tmpctx, struct bitcoin_txid,
				    &out->resolved->txid));
	out->resolved->depth = 0;
	out->resolved->tx_type = out->proposal->tx_type;
	return true;
}

/* Otherwise, we figure out what happened and then call this. */
static void resolved_by_other(struct tracked_output *out,
			      const struct bitcoin_txid *txid,
			      enum tx_type tx_type)
{
	out->resolved = tal(out, struct resolution);
	out->resolved->txid = *txid;
	out->resolved->depth = 0;
	out->resolved->tx_type = tx_type;

	status_debug("Resolved %s/%s by %s (%s)",
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type),
		     tx_type_name(tx_type),
		     type_to_string(tmpctx, struct bitcoin_txid, txid));
}

/* Record a spend we cannot classify; marked UNKNOWN_TXTYPE and logged
 * loudly, since this should not happen in normal operation. */
static void unknown_spend(struct tracked_output *out,
			  const struct tx_parts *tx_parts)
{
	out->resolved = tal(out, struct resolution);
	out->resolved->txid = tx_parts->txid;
	out->resolved->depth = 0;
	out->resolved->tx_type = UNKNOWN_TXTYPE;

	status_broken("Unknown spend of %s/%s by %s",
		      tx_type_name(out->tx_type),
		      output_type_name(out->output_type),
		      type_to_string(tmpctx, struct bitcoin_txid,
				     &tx_parts->txid));
}

/* Recover the 48-bit commitment number hidden in the commitment tx's
 * locktime and input sequence fields (see BOLT #3). */
static u64 unmask_commit_number(const struct tx_parts *tx,
				uint32_t locktime,
				enum side opener,
				const struct pubkey *local_payment_basepoint,
				const struct pubkey *remote_payment_basepoint)
{
	u64 obscurer;
	const struct pubkey *keys[NUM_SIDES];
	keys[LOCAL] = local_payment_basepoint;
	keys[REMOTE] = remote_payment_basepoint;

	/* BOLT #3:
	 *
	 * The 48-bit commitment number is obscured by `XOR` with the lower
	 * 48 bits of...
	 */
	obscurer = commit_number_obscurer(keys[opener], keys[!opener]);

	/* BOLT #3:
	 *
	 * * locktime: upper 8 bits are 0x20, lower 24 bits are the lower
	 *   24 bits of the obscured commitment number
	 *...
	 * * `txin[0]` sequence: upper 8 bits are 0x80, lower 24 bits are
	 *   upper 24 bits of the obscured commitment number
	 */
	return ((locktime & 0x00FFFFFF)
		| (tx->inputs[0]->sequence & (u64)0x00FFFFFF) << 24)
		^ obscurer;
}

/* True iff every output of tx pays either our or their close script
 * (Elements fee outputs excepted).  Sets *local_outnum to our output's
 * index, or -1 if we have none. */
static bool is_mutual_close(const struct tx_parts *tx,
			    const u8 *local_scriptpubkey,
			    const u8 *remote_scriptpubkey,
			    int *local_outnum)
{
	size_t i;
	bool local_matched = false, remote_matched = false;

	*local_outnum = -1;
	for (i = 0; i < tal_count(tx->outputs); i++) {
		/* To be paranoid, we only let each one match once. */
		if (chainparams->is_elements &&
		    tx->outputs[i]->script_len == 0) {
			/* This is a fee output, ignore please */
			continue;
		} else if (wally_tx_output_scripteq(tx->outputs[i],
						    local_scriptpubkey)
			   && !local_matched) {
			*local_outnum = i;
			local_matched = true;
		} else if (wally_tx_output_scripteq(tx->outputs[i],
						    remote_scriptpubkey)
			   && !remote_matched)
			remote_matched = true;
		else
			return false;
	}

	return true;
}

/* We only ever send out one, so matching it is easy. */
static bool is_local_commitment(const struct bitcoin_txid *txid,
				const struct bitcoin_txid *our_broadcast_txid)
{
	return bitcoin_txid_eq(txid, our_broadcast_txid);
}

/* BOLT #5:
 *
 * Outputs that are *resolved* are considered *irrevocably resolved*
 * once the remote's *resolving* transaction is included in a block at least 100
 * deep, on the most-work blockchain.
 */
static size_t num_not_irrevocably_resolved(struct tracked_output **outs)
{
	size_t i, num = 0;

	for (i = 0; i < tal_count(outs); i++) {
		/* 100 confirmations = irrevocably resolved (BOLT #5). */
		if (!outs[i]->resolved || outs[i]->resolved->depth < 100)
			num++;
	}
	return num;
}

/* Absolute block height at which this output's proposal fires. */
static u32 prop_blockheight(const struct tracked_output *out)
{
	return out->tx_blockheight + out->proposal->depth_required;
}

/* Update the peer-visible status line: report the most urgent pending
 * proposal, else the shallowest resolution we are waiting out. */
static void billboard_update(struct tracked_output **outs)
{
	const struct tracked_output *best = NULL;

	/* Highest priority is to report on proposals we have */
	for (size_t i = 0; i < tal_count(outs); i++) {
		if (!outs[i]->proposal || outs[i]->resolved)
			continue;
		if (!best || prop_blockheight(outs[i]) < prop_blockheight(best))
			best = outs[i];
	}

	if (best) {
		/* If we've broadcast and not seen yet, this happens */
		if (best->proposal->depth_required <= best->depth) {
			peer_billboard(false,
				       "%u outputs unresolved: waiting confirmation that we spent %s (%s:%u) using %s",
				       num_not_irrevocably_resolved(outs),
				       output_type_name(best->output_type),
				       type_to_string(tmpctx,
						      struct bitcoin_txid,
						      &best->txid),
				       best->outnum,
				       tx_type_name(best->proposal->tx_type));
		} else {
			peer_billboard(false,
				       "%u outputs unresolved: in %u blocks will spend %s (%s:%u) using %s",
				       num_not_irrevocably_resolved(outs),
				       best->proposal->depth_required - best->depth,
				       output_type_name(best->output_type),
				       type_to_string(tmpctx,
						      struct bitcoin_txid,
						      &best->txid),
				       best->outnum,
				       tx_type_name(best->proposal->tx_type));
		}
		return;
	}

	/* Now, just report on the last thing we're waiting out. */
	for (size_t i = 0; i < tal_count(outs); i++) {
		/* FIXME: Can this happen?  No proposal, no resolution? */
		if (!outs[i]->resolved)
			continue;
		if (!best || outs[i]->resolved->depth < best->resolved->depth)
			best = outs[i];
	}

	if (best) {
		peer_billboard(false,
			       "All outputs resolved:"
			       " waiting %u more blocks before forgetting"
			       " channel",
			       best->resolved->depth < 100
			       ? 100 - best->resolved->depth : 0);
		return;
	}

	/* Not sure this can happen, but take last one (there must be one!) */
	best = outs[tal_count(outs)-1];
	peer_billboard(false, "%u outputs unresolved: %s is one (depth %u)",
		       num_not_irrevocably_resolved(outs),
		       output_type_name(best->output_type), best->depth);
}

/* Tell the master we no longer care about spends of this txid. */
static void unwatch_txid(const struct bitcoin_txid *txid)
{
	u8 *msg;

	msg = towire_onchaind_unwatch_tx(NULL, txid);
	wire_sync_write(REQ_FD, take(msg));
}

/* An HTLC output we offered has been spent via its payment preimage:
 * extract and verify the preimage from the spending witness, stash the
 * payment hash on the output, and forward the preimage to the master. */
static void handle_htlc_onchain_fulfill(struct tracked_output *out,
					const struct tx_parts *tx_parts)
{
	const struct wally_tx_witness_item *preimage_item;
	struct preimage preimage;
	struct sha256 sha;
	struct ripemd160 ripemd;

	/* Our HTLC, they filled (must be an HTLC-success tx). */
	if (out->tx_type == THEIR_UNILATERAL
	    || out->tx_type == THEIR_REVOKED_UNILATERAL) {
		/* BOLT #3:
		 *
		 * ## HTLC-Timeout and HTLC-Success Transactions
		 *
		 * ... `txin[0]` witness stack: `0 <remotehtlcsig> <localhtlcsig>
		 * <payment_preimage>` for HTLC-success
		 */
		if (tx_parts->inputs[0]->witness->num_items != 5) /* +1 for wscript */
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "%s/%s spent with weird witness %zu",
				      tx_type_name(out->tx_type),
				      output_type_name(out->output_type),
				      tx_parts->inputs[0]->witness->num_items);

		preimage_item = &tx_parts->inputs[0]->witness->items[3];
	} else if (out->tx_type == OUR_UNILATERAL) {
		/* BOLT #3:
		 *
		 * The remote node can redeem the HTLC with the witness:
		 *
		 *    <remotehtlcsig> <payment_preimage>
		 */
		if (tx_parts->inputs[0]->witness->num_items != 3) /* +1 for wscript */
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "%s/%s spent with weird witness %zu",
				      tx_type_name(out->tx_type),
				      output_type_name(out->output_type),
				      tx_parts->inputs[0]->witness->num_items);

		preimage_item = &tx_parts->inputs[0]->witness->items[1];
	} else
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "onchain_fulfill for %s/%s?",
			      tx_type_name(out->tx_type),
			      output_type_name(out->output_type));

	/* cppcheck-suppress uninitvar - doesn't know status_failed exits? */
	if (preimage_item->witness_len != sizeof(preimage)) {
		/* It's possible something terrible happened and we broadcast
		 * an old commitment state, which they're now cleaning up.
		 *
		 * We stumble along. */
		if (out->tx_type == OUR_UNILATERAL
		    && preimage_item->witness_len == PUBKEY_CMPR_LEN) {
			status_unusual("Our cheat attempt failed, they're "
				       "taking our htlc out (%s)",
				       type_to_string(tmpctx,
						      struct amount_sat,
						      &out->sat));
			return;
		}
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "%s/%s spent with bad witness length %zu",
			      tx_type_name(out->tx_type),
			      output_type_name(out->output_type),
			      preimage_item->witness_len);
	}
	memcpy(&preimage, preimage_item->witness, sizeof(preimage));
	/* HTLC scripts commit to RIPEMD160(SHA256(preimage)); verify the
	 * extracted preimage actually matches this output's hash. */
	sha256(&sha, &preimage, sizeof(preimage));
	ripemd160(&ripemd, &sha, sizeof(sha));
	if (!ripemd160_eq(&ripemd, &out->htlc.ripemd))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "%s/%s spent with bad preimage %s (ripemd not %s)",
			      tx_type_name(out->tx_type),
			      output_type_name(out->output_type),
			      type_to_string(tmpctx, struct preimage,
					     &preimage),
			      type_to_string(tmpctx, struct ripemd160,
					     &out->htlc.ripemd));

	/* we stash the payment_hash into the tracking_output so we
	 * can pass it along, if needbe, to the coin movement tracker */
	out->payment_hash = sha;

	/* Tell master we found a preimage. */
	status_debug("%s/%s gave us preimage %s",
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type),
		     type_to_string(tmpctx, struct preimage, &preimage));
	wire_sync_write(REQ_FD,
			take(towire_onchaind_extracted_preimage(NULL,
								&preimage)));
}

/* Our own HTLC tx confirmed: track its single (delayed) output and
 * propose sweeping it back to our wallet after to_self_delay. */
static void resolve_htlc_tx(struct tracked_output ***outs,
			    size_t out_index,
			    const struct tx_parts *htlc_tx,
			    u32 tx_blockheight,
			    bool is_replay)
{
	struct tracked_output *out;
	struct bitcoin_tx *tx;
	struct amount_sat amt;
	struct amount_asset asset;
	enum tx_type tx_type = OUR_DELAYED_RETURN_TO_WALLET;
	u8 *wscript = bitcoin_wscript_htlc_tx(htlc_tx, to_self_delay[LOCAL],
					      &keyset->self_revocation_key,
					      &keyset->self_delayed_payment_key);

	/* BOLT #5:
	 *
	 *       - SHOULD resolve the HTLC-timeout transaction by spending it to
	 *         a convenient address...
	 *       - MUST wait until the `OP_CHECKSEQUENCEVERIFY` delay has passed
	 *         (as specified by the remote node's `open_channel`
	 *         `to_self_delay` field) before spending that HTLC-timeout
	 *         output.
	 */
	asset = wally_tx_output_get_amount(htlc_tx->outputs[0]);
	assert(amount_asset_is_main(&asset));
	amt = amount_asset_to_sat(&asset);
	out = new_tracked_output(outs, &htlc_tx->txid, tx_blockheight,
				 (*outs)[out_index]->resolved->tx_type,
				 0, amt,
				 DELAYED_OUTPUT_TO_US,
				 NULL, NULL, NULL);

	/* BOLT #3:
	 *
	 * ## HTLC-Timeout and HTLC-Success Transactions
	 *
	 * These HTLC transactions are almost identical, except the
	 * HTLC-timeout transaction is timelocked.
	 *
	 * ... to collect the output, the local node uses an input with
	 * nSequence `to_self_delay` and a witness stack `<local_delayedsig>
	 * 0`
	 */
	tx = tx_to_us(*outs, delayed_payment_to_us, out, to_self_delay[LOCAL],
		      0, NULL, 0, wscript, &tx_type, htlc_feerate);

	propose_resolution(out, tx, to_self_delay[LOCAL], tx_type, is_replay);
}

/* BOLT #5:
 *
 *   - MUST *resolve* the _remote node's HTLC-timeout transaction_ by spending it
 *     using the revocation private key.
 *   - MUST *resolve* the _remote node's HTLC-success transaction_ by spending it
 *     using the revocation private key.
 */
static void steal_htlc_tx(struct tracked_output *out,
			  struct tracked_output ***outs,
			  const struct tx_parts *htlc_tx,
			  u32 htlc_tx_blockheight,
			  enum tx_type htlc_tx_type,
			  bool is_replay)
{
	struct bitcoin_tx *tx;
	enum tx_type tx_type = OUR_PENALTY_TX;
	struct tracked_output *htlc_out;
	struct amount_asset asset;
	struct amount_sat htlc_out_amt, fees;

	u8 *wscript = bitcoin_wscript_htlc_tx(htlc_tx, to_self_delay[LOCAL],
					      &keyset->self_revocation_key,
					      &keyset->self_delayed_payment_key);

	asset = wally_tx_output_get_amount(htlc_tx->outputs[0]);
	assert(amount_asset_is_main(&asset));
	htlc_out_amt = amount_asset_to_sat(&asset);

	htlc_out = new_tracked_output(outs,
				      &htlc_tx->txid, htlc_tx_blockheight,
				      htlc_tx_type,
				      /* htlc tx's only have 1 output */
				      0, htlc_out_amt,
				      DELAYED_CHEAT_OUTPUT_TO_THEM,
				      &out->htlc, wscript, NULL);
	/* BOLT #3:
	 *
	 * To spend this via penalty, the remote node uses a witness stack
	 * `<revocationsig> 1`
	 */
	tx = tx_to_us(htlc_out, penalty_to_us, htlc_out,
		      BITCOIN_TX_RBF_SEQUENCE, 0,
		      &ONE, sizeof(ONE),
		      htlc_out->wscript,
		      &tx_type, penalty_feerate);

	/* mark commitment tx htlc output as 'resolved by them' */
	resolved_by_other(out, &htlc_tx->txid, htlc_tx_type);

	/* for penalties, we record *any* chain fees
	 * paid as coming from our channel balance, so
	 * that our balance ends up at zero */
	if (!amount_sat_sub(&fees, out->sat, htlc_out->sat))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to subtract %s from %s",
			      type_to_string(tmpctx, struct amount_sat,
					     &htlc_out->sat),
			      type_to_string(tmpctx, struct amount_sat,
					     &out->sat));

	status_debug("recording chain fees for peer's htlc tx, that we're about to steal"
		     " the output of. fees: %s",
		     type_to_string(tmpctx, struct amount_sat, &fees));

	if (!is_replay)
		update_ledger_chain_fees(&htlc_tx->txid, htlc_tx_blockheight,
					 fees);

	/* annnd done! */
	propose_resolution(htlc_out, tx, 0, tx_type, is_replay);
}

/* Ask the master to annotate an output of txid in the wallet DB. */
static void onchain_annotate_txout(const struct bitcoin_txid *txid, u32 outnum,
				   enum wallet_tx_type type)
{
	wire_sync_write(REQ_FD, take(towire_onchaind_annotate_txout(
				    tmpctx, txid, outnum, type)));
}

/* Ask the master to annotate an input of txid in the wallet DB. */
static void onchain_annotate_txin(const struct bitcoin_txid *txid, u32 innum,
				  enum wallet_tx_type type)
{
	wire_sync_write(REQ_FD, take(towire_onchaind_annotate_txin(
				    tmpctx, txid, innum, type)));
}

/* An output has been spent: see if it resolves something we care about. */
static void output_spent(struct tracked_output ***outs,
			 const struct tx_parts *tx_parts,
			 u32 input_num,
			 u32 tx_blockheight,
			 bool is_replay)
{
	for (size_t i = 0; i < tal_count(*outs); i++) {
		struct tracked_output *out = (*outs)[i];
		if (out->resolved)
			continue;

		if (!wally_tx_input_spends(tx_parts->inputs[input_num],
					   &out->txid, out->outnum))
			continue;

		/* Was this our resolution? */
		if (resolved_by_proposal(out, tx_parts)) {
			/* If it's our htlc tx, we need to resolve that, too. */
			if (out->resolved->tx_type == OUR_HTLC_SUCCESS_TX
			    || out->resolved->tx_type == OUR_HTLC_TIMEOUT_TX)
				resolve_htlc_tx(outs, i, tx_parts,
						tx_blockheight, is_replay);

			if (!is_replay)
				record_coin_movements(out, tx_blockheight,
						      out->proposal->tx,
						      &tx_parts->txid);
			return;
		}

		/* Not our spend: classify by output type. */
		switch (out->output_type) {
		case OUTPUT_TO_US:
		case DELAYED_OUTPUT_TO_US:
			unknown_spend(out, tx_parts);
			if (!is_replay)
				record_coin_loss(&tx_parts->txid,
						 tx_blockheight, out);
			break;

		case THEIR_HTLC:
			if (out->tx_type == THEIR_REVOKED_UNILATERAL) {
				/* we've actually got a 'new' output here */
				steal_htlc_tx(out, outs, tx_parts,
					      tx_blockheight,
					      THEIR_HTLC_TIMEOUT_TO_THEM,
					      is_replay);
			} else {
				/* We ignore this timeout tx, since we should
				 * resolve by ignoring once we reach depth. */
				onchain_annotate_txout(
				    &tx_parts->txid, out->outnum,
				    TX_CHANNEL_HTLC_TIMEOUT | TX_THEIRS);
			}
			break;

		case OUR_HTLC:
			/* The only way they can spend this: fulfill; even
			 * if it's revoked: */
			/* BOLT #5:
			 *
			 * ## HTLC Output Handling: Local Commitment, Local Offers
			 *...
			 *    - MUST extract the payment preimage from the
			 *      transaction input witness.
			 *...
			 * ## HTLC Output Handling: Remote Commitment, Local Offers
			 *...
			 *     - MUST extract the payment preimage from the
			 *       HTLC-success transaction input witness.
			 */
			handle_htlc_onchain_fulfill(out, tx_parts);
			if (out->tx_type == THEIR_REVOKED_UNILATERAL) {
				steal_htlc_tx(out, outs, tx_parts,
					      tx_blockheight,
					      OUR_HTLC_FULFILL_TO_THEM,
					      is_replay);
			} else {
				/* BOLT #5:
				 *
				 * ## HTLC Output Handling: Local Commitment,
				 *    Local Offers
				 *...
				 *  - if the commitment transaction HTLC output
				 *    is spent using the payment preimage, the
				 *    output is considered *irrevocably resolved*
				 */
				ignore_output(out);

				if (!is_replay)
					record_htlc_fulfilled(&tx_parts->txid,
							      out,
							      tx_blockheight,
							      false);
				onchain_annotate_txout(
				    &tx_parts->txid, out->outnum,
				    TX_CHANNEL_HTLC_SUCCESS | TX_THEIRS);
			}
			break;

		case FUNDING_OUTPUT:
			/* Master should be restarting us, as this implies
			 * that our old tx was unspent. */
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Funding output spent again!");

		case DELAYED_CHEAT_OUTPUT_TO_THEM:
			/* They successfully spent a delayed revoked output */
			resolved_by_other(out, &tx_parts->txid,
					  THEIR_DELAYED_CHEAT);
			if (!is_replay)
				record_their_successful_cheat(&tx_parts->txid,
							      tx_blockheight,
							      out);
			break;

		/* Um, we don't track these! */
		case OUTPUT_TO_THEM:
		case DELAYED_OUTPUT_TO_THEM:
		case ELEMENTS_FEE:
		case ANCHOR_TO_US:
		case ANCHOR_TO_THEM:
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Tracked spend of %s/%s?",
				      tx_type_name(out->tx_type),
				      output_type_name(out->output_type));
		}
		return;
	}

	struct bitcoin_txid txid;
	wally_tx_input_get_txid(tx_parts->inputs[input_num], &txid);
	/* Not interesting to us, so unwatch the tx and all its outputs */
	status_debug("Notified about tx %s output %u spend, but we don't care",
		     type_to_string(tmpctx, struct bitcoin_txid, &txid),
		     tx_parts->inputs[input_num]->index);

	unwatch_txid(&tx_parts->txid);
}

/* A resolving tx gained a confirmation: record the new depth, and if an
 * HTLC-timeout resolution just reached reasonable depth, tell the master
 * to fail the corresponding incoming HTLC. */
static void update_resolution_depth(struct tracked_output *out, u32 depth)
{
	bool reached_reasonable_depth;

	status_debug("%s/%s->%s depth %u",
		     tx_type_name(out->tx_type),
		     output_type_name(out->output_type),
		     tx_type_name(out->resolved->tx_type),
		     depth);

	/* We only set this once: fire only on the crossing, not on every
	 * subsequent block. */
	reached_reasonable_depth = (out->resolved->depth < reasonable_depth
				    && depth >= reasonable_depth);

	/* BOLT #5:
	 *
	 *   - if the commitment transaction HTLC output has *timed out* and
	 *     hasn't been *resolved*:
	 *    - MUST *resolve* the output by spending it using the HTLC-timeout
	 *      transaction.
	 *    - once the resolving transaction has reached reasonable depth:
	 *      - MUST fail the corresponding incoming HTLC (if any).
	 */
	if ((out->resolved->tx_type == OUR_HTLC_TIMEOUT_TX
	     || out->resolved->tx_type == OUR_HTLC_TIMEOUT_TO_US)
	    && reached_reasonable_depth) {
		u8 *msg;
		status_debug("%s/%s reached reasonable depth %u",
			     tx_type_name(out->tx_type),
			     output_type_name(out->output_type),
			     depth);
		msg = towire_onchaind_htlc_timeout(out, &out->htlc);
		wire_sync_write(REQ_FD, take(msg));
	}
	out->resolved->depth = depth;
}

/* A watched txid reached a new depth: update tracked outputs, trigger
 * proposals whose required depth is met, and RBF rbfable proposals. */
static void tx_new_depth(struct tracked_output **outs,
			 const struct bitcoin_txid *txid, u32 depth,
			 bool is_replay)
{
	size_t i;

	/* Special handling for commitment tx reaching depth */
	if (bitcoin_txid_eq(&outs[0]->resolved->txid, txid)
	    && depth >= reasonable_depth
	    && missing_htlc_msgs) {
		status_debug("Sending %zu missing htlc messages",
			     tal_count(missing_htlc_msgs));
		for (i = 0; i < tal_count(missing_htlc_msgs); i++)
			wire_sync_write(REQ_FD, missing_htlc_msgs[i]);

		/* Don't do it again. */
		missing_htlc_msgs = tal_free(missing_htlc_msgs);
	}

	for (i = 0; i < tal_count(outs); i++) {
		/* Update output depth. */
		if (bitcoin_txid_eq(&outs[i]->txid, txid))
			outs[i]->depth = depth;

		/* Is this tx resolving an output? */
		if (outs[i]->resolved) {
			if (bitcoin_txid_eq(&outs[i]->resolved->txid, txid)) {
				update_resolution_depth(outs[i], depth);
			}
			continue;
		}

		/* Otherwise, is this something we have a pending
		 * resolution for? */
		if (outs[i]->proposal
		    && bitcoin_txid_eq(&outs[i]->txid, txid)
		    && depth >= outs[i]->proposal->depth_required) {
			proposal_meets_depth(outs[i], is_replay);
		}

		/* Otherwise, is this an output whose proposed resolution
		 * we should RBF?  */
		if (outs[i]->proposal
		    && bitcoin_txid_eq(&outs[i]->txid, txid)
		    && proposal_is_rbfable(outs[i]->proposal))
			proposal_should_rbf(outs[i], is_replay);
	}
}

/* BOLT #5:
 *
 * A local node:
 *  - if it receives (or already possesses) a payment preimage for an unresolved
 *   HTLC output that it has been offered AND for which it has committed to an
 *    outgoing HTLC:
 *    - MUST *resolve* the output by spending it, using the HTLC-success
 *      transaction.
 *    - MUST NOT reveal its own preimage when it's not the final recipient...
 *    - MUST resolve the output of that HTLC-success transaction.
 *  - otherwise:
 *    - if the *remote node* is NOT irrevocably committed to the HTLC:
 *      - MUST NOT *resolve* the output by spending it.
 *...
 * ## HTLC Output Handling: Remote Commitment, Remote Offers
 *...
 * A local node:
 *  - if it receives (or already possesses) a payment preimage for an unresolved
 *    HTLC output that it was offered AND for which it has committed to an
 *    outgoing HTLC:
 *    - MUST *resolve* the output by spending it to a convenient address.
 *  - otherwise:
 *    - if the remote node is NOT irrevocably committed to the HTLC:
 *      - MUST NOT *resolve* the output by spending it.
 */
/* Master makes sure we only get told preimages once other node is committed. */
/* Find the THEIR_HTLC output(s) whose ripemd160(sha256(preimage)) matches,
 * and propose a resolution spending them: via HTLC-success tx on our own
 * commitment (remote_htlc_sig present), else directly to our wallet. */
static void handle_preimage(struct tracked_output **outs,
			    const struct preimage *preimage,
			    bool is_replay)
{
	size_t i;
	struct sha256 sha;
	struct ripemd160 ripemd;
	u8 **witness;

	sha256(&sha, preimage, sizeof(*preimage));
	ripemd160(&ripemd, &sha, sizeof(sha));

	for (i = 0; i < tal_count(outs); i++) {
		struct bitcoin_tx *tx;
		struct bitcoin_signature sig;

		if (outs[i]->output_type != THEIR_HTLC)
			continue;

		if (!ripemd160_eq(&outs[i]->htlc.ripemd, &ripemd))
			continue;

		/* Too late? */
		if (outs[i]->resolved) {
			status_broken("HTLC already resolved by %s"
				      " when we found preimage",
				      tx_type_name(outs[i]->resolved->tx_type));
			return;
		}

		/* stash the payment_hash so we can track this coin movement */
		outs[i]->payment_hash = sha;

		/* Discard any previous resolution.  Could be a timeout,
		 * could be due to multiple identical rhashes in tx. */
		outs[i]->proposal = tal_free(outs[i]->proposal);

		/* BOLT #5:
		 *
		 *
		 * ## HTLC Output Handling: Local Commitment, Remote Offers
		 *...
		 * A local node:
		 *  - if it receives (or already possesses) a payment preimage
		 *    for an unresolved HTLC output that it has been offered
		 *    AND for which it has committed to an outgoing HTLC:
		 *    - MUST *resolve* the output by spending it, using the
		 *      HTLC-success transaction.
		 */
		if (outs[i]->remote_htlc_sig) {
			/* Our own commitment tx: spend with the 2-of-2
			 * HTLC-success transaction (their sig + ours). */
			struct amount_msat htlc_amount;
			if (!amount_sat_to_msat(&htlc_amount, outs[i]->sat))
				status_failed(STATUS_FAIL_INTERNAL_ERROR,
					      "Overflow in output %zu %s",
					      i,
					      type_to_string(tmpctx,
							     struct amount_sat,
							     &outs[i]->sat));
			tx = htlc_success_tx(outs[i], chainparams,
					     &outs[i]->txid,
					     outs[i]->outnum,
					     outs[i]->wscript,
					     htlc_amount,
					     to_self_delay[LOCAL],
					     0,
					     keyset, option_anchor_outputs);
			set_htlc_success_fee(tx, outs[i]->remote_htlc_sig,
					     outs[i]->wscript);
			hsm_sign_local_htlc_tx(tx, outs[i]->wscript, &sig);
			witness = bitcoin_witness_htlc_success_tx(
			    tx, &sig, outs[i]->remote_htlc_sig, preimage,
			    outs[i]->wscript);
			bitcoin_tx_input_set_witness(tx, 0, take(witness));

			propose_resolution(outs[i], tx, 0,
					   OUR_HTLC_SUCCESS_TX,
					   is_replay);
		} else {
			enum tx_type tx_type = THEIR_HTLC_FULFILL_TO_US;

			/* BOLT #5:
			 *
			 * ## HTLC Output Handling: Remote Commitment, Remote
			 *    Offers
			 *...
			 * A local node:
			 * - if it receives (or already possesses) a payment
			 *   preimage for an unresolved HTLC output that it was
			 *   offered AND for which it has committed to an
			 *   outgoing HTLC:
			 *    - MUST *resolve* the output by spending it to a
			 *      convenient address.
			 */
			tx = tx_to_us(outs[i], remote_htlc_to_us, outs[i],
				      option_anchor_outputs ? 1 : 0,
				      0, preimage, sizeof(*preimage),
				      outs[i]->wscript, &tx_type,
				      htlc_feerate);
			propose_resolution(outs[i], tx, 0, tx_type,
					   is_replay);
			if (!is_replay && tx_type == IGNORING_TINY_PAYMENT) {
				/* Output too small to be worth sweeping:
				 * still record the fulfillment for accounting. */
				struct bitcoin_txid txid;
				bitcoin_txid(tx, &txid);
				record_htlc_fulfilled(&txid, outs[i], 0, true);
			}
		}
	}
}

#if DEVELOPER
/* Exempt our long-lived globals from the memleak scan. */
static void memleak_remove_globals(struct htable *memtable, const tal_t *topctx)
{
	if (keyset)
		memleak_remove_region(memtable, keyset, sizeof(*keyset));
	memleak_remove_pointer(memtable, remote_per_commitment_point);
	memleak_remove_pointer(memtable, remote_per_commitment_secret);
	memleak_remove_pointer(memtable, topctx);
	memleak_remove_region(memtable,
			      missing_htlc_msgs, tal_bytelen(missing_htlc_msgs));
}

/* If @msg is a dev_memleak request, run the leak scan and reply; returns
 * false if the message was something else. */
static bool handle_dev_memleak(struct tracked_output **outs, const u8 *msg)
{
	struct htable *memtable;
	bool found_leak;

	if (!fromwire_onchaind_dev_memleak(msg))
		return false;

	memtable = memleak_find_allocations(tmpctx, msg, msg);
	/* Top-level context is parent of outs */
	memleak_remove_globals(memtable, tal_parent(outs));
	memleak_remove_region(memtable, outs, tal_bytelen(outs));

	found_leak = dump_memleak(memtable, memleak_status_broken);
	wire_sync_write(REQ_FD,
			take(towire_onchaind_dev_memleak_reply(NULL,
							       found_leak)));
	return true;
}
#else
/* Non-developer build: never a memleak message. */
static bool handle_dev_memleak(struct tracked_output **outs, const u8 *msg)
{
	return false;
}
#endif /* !DEVELOPER */

/* BOLT #5:
 *
 * A node:
 *  - once it has broadcast a funding transaction OR sent a commitment
 *    signature for a commitment transaction that contains an HTLC output:
 *    - until all outputs are *irrevocably resolved*:
 *      - MUST monitor the blockchain for transactions that spend any output
 *        that is NOT *irrevocably resolved*.
 */
/* Main event loop: read depth/spend/preimage messages from the master until
 * every tracked output is irrevocably resolved, then tell the master so. */
static void wait_for_resolved(struct tracked_output **outs)
{
	billboard_update(outs);

	while (num_not_irrevocably_resolved(outs) != 0) {
		u8 *msg = wire_sync_read(outs, REQ_FD);
		struct bitcoin_txid txid;
		u32 input_num, depth, tx_blockheight;
		struct preimage preimage;
		bool is_replay;
		struct tx_parts *tx_parts;

		status_debug("Got new message %s",
			     onchaind_wire_name(fromwire_peektype(msg)));

		if (fromwire_onchaind_depth(msg, &txid, &depth, &is_replay))
			tx_new_depth(outs, &txid, depth, is_replay);
		else if (fromwire_onchaind_spent(msg, msg, &tx_parts,
						 &input_num,
						 &tx_blockheight,
						 &is_replay)) {
			output_spent(&outs, tx_parts, input_num,
				     tx_blockheight, is_replay);
		} else if (fromwire_onchaind_known_preimage(msg, &preimage,
							    &is_replay))
			handle_preimage(outs, &preimage, is_replay);
		else if (!handle_dev_memleak(outs, msg))
			master_badmsg(-1, msg);

		billboard_update(outs);
		tal_free(msg);
		clean_tmpctx();
	}

	wire_sync_write(REQ_FD,
			take(towire_onchaind_all_irrevocably_resolved(outs)));
}

static void init_reply(const char *what)
{
	/* Send init_reply first, so billboard gets credited to ONCHAIND */
	wire_sync_write(REQ_FD, take(towire_onchaind_init_reply(NULL)));
	peer_billboard(true, what);
}

/* A mutual (negotiated) close confirmed: nothing to sweep, just record
 * the closure (and our output amount, if any) and wait out resolution. */
static void handle_mutual_close(struct tracked_output **outs,
				const struct tx_parts *tx,
				u32 tx_blockheight,
				int our_outnum,	/* -1 if we have no output */
				bool is_replay)
{
	struct amount_sat our_out;

	init_reply("Tracking mutual close transaction");

	/* Annotate the first input as close. We can currently only have a
	 * single input for these. */
	onchain_annotate_txin(&tx->txid, 0, TX_CHANNEL_CLOSE);

	/* BOLT #5:
	 *
	 * A closing transaction *resolves* the funding transaction output.
	 *
	 * In the case of a mutual close, a node need not do anything else, as
	 * it has already agreed to the output, which is sent to its specified
	 * `scriptpubkey`
	 */
	resolved_by_other(outs[0], &tx->txid, MUTUAL_CLOSE);

	if (!is_replay) {
		/* It's possible there's no to_us output */
		if (our_outnum > -1) {
			struct amount_asset asset;
			asset = wally_tx_output_get_amount(
			    tx->outputs[our_outnum]);
			assert(amount_asset_is_main(&asset));
			our_out = amount_asset_to_sat(&asset);
		} else
			our_out = AMOUNT_SAT(0);

		record_mutual_closure(&tx->txid, tx_blockheight,
				      our_out, our_outnum);
	}

	wait_for_resolved(outs);
}

/* Build the witness script for each HTLC stub, as seen from @side's
 * commitment tx: offered-HTLC script for @side's own HTLCs, received-HTLC
 * script (which embeds the cltv expiry) for the others. */
static u8 **derive_htlc_scripts(const struct htlc_stub *htlcs, enum side side)
{
	size_t i;
	u8 **htlc_scripts = tal_arr(htlcs, u8 *, tal_count(htlcs));

	for (i = 0; i < tal_count(htlcs); i++) {
		if (htlcs[i].owner == side)
			htlc_scripts[i] = htlc_offered_wscript(htlc_scripts,
							       &htlcs[i].ripemd,
							       keyset,
							       option_anchor_outputs);
		else {
			/* FIXME: remove abs_locktime */
			struct abs_locktime ltime;
			if (!blocks_to_abs_locktime(htlcs[i].cltv_expiry,
						    &ltime))
				status_failed(STATUS_FAIL_INTERNAL_ERROR,
					      "Could not convert cltv_expiry %u to locktime",
					      htlcs[i].cltv_expiry);
			htlc_scripts[i] = htlc_received_wscript(htlc_scripts,
								&htlcs[i].ripemd,
								&ltime,
								keyset,
								option_anchor_outputs);
		}
	}
	return htlc_scripts;
}

/* Our own commitment, our offered HTLC: build the HTLC-timeout tx, find
 * which of the candidate HTLCs the remote signature actually matches, and
 * propose it at its cltv expiry.  Returns the chosen index into htlcs[]. */
static size_t resolve_our_htlc_ourcommit(struct tracked_output *out,
					 const size_t *matches,
					 const struct htlc_stub *htlcs,
					 u8 **htlc_scripts,
					 bool is_replay)
{
	struct bitcoin_tx *tx = NULL;
	struct bitcoin_signature localsig;
	size_t i;
	struct amount_msat htlc_amount;
	u8 **witness;

	if (!amount_sat_to_msat(&htlc_amount, out->sat))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Overflow in our_htlc output %s",
			      type_to_string(tmpctx, struct amount_sat,
					     &out->sat));

	assert(tal_count(matches));

	/* These htlcs are all possibilities, but signature will only match
	 * one with the correct cltv: check which that is.
	 */
	for (i = 0; i < tal_count(matches); i++) {
		/* Skip over duplicate HTLCs, since we only need one. */
		if (i > 0
		    && (htlcs[matches[i]].cltv_expiry
			== htlcs[matches[i-1]].cltv_expiry))
			continue;

		/* BOLT #5:
		 *
		 * ## HTLC Output Handling: Local Commitment, Local Offers
		 * ...
		 *  - if the commitment transaction HTLC output has *timed out*
		 *    and hasn't been *resolved*:
		 *    - MUST *resolve* the output by spending it using the
		 *      HTLC-timeout transaction.
		 */
		tx = htlc_timeout_tx(tmpctx, chainparams, &out->txid,
				     out->outnum, htlc_scripts[matches[i]],
				     htlc_amount,
				     htlcs[matches[i]].cltv_expiry,
				     to_self_delay[LOCAL], 0, keyset,
				     option_anchor_outputs);

		/* set_htlc_timeout_fee() succeeding means the remote sig
		 * verified against this candidate's cltv. */
		if (set_htlc_timeout_fee(tx, out->remote_htlc_sig,
					 htlc_scripts[matches[i]]))
			break;
	}

	/* Since there's been trouble with this before, we go to some length
	 * to give details here! */
	if (i == tal_count(matches)) {
		char *cltvs, *wscripts;

		cltvs = tal_fmt(tmpctx, "%u", htlcs[matches[0]].cltv_expiry);
		wscripts = tal_hex(tmpctx, htlc_scripts[matches[0]]);
		for (i = 1; i < tal_count(matches); i++) {
			tal_append_fmt(&cltvs, "/%u",
				       htlcs[matches[i]].cltv_expiry);
			tal_append_fmt(&wscripts, "/%s",
				       tal_hex(tmpctx, htlc_scripts[matches[i]]));
		}

		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "No valid signature found for %zu htlc_timeout_txs"
			      " feerate %u-%u,"
			      " last tx %s, input %s, signature %s,"
			      " cltvs %s wscripts %s"
			      " %s",
			      tal_count(matches),
			      min_possible_feerate, max_possible_feerate,
			      type_to_string(tmpctx, struct bitcoin_tx, tx),
			      type_to_string(tmpctx, struct amount_sat,
					     &out->sat),
			      type_to_string(tmpctx, struct bitcoin_signature,
					     out->remote_htlc_sig),
			      cltvs, wscripts,
			      option_anchor_outputs
			      ? "option_anchor_outputs" : "");
	}

	hsm_sign_local_htlc_tx(tx, htlc_scripts[matches[i]], &localsig);

	witness = bitcoin_witness_htlc_timeout_tx(tx, &localsig,
						  out->remote_htlc_sig,
						  htlc_scripts[matches[i]]);

	bitcoin_tx_input_set_witness(tx, 0, take(witness));

	/* Steals tx onto out */
	propose_resolution_at_block(out, tx, htlcs[matches[i]].cltv_expiry,
				    OUR_HTLC_TIMEOUT_TX, is_replay);

	return matches[i];
}

/* wscript for *received* htlcs (ie. our htlcs in their commit tx, or their
 * htlcs in our commit tx) includes cltv, so they must be the same for all
 * matching htlcs.  Unless, of course, they've found a sha256 clash. */
static u32 matches_cltv(const size_t *matches,
			const struct htlc_stub *htlcs)
{
	for (size_t i = 1; i < tal_count(matches); i++) {
		assert(matches[i] < tal_count(htlcs));
		assert(htlcs[matches[i]].cltv_expiry
		       == htlcs[matches[i-1]].cltv_expiry);
	}
	return htlcs[matches[0]].cltv_expiry;
}

/* Their commitment, our offered HTLC: after the cltv expiry we can sweep
 * it straight to our wallet.  Returns the chosen index into htlcs[]. */
static size_t resolve_our_htlc_theircommit(struct tracked_output *out,
					   const size_t *matches,
					   const struct htlc_stub *htlcs,
					   u8 **htlc_scripts,
					   bool is_replay)
{
	struct bitcoin_tx *tx;
	enum tx_type tx_type = OUR_HTLC_TIMEOUT_TO_US;
	u32 cltv_expiry = matches_cltv(matches, htlcs);

	/* BOLT #5:
	 *
	 * ## HTLC Output Handling: Remote Commitment, Local Offers
	 * ...
	 *
	 *   - if the commitment transaction HTLC output has *timed out* AND NOT
	 *     been *resolved*:
	 *     - MUST *resolve* the output, by spending it to a convenient
	 *       address.
	 */
	tx = tx_to_us(out, remote_htlc_to_us, out,
		      option_anchor_outputs ? 1 : 0,
		      cltv_expiry, NULL, 0, htlc_scripts[matches[0]], &tx_type,
		      htlc_feerate);

	propose_resolution_at_block(out, tx, cltv_expiry, tx_type, is_replay);

	/* They're all equivalent: might as well use first one.
	 */
	return matches[0];
}

/* Returns which htlcs it chose to use of matches[] */
/* An HTLC they offered us, for which we have no preimage: wait for it to
 * expire, then consider it resolved by ignoring it. */
static size_t resolve_their_htlc(struct tracked_output *out,
				 const size_t *matches,
				 const struct htlc_stub *htlcs,
				 u8 **htlc_scripts,
				 bool is_replay)
{
	size_t which_htlc;

	/* BOLT #5:
	 *
	 * ## HTLC Output Handling: Remote Commitment, Remote Offers
	 *...
	 * ### Requirements
	 *...
	 * If not otherwise resolved, once the HTLC output has expired, it is
	 * considered *irrevocably resolved*.
	 */

	/* BOLT #5:
	 *
	 * ## HTLC Output Handling: Local Commitment, Remote Offers
	 *...
	 * ### Requirements
	 *...
	 * If not otherwise resolved, once the HTLC output has expired, it is
	 * considered *irrevocably resolved*.
	 */

	/* The two cases are identical as far as default handling goes.
	 * But in the remote commitment / remote offer (ie. caller is
	 * handle_their_unilateral), htlcs which match may have different cltvs.
	 * So wait until the worst case (largest HTLC). */
	assert(tal_count(matches));
	which_htlc = matches[0];
	for (size_t i = 1; i < tal_count(matches); i++) {
		if (htlcs[matches[i]].cltv_expiry
		    > htlcs[which_htlc].cltv_expiry)
			which_htlc = matches[i];
	}

	/* If we hit timeout depth, resolve by ignoring. */
	propose_resolution_at_block(out, NULL, htlcs[which_htlc].cltv_expiry,
				    THEIR_HTLC_TIMEOUT_TO_THEM, is_replay);
	return which_htlc;
}

/* Return tal_arr of htlc indexes.
 */
/* Which HTLC witness scripts hash to this output's P2WSH program?  More
 * than one entry is possible when HTLCs share a payment hash. */
static const size_t *match_htlc_output(const tal_t *ctx,
				       const struct wally_tx_output *out,
				       u8 **htlc_scripts)
{
	size_t *matches = tal_arr(ctx, size_t, 0);
	const u8 *script = tal_dup_arr(tmpctx, u8, out->script,
				       out->script_len, 0);

	/* Must be a p2wsh output */
	if (!is_p2wsh(script, NULL))
		return matches;

	for (size_t i = 0; i < tal_count(htlc_scripts); i++) {
		struct sha256 sha;
		if (!htlc_scripts[i])
			continue;

		sha256(&sha, htlc_scripts[i], tal_count(htlc_scripts[i]));
		/* script+2 skips the version byte and push opcode, leaving
		 * the 32-byte witness program. */
		if (memeq(script + 2, tal_count(script) - 2,
			  &sha, sizeof(sha)))
			tal_arr_expand(&matches, i);
	}
	return matches;
}

/* They must all be in the same direction, since the scripts are different for
 * each dir.  Unless, of course, they've found a sha256 clash. */
static enum side matches_direction(const size_t *matches,
				   const struct htlc_stub *htlcs)
{
	for (size_t i = 1; i < tal_count(matches); i++) {
		assert(matches[i] < tal_count(htlcs));
		assert(htlcs[matches[i]].owner == htlcs[matches[i-1]].owner);
	}
	return htlcs[matches[0]].owner;
}

/* Tell master about any we didn't use, if it wants to know. */
/* htlc_scripts[i] is NULLed as HTLCs are matched, so non-NULL here means
 * the HTLC never appeared in the commitment tx. */
static void note_missing_htlcs(u8 **htlc_scripts,
			       const struct htlc_stub *htlcs,
			       const bool *tell_if_missing,
			       const bool *tell_immediately)
{
	for (size_t i = 0; i < tal_count(htlcs); i++) {
		u8 *msg;

		/* Used. */
		if (!htlc_scripts[i])
			continue;

		/* Doesn't care.
		 */
		if (!tell_if_missing[i])
			continue;

		msg = towire_onchaind_missing_htlc_output(missing_htlc_msgs,
							  &htlcs[i]);
		if (tell_immediately[i])
			wire_sync_write(REQ_FD, take(msg));
		else
			/* Queued until the commitment tx reaches
			 * reasonable depth (see tx_new_depth). */
			tal_arr_expand(&missing_htlc_msgs, msg);
	}
}

/* Fill anchor[LOCAL]/anchor[REMOTE] with the anchor scriptpubkeys, or
 * NULLs when anchors don't apply to this channel. */
static void get_anchor_scriptpubkeys(const tal_t *ctx, u8 **anchor)
{
	if (!option_anchor_outputs) {
		anchor[LOCAL] = anchor[REMOTE] = NULL;
		return;
	}

	for (enum side side = 0; side < NUM_SIDES; side++) {
		u8 *wscript = bitcoin_wscript_anchor(tmpctx,
						     &funding_pubkey[side]);
		anchor[side] = scriptpubkey_p2wsh(ctx, wscript);
	}
}

/* Scriptpubkey of a `to_remote` output paying @remotekey: P2WSH with a
 * csv lock under anchors, plain P2WPKH otherwise. */
static u8 *scriptpubkey_to_remote(const tal_t *ctx,
				  const struct pubkey *remotekey,
				  u32 csv_lock)
{
	/* BOLT #3:
	 *
	 * #### `to_remote` Output
	 *
	 * If `option_anchors` applies to the commitment
	 * transaction, the `to_remote` output is encumbered by a one
	 * block csv lock.
	 *
	 *    <remote_pubkey> OP_CHECKSIGVERIFY 1 OP_CHECKSEQUENCEVERIFY
	 *
	 *...
	 * Otherwise, this output is a simple P2WPKH to `remotepubkey`.
	 */
	if (option_anchor_outputs) {
		return scriptpubkey_p2wsh(ctx,
					  anchor_to_remote_redeem(tmpctx,
								  remotekey,
								  csv_lock));
	} else {
		return scriptpubkey_p2wpkh(ctx, remotekey);
	}
}

/* Track and propose sweeping the `to_local` output of our own commitment
 * tx back to our wallet, after the to_self_delay csv has passed. */
static void our_unilateral_to_us(struct tracked_output ***outs,
				 const struct tx_parts *tx,
				 u32 tx_blockheight,
				 size_t index,
				 struct amount_sat amt,
				 u16 sequence,
				 const u8 *local_scriptpubkey,
				 const u8 *local_wscript,
				 bool is_replay)
{
	struct bitcoin_tx *to_us;
	struct tracked_output *out;
	enum tx_type tx_type = OUR_DELAYED_RETURN_TO_WALLET;

	/* BOLT #5:
	 *
	 * A node:
	 *   - upon discovering its *local commitment
	 *   transaction*:
	 *     - SHOULD spend the `to_local` output to a
	 *       convenient address.
	 *     - MUST wait until the `OP_CHECKSEQUENCEVERIFY`
	 *       delay has passed (as specified by the remote
	 *       node's `to_self_delay` field) before spending
	 *       the output.
	 */
	out = new_tracked_output(outs, &tx->txid, tx_blockheight,
				 OUR_UNILATERAL, index,
				 amt,
				 DELAYED_OUTPUT_TO_US,
				 NULL, NULL, NULL);
	/* BOLT #3:
	 *
	 * The output is spent by an input with
	 * `nSequence` field set to `to_self_delay` (which can
	 * only be valid after that duration has passed) and
	 * witness:
	 *
	 *	<local_delayedsig> <>
	 */
	to_us = tx_to_us(out, delayed_payment_to_us, out,
			 sequence, 0, NULL, 0,
			 local_wscript, &tx_type,
			 delayed_to_us_feerate);

	/* BOLT #5:
	 *
	 * Note: if the output is spent (as recommended), the
	 * output is *resolved* by the spending transaction
	 */
	propose_resolution(out, to_us, sequence, tx_type, is_replay);
}

/* Our own (current) commitment tx hit the chain: classify every output
 * (to_local, to_remote, anchors, elements fee, HTLCs) and propose a
 * resolution for each, then loop until all are irrevocably resolved. */
static void handle_our_unilateral(const struct tx_parts *tx,
				  u32 tx_blockheight,
				  const struct basepoints basepoints[NUM_SIDES],
				  const struct htlc_stub *htlcs,
				  const bool *tell_if_missing,
				  const bool *tell_immediately,
				  const enum side opener,
				  const struct bitcoin_signature *remote_htlc_sigs,
				  struct tracked_output **outs,
				  bool is_replay)
{
	u8 **htlc_scripts;
	u8 *local_wscript, *script[NUM_SIDES], *anchor[NUM_SIDES];
	struct pubkey local_per_commitment_point;
	struct keyset *ks;
	size_t i;
	struct amount_sat their_outs = AMOUNT_SAT(0), our_outs = AMOUNT_SAT(0);

	init_reply("Tracking our own unilateral close");
	onchain_annotate_txin(&tx->txid, 0, TX_CHANNEL_UNILATERAL);

	/* BOLT #5:
	 *
	 * In this case, a node discovers its *local commitment transaction*,
	 * which *resolves* the funding transaction output.
	 */
	resolved_by_other(outs[0], &tx->txid, OUR_UNILATERAL);

	/* Figure out what delayed to-us output looks like */
	hsm_get_per_commitment_point(&local_per_commitment_point);

	/* keyset is const, we need a non-const ptr to set it up */
	keyset = ks = tal(tx, struct keyset);
	if (!derive_keyset(&local_per_commitment_point,
			   &basepoints[LOCAL],
			   &basepoints[REMOTE],
			   commit_num >= static_remotekey_start[LOCAL],
			   ks))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Deriving keyset for %"PRIu64, commit_num);

	status_debug("Deconstructing unilateral tx: %"PRIu64
		     " using keyset: "
		     " self_revocation_key: %s"
		     " self_delayed_payment_key: %s"
		     " self_payment_key: %s"
		     " other_payment_key: %s"
		     " self_htlc_key: %s"
		     " other_htlc_key: %s",
		     commit_num,
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->self_revocation_key),
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->self_delayed_payment_key),
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->self_payment_key),
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->other_payment_key),
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->self_htlc_key),
		     type_to_string(tmpctx, struct pubkey,
				    &keyset->other_htlc_key));

	local_wscript = to_self_wscript(tmpctx, to_self_delay[LOCAL], 1,
					keyset);

	/* Figure out what to-us output looks like. */
	script[LOCAL] = scriptpubkey_p2wsh(tmpctx, local_wscript);

	/* Figure out what direct to-them output looks like.
	 */
	script[REMOTE] = scriptpubkey_to_remote(tmpctx,
						&keyset->other_payment_key, 1);

	/* Calculate all the HTLC scripts so we can match them */
	htlc_scripts = derive_htlc_scripts(htlcs, LOCAL);

	status_debug("Script to-me: %u: %s (%s)",
		     to_self_delay[LOCAL],
		     tal_hex(tmpctx, script[LOCAL]),
		     tal_hex(tmpctx, local_wscript));
	status_debug("Script to-them: %s",
		     tal_hex(tmpctx, script[REMOTE]));

	for (i = 0; i < tal_count(tx->outputs); i++) {
		if (tx->outputs[i]->script == NULL)
			continue;
		status_debug("Output %zu: %s", i,
			     tal_hexstr(tmpctx, tx->outputs[i]->script,
					tx->outputs[i]->script_len));
	}

	get_anchor_scriptpubkeys(tmpctx, anchor);

	/* Classify each commitment output.  script[]/anchor[] entries are
	 * NULLed once matched so each is claimed at most once. */
	for (i = 0; i < tal_count(tx->outputs); i++) {
		struct tracked_output *out;
		const size_t *matches;
		size_t which_htlc;
		struct amount_asset asset
			= wally_tx_output_get_amount(tx->outputs[i]);
		struct amount_sat amt;
		assert(amount_asset_is_main(&asset));
		amt = amount_asset_to_sat(&asset);

		if (chainparams->is_elements
		    && tx->outputs[i]->script_len == 0) {
			status_debug("OUTPUT %zu is a fee output", i);
			/* An empty script simply means that that this is a
			 * fee output. */
			out = new_tracked_output(&outs,
						 &tx->txid, tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 ELEMENTS_FEE,
						 NULL, NULL, NULL);
			ignore_output(out);
			continue;
		} else if (script[LOCAL]
			   && wally_tx_output_scripteq(tx->outputs[i],
						       script[LOCAL])) {
			our_unilateral_to_us(&outs, tx, tx_blockheight, i,
					     amt, to_self_delay[LOCAL],
					     script[LOCAL], local_wscript,
					     is_replay);
			script[LOCAL] = NULL;
			add_amt(&our_outs, amt);
			continue;
		}
		if (script[REMOTE]
		    && wally_tx_output_scripteq(tx->outputs[i],
						script[REMOTE])) {
			/* BOLT #5:
			 *
			 *     - MAY ignore the `to_remote` output.
			 *       - Note: No action is required by the local
			 *         node, as `to_remote` is considered *resolved*
			 *         by the commitment transaction itself.
			 */
			out = new_tracked_output(&outs,
						 &tx->txid, tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 OUTPUT_TO_THEM,
						 NULL, NULL, NULL);
			ignore_output(out);
			script[REMOTE] = NULL;
			add_amt(&their_outs, amt);
			continue;
		}
		if (anchor[LOCAL]
		    && wally_tx_output_scripteq(tx->outputs[i],
						anchor[LOCAL])) {
			/* FIXME: We should be able to spend this! */
			out = new_tracked_output(&outs,
						 &tx->txid, tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 ANCHOR_TO_US,
						 NULL, NULL, NULL);
			ignore_output(out);
			anchor[LOCAL] = NULL;
			continue;
		}
		if (anchor[REMOTE]
		    && wally_tx_output_scripteq(tx->outputs[i],
						anchor[REMOTE])) {
			out = new_tracked_output(&outs,
						 &tx->txid, tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 ANCHOR_TO_THEM,
						 NULL, NULL, NULL);
			ignore_output(out);
			anchor[REMOTE] = NULL;
			continue;
		}

		matches = match_htlc_output(tmpctx, tx->outputs[i],
					    htlc_scripts);
		/* FIXME: limp along when this happens! */
		if (tal_count(matches) == 0) {
			bool found = false;

			/* Maybe they're using option_will_fund?  Lease
			 * channels can raise the csv above 1, so grind
			 * through the possible csv values. */
			if (opener == REMOTE && script[LOCAL]) {
				status_debug("Grinding for our to_local");
				/* We already tried `1` */
				for (size_t csv = 2;
				     csv <= LEASE_RATE_DURATION;
				     csv++) {
					local_wscript
						= to_self_wscript(tmpctx,
								  to_self_delay[LOCAL],
								  csv, keyset);
					script[LOCAL]
						= scriptpubkey_p2wsh(tmpctx,
								     local_wscript);
					if (!wally_tx_output_scripteq(
						tx->outputs[i], script[LOCAL]))
						continue;

					our_unilateral_to_us(&outs, tx,
							     tx_blockheight,
							     i, amt,
							     max(to_self_delay[LOCAL], csv),
							     script[LOCAL],
							     local_wscript,
							     is_replay);
					script[LOCAL] = NULL;
					add_amt(&our_outs, amt);
					found = true;
					break;
				}
			} else if (opener == LOCAL && script[REMOTE]) {
				status_debug("Grinding for to_remote (ours)");
				/* We already tried `1` */
				for (size_t csv = 2;
				     csv <= LEASE_RATE_DURATION;
				     csv++) {
					script[REMOTE]
						= scriptpubkey_to_remote(tmpctx,
									 &keyset->other_payment_key,
									 csv);
					if (!wally_tx_output_scripteq(
						tx->outputs[i],
						script[REMOTE]))
						continue;
					/* BOLT #5:
					 *
					 *     - MAY ignore the `to_remote`
					 *       output.
					 *       - Note: No action is required
					 *         by the local node, as
					 *         `to_remote` is considered
					 *         *resolved* by the commitment
					 *         transaction itself.
					 */
					out = new_tracked_output(&outs,
								 &tx->txid,
								 tx_blockheight,
								 OUR_UNILATERAL,
								 i,
								 amt,
								 OUTPUT_TO_THEM,
								 NULL, NULL,
								 NULL);
					ignore_output(out);
					script[REMOTE] = NULL;
					add_amt(&their_outs, amt);
					found = true;
					break;
				}
			}

			if (found)
				continue;

			onchain_annotate_txout(&tx->txid, i,
					       TX_CHANNEL_PENALTY | TX_THEIRS);
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Could not find resolution for output %zu",
				      i);
		}

		if (matches_direction(matches, htlcs) == LOCAL) {
			/* BOLT #5:
			 *
			 *     - MUST handle HTLCs offered by itself as specified
			 *       in [HTLC Output Handling: Local Commitment,
			 *       Local Offers]
			 */
			out = new_tracked_output(&outs, &tx->txid,
						 tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 OUR_HTLC,
						 NULL, NULL,
						 remote_htlc_sigs);
			/* Tells us which htlc to use */
			which_htlc = resolve_our_htlc_ourcommit(out, matches,
								htlcs,
								htlc_scripts,
								is_replay);
			add_amt(&our_outs, amt);
		} else {
			out = new_tracked_output(&outs, &tx->txid,
						 tx_blockheight,
						 OUR_UNILATERAL, i,
						 amt,
						 THEIR_HTLC,
						 NULL, NULL,
						 remote_htlc_sigs);
			/* BOLT #5:
			 *
			 *     - MUST handle HTLCs offered by the remote node
			 *     as specified in [HTLC Output Handling: Local
			 *     Commitment, Remote Offers]
			 */
			/* Tells us which htlc to use */
			which_htlc = resolve_their_htlc(out, matches, htlcs,
							htlc_scripts,
							is_replay);
			add_amt(&their_outs, amt);
		}
		out->htlc = htlcs[which_htlc];
		out->wscript = tal_steal(out, htlc_scripts[which_htlc]);

		/* Each of these consumes one HTLC signature */
		remote_htlc_sigs++;
		/* We've matched this HTLC, can't do again. */
		htlc_scripts[which_htlc] = NULL;

	}

	note_missing_htlcs(htlc_scripts, htlcs,
			   tell_if_missing, tell_immediately);

	if (!is_replay)
		record_chain_fees_unilateral(&tx->txid, tx_blockheight,
					     outs[0]->sat,
					     their_outs, our_outs);

	wait_for_resolved(outs);
}

/* We produce individual penalty txs.
   It's less efficient, but avoids them
 * using HTLC txs to block our penalties for long enough to pass the CSV
 * delay */
/* Revoked commitment: sweep their delayed `to_local` output with our
 * revocation key (witness `<revocation_sig> 1`). */
static void steal_to_them_output(struct tracked_output *out,
				 u32 csv, bool is_replay)
{
	u8 *wscript;
	struct bitcoin_tx *tx;
	enum tx_type tx_type = OUR_PENALTY_TX;

	/* BOLT #3:
	 *
	 * If a revoked commitment transaction is published, the other party
	 * can spend this output immediately with the following witness:
	 *
	 *    <revocation_sig> 1
	 */
	wscript = bitcoin_wscript_to_local(tmpctx, to_self_delay[REMOTE], csv,
					   &keyset->self_revocation_key,
					   &keyset->self_delayed_payment_key);

	tx = tx_to_us(tmpctx, penalty_to_us, out, BITCOIN_TX_RBF_SEQUENCE, 0,
		      &ONE, sizeof(ONE), wscript, &tx_type, penalty_feerate);

	propose_resolution(out, tx, 0, tx_type, is_replay);
}

/* Revoked commitment: sweep an HTLC output with our revocation key
 * (witness `<revocation_sig> <revocationpubkey>`). */
static void steal_htlc(struct tracked_output *out, bool is_replay)
{
	struct bitcoin_tx *tx;
	enum tx_type tx_type = OUR_PENALTY_TX;
	u8 der[PUBKEY_CMPR_LEN];

	/* BOLT #3:
	 *
	 * If a revoked commitment transaction is published, the remote node can
	 * spend this output immediately with the following witness:
	 *
	 *     <revocation_sig> <revocationpubkey>
	 */
	pubkey_to_der(der, &keyset->self_revocation_key);
	tx = tx_to_us(out, penalty_to_us, out, BITCOIN_TX_RBF_SEQUENCE, 0,
		      der, sizeof(der), out->wscript, &tx_type,
		      penalty_feerate);

	propose_resolution(out, tx, 0, tx_type, is_replay);
}

/* Tell wallet that we have discovered a UTXO from a to-remote output,
 * which it can spend with a little additional info we give here. */
static void tell_wallet_to_remote(const struct tx_parts *tx,
				  unsigned int outnum,
				  u32 tx_blockheight,
				  const u8 *scriptpubkey,
				  const struct pubkey *per_commit_point,
				  bool option_static_remotekey,
				  u32 csv_lock)
{
	struct amount_asset asset
		= wally_tx_output_get_amount(tx->outputs[outnum]);
	struct amount_sat amt;

	assert(amount_asset_is_main(&asset));
	amt = amount_asset_to_sat(&asset);

	/* A NULL per_commit_point is how we indicate the pubkey doesn't need
	 * changing.
	 */
	if (option_static_remotekey)
		per_commit_point = NULL;

	wire_sync_write(REQ_FD,
			take(towire_onchaind_add_utxo(NULL, &tx->txid, outnum,
						      per_commit_point,
						      amt,
						      tx_blockheight,
						      scriptpubkey,
						      csv_lock)));
}

/* When a 'cheat' transaction comes through, our accounting is
 * going to be off, as it's publishing/finalizing old state.
 * To compensate for this, we count *all* of the channel funds
 * as ours; any subsequent handling of utxos on this tx
 * will correctly mark the funds as a 'channel withdrawal'
 */
static void update_ledger_cheat(const struct bitcoin_txid *txid,
				u32 blockheight,
				const struct tracked_output *out)
{
	/* how much of a difference should we update the
	 * channel account ledger by? */
	struct amount_msat amt;

	if (amount_msat_eq_sat(our_msat, out->sat))
		return;

	if (!amount_sat_sub_msat(&amt, out->sat, our_msat))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "unable to subtract our balance %s from channel total %s",
			      type_to_string(tmpctx, struct amount_msat,
					     &our_msat),
			      type_to_string(tmpctx, struct amount_sat,
					     &out->sat));

	/* add the difference to our ledger balance */
	send_coin_mvt(take(new_coin_journal_entry(NULL, NULL, txid,
						  &out->txid, out->outnum,
						  blockheight, amt, true)));
}

/* Their commitment tx's `to_remote` output pays straight to us: track it
 * as resolved and hand the UTXO details to the wallet. */
static void their_unilateral_local(struct tracked_output ***outs,
				   const struct tx_parts *tx,
				   u32 tx_blockheight,
				   size_t index,
				   struct amount_sat amt,
				   const u8 *local_scriptpubkey,
				   enum tx_type tx_type,
				   bool is_replay,
				   u32 csv_lock)
{
	struct tracked_output *out;

	/* BOLT #5:
	 *
	 * - MAY take no action in regard to the associated
	 *   `to_remote`, which is simply a P2WPKH output to
	 *   the *local node*.
	 *   - Note: `to_remote` is considered *resolved* by the
	 *     commitment transaction itself.
	 */
	out = new_tracked_output(outs, &tx->txid, tx_blockheight,
				 tx_type,
				 index, amt,
				 OUTPUT_TO_US, NULL, NULL, NULL);
	ignore_output(out);

	if (!is_replay)
		record_channel_withdrawal(&tx->txid, tx_blockheight, out);

	tell_wallet_to_remote(tx, index, tx_blockheight,
			      local_scriptpubkey,
			      remote_per_commitment_point,
			      commit_num >= static_remotekey_start[REMOTE],
			      csv_lock);
}

/* BOLT #5:
 *
 * If any node tries to cheat by broadcasting an outdated commitment
 * transaction (any previous commitment transaction besides the most current
 * one), the other node in the channel can use its revocation private key to
 * claim all the funds from the channel's original funding transaction.
 */
/* They broadcast a revoked commitment: derive the revocation keys from the
 * per-commitment secret and take everything as penalty.
 * NOTE(review): the rest of this function continues past this chunk. */
static void handle_their_cheat(const struct tx_parts *tx,
			       u32 tx_blockheight,
			       const struct secret *revocation_preimage,
			       const struct basepoints basepoints[NUM_SIDES],
			       const struct htlc_stub *htlcs,
			       const bool *tell_if_missing,
			       const bool *tell_immediately,
			       const enum side opener,
			       struct tracked_output **outs,
			       bool is_replay)
{
	u8 **htlc_scripts;
	u8 *remote_wscript, *script[NUM_SIDES], *anchor[NUM_SIDES];
	struct keyset *ks;
	struct pubkey *k;
	size_t i;
	/* We need to figure out what the 'chain fees'
	 * for this unilateral tx are */
	struct amount_sat total_outs = AMOUNT_SAT(0), fee_cost;
	bool amt_ok;

	init_reply("Tracking their illegal close: taking all funds");
	onchain_annotate_txin(
	    &tx->txid, 0, TX_CHANNEL_UNILATERAL | TX_CHANNEL_CHEAT | TX_THEIRS);

	if (!is_replay)
		update_ledger_cheat(&tx->txid, tx_blockheight, outs[0]);

	/* BOLT #5:
	 *
	 * Once a node discovers a commitment transaction for which *it* has a
	 * revocation private key, the funding transaction output is *resolved*.
	 */
	resolved_by_other(outs[0], &tx->txid, THEIR_REVOKED_UNILATERAL);

	/* FIXME: Types. */
	BUILD_ASSERT(sizeof(struct secret) == sizeof(*revocation_preimage));
	remote_per_commitment_secret = tal_dup(tx, struct secret,
					       (struct secret *)
					       revocation_preimage);

	/* Need tmpvar for non-const.
*/ remote_per_commitment_point = k = tal(tx, struct pubkey); if (!pubkey_from_secret(remote_per_commitment_secret, k)) status_failed(STATUS_FAIL_INTERNAL_ERROR, "Failed derive from per_commitment_secret %s", type_to_string(tmpctx, struct secret, remote_per_commitment_secret)); status_debug("Deriving keyset %"PRIu64 ": per_commit_point=%s" " self_payment_basepoint=%s" " other_payment_basepoint=%s" " self_htlc_basepoint=%s" " other_htlc_basepoint=%s" " self_delayed_basepoint=%s" " other_revocation_basepoint=%s", commit_num, type_to_string(tmpctx, struct pubkey, remote_per_commitment_point), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].payment), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].payment), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].htlc), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].htlc), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].delayed_payment), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].revocation)); /* keyset is const, we need a non-const ptr to set it up */ keyset = ks = tal(tx, struct keyset); if (!derive_keyset(remote_per_commitment_point, &basepoints[REMOTE], &basepoints[LOCAL], commit_num >= static_remotekey_start[REMOTE], ks)) status_failed(STATUS_FAIL_INTERNAL_ERROR, "Deriving keyset for %"PRIu64, commit_num); status_debug("Deconstructing revoked unilateral tx: %"PRIu64 " using keyset: " " self_revocation_key: %s" " self_delayed_payment_key: %s" " self_payment_key: %s" " other_payment_key: %s" " self_htlc_key: %s" " other_htlc_key: %s" " (static_remotekey = %"PRIu64"/%"PRIu64")", commit_num, type_to_string(tmpctx, struct pubkey, &keyset->self_revocation_key), type_to_string(tmpctx, struct pubkey, &keyset->self_delayed_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->self_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->other_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->self_htlc_key), type_to_string(tmpctx, struct pubkey, 
&keyset->other_htlc_key), static_remotekey_start[LOCAL], static_remotekey_start[REMOTE]); remote_wscript = to_self_wscript(tmpctx, to_self_delay[REMOTE], 1, keyset); /* Figure out what to-them output looks like. */ script[REMOTE] = scriptpubkey_p2wsh(tmpctx, remote_wscript); /* Figure out what direct to-us output looks like. */ script[LOCAL] = scriptpubkey_to_remote(tmpctx, &keyset->other_payment_key, 1); /* Calculate all the HTLC scripts so we can match them */ htlc_scripts = derive_htlc_scripts(htlcs, REMOTE); status_debug("Script to-them: %u: %s (%s)", to_self_delay[REMOTE], tal_hex(tmpctx, script[REMOTE]), tal_hex(tmpctx, remote_wscript)); status_debug("Script to-me: %s", tal_hex(tmpctx, script[LOCAL])); get_anchor_scriptpubkeys(tmpctx, anchor); for (i = 0; i < tal_count(tx->outputs); i++) { if (tx->outputs[i]->script_len == 0) continue; status_debug("Output %zu: %s", i, tal_hexstr(tmpctx, tx->outputs[i]->script, tx->outputs[i]->script_len)); } for (i = 0; i < tal_count(tx->outputs); i++) { struct tracked_output *out; const size_t *matches; size_t which_htlc; struct amount_asset asset = wally_tx_output_get_amount(tx->outputs[i]); struct amount_sat amt; assert(amount_asset_is_main(&asset)); amt = amount_asset_to_sat(&asset); if (chainparams->is_elements && tx->outputs[i]->script_len == 0) { /* An empty script simply means that that this is a * fee output. */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, ELEMENTS_FEE, NULL, NULL, NULL); ignore_output(out); continue; } if (script[LOCAL] && wally_tx_output_scripteq(tx->outputs[i], script[LOCAL])) { their_unilateral_local(&outs, tx, tx_blockheight, i, amt, script[LOCAL], THEIR_REVOKED_UNILATERAL, is_replay, 1); script[LOCAL] = NULL; add_amt(&total_outs, amt); continue; } if (script[REMOTE] && wally_tx_output_scripteq(tx->outputs[i], script[REMOTE])) { /* BOLT #5: * * - MUST *resolve* the _remote node's main output_ by * spending it using the revocation private key. 
*/ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, DELAYED_CHEAT_OUTPUT_TO_THEM, NULL, NULL, NULL); steal_to_them_output(out, 1, is_replay); script[REMOTE] = NULL; add_amt(&total_outs, amt); continue; } if (anchor[LOCAL] && wally_tx_output_scripteq(tx->outputs[i], anchor[LOCAL])) { /* FIXME: We should be able to spend this! */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, ANCHOR_TO_US, NULL, NULL, NULL); ignore_output(out); anchor[LOCAL] = NULL; continue; } if (anchor[REMOTE] && wally_tx_output_scripteq(tx->outputs[i], anchor[REMOTE])) { out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, ANCHOR_TO_THEM, NULL, NULL, NULL); ignore_output(out); anchor[REMOTE] = NULL; continue; } matches = match_htlc_output(tmpctx, tx->outputs[i], htlc_scripts); if (tal_count(matches) == 0) { bool found = false; if (opener == REMOTE && script[LOCAL]) { status_debug("Grinding for commitment to_remote" " (ours)"); /* We already tried `1` */ for (size_t csv = 2; csv <= LEASE_RATE_DURATION; csv++) { script[LOCAL] = scriptpubkey_to_remote(tmpctx, &keyset->other_payment_key, csv); if (!wally_tx_output_scripteq( tx->outputs[i], script[LOCAL])) continue; their_unilateral_local(&outs, tx, tx_blockheight, i, amt, script[LOCAL], THEIR_REVOKED_UNILATERAL, is_replay, csv); script[LOCAL] = NULL; add_amt(&total_outs, amt); found = true; break; } } else if (opener == LOCAL && script[REMOTE]) { status_debug("Grinding for commitment to_local" " (theirs)"); for (size_t csv = 2; csv <= LEASE_RATE_DURATION; csv++) { remote_wscript = to_self_wscript(tmpctx, to_self_delay[REMOTE], csv, keyset); script[REMOTE] = scriptpubkey_p2wsh(tmpctx, remote_wscript); if (!wally_tx_output_scripteq(tx->outputs[i], script[REMOTE])) continue; out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, DELAYED_CHEAT_OUTPUT_TO_THEM, NULL, NULL, NULL); 
steal_to_them_output(out, csv, is_replay); script[REMOTE] = NULL; add_amt(&total_outs, amt); found = true; break; } } if (!found) status_broken("Could not find resolution" " for output %zu: did" " *we* cheat?", i); continue; } /* In this case, we don't care which HTLC we choose; so pick first one */ which_htlc = matches[0]; if (matches_direction(matches, htlcs) == LOCAL) { /* BOLT #5: * * - MUST *resolve* the _local node's offered HTLCs_ in one of three ways: * * spend the *commitment tx* using the payment revocation private key. * * spend the *commitment tx* once the HTLC timeout has passed. * * spend the *HTLC-success tx*, if the remote node has published it. */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, OUR_HTLC, &htlcs[which_htlc], htlc_scripts[which_htlc], NULL); steal_htlc(out, is_replay); add_amt(&total_outs, amt); } else { out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_REVOKED_UNILATERAL, i, amt, THEIR_HTLC, &htlcs[which_htlc], htlc_scripts[which_htlc], NULL); /* BOLT #5: * * - MUST *resolve* the _remote node's offered HTLCs_ in one of three ways: * * spend the *commitment tx* using the payment revocation private key. * * spend the *commitment tx* using the payment preimage (if known). * * spend the *HTLC-timeout tx*, if the remote node has published it. 
*/ steal_htlc(out, is_replay); add_amt(&total_outs, amt); } htlc_scripts[which_htlc] = NULL; } note_missing_htlcs(htlc_scripts, htlcs, tell_if_missing, tell_immediately); /* Record the fee cost for this tx, deducting it from channel balance */ amt_ok = amount_sat_sub(&fee_cost, outs[0]->sat, total_outs); assert(amt_ok); status_debug("recording chain fees for their cheat %s", type_to_string(tmpctx, struct amount_sat, &fee_cost)); if (!is_replay) update_ledger_chain_fees(&tx->txid, tx_blockheight, fee_cost); wait_for_resolved(outs); } static void handle_their_unilateral(const struct tx_parts *tx, u32 tx_blockheight, const struct pubkey *this_remote_per_commitment_point, const struct basepoints basepoints[NUM_SIDES], const struct htlc_stub *htlcs, const bool *tell_if_missing, const bool *tell_immediately, const enum side opener, struct tracked_output **outs, bool is_replay) { u8 **htlc_scripts; u8 *remote_wscript, *script[NUM_SIDES], *anchor[NUM_SIDES]; struct keyset *ks; size_t i; struct amount_sat their_outs = AMOUNT_SAT(0), our_outs = AMOUNT_SAT(0); init_reply("Tracking their unilateral close"); onchain_annotate_txin(&tx->txid, 0, TX_CHANNEL_UNILATERAL | TX_THEIRS); /* HSM can't derive this. */ remote_per_commitment_point = this_remote_per_commitment_point; /* BOLT #5: * * # Unilateral Close Handling: Remote Commitment Transaction * * The *remote node's* commitment transaction *resolves* the funding * transaction output. 
* * There are no delays constraining node behavior in this case, so * it's simpler for a node to handle than the case in which it * discovers its local commitment transaction (see [Unilateral Close * Handling: Local Commitment Transaction] */ resolved_by_other(outs[0], &tx->txid, THEIR_UNILATERAL); status_debug("Deriving keyset %"PRIu64 ": per_commit_point=%s" " self_payment_basepoint=%s" " other_payment_basepoint=%s" " self_htlc_basepoint=%s" " other_htlc_basepoint=%s" " self_delayed_basepoint=%s" " other_revocation_basepoint=%s", commit_num, type_to_string(tmpctx, struct pubkey, remote_per_commitment_point), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].payment), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].payment), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].htlc), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].htlc), type_to_string(tmpctx, struct pubkey, &basepoints[REMOTE].delayed_payment), type_to_string(tmpctx, struct pubkey, &basepoints[LOCAL].revocation)); /* keyset is const, we need a non-const ptr to set it up */ keyset = ks = tal(tx, struct keyset); if (!derive_keyset(remote_per_commitment_point, &basepoints[REMOTE], &basepoints[LOCAL], commit_num >= static_remotekey_start[REMOTE], ks)) status_failed(STATUS_FAIL_INTERNAL_ERROR, "Deriving keyset for %"PRIu64, commit_num); status_debug("Deconstructing unilateral tx: %"PRIu64 " using keyset: " " self_revocation_key: %s" " self_delayed_payment_key: %s" " self_payment_key: %s" " other_payment_key: %s" " self_htlc_key: %s" " other_htlc_key: %s", commit_num, type_to_string(tmpctx, struct pubkey, &keyset->self_revocation_key), type_to_string(tmpctx, struct pubkey, &keyset->self_delayed_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->self_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->other_payment_key), type_to_string(tmpctx, struct pubkey, &keyset->self_htlc_key), type_to_string(tmpctx, struct pubkey, &keyset->other_htlc_key)); /* 
Calculate all the HTLC scripts so we can match them */ htlc_scripts = derive_htlc_scripts(htlcs, REMOTE); get_anchor_scriptpubkeys(tmpctx, anchor); for (i = 0; i < tal_count(tx->outputs); i++) { if (tx->outputs[i]->script_len == 0) continue; status_debug("Output %zu: %s", i, tal_hexstr(tmpctx, tx->outputs[i]->script, tx->outputs[i]->script_len)); } remote_wscript = to_self_wscript(tmpctx, to_self_delay[REMOTE], 1, keyset); script[REMOTE] = scriptpubkey_p2wsh(tmpctx, remote_wscript); script[LOCAL] = scriptpubkey_to_remote(tmpctx, &keyset->other_payment_key, 1); status_debug("Script to-them: %u: %s (%s)", to_self_delay[REMOTE], tal_hex(tmpctx, script[REMOTE]), tal_hex(tmpctx, remote_wscript)); status_debug("Script to-me: %s", tal_hex(tmpctx, script[LOCAL])); for (i = 0; i < tal_count(tx->outputs); i++) { struct tracked_output *out; const size_t *matches; size_t which_htlc; struct amount_asset asset = wally_tx_output_get_amount(tx->outputs[i]); struct amount_sat amt; assert(amount_asset_is_main(&asset)); amt = amount_asset_to_sat(&asset); if (chainparams->is_elements && tx->outputs[i]->script_len == 0) { /* An empty script simply means that that this is a * fee output. */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, ELEMENTS_FEE, NULL, NULL, NULL); ignore_output(out); continue; } else if (script[LOCAL] && wally_tx_output_scripteq(tx->outputs[i], script[LOCAL])) { their_unilateral_local(&outs, tx, tx_blockheight, i, amt, script[LOCAL], THEIR_UNILATERAL, is_replay, 1); script[LOCAL] = NULL; add_amt(&our_outs, amt); continue; } if (script[REMOTE] && wally_tx_output_scripteq(tx->outputs[i], script[REMOTE])) { /* BOLT #5: * * - MAY take no action in regard to the associated * `to_local`, which is a payment output to the *remote * node*. * - Note: `to_local` is considered *resolved* by the * commitment transaction itself. 
*/ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, DELAYED_OUTPUT_TO_THEM, NULL, NULL, NULL); ignore_output(out); add_amt(&their_outs, amt); continue; } if (anchor[LOCAL] && wally_tx_output_scripteq(tx->outputs[i], anchor[LOCAL])) { /* FIXME: We should be able to spend this! */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, ANCHOR_TO_US, NULL, NULL, NULL); ignore_output(out); anchor[LOCAL] = NULL; continue; } if (anchor[REMOTE] && wally_tx_output_scripteq(tx->outputs[i], anchor[REMOTE])) { out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, ANCHOR_TO_THEM, NULL, NULL, NULL); ignore_output(out); anchor[REMOTE] = NULL; continue; } matches = match_htlc_output(tmpctx, tx->outputs[i], htlc_scripts); if (tal_count(matches) == 0) { bool found = false; /* We need to hunt for it (option_will_fund?) */ if (opener == REMOTE && script[LOCAL]) { status_debug("Grinding for commitment to_remote" " (ours)"); /* We already tried `1` */ for (size_t csv = 2; csv <= LEASE_RATE_DURATION; csv++) { script[LOCAL] = scriptpubkey_to_remote(tmpctx, &keyset->other_payment_key, csv); if (!wally_tx_output_scripteq( tx->outputs[i], script[LOCAL])) continue; their_unilateral_local(&outs, tx, tx_blockheight, i, amt, script[LOCAL], THEIR_UNILATERAL, is_replay, csv); script[LOCAL] = NULL; add_amt(&our_outs, amt); found = true; break; } } else if (opener == LOCAL && script[REMOTE]) { status_debug("Grinding for commitment to_local" " (theirs)"); /* We already tried `1` */ for (size_t csv = 2; csv <= LEASE_RATE_DURATION; csv++) { remote_wscript = to_self_wscript(tmpctx, to_self_delay[REMOTE], csv, keyset); script[REMOTE] = scriptpubkey_p2wsh(tmpctx, remote_wscript); if (!wally_tx_output_scripteq(tx->outputs[i], script[REMOTE])) continue; out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, DELAYED_OUTPUT_TO_THEM, NULL, NULL, NULL); ignore_output(out); 
add_amt(&their_outs, amt); found = true; break; } } if (found) continue; status_failed(STATUS_FAIL_INTERNAL_ERROR, "Could not find resolution for output %zu", i); } if (matches_direction(matches, htlcs) == LOCAL) { /* BOLT #5: * * - MUST handle HTLCs offered by itself as specified in * [HTLC Output Handling: Remote Commitment, * Local Offers] */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, OUR_HTLC, NULL, NULL, NULL); which_htlc = resolve_our_htlc_theircommit(out, matches, htlcs, htlc_scripts, is_replay); add_amt(&our_outs, amt); } else { out = new_tracked_output(&outs, &tx->txid, tx_blockheight, THEIR_UNILATERAL, i, amt, THEIR_HTLC, NULL, NULL, NULL); /* BOLT #5: * * - MUST handle HTLCs offered by the remote node as * specified in [HTLC Output Handling: Remote * Commitment, Remote Offers] */ which_htlc = resolve_their_htlc(out, matches, htlcs, htlc_scripts, is_replay); add_amt(&their_outs, amt); } out->htlc = htlcs[which_htlc]; out->wscript = tal_steal(out, htlc_scripts[which_htlc]); htlc_scripts[which_htlc] = NULL; } note_missing_htlcs(htlc_scripts, htlcs, tell_if_missing, tell_immediately); if (!is_replay) record_chain_fees_unilateral(&tx->txid, tx_blockheight, outs[0]->sat, their_outs, our_outs); wait_for_resolved(outs); } static void update_ledger_unknown(const struct bitcoin_txid *txid, u32 blockheight, struct amount_sat amt_salvaged) { /* ideally, we'd be able to capture the loss to fees (if we funded * the channel) here separately, but given that we don't know the htlc * set (and thus which outputs are trimmed), this is difficult. 
* * instead, we count the difference between any recoverable output * and our current channel balance as a loss (or gain) */ bool is_credit; struct amount_msat diff; /* we do nothing if the amount withdrawn via 'salvage' is * the same as our channel balance */ if (amount_msat_eq_sat(our_msat, amt_salvaged)) return; /* if we've withdrawn *less* in salvage than we have on the books * as being ours, we record the difference as a debit */ if (!amount_msat_sub_sat(&diff, our_msat, amt_salvaged)) { is_credit = false; if (!amount_sat_sub_msat(&diff, amt_salvaged, our_msat)) status_failed(STATUS_FAIL_INTERNAL_ERROR, "overflow subtracting %s from %s", type_to_string(tmpctx, struct amount_msat, &our_msat), type_to_string(tmpctx, struct amount_sat, &amt_salvaged)); } else is_credit = true; send_coin_mvt(take(new_coin_journal_entry(NULL, NULL, txid, NULL, 0, blockheight, diff, is_credit))); } static void handle_unknown_commitment(const struct tx_parts *tx, u32 tx_blockheight, const struct pubkey *possible_remote_per_commitment_point, const struct basepoints basepoints[NUM_SIDES], const struct htlc_stub *htlcs, const bool *tell_if_missing, struct tracked_output **outs, bool is_replay) { int to_us_output = -1; /* We have two possible local scripts, depending on options */ u8 *local_scripts[2]; struct amount_sat amt_salvaged = AMOUNT_SAT(0); onchain_annotate_txin(&tx->txid, 0, TX_CHANNEL_UNILATERAL | TX_THEIRS); resolved_by_other(outs[0], &tx->txid, UNKNOWN_UNILATERAL); /* This is the not-option_static_remotekey case, if we got a hint * from them about the per-commitment point */ if (possible_remote_per_commitment_point) { struct keyset *ks = tal(tmpctx, struct keyset); if (!derive_keyset(possible_remote_per_commitment_point, &basepoints[REMOTE], &basepoints[LOCAL], false, ks)) status_failed(STATUS_FAIL_INTERNAL_ERROR, "Deriving keyset for possible_remote_per_commitment_point %s", type_to_string(tmpctx, struct pubkey, possible_remote_per_commitment_point)); local_scripts[0] = 
scriptpubkey_p2wpkh(tmpctx, &ks->other_payment_key); } else { local_scripts[0] = NULL; } /* For option_will_fund, we need to figure out what CSV lock was used */ for (size_t csv = 1; csv <= LEASE_RATE_DURATION; csv++) { /* Other possible local script is for option_static_remotekey */ local_scripts[1] = scriptpubkey_to_remote(tmpctx, &basepoints[LOCAL].payment, csv); for (size_t i = 0; i < tal_count(tx->outputs); i++) { struct tracked_output *out; struct amount_asset asset = wally_tx_output_get_amount(tx->outputs[i]); struct amount_sat amt; int which_script; assert(amount_asset_is_main(&asset)); amt = amount_asset_to_sat(&asset); /* Elements can have empty output scripts (fee output) */ if (local_scripts[0] && wally_tx_output_scripteq(tx->outputs[i], local_scripts[0])) which_script = 0; else if (local_scripts[1] && wally_tx_output_scripteq(tx->outputs[i], local_scripts[1])) which_script = 1; else continue; /* BOLT #5: * * - MAY take no action in regard to the associated * `to_remote`, which is simply a P2WPKH output to * the *local node*. * - Note: `to_remote` is considered *resolved* by the * commitment transaction itself. */ out = new_tracked_output(&outs, &tx->txid, tx_blockheight, UNKNOWN_UNILATERAL, i, amt, OUTPUT_TO_US, NULL, NULL, NULL); ignore_output(out); if (!is_replay) record_channel_withdrawal(&tx->txid, tx_blockheight, out); add_amt(&amt_salvaged, amt); tell_wallet_to_remote(tx, i, tx_blockheight, local_scripts[which_script], possible_remote_per_commitment_point, which_script == 1, csv); local_scripts[0] = local_scripts[1] = NULL; to_us_output = i; goto script_found; } } script_found: if (to_us_output == -1) { status_broken("FUNDS LOST. Unknown commitment #%"PRIu64"!", commit_num); init_reply("ERROR: FUNDS LOST. Unknown commitment!"); } else { status_broken("ERROR: Unknown commitment #%"PRIu64 ", recovering our funds!", commit_num); init_reply("ERROR: Unknown commitment, recovering our funds!"); } /* update our accounting notions for this channel. 
* should result in a channel balance of zero */ if (!is_replay) update_ledger_unknown(&tx->txid, tx_blockheight, amt_salvaged); /* Tell master to give up on HTLCs immediately. */ for (size_t i = 0; i < tal_count(htlcs); i++) { u8 *msg; if (!tell_if_missing[i]) continue; msg = towire_onchaind_missing_htlc_output(NULL, &htlcs[i]); wire_sync_write(REQ_FD, take(msg)); } wait_for_resolved(outs); } static int cmp_htlc_cltv(const struct htlc_stub *a, const struct htlc_stub *b, void *unused) { if (a->cltv_expiry < b->cltv_expiry) return -1; else if (a->cltv_expiry > b->cltv_expiry) return 1; return 0; } int main(int argc, char *argv[]) { setup_locale(); const tal_t *ctx = tal(NULL, char); u8 *msg; struct pubkey remote_per_commit_point, old_remote_per_commit_point; enum side opener; struct basepoints basepoints[NUM_SIDES]; struct shachain shachain; struct tx_parts *tx; struct tracked_output **outs; struct bitcoin_txid our_broadcast_txid, tmptxid; struct bitcoin_signature *remote_htlc_sigs; struct amount_sat funding; u64 num_htlcs; u8 *scriptpubkey[NUM_SIDES]; struct htlc_stub *htlcs; bool *tell_if_missing, *tell_immediately; u32 locktime, tx_blockheight; struct pubkey *possible_remote_per_commitment_point; int mutual_outnum; bool open_is_replay; subdaemon_setup(argc, argv); status_setup_sync(REQ_FD); missing_htlc_msgs = tal_arr(ctx, u8 *, 0); msg = wire_sync_read(tmpctx, REQ_FD); if (!fromwire_onchaind_init(tmpctx, msg, &shachain, &chainparams, &funding, &our_msat, &old_remote_per_commit_point, &remote_per_commit_point, &to_self_delay[LOCAL], &to_self_delay[REMOTE], &delayed_to_us_feerate, &htlc_feerate, &penalty_feerate, &dust_limit, &our_broadcast_txid, &scriptpubkey[LOCAL], &scriptpubkey[REMOTE], &our_wallet_pubkey, &opener, &basepoints[LOCAL], &basepoints[REMOTE], &tx, &locktime, &tx_blockheight, &reasonable_depth, &remote_htlc_sigs, &num_htlcs, &min_possible_feerate, &max_possible_feerate, &possible_remote_per_commitment_point, &funding_pubkey[LOCAL], 
&funding_pubkey[REMOTE], &static_remotekey_start[LOCAL], &static_remotekey_start[REMOTE], &option_anchor_outputs, &open_is_replay, &min_relay_feerate)) { master_badmsg(WIRE_ONCHAIND_INIT, msg); } status_debug("delayed_to_us_feerate = %u, htlc_feerate = %u, " "penalty_feerate = %u", delayed_to_us_feerate, htlc_feerate, penalty_feerate); /* We need to keep tx around, but there's only one: not really a leak */ tal_steal(ctx, notleak(tx)); /* FIXME: Filter as we go, don't load them all into mem! */ htlcs = tal_arr(tmpctx, struct htlc_stub, num_htlcs); tell_if_missing = tal_arr(htlcs, bool, num_htlcs); tell_immediately = tal_arr(htlcs, bool, num_htlcs); if (!htlcs || !tell_if_missing || !tell_immediately) status_failed(STATUS_FAIL_INTERNAL_ERROR, "Can't allocate %"PRIu64" htlcs", num_htlcs); for (u64 i = 0; i < num_htlcs; i++) { msg = wire_sync_read(tmpctx, REQ_FD); if (!fromwire_onchaind_htlc(msg, &htlcs[i], &tell_if_missing[i], &tell_immediately[i])) master_badmsg(WIRE_ONCHAIND_HTLC, msg); } /* Sort by CLTV, so matches are in CLTV order (and easy to skip dups) */ asort(htlcs, tal_count(htlcs), cmp_htlc_cltv, NULL); outs = tal_arr(ctx, struct tracked_output *, 0); wally_tx_input_get_txid(tx->inputs[0], &tmptxid); new_tracked_output(&outs, &tmptxid, 0, /* We don't care about funding blockheight */ FUNDING_TRANSACTION, tx->inputs[0]->index, funding, FUNDING_OUTPUT, NULL, NULL, NULL); status_debug("Remote per-commit point: %s", type_to_string(tmpctx, struct pubkey, &remote_per_commit_point)); status_debug("Old remote per-commit point: %s", type_to_string(tmpctx, struct pubkey, &old_remote_per_commit_point)); trim_maximum_feerate(funding, tx); /* BOLT #5: * * There are three ways a channel can end: * * 1. The good way (*mutual close*): at some point the local and * remote nodes agree to close the channel. 
They generate a *closing * transaction* (which is similar to a commitment transaction, but * without any pending payments) and publish it on the blockchain (see * [BOLT #2: Channel Close](02-peer-protocol.md#channel-close)). */ if (is_mutual_close(tx, scriptpubkey[LOCAL], scriptpubkey[REMOTE], &mutual_outnum)) handle_mutual_close(outs, tx, tx_blockheight, mutual_outnum, open_is_replay); else { /* BOLT #5: * * 2. The bad way (*unilateral close*): something goes wrong, * possibly without evil intent on either side. Perhaps one * party crashed, for instance. One side publishes its * *latest commitment transaction*. */ struct secret revocation_preimage; commit_num = unmask_commit_number(tx, locktime, opener, &basepoints[LOCAL].payment, &basepoints[REMOTE].payment); status_debug("commitnum = %"PRIu64 ", revocations_received = %"PRIu64, commit_num, revocations_received(&shachain)); if (is_local_commitment(&tx->txid, &our_broadcast_txid)) handle_our_unilateral(tx, tx_blockheight, basepoints, htlcs, tell_if_missing, tell_immediately, opener, remote_htlc_sigs, outs, open_is_replay); /* BOLT #5: * * 3. The ugly way (*revoked transaction close*): one of the * parties deliberately tries to cheat, by publishing an * *outdated commitment transaction* (presumably, a prior * version, which is more in its favor). */ else if (shachain_get_secret(&shachain, commit_num, &revocation_preimage)) { handle_their_cheat(tx, tx_blockheight, &revocation_preimage, basepoints, htlcs, tell_if_missing, tell_immediately, opener, outs, open_is_replay); /* BOLT #5: * * There may be more than one valid, *unrevoked* commitment * transaction after a signature has been received via * `commitment_signed` and before the corresponding * `revoke_and_ack`. As such, either commitment may serve as * the *remote node's* commitment transaction; hence, the * local node is required to handle both. 
*/ } else if (commit_num == revocations_received(&shachain)) { status_debug("Their unilateral tx, old commit point"); handle_their_unilateral(tx, tx_blockheight, &old_remote_per_commit_point, basepoints, htlcs, tell_if_missing, tell_immediately, opener, outs, open_is_replay); } else if (commit_num == revocations_received(&shachain) + 1) { status_debug("Their unilateral tx, new commit point"); handle_their_unilateral(tx, tx_blockheight, &remote_per_commit_point, basepoints, htlcs, tell_if_missing, tell_immediately, opener, outs, open_is_replay); } else { handle_unknown_commitment(tx, tx_blockheight, possible_remote_per_commitment_point, basepoints, htlcs, tell_if_missing, outs, open_is_replay); } } /* We're done! */ tal_free(ctx); daemon_shutdown(); return 0; }
/* ==== 435570.c ==== */
/*
 * POK header
 *
 * The following file is a part of the POK project. Any modification should
 * be made according to the POK licence. You CANNOT use this file or a part
 * of a file for your own project.
 *
 * For more information on the POK licence, please see our LICENCE FILE
 *
 * Please follow the coding guidelines described in doc/CODING_GUIDELINES
 *
 * Copyright (c) 2007-2022 POK team
 */

#include <libc/string.h>

/*
 * strlen - count the bytes of the NUL-terminated string @s, excluding
 * the terminating '\0'.
 *
 * @s: pointer to a valid NUL-terminated string (must not be NULL).
 *
 * Returns the number of bytes before the terminator.
 *
 * Portable replacement for the previous x86 `repne scasb` inline-asm
 * version, which accumulated the length in a *signed* `int` despite the
 * `size_t` return type and limited the scan to the 0xffffffff counter
 * sentinel. A plain loop has the correct type throughout, works on any
 * target, and compiles to code at least as fast as `rep scasb` on
 * modern CPUs.
 */
size_t strlen(const char *s)
{
	const char *p = s;

	/* Advance until the terminating NUL byte. */
	while (*p != '\0')
		p++;

	/* Pointer difference is the character count. */
	return (size_t)(p - s);
}
/* ==== 412236.c ==== */
/** ****************************************************************************** * @file stm32wbxx_hal_irda.c * @author MCD Application Team * @brief IRDA HAL module driver. * This file provides firmware functions to manage the following * functionalities of the IrDA (Infrared Data Association) Peripheral * (IRDA) * + Initialization and de-initialization functions * + IO operation functions * + Peripheral State and Errors functions * + Peripheral Control functions * @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] The IRDA HAL driver can be used as follows: (#) Declare a IRDA_HandleTypeDef handle structure (eg. IRDA_HandleTypeDef hirda). (#) Initialize the IRDA low level resources by implementing the HAL_IRDA_MspInit() API in setting the associated USART or UART in IRDA mode: (++) Enable the USARTx/UARTx interface clock. (++) USARTx/UARTx pins configuration: (+++) Enable the clock for the USARTx/UARTx GPIOs. (+++) Configure these USARTx/UARTx pins (TX as alternate function pull-up, RX as alternate function Input). (++) NVIC configuration if you need to use interrupt process (HAL_IRDA_Transmit_IT() and HAL_IRDA_Receive_IT() APIs): (+++) Configure the USARTx/UARTx interrupt priority. (+++) Enable the NVIC USARTx/UARTx IRQ handle. (+++) The specific IRDA interrupts (Transmission complete interrupt, RXNE interrupt and Error Interrupts) will be managed using the macros __HAL_IRDA_ENABLE_IT() and __HAL_IRDA_DISABLE_IT() inside the transmit and receive process. (++) DMA Configuration if you need to use DMA process (HAL_IRDA_Transmit_DMA() and HAL_IRDA_Receive_DMA() APIs): (+++) Declare a DMA handle structure for the Tx/Rx channel. (+++) Enable the DMAx interface clock. (+++) Configure the declared DMA handle structure with the required Tx/Rx parameters. (+++) Configure the DMA Tx/Rx channel. 
(+++) Associate the initialized DMA handle to the IRDA DMA Tx/Rx handle. (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx/Rx channel. (#) Program the Baud Rate, Word Length and Parity and Mode(Receiver/Transmitter), the normal or low power mode and the clock prescaler in the hirda handle Init structure. (#) Initialize the IRDA registers by calling the HAL_IRDA_Init() API: (++) This API configures also the low level Hardware GPIO, CLOCK, CORTEX...etc) by calling the customized HAL_IRDA_MspInit() API. -@@- The specific IRDA interrupts (Transmission complete interrupt, RXNE interrupt and Error Interrupts) will be managed using the macros __HAL_IRDA_ENABLE_IT() and __HAL_IRDA_DISABLE_IT() inside the transmit and receive process. (#) Three operation modes are available within this driver : *** Polling mode IO operation *** ================================= [..] (+) Send an amount of data in blocking mode using HAL_IRDA_Transmit() (+) Receive an amount of data in blocking mode using HAL_IRDA_Receive() *** Interrupt mode IO operation *** =================================== [..] (+) Send an amount of data in non-blocking mode using HAL_IRDA_Transmit_IT() (+) At transmission end of transfer HAL_IRDA_TxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_TxCpltCallback() (+) Receive an amount of data in non-blocking mode using HAL_IRDA_Receive_IT() (+) At reception end of transfer HAL_IRDA_RxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_RxCpltCallback() (+) In case of transfer Error, HAL_IRDA_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_IRDA_ErrorCallback() *** DMA mode IO operation *** ============================== [..] 
(+) Send an amount of data in non-blocking mode (DMA) using HAL_IRDA_Transmit_DMA() (+) At transmission half of transfer HAL_IRDA_TxHalfCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_TxHalfCpltCallback() (+) At transmission end of transfer HAL_IRDA_TxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_TxCpltCallback() (+) Receive an amount of data in non-blocking mode (DMA) using HAL_IRDA_Receive_DMA() (+) At reception half of transfer HAL_IRDA_RxHalfCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_RxHalfCpltCallback() (+) At reception end of transfer HAL_IRDA_RxCpltCallback() is executed and user can add his own code by customization of function pointer HAL_IRDA_RxCpltCallback() (+) In case of transfer Error, HAL_IRDA_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_IRDA_ErrorCallback() *** IRDA HAL driver macros list *** ==================================== [..] Below the list of most used macros in IRDA HAL driver. (+) __HAL_IRDA_ENABLE: Enable the IRDA peripheral (+) __HAL_IRDA_DISABLE: Disable the IRDA peripheral (+) __HAL_IRDA_GET_FLAG : Check whether the specified IRDA flag is set or not (+) __HAL_IRDA_CLEAR_FLAG : Clear the specified IRDA pending flag (+) __HAL_IRDA_ENABLE_IT: Enable the specified IRDA interrupt (+) __HAL_IRDA_DISABLE_IT: Disable the specified IRDA interrupt (+) __HAL_IRDA_GET_IT_SOURCE: Check whether or not the specified IRDA interrupt is enabled [..] (@) You can refer to the IRDA HAL driver header file for more useful macros ##### Callback registration ##### ================================== [..] The compilation define USE_HAL_IRDA_REGISTER_CALLBACKS when set to 1 allows the user to configure dynamically the driver callbacks. [..] Use Function @ref HAL_IRDA_RegisterCallback() to register a user callback. 
Function @ref HAL_IRDA_RegisterCallback() allows to register following callbacks: (+) TxHalfCpltCallback : Tx Half Complete Callback. (+) TxCpltCallback : Tx Complete Callback. (+) RxHalfCpltCallback : Rx Half Complete Callback. (+) RxCpltCallback : Rx Complete Callback. (+) ErrorCallback : Error Callback. (+) AbortCpltCallback : Abort Complete Callback. (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. (+) MspInitCallback : IRDA MspInit. (+) MspDeInitCallback : IRDA MspDeInit. This function takes as parameters the HAL peripheral handle, the Callback ID and a pointer to the user callback function. [..] Use function @ref HAL_IRDA_UnRegisterCallback() to reset a callback to the default weak (surcharged) function. @ref HAL_IRDA_UnRegisterCallback() takes as parameters the HAL peripheral handle, and the Callback ID. This function allows to reset following callbacks: (+) TxHalfCpltCallback : Tx Half Complete Callback. (+) TxCpltCallback : Tx Complete Callback. (+) RxHalfCpltCallback : Rx Half Complete Callback. (+) RxCpltCallback : Rx Complete Callback. (+) ErrorCallback : Error Callback. (+) AbortCpltCallback : Abort Complete Callback. (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. (+) MspInitCallback : IRDA MspInit. (+) MspDeInitCallback : IRDA MspDeInit. [..] By default, after the @ref HAL_IRDA_Init() and when the state is HAL_IRDA_STATE_RESET all callbacks are set to the corresponding weak (surcharged) functions: examples @ref HAL_IRDA_TxCpltCallback(), @ref HAL_IRDA_RxHalfCpltCallback(). Exception done for MspInit and MspDeInit functions that are respectively reset to the legacy weak (surcharged) functions in the @ref HAL_IRDA_Init() and @ref HAL_IRDA_DeInit() only when these callbacks are null (not registered beforehand). 
    If not (i.e. when MspInit or MspDeInit are not null), the @ref HAL_IRDA_Init() and @ref HAL_IRDA_DeInit()
    keep and use the user MspInit/MspDeInit callbacks (registered beforehand).

    [..]
    Callbacks can be registered/unregistered in HAL_IRDA_STATE_READY state only.
    An exception is made for MspInit/MspDeInit, which can be registered/unregistered
    in HAL_IRDA_STATE_READY or HAL_IRDA_STATE_RESET state,
    thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit.
    In that case first register the MspInit/MspDeInit user callbacks
    using @ref HAL_IRDA_RegisterCallback() before calling @ref HAL_IRDA_DeInit()
    or @ref HAL_IRDA_Init() function.

    [..]
    When the compilation define USE_HAL_IRDA_REGISTER_CALLBACKS is set to 0 or
    not defined, the callback registration feature is not available and weak (surcharged) callbacks are used.

  @endverbatim
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
  * All rights reserved.</center></h2>
  *
  * This software component is licensed by ST under BSD 3-Clause license,
  * the "License"; You may not use this file except in compliance with the
  * License.
You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32wbxx_hal.h" /** @addtogroup STM32WBxx_HAL_Driver * @{ */ /** @defgroup IRDA IRDA * @brief HAL IRDA module driver * @{ */ #ifdef HAL_IRDA_MODULE_ENABLED /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /** @defgroup IRDA_Private_Constants IRDA Private Constants * @{ */ #define IRDA_TEACK_REACK_TIMEOUT 1000U /*!< IRDA TX or RX enable acknowledge time-out value */ #define IRDA_CR1_FIELDS ((uint32_t)(USART_CR1_M | USART_CR1_PCE \ | USART_CR1_PS | USART_CR1_TE | USART_CR1_RE)) /*!< UART or USART CR1 fields of parameters set by IRDA_SetConfig API */ #define USART_BRR_MIN 0x10U /*!< USART BRR minimum authorized value */ #define USART_BRR_MAX 0x0000FFFFU /*!< USART BRR maximum authorized value */ /** * @} */ /* Private macros ------------------------------------------------------------*/ /** @defgroup IRDA_Private_Macros IRDA Private Macros * @{ */ /** @brief BRR division operation to set BRR register in 16-bit oversampling mode. * @param __PCLK__ IRDA clock source. * @param __BAUD__ Baud rate set by the user. * @param __PRESCALER__ IRDA clock prescaler value. 
* @retval Division result */ #define IRDA_DIV_SAMPLING16(__PCLK__, __BAUD__, __PRESCALER__) ((((__PCLK__)/IRDAPrescTable[(__PRESCALER__)]) + ((__BAUD__)/2U)) / (__BAUD__)) /** * @} */ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /** @addtogroup IRDA_Private_Functions * @{ */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) void IRDA_InitCallbacksToDefault(IRDA_HandleTypeDef *hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */ static HAL_StatusTypeDef IRDA_SetConfig(IRDA_HandleTypeDef *hirda); static HAL_StatusTypeDef IRDA_CheckIdleState(IRDA_HandleTypeDef *hirda); static HAL_StatusTypeDef IRDA_WaitOnFlagUntilTimeout(IRDA_HandleTypeDef *hirda, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout); static void IRDA_EndTxTransfer(IRDA_HandleTypeDef *hirda); static void IRDA_EndRxTransfer(IRDA_HandleTypeDef *hirda); static void IRDA_DMATransmitCplt(DMA_HandleTypeDef *hdma); static void IRDA_DMATransmitHalfCplt(DMA_HandleTypeDef *hdma); static void IRDA_DMAReceiveCplt(DMA_HandleTypeDef *hdma); static void IRDA_DMAReceiveHalfCplt(DMA_HandleTypeDef *hdma); static void IRDA_DMAError(DMA_HandleTypeDef *hdma); static void IRDA_DMAAbortOnError(DMA_HandleTypeDef *hdma); static void IRDA_DMATxAbortCallback(DMA_HandleTypeDef *hdma); static void IRDA_DMARxAbortCallback(DMA_HandleTypeDef *hdma); static void IRDA_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma); static void IRDA_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); static void IRDA_Transmit_IT(IRDA_HandleTypeDef *hirda); static void IRDA_EndTransmit_IT(IRDA_HandleTypeDef *hirda); static void IRDA_Receive_IT(IRDA_HandleTypeDef *hirda); /** * @} */ /* Exported functions --------------------------------------------------------*/ /** @defgroup IRDA_Exported_Functions IRDA Exported Functions * @{ */ /** @defgroup IRDA_Exported_Functions_Group1 Initialization and de-initialization functions * @brief 
Initialization and Configuration functions * @verbatim ============================================================================== ##### Initialization and Configuration functions ##### ============================================================================== [..] This subsection provides a set of functions allowing to initialize the USARTx in asynchronous IRDA mode. (+) For the asynchronous mode only these parameters can be configured: (++) Baud Rate (++) Word Length (++) Parity: If the parity is enabled, then the MSB bit of the data written in the data register is transmitted but is changed by the parity bit. (++) Power mode (++) Prescaler setting (++) Receiver/transmitter modes [..] The HAL_IRDA_Init() API follows the USART asynchronous configuration procedures (details for the procedures are available in reference manual). @endverbatim Depending on the frame length defined by the M1 and M0 bits (7-bit, 8-bit or 9-bit), the possible IRDA frame formats are listed in the following table. Table 1. IRDA frame format. 
+-----------------------------------------------------------------------+ | M1 bit | M0 bit | PCE bit | IRDA frame | |---------|---------|-----------|---------------------------------------| | 0 | 0 | 0 | | SB | 8 bit data | STB | | |---------|---------|-----------|---------------------------------------| | 0 | 0 | 1 | | SB | 7 bit data | PB | STB | | |---------|---------|-----------|---------------------------------------| | 0 | 1 | 0 | | SB | 9 bit data | STB | | |---------|---------|-----------|---------------------------------------| | 0 | 1 | 1 | | SB | 8 bit data | PB | STB | | |---------|---------|-----------|---------------------------------------| | 1 | 0 | 0 | | SB | 7 bit data | STB | | |---------|---------|-----------|---------------------------------------| | 1 | 0 | 1 | | SB | 6 bit data | PB | STB | | +-----------------------------------------------------------------------+ * @{ */ /** * @brief Initialize the IRDA mode according to the specified * parameters in the IRDA_InitTypeDef and initialize the associated handle. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. 
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Init(IRDA_HandleTypeDef *hirda)
{
  /* Check the IRDA handle allocation */
  if (hirda == NULL)
  {
    return HAL_ERROR;
  }

  /* Check the USART/UART associated to the IRDA handle */
  assert_param(IS_IRDA_INSTANCE(hirda->Instance));

  if (hirda->gState == HAL_IRDA_STATE_RESET)
  {
    /* Allocate lock resource and initialize it */
    hirda->Lock = HAL_UNLOCKED;

#if USE_HAL_IRDA_REGISTER_CALLBACKS == 1
    /* Point all user callbacks to the legacy weak (surcharged) functions
       before giving MspInit a chance to override them */
    IRDA_InitCallbacksToDefault(hirda);

    if (hirda->MspInitCallback == NULL)
    {
      hirda->MspInitCallback = HAL_IRDA_MspInit;
    }

    /* Init the low level hardware */
    hirda->MspInitCallback(hirda);
#else
    /* Init the low level hardware : GPIO, CLOCK */
    HAL_IRDA_MspInit(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
  }

  hirda->gState = HAL_IRDA_STATE_BUSY;

  /* Disable the Peripheral to update the configuration registers */
  __HAL_IRDA_DISABLE(hirda);

  /* Set the IRDA Communication parameters */
  if (IRDA_SetConfig(hirda) == HAL_ERROR)
  {
    return HAL_ERROR;
  }

  /* In IRDA mode, the following bits must be kept cleared:
     - LINEN, STOP and CLKEN bits in the USART_CR2 register,
     - SCEN and HDSEL bits in the USART_CR3 register.*/
  CLEAR_BIT(hirda->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN | USART_CR2_STOP));
  CLEAR_BIT(hirda->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL));

  /* set the UART/USART in IRDA mode */
  hirda->Instance->CR3 |= USART_CR3_IREN;

  /* Enable the Peripheral */
  __HAL_IRDA_ENABLE(hirda);

  /* TEACK and/or REACK to check before moving hirda->gState and hirda->RxState to Ready */
  return (IRDA_CheckIdleState(hirda));
}

/**
  * @brief DeInitialize the IRDA peripheral.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_DeInit(IRDA_HandleTypeDef *hirda)
{
  /* Check the IRDA handle allocation */
  if (hirda == NULL)
  {
    return HAL_ERROR;
  }

  /* Check the USART/UART associated to the IRDA handle */
  assert_param(IS_IRDA_INSTANCE(hirda->Instance));

  hirda->gState = HAL_IRDA_STATE_BUSY;

  /* DeInit the low level hardware */
#if USE_HAL_IRDA_REGISTER_CALLBACKS == 1
  /* Fall back to the legacy weak MspDeInit when the user registered none */
  if (hirda->MspDeInitCallback == NULL)
  {
    hirda->MspDeInitCallback = HAL_IRDA_MspDeInit;
  }
  /* DeInit the low level hardware */
  hirda->MspDeInitCallback(hirda);
#else
  HAL_IRDA_MspDeInit(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
  /* Disable the Peripheral */
  __HAL_IRDA_DISABLE(hirda);

  hirda->ErrorCode = HAL_IRDA_ERROR_NONE;
  hirda->gState    = HAL_IRDA_STATE_RESET;
  hirda->RxState   = HAL_IRDA_STATE_RESET;

  /* Process Unlock */
  __HAL_UNLOCK(hirda);

  return HAL_OK;
}

/**
  * @brief Initialize the IRDA MSP.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_MspInit(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE: This function should not be modified, when the callback is needed,
           the HAL_IRDA_MspInit can be implemented in the user file
   */
}

/**
  * @brief DeInitialize the IRDA MSP.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_MspDeInit(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE: This function should not be modified, when the callback is needed,
           the HAL_IRDA_MspDeInit can be implemented in the user file
   */
}

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
/**
  * @brief  Register a User IRDA Callback
  *         To be used instead of the weak predefined callback
  * @param  hirda irda handle
  * @param  CallbackID ID of the callback to be registered
  *         This parameter can be one of the following values:
  *           @arg @ref HAL_IRDA_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID
  *           @arg @ref HAL_IRDA_TX_COMPLETE_CB_ID Tx Complete Callback ID
  *           @arg @ref HAL_IRDA_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID
  *           @arg @ref HAL_IRDA_RX_COMPLETE_CB_ID Rx Complete Callback ID
  *           @arg @ref HAL_IRDA_ERROR_CB_ID Error Callback ID
  *           @arg @ref HAL_IRDA_ABORT_COMPLETE_CB_ID Abort Complete Callback ID
  *           @arg @ref HAL_IRDA_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID
  *           @arg @ref HAL_IRDA_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID
  *           @arg @ref HAL_IRDA_MSPINIT_CB_ID MspInit Callback ID
  *           @arg @ref HAL_IRDA_MSPDEINIT_CB_ID MspDeInit Callback ID
  * @param  pCallback pointer to the Callback function
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_RegisterCallback(IRDA_HandleTypeDef *hirda, HAL_IRDA_CallbackIDTypeDef CallbackID,
                                            pIRDA_CallbackTypeDef pCallback)
{
  HAL_StatusTypeDef status = HAL_OK;

  /* A NULL callback pointer is rejected up front */
  if (pCallback == NULL)
  {
    /* Update the error code */
    hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

    return HAL_ERROR;
  }
  /* Process locked */
  __HAL_LOCK(hirda);

  if (hirda->gState == HAL_IRDA_STATE_READY)
  {
    /* In READY state any of the callbacks may be replaced */
    switch (CallbackID)
    {
      case HAL_IRDA_TX_HALFCOMPLETE_CB_ID :
        hirda->TxHalfCpltCallback = pCallback;
        break;

      case HAL_IRDA_TX_COMPLETE_CB_ID :
        hirda->TxCpltCallback = pCallback;
        break;

      case HAL_IRDA_RX_HALFCOMPLETE_CB_ID :
        hirda->RxHalfCpltCallback = pCallback;
        break;

      case HAL_IRDA_RX_COMPLETE_CB_ID :
        hirda->RxCpltCallback = pCallback;
        break;

      case HAL_IRDA_ERROR_CB_ID :
        hirda->ErrorCallback = pCallback;
        break;

      case HAL_IRDA_ABORT_COMPLETE_CB_ID :
        hirda->AbortCpltCallback = pCallback;
        break;

      case HAL_IRDA_ABORT_TRANSMIT_COMPLETE_CB_ID :
        hirda->AbortTransmitCpltCallback = pCallback;
        break;

      case HAL_IRDA_ABORT_RECEIVE_COMPLETE_CB_ID :
        hirda->AbortReceiveCpltCallback = pCallback;
        break;

      case HAL_IRDA_MSPINIT_CB_ID :
        hirda->MspInitCallback = pCallback;
        break;

      case HAL_IRDA_MSPDEINIT_CB_ID :
        hirda->MspDeInitCallback = pCallback;
        break;

      default :
        /* Update the error code */
        hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

        /* Return error status */
        status = HAL_ERROR;
        break;
    }
  }
  else if (hirda->gState == HAL_IRDA_STATE_RESET)
  {
    /* In RESET state only the Msp init/deinit callbacks may be registered */
    switch (CallbackID)
    {
      case HAL_IRDA_MSPINIT_CB_ID :
        hirda->MspInitCallback = pCallback;
        break;

      case HAL_IRDA_MSPDEINIT_CB_ID :
        hirda->MspDeInitCallback = pCallback;
        break;

      default :
        /* Update the error code */
        hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

        /* Return error status */
        status = HAL_ERROR;
        break;
    }
  }
  else
  {
    /* Registration refused in any other (busy/error) state */
    /* Update the error code */
    hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

    /* Return error status */
    status = HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hirda);

  return status;
}

/**
  * @brief  Unregister an IRDA callback
  *         IRDA callback is redirected to the weak predefined callback
  * @param  hirda irda handle
  * @param  CallbackID ID of the callback to be unregistered
  *         This parameter can be one of the following values:
  *           @arg @ref HAL_IRDA_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID
  *           @arg @ref HAL_IRDA_TX_COMPLETE_CB_ID Tx Complete Callback ID
  *           @arg @ref HAL_IRDA_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID
  *           @arg @ref HAL_IRDA_RX_COMPLETE_CB_ID Rx Complete Callback ID
  *           @arg @ref HAL_IRDA_ERROR_CB_ID Error Callback ID
  *           @arg @ref HAL_IRDA_ABORT_COMPLETE_CB_ID Abort Complete Callback ID
  *           @arg @ref HAL_IRDA_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID
  *
  *           @arg @ref HAL_IRDA_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID
  *           @arg @ref HAL_IRDA_MSPINIT_CB_ID MspInit Callback ID
  *           @arg @ref HAL_IRDA_MSPDEINIT_CB_ID MspDeInit Callback ID
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_UnRegisterCallback(IRDA_HandleTypeDef *hirda, HAL_IRDA_CallbackIDTypeDef CallbackID)
{
  HAL_StatusTypeDef status = HAL_OK;

  /* Process locked */
  __HAL_LOCK(hirda);

  if (HAL_IRDA_STATE_READY == hirda->gState)
  {
    /* In READY state any callback may be reset to its legacy weak default */
    switch (CallbackID)
    {
      case HAL_IRDA_TX_HALFCOMPLETE_CB_ID :
        hirda->TxHalfCpltCallback = HAL_IRDA_TxHalfCpltCallback;               /* Legacy weak TxHalfCpltCallback */
        break;

      case HAL_IRDA_TX_COMPLETE_CB_ID :
        hirda->TxCpltCallback = HAL_IRDA_TxCpltCallback;                       /* Legacy weak TxCpltCallback */
        break;

      case HAL_IRDA_RX_HALFCOMPLETE_CB_ID :
        hirda->RxHalfCpltCallback = HAL_IRDA_RxHalfCpltCallback;               /* Legacy weak RxHalfCpltCallback */
        break;

      case HAL_IRDA_RX_COMPLETE_CB_ID :
        hirda->RxCpltCallback = HAL_IRDA_RxCpltCallback;                       /* Legacy weak RxCpltCallback */
        break;

      case HAL_IRDA_ERROR_CB_ID :
        hirda->ErrorCallback = HAL_IRDA_ErrorCallback;                         /* Legacy weak ErrorCallback */
        break;

      case HAL_IRDA_ABORT_COMPLETE_CB_ID :
        hirda->AbortCpltCallback = HAL_IRDA_AbortCpltCallback;                 /* Legacy weak AbortCpltCallback */
        break;

      case HAL_IRDA_ABORT_TRANSMIT_COMPLETE_CB_ID :
        hirda->AbortTransmitCpltCallback = HAL_IRDA_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */
        break;

      case HAL_IRDA_ABORT_RECEIVE_COMPLETE_CB_ID :
        hirda->AbortReceiveCpltCallback = HAL_IRDA_AbortReceiveCpltCallback;   /* Legacy weak AbortReceiveCpltCallback */
        break;

      case HAL_IRDA_MSPINIT_CB_ID :
        hirda->MspInitCallback = HAL_IRDA_MspInit;                             /* Legacy weak MspInitCallback */
        break;

      case HAL_IRDA_MSPDEINIT_CB_ID :
        hirda->MspDeInitCallback = HAL_IRDA_MspDeInit;                         /* Legacy weak MspDeInitCallback */
        break;

      default :
        /* Update the error code */
        hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

        /* Return error status */
        status = HAL_ERROR;
        break;
    }
  }
  else if (HAL_IRDA_STATE_RESET == hirda->gState)
  {
    /* In RESET state only the Msp init/deinit callbacks may be unregistered */
    switch (CallbackID)
    {
      case HAL_IRDA_MSPINIT_CB_ID :
        hirda->MspInitCallback = HAL_IRDA_MspInit;
        break;

      case HAL_IRDA_MSPDEINIT_CB_ID :
        hirda->MspDeInitCallback = HAL_IRDA_MspDeInit;
        break;

      default :
        /* Update the error code */
        hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

        /* Return error status */
        status = HAL_ERROR;
        break;
    }
  }
  else
  {
    /* Unregistration refused in any other (busy/error) state */
    /* Update the error code */
    hirda->ErrorCode |= HAL_IRDA_ERROR_INVALID_CALLBACK;

    /* Return error status */
    status = HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hirda);

  return status;
}
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */

/**
  * @}
  */

/** @defgroup IRDA_Exported_Functions_Group2 IO operation functions
  * @brief   IRDA Transmit and Receive functions
  *
@verbatim
 ===============================================================================
                         ##### IO operation functions #####
 ===============================================================================
  [..]
    This subsection provides a set of functions allowing to manage the IRDA data transfers.

  [..]
    IrDA is a half duplex communication protocol. If the Transmitter is busy, any data
    on the IrDA receive line will be ignored by the IrDA decoder and if the Receiver
    is busy, data on the TX from the USART to IrDA will not be encoded by IrDA.
    While receiving data, transmission should be avoided as the data to be transmitted
    could be corrupted.

    (#) There are two modes of transfer:
        (++) Blocking mode: the communication is performed in polling mode.
             The HAL status of all data processing is returned by the same function
             after finishing transfer.
        (++) Non-Blocking mode: the communication is performed using Interrupts
             or DMA, these API's return the HAL status.
             The end of the data processing will be indicated through the
             dedicated IRDA IRQ when using Interrupt mode or the DMA IRQ when
             using DMA mode.
The HAL_IRDA_TxCpltCallback(), HAL_IRDA_RxCpltCallback() user callbacks will be executed respectively at the end of the Transmit or Receive process The HAL_IRDA_ErrorCallback() user callback will be executed when a communication error is detected (#) Blocking mode APIs are : (++) HAL_IRDA_Transmit() (++) HAL_IRDA_Receive() (#) Non Blocking mode APIs with Interrupt are : (++) HAL_IRDA_Transmit_IT() (++) HAL_IRDA_Receive_IT() (++) HAL_IRDA_IRQHandler() (#) Non Blocking mode functions with DMA are : (++) HAL_IRDA_Transmit_DMA() (++) HAL_IRDA_Receive_DMA() (++) HAL_IRDA_DMAPause() (++) HAL_IRDA_DMAResume() (++) HAL_IRDA_DMAStop() (#) A set of Transfer Complete Callbacks are provided in Non Blocking mode: (++) HAL_IRDA_TxHalfCpltCallback() (++) HAL_IRDA_TxCpltCallback() (++) HAL_IRDA_RxHalfCpltCallback() (++) HAL_IRDA_RxCpltCallback() (++) HAL_IRDA_ErrorCallback() (#) Non-Blocking mode transfers could be aborted using Abort API's : (+) HAL_IRDA_Abort() (+) HAL_IRDA_AbortTransmit() (+) HAL_IRDA_AbortReceive() (+) HAL_IRDA_Abort_IT() (+) HAL_IRDA_AbortTransmit_IT() (+) HAL_IRDA_AbortReceive_IT() (#) For Abort services based on interrupts (HAL_IRDA_Abortxxx_IT), a set of Abort Complete Callbacks are provided: (+) HAL_IRDA_AbortCpltCallback() (+) HAL_IRDA_AbortTransmitCpltCallback() (+) HAL_IRDA_AbortReceiveCpltCallback() (#) In Non-Blocking mode transfers, possible errors are split into 2 categories. Errors are handled as follows : (+) Error is considered as Recoverable and non blocking : Transfer could go till end, but error severity is to be evaluated by user : this concerns Frame Error, Parity Error or Noise Error in Interrupt mode reception . Received character is then retrieved and stored in Rx buffer, Error code is set to allow user to identify error type, and HAL_IRDA_ErrorCallback() user callback is executed. Transfer is kept ongoing on IRDA side. If user wants to abort it, Abort services should be called by user. 
(+) Error is considered as Blocking : Transfer could not be completed properly and is aborted. This concerns Overrun Error In Interrupt mode reception and all errors in DMA mode. Error code is set to allow user to identify error type, and HAL_IRDA_ErrorCallback() user callback is executed. @endverbatim * @{ */ /** * @brief Send an amount of data in blocking mode. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @param pData Pointer to data buffer. * @param Size Amount of data to be sent. * @param Timeout Specify timeout value. * @retval HAL status */ HAL_StatusTypeDef HAL_IRDA_Transmit(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size, uint32_t Timeout) { uint8_t *pdata8bits; uint16_t *pdata16bits; uint32_t tickstart; /* Check that a Tx process is not already ongoing */ if (hirda->gState == HAL_IRDA_STATE_READY) { if ((pData == NULL) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(hirda); hirda->ErrorCode = HAL_IRDA_ERROR_NONE; hirda->gState = HAL_IRDA_STATE_BUSY_TX; /* Init tickstart for timeout managment*/ tickstart = HAL_GetTick(); hirda->TxXferSize = Size; hirda->TxXferCount = Size; /* In case of 9bits/No Parity transfer, pData needs to be handled as a uint16_t pointer */ if ((hirda->Init.WordLength == IRDA_WORDLENGTH_9B) && (hirda->Init.Parity == IRDA_PARITY_NONE)) { pdata8bits = NULL; pdata16bits = (uint16_t *) pData; /* Derogation R.11.3 */ } else { pdata8bits = pData; pdata16bits = NULL; } while (hirda->TxXferCount > 0U) { hirda->TxXferCount--; if (IRDA_WaitOnFlagUntilTimeout(hirda, IRDA_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } if (pdata8bits == NULL) { hirda->Instance->TDR = (uint16_t)(*pdata16bits & 0x01FFU); pdata16bits++; } else { hirda->Instance->TDR = (uint8_t)(*pdata8bits & 0xFFU); pdata8bits++; } } if (IRDA_WaitOnFlagUntilTimeout(hirda, IRDA_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) { return 
HAL_TIMEOUT; } /* At end of Tx process, restore hirda->gState to Ready */ hirda->gState = HAL_IRDA_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(hirda); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receive an amount of data in blocking mode. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @param pData Pointer to data buffer. * @param Size Amount of data to be received. * @param Timeout Specify timeout value. * @retval HAL status */ HAL_StatusTypeDef HAL_IRDA_Receive(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size, uint32_t Timeout) { uint8_t *pdata8bits; uint16_t *pdata16bits; uint16_t uhMask; uint32_t tickstart; /* Check that a Rx process is not already ongoing */ if (hirda->RxState == HAL_IRDA_STATE_READY) { if ((pData == NULL) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(hirda); hirda->ErrorCode = HAL_IRDA_ERROR_NONE; hirda->RxState = HAL_IRDA_STATE_BUSY_RX; /* Init tickstart for timeout managment*/ tickstart = HAL_GetTick(); hirda->RxXferSize = Size; hirda->RxXferCount = Size; /* Computation of the mask to apply to RDR register of the UART associated to the IRDA */ IRDA_MASK_COMPUTATION(hirda); uhMask = hirda->Mask; /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ if ((hirda->Init.WordLength == IRDA_WORDLENGTH_9B) && (hirda->Init.Parity == IRDA_PARITY_NONE)) { pdata8bits = NULL; pdata16bits = (uint16_t *) pData; /* Derogation R.11.3 */ } else { pdata8bits = pData; pdata16bits = NULL; } /* Check data remaining to be received */ while (hirda->RxXferCount > 0U) { hirda->RxXferCount--; if (IRDA_WaitOnFlagUntilTimeout(hirda, IRDA_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } if (pdata8bits == NULL) { *pdata16bits = (uint16_t)(hirda->Instance->RDR & uhMask); pdata16bits++; } else { *pdata8bits = (uint8_t)(hirda->Instance->RDR & (uint8_t)uhMask); pdata8bits++; } 
} /* At end of Rx process, restore hirda->RxState to Ready */ hirda->RxState = HAL_IRDA_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(hirda); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Send an amount of data in interrupt mode. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @param pData Pointer to data buffer. * @param Size Amount of data to be sent. * @retval HAL status */ HAL_StatusTypeDef HAL_IRDA_Transmit_IT(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size) { /* Check that a Tx process is not already ongoing */ if (hirda->gState == HAL_IRDA_STATE_READY) { if ((pData == NULL) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(hirda); hirda->pTxBuffPtr = pData; hirda->TxXferSize = Size; hirda->TxXferCount = Size; hirda->ErrorCode = HAL_IRDA_ERROR_NONE; hirda->gState = HAL_IRDA_STATE_BUSY_TX; /* Process Unlocked */ __HAL_UNLOCK(hirda); /* Enable the IRDA Transmit Data Register Empty Interrupt */ SET_BIT(hirda->Instance->CR1, USART_CR1_TXEIE_TXFNFIE); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receive an amount of data in interrupt mode. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @param pData Pointer to data buffer. * @param Size Amount of data to be received. 
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Receive_IT(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size)
{
  /* Check that a Rx process is not already ongoing */
  if (hirda->RxState == HAL_IRDA_STATE_READY)
  {
    if ((pData == NULL) || (Size == 0U))
    {
      return HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hirda);

    hirda->pRxBuffPtr = pData;
    hirda->RxXferSize = Size;
    hirda->RxXferCount = Size;

    /* Computation of the mask to apply to the RDR register
       of the UART associated to the IRDA */
    IRDA_MASK_COMPUTATION(hirda);

    hirda->ErrorCode = HAL_IRDA_ERROR_NONE;
    hirda->RxState = HAL_IRDA_STATE_BUSY_RX;

    /* Process Unlocked */
    __HAL_UNLOCK(hirda);

    /* Enable the IRDA Parity Error and Data Register not empty Interrupts */
    SET_BIT(hirda->Instance->CR1, USART_CR1_PEIE | USART_CR1_RXNEIE_RXFNEIE);

    /* Enable the IRDA Error Interrupt: (Frame error, noise error, overrun error) */
    SET_BIT(hirda->Instance->CR3, USART_CR3_EIE);

    return HAL_OK;
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief Send an amount of data in DMA mode.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @param pData pointer to data buffer.
  * @param Size amount of data to be sent.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Transmit_DMA(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size)
{
  /* Check that a Tx process is not already ongoing */
  if (hirda->gState == HAL_IRDA_STATE_READY)
  {
    if ((pData == NULL) || (Size == 0U))
    {
      return HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hirda);

    hirda->pTxBuffPtr = pData;
    hirda->TxXferSize = Size;
    hirda->TxXferCount = Size;

    hirda->ErrorCode = HAL_IRDA_ERROR_NONE;
    hirda->gState = HAL_IRDA_STATE_BUSY_TX;

    /* Set the IRDA DMA transfer complete callback */
    hirda->hdmatx->XferCpltCallback = IRDA_DMATransmitCplt;

    /* Set the IRDA DMA half transfer complete callback */
    hirda->hdmatx->XferHalfCpltCallback = IRDA_DMATransmitHalfCplt;

    /* Set the DMA error callback */
    hirda->hdmatx->XferErrorCallback = IRDA_DMAError;

    /* Set the DMA abort callback */
    hirda->hdmatx->XferAbortCallback = NULL;

    /* Enable the IRDA transmit DMA channel */
    if (HAL_DMA_Start_IT(hirda->hdmatx, (uint32_t)hirda->pTxBuffPtr, (uint32_t)&hirda->Instance->TDR, Size) == HAL_OK)
    {
      /* Clear the TC flag in the ICR register */
      __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_TCF);

      /* Process Unlocked */
      __HAL_UNLOCK(hirda);

      /* Enable the DMA transfer for transmit request by setting the DMAT bit
         in the USART CR3 register */
      SET_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

      return HAL_OK;
    }
    else
    {
      /* Set error code to DMA */
      hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

      /* Process Unlocked */
      __HAL_UNLOCK(hirda);

      /* Restore hirda->gState to ready */
      hirda->gState = HAL_IRDA_STATE_READY;

      return HAL_ERROR;
    }
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief Receive an amount of data in DMA mode.
  * @note When the IRDA parity is enabled (PCE = 1), the received data contains
  *       the parity bit (MSB position).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @param pData Pointer to data buffer.
  * @param Size Amount of data to be received.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Receive_DMA(IRDA_HandleTypeDef *hirda, uint8_t *pData, uint16_t Size)
{
  /* Check that a Rx process is not already ongoing */
  if (hirda->RxState == HAL_IRDA_STATE_READY)
  {
    if ((pData == NULL) || (Size == 0U))
    {
      return HAL_ERROR;
    }

    /* Process Locked */
    __HAL_LOCK(hirda);

    hirda->pRxBuffPtr = pData;
    hirda->RxXferSize = Size;

    hirda->ErrorCode = HAL_IRDA_ERROR_NONE;
    hirda->RxState = HAL_IRDA_STATE_BUSY_RX;

    /* Set the IRDA DMA transfer complete callback */
    hirda->hdmarx->XferCpltCallback = IRDA_DMAReceiveCplt;

    /* Set the IRDA DMA half transfer complete callback */
    hirda->hdmarx->XferHalfCpltCallback = IRDA_DMAReceiveHalfCplt;

    /* Set the DMA error callback */
    hirda->hdmarx->XferErrorCallback = IRDA_DMAError;

    /* Set the DMA abort callback */
    hirda->hdmarx->XferAbortCallback = NULL;

    /* Enable the DMA channel */
    if (HAL_DMA_Start_IT(hirda->hdmarx, (uint32_t)&hirda->Instance->RDR, (uint32_t)hirda->pRxBuffPtr, Size) == HAL_OK)
    {
      /* Process Unlocked */
      __HAL_UNLOCK(hirda);

      /* Enable the UART Parity Error Interrupt */
      SET_BIT(hirda->Instance->CR1, USART_CR1_PEIE);

      /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */
      SET_BIT(hirda->Instance->CR3, USART_CR3_EIE);

      /* Enable the DMA transfer for the receiver request by setting the DMAR bit
         in the USART CR3 register */
      SET_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

      return HAL_OK;
    }
    else
    {
      /* Set error code to DMA */
      hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

      /* Process Unlocked */
      __HAL_UNLOCK(hirda);

      /* Restore hirda->RxState to ready */
      hirda->RxState = HAL_IRDA_STATE_READY;

      return HAL_ERROR;
    }
  }
  else
  {
    return HAL_BUSY;
  }
}

/**
  * @brief Pause the DMA Transfer.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_DMAPause(IRDA_HandleTypeDef *hirda)
{
  /* Process Locked */
  __HAL_LOCK(hirda);

  if (hirda->gState == HAL_IRDA_STATE_BUSY_TX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
    {
      /* Disable the IRDA DMA Tx request */
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);
    }
  }
  if (hirda->RxState == HAL_IRDA_STATE_BUSY_RX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
    {
      /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */
      CLEAR_BIT(hirda->Instance->CR1, USART_CR1_PEIE);
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

      /* Disable the IRDA DMA Rx request */
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);
    }
  }

  /* Process Unlocked */
  __HAL_UNLOCK(hirda);

  return HAL_OK;
}

/**
  * @brief Resume the DMA Transfer.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_DMAResume(IRDA_HandleTypeDef *hirda)
{
  /* Process Locked */
  __HAL_LOCK(hirda);

  if (hirda->gState == HAL_IRDA_STATE_BUSY_TX)
  {
    /* Enable the IRDA DMA Tx request */
    SET_BIT(hirda->Instance->CR3, USART_CR3_DMAT);
  }
  if (hirda->RxState == HAL_IRDA_STATE_BUSY_RX)
  {
    /* Clear the Overrun flag before resuming the Rx transfer */
    __HAL_IRDA_CLEAR_OREFLAG(hirda);

    /* Reenable PE and ERR (Frame error, noise error, overrun error) interrupts */
    SET_BIT(hirda->Instance->CR1, USART_CR1_PEIE);
    SET_BIT(hirda->Instance->CR3, USART_CR3_EIE);

    /* Enable the IRDA DMA Rx request */
    SET_BIT(hirda->Instance->CR3, USART_CR3_DMAR);
  }

  /* Process Unlocked */
  __HAL_UNLOCK(hirda);

  return HAL_OK;
}

/**
  * @brief Stop the DMA Transfer.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_DMAStop(IRDA_HandleTypeDef *hirda)
{
  /* The Lock is not implemented on this API to allow the user application
     to call the HAL IRDA API under callbacks HAL_IRDA_TxCpltCallback() / HAL_IRDA_RxCpltCallback() /
     HAL_IRDA_TxHalfCpltCallback / HAL_IRDA_RxHalfCpltCallback:
     indeed, when HAL_DMA_Abort() API is called, the DMA TX/RX Transfer or Half Transfer complete
     interrupt is generated if the DMA transfer interruption occurs at the middle or at the end
     of the stream and the corresponding call back is executed. */

  /* Stop IRDA DMA Tx request if ongoing */
  if (hirda->gState == HAL_IRDA_STATE_BUSY_TX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
    {
      /* Clear the peripheral request first so no new DMA transfers start
         while the channel is being aborted */
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

      /* Abort the IRDA DMA Tx channel */
      if (hirda->hdmatx != NULL)
      {
        if (HAL_DMA_Abort(hirda->hdmatx) != HAL_OK)
        {
          if (HAL_DMA_GetError(hirda->hdmatx) == HAL_DMA_ERROR_TIMEOUT)
          {
            /* Set error code to DMA */
            hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

            return HAL_TIMEOUT;
          }
        }
      }

      IRDA_EndTxTransfer(hirda);
    }
  }

  /* Stop IRDA DMA Rx request if ongoing */
  if (hirda->RxState == HAL_IRDA_STATE_BUSY_RX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
    {
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

      /* Abort the IRDA DMA Rx channel */
      if (hirda->hdmarx != NULL)
      {
        if (HAL_DMA_Abort(hirda->hdmarx) != HAL_OK)
        {
          if (HAL_DMA_GetError(hirda->hdmarx) == HAL_DMA_ERROR_TIMEOUT)
          {
            /* Set error code to DMA */
            hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

            return HAL_TIMEOUT;
          }
        }
      }

      IRDA_EndRxTransfer(hirda);
    }
  }

  return HAL_OK;
}

/**
  * @brief Abort ongoing transfers (blocking mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Tx and Rx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode)
  *           - Set handle State to READY
  * @note  This procedure is executed in blocking mode : when exiting function, Abort is considered as completed.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Abort(IRDA_HandleTypeDef *hirda)
{
  /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
  CLEAR_BIT(hirda->Instance->CR1,
            (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE | USART_CR1_TXEIE_TXFNFIE | USART_CR1_TCIE));
  CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

  /* Disable the IRDA DMA Tx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

    /* Abort the IRDA DMA Tx channel : use blocking DMA Abort API (no callback) */
    if (hirda->hdmatx != NULL)
    {
      /* Set the IRDA DMA Abort callback to Null.
         No call back execution at end of DMA abort procedure */
      hirda->hdmatx->XferAbortCallback = NULL;

      if (HAL_DMA_Abort(hirda->hdmatx) != HAL_OK)
      {
        if (HAL_DMA_GetError(hirda->hdmatx) == HAL_DMA_ERROR_TIMEOUT)
        {
          /* Set error code to DMA */
          hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

          return HAL_TIMEOUT;
        }
      }
    }
  }

  /* Disable the IRDA DMA Rx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

    /* Abort the IRDA DMA Rx channel : use blocking DMA Abort API (no callback) */
    if (hirda->hdmarx != NULL)
    {
      /* Set the IRDA DMA Abort callback to Null.
         No call back execution at end of DMA abort procedure */
      hirda->hdmarx->XferAbortCallback = NULL;

      if (HAL_DMA_Abort(hirda->hdmarx) != HAL_OK)
      {
        if (HAL_DMA_GetError(hirda->hdmarx) == HAL_DMA_ERROR_TIMEOUT)
        {
          /* Set error code to DMA */
          hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

          return HAL_TIMEOUT;
        }
      }
    }
  }

  /* Reset Tx and Rx transfer counters */
  hirda->TxXferCount = 0U;
  hirda->RxXferCount = 0U;

  /* Clear the Error flags in the ICR register */
  __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF);

  /* Restore hirda->gState and hirda->RxState to Ready */
  hirda->gState = HAL_IRDA_STATE_READY;
  hirda->RxState = HAL_IRDA_STATE_READY;

  /* Reset Handle ErrorCode to No Error */
  hirda->ErrorCode = HAL_IRDA_ERROR_NONE;

  return HAL_OK;
}

/**
  * @brief Abort ongoing Transmit transfer (blocking mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Tx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode)
  *           - Set handle State to READY
  * @note  This procedure is executed in blocking mode : when exiting function, Abort is considered as completed.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_AbortTransmit(IRDA_HandleTypeDef *hirda)
{
  /* Disable TXEIE and TCIE interrupts */
  CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_TXEIE_TXFNFIE | USART_CR1_TCIE));

  /* Disable the IRDA DMA Tx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

    /* Abort the IRDA DMA Tx channel : use blocking DMA Abort API (no callback) */
    if (hirda->hdmatx != NULL)
    {
      /* Set the IRDA DMA Abort callback to Null.
         No call back execution at end of DMA abort procedure */
      hirda->hdmatx->XferAbortCallback = NULL;

      if (HAL_DMA_Abort(hirda->hdmatx) != HAL_OK)
      {
        if (HAL_DMA_GetError(hirda->hdmatx) == HAL_DMA_ERROR_TIMEOUT)
        {
          /* Set error code to DMA */
          hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

          return HAL_TIMEOUT;
        }
      }
    }
  }

  /* Reset Tx transfer counter */
  hirda->TxXferCount = 0U;

  /* Restore hirda->gState to Ready */
  hirda->gState = HAL_IRDA_STATE_READY;

  return HAL_OK;
}

/**
  * @brief Abort ongoing Receive transfer (blocking mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Rx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode)
  *           - Set handle State to READY
  * @note  This procedure is executed in blocking mode : when exiting function, Abort is considered as completed.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_AbortReceive(IRDA_HandleTypeDef *hirda)
{
  /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
  CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE));
  CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

  /* Disable the IRDA DMA Rx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

    /* Abort the IRDA DMA Rx channel : use blocking DMA Abort API (no callback) */
    if (hirda->hdmarx != NULL)
    {
      /* Set the IRDA DMA Abort callback to Null.
         No call back execution at end of DMA abort procedure */
      hirda->hdmarx->XferAbortCallback = NULL;

      if (HAL_DMA_Abort(hirda->hdmarx) != HAL_OK)
      {
        if (HAL_DMA_GetError(hirda->hdmarx) == HAL_DMA_ERROR_TIMEOUT)
        {
          /* Set error code to DMA */
          hirda->ErrorCode = HAL_IRDA_ERROR_DMA;

          return HAL_TIMEOUT;
        }
      }
    }
  }

  /* Reset Rx transfer counter */
  hirda->RxXferCount = 0U;

  /* Clear the Error flags in the ICR register */
  __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF);

  /* Restore hirda->RxState to Ready */
  hirda->RxState = HAL_IRDA_STATE_READY;

  return HAL_OK;
}

/**
  * @brief Abort ongoing transfers (Interrupt mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Tx and Rx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode)
  *           - Set handle State to READY
  *           - At abort completion, call user abort complete callback
  * @note  This procedure is executed in Interrupt mode, meaning that abort procedure could be
  *        considered as completed only when user abort complete callback is executed (not when exiting function).
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_Abort_IT(IRDA_HandleTypeDef *hirda)
{
  /* Assume completion is immediate; cleared to 0 below when at least one DMA
     abort was successfully launched (completion then happens in its callback). */
  uint32_t abortcplt = 1U;

  /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
  CLEAR_BIT(hirda->Instance->CR1,
            (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE | USART_CR1_TXEIE_TXFNFIE | USART_CR1_TCIE));
  CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

  /* If DMA Tx and/or DMA Rx Handles are associated to IRDA Handle, DMA Abort complete callbacks should be initialised
     before any call to DMA Abort functions */
  /* DMA Tx Handle is valid */
  if (hirda->hdmatx != NULL)
  {
    /* Set DMA Abort Complete callback if IRDA DMA Tx request if enabled.
       Otherwise, set it to NULL */
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
    {
      hirda->hdmatx->XferAbortCallback = IRDA_DMATxAbortCallback;
    }
    else
    {
      hirda->hdmatx->XferAbortCallback = NULL;
    }
  }
  /* DMA Rx Handle is valid */
  if (hirda->hdmarx != NULL)
  {
    /* Set DMA Abort Complete callback if IRDA DMA Rx request if enabled.
       Otherwise, set it to NULL */
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
    {
      hirda->hdmarx->XferAbortCallback = IRDA_DMARxAbortCallback;
    }
    else
    {
      hirda->hdmarx->XferAbortCallback = NULL;
    }
  }

  /* Disable the IRDA DMA Tx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
  {
    /* Disable DMA Tx at UART level */
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

    /* Abort the IRDA DMA Tx channel : use non blocking DMA Abort API (callback) */
    if (hirda->hdmatx != NULL)
    {
      /* IRDA Tx DMA Abort callback has already been initialised :
         will lead to call HAL_IRDA_AbortCpltCallback() at end of DMA abort procedure */

      /* Abort DMA TX */
      if (HAL_DMA_Abort_IT(hirda->hdmatx) != HAL_OK)
      {
        hirda->hdmatx->XferAbortCallback = NULL;
      }
      else
      {
        abortcplt = 0U;
      }
    }
  }

  /* Disable the IRDA DMA Rx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

    /* Abort the IRDA DMA Rx channel : use non blocking DMA Abort API (callback) */
    if (hirda->hdmarx != NULL)
    {
      /* IRDA Rx DMA Abort callback has already been initialised :
         will lead to call HAL_IRDA_AbortCpltCallback() at end of DMA abort procedure */

      /* Abort DMA RX */
      if (HAL_DMA_Abort_IT(hirda->hdmarx) != HAL_OK)
      {
        hirda->hdmarx->XferAbortCallback = NULL;
        abortcplt = 1U;
      }
      else
      {
        abortcplt = 0U;
      }
    }
  }

  /* if no DMA abort complete callback execution is required => call user Abort Complete callback */
  if (abortcplt == 1U)
  {
    /* Reset Tx and Rx transfer counters */
    hirda->TxXferCount = 0U;
    hirda->RxXferCount = 0U;

    /* Reset errorCode */
    hirda->ErrorCode = HAL_IRDA_ERROR_NONE;

    /* Clear the Error flags in the ICR register */
    __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF);

    /* Restore hirda->gState and hirda->RxState to Ready */
    hirda->gState = HAL_IRDA_STATE_READY;
    hirda->RxState = HAL_IRDA_STATE_READY;

    /* As no DMA to be aborted, call directly user Abort complete callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
    /* Call registered Abort complete callback */
    hirda->AbortCpltCallback(hirda);
#else
    /* Call legacy weak Abort complete callback */
    HAL_IRDA_AbortCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
  }

  return HAL_OK;
}

/**
  * @brief Abort ongoing Transmit transfer (Interrupt mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Tx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode)
  *           - Set handle State to READY
  *           - At abort completion, call user abort complete callback
  * @note  This procedure is executed in Interrupt mode, meaning that abort procedure could be
  *        considered as completed only when user abort complete callback is executed (not when exiting function).
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_AbortTransmit_IT(IRDA_HandleTypeDef *hirda)
{
  /* Disable TXEIE and TCIE interrupts */
  CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_TXEIE_TXFNFIE | USART_CR1_TCIE));

  /* Disable the IRDA DMA Tx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

    /* Abort the IRDA DMA Tx channel : use non blocking DMA Abort API (callback) */
    if (hirda->hdmatx != NULL)
    {
      /* Set the IRDA DMA Abort callback :
         will lead to call HAL_IRDA_AbortCpltCallback() at end of DMA abort procedure */
      hirda->hdmatx->XferAbortCallback = IRDA_DMATxOnlyAbortCallback;

      /* Abort DMA TX */
      if (HAL_DMA_Abort_IT(hirda->hdmatx) != HAL_OK)
      {
        /* Call Directly hirda->hdmatx->XferAbortCallback function in case of error */
        hirda->hdmatx->XferAbortCallback(hirda->hdmatx);
      }
    }
    else
    {
      /* Reset Tx transfer counter */
      hirda->TxXferCount = 0U;

      /* Restore hirda->gState to Ready */
      hirda->gState = HAL_IRDA_STATE_READY;

      /* As no DMA to be aborted, call directly user Abort complete callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
      /* Call registered Abort Transmit Complete Callback */
      hirda->AbortTransmitCpltCallback(hirda);
#else
      /* Call legacy weak Abort Transmit Complete Callback */
      HAL_IRDA_AbortTransmitCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
    }
  }
  else
  {
    /* No DMA in use : abort is immediate */
    /* Reset Tx transfer counter */
    hirda->TxXferCount = 0U;

    /* Restore hirda->gState to Ready */
    hirda->gState = HAL_IRDA_STATE_READY;

    /* As no DMA to be aborted, call directly user Abort complete callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
    /* Call registered Abort Transmit Complete Callback */
    hirda->AbortTransmitCpltCallback(hirda);
#else
    /* Call legacy weak Abort Transmit Complete Callback */
    HAL_IRDA_AbortTransmitCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
  }

  return HAL_OK;
}

/**
  * @brief Abort ongoing Receive transfer (Interrupt mode).
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified UART module.
  * @note  This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode.
  *        This procedure performs following operations :
  *           - Disable IRDA Interrupts (Rx)
  *           - Disable the DMA transfer in the peripheral register (if enabled)
  *           - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode)
  *           - Set handle State to READY
  *           - At abort completion, call user abort complete callback
  * @note  This procedure is executed in Interrupt mode, meaning that abort procedure could be
  *        considered as completed only when user abort complete callback is executed (not when exiting function).
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_IRDA_AbortReceive_IT(IRDA_HandleTypeDef *hirda)
{
  /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
  CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE));
  CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

  /* Disable the IRDA DMA Rx request if enabled */
  if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
  {
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

    /* Abort the IRDA DMA Rx channel : use non blocking DMA Abort API (callback) */
    if (hirda->hdmarx != NULL)
    {
      /* Set the IRDA DMA Abort callback :
         will lead to call HAL_IRDA_AbortCpltCallback() at end of DMA abort procedure */
      hirda->hdmarx->XferAbortCallback = IRDA_DMARxOnlyAbortCallback;

      /* Abort DMA RX */
      if (HAL_DMA_Abort_IT(hirda->hdmarx) != HAL_OK)
      {
        /* Call Directly hirda->hdmarx->XferAbortCallback function in case of error */
        hirda->hdmarx->XferAbortCallback(hirda->hdmarx);
      }
    }
    else
    {
      /* Reset Rx transfer counter */
      hirda->RxXferCount = 0U;

      /* Clear the Error flags in the ICR register */
      __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF);

      /* Restore hirda->RxState to Ready */
      hirda->RxState = HAL_IRDA_STATE_READY;

      /* As no DMA to be aborted, call directly user Abort complete callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
      /* Call registered Abort Receive Complete Callback */
      hirda->AbortReceiveCpltCallback(hirda);
#else
      /* Call legacy weak Abort Receive Complete Callback */
      HAL_IRDA_AbortReceiveCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
    }
  }
  else
  {
    /* No DMA in use : abort is immediate */
    /* Reset Rx transfer counter */
    hirda->RxXferCount = 0U;

    /* Clear the Error flags in the ICR register */
    __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF);

    /* Restore hirda->RxState to Ready */
    hirda->RxState = HAL_IRDA_STATE_READY;

    /* As no DMA to be aborted, call directly user Abort complete callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
    /* Call registered Abort Receive Complete Callback */
    hirda->AbortReceiveCpltCallback(hirda);
#else
    /* Call legacy weak Abort Receive Complete Callback */
    HAL_IRDA_AbortReceiveCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
  }

  return HAL_OK;
}

/**
  * @brief Handle IRDA interrupt request.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
void HAL_IRDA_IRQHandler(IRDA_HandleTypeDef *hirda)
{
  /* Snapshot ISR/CR1 once so all decisions below are based on a coherent view */
  uint32_t isrflags = READ_REG(hirda->Instance->ISR);
  uint32_t cr1its = READ_REG(hirda->Instance->CR1);
  uint32_t cr3its;
  uint32_t errorflags;

  /* If no error occurs */
  errorflags = (isrflags & (uint32_t)(USART_ISR_PE | USART_ISR_FE | USART_ISR_ORE | USART_ISR_NE));
  if (errorflags == 0U)
  {
    /* IRDA in mode Receiver ---------------------------------------------------*/
    if (((isrflags & USART_ISR_RXNE_RXFNE) != 0U)
        && ((cr1its & USART_CR1_RXNEIE_RXFNEIE) != 0U))
    {
      IRDA_Receive_IT(hirda);
      return;
    }
  }

  /* If some errors occur */
  cr3its = READ_REG(hirda->Instance->CR3);
  if ((errorflags != 0U)
      && (((cr3its & USART_CR3_EIE) != 0U)
          || ((cr1its & (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE)) != 0U)))
  {
    /* IRDA parity error interrupt occurred -------------------------------------*/
    if (((isrflags & USART_ISR_PE) != 0U) && ((cr1its & USART_CR1_PEIE) != 0U))
    {
      __HAL_IRDA_CLEAR_IT(hirda, IRDA_CLEAR_PEF);

      hirda->ErrorCode |= HAL_IRDA_ERROR_PE;
    }

    /* IRDA frame error interrupt occurred --------------------------------------*/
    if (((isrflags & USART_ISR_FE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U))
    {
      __HAL_IRDA_CLEAR_IT(hirda, IRDA_CLEAR_FEF);

      hirda->ErrorCode |= HAL_IRDA_ERROR_FE;
    }

    /* IRDA noise error interrupt occurred --------------------------------------*/
    if (((isrflags & USART_ISR_NE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U))
    {
      __HAL_IRDA_CLEAR_IT(hirda, IRDA_CLEAR_NEF);

      hirda->ErrorCode |= HAL_IRDA_ERROR_NE;
    }

    /* IRDA Over-Run interrupt occurred -----------------------------------------*/
    if (((isrflags & USART_ISR_ORE) != 0U) &&
        (((cr1its & USART_CR1_RXNEIE_RXFNEIE) != 0U) || ((cr3its & USART_CR3_EIE) != 0U)))
    {
      __HAL_IRDA_CLEAR_IT(hirda, IRDA_CLEAR_OREF);

      hirda->ErrorCode |= HAL_IRDA_ERROR_ORE;
    }

    /* Call IRDA Error Call back function if need be --------------------------*/
    if (hirda->ErrorCode != HAL_IRDA_ERROR_NONE)
    {
      /* IRDA in mode Receiver ---------------------------------------------------*/
      if (((isrflags & USART_ISR_RXNE_RXFNE) != 0U)
          && ((cr1its & USART_CR1_RXNEIE_RXFNEIE) != 0U))
      {
        IRDA_Receive_IT(hirda);
      }

      /* If Overrun error occurs, or if any error occurs in DMA mode reception,
         consider error as blocking */
      if ((HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR)) ||
          ((hirda->ErrorCode & HAL_IRDA_ERROR_ORE) != 0U))
      {
        /* Blocking error : transfer is aborted
           Set the IRDA state ready to be able to start again the process,
           Disable Rx Interrupts, and disable Rx DMA request, if ongoing */
        IRDA_EndRxTransfer(hirda);

        /* Disable the IRDA DMA Rx request if enabled */
        if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
        {
          CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

          /* Abort the IRDA DMA Rx channel */
          if (hirda->hdmarx != NULL)
          {
            /* Set the IRDA DMA Abort callback :
               will lead to call HAL_IRDA_ErrorCallback() at end of DMA abort procedure */
            hirda->hdmarx->XferAbortCallback = IRDA_DMAAbortOnError;

            /* Abort DMA RX */
            if (HAL_DMA_Abort_IT(hirda->hdmarx) != HAL_OK)
            {
              /* Call Directly hirda->hdmarx->XferAbortCallback function in case of error */
              hirda->hdmarx->XferAbortCallback(hirda->hdmarx);
            }
          }
          else
          {
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
            /* Call registered user error callback */
            hirda->ErrorCallback(hirda);
#else
            /* Call legacy weak user error callback */
            HAL_IRDA_ErrorCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
          }
        }
        else
        {
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
          /* Call registered user error callback */
          hirda->ErrorCallback(hirda);
#else
          /* Call legacy weak user error callback */
          HAL_IRDA_ErrorCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
        }
      }
      else
      {
        /* Non Blocking error : transfer could go on.
           Error is notified to user through user error callback */
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
        /* Call registered user error callback */
        hirda->ErrorCallback(hirda);
#else
        /* Call legacy weak user error callback */
        HAL_IRDA_ErrorCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACK */
        hirda->ErrorCode = HAL_IRDA_ERROR_NONE;
      }
    }
    return;

  } /* End if some error occurs */

  /* IRDA in mode Transmitter ------------------------------------------------*/
  if (((isrflags & USART_ISR_TXE_TXFNF) != 0U)
      && ((cr1its & USART_CR1_TXEIE_TXFNFIE) != 0U))
  {
    IRDA_Transmit_IT(hirda);
    return;
  }

  /* IRDA in mode Transmitter (transmission end) -----------------------------*/
  if (((isrflags & USART_ISR_TC) != 0U) && ((cr1its & USART_CR1_TCIE) != 0U))
  {
    IRDA_EndTransmit_IT(hirda);
    return;
  }
}

/**
  * @brief Tx Transfer completed callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_TxCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_TxCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief Tx Half Transfer completed callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified USART module.
  * @retval None
  */
__weak void HAL_IRDA_TxHalfCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_TxHalfCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief Rx Transfer completed callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_RxCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_RxCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief Rx Half Transfer complete callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_RxHalfCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_RxHalfCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief IRDA error callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_ErrorCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_ErrorCallback can be implemented in the user file.
   */
}

/**
  * @brief IRDA Abort Complete callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_AbortCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_AbortCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief IRDA Abort Transmit Complete callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_AbortTransmitCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_AbortTransmitCpltCallback can be implemented in the user file.
   */
}

/**
  * @brief IRDA Abort Receive Complete callback.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval None
  */
__weak void HAL_IRDA_AbortReceiveCpltCallback(IRDA_HandleTypeDef *hirda)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hirda);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_IRDA_AbortReceiveCpltCallback can be implemented in the user file.
   */
}

/**
  * @}
  */

/** @defgroup IRDA_Exported_Functions_Group4 Peripheral State and Error functions
  *  @brief   IRDA State and Errors functions
  *
@verbatim
  ==============================================================================
            ##### Peripheral State and Error functions #####
  ==============================================================================
  [..]
    This subsection provides a set of functions allowing to return the State of IrDA
    communication process and also return Peripheral Errors occurred during communication process
     (+) HAL_IRDA_GetState() API can be helpful to check in run-time the state
         of the IRDA peripheral handle.
     (+) HAL_IRDA_GetError() checks in run-time errors that could occur during
         communication.

@endverbatim
  * @{
  */

/**
  * @brief Return the IRDA handle state.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
* @retval HAL state */ HAL_IRDA_StateTypeDef HAL_IRDA_GetState(IRDA_HandleTypeDef *hirda) { /* Return IRDA handle state */ uint32_t temp1, temp2; temp1 = (uint32_t)hirda->gState; temp2 = (uint32_t)hirda->RxState; return (HAL_IRDA_StateTypeDef)(temp1 | temp2); } /** * @brief Return the IRDA handle error code. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @retval IRDA Error Code */ uint32_t HAL_IRDA_GetError(IRDA_HandleTypeDef *hirda) { return hirda->ErrorCode; } /** * @} */ /** * @} */ /** @defgroup IRDA_Private_Functions IRDA Private Functions * @{ */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /** * @brief Initialize the callbacks to their default values. * @param hirda IRDA handle. * @retval none */ void IRDA_InitCallbacksToDefault(IRDA_HandleTypeDef *hirda) { /* Init the IRDA Callback settings */ hirda->TxHalfCpltCallback = HAL_IRDA_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ hirda->TxCpltCallback = HAL_IRDA_TxCpltCallback; /* Legacy weak TxCpltCallback */ hirda->RxHalfCpltCallback = HAL_IRDA_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ hirda->RxCpltCallback = HAL_IRDA_RxCpltCallback; /* Legacy weak RxCpltCallback */ hirda->ErrorCallback = HAL_IRDA_ErrorCallback; /* Legacy weak ErrorCallback */ hirda->AbortCpltCallback = HAL_IRDA_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ hirda->AbortTransmitCpltCallback = HAL_IRDA_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ hirda->AbortReceiveCpltCallback = HAL_IRDA_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ } #endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */ /** * @brief Configure the IRDA peripheral. * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. 
  * @retval HAL status
  */
static HAL_StatusTypeDef IRDA_SetConfig(IRDA_HandleTypeDef *hirda)
{
  uint32_t tmpreg;
  IRDA_ClockSourceTypeDef clocksource;
  HAL_StatusTypeDef ret = HAL_OK;
  /* NOTE(review): not referenced directly in this function body; presumably
     indexed inside the IRDA_DIV_SAMPLING16() macro via ClockPrescaler — confirm
     against the device-family HAL header. */
  const uint16_t IRDAPrescTable[12] = {1U, 2U, 4U, 6U, 8U, 10U, 12U, 16U, 32U, 64U, 128U, 256U};

  /* Check the communication parameters */
  assert_param(IS_IRDA_BAUDRATE(hirda->Init.BaudRate));
  assert_param(IS_IRDA_WORD_LENGTH(hirda->Init.WordLength));
  assert_param(IS_IRDA_PARITY(hirda->Init.Parity));
  assert_param(IS_IRDA_TX_RX_MODE(hirda->Init.Mode));
  assert_param(IS_IRDA_PRESCALER(hirda->Init.Prescaler));
  assert_param(IS_IRDA_POWERMODE(hirda->Init.PowerMode));
  assert_param(IS_IRDA_CLOCKPRESCALER(hirda->Init.ClockPrescaler));

  /*-------------------------- USART CR1 Configuration -----------------------*/
  /* Configure the IRDA Word Length, Parity and transfer Mode:
     Set the M bits according to hirda->Init.WordLength value
     Set PCE and PS bits according to hirda->Init.Parity value
     Set TE and RE bits according to hirda->Init.Mode value */
  tmpreg = (uint32_t)hirda->Init.WordLength | hirda->Init.Parity | hirda->Init.Mode ;

  MODIFY_REG(hirda->Instance->CR1, IRDA_CR1_FIELDS, tmpreg);

  /*-------------------------- USART CR3 Configuration -----------------------*/
  MODIFY_REG(hirda->Instance->CR3, USART_CR3_IRLP, hirda->Init.PowerMode);

  /*--------------------- USART clock PRESC Configuration ----------------*/
  /* Configure
   * - IRDA Clock Prescaler: set PRESCALER according to hirda->Init.ClockPrescaler value */
  MODIFY_REG(hirda->Instance->PRESC, USART_PRESC_PRESCALER, hirda->Init.ClockPrescaler);

  /*-------------------------- USART GTPR Configuration ----------------------*/
  MODIFY_REG(hirda->Instance->GTPR, USART_GTPR_PSC, hirda->Init.Prescaler);

  /*-------------------------- USART BRR Configuration -----------------------*/
  /* Pick the kernel clock feeding the USART, then compute the BRR divider */
  IRDA_GETCLOCKSOURCE(hirda, clocksource);
  tmpreg = 0U;
  switch (clocksource)
  {
    case IRDA_CLOCKSOURCE_PCLK2:
      tmpreg = (uint16_t)(IRDA_DIV_SAMPLING16(HAL_RCC_GetPCLK2Freq(), hirda->Init.BaudRate, hirda->Init.ClockPrescaler));
      break;
    case IRDA_CLOCKSOURCE_HSI:
      tmpreg = (uint16_t)(IRDA_DIV_SAMPLING16(HSI_VALUE, hirda->Init.BaudRate, hirda->Init.ClockPrescaler));
      break;
    case IRDA_CLOCKSOURCE_SYSCLK:
      tmpreg = (uint16_t)(IRDA_DIV_SAMPLING16(HAL_RCC_GetSysClockFreq(), hirda->Init.BaudRate, hirda->Init.ClockPrescaler));
      break;
    case IRDA_CLOCKSOURCE_LSE:
      tmpreg = (uint16_t)(IRDA_DIV_SAMPLING16((uint32_t)LSE_VALUE, hirda->Init.BaudRate, hirda->Init.ClockPrescaler));
      break;
    default:
      ret = HAL_ERROR;
      break;
  }

  /* USARTDIV must be greater than or equal to 0d16 */
  if ((tmpreg >= USART_BRR_MIN) && (tmpreg <= USART_BRR_MAX))
  {
    hirda->Instance->BRR = tmpreg;
  }
  else
  {
    ret = HAL_ERROR;
  }

  return ret;
}

/**
  * @brief Check the IRDA Idle State.
  * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *              the configuration information for the specified IRDA module.
  * @retval HAL status
  */
static HAL_StatusTypeDef IRDA_CheckIdleState(IRDA_HandleTypeDef *hirda)
{
  uint32_t tickstart;

  /* Initialize the IRDA ErrorCode */
  hirda->ErrorCode = HAL_IRDA_ERROR_NONE;

  /* Init tickstart for timeout managment*/
  tickstart = HAL_GetTick();

  /* Check if the Transmitter is enabled */
  if ((hirda->Instance->CR1 & USART_CR1_TE) == USART_CR1_TE)
  {
    /* Wait until TEACK flag is set */
    if (IRDA_WaitOnFlagUntilTimeout(hirda, USART_ISR_TEACK, RESET, tickstart, IRDA_TEACK_REACK_TIMEOUT) != HAL_OK)
    {
      /* Timeout occurred */
      return HAL_TIMEOUT;
    }
  }
  /* Check if the Receiver is enabled */
  if ((hirda->Instance->CR1 & USART_CR1_RE) == USART_CR1_RE)
  {
    /* Wait until REACK flag is set */
    if (IRDA_WaitOnFlagUntilTimeout(hirda, USART_ISR_REACK, RESET, tickstart, IRDA_TEACK_REACK_TIMEOUT) != HAL_OK)
    {
      /* Timeout occurred */
      return HAL_TIMEOUT;
    }
  }

  /* Initialize the IRDA state*/
  hirda->gState = HAL_IRDA_STATE_READY;
  hirda->RxState = HAL_IRDA_STATE_READY;

  /* Process Unlocked */
  __HAL_UNLOCK(hirda);

  return HAL_OK;
}

/**
  * @brief Handle IRDA Communication Timeout.
* @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @param Flag Specifies the IRDA flag to check. * @param Status Flag status (SET or RESET) * @param Tickstart Tick start value * @param Timeout Timeout duration * @retval HAL status */ static HAL_StatusTypeDef IRDA_WaitOnFlagUntilTimeout(IRDA_HandleTypeDef *hirda, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout) { /* Wait until flag is set */ while ((__HAL_IRDA_GET_FLAG(hirda, Flag) ? SET : RESET) == Status) { /* Check for the Timeout */ if (Timeout != HAL_MAX_DELAY) { if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts for the interrupt process */ CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE | USART_CR1_TXEIE_TXFNFIE)); CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE); hirda->gState = HAL_IRDA_STATE_READY; hirda->RxState = HAL_IRDA_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(hirda); return HAL_TIMEOUT; } } } return HAL_OK; } /** * @brief End ongoing Tx transfer on IRDA peripheral (following error detection or Transmit completion). * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. * @retval None */ static void IRDA_EndTxTransfer(IRDA_HandleTypeDef *hirda) { /* Disable TXEIE and TCIE interrupts */ CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_TXEIE_TXFNFIE | USART_CR1_TCIE)); /* At end of Tx process, restore hirda->gState to Ready */ hirda->gState = HAL_IRDA_STATE_READY; } /** * @brief End ongoing Rx transfer on UART peripheral (following error detection or Reception completion). * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. 
  * @retval None
  */
static void IRDA_EndRxTransfer(IRDA_HandleTypeDef *hirda)
{
  /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */
  CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE));
  CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

  /* At end of Rx process, restore hirda->RxState to Ready */
  hirda->RxState = HAL_IRDA_STATE_READY;
}

/**
  * @brief DMA IRDA transmit process complete callback.
  * @param hdma Pointer to a DMA_HandleTypeDef structure that contains
  *             the configuration information for the specified DMA module.
  * @retval None
  */
static void IRDA_DMATransmitCplt(DMA_HandleTypeDef *hdma)
{
  IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent);

  /* DMA Normal mode: transfer is over, hand off to the TC interrupt to
     finish the frame on the wire before reporting completion */
  if (HAL_IS_BIT_CLR(hdma->Instance->CCR, DMA_CCR_CIRC))
  {
    hirda->TxXferCount = 0U;

    /* Disable the DMA transfer for transmit request by resetting the DMAT bit
       in the IRDA CR3 register */
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAT);

    /* Enable the IRDA Transmit Complete Interrupt */
    SET_BIT(hirda->Instance->CR1, USART_CR1_TCIE);
  }
  /* DMA Circular mode: report completion immediately on every wrap */
  else
  {
#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
    /* Call registered Tx complete callback */
    hirda->TxCpltCallback(hirda);
#else
    /* Call legacy weak Tx complete callback */
    HAL_IRDA_TxCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
  }
}

/**
  * @brief DMA IRDA transmit process half complete callback.
  * @param hdma Pointer to a DMA_HandleTypeDef structure that contains
  *             the configuration information for the specified DMA module.
  * @retval None
  */
static void IRDA_DMATransmitHalfCplt(DMA_HandleTypeDef *hdma)
{
  IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent);

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
  /* Call registered Tx Half complete callback */
  hirda->TxHalfCpltCallback(hirda);
#else
  /* Call legacy weak Tx Half complete callback */
  HAL_IRDA_TxHalfCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
}

/**
  * @brief DMA IRDA receive process complete callback.
  * @param hdma Pointer to a DMA_HandleTypeDef structure that contains
  *             the configuration information for the specified DMA module.
  * @retval None
  */
static void IRDA_DMAReceiveCplt(DMA_HandleTypeDef *hdma)
{
  IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent);

  /* DMA Normal mode: reception finished, tear down DMA and error IRQs */
  if (HAL_IS_BIT_CLR(hdma->Instance->CCR, DMA_CCR_CIRC))
  {
    hirda->RxXferCount = 0U;

    /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */
    CLEAR_BIT(hirda->Instance->CR1, USART_CR1_PEIE);
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

    /* Disable the DMA transfer for the receiver request by resetting the DMAR bit
       in the IRDA CR3 register */
    CLEAR_BIT(hirda->Instance->CR3, USART_CR3_DMAR);

    /* At end of Rx process, restore hirda->RxState to Ready */
    hirda->RxState = HAL_IRDA_STATE_READY;
  }
  /* Note: in Circular mode the completion callback below fires on every wrap,
     with the driver state left BUSY_RX */

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
  /* Call registered Rx complete callback */
  hirda->RxCpltCallback(hirda);
#else
  /* Call legacy weak Rx complete callback */
  HAL_IRDA_RxCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
}

/**
  * @brief DMA IRDA receive process half complete callback.
  * @param hdma Pointer to a DMA_HandleTypeDef structure that contains
  *             the configuration information for the specified DMA module.
  * @retval None
  */
static void IRDA_DMAReceiveHalfCplt(DMA_HandleTypeDef *hdma)
{
  IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent);

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
  /*Call registered Rx Half complete callback*/
  hirda->RxHalfCpltCallback(hirda);
#else
  /* Call legacy weak Rx Half complete callback */
  HAL_IRDA_RxHalfCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
}

/**
  * @brief DMA IRDA communication error callback.
  * @param hdma Pointer to a DMA_HandleTypeDef structure that contains
  *             the configuration information for the specified DMA module.
  * @retval None
  */
static void IRDA_DMAError(DMA_HandleTypeDef *hdma)
{
  IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent);

  /* Stop IRDA DMA Tx request if ongoing */
  if (hirda->gState == HAL_IRDA_STATE_BUSY_TX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAT))
    {
      hirda->TxXferCount = 0U;
      IRDA_EndTxTransfer(hirda);
    }
  }

  /* Stop IRDA DMA Rx request if ongoing */
  if (hirda->RxState == HAL_IRDA_STATE_BUSY_RX)
  {
    if (HAL_IS_BIT_SET(hirda->Instance->CR3, USART_CR3_DMAR))
    {
      hirda->RxXferCount = 0U;
      IRDA_EndRxTransfer(hirda);
    }
  }

  hirda->ErrorCode |= HAL_IRDA_ERROR_DMA;

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
  /* Call registered user error callback */
  hirda->ErrorCallback(hirda);
#else
  /* Call legacy weak user error callback */
  HAL_IRDA_ErrorCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
}

/**
  * @brief DMA IRDA communication abort callback, when initiated by HAL services on Error
  *        (To be called at end of DMA Abort procedure following error occurrence).
  * @param hdma DMA handle.
* @retval None */ static void IRDA_DMAAbortOnError(DMA_HandleTypeDef *hdma) { IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent); hirda->RxXferCount = 0U; hirda->TxXferCount = 0U; #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /* Call registered user error callback */ hirda->ErrorCallback(hirda); #else /* Call legacy weak user error callback */ HAL_IRDA_ErrorCallback(hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACK */ } /** * @brief DMA IRDA Tx communication abort callback, when initiated by user * (To be called at end of DMA Tx Abort procedure following user abort request). * @note When this callback is executed, User Abort complete call back is called only if no * Abort still ongoing for Rx DMA Handle. * @param hdma DMA handle. * @retval None */ static void IRDA_DMATxAbortCallback(DMA_HandleTypeDef *hdma) { IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent); hirda->hdmatx->XferAbortCallback = NULL; /* Check if an Abort process is still ongoing */ if (hirda->hdmarx != NULL) { if (hirda->hdmarx->XferAbortCallback != NULL) { return; } } /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ hirda->TxXferCount = 0U; hirda->RxXferCount = 0U; /* Reset errorCode */ hirda->ErrorCode = HAL_IRDA_ERROR_NONE; /* Clear the Error flags in the ICR register */ __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF); /* Restore hirda->gState and hirda->RxState to Ready */ hirda->gState = HAL_IRDA_STATE_READY; hirda->RxState = HAL_IRDA_STATE_READY; /* Call user Abort complete callback */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /* Call registered Abort complete callback */ hirda->AbortCpltCallback(hirda); #else /* Call legacy weak Abort complete callback */ HAL_IRDA_AbortCpltCallback(hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACK */ } /** * @brief DMA IRDA Rx communication abort callback, when initiated by user * (To be called at end of DMA Rx Abort procedure 
following user abort request). * @note When this callback is executed, User Abort complete call back is called only if no * Abort still ongoing for Tx DMA Handle. * @param hdma DMA handle. * @retval None */ static void IRDA_DMARxAbortCallback(DMA_HandleTypeDef *hdma) { IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent); hirda->hdmarx->XferAbortCallback = NULL; /* Check if an Abort process is still ongoing */ if (hirda->hdmatx != NULL) { if (hirda->hdmatx->XferAbortCallback != NULL) { return; } } /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ hirda->TxXferCount = 0U; hirda->RxXferCount = 0U; /* Reset errorCode */ hirda->ErrorCode = HAL_IRDA_ERROR_NONE; /* Clear the Error flags in the ICR register */ __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF); /* Restore hirda->gState and hirda->RxState to Ready */ hirda->gState = HAL_IRDA_STATE_READY; hirda->RxState = HAL_IRDA_STATE_READY; /* Call user Abort complete callback */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /* Call registered Abort complete callback */ hirda->AbortCpltCallback(hirda); #else /* Call legacy weak Abort complete callback */ HAL_IRDA_AbortCpltCallback(hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACK */ } /** * @brief DMA IRDA Tx communication abort callback, when initiated by user by a call to * HAL_IRDA_AbortTransmit_IT API (Abort only Tx transfer) * (This callback is executed at end of DMA Tx Abort procedure following user abort request, * and leads to user Tx Abort Complete callback execution). * @param hdma DMA handle. 
* @retval None */ static void IRDA_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma) { IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)(hdma->Parent); hirda->TxXferCount = 0U; /* Restore hirda->gState to Ready */ hirda->gState = HAL_IRDA_STATE_READY; /* Call user Abort complete callback */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /* Call registered Abort Transmit Complete Callback */ hirda->AbortTransmitCpltCallback(hirda); #else /* Call legacy weak Abort Transmit Complete Callback */ HAL_IRDA_AbortTransmitCpltCallback(hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACK */ } /** * @brief DMA IRDA Rx communication abort callback, when initiated by user by a call to * HAL_IRDA_AbortReceive_IT API (Abort only Rx transfer) * (This callback is executed at end of DMA Rx Abort procedure following user abort request, * and leads to user Rx Abort Complete callback execution). * @param hdma DMA handle. * @retval None */ static void IRDA_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) { IRDA_HandleTypeDef *hirda = (IRDA_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; hirda->RxXferCount = 0U; /* Clear the Error flags in the ICR register */ __HAL_IRDA_CLEAR_FLAG(hirda, IRDA_CLEAR_OREF | IRDA_CLEAR_NEF | IRDA_CLEAR_PEF | IRDA_CLEAR_FEF); /* Restore hirda->RxState to Ready */ hirda->RxState = HAL_IRDA_STATE_READY; /* Call user Abort complete callback */ #if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1) /* Call registered Abort Receive Complete Callback */ hirda->AbortReceiveCpltCallback(hirda); #else /* Call legacy weak Abort Receive Complete Callback */ HAL_IRDA_AbortReceiveCpltCallback(hirda); #endif /* USE_HAL_IRDA_REGISTER_CALLBACK */ } /** * @brief Send an amount of data in interrupt mode. * @note Function is called under interruption only, once * interruptions have been enabled by HAL_IRDA_Transmit_IT(). * @param hirda Pointer to a IRDA_HandleTypeDef structure that contains * the configuration information for the specified IRDA module. 
  * @retval None
  */
static void IRDA_Transmit_IT(IRDA_HandleTypeDef *hirda)
{
  uint16_t *tmp;

  /* Check that a Tx process is ongoing */
  if (hirda->gState == HAL_IRDA_STATE_BUSY_TX)
  {
    if (hirda->TxXferCount == 0U)
    {
      /* Disable the IRDA Transmit Data Register Empty Interrupt */
      CLEAR_BIT(hirda->Instance->CR1, USART_CR1_TXEIE_TXFNFIE);

      /* Enable the IRDA Transmit Complete Interrupt */
      SET_BIT(hirda->Instance->CR1, USART_CR1_TCIE);
    }
    else
    {
      /* 9-bit frames without parity carry 9 data bits, so the buffer is
         consumed two bytes at a time */
      if ((hirda->Init.WordLength == IRDA_WORDLENGTH_9B) && (hirda->Init.Parity == IRDA_PARITY_NONE))
      {
        tmp = (uint16_t *) hirda->pTxBuffPtr; /* Derogation R.11.3 */
        hirda->Instance->TDR = (uint16_t)(*tmp & 0x01FFU);
        hirda->pTxBuffPtr += 2U;
      }
      else
      {
        hirda->Instance->TDR = (uint8_t)(*hirda->pTxBuffPtr & 0xFFU);
        hirda->pTxBuffPtr++;
      }
      hirda->TxXferCount--;
    }
  }
}

/**
  * @brief  Wrap up transmission in non-blocking mode.
  * @param  hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *               the configuration information for the specified IRDA module.
  * @retval None
  */
static void IRDA_EndTransmit_IT(IRDA_HandleTypeDef *hirda)
{
  /* Disable the IRDA Transmit Complete Interrupt */
  CLEAR_BIT(hirda->Instance->CR1, USART_CR1_TCIE);

  /* Tx process is ended, restore hirda->gState to Ready */
  hirda->gState = HAL_IRDA_STATE_READY;

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
  /* Call registered Tx complete callback */
  hirda->TxCpltCallback(hirda);
#else
  /* Call legacy weak Tx complete callback */
  HAL_IRDA_TxCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
}

/**
  * @brief  Receive an amount of data in interrupt mode.
  * @note   Function is called under interruption only, once
  *         interruptions have been enabled by HAL_IRDA_Receive_IT()
  * @param  hirda Pointer to a IRDA_HandleTypeDef structure that contains
  *               the configuration information for the specified IRDA module.
  * @retval None
  */
static void IRDA_Receive_IT(IRDA_HandleTypeDef *hirda)
{
  uint16_t *tmp;
  uint16_t uhMask = hirda->Mask;
  uint16_t uhdata;

  /* Check that a Rx process is ongoing */
  if (hirda->RxState == HAL_IRDA_STATE_BUSY_RX)
  {
    uhdata = (uint16_t) READ_REG(hirda->Instance->RDR);

    /* 9-bit frames without parity store 9 data bits, two bytes per element */
    if ((hirda->Init.WordLength == IRDA_WORDLENGTH_9B) && (hirda->Init.Parity == IRDA_PARITY_NONE))
    {
      tmp = (uint16_t *) hirda->pRxBuffPtr; /* Derogation R.11.3 */
      *tmp = (uint16_t)(uhdata & uhMask);
      hirda->pRxBuffPtr += 2U;
    }
    else
    {
      *hirda->pRxBuffPtr = (uint8_t)(uhdata & (uint8_t)uhMask);
      hirda->pRxBuffPtr++;
    }
    hirda->RxXferCount--;

    if (hirda->RxXferCount == 0U)
    {
      /* Disable the IRDA Parity Error Interrupt and RXNE interrupt */
      CLEAR_BIT(hirda->Instance->CR1, (USART_CR1_RXNEIE_RXFNEIE | USART_CR1_PEIE));

      /* Disable the IRDA Error Interrupt: (Frame error, noise error, overrun error) */
      CLEAR_BIT(hirda->Instance->CR3, USART_CR3_EIE);

      /* Rx process is completed, restore hirda->RxState to Ready */
      hirda->RxState = HAL_IRDA_STATE_READY;

#if (USE_HAL_IRDA_REGISTER_CALLBACKS == 1)
      /* Call registered Rx complete callback */
      hirda->RxCpltCallback(hirda);
#else
      /* Call legacy weak Rx complete callback */
      HAL_IRDA_RxCpltCallback(hirda);
#endif /* USE_HAL_IRDA_REGISTER_CALLBACKS */
    }
  }
  else
  {
    /* Unexpected data while not receiving: discard it.
       Clear RXNE interrupt flag */
    __HAL_IRDA_SEND_REQ(hirda, IRDA_RXDATA_FLUSH_REQUEST);
  }
}

/**
  * @}
  */

#endif /* HAL_IRDA_MODULE_ENABLED */
/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
652064.c
/*
@author Michael Rohs
@date 15.10.2015, 26.09.2020
@copyright Apache License, Version 2.0
*/

#include "base.h"
#include "string.h"
#include "list.h"

#undef free // use the 'real' free here
#undef exit // use the 'real' exit here
//#undef xmalloc
//#undef xcalloc

////////////////////////////////////////////////////////////////////////////
// Memory allocation

// http://www.gnu.org/software/libc/manual/html_node/Hooks-for-Malloc.html#Hooks-for-Malloc
// this GNU C solution does not work on Mac OS X
// Mac OS X solution does not work on other platforms
// so simply use preprocessor, does not catch things like strdup
// (stderr, or use macro for that as well)

// One node per tracked allocation; forms a singly linked list rooted at
// base_alloc_info. Records where the allocation was made for leak reports.
typedef struct BaseAllocInfo {
    Any p;                      // the allocated pointer
    size_t size;                // requested size in bytes
    const char *file;           // source file of the allocation
    const char *function;       // function of the allocation
    int line;                   // line of the allocation
    struct BaseAllocInfo *next;
} BaseAllocInfo;

// Head of the allocation-tracking list (most recent allocation first).
BaseAllocInfo *base_alloc_info = NULL;

// Frees p and removes its tracking node from the allocation list.
// Warns on stderr if p was not tracked (but frees it anyway).
void base_free(Any p) {
#if 0
    // debug output
    printf("base_free: Calling free on %p\n", p);
    for (BaseAllocInfo *dp = base_alloc_info; dp != NULL; dp = dp->next) {
        printf("%p\n", dp->p);
    }
#endif
    bool removed = false;
    BaseAllocInfo *ai = base_alloc_info;
    if (ai != NULL) {
        if (ai->p == p) { // remove first
            BaseAllocInfo *del = ai;
            base_alloc_info = ai->next;
            free(del);
            removed = true;
        } else { // remove other than first
            for (; ai != NULL; ai = ai->next) {
                if (ai->next != NULL && ai->next->p == p) {
                    BaseAllocInfo *del = ai->next;
                    ai->next = ai->next->next;
                    free(del);
                    removed = true;
                    break;
                }
            }
        }
    }
    if (!removed) {
        fprintf(stderr, "base_free: trying to free unknown pointer %p\n", p);
    }
    free(p);
}

// Remembered for base_atexit so a failed run suppresses further output.
static int exit_status = EXIT_SUCCESS;

// Records the status for the atexit handler, then exits.
void base_exit(int status) {
    // printsln("base_exit called");
    exit_status = status;
    exit(status);
}

static bool base_atexit_registered = false;
void base_atexit(void);
static bool do_memory_check = false;

// Registers the atexit handler exactly once.
void base_init(void) {
    if (!base_atexit_registered) {
        atexit(base_atexit);
        base_atexit_registered = true;
    }
}

// Enables (or disables) the end-of-process memory-leak report.
void report_memory_leaks(bool do_check) {
    base_init();
    do_memory_check = do_check;
}

// Tracked malloc: allocates size bytes (plus 4 guard bytes), records the
// call site, and exits on allocation failure.
Any base_malloc(const char *file, const char *function, int line, size_t size) {
    // allocate four bytes more than requested and fill with garbage,
    // such that non-terminated strings will produce an unexpected result
    Any p = malloc(size + 4);
    if (p == NULL) {
        fprintf(stderr, "%s, line %d: malloc(%lu) called in base_malloc returned NULL!\n",
                file, line, (unsigned long)size);
        base_exit(EXIT_FAILURE);
    }
    // printf("%s, line %d: malloc(%lu) returned %lx\n", file, line, (unsigned long)size, (unsigned long)p);
    // fill with garbage
    memset(p, '?', size + 3);
    ((char*)p)[size + 3] = '\0';
    // note: the tracking node itself is allocated with the raw malloc,
    // so it does not appear in the leak report
    BaseAllocInfo *ai = malloc(sizeof(BaseAllocInfo));
    if (ai == NULL) {
        fprintf(stderr, "%s, line %d: malloc(sizeof(BaseAllocInfo)) called in base_malloc returned NULL!\n",
                file, line);
        base_exit(EXIT_FAILURE);
    }
    ai->p = p;
    ai->size = size;
    ai->file = file;
    ai->function = function;
    ai->line = line;
    ai->next = base_alloc_info;
    base_alloc_info = ai;
    return p;
}

// Tracked calloc: zero-initialized allocation of num * size bytes,
// recorded in the allocation list; exits on failure.
Any base_calloc(const char *file, const char *function, int line, size_t num, size_t size) {
    // printf("%s, line %d: xcalloc(%lu, %lu)\n", file, line, (unsigned long)num, (unsigned long)size);
    Any p = calloc(num, size);
    if (p == NULL) {
        fprintf(stderr, "%s, line %d: calloc(%lu, %lu) called in base_calloc returned NULL!\n",
                file, line, (unsigned long)num, (unsigned long)size);
        base_exit(EXIT_FAILURE);
    }
    // printf("%s, line %d: xcalloc(%lu, %lu) returned %lx\n", file, line, (unsigned long)num, (unsigned long)size, (unsigned long)p);
    BaseAllocInfo *ai = malloc(sizeof(BaseAllocInfo));
    if (ai == NULL) {
        fprintf(stderr, "%s, line %d: malloc(sizeof(BaseAllocInfo)) called in base_calloc returned NULL!\n",
                file, line);
        base_exit(EXIT_FAILURE);
    }
    ai->p = p;
    ai->size = num * size;
    ai->file = file;
    ai->function = function;
    ai->line = line;
    ai->next = base_alloc_info;
    base_alloc_info = ai;
    // printf("base_calloc entered %p\n", base_alloc_info->p);
    return p;
}

// Reports any allocations still in the tracking list (the first few in
// detail, then a summary). Called from base_atexit when enabled.
static void base_check_memory(void) {
    // printsln("Checking for memory leaks:");
    int n = 0; // number of memory leaks
    size_t s = 0; // total number of leaked bytes
    for (BaseAllocInfo *ai = base_alloc_info; ai != NULL; ai = ai->next) {
        if (n < 5) { // only show the first ones explicitly
            fprintf(stderr, "%5lu bytes allocated in %s (%s, line %d) not freed\n",
                    (unsigned long)ai->size, ai->function, ai->file, ai->line);
        }
        n++;
        s += ai->size;
    }
    if (n > 0) {
        fprintf(stderr, "%d memory leak%s, %lu bytes total\n",
                n, n == 1 ? "" : "s", (unsigned long)s);
    } else {
        // fprintf(stderr, "No memory leaks.\n");
    }
}

////////////////////////////////////////////////////////////////////////////
// Conversion

// String to int (atoi semantics: 0 on unparsable input).
int i_of_s(String s) {
    require_not_null(s);
    return atoi(s);
}

// String to double (atof semantics: 0.0 on unparsable input).
double d_of_s(String s) {
    require_not_null(s);
    return atof(s);
}

// Parses the substring [start, end) of s as a double; 0.0 for empty or
// out-of-range index ranges.
double d_of_s_sub(String s, int start, int end) {
    require_not_null(s);
    int n = s_length(s);
    if (n <= 0 || end <= 0 || start >= n || start >= end) return 0.0;
    String t = s_sub(s, start, end);
    double d = atof(t);
    s_free(t);
    return d;
}

////////////////////////////////////////////////////////////////////////////
// Output

void printi(int i) { printf("%d", i); }
void printiln(int i) { printf("%d\n", i); }
void printd(double d) { printf("%g", d); }
void printdln(double d) { printf("%g\n", d); }
void printc(char c) { printf("%c", c); }
void printcln(char c) { printf("%c\n", c); }
void prints(String s) { require_not_null(s); printf("%s", s); }
void printsln(String s) { require_not_null(s); printf("%s\n", s); }
void printb(bool b) { printf("%s", b ? "true" : "false"); }
void printbln(bool b) { printf("%s\n", b ? "true" : "false"); }
void println() { printf("\n"); }

// Prints an int array as [e0 e1 ...].
void printia(int *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("%d", a[0]); }
    for (int i = 1; i < n; i++) { printf(" %d", a[i]); }
    putchar(']');
}

void printialn(int *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    printia(a, n);
    println();
}

// Prints a double array as [e0 e1 ...].
void printda(double *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("%g", a[0]); }
    for (int i = 1; i < n; i++) { printf(" %g", a[i]); }
    putchar(']');
}

void printdaln(double *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    printda(a, n);
    println();
}

// Prints a String array as ["e0" "e1" ...].
void printsa(String *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("\"%s\"", a[0]); }
    for (int i = 1; i < n; i++) { printf(" \"%s\"", a[i]); }
    putchar(']');
}

void printsaln(String *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    printsa(a, n);
    println();
}

// Prints a char array as ['e0' 'e1' ...].
void printca(char *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("'%c'", a[0]); }
    for (int i = 1; i < n; i++) { printf(" '%c'", a[i]); }
    putchar(']');
}

void printcaln(char *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    printca(a, n);
    println();
}

// Prints a Byte array as [e0 e1 ...].
void printba(Byte *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("%d", a[0]); }
    for (int i = 1; i < n; i++) { printf(" %d", a[i]); }
    putchar(']');
}

void printbaln(Byte *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    printba(a, n);
    println();
}

// Prints a bool array as [true false ...].
void printboa(bool *a, int n) {
    require_not_null(a);
    require("non-negative length", n >= 0);
    putchar('[');
    if (n > 0) { printf("%s", a[0] ? "true" : "false"); }
    for (int i = 1; i < n; i++) { printf(" %s", a[i] ?
"true" : "false"); } putchar(']'); } void printboaln(bool *a, int n) { require_not_null(a); require("non-negative length", n >= 0); printboa(a, n); println(); } //////////////////////////////////////////////////////////////////////////// // Input void get_line(char *line, int n) { require_not_null(line); require("not too small", n >= 8); fgets(line, n, stdin); n = strlen(line); if (n >= 1 && (line[n-1] == '\n' || line[n-1] == '\r')) line[n-1] = '\0'; if (n >= 2 && (line[n-2] == '\n' || line[n-2] == '\r')) line[n-2] = '\0'; // printf("%d %d %d\n", line[n-2], line[n-1], line[n]); // printf("'%s'\n", line); } String s_input(int n) { if (n < 8) n = 8; char *line = base_malloc(__FILE__, __func__, __LINE__, n); *line = '\0'; fgets(line, n, stdin); n = strlen(line); if (n >= 1 && (line[n-1] == '\n' || line[n-1] == '\r')) line[n-1] = '\0'; if (n >= 2 && (line[n-2] == '\n' || line[n-2] == '\r')) line[n-2] = '\0'; n = strlen(line); String s = base_malloc(__FILE__, __func__, __LINE__, n + 1); strcpy(s, line); base_free(line); return s; } int i_input(void) { String s = s_input(100); int i = i_of_s(s); s_free(s); return i; } double d_input(void) { String s = s_input(100); double d = d_of_s(s); s_free(s); return d; } //////////////////////////////////////////////////////////////////////////// // Files String s_read_file(String name) { require_not_null(name); FILE *f = fopen(name, "r"); // removes \r from read content, only leaves \n if (f == NULL) { fprintf(stderr, "%s: Cannot open %s\n", (String)__func__, name); base_exit(EXIT_FAILURE); } fseek (f, 0, SEEK_END); long size = ftell(f); rewind(f); char *s = base_malloc(__FILE__, __func__, __LINE__, size + 1); if (s == NULL) { fprintf(stderr, "%s: Cannot allocate memory.\n", (String)__func__); base_exit(EXIT_FAILURE); } long sizeRead = fread(s, 1, size, f); // assert: size >= sizeRead (> if file contains \r characters) // printf("size = %lu, sizeRead = %lu, feof = %d\n", size, sizeRead, feof(f)); if (sizeRead < size && feof(f) == 
0) { fprintf(stderr, "%s: Cannot read file %s to end.\n", (String)__func__, name); base_exit(EXIT_FAILURE); } s[sizeRead] = '\0'; fclose(f); return s; } void s_write_file(String name, String data) { require_not_null(name); require_not_null(data); FILE *f = fopen(name, "w"); if (f == NULL) { fprintf(stderr, "%s: Cannot open %s\n", (String)__func__, name); base_exit(EXIT_FAILURE); } size_t n_data = strlen(data); size_t n_written = fwrite(data , 1, n_data, f); if (n_written != n_data) { fprintf(stderr, "%s: Cannot write data to file %s.\n", (String)__func__, name); base_exit(EXIT_FAILURE); } fclose(f); } void write_file_data(String name, Byte *data, int n_data) { require_not_null(name); require_not_null(data); require("non-negative length", n_data >= 0); FILE *f = fopen(name, "w"); if (f == NULL) { fprintf(stderr, "%s: Cannot open %s\n", (String)__func__, name); base_exit(EXIT_FAILURE); } size_t n_written = fwrite(data , 1, n_data, f); if (n_written != n_data) { fprintf(stderr, "%s: Cannot write data to file %s.\n", (String)__func__, name); base_exit(EXIT_FAILURE); } fclose(f); } //////////////////////////////////////////////////////////////////////////// // Random numbers static bool srand_called = false; int i_rnd(int i) { require("positive range", i > 0); if (!srand_called) { srand(time(NULL) << 10); srand_called = true; } int result = 0; if (RAND_MAX == 32767) { result = ((rand() << 16) | rand()) % i; } else { result = rand() % i; } ensure("random number in range", 0 <= result && result < i); return result; } double d_rnd(double i) { require("positive range", i > 0); if (!srand_called) { srand(time(NULL) << 10); srand_called = true; } double r = (double) rand() / (double) RAND_MAX; double result = i * r; ensure("random number in range", 0 <= result && result < i); return result; } bool b_rnd(void) { return i_rnd(2) == 0; } //////////////////////////////////////////////////////////////////////////// // Testing int base_check_count = 0; int base_check_success_count 
= 0;

// http://www.gnu.org/software/libc/manual/html_node/Cleanups-on-Exit.html#Cleanups-on-Exit
// Exit handler: prints a test summary and (optionally) the leak report.
void base_atexit(void) {
    // if not a successful exit, suppress further output
    if (exit_status == EXIT_SUCCESS) {
        // summary information about tests (if any)
        if (base_check_count > 0) {
            int fail_count = base_check_count - base_check_success_count;
            if (fail_count <= 0) {
                if (base_check_count == 1) {
                    fprintf(stderr, "The test passed!\n");
                } else if (base_check_count == 2) {
                    fprintf(stderr, "Both tests passed!\n");
                } else if (base_check_count >= 3) {
                    fprintf(stderr, "All %d tests passed!\n", base_check_count);
                }
            } else {
                if (base_check_count == 1) {
                    fprintf(stderr, "The test failed.\n");
                } else {
                    if (base_check_success_count == 0) {
                        fprintf(stderr, "0 of %d tests passed.\n", base_check_count);
                    } else {
                        fprintf(stderr, "%d of %d tests failed.\n", fail_count, base_check_count);
                    }
                }
            }
        }
        // information about memory leaks (if any)
        if (do_memory_check) {
            base_check_memory();
        }
    }
}

#if 0
// manual exercise of the summary messages above (disabled)
void base_atexit_test(void) {
    base_check_count = 0; base_check_success_count = 0; base_atexit();
    base_check_count = 1; base_check_success_count = 0; base_atexit();
    base_check_count = 1; base_check_success_count = 1; base_atexit();
    base_check_count = 2; base_check_success_count = 0; base_atexit();
    base_check_count = 2; base_check_success_count = 1; base_atexit();
    base_check_count = 2; base_check_success_count = 2; base_atexit();
    base_check_count = 3; base_check_success_count = 0; base_atexit();
    base_check_count = 3; base_check_success_count = 1; base_atexit();
    base_check_count = 3; base_check_success_count = 2; base_atexit();
    base_check_count = 3; base_check_success_count = 3; base_atexit();
}
#endif

// Checks a == e for bools; records the result and reports to stdout.
bool base_test_equal_b(const char *file, int line, bool a, bool e) {
    base_init();
    base_check_count++;
    if (a == e) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value ", file, line);
        printb(a);
        prints(" differs from expected value ");
        printb(e);
        printsln(".");
        return false;
    }
}

// Checks a == e for ints; records the result and reports to stdout.
bool base_test_equal_i(const char *file, int line, int a, int e) {
    base_init();
    base_check_count++;
    if (a == e) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value %d differs from expected value %d.\n", file, line, a, e);
        return false;
    }
}

// Checks |a - e| <= epsilon for doubles; records the result and reports.
bool base_test_within_d(const char *file, int line, double a, double e, double epsilon) {
    base_init();
    base_check_count++;
    if (fabs(a - e) <= epsilon) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value %g is not within %g of expected value %g.\n",
               file, line, a, epsilon, e);
        return false;
    }
}

// Checks |a - e| <= epsilon for ints; records the result and reports.
bool base_test_within_i(const char *file, int line, int a, int e, int epsilon) {
    base_init();
    base_check_count++;
    if (abs(a - e) <= epsilon) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value %d is not within %d of expected value %d.\n",
               file, line, a, epsilon, e);
        return false;
    }
}

// Checks a == e for chars; records the result and reports to stdout.
bool base_test_equal_c(const char *file, int line, char a, char e) {
    base_init();
    base_check_count++;
    if (a == e) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value '%c' differs from expected value '%c'.\n", file, line, a, e);
        return false;
    }
}

// Checks string equality (strcmp); records the result and reports.
bool base_test_equal_s(const char *file, int line, String a, String e) {
    base_init();
    base_check_count++;
    if (strcmp(a, e) == 0) {
        printf("%s, line %d: Test passed.\n", file, line);
        base_check_success_count++;
        return true;
    } else {
        printf("%s, line %d: Actual value \"%s\" differs from expected value \"%s\".\n", file, line, a, e);
        return false;
    }
}

// Checks that char Array a equals the expected raw array e of length ne
// (length, element size and contents); records the result and reports.
bool base_test_equal_ca(const char *file, int line, Array a, char *e, int ne) {
    base_init();
    base_check_count++;
    if (a->n != ne) {
        printf("%s, line %d: Actual length %d " "differs from expected length %d\n", file, line, a->n, ne);
        return false;
    }
    if (a->s != sizeof(char)) {
        printf("%s, line %d: Actual element size %d " "differs from expected element size %lu\n",
               file, line, a->s, (unsigned long)sizeof(char));
        return false;
    }
    if (a->n < 0) {
        printf("%s, line %d: Invalid lengths %d\n", file, line, a->n);
        return false;
    }
    if (ne < 0) {
        printf("%s, line %d: Invalid lengths %d\n", file, line, ne);
        return false;
    }
    if (a->n > 0 && a->a == NULL) {
        printf("%s, line %d: Actual value array is NULL\n", file, line);
        return false;
    }
    if (ne > 0 && e == NULL) {
        printf("%s, line %d: Expected value array is NULL\n", file, line);
        return false;
    }
    char *ca = a->a;
    for (int i = 0; i < a->n; i++) {
        if (ca[i] != e[i]) {
            printf("%s, line %d: Actual value ", file, line);
            printca(ca, a->n);
            prints(" differs from expected value ");
            printca(e, ne);
            printf(" at index %d.\n", i);
            return false;
        }
    }
    printf("%s, line %d: Test passed.\n", file, line);
    base_check_success_count++;
    return true;
}

// Checks that bool Array a equals the expected raw array e of length ne
// (length, element size and contents); records the result and reports.
bool base_test_equal_boa(const char *file, int line, Array a, bool *e, int ne) {
    base_init();
    base_check_count++;
    if (a->n != ne) {
        printf("%s, line %d: Actual length %d " "differs from expected length %d\n", file, line, a->n, ne);
        return false;
    }
    if (a->s != sizeof(bool)) {
        printf("%s, line %d: Actual element size %d " "differs from expected element size %lu\n",
               file, line, a->s, (unsigned long)sizeof(bool));
        return false;
    }
    if (a->n < 0) {
        printf("%s, line %d: Invalid lengths %d\n", file, line, a->n);
        return false;
    }
    if (ne < 0) {
        printf("%s, line %d: Invalid lengths %d\n", file, line, ne);
        return false;
    }
    if (a->n > 0 && a->a == NULL) {
        printf("%s, line %d: Actual value array is NULL\n", file, line);
        return false;
    }
    if (ne > 0 && e == NULL) {
        printf("%s, line %d: Expected value array is NULL\n", file, line);
        return false;
    }
    bool *ba = a->a;
    for (int i = 0; i < a->n; i++) {
        if (ba[i] != e[i]) {
            printf("%s, line %d: Actual value ", file, line);
            printboa(ba, a->n);
            prints(" differs from
expected value "); printboa(e, ne); printf(" at index %d.\n", i); return false; } } printf("%s, line %d: Test passed.\n", file, line); base_check_success_count++; return true; } bool base_test_equal_struct(const char *file, int line, Any actual, Any expected, Any predicate) { bool (*pred)(Any, Any) = predicate; base_init(); base_check_count++; if (pred(actual, expected)) { printf("%s, line %d: Test passed.\n", file, line); base_check_success_count++; return true; } else { printf("%s, line %d: Test failed. Actual value differs from expected value.\n", file, line); return false; } } void base_count_check(void) { base_init(); base_check_count++; } void base_count_success(void) { base_init(); base_check_success_count++; } //////////////////////////////////////////////////////////////////////////// // Testing base itself int baseTest(void) { /* printiln(123); printi(123); println(); printdln(123456.123456); printd(123456.123456); println(); printcln('a'); printc('x'); printc('y'); printcln('z'); printc('a'); printc('\t'); printc('b'); println(); printsln("hello"); prints("hello"); println(); int as[] = { 1, 2, 3, 123, 234, 345, 1234, -2345 }; printia(as, 8); println(); printialn(as, 8); printialn(as, 7); printialn(as, 9); double ds[] = { 1.1, 2.2, 3.3, 123.123, 234.234, 345.345, 1234.1234, -2345.2345 }; printda(ds, 8); println(); printdaln(ds, 8); String ss[] = { "a", "b", "c", "ab", "hello", "world" }; printsa(ss, 6); println(); printsaln(ss, 6); */ return 0; }
966080.c
//############################################################################ // // FILE: SD_SPI_Initialization.c // // TITLE: SD/MMC Initialization Functions // //############################################################################ // Author: Hector Ta // Release Date: Dec 2021 //############################################################################ /* *********************************************************** * You may not use the Program in non-TI devices. * ********************************************************* */ #include "DSP2833x_Device.h" // DSP2833x Headerfile Include File #include "DSP2833x_Examples.h" // DSP2833x Examples Include File #include "SD.h" //SD Include File //Global Variables Uint16 response, ocr_contents[5], csd_contents[16], cid_contents[16]; Uint16 card_status[2], data_manipulation = TRUE, high_capacity = FALSE, crc_enabled = FALSE; //######################### CHECK_CARD_INSERTION ############################# void sd_card_insertion() { Uint16 i; //After Card Detection, SD protocol states that card needs 74 clock //cycles with the DATA IN line high for chip to stabilize. CS does not //need to be active for this action. sdstatus.bit.bit10_HighCapacity= 0; sdstatus.bit.bit0_CardDetected = 0; CS_HIGH; //Pull CS high for(i=0;i<10;i++){ //Transmit 0xFF for 80 clock cycles spi_xmit_byte(DUMMY_DATA); } SD_process ++; } //######################### CHECK_CARD_INSERTION ############################# //########################## SD_INITIALIZATION ############################### void sd_initialization() { CS_LOW; //Pull CS low data_manipulation = FALSE; //Register manipulation function //Transmit GO IDLE STATE command to card with CS low to select SPI mode //and put the card in IDLE mode. 
spi_xmit_command(GO_IDLE_STATE, STUFF_BITS, INITIAL_CRC); RESET_RESPONSE; //Reset response while(response != IN_IDLE_STATE) //Wait until card responds with IDLE response sd_command_response(); sdstatus.bit.bit0_CardDetected = 1; // GpioDataRegs.GPASET.bit.GPIO20 = TRUE; //Disable SDIO Mode LED, Optional // GpioDataRegs.GPACLEAR.bit.GPIO5 = TRUE; //Emit SPI Mode LED, Optional RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; crc_enabled = TRUE; //CRC is always enabled for SEND_IF_COND command spi_xmit_command(SEND_IF_COND, INTERFACE_COND, DUMMY_CRC); //Transmit SEND_IF_COND command crc_enabled = FALSE; //CRC is disabled for SPI mode //Wait until card responds with IDLE response while((response != IN_IDLE_STATE) && (response != ILLEGAL_COMMAND_IDLE_STATE)) response = spi_xmit_byte(DUMMY_DATA); //If SEND_IF_COND returned illegal command call sd_version1_initialization for //standard capacity card initialization. Otherwise call sd_version2_initialization //for high capacity card initialization if(response == ILLEGAL_COMMAND_IDLE_STATE) sd_version1_initialization(); else if(response == IN_IDLE_STATE) sd_version2_initialization(); CS_HIGH; //Pull CS high //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; SD_process++; SpiaRegs.SPIBRR = 0x0003; //Adjust Clock to 10.0 MHz (10.0 Mbps) } //########################## SD_INITIALIZATION ############################### //###################### SD_VERSION1_INITIALIZATION ########################## void sd_version1_initialization() { RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; spi_xmit_command(READ_OCR, STUFF_BITS, DUMMY_CRC); //Transmit READ_OCR command //Wait until card responds with IN_IDLE_STATE response while(response != IN_IDLE_STATE) sd_command_response(); sd_ocr_response(); //Call OCR response function //If host voltage is not 
compatible with card voltage, do not communicate //further with card if(ocr_contents[1] != SUPPORTED_VOLTAGE) sd_error(); RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; while(response != SUCCESS) { RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; //Before transmitting application specific commands, the APP_CMD //must be transmitted spi_xmit_command(APP_CMD, STUFF_BITS, DUMMY_CRC); //Wait until card responds with IN_IDLE_STATE response while(response != IN_IDLE_STATE) sd_command_response(); RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; //Transmit SEND OP COND command spi_xmit_command(SD_SEND_OP_COND, STUFF_BITS, DUMMY_CRC); sd_command_response(); //Receive response } RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; } //###################### SD_VERSION1_INITIALIZATION ########################## //###################### SD_VERSION2_INITIALIZATION ########################## void sd_version2_initialization() { Uint16 i, send_if_cond_response[4], ccs_check; //Receive SEND_IF_COND response for(i=0;i<3;i++) send_if_cond_response[i] = spi_xmit_byte(DUMMY_DATA); //If voltage accepted or check pattern does not match, do not communicate further if((send_if_cond_response[2] != SUPPLY_VOLTAGE) || (send_if_cond_response[3] != CHECK_PATTERN)) sd_error(); RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; while((response != SUCCESS)&&(SD_process)) { RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; //Before transmitting application specific commands, the APP_CMD //must be transmitted spi_xmit_command(APP_CMD, STUFF_BITS, 
DUMMY_CRC); while(response != IN_IDLE_STATE)//Wait until card responds with IN_IDLE_STATE response sd_command_response(); RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; //Transmit SEND_OP_COND command spi_xmit_command(SD_SEND_OP_COND, VER2_OP_COND, DUMMY_CRC); sd_command_response(); //Receive response } RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; spi_xmit_command(READ_OCR, STUFF_BITS, DUMMY_CRC); //Transmit READ_OCR command //Wait until card responds with SUCCESS response while((response != SUCCESS)&&(SD_process)) sd_command_response(); sd_ocr_response(); //Call OCR response function ccs_check = ocr_contents[0] & HIGH_CAPACITY; //Mask ocr_contents to test for High Capacity if(ccs_check == HIGH_CAPACITY){ //Check if card is High Capacity high_capacity = TRUE; sdstatus.bit.bit10_HighCapacity= 1; } RESET_RESPONSE; //Reset response //After receiving response clock must be active for 8 clock cycles EIGHT_CLOCK_CYCLE_DELAY; } //###################### SD_VERSION2_INITIALIZATION ##########################
675722.c
/**
  ******************************************************************************
  * @file    stm32h7xx_hal_hcd.c
  * @author  MCD Application Team
  * @brief   HCD HAL module driver.
  *          This file provides firmware functions to manage the following
  *          functionalities of the USB Peripheral Controller:
  *           + Initialization and de-initialization functions
  *           + IO operation functions
  *           + Peripheral Control functions
  *           + Peripheral State functions
  *
  @verbatim
  ==============================================================================
                    ##### How to use this driver #####
  ==============================================================================
  [..]
    (#)Declare a HCD_HandleTypeDef handle structure, for example:
       HCD_HandleTypeDef  hhcd;

    (#)Fill parameters of Init structure in HCD handle

    (#)Call HAL_HCD_Init() API to initialize the HCD peripheral (Core, Host core, ...)

    (#)Initialize the HCD low level resources through the HAL_HCD_MspInit() API:
        (##) Enable the HCD/USB Low Level interface clock using the following macros
             (+++) __HAL_RCC_USB_OTG_FS_CLK_ENABLE();
             (+++) __HAL_RCC_USB_OTG_HS_CLK_ENABLE(); (For High Speed Mode)
             (+++) __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE(); (For High Speed Mode)
        (##) Initialize the related GPIO clocks
        (##) Configure HCD pin-out
        (##) Configure HCD NVIC interrupt

    (#)Associate the Upper USB Host stack to the HAL HCD Driver:
        (##) hhcd.pData = phost;

    (#)Enable HCD transmission and reception:
        (##) HAL_HCD_Start();

  @endverbatim
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
  * All rights reserved.</center></h2>
  *
  * This software component is licensed by ST under BSD 3-Clause license,
  * the "License"; You may not use this file except in compliance with the
  * License. You may obtain a copy of the License at:
  *                        opensource.org/licenses/BSD-3-Clause
  *
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "stm32h7xx_hal.h"

/** @addtogroup STM32H7xx_HAL_Driver
  * @{
  */

#ifdef HAL_HCD_MODULE_ENABLED
#if defined (USB_OTG_FS) || defined (USB_OTG_HS)

/** @defgroup HCD HCD
  * @brief HCD HAL module driver
  * @{
  */

/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/** @defgroup HCD_Private_Functions HCD Private Functions
  * @{
  */
static void HCD_HC_IN_IRQHandler(HCD_HandleTypeDef *hhcd, uint8_t chnum);
static void HCD_HC_OUT_IRQHandler(HCD_HandleTypeDef *hhcd, uint8_t chnum);
static void HCD_RXQLVL_IRQHandler(HCD_HandleTypeDef *hhcd);
static void HCD_Port_IRQHandler(HCD_HandleTypeDef *hhcd);
/**
  * @}
  */

/* Exported functions --------------------------------------------------------*/
/** @defgroup HCD_Exported_Functions HCD Exported Functions
  * @{
  */

/** @defgroup HCD_Exported_Functions_Group1 Initialization and de-initialization functions
  *  @brief    Initialization and Configuration functions
  *
@verbatim
 ===============================================================================
          ##### Initialization and de-initialization functions #####
 ===============================================================================
    [..]  This section provides functions allowing to:

@endverbatim
  * @{
  */

/**
  * @brief  Initialize the host driver.
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_Init(HCD_HandleTypeDef *hhcd)
{
  USB_OTG_GlobalTypeDef *USBx;

  /* Check the HCD handle allocation */
  if (hhcd == NULL)
  {
    return HAL_ERROR;
  }

  /* Check the parameters */
  assert_param(IS_HCD_ALL_INSTANCE(hhcd->Instance));

  USBx = hhcd->Instance;

  if (hhcd->State == HAL_HCD_STATE_RESET)
  {
    /* Allocate lock resource and initialize it */
    hhcd->Lock = HAL_UNLOCKED;

#if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U)
    /* Install the legacy weak callbacks as defaults before MspInit */
    hhcd->SOFCallback = HAL_HCD_SOF_Callback;
    hhcd->ConnectCallback = HAL_HCD_Connect_Callback;
    hhcd->DisconnectCallback = HAL_HCD_Disconnect_Callback;
    hhcd->PortEnabledCallback = HAL_HCD_PortEnabled_Callback;
    hhcd->PortDisabledCallback = HAL_HCD_PortDisabled_Callback;
    hhcd->HC_NotifyURBChangeCallback = HAL_HCD_HC_NotifyURBChange_Callback;

    if (hhcd->MspInitCallback == NULL)
    {
      hhcd->MspInitCallback = HAL_HCD_MspInit;
    }

    /* Init the low level hardware */
    hhcd->MspInitCallback(hhcd);
#else
    /* Init the low level hardware : GPIO, CLOCK, NVIC... */
    HAL_HCD_MspInit(hhcd);
#endif /* (USE_HAL_HCD_REGISTER_CALLBACKS) */
  }

  hhcd->State = HAL_HCD_STATE_BUSY;

  /* Disable DMA mode for FS instance (CID bit 8 distinguishes HS from FS) */
  if ((USBx->CID & (0x1U << 8)) == 0U)
  {
    hhcd->Init.dma_enable = 0U;
  }

  /* Disable the Interrupts */
  __HAL_HCD_DISABLE(hhcd);

  /* Init the Core (common init.) */
  (void)USB_CoreInit(hhcd->Instance, hhcd->Init);

  /* Force Host Mode*/
  (void)USB_SetCurrentMode(hhcd->Instance, USB_HOST_MODE);

  /* Init Host */
  (void)USB_HostInit(hhcd->Instance, hhcd->Init);

  hhcd->State = HAL_HCD_STATE_READY;

  return HAL_OK;
}

/**
  * @brief  Initialize a host channel.
  * @param  hhcd HCD handle
  * @param  ch_num Channel number.
  *         This parameter can be a value from 1 to 15
  * @param  epnum Endpoint number.
  *          This parameter can be a value from 1 to 15
  * @param  dev_address Current device address
  *          This parameter can be a value from 0 to 255
  * @param  speed Current device speed.
  *          This parameter can be one of these values:
  *            HCD_SPEED_HIGH: High speed mode,
  *            HCD_SPEED_FULL: Full speed mode,
  *            HCD_SPEED_LOW: Low speed mode
  * @param  ep_type Endpoint Type.
  *          This parameter can be one of these values:
  *            EP_TYPE_CTRL: Control type,
  *            EP_TYPE_ISOC: Isochronous type,
  *            EP_TYPE_BULK: Bulk type,
  *            EP_TYPE_INTR: Interrupt type
  * @param  mps Max Packet Size.
  *          This parameter can be a value from 0 to 32K
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_HC_Init(HCD_HandleTypeDef *hhcd,
                                  uint8_t ch_num,
                                  uint8_t epnum,
                                  uint8_t dev_address,
                                  uint8_t speed,
                                  uint8_t ep_type,
                                  uint16_t mps)
{
  HAL_StatusTypeDef status;

  __HAL_LOCK(hhcd);
  hhcd->hc[ch_num].do_ping = 0U;
  hhcd->hc[ch_num].dev_addr = dev_address;
  hhcd->hc[ch_num].max_packet = mps;
  hhcd->hc[ch_num].ch_num = ch_num;
  hhcd->hc[ch_num].ep_type = ep_type;
  hhcd->hc[ch_num].ep_num = epnum & 0x7FU;

  /* Bit 7 of the endpoint address encodes the IN direction */
  if ((epnum & 0x80U) == 0x80U)
  {
    hhcd->hc[ch_num].ep_is_in = 1U;
  }
  else
  {
    hhcd->hc[ch_num].ep_is_in = 0U;
  }

  hhcd->hc[ch_num].speed = speed;

  status = USB_HC_Init(hhcd->Instance, ch_num, epnum,
                       dev_address, speed, ep_type, mps);
  __HAL_UNLOCK(hhcd);

  return status;
}

/**
  * @brief  Halt a host channel.
  * @param  hhcd HCD handle
  * @param  ch_num Channel number.
  *         This parameter can be a value from 1 to 15
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_HC_Halt(HCD_HandleTypeDef *hhcd, uint8_t ch_num)
{
  HAL_StatusTypeDef status = HAL_OK;

  __HAL_LOCK(hhcd);
  (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num);
  __HAL_UNLOCK(hhcd);

  return status;
}

/**
  * @brief  DeInitialize the host driver.
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_DeInit(HCD_HandleTypeDef *hhcd)
{
  /* Check the HCD handle allocation */
  if (hhcd == NULL)
  {
    return HAL_ERROR;
  }

  hhcd->State = HAL_HCD_STATE_BUSY;

#if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U)
  if (hhcd->MspDeInitCallback == NULL)
  {
    hhcd->MspDeInitCallback = HAL_HCD_MspDeInit; /* Legacy weak MspDeInit */
  }

  /* DeInit the low level hardware */
  hhcd->MspDeInitCallback(hhcd);
#else
  /* DeInit the low level hardware: CLOCK, NVIC.*/
  HAL_HCD_MspDeInit(hhcd);
#endif /* USE_HAL_HCD_REGISTER_CALLBACKS */

  __HAL_HCD_DISABLE(hhcd);

  hhcd->State = HAL_HCD_STATE_RESET;

  return HAL_OK;
}

/**
  * @brief  Initialize the HCD MSP.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_MspInit(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_MspInit could be implemented in the user file
   */
}

/**
  * @brief  DeInitialize the HCD MSP.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_MspDeInit(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_MspDeInit could be implemented in the user file
   */
}

/**
  * @}
  */

/** @defgroup HCD_Exported_Functions_Group2 Input and Output operation functions
  *  @brief   HCD IO operation functions
  *
@verbatim
 ===============================================================================
                      ##### IO operation functions #####
 ===============================================================================
 [..] This subsection provides a set of functions allowing to manage the USB Host Data
    Transfer

@endverbatim
  * @{
  */

/**
  * @brief  Submit a new URB for processing.
  * @param  hhcd HCD handle
  * @param  ch_num Channel number.
  *         This parameter can be a value from 1 to 15
  * @param  direction Channel direction.
  *          This parameter can be one of these values:
  *           0 : Output / 1 : Input
  * @param  ep_type Endpoint Type.
  *          This parameter can be one of these values:
  *            EP_TYPE_CTRL: Control type/
  *            EP_TYPE_ISOC: Isochronous type/
  *            EP_TYPE_BULK: Bulk type/
  *            EP_TYPE_INTR: Interrupt type/
  * @param  token Endpoint Type.
  *          This parameter can be one of these values:
  *            0: HC_PID_SETUP / 1: HC_PID_DATA1
  * @param  pbuff pointer to URB data
  * @param  length Length of URB data
  * @param  do_ping activate do ping protocol (for high speed only).
  *          This parameter can be one of these values:
  *           0 : do ping inactive / 1 : do ping active
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_HC_SubmitRequest(HCD_HandleTypeDef *hhcd,
                                           uint8_t ch_num,
                                           uint8_t direction,
                                           uint8_t ep_type,
                                           uint8_t token,
                                           uint8_t *pbuff,
                                           uint16_t length,
                                           uint8_t do_ping)
{
  hhcd->hc[ch_num].ep_is_in = direction;
  hhcd->hc[ch_num].ep_type  = ep_type;

  if (token == 0U)
  {
    hhcd->hc[ch_num].data_pid = HC_PID_SETUP;
    hhcd->hc[ch_num].do_ping = do_ping;
  }
  else
  {
    hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
  }

  /* Manage Data Toggle */
  switch (ep_type)
  {
    case EP_TYPE_CTRL:
      if ((token == 1U) && (direction == 0U)) /*send data */
      {
        if (length == 0U)
        {
          /* For Status OUT stage, Length==0, Status Out PID = 1 */
          hhcd->hc[ch_num].toggle_out = 1U;
        }

        /* Set the Data Toggle bit as per the Flag */
        if (hhcd->hc[ch_num].toggle_out == 0U)
        {
          /* Put the PID 0 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
        }
        else
        {
          /* Put the PID 1 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
        }
      }
      break;

    case EP_TYPE_BULK:
      if (direction == 0U)
      {
        /* Set the Data Toggle bit as per the Flag */
        if (hhcd->hc[ch_num].toggle_out == 0U)
        {
          /* Put the PID 0 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
        }
        else
        {
          /* Put the PID 1 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
        }
      }
      else
      {
        if (hhcd->hc[ch_num].toggle_in == 0U)
        {
          hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
        }
        else
        {
          hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
        }
      }
      break;

    case EP_TYPE_INTR:
      if (direction == 0U)
      {
        /* Set the Data Toggle bit as per the Flag */
        if (hhcd->hc[ch_num].toggle_out == 0U)
        {
          /* Put the PID 0 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
        }
        else
        {
          /* Put the PID 1 */
          hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
        }
      }
      else
      {
        if (hhcd->hc[ch_num].toggle_in == 0U)
        {
          hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
        }
        else
        {
          hhcd->hc[ch_num].data_pid = HC_PID_DATA1;
        }
      }
      break;

    case EP_TYPE_ISOC:
      hhcd->hc[ch_num].data_pid = HC_PID_DATA0;
      break;

    default:
      break;
  }

  hhcd->hc[ch_num].xfer_buff = pbuff;
  hhcd->hc[ch_num].xfer_len = length;
  hhcd->hc[ch_num].urb_state = URB_IDLE;
  hhcd->hc[ch_num].xfer_count = 0U;
  hhcd->hc[ch_num].ch_num = ch_num;
  hhcd->hc[ch_num].state = HC_IDLE;

  return USB_HC_StartXfer(hhcd->Instance, &hhcd->hc[ch_num], (uint8_t)hhcd->Init.dma_enable);
}

/**
  * @brief  Handle HCD interrupt request.
  * @param  hhcd HCD handle
  * @retval None
  */
void HAL_HCD_IRQHandler(HCD_HandleTypeDef *hhcd)
{
  USB_OTG_GlobalTypeDef *USBx = hhcd->Instance;
  uint32_t USBx_BASE = (uint32_t)USBx;
  uint32_t i, interrupt;

  /* Ensure that we are in host mode */
  if (USB_GetMode(hhcd->Instance) == USB_OTG_MODE_HOST)
  {
    /* Avoid spurious interrupt */
    if (__HAL_HCD_IS_INVALID_INTERRUPT(hhcd))
    {
      return;
    }

    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT))
    {
      /* Incorrect mode, acknowledge the interrupt */
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT);
    }

    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_IISOIXFR))
    {
      /* Incorrect mode, acknowledge the interrupt */
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_IISOIXFR);
    }

    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_PTXFE))
    {
      /* Incorrect mode, acknowledge the interrupt */
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_PTXFE);
    }

    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_MMIS))
    {
      /* Incorrect mode, acknowledge the interrupt */
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_MMIS);
    }

    /* Handle Host Disconnect Interrupts */
    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_DISCINT))
    {
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_DISCINT);

      if ((USBx_HPRT0 & USB_OTG_HPRT_PCSTS) == 0U)
      {
        /* Handle Host Port Disconnect Interrupt */
#if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U)
        hhcd->DisconnectCallback(hhcd);
#else
        HAL_HCD_Disconnect_Callback(hhcd);
#endif /* USE_HAL_HCD_REGISTER_CALLBACKS */

        (void)USB_InitFSLSPClkSel(hhcd->Instance, HCFG_48_MHZ);
      }
    }

    /* Handle Host Port Interrupts */
    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_HPRTINT))
    {
      HCD_Port_IRQHandler(hhcd);
    }

    /* Handle Host SOF Interrupt */
    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_SOF))
    {
#if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U)
      hhcd->SOFCallback(hhcd);
#else
      HAL_HCD_SOF_Callback(hhcd);
#endif /* USE_HAL_HCD_REGISTER_CALLBACKS */

      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_SOF);
    }

    /* Handle Host channel Interrupt */
    if (__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_HCINT))
    {
      interrupt = USB_HC_ReadInterrupt(hhcd->Instance);
      for (i = 0U; i < hhcd->Init.Host_channels; i++)
      {
        if ((interrupt & (1UL << (i & 0xFU))) != 0U)
        {
          /* EPDIR set means an IN channel */
          if ((USBx_HC(i)->HCCHAR & USB_OTG_HCCHAR_EPDIR) == USB_OTG_HCCHAR_EPDIR)
          {
            HCD_HC_IN_IRQHandler(hhcd, (uint8_t)i);
          }
          else
          {
            HCD_HC_OUT_IRQHandler(hhcd, (uint8_t)i);
          }
        }
      }
      __HAL_HCD_CLEAR_FLAG(hhcd, USB_OTG_GINTSTS_HCINT);
    }

    /* Handle Rx Queue Level Interrupts */
    if ((__HAL_HCD_GET_FLAG(hhcd, USB_OTG_GINTSTS_RXFLVL)) != 0U)
    {
      USB_MASK_INTERRUPT(hhcd->Instance, USB_OTG_GINTSTS_RXFLVL);

      HCD_RXQLVL_IRQHandler(hhcd);

      USB_UNMASK_INTERRUPT(hhcd->Instance, USB_OTG_GINTSTS_RXFLVL);
    }
  }
}

/**
  * @brief  SOF callback.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_SOF_Callback(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_SOF_Callback could be implemented in the user file
   */
}

/**
  * @brief  Connection Event callback.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_Connect_Callback(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_Connect_Callback could be implemented in the user file
   */
}

/**
  * @brief  Disconnection Event callback.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_Disconnect_Callback(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_Disconnect_Callback could be implemented in the user file
   */
}

/**
  * @brief  Port Enabled Event callback.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_PortEnabled_Callback(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_PortEnabled_Callback could be implemented in the user file
   */
}

/**
  * @brief  Port Disabled Event callback.
  * @param  hhcd HCD handle
  * @retval None
  */
__weak void HAL_HCD_PortDisabled_Callback(HCD_HandleTypeDef *hhcd)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_PortDisabled_Callback could be implemented in the user file
   */
}

/**
  * @brief  Notify URB state change callback.
  * @param  hhcd HCD handle
  * @param  chnum Channel number.
  *         This parameter can be a value from 1 to 15
  * @param  urb_state:
  *          This parameter can be one of these values:
  *            URB_IDLE/
  *            URB_DONE/
  *            URB_NOTREADY/
  *            URB_NYET/
  *            URB_ERROR/
  *            URB_STALL/
  * @retval None
  */
__weak void HAL_HCD_HC_NotifyURBChange_Callback(HCD_HandleTypeDef *hhcd, uint8_t chnum, HCD_URBStateTypeDef urb_state)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hhcd);
  UNUSED(chnum);
  UNUSED(urb_state);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_HCD_HC_NotifyURBChange_Callback could be implemented in the user file
   */
}

#if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U)
/**
  * @brief  Register a User USB HCD Callback
  *         To be used instead of the weak predefined callback
  * @param  hhcd USB HCD handle
  * @param  CallbackID ID of the callback to be registered
  *         This parameter can be one of the following values:
  *          @arg @ref HAL_HCD_SOF_CB_ID USB HCD SOF callback ID
  *          @arg @ref HAL_HCD_CONNECT_CB_ID USB HCD Connect callback ID
  *          @arg @ref HAL_HCD_DISCONNECT_CB_ID OTG HCD Disconnect callback ID
  *          @arg @ref HAL_HCD_PORT_ENABLED_CB_ID USB HCD Port Enable callback ID
  *          @arg @ref HAL_HCD_PORT_DISABLED_CB_ID USB HCD Port Disable callback ID
  *          @arg @ref HAL_HCD_MSPINIT_CB_ID MspDeInit callback ID
  *          @arg @ref HAL_HCD_MSPDEINIT_CB_ID MspDeInit callback ID
  * @param  pCallback pointer to the Callback function
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_RegisterCallback(HCD_HandleTypeDef *hhcd,
                                           HAL_HCD_CallbackIDTypeDef CallbackID,
                                           pHCD_CallbackTypeDef pCallback)
{
  HAL_StatusTypeDef status = HAL_OK;

  if (pCallback == NULL)
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
    return HAL_ERROR;
  }
  /* Process locked */
  __HAL_LOCK(hhcd);

  if (hhcd->State == HAL_HCD_STATE_READY)
  {
    switch (CallbackID)
    {
      case HAL_HCD_SOF_CB_ID :
        hhcd->SOFCallback = pCallback;
        break;

      case HAL_HCD_CONNECT_CB_ID :
        hhcd->ConnectCallback = pCallback;
        break;

      case HAL_HCD_DISCONNECT_CB_ID :
        hhcd->DisconnectCallback = pCallback;
        break;

      case HAL_HCD_PORT_ENABLED_CB_ID :
        hhcd->PortEnabledCallback = pCallback;
        break;

      case HAL_HCD_PORT_DISABLED_CB_ID :
        hhcd->PortDisabledCallback = pCallback;
        break;

      case HAL_HCD_MSPINIT_CB_ID :
        hhcd->MspInitCallback = pCallback;
        break;

      case HAL_HCD_MSPDEINIT_CB_ID :
        hhcd->MspDeInitCallback = pCallback;
        break;

      default :
        /* Update the error code */
        hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
        /* Return error status */
        status =  HAL_ERROR;
        break;
    }
  }
  else if (hhcd->State == HAL_HCD_STATE_RESET)
  {
    /* Only the Msp callbacks may be registered before Init */
    switch (CallbackID)
    {
      case HAL_HCD_MSPINIT_CB_ID :
        hhcd->MspInitCallback = pCallback;
        break;

      case HAL_HCD_MSPDEINIT_CB_ID :
        hhcd->MspDeInitCallback = pCallback;
        break;

      default :
        /* Update the error code */
        hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
        /* Return error status */
        status =  HAL_ERROR;
        break;
    }
  }
  else
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
    /* Return error status */
    status =  HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hhcd);
  return status;
}

/**
  * @brief  Unregister an USB HCD Callback
  *         USB HCD callback is redirected to the weak predefined callback
  * @param  hhcd USB HCD handle
  * @param  CallbackID ID of the callback to be unregistered
  *         This parameter can be one of the following values:
  *          @arg @ref HAL_HCD_SOF_CB_ID USB HCD SOF callback ID
  *          @arg @ref HAL_HCD_CONNECT_CB_ID USB HCD Connect callback ID
  *          @arg @ref HAL_HCD_DISCONNECT_CB_ID OTG HCD Disconnect callback ID
  *          @arg @ref HAL_HCD_PORT_ENABLED_CB_ID USB HCD Port Enabled callback ID
  *          @arg @ref HAL_HCD_PORT_DISABLED_CB_ID USB HCD Port Disabled callback ID
  *          @arg @ref HAL_HCD_MSPINIT_CB_ID MspDeInit callback ID
  *          @arg @ref HAL_HCD_MSPDEINIT_CB_ID MspDeInit callback ID
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_UnRegisterCallback(HCD_HandleTypeDef *hhcd, HAL_HCD_CallbackIDTypeDef CallbackID)
{
  HAL_StatusTypeDef status = HAL_OK;

  /* Process locked */
  __HAL_LOCK(hhcd);

  /* Setup Legacy weak Callbacks */
  if (hhcd->State == HAL_HCD_STATE_READY)
  {
    switch (CallbackID)
    {
      case HAL_HCD_SOF_CB_ID :
        hhcd->SOFCallback = HAL_HCD_SOF_Callback;
        break;

      case HAL_HCD_CONNECT_CB_ID :
        hhcd->ConnectCallback = HAL_HCD_Connect_Callback;
        break;

      case HAL_HCD_DISCONNECT_CB_ID :
        hhcd->DisconnectCallback = HAL_HCD_Disconnect_Callback;
        break;

      case HAL_HCD_PORT_ENABLED_CB_ID :
        hhcd->PortEnabledCallback = HAL_HCD_PortEnabled_Callback;
        break;

      case HAL_HCD_PORT_DISABLED_CB_ID :
        hhcd->PortDisabledCallback = HAL_HCD_PortDisabled_Callback;
        break;

      case HAL_HCD_MSPINIT_CB_ID :
        hhcd->MspInitCallback = HAL_HCD_MspInit;
        break;

      case HAL_HCD_MSPDEINIT_CB_ID :
        hhcd->MspDeInitCallback = HAL_HCD_MspDeInit;
        break;

      default :
        /* Update the error code */
        hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
        /* Return error status */
        status =  HAL_ERROR;
        break;
    }
  }
  else if (hhcd->State == HAL_HCD_STATE_RESET)
  {
    switch (CallbackID)
    {
      case HAL_HCD_MSPINIT_CB_ID :
        hhcd->MspInitCallback = HAL_HCD_MspInit;
        break;

      case HAL_HCD_MSPDEINIT_CB_ID :
        hhcd->MspDeInitCallback = HAL_HCD_MspDeInit;
        break;

      default :
        /* Update the error code */
        hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
        /* Return error status */
        status =  HAL_ERROR;
        break;
    }
  }
  else
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;
    /* Return error status */
    status =  HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hhcd);
  return status;
}

/**
  * @brief  Register USB HCD Host Channel Notify URB Change Callback
  *         To be used instead of the weak HAL_HCD_HC_NotifyURBChange_Callback() predefined callback
  * @param  hhcd HCD handle
  * @param  pCallback pointer to the USB HCD Host Channel Notify URB Change Callback function
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_RegisterHC_NotifyURBChangeCallback(HCD_HandleTypeDef *hhcd,
                                                             pHCD_HC_NotifyURBChangeCallbackTypeDef pCallback)
{
  HAL_StatusTypeDef status = HAL_OK;

  if (pCallback == NULL)
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;

    return HAL_ERROR;
  }

  /* Process locked */
  __HAL_LOCK(hhcd);

  if (hhcd->State == HAL_HCD_STATE_READY)
  {
    hhcd->HC_NotifyURBChangeCallback = pCallback;
  }
  else
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;

    /* Return error status */
    status =  HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hhcd);

  return status;
}

/**
  * @brief  UnRegister the USB HCD Host Channel Notify URB Change Callback
  *         USB HCD Host Channel Notify URB Change Callback is redirected
  *         to the weak HAL_HCD_HC_NotifyURBChange_Callback() predefined callback
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_UnRegisterHC_NotifyURBChangeCallback(HCD_HandleTypeDef *hhcd)
{
  HAL_StatusTypeDef status = HAL_OK;

  /* Process locked */
  __HAL_LOCK(hhcd);

  if (hhcd->State == HAL_HCD_STATE_READY)
  {
    hhcd->HC_NotifyURBChangeCallback = HAL_HCD_HC_NotifyURBChange_Callback; /* Legacy weak DataOutStageCallback */
  }
  else
  {
    /* Update the error code */
    hhcd->ErrorCode |= HAL_HCD_ERROR_INVALID_CALLBACK;

    /* Return error status */
    status =  HAL_ERROR;
  }

  /* Release Lock */
  __HAL_UNLOCK(hhcd);

  return status;
}
#endif /* USE_HAL_HCD_REGISTER_CALLBACKS */

/**
  * @}
  */

/** @defgroup HCD_Exported_Functions_Group3 Peripheral Control functions
  *  @brief   Management functions
  *
@verbatim
 ===============================================================================
                      ##### Peripheral Control functions #####
 ===============================================================================
    [..]
    This subsection provides a set of functions allowing to control the HCD data
    transfers.

@endverbatim
  * @{
  */

/**
  * @brief  Start the host driver.
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_Start(HCD_HandleTypeDef *hhcd)
{
  __HAL_LOCK(hhcd);
  __HAL_HCD_ENABLE(hhcd);
  (void)USB_DriveVbus(hhcd->Instance, 1U);
  __HAL_UNLOCK(hhcd);

  return HAL_OK;
}

/**
  * @brief  Stop the host driver.
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_Stop(HCD_HandleTypeDef *hhcd)
{
  __HAL_LOCK(hhcd);
  (void)USB_StopHost(hhcd->Instance);
  __HAL_UNLOCK(hhcd);

  return HAL_OK;
}

/**
  * @brief  Reset the host port.
  * @param  hhcd HCD handle
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_HCD_ResetPort(HCD_HandleTypeDef *hhcd)
{
  return (USB_ResetPort(hhcd->Instance));
}

/**
  * @}
  */

/** @defgroup HCD_Exported_Functions_Group4 Peripheral State functions
  *  @brief   Peripheral State functions
  *
@verbatim
 ===============================================================================
                      ##### Peripheral State functions #####
 ===============================================================================
    [..]
    This subsection permits to get in run-time the status of the peripheral
    and the data flow.

@endverbatim
  * @{
  */

/**
  * @brief  Return the HCD handle state.
  * @param  hhcd HCD handle
  * @retval HAL state
  */
HCD_StateTypeDef HAL_HCD_GetState(HCD_HandleTypeDef *hhcd)
{
  return hhcd->State;
}

/**
  * @brief  Return  URB state for a channel.
  * @param  hhcd HCD handle
  * @param  chnum Channel number.
  *         This parameter can be a value from 1 to 15
  * @retval URB state.
  *          This parameter can be one of these values:
  *            URB_IDLE/
  *            URB_DONE/
  *            URB_NOTREADY/
  *            URB_NYET/
  *            URB_ERROR/
  *            URB_STALL
  */
HCD_URBStateTypeDef HAL_HCD_HC_GetURBState(HCD_HandleTypeDef *hhcd, uint8_t chnum)
{
  return hhcd->hc[chnum].urb_state;
}

/**
  * @brief  Return the last host transfer size.
  * @param  hhcd HCD handle
  * @param  chnum Channel number.
  *         This parameter can be a value from 1 to 15
  * @retval last transfer size in byte
  */
uint32_t HAL_HCD_HC_GetXferCount(HCD_HandleTypeDef *hhcd, uint8_t chnum)
{
  return hhcd->hc[chnum].xfer_count;
}

/**
  * @brief  Return the Host Channel state.
  * @param  hhcd HCD handle
  * @param  chnum Channel number.
* This parameter can be a value from 1 to 15 * @retval Host channel state * This parameter can be one of these values: * HC_IDLE/ * HC_XFRC/ * HC_HALTED/ * HC_NYET/ * HC_NAK/ * HC_STALL/ * HC_XACTERR/ * HC_BBLERR/ * HC_DATATGLERR */ HCD_HCStateTypeDef HAL_HCD_HC_GetState(HCD_HandleTypeDef *hhcd, uint8_t chnum) { return hhcd->hc[chnum].state; } /** * @brief Return the current Host frame number. * @param hhcd HCD handle * @retval Current Host frame number */ uint32_t HAL_HCD_GetCurrentFrame(HCD_HandleTypeDef *hhcd) { return (USB_GetCurrentFrame(hhcd->Instance)); } /** * @brief Return the Host enumeration speed. * @param hhcd HCD handle * @retval Enumeration speed */ uint32_t HAL_HCD_GetCurrentSpeed(HCD_HandleTypeDef *hhcd) { return (USB_GetHostSpeed(hhcd->Instance)); } /** * @} */ /** * @} */ /** @addtogroup HCD_Private_Functions * @{ */ /** * @brief Handle Host Channel IN interrupt requests. * @param hhcd HCD handle * @param chnum Channel number. * This parameter can be a value from 1 to 15 * @retval none */ static void HCD_HC_IN_IRQHandler(HCD_HandleTypeDef *hhcd, uint8_t chnum) { USB_OTG_GlobalTypeDef *USBx = hhcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; uint32_t ch_num = (uint32_t)chnum; uint32_t tmpreg; if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_AHBERR) == USB_OTG_HCINT_AHBERR) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_AHBERR); __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_BBERR) == USB_OTG_HCINT_BBERR) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_BBERR); hhcd->hc[ch_num].state = HC_BBLERR; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_ACK) == USB_OTG_HCINT_ACK) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_ACK); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_STALL) == USB_OTG_HCINT_STALL) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); hhcd->hc[ch_num].state = HC_STALL; __HAL_HCD_CLEAR_HC_INT(ch_num, 
USB_OTG_HCINT_NAK); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_STALL); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_DTERR) == USB_OTG_HCINT_DTERR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NAK); hhcd->hc[ch_num].state = HC_DATATGLERR; __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_DTERR); } else { /* ... */ } if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_FRMOR) == USB_OTG_HCINT_FRMOR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_FRMOR); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_XFRC) == USB_OTG_HCINT_XFRC) { if (hhcd->Init.dma_enable != 0U) { hhcd->hc[ch_num].xfer_count = hhcd->hc[ch_num].xfer_len - \ (USBx_HC(ch_num)->HCTSIZ & USB_OTG_HCTSIZ_XFRSIZ); } hhcd->hc[ch_num].state = HC_XFRC; hhcd->hc[ch_num].ErrCnt = 0U; __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_XFRC); if ((hhcd->hc[ch_num].ep_type == EP_TYPE_CTRL) || (hhcd->hc[ch_num].ep_type == EP_TYPE_BULK)) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NAK); } else if (hhcd->hc[ch_num].ep_type == EP_TYPE_INTR) { USBx_HC(ch_num)->HCCHAR |= USB_OTG_HCCHAR_ODDFRM; hhcd->hc[ch_num].urb_state = URB_DONE; #if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U) hhcd->HC_NotifyURBChangeCallback(hhcd, (uint8_t)ch_num, hhcd->hc[ch_num].urb_state); #else HAL_HCD_HC_NotifyURBChange_Callback(hhcd, (uint8_t)ch_num, hhcd->hc[ch_num].urb_state); #endif /* USE_HAL_HCD_REGISTER_CALLBACKS */ } else if (hhcd->hc[ch_num].ep_type == EP_TYPE_ISOC) { hhcd->hc[ch_num].urb_state = URB_DONE; hhcd->hc[ch_num].toggle_in ^= 1U; #if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U) hhcd->HC_NotifyURBChangeCallback(hhcd, (uint8_t)ch_num, hhcd->hc[ch_num].urb_state); #else HAL_HCD_HC_NotifyURBChange_Callback(hhcd, (uint8_t)ch_num, 
hhcd->hc[ch_num].urb_state); #endif /* USE_HAL_HCD_REGISTER_CALLBACKS */ } else { /* ... */ } hhcd->hc[ch_num].toggle_in ^= 1U; } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_CHH) == USB_OTG_HCINT_CHH) { __HAL_HCD_MASK_HALT_HC_INT(ch_num); if (hhcd->hc[ch_num].state == HC_XFRC) { hhcd->hc[ch_num].urb_state = URB_DONE; } else if (hhcd->hc[ch_num].state == HC_STALL) { hhcd->hc[ch_num].urb_state = URB_STALL; } else if ((hhcd->hc[ch_num].state == HC_XACTERR) || (hhcd->hc[ch_num].state == HC_DATATGLERR)) { hhcd->hc[ch_num].ErrCnt++; if (hhcd->hc[ch_num].ErrCnt > 3U) { hhcd->hc[ch_num].ErrCnt = 0U; hhcd->hc[ch_num].urb_state = URB_ERROR; } else { hhcd->hc[ch_num].urb_state = URB_NOTREADY; } /* re-activate the channel */ tmpreg = USBx_HC(ch_num)->HCCHAR; tmpreg &= ~USB_OTG_HCCHAR_CHDIS; tmpreg |= USB_OTG_HCCHAR_CHENA; USBx_HC(ch_num)->HCCHAR = tmpreg; } else if (hhcd->hc[ch_num].state == HC_NAK) { hhcd->hc[ch_num].urb_state = URB_NOTREADY; /* re-activate the channel */ tmpreg = USBx_HC(ch_num)->HCCHAR; tmpreg &= ~USB_OTG_HCCHAR_CHDIS; tmpreg |= USB_OTG_HCCHAR_CHENA; USBx_HC(ch_num)->HCCHAR = tmpreg; } else if (hhcd->hc[ch_num].state == HC_BBLERR) { hhcd->hc[ch_num].ErrCnt++; hhcd->hc[ch_num].urb_state = URB_ERROR; } else { /* ... 
*/ } __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_CHH); HAL_HCD_HC_NotifyURBChange_Callback(hhcd, (uint8_t)ch_num, hhcd->hc[ch_num].urb_state); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_TXERR) == USB_OTG_HCINT_TXERR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); hhcd->hc[ch_num].ErrCnt++; hhcd->hc[ch_num].state = HC_XACTERR; (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_TXERR); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_NAK) == USB_OTG_HCINT_NAK) { if (hhcd->hc[ch_num].ep_type == EP_TYPE_INTR) { hhcd->hc[ch_num].ErrCnt = 0U; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); } else if ((hhcd->hc[ch_num].ep_type == EP_TYPE_CTRL) || (hhcd->hc[ch_num].ep_type == EP_TYPE_BULK)) { hhcd->hc[ch_num].ErrCnt = 0U; if (hhcd->Init.dma_enable == 0U) { hhcd->hc[ch_num].state = HC_NAK; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); } } else { /* ... */ } __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NAK); } else { /* ... */ } } /** * @brief Handle Host Channel OUT interrupt requests. * @param hhcd HCD handle * @param chnum Channel number. 
* This parameter can be a value from 1 to 15 * @retval none */ static void HCD_HC_OUT_IRQHandler(HCD_HandleTypeDef *hhcd, uint8_t chnum) { USB_OTG_GlobalTypeDef *USBx = hhcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; uint32_t ch_num = (uint32_t)chnum; uint32_t tmpreg; if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_AHBERR) == USB_OTG_HCINT_AHBERR) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_AHBERR); __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_ACK) == USB_OTG_HCINT_ACK) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_ACK); if (hhcd->hc[ch_num].do_ping == 1U) { hhcd->hc[ch_num].do_ping = 0U; hhcd->hc[ch_num].urb_state = URB_NOTREADY; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); } } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_NYET) == USB_OTG_HCINT_NYET) { hhcd->hc[ch_num].state = HC_NYET; hhcd->hc[ch_num].do_ping = 1U; hhcd->hc[ch_num].ErrCnt = 0U; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NYET); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_FRMOR) == USB_OTG_HCINT_FRMOR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_FRMOR); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_XFRC) == USB_OTG_HCINT_XFRC) { hhcd->hc[ch_num].ErrCnt = 0U; __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_XFRC); hhcd->hc[ch_num].state = HC_XFRC; } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_STALL) == USB_OTG_HCINT_STALL) { __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_STALL); __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); hhcd->hc[ch_num].state = HC_STALL; } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_NAK) == USB_OTG_HCINT_NAK) { hhcd->hc[ch_num].ErrCnt = 0U; 
hhcd->hc[ch_num].state = HC_NAK; if (hhcd->hc[ch_num].do_ping == 0U) { if (hhcd->hc[ch_num].speed == HCD_SPEED_HIGH) { hhcd->hc[ch_num].do_ping = 1U; } } __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NAK); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_TXERR) == USB_OTG_HCINT_TXERR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); hhcd->hc[ch_num].state = HC_XACTERR; __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_TXERR); } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_DTERR) == USB_OTG_HCINT_DTERR) { __HAL_HCD_UNMASK_HALT_HC_INT(ch_num); (void)USB_HC_Halt(hhcd->Instance, (uint8_t)ch_num); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_NAK); __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_DTERR); hhcd->hc[ch_num].state = HC_DATATGLERR; } else if ((USBx_HC(ch_num)->HCINT & USB_OTG_HCINT_CHH) == USB_OTG_HCINT_CHH) { __HAL_HCD_MASK_HALT_HC_INT(ch_num); if (hhcd->hc[ch_num].state == HC_XFRC) { hhcd->hc[ch_num].urb_state = URB_DONE; if ((hhcd->hc[ch_num].ep_type == EP_TYPE_BULK) || (hhcd->hc[ch_num].ep_type == EP_TYPE_INTR)) { hhcd->hc[ch_num].toggle_out ^= 1U; } } else if (hhcd->hc[ch_num].state == HC_NAK) { hhcd->hc[ch_num].urb_state = URB_NOTREADY; } else if (hhcd->hc[ch_num].state == HC_NYET) { hhcd->hc[ch_num].urb_state = URB_NOTREADY; } else if (hhcd->hc[ch_num].state == HC_STALL) { hhcd->hc[ch_num].urb_state = URB_STALL; } else if ((hhcd->hc[ch_num].state == HC_XACTERR) || (hhcd->hc[ch_num].state == HC_DATATGLERR)) { hhcd->hc[ch_num].ErrCnt++; if (hhcd->hc[ch_num].ErrCnt > 3U) { hhcd->hc[ch_num].ErrCnt = 0U; hhcd->hc[ch_num].urb_state = URB_ERROR; } else { hhcd->hc[ch_num].urb_state = URB_NOTREADY; } /* re-activate the channel */ tmpreg = USBx_HC(ch_num)->HCCHAR; tmpreg &= ~USB_OTG_HCCHAR_CHDIS; tmpreg |= USB_OTG_HCCHAR_CHENA; USBx_HC(ch_num)->HCCHAR = tmpreg; } else { /* ... 
*/ } __HAL_HCD_CLEAR_HC_INT(ch_num, USB_OTG_HCINT_CHH); HAL_HCD_HC_NotifyURBChange_Callback(hhcd, (uint8_t)ch_num, hhcd->hc[ch_num].urb_state); } else { /* ... */ } } /** * @brief Handle Rx Queue Level interrupt requests. * @param hhcd HCD handle * @retval none */ static void HCD_RXQLVL_IRQHandler(HCD_HandleTypeDef *hhcd) { USB_OTG_GlobalTypeDef *USBx = hhcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; uint32_t pktsts; uint32_t pktcnt; uint32_t temp; uint32_t tmpreg; uint32_t ch_num; temp = hhcd->Instance->GRXSTSP; ch_num = temp & USB_OTG_GRXSTSP_EPNUM; pktsts = (temp & USB_OTG_GRXSTSP_PKTSTS) >> 17; pktcnt = (temp & USB_OTG_GRXSTSP_BCNT) >> 4; switch (pktsts) { case GRXSTS_PKTSTS_IN: /* Read the data into the host buffer. */ if ((pktcnt > 0U) && (hhcd->hc[ch_num].xfer_buff != (void *)0)) { (void)USB_ReadPacket(hhcd->Instance, hhcd->hc[ch_num].xfer_buff, (uint16_t)pktcnt); /*manage multiple Xfer */ hhcd->hc[ch_num].xfer_buff += pktcnt; hhcd->hc[ch_num].xfer_count += pktcnt; if ((USBx_HC(ch_num)->HCTSIZ & USB_OTG_HCTSIZ_PKTCNT) > 0U) { /* re-activate the channel when more packets are expected */ tmpreg = USBx_HC(ch_num)->HCCHAR; tmpreg &= ~USB_OTG_HCCHAR_CHDIS; tmpreg |= USB_OTG_HCCHAR_CHENA; USBx_HC(ch_num)->HCCHAR = tmpreg; hhcd->hc[ch_num].toggle_in ^= 1U; } } break; case GRXSTS_PKTSTS_DATA_TOGGLE_ERR: break; case GRXSTS_PKTSTS_IN_XFER_COMP: case GRXSTS_PKTSTS_CH_HALTED: default: break; } } /** * @brief Handle Host Port interrupt requests. 
* @param hhcd HCD handle * @retval None */ static void HCD_Port_IRQHandler(HCD_HandleTypeDef *hhcd) { USB_OTG_GlobalTypeDef *USBx = hhcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; __IO uint32_t hprt0, hprt0_dup; /* Handle Host Port Interrupts */ hprt0 = USBx_HPRT0; hprt0_dup = USBx_HPRT0; hprt0_dup &= ~(USB_OTG_HPRT_PENA | USB_OTG_HPRT_PCDET | \ USB_OTG_HPRT_PENCHNG | USB_OTG_HPRT_POCCHNG); /* Check whether Port Connect detected */ if ((hprt0 & USB_OTG_HPRT_PCDET) == USB_OTG_HPRT_PCDET) { if ((hprt0 & USB_OTG_HPRT_PCSTS) == USB_OTG_HPRT_PCSTS) { #if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U) hhcd->ConnectCallback(hhcd); #else HAL_HCD_Connect_Callback(hhcd); #endif /* USE_HAL_HCD_REGISTER_CALLBACKS */ } hprt0_dup |= USB_OTG_HPRT_PCDET; } /* Check whether Port Enable Changed */ if ((hprt0 & USB_OTG_HPRT_PENCHNG) == USB_OTG_HPRT_PENCHNG) { hprt0_dup |= USB_OTG_HPRT_PENCHNG; if ((hprt0 & USB_OTG_HPRT_PENA) == USB_OTG_HPRT_PENA) { if (hhcd->Init.phy_itface == USB_OTG_EMBEDDED_PHY) { if ((hprt0 & USB_OTG_HPRT_PSPD) == (HPRT0_PRTSPD_LOW_SPEED << 17)) { (void)USB_InitFSLSPClkSel(hhcd->Instance, HCFG_6_MHZ); } else { (void)USB_InitFSLSPClkSel(hhcd->Instance, HCFG_48_MHZ); } } else { if (hhcd->Init.speed == HCD_SPEED_FULL) { USBx_HOST->HFIR = 60000U; } } #if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U) hhcd->PortEnabledCallback(hhcd); #else HAL_HCD_PortEnabled_Callback(hhcd); #endif /* USE_HAL_HCD_REGISTER_CALLBACKS */ } else { #if (USE_HAL_HCD_REGISTER_CALLBACKS == 1U) hhcd->PortDisabledCallback(hhcd); #else HAL_HCD_PortDisabled_Callback(hhcd); #endif /* USE_HAL_HCD_REGISTER_CALLBACKS */ } } /* Check for an overcurrent */ if ((hprt0 & USB_OTG_HPRT_POCCHNG) == USB_OTG_HPRT_POCCHNG) { hprt0_dup |= USB_OTG_HPRT_POCCHNG; } /* Clear Port Interrupts */ USBx_HPRT0 = hprt0_dup; } /** * @} */ /** * @} */ #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ #endif /* HAL_HCD_MODULE_ENABLED */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END 
OF FILE****/
214548.c
// Copyright 2016-2017 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <sys/param.h> #include "esp_attr.h" #include "esp_err.h" #include "esp_pm.h" #include "esp_log.h" #include "esp32/clk.h" #include "esp_private/crosscore_int.h" #include "soc/rtc.h" #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "freertos/xtensa_timer.h" #include "xtensa/core-macros.h" #include "esp_private/pm_impl.h" #include "esp_private/pm_trace.h" #include "esp_private/esp_timer_impl.h" #include "esp32/pm.h" #include "esp_sleep.h" /* CCOMPARE update timeout, in CPU cycles. Any value above ~600 cycles will work * for the purpose of detecting a deadlock. */ #define CCOMPARE_UPDATE_TIMEOUT 1000000 /* When changing CCOMPARE, don't allow changes if the difference is less * than this. This is to prevent setting CCOMPARE below CCOUNT. */ #define CCOMPARE_MIN_CYCLES_IN_FUTURE 1000 /* When light sleep is used, wake this number of microseconds earlier than * the next tick. 
*/ #define LIGHT_SLEEP_EARLY_WAKEUP_US 100 /* Minimal divider at which REF_CLK_FREQ can be obtained */ #define REF_CLK_DIV_MIN 10 #define MHZ 1000000 #ifdef CONFIG_PM_PROFILING #define WITH_PROFILING #endif static portMUX_TYPE s_switch_lock = portMUX_INITIALIZER_UNLOCKED; /* The following state variables are protected using s_switch_lock: */ /* Current sleep mode; When switching, contains old mode until switch is complete */ static pm_mode_t s_mode = PM_MODE_CPU_MAX; /* True when switch is in progress */ static volatile bool s_is_switching; /* When switch is in progress, this is the mode we are switching into */ static pm_mode_t s_new_mode = PM_MODE_CPU_MAX; /* Number of times each mode was locked */ static size_t s_mode_lock_counts[PM_MODE_COUNT]; /* Bit mask of locked modes. BIT(i) is set iff s_mode_lock_counts[i] > 0. */ static uint32_t s_mode_mask; /* Divider and multiplier used to adjust (ccompare - ccount) duration. * Only set to non-zero values when switch is in progress. */ static uint32_t s_ccount_div; static uint32_t s_ccount_mul; #if CONFIG_FREERTOS_USE_TICKLESS_IDLE /* Indicates if light sleep entry was skipped in vApplicationSleep for given CPU. * This in turn gets used in IDLE hook to decide if `waiti` needs * to be invoked or not. */ static bool s_skipped_light_sleep[portNUM_PROCESSORS]; #if portNUM_PROCESSORS == 2 /* When light sleep is finished on one CPU, it is possible that the other CPU * will enter light sleep again very soon, before interrupts on the first CPU * get a chance to run. To avoid such situation, set a flag for the other CPU to * skip light sleep attempt. */ static bool s_skip_light_sleep[portNUM_PROCESSORS]; #endif // portNUM_PROCESSORS == 2 #endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE /* Indicates to the ISR hook that CCOMPARE needs to be updated on the given CPU. * Used in conjunction with cross-core interrupt to update CCOMPARE on the other CPU. 
*/ static volatile bool s_need_update_ccompare[portNUM_PROCESSORS]; /* A flag indicating that Idle hook has run on a given CPU; * Next interrupt on the same CPU will take s_rtos_lock_handle. */ static bool s_core_idle[portNUM_PROCESSORS]; /* When no RTOS tasks are active, these locks are released to allow going into * a lower power mode. Used by ISR hook and idle hook. */ static esp_pm_lock_handle_t s_rtos_lock_handle[portNUM_PROCESSORS]; /* Lookup table of CPU frequency configs to be used in each mode. * Initialized by esp_pm_impl_init and modified by esp_pm_configure. */ rtc_cpu_freq_config_t s_cpu_freq_by_mode[PM_MODE_COUNT]; /* Whether automatic light sleep is enabled */ static bool s_light_sleep_en = false; /* When configuration is changed, current frequency may not match the * newly configured frequency for the current mode. This is an indicator * to the mode switch code to get the actual current frequency instead of * relying on the current mode. */ static bool s_config_changed = false; #ifdef WITH_PROFILING /* Time, in microseconds, spent so far in each mode */ static pm_time_t s_time_in_mode[PM_MODE_COUNT]; /* Timestamp, in microseconds, when the mode switch last happened */ static pm_time_t s_last_mode_change_time; /* User-readable mode names, used by esp_pm_impl_dump_stats */ static const char* s_mode_names[] = { "SLEEP", "APB_MIN", "APB_MAX", "CPU_MAX" }; #endif // WITH_PROFILING static const char* TAG = "pm_esp32"; static void update_ccompare(); static void do_switch(pm_mode_t new_mode); static void leave_idle(); static void on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us); pm_mode_t esp_pm_impl_get_mode(esp_pm_lock_type_t type, int arg) { (void) arg; if (type == ESP_PM_CPU_FREQ_MAX) { return PM_MODE_CPU_MAX; } else if (type == ESP_PM_APB_FREQ_MAX) { return PM_MODE_APB_MAX; } else if (type == ESP_PM_NO_LIGHT_SLEEP) { return PM_MODE_APB_MIN; } else { // unsupported mode abort(); } } esp_err_t esp_pm_configure(const void* vconfig) { 
#ifndef CONFIG_PM_ENABLE return ESP_ERR_NOT_SUPPORTED; #endif const esp_pm_config_esp32_t* config = (const esp_pm_config_esp32_t*) vconfig; #ifndef CONFIG_FREERTOS_USE_TICKLESS_IDLE if (config->light_sleep_enable) { return ESP_ERR_NOT_SUPPORTED; } #endif int min_freq_mhz = config->min_freq_mhz; int max_freq_mhz = config->max_freq_mhz; if (min_freq_mhz == 0 && max_freq_mhz == 0) { /* For compatibility, handle deprecated fields, min_cpu_freq and max_cpu_freq. */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" min_freq_mhz = rtc_clk_cpu_freq_value(config->min_cpu_freq) / MHZ; max_freq_mhz = rtc_clk_cpu_freq_value(config->max_cpu_freq) / MHZ; #pragma GCC diagnostic pop } if (min_freq_mhz > max_freq_mhz) { return ESP_ERR_INVALID_ARG; } rtc_cpu_freq_config_t freq_config; if (!rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &freq_config)) { ESP_LOGW(TAG, "invalid min_freq_mhz value (%d)", min_freq_mhz); return ESP_ERR_INVALID_ARG; } int xtal_freq_mhz = (int) rtc_clk_xtal_freq_get(); if (min_freq_mhz < xtal_freq_mhz && min_freq_mhz * MHZ / REF_CLK_FREQ < REF_CLK_DIV_MIN) { ESP_LOGW(TAG, "min_freq_mhz should be >= %d", REF_CLK_FREQ * REF_CLK_DIV_MIN / MHZ); return ESP_ERR_INVALID_ARG; } if (!rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &freq_config)) { ESP_LOGW(TAG, "invalid max_freq_mhz value (%d)", max_freq_mhz); return ESP_ERR_INVALID_ARG; } int apb_max_freq = max_freq_mhz; /* CPU frequency in APB_MAX mode */ if (max_freq_mhz == 240) { /* We can't switch between 240 and 80/160 without disabling PLL, * so use 240MHz CPU frequency when 80MHz APB frequency is requested. */ apb_max_freq = 240; } else if (max_freq_mhz == 160 || max_freq_mhz == 80) { /* Otherwise, can use 80MHz * CPU frequency when 80MHz APB frequency is requested. 
*/ apb_max_freq = 80; } apb_max_freq = MAX(apb_max_freq, min_freq_mhz); ESP_LOGI(TAG, "Frequency switching config: " "CPU_MAX: %d, APB_MAX: %d, APB_MIN: %d, Light sleep: %s", max_freq_mhz, apb_max_freq, min_freq_mhz, config->light_sleep_enable ? "ENABLED" : "DISABLED"); portENTER_CRITICAL(&s_switch_lock); rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_CPU_MAX]); rtc_clk_cpu_freq_mhz_to_config(apb_max_freq, &s_cpu_freq_by_mode[PM_MODE_APB_MAX]); rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_APB_MIN]); s_cpu_freq_by_mode[PM_MODE_LIGHT_SLEEP] = s_cpu_freq_by_mode[PM_MODE_APB_MIN]; s_light_sleep_en = config->light_sleep_enable; s_config_changed = true; portEXIT_CRITICAL(&s_switch_lock); return ESP_OK; } static pm_mode_t IRAM_ATTR get_lowest_allowed_mode() { /* TODO: optimize using ffs/clz */ if (s_mode_mask >= BIT(PM_MODE_CPU_MAX)) { return PM_MODE_CPU_MAX; } else if (s_mode_mask >= BIT(PM_MODE_APB_MAX)) { return PM_MODE_APB_MAX; } else if (s_mode_mask >= BIT(PM_MODE_APB_MIN) || !s_light_sleep_en) { return PM_MODE_APB_MIN; } else { return PM_MODE_LIGHT_SLEEP; } } void IRAM_ATTR esp_pm_impl_switch_mode(pm_mode_t mode, pm_mode_switch_t lock_or_unlock, pm_time_t now) { bool need_switch = false; uint32_t mode_mask = BIT(mode); portENTER_CRITICAL(&s_switch_lock); uint32_t count; if (lock_or_unlock == MODE_LOCK) { count = ++s_mode_lock_counts[mode]; } else { count = s_mode_lock_counts[mode]--; } if (count == 1) { if (lock_or_unlock == MODE_LOCK) { s_mode_mask |= mode_mask; } else { s_mode_mask &= ~mode_mask; } need_switch = true; } pm_mode_t new_mode = s_mode; if (need_switch) { new_mode = get_lowest_allowed_mode(); #ifdef WITH_PROFILING if (s_last_mode_change_time != 0) { pm_time_t diff = now - s_last_mode_change_time; s_time_in_mode[s_mode] += diff; } s_last_mode_change_time = now; #endif // WITH_PROFILING } portEXIT_CRITICAL(&s_switch_lock); if (need_switch && new_mode != s_mode) { do_switch(new_mode); } } /** * @brief 
Update clock dividers in esp_timer and FreeRTOS, and adjust CCOMPARE * values on both CPUs. * @param old_ticks_per_us old CPU frequency * @param ticks_per_us new CPU frequency */ static void IRAM_ATTR on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us) { uint32_t old_apb_ticks_per_us = MIN(old_ticks_per_us, 80); uint32_t apb_ticks_per_us = MIN(ticks_per_us, 80); /* Update APB frequency value used by the timer */ if (old_apb_ticks_per_us != apb_ticks_per_us) { esp_timer_impl_update_apb_freq(apb_ticks_per_us); } /* Calculate new tick divisor */ _xt_tick_divisor = ticks_per_us * MHZ / XT_TICK_PER_SEC; int core_id = xPortGetCoreID(); if (s_rtos_lock_handle[core_id] != NULL) { ESP_PM_TRACE_ENTER(CCOMPARE_UPDATE, core_id); /* ccount_div and ccount_mul are used in esp_pm_impl_update_ccompare * to calculate new CCOMPARE value. */ s_ccount_div = old_ticks_per_us; s_ccount_mul = ticks_per_us; /* Update CCOMPARE value on this CPU */ update_ccompare(); #if portNUM_PROCESSORS == 2 /* Send interrupt to the other CPU to update CCOMPARE value */ int other_core_id = (core_id == 0) ? 1 : 0; s_need_update_ccompare[other_core_id] = true; esp_crosscore_int_send_freq_switch(other_core_id); int timeout = 0; while (s_need_update_ccompare[other_core_id]) { if (++timeout == CCOMPARE_UPDATE_TIMEOUT) { assert(false && "failed to update CCOMPARE, possible deadlock"); } } #endif // portNUM_PROCESSORS == 2 s_ccount_mul = 0; s_ccount_div = 0; ESP_PM_TRACE_EXIT(CCOMPARE_UPDATE, core_id); } } /** * Perform the switch to new power mode. * Currently only changes the CPU frequency and adjusts clock dividers. * No light sleep yet. 
* @param new_mode mode to switch to */ static void IRAM_ATTR do_switch(pm_mode_t new_mode) { const int core_id = xPortGetCoreID(); do { portENTER_CRITICAL_ISR(&s_switch_lock); if (!s_is_switching) { break; } if (s_new_mode <= new_mode) { portEXIT_CRITICAL_ISR(&s_switch_lock); return; } if (s_need_update_ccompare[core_id]) { s_need_update_ccompare[core_id] = false; } portEXIT_CRITICAL_ISR(&s_switch_lock); } while (true); s_new_mode = new_mode; s_is_switching = true; bool config_changed = s_config_changed; s_config_changed = false; portEXIT_CRITICAL_ISR(&s_switch_lock); rtc_cpu_freq_config_t new_config = s_cpu_freq_by_mode[new_mode]; rtc_cpu_freq_config_t old_config; if (!config_changed) { old_config = s_cpu_freq_by_mode[s_mode]; } else { rtc_clk_cpu_freq_get_config(&old_config); } if (new_config.freq_mhz != old_config.freq_mhz) { uint32_t old_ticks_per_us = old_config.freq_mhz; uint32_t new_ticks_per_us = new_config.freq_mhz; bool switch_down = new_ticks_per_us < old_ticks_per_us; ESP_PM_TRACE_ENTER(FREQ_SWITCH, core_id); if (switch_down) { on_freq_update(old_ticks_per_us, new_ticks_per_us); } rtc_clk_cpu_freq_set_config_fast(&new_config); if (!switch_down) { on_freq_update(old_ticks_per_us, new_ticks_per_us); } ESP_PM_TRACE_EXIT(FREQ_SWITCH, core_id); } portENTER_CRITICAL_ISR(&s_switch_lock); s_mode = new_mode; s_is_switching = false; portEXIT_CRITICAL_ISR(&s_switch_lock); } /** * @brief Calculate new CCOMPARE value based on s_ccount_{mul,div} * * Adjusts CCOMPARE value so that the interrupt happens at the same time as it * would happen without the frequency change. * Assumes that the new_frequency = old_frequency * s_ccount_mul / s_ccount_div. 
*/
/* NOTE(review): this chunk appears to be the ESP32 power-management
 * implementation (ESP-IDF pm component) — confirm against the repository;
 * the file metadata ("cs_der_certs" crypto HEAD) does not match this code. */

/* Re-arm the CCOMPARE tick compare register: if the currently programmed
 * compare value is still in the future, rescale the remaining cycle count
 * by s_ccount_mul/s_ccount_div (rounding up) and program the scaled value.
 * Presumably called after a CPU frequency switch so the next tick fires at
 * the correct wall-clock time — TODO confirm against the caller. */
static void IRAM_ATTR update_ccompare()
{
    uint32_t ccount = XTHAL_GET_CCOUNT();
    uint32_t ccompare = XTHAL_GET_CCOMPARE(XT_TIMER_INDEX);
    /* unsigned-wrap test: true iff ccompare is at least
     * CCOMPARE_MIN_CYCLES_IN_FUTURE cycles ahead of ccount */
    if ((ccompare - CCOMPARE_MIN_CYCLES_IN_FUTURE) - ccount < UINT32_MAX / 2) {
        uint32_t diff = ccompare - ccount;
        /* ceil(diff * mul / div) */
        uint32_t diff_scaled = (diff * s_ccount_mul + s_ccount_div - 1) / s_ccount_div;
        if (diff_scaled < _xt_tick_divisor) {
            uint32_t new_ccompare = ccount + diff_scaled;
            XTHAL_SET_CCOMPARE(XT_TIMER_INDEX, new_ccompare);
        }
    }
}

/* Mark the calling core as busy: re-acquire its per-core "rtos" power
 * management lock if it had been released by esp_pm_impl_idle_hook(). */
static void IRAM_ATTR leave_idle()
{
    int core_id = xPortGetCoreID();
    if (s_core_idle[core_id]) {
        // TODO: possible optimization: raise frequency here first
        esp_pm_lock_acquire(s_rtos_lock_handle[core_id]);
        s_core_idle[core_id] = false;
    }
}

/* Idle-task hook: release this core's "rtos" lock (allowing a lower power
 * mode) and flag the core idle. Lock release and flag update are done with
 * interrupts masked so an ISR cannot observe them half-done. */
void esp_pm_impl_idle_hook()
{
    int core_id = xPortGetCoreID();
    uint32_t state = portENTER_CRITICAL_NESTED();
    if (!s_core_idle[core_id]) {
        esp_pm_lock_release(s_rtos_lock_handle[core_id]);
        s_core_idle[core_id] = true;
    }
    portEXIT_CRITICAL_NESTED(state);
    ESP_PM_TRACE_ENTER(IDLE, core_id);
}

/* ISR hook: on dual-core builds, either service a pending CCOMPARE update
 * requested for this core, or treat the interrupt as end-of-idle; on
 * single-core builds, always leave idle. */
void IRAM_ATTR esp_pm_impl_isr_hook()
{
    int core_id = xPortGetCoreID();
    ESP_PM_TRACE_ENTER(ISR_HOOK, core_id);
#if portNUM_PROCESSORS == 2
    if (s_need_update_ccompare[core_id]) {
        update_ccompare();
        s_need_update_ccompare[core_id] = false;
    } else {
        leave_idle();
    }
#else
    leave_idle();
#endif // portNUM_PROCESSORS == 2
    ESP_PM_TRACE_EXIT(ISR_HOOK, core_id);
}

/* Wait-for-interrupt used by the idle loop. With tickless idle enabled,
 * only waiti directly when the last vApplicationSleep attempt was skipped;
 * otherwise fall through so vApplicationSleep can try light sleep. */
void esp_pm_impl_waiti()
{
#if CONFIG_FREERTOS_USE_TICKLESS_IDLE
    int core_id = xPortGetCoreID();
    if (s_skipped_light_sleep[core_id]) {
        asm("waiti 0");
        /* Interrupt took the CPU out of waiti and s_rtos_lock_handle[core_id]
         * is now taken. However since we are back to idle task, we can release
         * the lock so that vApplicationSleep can attempt to enter light sleep.
         */
        esp_pm_impl_idle_hook();
        s_skipped_light_sleep[core_id] = false;
    }
#else
    asm("waiti 0");
#endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE
}

#if CONFIG_FREERTOS_USE_TICKLESS_IDLE
/* Decide whether this core should skip the light-sleep attempt.
 * Skips when the other core requested it (s_skip_light_sleep, consumed
 * here), or when the PM state machine is not in (stable) light-sleep mode.
 * The decision is recorded in s_skipped_light_sleep[core_id] and returned. */
static inline bool IRAM_ATTR should_skip_light_sleep(int core_id)
{
#if portNUM_PROCESSORS == 2
    if (s_skip_light_sleep[core_id]) {
        s_skip_light_sleep[core_id] = false;
        s_skipped_light_sleep[core_id] = true;
        return true;
    }
#endif // portNUM_PROCESSORS == 2
    if (s_mode != PM_MODE_LIGHT_SLEEP || s_is_switching) {
        s_skipped_light_sleep[core_id] = true;
    } else {
        s_skipped_light_sleep[core_id] = false;
    }
    return s_skipped_light_sleep[core_id];
}

/* Ask the *other* core to skip its next light-sleep attempt
 * (no-op on single-core builds). */
static inline void IRAM_ATTR other_core_should_skip_light_sleep(int core_id)
{
#if portNUM_PROCESSORS == 2
    s_skip_light_sleep[!core_id] = true;
#endif
}

/* FreeRTOS tickless-idle entry point: attempt to enter light sleep for up
 * to xExpectedIdleTime ticks, bounded by the next esp_timer alarm. On
 * wakeup, credits the slept ticks to the RTOS tick count and forces a tick
 * interrupt if a whole tick (or more) elapsed. Runs entirely under
 * s_switch_lock to exclude concurrent mode switches. */
void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
{
    portENTER_CRITICAL(&s_switch_lock);
    int core_id = xPortGetCoreID();
    if (!should_skip_light_sleep(core_id)) {
        /* Calculate how much we can sleep */
        int64_t next_esp_timer_alarm = esp_timer_get_next_alarm();
        int64_t now = esp_timer_get_time();
        int64_t time_until_next_alarm = next_esp_timer_alarm - now;
        int64_t wakeup_delay_us = portTICK_PERIOD_MS * 1000LL * xExpectedIdleTime;
        int64_t sleep_time_us = MIN(wakeup_delay_us, time_until_next_alarm);
        if (sleep_time_us >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP * portTICK_PERIOD_MS * 1000LL) {
            /* wake slightly early to absorb wakeup latency */
            esp_sleep_enable_timer_wakeup(sleep_time_us - LIGHT_SLEEP_EARLY_WAKEUP_US);
#ifdef CONFIG_PM_TRACE
            /* to force tracing GPIOs to keep state */
            esp_sleep_pd_config(ESP_PD_DOMAIN_RTC_PERIPH, ESP_PD_OPTION_ON);
#endif
            /* Enter sleep */
            ESP_PM_TRACE_ENTER(SLEEP, core_id);
            int64_t sleep_start = esp_timer_get_time();
            esp_light_sleep_start();
            int64_t slept_us = esp_timer_get_time() - sleep_start;
            ESP_PM_TRACE_EXIT(SLEEP, core_id);
            uint32_t slept_ticks = slept_us / (portTICK_PERIOD_MS * 1000LL);
            if (slept_ticks > 0) {
                /* Adjust RTOS tick count based on the amount of time spent in sleep */
                vTaskStepTick(slept_ticks);
                /* Trigger tick interrupt, since sleep time was longer
                 * than portTICK_PERIOD_MS. Note that setting INTSET does not
                 * work for timer interrupt, and changing CCOMPARE would clear
                 * the interrupt flag.
                 */
                XTHAL_SET_CCOUNT(XTHAL_GET_CCOMPARE(XT_TIMER_INDEX) - 16);
                while (!(XTHAL_GET_INTERRUPT() & BIT(XT_TIMER_INTNUM))) {
                    ;
                }
            }
            other_core_should_skip_light_sleep(core_id);
        }
    }
    portEXIT_CRITICAL(&s_switch_lock);
}
#endif //CONFIG_FREERTOS_USE_TICKLESS_IDLE

#ifdef WITH_PROFILING
/* Print per-mode time accounting to 'out'. State is snapshotted under the
 * switch lock, then the still-open interval of the current mode is added
 * before printing, so totals are consistent at the snapshot instant. */
void esp_pm_impl_dump_stats(FILE* out)
{
    pm_time_t time_in_mode[PM_MODE_COUNT];
    portENTER_CRITICAL_ISR(&s_switch_lock);
    memcpy(time_in_mode, s_time_in_mode, sizeof(time_in_mode));
    pm_time_t last_mode_change_time = s_last_mode_change_time;
    pm_mode_t cur_mode = s_mode;
    pm_time_t now = pm_get_time();
    portEXIT_CRITICAL_ISR(&s_switch_lock);
    /* credit the in-progress interval to the current mode */
    time_in_mode[cur_mode] += now - last_mode_change_time;
    fprintf(out, "Mode stats:\n");
    for (int i = 0; i < PM_MODE_COUNT; ++i) {
        if (i == PM_MODE_LIGHT_SLEEP && !s_light_sleep_en) {
            /* don't display light sleep mode if it's not enabled */
            continue;
        }
        fprintf(out, "%8s %3dM %12lld %2d%%\n",
                s_mode_names[i],
                s_cpu_freq_by_mode[i].freq_mhz,
                time_in_mode[i],
                (int) (time_in_mode[i] * 100 / now));
    }
}
#endif // WITH_PROFILING

/* One-time PM setup: create and immediately acquire a CPU_FREQ_MAX lock per
 * core (so frequency stays max until the idle path releases it), and seed
 * every PM mode's CPU frequency with the sdkconfig default. */
void esp_pm_impl_init()
{
#ifdef CONFIG_PM_TRACE
    esp_pm_trace_init();
#endif
    ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos0",
            &s_rtos_lock_handle[0]));
    ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[0]));
#if portNUM_PROCESSORS == 2
    ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos1",
            &s_rtos_lock_handle[1]));
    ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[1]));
#endif // portNUM_PROCESSORS == 2

    /* Configure all modes to use the default CPU frequency.
     * This will be modified later by a call to esp_pm_configure.
     */
    rtc_cpu_freq_config_t default_config;
    if (!rtc_clk_cpu_freq_mhz_to_config(CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ, &default_config)) {
        assert(false && "unsupported frequency");
    }
    for (size_t i = 0; i < PM_MODE_COUNT; ++i) {
        s_cpu_freq_by_mode[i] = default_config;
    }
}
/* ==== concatenation boundary: following content is from 352839.c ==== */
/* Configuration file parsing and CONFIG GET/SET commands implementation. * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "cluster.h" #include <fcntl.h> #include <sys/stat.h> /*----------------------------------------------------------------------------- * Config file name-value maps. 
 *----------------------------------------------------------------------------*/

/* Name <-> integer mapping entry for string-valued config directives.
 * Tables built from these are terminated by a {NULL, 0} sentinel. */
typedef struct configEnum {
    const char *name;
    const int val;
} configEnum;

configEnum maxmemory_policy_enum[] = {
    {"volatile-lru", MAXMEMORY_VOLATILE_LRU},
    {"volatile-lfu", MAXMEMORY_VOLATILE_LFU},
    {"volatile-random",MAXMEMORY_VOLATILE_RANDOM},
    {"volatile-ttl",MAXMEMORY_VOLATILE_TTL},
    {"allkeys-lru",MAXMEMORY_ALLKEYS_LRU},
    {"allkeys-lfu",MAXMEMORY_ALLKEYS_LFU},
    {"allkeys-random",MAXMEMORY_ALLKEYS_RANDOM},
    {"noeviction",MAXMEMORY_NO_EVICTION},
    {NULL, 0}
};

configEnum syslog_facility_enum[] = {
    {"user", LOG_USER},
    {"local0", LOG_LOCAL0},
    {"local1", LOG_LOCAL1},
    {"local2", LOG_LOCAL2},
    {"local3", LOG_LOCAL3},
    {"local4", LOG_LOCAL4},
    {"local5", LOG_LOCAL5},
    {"local6", LOG_LOCAL6},
    {"local7", LOG_LOCAL7},
    {NULL, 0}
};

configEnum loglevel_enum[] = {
    {"debug", LL_DEBUG},
    {"verbose", LL_VERBOSE},
    {"notice", LL_NOTICE},
    {"warning", LL_WARNING},
    {NULL,0}
};

configEnum supervised_mode_enum[] = {
    {"upstart", SUPERVISED_UPSTART},
    {"systemd", SUPERVISED_SYSTEMD},
    {"auto", SUPERVISED_AUTODETECT},
    {"no", SUPERVISED_NONE},
    {NULL, 0}
};

configEnum aof_fsync_enum[] = {
    {"everysec", AOF_FSYNC_EVERYSEC},
    {"always", AOF_FSYNC_ALWAYS},
    {"no", AOF_FSYNC_NO},
    {NULL, 0}
};

/* Output buffer limits presets. */
/* Order matches the client classes indexed by getClientTypeByName():
 * fields are {hard limit bytes, soft limit bytes, soft limit seconds}. */
clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = {
    {0, 0, 0}, /* normal */
    {1024*1024*256, 1024*1024*64, 60}, /* slave */
    {1024*1024*32, 1024*1024*8, 60} /* pubsub */
};

/*-----------------------------------------------------------------------------
 * Enum access functions
 *----------------------------------------------------------------------------*/

/* Get enum value from name. If there is no match INT_MIN is returned. */
/* Name comparison is case-insensitive. */
int configEnumGetValue(configEnum *ce, char *name) {
    while(ce->name != NULL) {
        if (!strcasecmp(ce->name,name)) return ce->val;
        ce++;
    }
    return INT_MIN;
}

/* Get enum name from value. If no match is found NULL is returned.
*/ const char *configEnumGetName(configEnum *ce, int val) { while(ce->name != NULL) { if (ce->val == val) return ce->name; ce++; } return NULL; } /* Wrapper for configEnumGetName() returning "unknown" instead of NULL if * there is no match. */ const char *configEnumGetNameOrUnknown(configEnum *ce, int val) { const char *name = configEnumGetName(ce,val); return name ? name : "unknown"; } /* Used for INFO generation. */ const char *evictPolicyToString(void) { return configEnumGetNameOrUnknown(maxmemory_policy_enum,server.maxmemory_policy); } /*----------------------------------------------------------------------------- * Config file parsing *----------------------------------------------------------------------------*/ int yesnotoi(char *s) { if (!strcasecmp(s,"yes")) return 1; else if (!strcasecmp(s,"no")) return 0; else return -1; } void appendServerSaveParams(time_t seconds, int changes) { server.saveparams = zrealloc(server.saveparams,sizeof(struct saveparam)*(server.saveparamslen+1), MALLOC_LOCAL); server.saveparams[server.saveparamslen].seconds = seconds; server.saveparams[server.saveparamslen].changes = changes; server.saveparamslen++; } void resetServerSaveParams(void) { zfree(server.saveparams); server.saveparams = NULL; server.saveparamslen = 0; } void queueLoadModule(sds path, sds *argv, int argc) { int i; struct moduleLoadQueueEntry *loadmod; loadmod = zmalloc(sizeof(struct moduleLoadQueueEntry), MALLOC_LOCAL); loadmod->argv = zmalloc(sizeof(robj*)*argc, MALLOC_LOCAL); loadmod->path = sdsnew(path); loadmod->argc = argc; for (i = 0; i < argc; i++) { loadmod->argv[i] = createRawStringObject(argv[i],sdslen(argv[i])); } listAddNodeTail(server.loadmodule_queue,loadmod); } void loadServerConfigFromString(char *config) { char *err = NULL; int linenum = 0, totlines, i; int slaveof_linenum = 0; sds *lines; lines = sdssplitlen(config,strlen(config),"\n",1,&totlines); for (i = 0; i < totlines; i++) { sds *argv; int argc; linenum = i+1; lines[i] = sdstrim(lines[i]," 
\t\r\n"); /* Skip comments and blank lines */ if (lines[i][0] == '#' || lines[i][0] == '\0') continue; /* Split into arguments */ argv = sdssplitargs(lines[i],&argc); if (argv == NULL) { err = "Unbalanced quotes in configuration line"; goto loaderr; } /* Skip this line if the resulting command vector is empty. */ if (argc == 0) { sdsfreesplitres(argv,argc); continue; } sdstolower(argv[0]); /* Execute config directives */ if (!strcasecmp(argv[0],"timeout") && argc == 2) { server.maxidletime = atoi(argv[1]); if (server.maxidletime < 0) { err = "Invalid timeout value"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-keepalive") && argc == 2) { server.tcpkeepalive = atoi(argv[1]); if (server.tcpkeepalive < 0) { err = "Invalid tcp-keepalive value"; goto loaderr; } } else if (!strcasecmp(argv[0],"protected-mode") && argc == 2) { if ((server.protected_mode = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"gopher-enabled") && argc == 2) { if ((server.gopher_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"port") && argc == 2) { server.port = atoi(argv[1]); if (server.port < 0 || server.port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-backlog") && argc == 2) { server.tcp_backlog = atoi(argv[1]); if (server.tcp_backlog < 0) { err = "Invalid backlog value"; goto loaderr; } } else if (!strcasecmp(argv[0],"bind") && argc >= 2) { int j, addresses = argc-1; if (addresses > CONFIG_BINDADDR_MAX) { err = "Too many bind addresses specified"; goto loaderr; } for (j = 0; j < addresses; j++) server.bindaddr[j] = zstrdup(argv[j+1]); server.bindaddr_count = addresses; } else if (!strcasecmp(argv[0],"unixsocket") && argc == 2) { server.unixsocket = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"unixsocketperm") && argc == 2) { errno = 0; server.unixsocketperm = (mode_t)strtol(argv[1], NULL, 8); if 
(errno || server.unixsocketperm > 0777) { err = "Invalid socket file permissions"; goto loaderr; } } else if (!strcasecmp(argv[0],"save")) { if (argc == 3) { int seconds = atoi(argv[1]); int changes = atoi(argv[2]); if (seconds < 1 || changes < 0) { err = "Invalid save parameters"; goto loaderr; } appendServerSaveParams(seconds,changes); } else if (argc == 2 && !strcasecmp(argv[1],"")) { resetServerSaveParams(); } } else if (!strcasecmp(argv[0],"dir") && argc == 2) { if (chdir(argv[1]) == -1) { serverLog(LL_WARNING,"Can't chdir to '%s': %s", argv[1], strerror(errno)); exit(1); } } else if (!strcasecmp(argv[0],"loglevel") && argc == 2) { server.verbosity = configEnumGetValue(loglevel_enum,argv[1]); if (server.verbosity == INT_MIN) { err = "Invalid log level. " "Must be one of debug, verbose, notice, warning"; goto loaderr; } } else if (!strcasecmp(argv[0],"logfile") && argc == 2) { FILE *logfp; zfree(server.logfile); server.logfile = zstrdup(argv[1]); if (server.logfile[0] != '\0') { /* Test if we are able to open the file. The server will not * be able to abort just for this problem later... 
*/ logfp = fopen(server.logfile,"a"); if (logfp == NULL) { err = sdscatprintf(sdsempty(), "Can't open the log file: %s", strerror(errno)); goto loaderr; } fclose(logfp); } } else if (!strcasecmp(argv[0],"aclfile") && argc == 2) { zfree(server.acl_filename); server.acl_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"always-show-logo") && argc == 2) { if ((server.always_show_logo = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"syslog-enabled") && argc == 2) { if ((server.syslog_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"syslog-ident") && argc == 2) { if (server.syslog_ident) zfree(server.syslog_ident); server.syslog_ident = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"syslog-facility") && argc == 2) { server.syslog_facility = configEnumGetValue(syslog_facility_enum,argv[1]); if (server.syslog_facility == INT_MIN) { err = "Invalid log facility. 
Must be one of USER or between LOCAL0-LOCAL7"; goto loaderr; } } else if (!strcasecmp(argv[0],"databases") && argc == 2) { server.dbnum = atoi(argv[1]); if (server.dbnum < 1) { err = "Invalid number of databases"; goto loaderr; } } else if (!strcasecmp(argv[0],"include") && argc == 2) { loadServerConfig(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxclients") && argc == 2) { server.maxclients = atoi(argv[1]); if (server.maxclients < 1) { err = "Invalid max clients limit"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) { server.maxmemory = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) { server.maxmemory_policy = configEnumGetValue(maxmemory_policy_enum,argv[1]); if (server.maxmemory_policy == INT_MIN) { err = "Invalid maxmemory policy"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory-samples") && argc == 2) { server.maxmemory_samples = atoi(argv[1]); if (server.maxmemory_samples <= 0) { err = "maxmemory-samples must be 1 or greater"; goto loaderr; } } else if ((!strcasecmp(argv[0],"proto-max-bulk-len")) && argc == 2) { server.proto_max_bulk_len = memtoll(argv[1],NULL); } else if ((!strcasecmp(argv[0],"client-query-buffer-limit")) && argc == 2) { server.client_max_querybuf_len = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"lfu-log-factor") && argc == 2) { server.lfu_log_factor = atoi(argv[1]); if (server.lfu_log_factor < 0) { err = "lfu-log-factor must be 0 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"lfu-decay-time") && argc == 2) { server.lfu_decay_time = atoi(argv[1]); if (server.lfu_decay_time < 0) { err = "lfu-decay-time must be 0 or greater"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slaveof") || !strcasecmp(argv[0],"replicaof")) && argc == 3) { slaveof_linenum = linenum; server.masterhost = sdsnew(argv[1]); server.masterport = atoi(argv[2]); server.repl_state = REPL_STATE_CONNECT; } else if ((!strcasecmp(argv[0],"repl-ping-slave-period") || 
!strcasecmp(argv[0],"repl-ping-replica-period")) && argc == 2) { server.repl_ping_slave_period = atoi(argv[1]); if (server.repl_ping_slave_period <= 0) { err = "repl-ping-replica-period must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-timeout") && argc == 2) { server.repl_timeout = atoi(argv[1]); if (server.repl_timeout <= 0) { err = "repl-timeout must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-disable-tcp-nodelay") && argc==2) { if ((server.repl_disable_tcp_nodelay = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-diskless-sync") && argc==2) { if ((server.repl_diskless_sync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-diskless-sync-delay") && argc==2) { server.repl_diskless_sync_delay = atoi(argv[1]); if (server.repl_diskless_sync_delay < 0) { err = "repl-diskless-sync-delay can't be negative"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-backlog-size") && argc == 2) { long long size = memtoll(argv[1],NULL); if (size <= 0) { err = "repl-backlog-size must be 1 or greater."; goto loaderr; } resizeReplicationBacklog(size); } else if (!strcasecmp(argv[0],"repl-backlog-ttl") && argc == 2) { server.repl_backlog_time_limit = atoi(argv[1]); if (server.repl_backlog_time_limit < 0) { err = "repl-backlog-ttl can't be negative "; goto loaderr; } } else if (!strcasecmp(argv[0],"masteruser") && argc == 2) { zfree(server.masteruser); server.masteruser = argv[1][0] ? zstrdup(argv[1]) : NULL; } else if (!strcasecmp(argv[0],"masterauth") && argc == 2) { zfree(server.masterauth); server.masterauth = argv[1][0] ? 
zstrdup(argv[1]) : NULL; } else if ((!strcasecmp(argv[0],"slave-serve-stale-data") || !strcasecmp(argv[0],"replica-serve-stale-data")) && argc == 2) { if ((server.repl_serve_stale_data = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-read-only") || !strcasecmp(argv[0],"replica-read-only")) && argc == 2) { if ((server.repl_slave_ro = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-ignore-maxmemory") || !strcasecmp(argv[0],"replica-ignore-maxmemory")) && argc == 2) { if ((server.repl_slave_ignore_maxmemory = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdbcompression") && argc == 2) { if ((server.rdb_compression = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdbchecksum") && argc == 2) { if ((server.rdb_checksum = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"activerehashing") && argc == 2) { if ((server.activerehashing = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-eviction") && argc == 2) { if ((server.lazyfree_lazy_eviction = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-expire") && argc == 2) { if ((server.lazyfree_lazy_expire = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-server-del") && argc == 2){ if ((server.lazyfree_lazy_server_del = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-lazy-flush") || !strcasecmp(argv[0],"replica-lazy-flush")) && argc == 2) { if 
((server.repl_slave_lazy_flush = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"activedefrag") && argc == 2) { if ((server.active_defrag_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } if (server.active_defrag_enabled) { #ifndef HAVE_DEFRAG err = "active defrag can't be enabled without proper jemalloc support"; goto loaderr; #endif } } else if (!strcasecmp(argv[0],"daemonize") && argc == 2) { if ((server.daemonize = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"dynamic-hz") && argc == 2) { if ((server.dynamic_hz = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"hz") && argc == 2) { server.config_hz = atoi(argv[1]); if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ; } else if (!strcasecmp(argv[0],"appendonly") && argc == 2) { int yes; if ((yes = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } server.aof_state = yes ? 
AOF_ON : AOF_OFF; } else if (!strcasecmp(argv[0],"appendfilename") && argc == 2) { if (!pathIsBaseName(argv[1])) { err = "appendfilename can't be a path, just a filename"; goto loaderr; } zfree(server.aof_filename); server.aof_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"no-appendfsync-on-rewrite") && argc == 2) { if ((server.aof_no_fsync_on_rewrite= yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"appendfsync") && argc == 2) { server.aof_fsync = configEnumGetValue(aof_fsync_enum,argv[1]); if (server.aof_fsync == INT_MIN) { err = "argument must be 'no', 'always' or 'everysec'"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-percentage") && argc == 2) { server.aof_rewrite_perc = atoi(argv[1]); if (server.aof_rewrite_perc < 0) { err = "Invalid negative percentage for AOF auto rewrite"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-min-size") && argc == 2) { server.aof_rewrite_min_size = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"aof-rewrite-incremental-fsync") && argc == 2) { if ((server.aof_rewrite_incremental_fsync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdb-save-incremental-fsync") && argc == 2) { if ((server.rdb_save_incremental_fsync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-load-truncated") && argc == 2) { if ((server.aof_load_truncated = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-use-rdb-preamble") && argc == 2) { if ((server.aof_use_rdb_preamble = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"requirepass") && argc == 2) { if (strlen(argv[1]) > CONFIG_AUTHPASS_MAX_LEN) { err = "Password is longer than CONFIG_AUTHPASS_MAX_LEN"; goto 
loaderr; } /* The old "requirepass" directive just translates to setting * a password to the default user. */ ACLSetUser(DefaultUser,"resetpass",-1); sds aclop = sdscatprintf(sdsempty(),">%s",argv[1]); ACLSetUser(DefaultUser,aclop,sdslen(aclop)); sdsfree(aclop); } else if (!strcasecmp(argv[0],"pidfile") && argc == 2) { zfree(server.pidfile); server.pidfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"dbfilename") && argc == 2) { if (!pathIsBaseName(argv[1])) { err = "dbfilename can't be a path, just a filename"; goto loaderr; } zfree(server.rdb_filename); server.rdb_filename = zstrdup(argv[1]); } else if(!strcasecmp(argv[0],"db-s3-object") && argc == 2) { zfree(server.rdb_s3bucketpath); server.rdb_s3bucketpath = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"active-defrag-threshold-lower") && argc == 2) { server.active_defrag_threshold_lower = atoi(argv[1]); if (server.active_defrag_threshold_lower < 0 || server.active_defrag_threshold_lower > 1000) { err = "active-defrag-threshold-lower must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-threshold-upper") && argc == 2) { server.active_defrag_threshold_upper = atoi(argv[1]); if (server.active_defrag_threshold_upper < 0 || server.active_defrag_threshold_upper > 1000) { err = "active-defrag-threshold-upper must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-ignore-bytes") && argc == 2) { server.active_defrag_ignore_bytes = memtoll(argv[1], NULL); if (server.active_defrag_ignore_bytes <= 0) { err = "active-defrag-ignore-bytes must above 0"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-cycle-min") && argc == 2) { server.active_defrag_cycle_min = atoi(argv[1]); if (server.active_defrag_cycle_min < 1 || server.active_defrag_cycle_min > 99) { err = "active-defrag-cycle-min must be between 1 and 99"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-cycle-max") && argc == 2) { server.active_defrag_cycle_max 
= atoi(argv[1]); if (server.active_defrag_cycle_max < 1 || server.active_defrag_cycle_max > 99) { err = "active-defrag-cycle-max must be between 1 and 99"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-max-scan-fields") && argc == 2) { server.active_defrag_max_scan_fields = strtoll(argv[1],NULL,10); if (server.active_defrag_max_scan_fields < 1) { err = "active-defrag-max-scan-fields must be positive"; goto loaderr; } } else if (!strcasecmp(argv[0],"hash-max-ziplist-entries") && argc == 2) { server.hash_max_ziplist_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"hash-max-ziplist-value") && argc == 2) { server.hash_max_ziplist_value = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"stream-node-max-bytes") && argc == 2) { server.stream_node_max_bytes = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"stream-node-max-entries") && argc == 2) { server.stream_node_max_entries = atoi(argv[1]); } else if (!strcasecmp(argv[0],"list-max-ziplist-entries") && argc == 2){ /* DEAD OPTION */ } else if (!strcasecmp(argv[0],"list-max-ziplist-value") && argc == 2) { /* DEAD OPTION */ } else if (!strcasecmp(argv[0],"list-max-ziplist-size") && argc == 2) { server.list_max_ziplist_size = atoi(argv[1]); } else if (!strcasecmp(argv[0],"list-compress-depth") && argc == 2) { server.list_compress_depth = atoi(argv[1]); } else if (!strcasecmp(argv[0],"set-max-intset-entries") && argc == 2) { server.set_max_intset_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"zset-max-ziplist-entries") && argc == 2) { server.zset_max_ziplist_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"zset-max-ziplist-value") && argc == 2) { server.zset_max_ziplist_value = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"hll-sparse-max-bytes") && argc == 2) { server.hll_sparse_max_bytes = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"rename-command") && argc == 3) { struct redisCommand *cmd = lookupCommand(argv[1]); int 
retval; if (!cmd) { err = "No such command in rename-command"; goto loaderr; } /* If the target command name is the empty string we just * remove it from the command table. */ retval = dictDelete(server.commands, argv[1]); serverAssert(retval == DICT_OK); /* Otherwise we re-add the command under a different name. */ if (sdslen(argv[2]) != 0) { sds copy = sdsdup(argv[2]); retval = dictAdd(server.commands, copy, cmd); if (retval != DICT_OK) { sdsfree(copy); err = "Target command name already exists"; goto loaderr; } } } else if (!strcasecmp(argv[0],"cluster-enabled") && argc == 2) { if ((server.cluster_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-config-file") && argc == 2) { zfree(server.cluster_configfile); server.cluster_configfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"cluster-announce-ip") && argc == 2) { zfree(server.cluster_announce_ip); server.cluster_announce_ip = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"cluster-announce-port") && argc == 2) { server.cluster_announce_port = atoi(argv[1]); if (server.cluster_announce_port < 0 || server.cluster_announce_port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-announce-bus-port") && argc == 2) { server.cluster_announce_bus_port = atoi(argv[1]); if (server.cluster_announce_bus_port < 0 || server.cluster_announce_bus_port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-require-full-coverage") && argc == 2) { if ((server.cluster_require_full_coverage = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-node-timeout") && argc == 2) { server.cluster_node_timeout = strtoll(argv[1],NULL,10); if (server.cluster_node_timeout <= 0) { err = "cluster node timeout must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-migration-barrier") && 
argc == 2) { server.cluster_migration_barrier = atoi(argv[1]); if (server.cluster_migration_barrier < 0) { err = "cluster migration barrier must zero or positive"; goto loaderr; } } else if ((!strcasecmp(argv[0],"cluster-slave-validity-factor") || !strcasecmp(argv[0],"cluster-replica-validity-factor")) && argc == 2) { server.cluster_slave_validity_factor = atoi(argv[1]); if (server.cluster_slave_validity_factor < 0) { err = "cluster replica validity factor must be zero or positive"; goto loaderr; } } else if ((!strcasecmp(argv[0],"cluster-slave-no-failover") || !strcasecmp(argv[0],"cluster-replica-no-failover")) && argc == 2) { server.cluster_slave_no_failover = yesnotoi(argv[1]); if (server.cluster_slave_no_failover == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lua-time-limit") && argc == 2) { server.lua_time_limit = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"lua-replicate-commands") && argc == 2) { server.lua_always_replicate_commands = yesnotoi(argv[1]); } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") && argc == 2) { server.slowlog_log_slower_than = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"latency-monitor-threshold") && argc == 2) { server.latency_monitor_threshold = strtoll(argv[1],NULL,10); if (server.latency_monitor_threshold < 0) { err = "The latency threshold can't be negative"; goto loaderr; } } else if (!strcasecmp(argv[0],"slowlog-max-len") && argc == 2) { server.slowlog_max_len = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"client-output-buffer-limit") && argc == 5) { int class = getClientTypeByName(argv[1]); unsigned long long hard, soft; int soft_seconds; if (class == -1 || class == CLIENT_TYPE_MASTER) { err = "Unrecognized client limit class: the user specified " "an invalid one, or 'master' which has no buffer limits."; goto loaderr; } hard = memtoll(argv[2],NULL); soft = memtoll(argv[3],NULL); soft_seconds = atoi(argv[4]); if (soft_seconds < 0) 
{ err = "Negative number of seconds in soft limit is invalid"; goto loaderr; } server.client_obuf_limits[class].hard_limit_bytes = hard; server.client_obuf_limits[class].soft_limit_bytes = soft; server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } else if (!strcasecmp(argv[0],"stop-writes-on-bgsave-error") && argc == 2) { if ((server.stop_writes_on_bgsave_err = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-priority") || !strcasecmp(argv[0],"replica-priority")) && argc == 2) { server.slave_priority = atoi(argv[1]); } else if ((!strcasecmp(argv[0],"slave-announce-ip") || !strcasecmp(argv[0],"replica-announce-ip")) && argc == 2) { zfree(server.slave_announce_ip); server.slave_announce_ip = zstrdup(argv[1]); } else if ((!strcasecmp(argv[0],"slave-announce-port") || !strcasecmp(argv[0],"replica-announce-port")) && argc == 2) { server.slave_announce_port = atoi(argv[1]); if (server.slave_announce_port < 0 || server.slave_announce_port > 65535) { err = "Invalid port"; goto loaderr; } } else if ((!strcasecmp(argv[0],"min-slaves-to-write") || !strcasecmp(argv[0],"min-replicas-to-write")) && argc == 2) { server.repl_min_slaves_to_write = atoi(argv[1]); if (server.repl_min_slaves_to_write < 0) { err = "Invalid value for min-replicas-to-write."; goto loaderr; } } else if ((!strcasecmp(argv[0],"min-slaves-max-lag") || !strcasecmp(argv[0],"min-replicas-max-lag")) && argc == 2) { server.repl_min_slaves_max_lag = atoi(argv[1]); if (server.repl_min_slaves_max_lag < 0) { err = "Invalid value for min-replicas-max-lag."; goto loaderr; } } else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) { int flags = keyspaceEventsStringToFlags(argv[1]); if (flags == -1) { err = "Invalid event class character. 
Use 'g$lshzxeA'."; goto loaderr; } server.notify_keyspace_events = flags; } else if (!strcasecmp(argv[0],"supervised") && argc == 2) { server.supervised_mode = configEnumGetValue(supervised_mode_enum,argv[1]); if (server.supervised_mode == INT_MIN) { err = "Invalid option for 'supervised'. " "Allowed values: 'upstart', 'systemd', 'auto', or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"user") && argc >= 2) { int argc_err; if (ACLAppendUserForLoading(argv,argc,&argc_err) == C_ERR) { char buf[1024]; char *errmsg = ACLSetUserStringError(); snprintf(buf,sizeof(buf),"Error in user declaration '%s': %s", argv[argc_err],errmsg); err = buf; goto loaderr; } } else if (!strcasecmp(argv[0],"loadmodule") && argc >= 2) { queueLoadModule(argv[1],&argv[2],argc-2); } else if (!strcasecmp(argv[0],"sentinel")) { /* argc == 1 is handled by main() as we need to enter the sentinel * mode ASAP. */ if (argc != 1) { if (!server.sentinel_mode) { err = "sentinel directive while not in sentinel mode"; goto loaderr; } err = sentinelHandleConfiguration(argv+1,argc-1); if (err) goto loaderr; } } else if (!strcasecmp(argv[0],"scratch-file-path")) { #ifdef USE_MEMKIND storage_init(argv[1], server.maxmemory); #else err = "KeyDB not compliled with scratch-file support."; goto loaderr; #endif } else if (!strcasecmp(argv[0],"server-threads") && argc == 2) { server.cthreads = atoi(argv[1]); if (server.cthreads <= 0 || server.cthreads > MAX_EVENT_LOOPS) { err = "Invalid number of threads specified"; goto loaderr; } } else if (!strcasecmp(argv[0],"server-thread-affinity") && argc == 2) { if (strcasecmp(argv[1], "true") == 0) { server.fThreadAffinity = TRUE; } else if (strcasecmp(argv[1], "false") == 0) { server.fThreadAffinity = FALSE; } else { err = "Unknown argument: server-thread-affinity expects either true or false"; goto loaderr; } } else { err = "Bad directive or wrong number of arguments"; goto loaderr; } sdsfreesplitres(argv,argc); } /* Sanity checks. 
*/
    if (server.cluster_enabled && server.masterhost) {
        linenum = slaveof_linenum;
        i = linenum-1;
        err = "replicaof directive not allowed in cluster mode";
        goto loaderr;
    }

    sdsfreesplitres(lines,totlines);
    return;

loaderr:
    /* Fatal configuration error: report the 1-based line number and the
     * offending line, then terminate the process. */
    fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR ***\n");
    fprintf(stderr, "Reading the configuration file, at line %d\n", linenum);
    fprintf(stderr, ">>> '%s'\n", lines[i]);
    fprintf(stderr, "%s\n", err);
    exit(1);
}

/* Load the server configuration from the specified filename.
 * The function appends the additional configuration directives stored
 * in the 'options' string to the config file before loading.
 *
 * Both filename and options can be NULL, in such a case are considered
 * empty. This way loadServerConfig can be used to just load a file or
 * just load a string. */
void loadServerConfig(char *filename, char *options) {
    sds config = sdsempty();
    char buf[CONFIG_MAX_LINE+1];

    /* Load the file content. A filename of "-" means read from stdin. */
    if (filename) {
        FILE *fp;

        if (filename[0] == '-' && filename[1] == '\0') {
            fp = stdin;
        } else {
            if ((fp = fopen(filename,"r")) == NULL) {
                serverLog(LL_WARNING,
                    "Fatal error, can't open config file '%s'", filename);
                exit(1);
            }
        }
        while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL)
            config = sdscat(config,buf);
        if (fp != stdin) fclose(fp);
    }
    /* Append the additional options */
    if (options) {
        config = sdscat(config,"\n");
        config = sdscat(config,options);
    }
    loadServerConfigFromString(config);
    sdsfree(config);
}

/*-----------------------------------------------------------------------------
 * CONFIG SET implementation
 *----------------------------------------------------------------------------*/

/* Each macro below emits one "} else if (param matches) { ..." arm of the
 * long else-if chain inside configSetCommand(). The chain is opened by the
 * "if (0)" at the top of the function and closed by config_set_else. */
#define config_set_bool_field(_name,_var) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name)) { \
        int yn = yesnotoi(ptrFromObj(o)); \
        if (yn == -1) goto badfmt; \
        _var = yn;

#define config_set_numerical_field(_name,_var,min,max) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name)) { \
        if (getLongLongFromObject(o,&ll) == C_ERR) goto badfmt; \
        if (min != LLONG_MIN && ll < min) goto badfmt; \
        if (max != LLONG_MAX && ll > max) goto badfmt; \
        _var = ll;

#define config_set_memory_field(_name,_var) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name)) { \
        ll = memtoll(ptrFromObj(o),&err); \
        if (err || ll < 0) goto badfmt; \
        _var = ll;

#define config_set_enum_field(_name,_var,_enumvar) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name)) { \
        int enumval = configEnumGetValue(_enumvar,ptrFromObj(o)); \
        if (enumval == INT_MIN) goto badfmt; \
        _var = enumval;

#define config_set_special_field(_name) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name)) {

#define config_set_special_field_with_alias(_name1,_name2) \
    } else if (!strcasecmp(ptrFromObj(c->argv[2]),_name1) || \
               !strcasecmp(ptrFromObj(c->argv[2]),_name2)) {

#define config_set_else } else

/* CONFIG SET <parameter> <value>: dispatch on the parameter name through
 * the macro-generated else-if chain. On bad values jumps to badfmt. */
void configSetCommand(client *c) {
    robj *o;
    long long ll;
    int err;
    serverAssertWithInfo(c,c->argv[2],sdsEncodedObject(c->argv[2]));
    serverAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3]));
    o = c->argv[3];

    if (0) { /* this starts the config_set macros else-if chain. */

    /* Special fields that can't be handled with general macros. */
    config_set_special_field("dbfilename") {
        if (!pathIsBaseName(ptrFromObj(o))) {
            addReplyError(c, "dbfilename can't be a path, just a filename");
            return;
        }
        zfree(server.rdb_filename);
        server.rdb_filename = zstrdup(ptrFromObj(o));
    } config_set_special_field("requirepass") {
        if (sdslen(ptrFromObj(o)) > CONFIG_AUTHPASS_MAX_LEN) goto badfmt;
        /* The old "requirepass" directive just translates to setting
         * a password to the default user. */
        ACLSetUser(DefaultUser,"resetpass",-1);
        sds aclop = sdscatprintf(sdsempty(),">%s",(char*)ptrFromObj(o));
        ACLSetUser(DefaultUser,aclop,sdslen(aclop));
        sdsfree(aclop);
    } config_set_special_field("masteruser") {
        /* An empty string clears the option (NULL). */
        zfree(server.masteruser);
        server.masteruser = ((char*)ptrFromObj(o))[0] ?
            zstrdup(ptrFromObj(o)) : NULL;
    } config_set_special_field("masterauth") {
        zfree(server.masterauth);
        server.masterauth = ((char*)ptrFromObj(o))[0] ?
zstrdup(ptrFromObj(o)) : NULL; } config_set_special_field("cluster-announce-ip") { zfree(server.cluster_announce_ip); server.cluster_announce_ip = ((char*)ptrFromObj(o))[0] ? zstrdup(ptrFromObj(o)) : NULL; } config_set_special_field("maxclients") { int orig_value = server.maxclients; if (getLongLongFromObject(o,&ll) == C_ERR || ll < 1) goto badfmt; /* Try to check if the OS is capable of supporting so many FDs. */ server.maxclients = ll; serverAssert(FALSE); if (ll > orig_value) { adjustOpenFilesLimit(); if (server.maxclients != ll) { addReplyErrorFormat(c,"The operating system is not able to handle the specified number of clients, try with %d", server.maxclients); server.maxclients = orig_value; return; } if ((unsigned int) aeGetSetSize(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el) < server.maxclients + CONFIG_FDSET_INCR) { for (int iel = 0; iel < server.cthreads; ++iel) { if (aeResizeSetSize(server.rgthreadvar[iel].el, server.maxclients + CONFIG_FDSET_INCR) == AE_ERR) { addReplyError(c,"The event loop API used by Redis is not able to handle the specified number of clients"); server.maxclients = orig_value; return; } } } } } config_set_special_field("appendonly") { int enable = yesnotoi(ptrFromObj(o)); if (enable == -1) goto badfmt; if (enable == 0 && server.aof_state != AOF_OFF) { stopAppendOnly(); } else if (enable && server.aof_state == AOF_OFF) { if (startAppendOnly() == C_ERR) { addReplyError(c, "Unable to turn on AOF. 
Check server logs."); return; } } } config_set_special_field("save") { int vlen, j; sds *v = sdssplitlen(ptrFromObj(o),sdslen(ptrFromObj(o))," ",1,&vlen); /* Perform sanity check before setting the new config: * - Even number of args * - Seconds >= 1, changes >= 0 */ if (vlen & 1) { sdsfreesplitres(v,vlen); goto badfmt; } for (j = 0; j < vlen; j++) { char *eptr; long val; val = strtoll(v[j], &eptr, 10); if (eptr[0] != '\0' || ((j & 1) == 0 && val < 1) || ((j & 1) == 1 && val < 0)) { sdsfreesplitres(v,vlen); goto badfmt; } } /* Finally set the new config */ resetServerSaveParams(); for (j = 0; j < vlen; j += 2) { time_t seconds; int changes; seconds = strtoll(v[j],NULL,10); changes = strtoll(v[j+1],NULL,10); appendServerSaveParams(seconds, changes); } sdsfreesplitres(v,vlen); } config_set_special_field("dir") { if (chdir((char*)ptrFromObj(o)) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } } config_set_special_field("client-output-buffer-limit") { int vlen, j; sds *v = sdssplitlen(ptrFromObj(o),sdslen(ptrFromObj(o))," ",1,&vlen); /* We need a multiple of 4: <class> <hard> <soft> <soft_seconds> */ if (vlen % 4) { sdsfreesplitres(v,vlen); goto badfmt; } /* Sanity check of single arguments, so that we either refuse the * whole configuration string or accept it all, even if a single * error in a single client class is present. 
*/
        for (j = 0; j < vlen; j++) {
            long val;

            /* Every 4th token is a client class name; the other three are
             * byte sizes / seconds and must be non-negative. */
            if ((j % 4) == 0) {
                int class = getClientTypeByName(v[j]);
                if (class == -1 || class == CLIENT_TYPE_MASTER) {
                    sdsfreesplitres(v,vlen);
                    goto badfmt;
                }
            } else {
                val = memtoll(v[j], &err);
                if (err || val < 0) {
                    sdsfreesplitres(v,vlen);
                    goto badfmt;
                }
            }
        }
        /* Finally set the new config */
        for (j = 0; j < vlen; j += 4) {
            int class;
            unsigned long long hard, soft;
            int soft_seconds;

            class = getClientTypeByName(v[j]);
            hard = strtoll(v[j+1],NULL,10);
            soft = strtoll(v[j+2],NULL,10);
            soft_seconds = strtoll(v[j+3],NULL,10);

            server.client_obuf_limits[class].hard_limit_bytes = hard;
            server.client_obuf_limits[class].soft_limit_bytes = soft;
            server.client_obuf_limits[class].soft_limit_seconds = soft_seconds;
        }
        sdsfreesplitres(v,vlen);
    } config_set_special_field("notify-keyspace-events") {
        int flags = keyspaceEventsStringToFlags(ptrFromObj(o));

        if (flags == -1) goto badfmt;
        server.notify_keyspace_events = flags;
    } config_set_special_field_with_alias("slave-announce-ip",
                                         "replica-announce-ip")
    {
        zfree(server.slave_announce_ip);
        server.slave_announce_ip = ((char*)ptrFromObj(o))[0] ?
            zstrdup(ptrFromObj(o)) : NULL;

    /* Boolean fields.
     * config_set_bool_field(name,var).
     */
    } config_set_bool_field(
      "rdbcompression", server.rdb_compression) {
    } config_set_bool_field(
      "repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay) {
    } config_set_bool_field(
      "repl-diskless-sync",server.repl_diskless_sync) {
    } config_set_bool_field(
      "cluster-require-full-coverage",server.cluster_require_full_coverage) {
    } config_set_bool_field(
      "cluster-slave-no-failover",server.cluster_slave_no_failover) {
    } config_set_bool_field(
      "cluster-replica-no-failover",server.cluster_slave_no_failover) {
    } config_set_bool_field(
      "aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) {
    } config_set_bool_field(
      "rdb-save-incremental-fsync",server.rdb_save_incremental_fsync) {
    } config_set_bool_field(
      "aof-load-truncated",server.aof_load_truncated) {
    } config_set_bool_field(
      "aof-use-rdb-preamble",server.aof_use_rdb_preamble) {
    } config_set_bool_field(
      "slave-serve-stale-data",server.repl_serve_stale_data) {
    } config_set_bool_field(
      "replica-serve-stale-data",server.repl_serve_stale_data) {
    } config_set_bool_field(
      "slave-read-only",server.repl_slave_ro) {
    } config_set_bool_field(
      "replica-read-only",server.repl_slave_ro) {
    } config_set_bool_field(
      "slave-ignore-maxmemory",server.repl_slave_ignore_maxmemory) {
    } config_set_bool_field(
      "replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory) {
    } config_set_bool_field(
      "activerehashing",server.activerehashing) {
    } config_set_bool_field(
      "activedefrag",server.active_defrag_enabled) {
#ifndef HAVE_DEFRAG
        /* Active defrag needs the patched Jemalloc; refuse to enable it
         * on builds without HAVE_DEFRAG. */
        if (server.active_defrag_enabled) {
            server.active_defrag_enabled = 0;
            addReplyError(c,
                "-DISABLED Active defragmentation cannot be enabled: it "
                "requires a Redis server compiled with a modified Jemalloc "
                "like the one shipped by default with the Redis source "
                "distribution");
            return;
        }
#endif
    } config_set_bool_field(
      "protected-mode",server.protected_mode) {
    } config_set_bool_field(
      "gopher-enabled",server.gopher_enabled) {
    } config_set_bool_field(
      "stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err) {
    } config_set_bool_field(
      "lazyfree-lazy-eviction",server.lazyfree_lazy_eviction) {
    } config_set_bool_field(
      "lazyfree-lazy-expire",server.lazyfree_lazy_expire) {
    } config_set_bool_field(
      "lazyfree-lazy-server-del",server.lazyfree_lazy_server_del) {
    } config_set_bool_field(
      "slave-lazy-flush",server.repl_slave_lazy_flush) {
    } config_set_bool_field(
      "replica-lazy-flush",server.repl_slave_lazy_flush) {
    } config_set_bool_field(
      "no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite) {
    } config_set_bool_field(
      "dynamic-hz",server.dynamic_hz) {

    /* Numerical fields.
     * config_set_numerical_field(name,var,min,max) */
    } config_set_numerical_field(
      "tcp-keepalive",server.tcpkeepalive,0,INT_MAX) {
    } config_set_numerical_field(
      "maxmemory-samples",server.maxmemory_samples,1,INT_MAX) {
    } config_set_numerical_field(
      "lfu-log-factor",server.lfu_log_factor,0,INT_MAX) {
    } config_set_numerical_field(
      "lfu-decay-time",server.lfu_decay_time,0,INT_MAX) {
    } config_set_numerical_field(
      "timeout",server.maxidletime,0,INT_MAX) {
    } config_set_numerical_field(
      "active-defrag-threshold-lower",server.active_defrag_threshold_lower,0,1000) {
    } config_set_numerical_field(
      "active-defrag-threshold-upper",server.active_defrag_threshold_upper,0,1000) {
    } config_set_memory_field(
      "active-defrag-ignore-bytes",server.active_defrag_ignore_bytes) {
    } config_set_numerical_field(
      "active-defrag-cycle-min",server.active_defrag_cycle_min,1,99) {
    } config_set_numerical_field(
      "active-defrag-cycle-max",server.active_defrag_cycle_max,1,99) {
    } config_set_numerical_field(
      "active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,1,LONG_MAX) {
    } config_set_numerical_field(
      "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,INT_MAX){
    } config_set_numerical_field(
      "hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LONG_MAX) {
    } config_set_numerical_field(
      "hash-max-ziplist-value",server.hash_max_ziplist_value,0,LONG_MAX)
    {
    } config_set_numerical_field(
      "stream-node-max-bytes",server.stream_node_max_bytes,0,LONG_MAX) {
    } config_set_numerical_field(
      "stream-node-max-entries",server.stream_node_max_entries,0,LLONG_MAX) {
    } config_set_numerical_field(
      "list-max-ziplist-size",server.list_max_ziplist_size,INT_MIN,INT_MAX) {
    } config_set_numerical_field(
      "list-compress-depth",server.list_compress_depth,0,INT_MAX) {
    } config_set_numerical_field(
      "set-max-intset-entries",server.set_max_intset_entries,0,LONG_MAX) {
    } config_set_numerical_field(
      "zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LONG_MAX) {
    } config_set_numerical_field(
      "zset-max-ziplist-value",server.zset_max_ziplist_value,0,LONG_MAX) {
    } config_set_numerical_field(
      "hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LONG_MAX) {
    } config_set_numerical_field(
      "lua-time-limit",server.lua_time_limit,0,LONG_MAX) {
    } config_set_numerical_field(
      "slowlog-log-slower-than",server.slowlog_log_slower_than,-1,LLONG_MAX) {
    } config_set_numerical_field(
      "slowlog-max-len",ll,0,LONG_MAX) {
        /* Cast to unsigned.
         */
        server.slowlog_max_len = (unsigned long)ll;
    } config_set_numerical_field(
      "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){
    } config_set_numerical_field(
      "repl-ping-slave-period",server.repl_ping_slave_period,1,INT_MAX) {
    } config_set_numerical_field(
      "repl-ping-replica-period",server.repl_ping_slave_period,1,INT_MAX) {
    } config_set_numerical_field(
      "repl-timeout",server.repl_timeout,1,INT_MAX) {
    } config_set_numerical_field(
      "repl-backlog-ttl",server.repl_backlog_time_limit,0,LONG_MAX) {
    } config_set_numerical_field(
      "repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,INT_MAX) {
    } config_set_numerical_field(
      "slave-priority",server.slave_priority,0,INT_MAX) {
    } config_set_numerical_field(
      "replica-priority",server.slave_priority,0,INT_MAX) {
    } config_set_numerical_field(
      "slave-announce-port",server.slave_announce_port,0,65535) {
    } config_set_numerical_field(
      "replica-announce-port",server.slave_announce_port,0,65535) {
    } config_set_numerical_field(
      "min-slaves-to-write",server.repl_min_slaves_to_write,0,INT_MAX) {
        refreshGoodSlavesCount();
    } config_set_numerical_field(
      "min-replicas-to-write",server.repl_min_slaves_to_write,0,INT_MAX) {
        refreshGoodSlavesCount();
    } config_set_numerical_field(
      "min-slaves-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) {
        refreshGoodSlavesCount();
    } config_set_numerical_field(
      "min-replicas-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) {
        refreshGoodSlavesCount();
    } config_set_numerical_field(
      "cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) {
    } config_set_numerical_field(
      "cluster-announce-port",server.cluster_announce_port,0,65535) {
    } config_set_numerical_field(
      "cluster-announce-bus-port",server.cluster_announce_bus_port,0,65535) {
    } config_set_numerical_field(
      "cluster-migration-barrier",server.cluster_migration_barrier,0,INT_MAX){
    } config_set_numerical_field(
      "cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) {
    } config_set_numerical_field(
      "cluster-replica-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) {
    } config_set_numerical_field(
      "hz",server.config_hz,0,INT_MAX) {
        /* Hz is more an hint from the user, so we accept values out of range
         * but cap them to reasonable values. */
        if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ;
        if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ;
    } config_set_numerical_field(
      "watchdog-period",ll,0,INT_MAX) {
        if (ll)
            enableWatchdog(ll);
        else
            disableWatchdog();

    /* Memory fields.
     * config_set_memory_field(name,var) */
    } config_set_memory_field("maxmemory",server.maxmemory) {
        if (server.maxmemory) {
            if (server.maxmemory < zmalloc_used_memory()) {
                serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in key eviction and/or the inability to accept new write commands depending on the maxmemory-policy.");
            }
            freeMemoryIfNeededAndSafe();
        }
    } config_set_memory_field(
      "proto-max-bulk-len",server.proto_max_bulk_len) {
    } config_set_memory_field(
      "client-query-buffer-limit",server.client_max_querybuf_len) {
    } config_set_memory_field("repl-backlog-size",ll) {
        resizeReplicationBacklog(ll);
    } config_set_memory_field("auto-aof-rewrite-min-size",ll) {
        server.aof_rewrite_min_size = ll;

    /* Enumeration fields.
     * config_set_enum_field(name,var,enum_var) */
    } config_set_enum_field(
      "loglevel",server.verbosity,loglevel_enum) {
    } config_set_enum_field(
      "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) {
    } config_set_enum_field(
      "appendfsync",server.aof_fsync,aof_fsync_enum) {

    /* Everything else is an error... */
    } config_set_else {
        addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s",
            (char*)ptrFromObj(c->argv[2]));
        return;
    }

    /* On success we just return a generic OK for all the options.
*/
    addReply(c,shared.ok);
    return;

badfmt: /* Bad format errors */
    addReplyErrorFormat(c,"Invalid argument '%s' for CONFIG SET '%s'",
            (char*)ptrFromObj(o),
            (char*)ptrFromObj(c->argv[2]));
}

/*-----------------------------------------------------------------------------
 * CONFIG GET implementation
 *----------------------------------------------------------------------------*/

/* Each macro below emits one pattern-matched "<name> <value>" reply pair,
 * incrementing 'matches'. They rely on 'c', 'pattern', 'buf' and 'matches'
 * being in scope inside configGetCommand(). */
#define config_get_string_field(_name,_var) do { \
    if (stringmatch(pattern,_name,1)) { \
        addReplyBulkCString(c,_name); \
        addReplyBulkCString(c,_var ? _var : ""); \
        matches++; \
    } \
} while(0);

#define config_get_bool_field(_name,_var) do { \
    if (stringmatch(pattern,_name,1)) { \
        addReplyBulkCString(c,_name); \
        addReplyBulkCString(c,_var ? "yes" : "no"); \
        matches++; \
    } \
} while(0);

#define config_get_numerical_field(_name,_var) do { \
    if (stringmatch(pattern,_name,1)) { \
        ll2string(buf,sizeof(buf),_var); \
        addReplyBulkCString(c,_name); \
        addReplyBulkCString(c,buf); \
        matches++; \
    } \
} while(0);

#define config_get_enum_field(_name,_var,_enumvar) do { \
    if (stringmatch(pattern,_name,1)) { \
        addReplyBulkCString(c,_name); \
        addReplyBulkCString(c,configEnumGetNameOrUnknown(_enumvar,_var)); \
        matches++; \
    } \
} while(0);

/* CONFIG GET <pattern>: reply with a map of every parameter whose name
 * matches the glob pattern, paired with its current value. */
void configGetCommand(client *c) {
    robj *o = c->argv[2];
    void *replylen = addReplyDeferredLen(c);
    char *pattern = ptrFromObj(o);
    char buf[128];
    int matches = 0;
    serverAssertWithInfo(c,o,sdsEncodedObject(o));

    /* String values */
    config_get_string_field("dbfilename",server.rdb_filename);
    config_get_string_field("masteruser",server.masteruser);
    config_get_string_field("masterauth",server.masterauth);
    config_get_string_field("cluster-announce-ip",server.cluster_announce_ip);
    config_get_string_field("unixsocket",server.unixsocket);
    config_get_string_field("logfile",server.logfile);
    config_get_string_field("aclfile",server.acl_filename);
    config_get_string_field("pidfile",server.pidfile);
    config_get_string_field("slave-announce-ip",server.slave_announce_ip);
    config_get_string_field("replica-announce-ip",server.slave_announce_ip);

    /* Numerical values */
    config_get_numerical_field("maxmemory",server.maxmemory);
    config_get_numerical_field("proto-max-bulk-len",server.proto_max_bulk_len);
    config_get_numerical_field("client-query-buffer-limit",server.client_max_querybuf_len);
    config_get_numerical_field("maxmemory-samples",server.maxmemory_samples);
    config_get_numerical_field("lfu-log-factor",server.lfu_log_factor);
    config_get_numerical_field("lfu-decay-time",server.lfu_decay_time);
    config_get_numerical_field("timeout",server.maxidletime);
    config_get_numerical_field("active-defrag-threshold-lower",server.active_defrag_threshold_lower);
    config_get_numerical_field("active-defrag-threshold-upper",server.active_defrag_threshold_upper);
    config_get_numerical_field("active-defrag-ignore-bytes",server.active_defrag_ignore_bytes);
    config_get_numerical_field("active-defrag-cycle-min",server.active_defrag_cycle_min);
    config_get_numerical_field("active-defrag-cycle-max",server.active_defrag_cycle_max);
    config_get_numerical_field("active-defrag-max-scan-fields",server.active_defrag_max_scan_fields);
    config_get_numerical_field("auto-aof-rewrite-percentage",
            server.aof_rewrite_perc);
    config_get_numerical_field("auto-aof-rewrite-min-size",
            server.aof_rewrite_min_size);
    config_get_numerical_field("hash-max-ziplist-entries",
            server.hash_max_ziplist_entries);
    config_get_numerical_field("hash-max-ziplist-value",
            server.hash_max_ziplist_value);
    config_get_numerical_field("stream-node-max-bytes",
            server.stream_node_max_bytes);
    config_get_numerical_field("stream-node-max-entries",
            server.stream_node_max_entries);
    config_get_numerical_field("list-max-ziplist-size",
            server.list_max_ziplist_size);
    config_get_numerical_field("list-compress-depth",
            server.list_compress_depth);
    config_get_numerical_field("set-max-intset-entries",
            server.set_max_intset_entries);
    config_get_numerical_field("zset-max-ziplist-entries",
            server.zset_max_ziplist_entries);
    config_get_numerical_field("zset-max-ziplist-value",
            server.zset_max_ziplist_value);
    config_get_numerical_field("hll-sparse-max-bytes",
            server.hll_sparse_max_bytes);
    config_get_numerical_field("lua-time-limit",server.lua_time_limit);
    config_get_numerical_field("slowlog-log-slower-than",
            server.slowlog_log_slower_than);
    config_get_numerical_field("latency-monitor-threshold",
            server.latency_monitor_threshold);
    config_get_numerical_field("slowlog-max-len",
            server.slowlog_max_len);
    config_get_numerical_field("port",server.port);
    config_get_numerical_field("cluster-announce-port",server.cluster_announce_port);
    config_get_numerical_field("cluster-announce-bus-port",server.cluster_announce_bus_port);
    config_get_numerical_field("tcp-backlog",server.tcp_backlog);
    config_get_numerical_field("databases",server.dbnum);
    config_get_numerical_field("repl-ping-slave-period",server.repl_ping_slave_period);
    config_get_numerical_field("repl-ping-replica-period",server.repl_ping_slave_period);
    config_get_numerical_field("repl-timeout",server.repl_timeout);
    config_get_numerical_field("repl-backlog-size",server.repl_backlog_size);
    config_get_numerical_field("repl-backlog-ttl",server.repl_backlog_time_limit);
    config_get_numerical_field("maxclients",server.maxclients);
    config_get_numerical_field("watchdog-period",server.watchdog_period);
    config_get_numerical_field("slave-priority",server.slave_priority);
    config_get_numerical_field("replica-priority",server.slave_priority);
    config_get_numerical_field("slave-announce-port",server.slave_announce_port);
    config_get_numerical_field("replica-announce-port",server.slave_announce_port);
    config_get_numerical_field("min-slaves-to-write",server.repl_min_slaves_to_write);
    config_get_numerical_field("min-replicas-to-write",server.repl_min_slaves_to_write);
    config_get_numerical_field("min-slaves-max-lag",server.repl_min_slaves_max_lag);
    config_get_numerical_field("min-replicas-max-lag",server.repl_min_slaves_max_lag);
    config_get_numerical_field("hz",server.config_hz);
    config_get_numerical_field("cluster-node-timeout",server.cluster_node_timeout);
    config_get_numerical_field("cluster-migration-barrier",server.cluster_migration_barrier);
    config_get_numerical_field("cluster-slave-validity-factor",server.cluster_slave_validity_factor);
    config_get_numerical_field("cluster-replica-validity-factor",server.cluster_slave_validity_factor);
    config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay);
    config_get_numerical_field("tcp-keepalive",server.tcpkeepalive);

    /* Bool (yes/no) values */
    config_get_bool_field("cluster-require-full-coverage",
            server.cluster_require_full_coverage);
    config_get_bool_field("cluster-slave-no-failover",
            server.cluster_slave_no_failover);
    config_get_bool_field("cluster-replica-no-failover",
            server.cluster_slave_no_failover);
    config_get_bool_field("no-appendfsync-on-rewrite",
            server.aof_no_fsync_on_rewrite);
    config_get_bool_field("slave-serve-stale-data",
            server.repl_serve_stale_data);
    config_get_bool_field("replica-serve-stale-data",
            server.repl_serve_stale_data);
    config_get_bool_field("slave-read-only",
            server.repl_slave_ro);
    config_get_bool_field("replica-read-only",
            server.repl_slave_ro);
    config_get_bool_field("slave-ignore-maxmemory",
            server.repl_slave_ignore_maxmemory);
    config_get_bool_field("replica-ignore-maxmemory",
            server.repl_slave_ignore_maxmemory);
    config_get_bool_field("stop-writes-on-bgsave-error",
            server.stop_writes_on_bgsave_err);
    config_get_bool_field("daemonize", server.daemonize);
    config_get_bool_field("rdbcompression", server.rdb_compression);
    config_get_bool_field("rdbchecksum", server.rdb_checksum);
    config_get_bool_field("activerehashing", server.activerehashing);
    config_get_bool_field("activedefrag", server.active_defrag_enabled);
    config_get_bool_field("protected-mode", server.protected_mode);
    config_get_bool_field("gopher-enabled", server.gopher_enabled);
    config_get_bool_field("repl-disable-tcp-nodelay",
            server.repl_disable_tcp_nodelay);
    config_get_bool_field("repl-diskless-sync",
            server.repl_diskless_sync);
    config_get_bool_field("aof-rewrite-incremental-fsync",
            server.aof_rewrite_incremental_fsync);
    config_get_bool_field("rdb-save-incremental-fsync",
            server.rdb_save_incremental_fsync);
    config_get_bool_field("aof-load-truncated",
            server.aof_load_truncated);
    config_get_bool_field("aof-use-rdb-preamble",
            server.aof_use_rdb_preamble);
    config_get_bool_field("lazyfree-lazy-eviction",
            server.lazyfree_lazy_eviction);
    config_get_bool_field("lazyfree-lazy-expire",
            server.lazyfree_lazy_expire);
    config_get_bool_field("lazyfree-lazy-server-del",
            server.lazyfree_lazy_server_del);
    config_get_bool_field("slave-lazy-flush",
            server.repl_slave_lazy_flush);
    config_get_bool_field("replica-lazy-flush",
            server.repl_slave_lazy_flush);
    config_get_bool_field("dynamic-hz",
            server.dynamic_hz);

    /* Enum values */
    config_get_enum_field("maxmemory-policy",
            server.maxmemory_policy,maxmemory_policy_enum);
    config_get_enum_field("loglevel",
            server.verbosity,loglevel_enum);
    config_get_enum_field("supervised",
            server.supervised_mode,supervised_mode_enum);
    config_get_enum_field("appendfsync",
            server.aof_fsync,aof_fsync_enum);
    config_get_enum_field("syslog-facility",
            server.syslog_facility,syslog_facility_enum);

    /* Everything we can't handle with macros follows. */
    if (stringmatch(pattern,"appendonly",1)) {
        addReplyBulkCString(c,"appendonly");
        addReplyBulkCString(c,server.aof_state == AOF_OFF ? "no" : "yes");
        matches++;
    }
    if (stringmatch(pattern,"dir",1)) {
        char buf[1024];

        if (getcwd(buf,sizeof(buf)) == NULL)
            buf[0] = '\0';

        addReplyBulkCString(c,"dir");
        addReplyBulkCString(c,buf);
        matches++;
    }
    if (stringmatch(pattern,"save",1)) {
        sds buf = sdsempty();
        int j;

        /* Render the save points as "seconds changes" pairs. */
        for (j = 0; j < server.saveparamslen; j++) {
            buf = sdscatprintf(buf,"%jd %d",
                    (intmax_t)server.saveparams[j].seconds,
                    server.saveparams[j].changes);
            if (j != server.saveparamslen-1)
                buf = sdscatlen(buf," ",1);
        }
        addReplyBulkCString(c,"save");
        addReplyBulkCString(c,buf);
        sdsfree(buf);
        matches++;
    }
    if (stringmatch(pattern,"client-output-buffer-limit",1)) {
        sds buf = sdsempty();
        int j;

        for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
            buf = sdscatprintf(buf,"%s %llu %llu %ld",
                    getClientTypeName(j),
                    server.client_obuf_limits[j].hard_limit_bytes,
                    server.client_obuf_limits[j].soft_limit_bytes,
                    (long) server.client_obuf_limits[j].soft_limit_seconds);
            if (j != CLIENT_TYPE_OBUF_COUNT-1)
                buf = sdscatlen(buf," ",1);
        }
        addReplyBulkCString(c,"client-output-buffer-limit");
        addReplyBulkCString(c,buf);
        sdsfree(buf);
        matches++;
    }
    if (stringmatch(pattern,"unixsocketperm",1)) {
        char buf[32];
        snprintf(buf,sizeof(buf),"%o",server.unixsocketperm);
        addReplyBulkCString(c,"unixsocketperm");
        addReplyBulkCString(c,buf);
        matches++;
    }
    if (stringmatch(pattern,"slaveof",1) ||
        stringmatch(pattern,"replicaof",1))
    {
        char *optname = stringmatch(pattern,"slaveof",1) ?
                        "slaveof" : "replicaof";
        char buf[256];

        addReplyBulkCString(c,optname);
        if (server.masterhost)
            snprintf(buf,sizeof(buf),"%s %d",
                server.masterhost, server.masterport);
        else
            buf[0] = '\0';
        addReplyBulkCString(c,buf);
        matches++;
    }
    if (stringmatch(pattern,"notify-keyspace-events",1)) {
        robj *flagsobj = createObject(OBJ_STRING,
            keyspaceEventsFlagsToString(server.notify_keyspace_events));

        addReplyBulkCString(c,"notify-keyspace-events");
        addReplyBulk(c,flagsobj);
        decrRefCount(flagsobj);
        matches++;
    }
    if (stringmatch(pattern,"bind",1)) {
        sds aux = sdsjoin(server.bindaddr,server.bindaddr_count," ");

        addReplyBulkCString(c,"bind");
        addReplyBulkCString(c,aux);
        sdsfree(aux);
        matches++;
    }
    if (stringmatch(pattern,"requirepass",1)) {
        addReplyBulkCString(c,"requirepass");
        sds password = ACLDefaultUserFirstPassword();
        if (password) {
            addReplyBulkCBuffer(c,password,sdslen(password));
        } else {
            addReplyBulkCString(c,"");
        }
        matches++;
    }
    setDeferredMapLen(c,replylen,matches);
}

/*-----------------------------------------------------------------------------
 * CONFIG REWRITE implementation
 *----------------------------------------------------------------------------*/

#define REDIS_CONFIG_REWRITE_SIGNATURE "# Generated by CONFIG REWRITE"

/* We use the following dictionary type to store where a configuration
 * option is mentioned in the old configuration file, so it's
 * like "maxmemory" -> list of line numbers (first line is zero). */
uint64_t dictSdsCaseHash(const void *key);
int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2);
void dictSdsDestructor(void *privdata, void *val);
void dictListDestructor(void *privdata, void *val);

/* Sentinel config rewriting is implemented inside sentinel.c by
 * rewriteConfigSentinelOption().
*/ void rewriteConfigSentinelOption(struct rewriteConfigState *state); dictType optionToLineDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor /* val destructor */ }; dictType optionSetDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL /* val destructor */ }; /* The config rewrite state. */ struct rewriteConfigState { dict *option_to_line; /* Option -> list of config file lines map */ dict *rewritten; /* Dictionary of already processed options */ int numlines; /* Number of lines in current config */ sds *lines; /* Current lines as an array of sds strings */ int has_tail; /* True if we already added directives that were not present in the original config file. */ }; /* Append the new line to the current configuration state. */ void rewriteConfigAppendLine(struct rewriteConfigState *state, sds line) { state->lines = zrealloc(state->lines, sizeof(char*) * (state->numlines+1), MALLOC_LOCAL); state->lines[state->numlines++] = line; } /* Populate the option -> list of line numbers map. */ void rewriteConfigAddLineNumberToOption(struct rewriteConfigState *state, sds option, int linenum) { list *l = dictFetchValue(state->option_to_line,option); if (l == NULL) { l = listCreate(); dictAdd(state->option_to_line,sdsdup(option),l); } listAddNodeTail(l,(void*)(long)linenum); } /* Add the specified option to the set of processed options. * This is useful as only unused lines of processed options will be blanked * in the config file, while options the rewrite process does not understand * remain untouched. 
*/ void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, const char *option) { sds opt = sdsnew(option); if (dictAdd(state->rewritten,opt,NULL) != DICT_OK) sdsfree(opt); } /* Read the old file, split it into lines to populate a newly created * config rewrite state, and return it to the caller. * * If it is impossible to read the old file, NULL is returned. * If the old file does not exist at all, an empty state is returned. */ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { FILE *fp = fopen(path,"r"); struct rewriteConfigState *state = zmalloc(sizeof(*state), MALLOC_LOCAL); char buf[CONFIG_MAX_LINE+1]; int linenum = -1; if (fp == NULL && errno != ENOENT) return NULL; state->option_to_line = dictCreate(&optionToLineDictType,NULL); state->rewritten = dictCreate(&optionSetDictType,NULL); state->numlines = 0; state->lines = NULL; state->has_tail = 0; if (fp == NULL) return state; /* Read the old file line by line, populate the state. */ while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) { int argc; sds *argv; sds line = sdstrim(sdsnew(buf),"\r\n\t "); linenum++; /* Zero based, so we init at -1 */ /* Handle comments and empty lines. */ if (line[0] == '#' || line[0] == '\0') { if (!state->has_tail && !strcmp(line,REDIS_CONFIG_REWRITE_SIGNATURE)) state->has_tail = 1; rewriteConfigAppendLine(state,line); continue; } /* Not a comment, split into arguments. */ argv = sdssplitargs(line,&argc); if (argv == NULL) { /* Apparently the line is unparsable for some reason, for * instance it may have unbalanced quotes. Load it as a * comment. */ sds aux = sdsnew("# ??? "); aux = sdscatsds(aux,line); sdsfree(line); rewriteConfigAppendLine(state,aux); continue; } sdstolower(argv[0]); /* We only want lowercase config directives. */ /* Now we populate the state according to the content of this line. * Append the line and populate the option -> line numbers map. 
 */
        rewriteConfigAppendLine(state,line);

        /* Translate options using the word "slave" to the corresponding name
         * "replica", before adding such option to the config name -> lines
         * mapping. */
        char *p = strstr(argv[0],"slave");
        if (p) {
            sds alt = sdsempty();
            alt = sdscatlen(alt,argv[0],p-argv[0]);;
            alt = sdscatlen(alt,"replica",7);
            alt = sdscatlen(alt,p+5,strlen(p+5)); /* 5 == strlen("slave") */
            sdsfree(argv[0]);
            argv[0] = alt;
        }
        rewriteConfigAddLineNumberToOption(state,argv[0],linenum);
        sdsfreesplitres(argv,argc);
    }
    fclose(fp);
    return state;
}

/* Rewrite the specified configuration option with the new "line".
 * It progressively uses lines of the file that were already used for the same
 * configuration option in the old version of the file, removing that line from
 * the map of options -> line numbers.
 *
 * If there are no lines associated with a given configuration option and
 * "force" is non-zero, the line is appended to the configuration file.
 * Usually "force" is true when an option has not its default value, so it
 * must be rewritten even if not present previously.
 *
 * The first time a line is appended into a configuration file, a comment
 * is added to show that starting from that point the config file was generated
 * by CONFIG REWRITE.
 *
 * "line" is either used, or freed, so the caller does not need to free it
 * in any way. */
void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force) {
    sds o = sdsnew(option);
    list *l = dictFetchValue(state->option_to_line,o);

    rewriteConfigMarkAsProcessed(state,option);

    if (!l && !force) {
        /* Option not used previously, and we are not forced to use it. */
        sdsfree(line);
        sdsfree(o);
        return;
    }

    if (l) {
        listNode *ln = listFirst(l);
        int linenum = (long) ln->value;

        /* There are still lines in the old configuration file we can reuse
         * for this option. Replace the line with the new one.
 */
        listDelNode(l,ln);
        /* Once all old lines for this option are consumed, drop the map
         * entry so later calls fall through to the append path. */
        if (listLength(l) == 0) dictDelete(state->option_to_line,o);
        sdsfree(state->lines[linenum]);
        state->lines[linenum] = line;
    } else {
        /* Append a new line. */
        if (!state->has_tail) {
            /* First appended directive: emit the CONFIG REWRITE signature
             * comment before it. */
            rewriteConfigAppendLine(state, sdsnew(REDIS_CONFIG_REWRITE_SIGNATURE));
            state->has_tail = 1;
        }
        rewriteConfigAppendLine(state,line);
    }
    sdsfree(o);
}

/* Write the long long 'bytes' value as a string in a way that is parsable
 * inside redis.conf. If possible uses the GB, MB, KB notation.
 * Returns the snprintf() result (chars that would have been written). */
int rewriteConfigFormatMemory(char *buf, size_t len, long long bytes) {
    int gb = 1024*1024*1024;
    int mb = 1024*1024;
    int kb = 1024;

    /* Use the largest unit that divides the value exactly; otherwise emit
     * the raw byte count. */
    if (bytes && (bytes % gb) == 0) {
        return snprintf(buf,len,"%lldgb",bytes/gb);
    } else if (bytes && (bytes % mb) == 0) {
        return snprintf(buf,len,"%lldmb",bytes/mb);
    } else if (bytes && (bytes % kb) == 0) {
        return snprintf(buf,len,"%lldkb",bytes/kb);
    } else {
        return snprintf(buf,len,"%lld",bytes);
    }
}

/* Rewrite a simple "option-name <bytes>" configuration option.
 * Only forced into the file when 'value' differs from 'defvalue'. */
void rewriteConfigBytesOption(struct rewriteConfigState *state, char *option, long long value, long long defvalue) {
    char buf[64];
    int force = value != defvalue;
    sds line;

    rewriteConfigFormatMemory(buf,sizeof(buf),value);
    line = sdscatprintf(sdsempty(),"%s %s",option,buf);
    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite a yes/no option. */
void rewriteConfigYesNoOption(struct rewriteConfigState *state, char *option, int value, int defvalue) {
    int force = value != defvalue;
    sds line = sdscatprintf(sdsempty(),"%s %s",option,
        value ? "yes" : "no");

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite a string option. */
void rewriteConfigStringOption(struct rewriteConfigState *state, char *option, char *value, char *defvalue) {
    int force = 1;
    sds line;

    /* String options set to NULL need to be not present at all in the
     * configuration file to be set to NULL again at the next reboot.
 */
    if (value == NULL) {
        rewriteConfigMarkAsProcessed(state,option);
        return;
    }

    /* Set force to zero if the value is set to its default. */
    if (defvalue && strcmp(value,defvalue) == 0) force = 0;

    line = sdsnew(option);
    line = sdscatlen(line, " ", 1);
    /* sdscatrepr() quotes/escapes the value so it survives re-parsing. */
    line = sdscatrepr(line, value, strlen(value));

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite a numerical (long long range) option. */
void rewriteConfigNumericalOption(struct rewriteConfigState *state, char *option, long long value, long long defvalue) {
    int force = value != defvalue;
    sds line = sdscatprintf(sdsempty(),"%s %lld",option,value);

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite a octal option (e.g. unix socket permissions). */
void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, int value, int defvalue) {
    int force = value != defvalue;
    sds line = sdscatprintf(sdsempty(),"%s %o",option,value);

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite an enumeration option. It takes as usually state and option name,
 * and in addition the enumeration array and the default value for the
 * option. */
void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int value, configEnum *ce, int defval) {
    sds line;
    const char *name = configEnumGetNameOrUnknown(ce,value);
    int force = value != defval;

    line = sdscatprintf(sdsempty(),"%s %s",option,name);
    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite the syslog-facility option. */
void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) {
    int value = server.syslog_facility;
    int force = value != LOG_LOCAL0; /* LOG_LOCAL0 is the compiled-in default. */
    const char *name = NULL, *option = "syslog-facility";
    sds line;

    name = configEnumGetNameOrUnknown(syslog_facility_enum,value);
    line = sdscatprintf(sdsempty(),"%s %s",option,name);
    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite the save option.
 */
void rewriteConfigSaveOption(struct rewriteConfigState *state) {
    int j;
    sds line;

    /* Note that if there are no save parameters at all, all the current
     * config line with "save" will be detected as orphaned and deleted,
     * resulting into no RDB persistence as expected. */
    for (j = 0; j < server.saveparamslen; j++) {
        line = sdscatprintf(sdsempty(),"save %ld %d",
            (long) server.saveparams[j].seconds, server.saveparams[j].changes);
        rewriteConfigRewriteLine(state,"save",line,1);
    }
    /* Mark "save" as processed in case server.saveparamslen is zero. */
    rewriteConfigMarkAsProcessed(state,"save");
}

/* Rewrite the user option (one "user ..." line per defined ACL user). */
void rewriteConfigUserOption(struct rewriteConfigState *state) {
    /* If there is a user file defined we just mark this configuration
     * directive as processed, so that all the lines containing users
     * inside the config file gets discarded. */
    if (server.acl_filename[0] != '\0') {
        rewriteConfigMarkAsProcessed(state,"user");
        return;
    }

    /* Otherwise scan the list of users and rewrite every line. Note that
     * in case the list here is empty, the effect will just be to comment
     * all the users directive inside the config file. */
    raxIterator ri;
    raxStart(&ri,Users);
    raxSeek(&ri,"^",NULL,0);
    while(raxNext(&ri)) {
        user *u = ri.data;
        sds line = sdsnew("user ");
        line = sdscatsds(line,u->name);
        line = sdscatlen(line," ",1);
        sds descr = ACLDescribeUser(u);
        line = sdscatsds(line,descr);
        sdsfree(descr);
        rewriteConfigRewriteLine(state,"user",line,1);
    }
    raxStop(&ri);

    /* Mark "user" as processed in case there are no defined users. */
    rewriteConfigMarkAsProcessed(state,"user");
}

/* Rewrite the dir option, always using absolute paths.*/
void rewriteConfigDirOption(struct rewriteConfigState *state) {
    char cwd[1024];

    if (getcwd(cwd,sizeof(cwd)) == NULL) {
        rewriteConfigMarkAsProcessed(state,"dir");
        return; /* no rewrite on error. */
    }
    rewriteConfigStringOption(state,"dir",cwd,NULL);
}

/* Rewrite the slaveof option.
 */
void rewriteConfigSlaveofOption(struct rewriteConfigState *state, char *option) {
    sds line;

    /* If this is a master, we want all the slaveof config options
     * in the file to be removed. Note that if this is a cluster instance
     * we don't want a slaveof directive inside redis.conf. */
    if (server.cluster_enabled || server.masterhost == NULL) {
        rewriteConfigMarkAsProcessed(state,option);
        return;
    }
    line = sdscatprintf(sdsempty(),"%s %s %d", option,
        server.masterhost, server.masterport);
    rewriteConfigRewriteLine(state,option,line,1);
}

/* Rewrite the notify-keyspace-events option. */
void rewriteConfigNotifykeyspaceeventsOption(struct rewriteConfigState *state) {
    /* Only forced into the file when some event class is enabled. */
    int force = server.notify_keyspace_events != 0;
    char *option = "notify-keyspace-events";
    sds line, flags;

    flags = keyspaceEventsFlagsToString(server.notify_keyspace_events);
    line = sdsnew(option);
    line = sdscatlen(line, " ", 1);
    /* Quote the flags string so an empty value is still parsable. */
    line = sdscatrepr(line, flags, sdslen(flags));
    sdsfree(flags);
    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite the client-output-buffer-limit option.
 */
void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state) {
    int j;
    char *option = "client-output-buffer-limit";

    /* One line per client class (normal / replica / pubsub). */
    for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
        /* Force only when any of the three limits differs from defaults. */
        int force = (server.client_obuf_limits[j].hard_limit_bytes !=
                    clientBufferLimitsDefaults[j].hard_limit_bytes) ||
                    (server.client_obuf_limits[j].soft_limit_bytes !=
                    clientBufferLimitsDefaults[j].soft_limit_bytes) ||
                    (server.client_obuf_limits[j].soft_limit_seconds !=
                    clientBufferLimitsDefaults[j].soft_limit_seconds);
        sds line;
        char hard[64], soft[64];

        rewriteConfigFormatMemory(hard,sizeof(hard),
            server.client_obuf_limits[j].hard_limit_bytes);
        rewriteConfigFormatMemory(soft,sizeof(soft),
            server.client_obuf_limits[j].soft_limit_bytes);

        const char *typename = getClientTypeName(j);
        /* Write the modern class name in the rewritten file. */
        if (!strcmp(typename,"slave")) typename = "replica";
        line = sdscatprintf(sdsempty(),"%s %s %s %s %ld",
                option, typename, hard, soft,
                (long) server.client_obuf_limits[j].soft_limit_seconds);
        rewriteConfigRewriteLine(state,option,line,force);
    }
}

/* Rewrite the bind option. */
void rewriteConfigBindOption(struct rewriteConfigState *state) {
    int force = 1;
    sds line, addresses;
    char *option = "bind";

    /* Nothing to rewrite if we don't have bind addresses. */
    if (server.bindaddr_count == 0) {
        rewriteConfigMarkAsProcessed(state,option);
        return;
    }

    /* Rewrite as bind <addr1> <addr2> ... <addrN> */
    addresses = sdsjoin(server.bindaddr,server.bindaddr_count," ");
    line = sdsnew(option);
    line = sdscatlen(line, " ", 1);
    line = sdscatsds(line, addresses);
    sdsfree(addresses);

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Rewrite the requirepass option. */
void rewriteConfigRequirepassOption(struct rewriteConfigState *state, char *option) {
    int force = 1;
    sds line;
    sds password = ACLDefaultUserFirstPassword();

    /* If there is no password set, we don't want the requirepass option
     * to be present in the configuration at all.
 */
    if (password == NULL) {
        rewriteConfigMarkAsProcessed(state,option);
        return;
    }
    line = sdsnew(option);
    line = sdscatlen(line, " ", 1);
    line = sdscatsds(line, password);

    rewriteConfigRewriteLine(state,option,line,force);
}

/* Glue together the configuration lines in the current configuration
 * rewrite state into a single string, stripping multiple empty lines.
 * Caller owns (and must sdsfree) the returned string. */
sds rewriteConfigGetContentFromState(struct rewriteConfigState *state) {
    sds content = sdsempty();
    int j, was_empty = 0;

    for (j = 0; j < state->numlines; j++) {
        /* Every cluster of empty lines is turned into a single empty line. */
        if (sdslen(state->lines[j]) == 0) {
            if (was_empty) continue;
            was_empty = 1;
        } else {
            was_empty = 0;
        }
        content = sdscatsds(content,state->lines[j]);
        content = sdscatlen(content,"\n",1);
    }
    return content;
}

/* Free the configuration rewrite state. */
void rewriteConfigReleaseState(struct rewriteConfigState *state) {
    sdsfreesplitres(state->lines,state->numlines);
    dictRelease(state->option_to_line);
    dictRelease(state->rewritten);
    zfree(state);
}

/* At the end of the rewrite process the state contains the remaining
 * map between "option name" => "lines in the original config file".
 * Lines used by the rewrite process were removed by the function
 * rewriteConfigRewriteLine(), all the other lines are "orphaned" and
 * should be replaced by empty lines.
 *
 * This function does just this, iterating all the option names and
 * blanking all the lines still associated. */
void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) {
    dictIterator *di = dictGetIterator(state->option_to_line);
    dictEntry *de;

    while((de = dictNext(di)) != NULL) {
        list *l = dictGetVal(de);
        sds option = dictGetKey(de);

        /* Don't blank lines about options the rewrite process
         * don't understand.
 */
        if (dictFind(state->rewritten,option) == NULL) {
            serverLog(LL_DEBUG,"Not rewritten option: %s", option);
            continue;
        }

        /* Blank every remaining (unused) line of this processed option. */
        while(listLength(l)) {
            listNode *ln = listFirst(l);
            int linenum = (long) ln->value;

            sdsfree(state->lines[linenum]);
            state->lines[linenum] = sdsempty();
            listDelNode(l,ln);
        }
    }
    dictReleaseIterator(di);
}

/* This function overwrites the old configuration file with the new content.
 *
 * 1) The old file length is obtained.
 * 2) If the new content is smaller, padding is added.
 * 3) A single write(2) call is used to replace the content of the file.
 * 4) Later the file is truncated to the length of the new content.
 *
 * This way we are sure the file is left in a consistent state even if the
 * process is stopped between any of the four operations.
 *
 * The function returns 0 on success, otherwise -1 is returned and errno
 * set accordingly. */
int rewriteConfigOverwriteFile(char *configfile, sds content) {
    int retval = 0;
    int fd = open(configfile,O_RDWR|O_CREAT,0644);
    int content_size = sdslen(content), padding = 0;
    struct stat sb;
    sds content_padded;

    /* 1) Open the old file (or create a new one if it does not
     *    exist), get the size. */
    if (fd == -1) return -1; /* errno set by open(). */
    if (fstat(fd,&sb) == -1) {
        close(fd);
        return -1; /* errno set by fstat(). */
    }

    /* 2) Pad the content at least match the old file size. */
    content_padded = sdsdup(content);
    if (content_size < sb.st_size) {
        /* If the old file was bigger, pad the content with
         * a newline plus as many "#" chars as required. */
        padding = sb.st_size - content_size;
        content_padded = sdsgrowzero(content_padded,sb.st_size);
        content_padded[content_size] = '\n';
        memset(content_padded+content_size+1,'#',padding-1);
    }

    /* 3) Write the new content using a single write(2).
     * NOTE(review): strlen() here would stop at any embedded NUL byte in the
     * content; sdslen(content_padded) looks like the intended length —
     * confirm config content can never contain NULs. */
    if (write(fd,content_padded,strlen(content_padded)) == -1) {
        retval = -1;
        goto cleanup;
    }

    /* 4) Truncate the file to the right length if we used padding. */
    if (padding) {
        if (ftruncate(fd,content_size) == -1) {
            /* Non critical error...
 */
        }
    }

cleanup:
    sdsfree(content_padded);
    close(fd);
    return retval;
}

/* Rewrite the configuration file at "path".
 * If the configuration file already exists, we try at best to retain comments
 * and overall structure.
 *
 * Configuration parameters that are at their default value, unless already
 * explicitly included in the old configuration file, are not rewritten.
 *
 * On error -1 is returned and errno is set accordingly, otherwise 0. */
int rewriteConfig(char *path) {
    struct rewriteConfigState *state;
    sds newcontent;
    int retval;

    /* Step 1: read the old config into our rewrite state. */
    if ((state = rewriteConfigReadOldFile(path)) == NULL) return -1;

    /* Step 2: rewrite every single option, replacing or appending it inside
     * the rewrite state. */
    rewriteConfigYesNoOption(state,"daemonize",server.daemonize,0);
    rewriteConfigStringOption(state,"pidfile",server.pidfile,CONFIG_DEFAULT_PID_FILE);
    rewriteConfigNumericalOption(state,"port",server.port,CONFIG_DEFAULT_SERVER_PORT);
    rewriteConfigNumericalOption(state,"cluster-announce-port",server.cluster_announce_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_PORT);
    rewriteConfigNumericalOption(state,"cluster-announce-bus-port",server.cluster_announce_bus_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_BUS_PORT);
    rewriteConfigNumericalOption(state,"tcp-backlog",server.tcp_backlog,CONFIG_DEFAULT_TCP_BACKLOG);
    rewriteConfigBindOption(state);
    rewriteConfigStringOption(state,"unixsocket",server.unixsocket,NULL);
    rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM);
    rewriteConfigNumericalOption(state,"timeout",server.maxidletime,CONFIG_DEFAULT_CLIENT_TIMEOUT);
    rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE);
    rewriteConfigNumericalOption(state,"replica-announce-port",server.slave_announce_port,CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT);
    rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY);
    rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE);
    rewriteConfigStringOption(state,"aclfile",server.acl_filename,CONFIG_DEFAULT_ACL_FILENAME);
    rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED);
    rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,CONFIG_DEFAULT_SYSLOG_IDENT);
    rewriteConfigSyslogfacilityOption(state);
    rewriteConfigSaveOption(state);
    rewriteConfigUserOption(state);
    rewriteConfigNumericalOption(state,"databases",server.dbnum,CONFIG_DEFAULT_DBNUM);
    rewriteConfigYesNoOption(state,"stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err,CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR);
    rewriteConfigYesNoOption(state,"rdbcompression",server.rdb_compression,CONFIG_DEFAULT_RDB_COMPRESSION);
    rewriteConfigYesNoOption(state,"rdbchecksum",server.rdb_checksum,CONFIG_DEFAULT_RDB_CHECKSUM);
    rewriteConfigStringOption(state,"dbfilename",server.rdb_filename,CONFIG_DEFAULT_RDB_FILENAME);
    rewriteConfigDirOption(state);
    rewriteConfigSlaveofOption(state,"replicaof");
    rewriteConfigStringOption(state,"replica-announce-ip",server.slave_announce_ip,CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP);
    rewriteConfigStringOption(state,"masteruser",server.masteruser,NULL);
    rewriteConfigStringOption(state,"masterauth",server.masterauth,NULL);
    rewriteConfigStringOption(state,"cluster-announce-ip",server.cluster_announce_ip,NULL);
    rewriteConfigYesNoOption(state,"replica-serve-stale-data",server.repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA);
    rewriteConfigYesNoOption(state,"replica-read-only",server.repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY);
    rewriteConfigYesNoOption(state,"replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory,CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY);
    rewriteConfigNumericalOption(state,"repl-ping-replica-period",server.repl_ping_slave_period,CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD);
    rewriteConfigNumericalOption(state,"repl-timeout",server.repl_timeout,CONFIG_DEFAULT_REPL_TIMEOUT);
    rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,CONFIG_DEFAULT_REPL_BACKLOG_SIZE);
    rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT);
    rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY);
    rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,CONFIG_DEFAULT_REPL_DISKLESS_SYNC);
    rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY);
    rewriteConfigNumericalOption(state,"replica-priority",server.slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY);
    rewriteConfigNumericalOption(state,"min-replicas-to-write",server.repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE);
    rewriteConfigNumericalOption(state,"min-replicas-max-lag",server.repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG);
    rewriteConfigRequirepassOption(state,"requirepass");
    rewriteConfigNumericalOption(state,"maxclients",server.maxclients,CONFIG_DEFAULT_MAX_CLIENTS);
    rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,CONFIG_DEFAULT_MAXMEMORY);
    rewriteConfigBytesOption(state,"proto-max-bulk-len",server.proto_max_bulk_len,CONFIG_DEFAULT_PROTO_MAX_BULK_LEN);
    rewriteConfigBytesOption(state,"client-query-buffer-limit",server.client_max_querybuf_len,PROTO_MAX_QUERYBUF_LEN);
    rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,CONFIG_DEFAULT_MAXMEMORY_POLICY);
    rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,CONFIG_DEFAULT_MAXMEMORY_SAMPLES);
    rewriteConfigNumericalOption(state,"lfu-log-factor",server.lfu_log_factor,CONFIG_DEFAULT_LFU_LOG_FACTOR);
    rewriteConfigNumericalOption(state,"lfu-decay-time",server.lfu_decay_time,CONFIG_DEFAULT_LFU_DECAY_TIME);
    rewriteConfigNumericalOption(state,"active-defrag-threshold-lower",server.active_defrag_threshold_lower,CONFIG_DEFAULT_DEFRAG_THRESHOLD_LOWER);
    rewriteConfigNumericalOption(state,"active-defrag-threshold-upper",server.active_defrag_threshold_upper,CONFIG_DEFAULT_DEFRAG_THRESHOLD_UPPER);
    rewriteConfigBytesOption(state,"active-defrag-ignore-bytes",server.active_defrag_ignore_bytes,CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES);
    rewriteConfigNumericalOption(state,"active-defrag-cycle-min",server.active_defrag_cycle_min,CONFIG_DEFAULT_DEFRAG_CYCLE_MIN);
    rewriteConfigNumericalOption(state,"active-defrag-cycle-max",server.active_defrag_cycle_max,CONFIG_DEFAULT_DEFRAG_CYCLE_MAX);
    rewriteConfigNumericalOption(state,"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS);
    rewriteConfigYesNoOption(state,"appendonly",server.aof_state != AOF_OFF,0);
    rewriteConfigStringOption(state,"appendfilename",server.aof_filename,CONFIG_DEFAULT_AOF_FILENAME);
    rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC);
    rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE);
    rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,AOF_REWRITE_PERC);
    rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,AOF_REWRITE_MIN_SIZE);
    rewriteConfigNumericalOption(state,"lua-time-limit",server.lua_time_limit,LUA_SCRIPT_TIME_LIMIT);
    rewriteConfigYesNoOption(state,"cluster-enabled",server.cluster_enabled,0);
    rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE);
    rewriteConfigYesNoOption(state,"cluster-require-full-coverage",server.cluster_require_full_coverage,CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE);
    rewriteConfigYesNoOption(state,"cluster-replica-no-failover",server.cluster_slave_no_failover,CLUSTER_DEFAULT_SLAVE_NO_FAILOVER);
    rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,CLUSTER_DEFAULT_NODE_TIMEOUT);
    rewriteConfigNumericalOption(state,"cluster-migration-barrier",server.cluster_migration_barrier,CLUSTER_DEFAULT_MIGRATION_BARRIER);
    rewriteConfigNumericalOption(state,"cluster-replica-validity-factor",server.cluster_slave_validity_factor,CLUSTER_DEFAULT_SLAVE_VALIDITY);
    rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN);
    rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD);
    rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,CONFIG_DEFAULT_SLOWLOG_MAX_LEN);
    rewriteConfigNotifykeyspaceeventsOption(state);
    rewriteConfigNumericalOption(state,"hash-max-ziplist-entries",server.hash_max_ziplist_entries,OBJ_HASH_MAX_ZIPLIST_ENTRIES);
    rewriteConfigNumericalOption(state,"hash-max-ziplist-value",server.hash_max_ziplist_value,OBJ_HASH_MAX_ZIPLIST_VALUE);
    rewriteConfigNumericalOption(state,"stream-node-max-bytes",server.stream_node_max_bytes,OBJ_STREAM_NODE_MAX_BYTES);
    rewriteConfigNumericalOption(state,"stream-node-max-entries",server.stream_node_max_entries,OBJ_STREAM_NODE_MAX_ENTRIES);
    rewriteConfigNumericalOption(state,"list-max-ziplist-size",server.list_max_ziplist_size,OBJ_LIST_MAX_ZIPLIST_SIZE);
    rewriteConfigNumericalOption(state,"list-compress-depth",server.list_compress_depth,OBJ_LIST_COMPRESS_DEPTH);
    rewriteConfigNumericalOption(state,"set-max-intset-entries",server.set_max_intset_entries,OBJ_SET_MAX_INTSET_ENTRIES);
    rewriteConfigNumericalOption(state,"zset-max-ziplist-entries",server.zset_max_ziplist_entries,OBJ_ZSET_MAX_ZIPLIST_ENTRIES);
    rewriteConfigNumericalOption(state,"zset-max-ziplist-value",server.zset_max_ziplist_value,OBJ_ZSET_MAX_ZIPLIST_VALUE);
    rewriteConfigNumericalOption(state,"hll-sparse-max-bytes",server.hll_sparse_max_bytes,CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES);
    rewriteConfigYesNoOption(state,"activerehashing",server.activerehashing,CONFIG_DEFAULT_ACTIVE_REHASHING);
    rewriteConfigYesNoOption(state,"activedefrag",server.active_defrag_enabled,CONFIG_DEFAULT_ACTIVE_DEFRAG);
    rewriteConfigYesNoOption(state,"protected-mode",server.protected_mode,CONFIG_DEFAULT_PROTECTED_MODE);
    rewriteConfigYesNoOption(state,"gopher-enabled",server.gopher_enabled,CONFIG_DEFAULT_GOPHER_ENABLED);
    rewriteConfigClientoutputbufferlimitOption(state);
    rewriteConfigNumericalOption(state,"hz",server.config_hz,CONFIG_DEFAULT_HZ);
    rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC);
    rewriteConfigYesNoOption(state,"rdb-save-incremental-fsync",server.rdb_save_incremental_fsync,CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC);
    rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED);
    rewriteConfigYesNoOption(state,"aof-use-rdb-preamble",server.aof_use_rdb_preamble,CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE);
    rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,SUPERVISED_NONE);
    rewriteConfigYesNoOption(state,"lazyfree-lazy-eviction",server.lazyfree_lazy_eviction,CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION);
    rewriteConfigYesNoOption(state,"lazyfree-lazy-expire",server.lazyfree_lazy_expire,CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE);
    rewriteConfigYesNoOption(state,"lazyfree-lazy-server-del",server.lazyfree_lazy_server_del,CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL);
    rewriteConfigYesNoOption(state,"replica-lazy-flush",server.repl_slave_lazy_flush,CONFIG_DEFAULT_SLAVE_LAZY_FLUSH);
    rewriteConfigYesNoOption(state,"dynamic-hz",server.dynamic_hz,CONFIG_DEFAULT_DYNAMIC_HZ);

    /* Rewrite Sentinel config if in Sentinel mode. */
    if (server.sentinel_mode) rewriteConfigSentinelOption(state);

    /* Step 3: remove all the orphaned lines in the old file, that is, lines
     * that were used by a config option and are no longer used, like in case
     * of multiple "save" options or duplicated options. */
    rewriteConfigRemoveOrphaned(state);

    /* Step 4: generate a new configuration file from the modified state
     * and write it into the original file. */
    newcontent = rewriteConfigGetContentFromState(state);
    retval = rewriteConfigOverwriteFile(server.configfile,newcontent);

    sdsfree(newcontent);
    rewriteConfigReleaseState(state);
    return retval;
}

/*-----------------------------------------------------------------------------
 * CONFIG command entry point
 *----------------------------------------------------------------------------*/

void configCommand(client *c) {
    /* Only allow CONFIG GET while loading. */
    if (server.loading && strcasecmp(ptrFromObj(c->argv[1]),"get")) {
        addReplyError(c,"Only CONFIG GET is allowed during loading");
        return;
    }

    if (c->argc == 2 && !strcasecmp(ptrFromObj(c->argv[1]),"help")) {
        const char *help[] = {
"GET <pattern> -- Return parameters matching the glob-like <pattern> and their values.",
"SET <parameter> <value> -- Set parameter to value.",
"RESETSTAT -- Reset statistics reported by INFO.",
"REWRITE -- Rewrite the configuration file.",
NULL
        };
        addReplyHelp(c, help);
    } else if (!strcasecmp(ptrFromObj(c->argv[1]),"set") && c->argc == 4) {
        configSetCommand(c);
    } else if (!strcasecmp(ptrFromObj(c->argv[1]),"get") && c->argc == 3) {
        configGetCommand(c);
    } else if (!strcasecmp(ptrFromObj(c->argv[1]),"resetstat") && c->argc == 2) {
        resetServerStats();
        resetCommandTableStats();
        addReply(c,shared.ok);
    } else if (!strcasecmp(ptrFromObj(c->argv[1]),"rewrite") && c->argc == 2) {
        if (server.configfile == NULL) {
            addReplyError(c,"The server is running without a config file");
            return;
        }
        if (rewriteConfig(server.configfile) == -1) {
            serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno));
            addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno));
        } else {
            serverLog(LL_WARNING,"CONFIG REWRITE executed with success.");
            addReply(c,shared.ok);
        }
    } else {
        addReplySubcommandSyntaxError(c);
        return;
    }
}
967308.c
#define RAYGUI_IMPLEMENTATION #include "raygui.h"
695264.c
/******************************************************************************
 * Code generated with sympy 0.7.6                                            *
 *                                                                            *
 * See http://www.sympy.org/ for more information.                            *
 *                                                                            *
 * This file is part of 'project'                                             *
 ******************************************************************************/
#include "thumb_metacarpal_root_z_0.h"
#include <math.h>

/* Auto-generated constant expression: the z-offset of the thumb metacarpal
 * root reduced to zero by sympy. Always returns 0.0. */
double thumb_metacarpal_root_z_0() {
    const double result = 0.0;
    return result;
}
232186.c
// // Created by adamzeng on 2019-09-28. // #include <stdio.h> #include "sbuf.h" /** Global variables */ int readcnt; /** Initially = 0 */ sem_t mutex, w; /** Both initially = 1 */ void reader(void) { while (1) { P(&mutex); readcnt++; if (readcnt == 1) { /** First in */ P(&w); } V(&mutex); /** Critical section */ /** Reading happens */ P(&mutex); readcnt--; if (readcnt == 0) { /** Last out */ V(&w); } V(&mutex); } } void writer(void) { while (1) { P(&w); /** Critical section */ /** Reading happens */ V(&w); } }
573801.c
#include <stdio.h>

/* Print the first five rows of Pascal's triangle, centered with leading
 * spaces; each entry is written with "%2d " so columns line up. */
int main() {
    const int rows = 5;

    for (int row = 1; row <= rows; row++) {
        /* Indent so the triangle is centered. */
        for (int pad = rows - row; pad > 0; pad--)
            putchar(' ');

        /* Binomial coefficients built incrementally:
         * C(row-1, k) = C(row-1, k-1) * (row - k) / k. */
        int coeff = 1;
        for (int k = 1; k <= row; k++) {
            printf("%2d ", coeff);
            coeff = coeff * (row - k) / k;
        }
        putchar('\n');
    }
    return 0;
}
493883.c
#include <std.h>
#include <daemons.h>
#include "../seneca.h"
inherit "/std/psionportal";

/* Psionic portal room in Seneca: a marble rotunda whose etched circle
 * teleports players to the destinations registered via add_location(). */
void create(){
    ::create();
    set_property("indoors",1);
    set_property("light",2);
    set_travel(PAVED_ROAD);
    set_terrain(CITY);
    set_name("Inside a Marble Rotunda");
    set_short("%^BOLD%^%^WHITE%^Inside a Marble Rotunda%^RESET%^");
    set_long("%^BOLD%^%^WHITE%^Inside a Marble Rotunda, Seneca%^RESET%^\n"+
    "This rotunda has been constructed from pure %^BOLD%^white marble"+
    "%^RESET%^. The walls have been formed from perfectly cut stones, "+
    "all intersecting flawlessly to leave no holes or gaps between them. "+
    "The room is also a perfect circle, with a raised dias dominating the "+
    "center of the room. On that dias is a%^BOLD%^%^BLACK%^ complicated "+
    "carving %^RESET%^of a circle, several feet in diameter. At "+
    "evenly-spaced intervals around the circle are three small pillars, "+
    "each seemingly embedded within the marble itself and carved from "+
    "what appear to be varied kinds of precious rocks and "+
    "gems in shades of %^BLUE%^blue%^RESET%^, %^BOLD%^white %^RESET%^and "+
    "%^BOLD%^%^BLACK%^black%^RESET%^. The room is bright and airy, "+
    "though there are no obvious sources of light. A single broad "+
    "doorway is set into western wall, leading through a small hallway "+
    "back to the %^ORANGE%^street%^RESET%^. Beside the doorway, a small "+
    "%^ORANGE%^sign %^RESET%^has been tacked up and printed in clear writing.\n");
    set_listen("default","The soft murmur of chanting voices drifts around you.");
    set_smell("default","The clean scent of the ocean rises on the breeze.");
    set_items(([
        ({"circle","dust","carving"}) : "A perfect circle has been carved "+
        "into the %^BOLD%^%^WHITE%^marble floor%^RESET%^ and filled "+
        "with some kind of %^BOLD%^gli%^RESET%^t%^BOLD%^ter%^RESET%^"+
        "i%^BOLD%^ng %^RESET%^dust, perhaps for some magical reason, "+
        "or perhaps only to make it more visible against the %^BOLD%^"+
        "%^WHITE%^pure white %^RESET%^of the stone it is carved into. "+
        "At even intervals around its circumference are three stone pillars, "+
        "each formed of a different mineral.",
        ({"pillars","gemstones","rocks","stones"}) : "Around the "+
        "circumference of the circle are three pillars, evenly spaced "+
        "apart. Each appears to be made of some precious mineral, in "+
        "different hues - one of %^BOLD%^%^BLACK%^obsidian%^RESET%^, "+
        "one of %^BOLD%^%^WHITE%^moonstone%^RESET%^, and one of %^BLUE%^"+
        "bl%^CYAN%^u%^BLUE%^e ame%^CYAN%^t%^BLUE%^hyst.%^RESET%^",
        "sign":"A small %^ORANGE%^sign %^RESET%^has been tacked up by the "
        "doorway, printed with clear writing that you could read.",
    ]));
    set_exits(([
        "west" : ROOMS"wstreet35"
    ]));
    /* Portal destinations; argument semantics (name, cost?, level?, path)
     * come from /std/psionportal -- TODO confirm against that inherit. */
    add_location("shadow",0,95,"/d/darkwood/room/road1");
    add_location("tonovi",0,95,"/d/dagger/tonovi/road2");
    add_location("eldebaro",20,95,"/d/islands/common/eldebaro/newrooms/east_oasis1");
}

/* On reset, respawn the portal attendant NPC if it is missing. */
void reset(){
    ::reset();
    if(!present("portalpsion")) {
        new("/d/shadow/mon/nomad")->move(TO);
        tell_room(TO,"A robed human enters quietly into the room, to stand behind the etched circle.");
    }
}
192356.c
/* Simple DirectMedia Layer Copyright (C) 1997-2022 Sam Lantinga <[email protected]> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* Functions for audio drivers to perform runtime conversion of audio format */ /* FIXME: Channel weights when converting from more channels to fewer may need to be adjusted, see https://msdn.microsoft.com/en-us/library/windows/desktop/ff819070(v=vs.85).aspx */ #include "SDL.h" #include "SDL_audio.h" #include "SDL_audio_c.h" #include "SDL_loadso.h" #include "../SDL_dataqueue.h" #include "SDL_cpuinfo.h" #define DEBUG_AUDIOSTREAM 0 #ifdef __ARM_NEON #define HAVE_NEON_INTRINSICS 1 #endif #ifdef __SSE__ #define HAVE_SSE_INTRINSICS 1 #endif #ifdef __SSE3__ #define HAVE_SSE3_INTRINSICS 1 #endif #if defined(HAVE_IMMINTRIN_H) && !defined(SDL_DISABLE_IMMINTRIN_H) #define HAVE_AVX_INTRINSICS 1 #endif #if defined __clang__ # if (!__has_attribute(target)) # undef HAVE_AVX_INTRINSICS # endif # if (defined(_MSC_VER) || defined(__SCE__)) && !defined(__AVX__) # undef HAVE_AVX_INTRINSICS # endif #elif defined __GNUC__ # if (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 9) # undef HAVE_AVX_INTRINSICS # endif #endif #if HAVE_SSE3_INTRINSICS /* Convert from stereo to mono. 
Average left and right. */ static void SDLCALL SDL_ConvertStereoToMono_SSE3(SDL_AudioCVT * cvt, SDL_AudioFormat format) { const __m128 divby2 = _mm_set1_ps(0.5f); float *dst = (float *) cvt->buf; const float *src = dst; int i = cvt->len_cvt / 8; LOG_DEBUG_CONVERT("stereo", "mono (using SSE3)"); SDL_assert(format == AUDIO_F32SYS); /* Do SSE blocks as long as we have 16 bytes available. Just use unaligned load/stores, if the memory at runtime is aligned it'll be just as fast on modern processors */ while (i >= 4) { /* 4 * float32 */ _mm_storeu_ps(dst, _mm_mul_ps(_mm_hadd_ps(_mm_load_ps(src), _mm_loadu_ps(src+4)), divby2)); i -= 4; src += 8; dst += 4; } /* Finish off any leftovers with scalar operations. */ while (i) { *dst = (src[0] + src[1]) * 0.5f; dst++; i--; src += 2; } cvt->len_cvt /= 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } #endif /* Convert from stereo to mono. Average left and right. */ static void SDLCALL SDL_ConvertStereoToMono(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; LOG_DEBUG_CONVERT("stereo", "mono"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / 8; i; --i, src += 2) { *(dst++) = (src[0] + src[1]) * 0.5f; } cvt->len_cvt /= 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } #if HAVE_AVX_INTRINSICS /* MSVC will always accept AVX intrinsics when compiling for x64 */ #if defined(__clang__) || defined(__GNUC__) __attribute__((target("avx"))) #endif /* Convert from 5.1 to stereo. Average left and right, distribute center, discard LFE. 
*/
static void SDLCALL
SDL_Convert51ToStereo_AVX(SDL_AudioCVT * cvt, SDL_AudioFormat format)
{
    float *dst = (float *) cvt->buf;
    const float *src = dst;
    int i = cvt->len_cvt / (sizeof (float) * 6);
    const float two_fifths_f = 1.0f / 2.5f;
    const __m256 two_fifths_v = _mm256_set1_ps(two_fifths_f);
    const __m256 half = _mm256_set1_ps(0.5f);

    LOG_DEBUG_CONVERT("5.1", "stereo (using AVX)");
    SDL_assert(format == AUDIO_F32SYS);

    /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */

    /* Four 5.1 frames (24 floats) per iteration, rearranged with blends and
       permutes so L = (FL + 0.5*FC + BL) * 2/5 can be done 8 lanes at a time. */
    while (i >= 4) {
        __m256 in0 = _mm256_loadu_ps(src + 0);  /* 0FL 0FR 0FC 0LF 0BL 0BR 1FL 1FR */
        __m256 in1 = _mm256_loadu_ps(src + 8);  /* 1FC 1LF 1BL 1BR 2FL 2FR 2FC 2LF */
        __m256 in2 = _mm256_loadu_ps(src + 16); /* 2BL 2BR 3FL 3FR 3FC 3LF 3BL 3BR */

        /* 0FL 0FR 0FC 0LF 2FL 2FR 2FC 2LF */
        __m256 temp0 = _mm256_blend_ps(in0, in1, 0xF0);
        /* 1FC 1LF 1BL 1BR 3FC 3LF 3BL 3BR */
        __m256 temp1 = _mm256_blend_ps(in1, in2, 0xF0);

        /* 0FC 0FC 1FC 1FC 2FC 2FC 3FC 3FC */
        __m256 fc_distributed = _mm256_mul_ps(half, _mm256_shuffle_ps(temp0, temp1, _MM_SHUFFLE(0, 0, 2, 2)));

        /* 0FL 0FR 1BL 1BR 2FL 2FR 3BL 3BR */
        __m256 permuted0 = _mm256_blend_ps(temp0, temp1, 0xCC);
        /* 0BL 0BR 1FL 1FR 2BL 2BR 3FL 3FR */
        __m256 permuted1 = _mm256_permute2f128_ps(in0, in2, 0x21);

        /*   0FL 0FR 1BL 1BR 2FL 2FR 3BL 3BR */
        /* + 0BL 0BR 1FL 1FR 2BL 2BR 3FL 3FR */
        /* =  0L  0R  1L  1R  2L  2R  3L  3R */
        __m256 out = _mm256_add_ps(permuted0, permuted1);
        out = _mm256_add_ps(out, fc_distributed);
        out = _mm256_mul_ps(out, two_fifths_v);

        _mm256_storeu_ps(dst, out);

        i -= 4; src += 24; dst += 8;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float front_center_distributed = src[2] * 0.5f;
        dst[0] = (src[0] + front_center_distributed + src[4]) * two_fifths_f;  /* left */
        dst[1] = (src[1] + front_center_distributed + src[5]) * two_fifths_f;  /* right */
        i--; src += 6; dst+=2;
    }

    cvt->len_cvt /= 3;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index] (cvt, format);
    }
}
#endif

#if HAVE_SSE_INTRINSICS
/* Convert from 5.1 to stereo. Average left and right, distribute center, discard LFE. */
static void SDLCALL
SDL_Convert51ToStereo_SSE(SDL_AudioCVT * cvt, SDL_AudioFormat format)
{
    float *dst = (float *) cvt->buf;
    const float *src = dst;
    int i = cvt->len_cvt / (sizeof (float) * 6);
    const float two_fifths_f = 1.0f / 2.5f;
    const __m128 two_fifths_v = _mm_set1_ps(two_fifths_f);
    const __m128 half = _mm_set1_ps(0.5f);

    LOG_DEBUG_CONVERT("5.1", "stereo (using SSE)");
    SDL_assert(format == AUDIO_F32SYS);

    /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */

    /* Just use unaligned load/stores, if the memory at runtime is */
    /* aligned it'll be just as fast on modern processors */
    while (i >= 2) {
        /* Two 5.1 samples (12 floats) fit nicely in three 128bit */
        /* registers. Using shuffles they can be rearranged so that */
        /* the conversion math can be vectorized. */
        __m128 in0 = _mm_loadu_ps(src);     /* 0FL 0FR 0FC 0LF */
        __m128 in1 = _mm_loadu_ps(src + 4); /* 0BL 0BR 1FL 1FR */
        __m128 in2 = _mm_loadu_ps(src + 8); /* 1FC 1LF 1BL 1BR */

        /* 0FC 0FC 1FC 1FC */
        __m128 fc_distributed = _mm_mul_ps(half, _mm_shuffle_ps(in0, in2, _MM_SHUFFLE(0, 0, 2, 2)));

        /* 0FL 0FR 1BL 1BR */
        __m128 blended = _mm_shuffle_ps(in0, in2, _MM_SHUFFLE(3, 2, 1, 0));

        /*   0FL 0FR 1BL 1BR */
        /* + 0BL 0BR 1FL 1FR */
        /* =  0L  0R  1L  1R */
        __m128 out = _mm_add_ps(blended, in1);
        out = _mm_add_ps(out, fc_distributed);
        out = _mm_mul_ps(out, two_fifths_v);

        _mm_storeu_ps(dst, out);

        i -= 2; src += 12; dst += 4;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float front_center_distributed = src[2] * 0.5f;
        dst[0] = (src[0] + front_center_distributed + src[4]) * two_fifths_f;  /* left */
        dst[1] = (src[1] + front_center_distributed + src[5]) * two_fifths_f;  /* right */
        i--; src += 6; dst+=2;
    }

    cvt->len_cvt /= 3;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index] (cvt, format);
    }
}
#endif

#if HAVE_NEON_INTRINSICS
/* Convert from 5.1 to stereo. Average left and right, distribute center, discard LFE. */
static void SDLCALL
SDL_Convert51ToStereo_NEON(SDL_AudioCVT * cvt, SDL_AudioFormat format)
{
    float *dst = (float *) cvt->buf;
    const float *src = dst;
    int i = cvt->len_cvt / (sizeof (float) * 6);
    const float two_fifths_f = 1.0f / 2.5f;
    const float32x4_t two_fifths_v = vdupq_n_f32(two_fifths_f);
    const float32x4_t half = vdupq_n_f32(0.5f);

    LOG_DEBUG_CONVERT("5.1", "stereo (using NEON)");
    SDL_assert(format == AUDIO_F32SYS);

    /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */

    /* Just use unaligned load/stores, it's the same NEON instructions and
       hopefully even unaligned NEON is faster than the scalar fallback. */
    while (i >= 2) {
        /* Two 5.1 samples (12 floats) fit nicely in three 128bit */
        /* registers. Using shuffles they can be rearranged so that */
        /* the conversion math can be vectorized. */
        const float32x4_t in0 = vld1q_f32(src);     /* 0FL 0FR 0FC 0LF */
        const float32x4_t in1 = vld1q_f32(src + 4); /* 0BL 0BR 1FL 1FR */
        const float32x4_t in2 = vld1q_f32(src + 8); /* 1FC 1LF 1BL 1BR */

        /* 0FC 0FC 1FC 1FC */
        const float32x4_t fc_distributed = vmulq_f32(half, vcombine_f32(vdup_lane_f32(vget_high_f32(in0), 0), vdup_lane_f32(vget_low_f32(in2), 0)));

        /* 0FL 0FR 1BL 1BR */
        const float32x4_t blended = vcombine_f32(vget_low_f32(in0), vget_high_f32(in2));

        /*   0FL 0FR 1BL 1BR */
        /* + 0BL 0BR 1FL 1FR */
        /* =  0L  0R  1L  1R */
        float32x4_t out = vaddq_f32(blended, in1);
        out = vaddq_f32(out, fc_distributed);
        out = vmulq_f32(out, two_fifths_v);

        vst1q_f32(dst, out);

        i -= 2; src += 12; dst += 4;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float front_center_distributed = src[2] * 0.5f;
        dst[0] = (src[0] + front_center_distributed + src[4]) * two_fifths_f;  /* left */
        dst[1] = (src[1] + front_center_distributed + src[5]) * two_fifths_f;  /* right */
        i--; src += 6; dst+=2;
    }

    cvt->len_cvt /= 3;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index] (cvt, format);
    }
}
#endif

/* Convert from 5.1 to stereo.
Average left and right, distribute center, discard LFE. */ static void SDLCALL SDL_Convert51ToStereo(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; const float two_fifths = 1.0f / 2.5f; LOG_DEBUG_CONVERT("5.1", "stereo"); SDL_assert(format == AUDIO_F32SYS); /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */ for (i = cvt->len_cvt / (sizeof (float) * 6); i; --i, src += 6, dst += 2) { const float front_center_distributed = src[2] * 0.5f; dst[0] = (src[0] + front_center_distributed + src[4]) * two_fifths; /* left */ dst[1] = (src[1] + front_center_distributed + src[5]) * two_fifths; /* right */ } cvt->len_cvt /= 3; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from quad to stereo. Average left and right. */ static void SDLCALL SDL_ConvertQuadToStereo(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; LOG_DEBUG_CONVERT("quad", "stereo"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 4); i; --i, src += 4, dst += 2) { dst[0] = (src[0] + src[2]) * 0.5f; /* left */ dst[1] = (src[1] + src[3]) * 0.5f; /* right */ } cvt->len_cvt /= 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 7.1 to 5.1. Distribute sides across front and back. 
*/ static void SDLCALL SDL_Convert71To51(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; const float two_thirds = 1.0f / 1.5f; LOG_DEBUG_CONVERT("7.1", "5.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 8); i; --i, src += 8, dst += 6) { const float surround_left_distributed = src[6] * 0.5f; const float surround_right_distributed = src[7] * 0.5f; dst[0] = (src[0] + surround_left_distributed) * two_thirds; /* FL */ dst[1] = (src[1] + surround_right_distributed) * two_thirds; /* FR */ dst[2] = src[2] * two_thirds; /* CC */ dst[3] = src[3] * two_thirds; /* LFE */ dst[4] = (src[4] + surround_left_distributed) * two_thirds; /* BL */ dst[5] = (src[5] + surround_right_distributed) * two_thirds; /* BR */ } cvt->len_cvt /= 8; cvt->len_cvt *= 6; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 7.1 to 6.1 */ /* SDL's 6.1 layout: LFE+FC+FR+SR+BackSurround+SL+FL */ /* SDL's 7.1 layout: FL+FR+FC+LFE+BL+BR+SL+SR */ static void SDLCALL SDL_Convert71To61(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; LOG_DEBUG_CONVERT("7.1", "6.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 8); i; --i, src += 8, dst += 7) { dst[0] = src[3]; /* LFE */ dst[1] = src[2]; /* FC */ dst[2] = src[1]; /* FR */ dst[3] = src[7]; /* SR */ dst[4] = (src[4] + src[5]) / 0.2f; /* BackSurround */ dst[5] = src[6]; /* SL */ dst[6] = src[0]; /* FL */ } cvt->len_cvt /= 8; cvt->len_cvt *= 7; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 6.1 to 7.1 */ /* SDL's 6.1 layout: LFE+FC+FR+SR+BackSurround+SL+FL */ /* SDL's 7.1 layout: FL+FR+FC+LFE+BL+BR+SL+SR */ static void SDLCALL SDL_Convert61To71(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; 
LOG_DEBUG_CONVERT("6.1", "7.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 7); i; --i, src += 7, dst += 8) { dst[0] = src[6]; /* FL */ dst[1] = src[2]; /* FR */ dst[2] = src[1]; /* FC */ dst[3] = src[0]; /* LFE */ dst[4] = src[4]; /* BL */ dst[5] = src[4]; /* BR */ dst[6] = src[5]; /* SL */ dst[7] = src[3]; /* SR */ } cvt->len_cvt /= 7; cvt->len_cvt *= 8; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 5.1 to 6.1 */ /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */ /* SDL's 6.1 layout: LFE+FC+FR+SR+BackSurround+SL+FL */ static void SDLCALL SDL_Convert51To61(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; LOG_DEBUG_CONVERT("5.1", "6.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 6); i; --i, src += 6, dst += 7) { dst[0] = src[3]; /* LFE */ dst[1] = src[2]; /* FC */ dst[2] = src[1]; /* FR */ dst[3] = src[5]; /* SR */ dst[4] = (src[4] + src[5]) / 0.2f; /* BackSurround */ dst[5] = src[4]; /* SL */ dst[6] = src[0]; /* FL */ } cvt->len_cvt /= 6; cvt->len_cvt *= 7; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 6.1 to 5.1 */ /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */ /* SDL's 6.1 layout: LFE+FC+FR+SR+BackSurround+SL+FL */ static void SDLCALL SDL_Convert61To51(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; LOG_DEBUG_CONVERT("6.1", "5.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof (float) * 7); i; --i, src += 7, dst += 6) { dst[0] = src[6]; /* FL */ dst[1] = src[2]; /* FR */ dst[2] = src[1]; /* FC */ dst[3] = src[0]; /* LFE */ dst[4] = src[5]; /* BL */ dst[5] = src[3]; /* BR */ } cvt->len_cvt /= 7; cvt->len_cvt *= 6; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Convert from 5.1 to quad. 
Distribute center across front, discard LFE. */ static void SDLCALL SDL_Convert51ToQuad(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float *dst = (float *) cvt->buf; const float *src = dst; int i; const float two_thirds = 1.0f / 1.5f; LOG_DEBUG_CONVERT("5.1", "quad"); SDL_assert(format == AUDIO_F32SYS); /* SDL's 4.0 layout: FL+FR+BL+BR */ /* SDL's 5.1 layout: FL+FR+FC+LFE+BL+BR */ for (i = cvt->len_cvt / (sizeof (float) * 6); i; --i, src += 6, dst += 4) { const float front_center_distributed = src[2] * 0.5f; dst[0] = (src[0] + front_center_distributed) * two_thirds; /* FL */ dst[1] = (src[1] + front_center_distributed) * two_thirds; /* FR */ dst[2] = src[4] * two_thirds; /* BL */ dst[3] = src[5] * two_thirds; /* BR */ } cvt->len_cvt /= 6; cvt->len_cvt *= 4; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Upmix mono to stereo (by duplication) */ static void SDLCALL SDL_ConvertMonoToStereo(SDL_AudioCVT * cvt, SDL_AudioFormat format) { const float *src = (const float *) (cvt->buf + cvt->len_cvt); float *dst = (float *) (cvt->buf + cvt->len_cvt * 2); int i; LOG_DEBUG_CONVERT("mono", "stereo"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / sizeof (float); i; --i) { src--; dst -= 2; dst[0] = dst[1] = *src; } cvt->len_cvt *= 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Upmix stereo to a pseudo-5.1 stream */ static void SDLCALL SDL_ConvertStereoTo51(SDL_AudioCVT * cvt, SDL_AudioFormat format) { int i; float lf, rf, ce; const float *src = (const float *) (cvt->buf + cvt->len_cvt); float *dst = (float *) (cvt->buf + cvt->len_cvt * 3); LOG_DEBUG_CONVERT("stereo", "5.1"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof(float) * 2); i; --i) { dst -= 6; src -= 2; lf = src[0]; rf = src[1]; ce = (lf + rf) * 0.5f; /* Constant 0.571f is approx 4/7 not to saturate */ dst[0] = 0.571f * (lf + (lf - 0.5f * ce)); /* FL */ dst[1] = 0.571f * (rf + (rf - 
0.5f * ce)); /* FR */ dst[2] = ce; /* FC */ dst[3] = 0; /* LFE (only meant for special LFE effects) */ dst[4] = lf; /* BL */ dst[5] = rf; /* BR */ } cvt->len_cvt *= 3; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Upmix quad to a pseudo-5.1 stream */ static void SDLCALL SDL_ConvertQuadTo51(SDL_AudioCVT * cvt, SDL_AudioFormat format) { int i; float lf, rf, lb, rb, ce; const float *src = (const float *) (cvt->buf + cvt->len_cvt); float *dst = (float *) (cvt->buf + cvt->len_cvt * 3 / 2); LOG_DEBUG_CONVERT("quad", "5.1"); SDL_assert(format == AUDIO_F32SYS); SDL_assert(cvt->len_cvt % (sizeof(float) * 4) == 0); for (i = cvt->len_cvt / (sizeof(float) * 4); i; --i) { dst -= 6; src -= 4; lf = src[0]; rf = src[1]; lb = src[2]; rb = src[3]; ce = (lf + rf) * 0.5f; /* Constant 0.571f is approx 4/7 not to saturate */ dst[0] = 0.571f * (lf + (lf - 0.5f * ce)); /* FL */ dst[1] = 0.571f * (rf + (rf - 0.5f * ce)); /* FR */ dst[2] = ce; /* FC */ dst[3] = 0; /* LFE (only meant for special LFE effects) */ dst[4] = lb; /* BL */ dst[5] = rb; /* BR */ } cvt->len_cvt = cvt->len_cvt * 3 / 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Upmix stereo to a pseudo-4.0 stream (by duplication) */ static void SDLCALL SDL_ConvertStereoToQuad(SDL_AudioCVT * cvt, SDL_AudioFormat format) { const float *src = (const float *) (cvt->buf + cvt->len_cvt); float *dst = (float *) (cvt->buf + cvt->len_cvt * 2); float lf, rf; int i; LOG_DEBUG_CONVERT("stereo", "quad"); SDL_assert(format == AUDIO_F32SYS); for (i = cvt->len_cvt / (sizeof(float) * 2); i; --i) { dst -= 4; src -= 2; lf = src[0]; rf = src[1]; dst[0] = lf; /* FL */ dst[1] = rf; /* FR */ dst[2] = lf; /* BL */ dst[3] = rf; /* BR */ } cvt->len_cvt *= 2; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* Upmix 5.1 to 7.1 */ static void SDLCALL SDL_Convert51To71(SDL_AudioCVT * cvt, SDL_AudioFormat format) { float 
lf, rf, lb, rb, ls, rs; int i; const float *src = (const float *) (cvt->buf + cvt->len_cvt); float *dst = (float *) (cvt->buf + cvt->len_cvt * 4 / 3); LOG_DEBUG_CONVERT("5.1", "7.1"); SDL_assert(format == AUDIO_F32SYS); SDL_assert(cvt->len_cvt % (sizeof(float) * 6) == 0); for (i = cvt->len_cvt / (sizeof(float) * 6); i; --i) { dst -= 8; src -= 6; lf = src[0]; rf = src[1]; lb = src[4]; rb = src[5]; ls = (lf + lb) * 0.5f; rs = (rf + rb) * 0.5f; lf += lf - ls; rf += rf - rs; lb += lb - ls; rb += rb - rs; dst[3] = src[3]; /* LFE */ dst[2] = src[2]; /* FC */ dst[7] = rs; /* SR */ dst[6] = ls; /* SL */ dst[5] = 0.5f * rb; /* BR */ dst[4] = 0.5f * lb; /* BL */ dst[1] = 0.5f * rf; /* FR */ dst[0] = 0.5f * lf; /* FL */ } cvt->len_cvt = cvt->len_cvt * 4 / 3; if (cvt->filters[++cvt->filter_index]) { cvt->filters[cvt->filter_index] (cvt, format); } } /* SDL's resampler uses a "bandlimited interpolation" algorithm: https://ccrma.stanford.edu/~jos/resample/ */ #define RESAMPLER_ZERO_CROSSINGS 5 #define RESAMPLER_BITS_PER_SAMPLE 16 #define RESAMPLER_SAMPLES_PER_ZERO_CROSSING (1 << ((RESAMPLER_BITS_PER_SAMPLE / 2) + 1)) #define RESAMPLER_FILTER_SIZE ((RESAMPLER_SAMPLES_PER_ZERO_CROSSING * RESAMPLER_ZERO_CROSSINGS) + 1) /* This is a "modified" bessel function, so you can't use POSIX j0() */ static double bessel(const double x) { const double xdiv2 = x / 2.0; double i0 = 1.0f; double f = 1.0f; int i = 1; while (SDL_TRUE) { const double diff = SDL_pow(xdiv2, i * 2) / SDL_pow(f, 2); if (diff < 1.0e-21f) { break; } i0 += diff; i++; f *= (double) i; } return i0; } /* build kaiser table with cardinal sine applied to it, and array of differences between elements. 
*/ static void kaiser_and_sinc(float *table, float *diffs, const int tablelen, const double beta) { const int lenm1 = tablelen - 1; const int lenm1div2 = lenm1 / 2; int i; table[0] = 1.0f; for (i = 1; i < tablelen; i++) { const double kaiser = bessel(beta * SDL_sqrt(1.0 - SDL_pow(((i - lenm1) / 2.0) / lenm1div2, 2.0))) / bessel(beta); table[tablelen - i] = (float) kaiser; } for (i = 1; i < tablelen; i++) { const float x = (((float) i) / ((float) RESAMPLER_SAMPLES_PER_ZERO_CROSSING)) * ((float) M_PI); table[i] *= SDL_sinf(x) / x; diffs[i - 1] = table[i] - table[i - 1]; } diffs[lenm1] = 0.0f; } static SDL_SpinLock ResampleFilterSpinlock = 0; static float *ResamplerFilter = NULL; static float *ResamplerFilterDifference = NULL; int SDL_PrepareResampleFilter(void) { SDL_AtomicLock(&ResampleFilterSpinlock); if (!ResamplerFilter) { /* if dB > 50, beta=(0.1102 * (dB - 8.7)), according to Matlab. */ const double dB = 80.0; const double beta = 0.1102 * (dB - 8.7); const size_t alloclen = RESAMPLER_FILTER_SIZE * sizeof (float); ResamplerFilter = (float *) SDL_malloc(alloclen); if (!ResamplerFilter) { SDL_AtomicUnlock(&ResampleFilterSpinlock); return SDL_OutOfMemory(); } ResamplerFilterDifference = (float *) SDL_malloc(alloclen); if (!ResamplerFilterDifference) { SDL_free(ResamplerFilter); ResamplerFilter = NULL; SDL_AtomicUnlock(&ResampleFilterSpinlock); return SDL_OutOfMemory(); } kaiser_and_sinc(ResamplerFilter, ResamplerFilterDifference, RESAMPLER_FILTER_SIZE, beta); } SDL_AtomicUnlock(&ResampleFilterSpinlock); return 0; } void SDL_FreeResampleFilter(void) { SDL_free(ResamplerFilter); SDL_free(ResamplerFilterDifference); ResamplerFilter = NULL; ResamplerFilterDifference = NULL; } static int ResamplerPadding(const int inrate, const int outrate) { if (inrate == outrate) { return 0; } else if (inrate > outrate) { return (int) SDL_ceil(((float) (RESAMPLER_SAMPLES_PER_ZERO_CROSSING * inrate) / ((float) outrate))); } return RESAMPLER_SAMPLES_PER_ZERO_CROSSING; } /* lpadding and 
rpadding are expected to be buffers of (ResamplePadding(inrate, outrate) * chans * sizeof (float)) bytes. */
/* Bandlimited-interpolation resampler core. Returns the number of bytes
   written to outbuf. Interleaved float32 in/out; lpadding/rpadding supply
   history before/after the input so the filter wings never read out of
   bounds. */
static int
SDL_ResampleAudio(const int chans, const int inrate, const int outrate,
                        const float *lpadding, const float *rpadding,
                        const float *inbuf, const int inbuflen,
                        float *outbuf, const int outbuflen)
{
    const double finrate = (double) inrate;
    const double outtimeincr = 1.0 / ((float) outrate);
    const double  ratio = ((float) outrate) / ((float) inrate);
    const int paddinglen = ResamplerPadding(inrate, outrate);
    const int framelen = chans * (int)sizeof (float);
    const int inframes = inbuflen / framelen;
    const int wantedoutframes = (int) ((inbuflen / framelen) * ratio);  /* outbuflen isn't total to write, it's total available. */
    const int maxoutframes = outbuflen / framelen;
    const int outframes = SDL_min(wantedoutframes, maxoutframes);
    float *dst = outbuf;
    double outtime = 0.0;
    int i, j, chan;

    for (i = 0; i < outframes; i++) {
        const int srcindex = (int) (outtime * inrate);

        /* Calculating the following way instead of incrementing at the
           end of the loop avoids accumulation of error. */
        const double intime = ((double) srcindex) / finrate;
        const double innexttime = ((double) (srcindex + 1)) / finrate;
        const double interpolation1 = 1.0 - ((innexttime - outtime) / (innexttime - intime));
        const int filterindex1 = (int) (interpolation1 * RESAMPLER_SAMPLES_PER_ZERO_CROSSING);
        const double interpolation2 = 1.0 - interpolation1;
        const int filterindex2 = (int) (interpolation2 * RESAMPLER_SAMPLES_PER_ZERO_CROSSING);

        for (chan = 0; chan < chans; chan++) {
            float outsample = 0.0f;

            /* do this twice to calculate the sample, once for the "left wing" and then same for the right. */
            /* !!! FIXME: do both wings in one loop */
            for (j = 0; (filterindex1 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)) < RESAMPLER_FILTER_SIZE; j++) {
                const int srcframe = srcindex - j;
                /* !!! FIXME: we can bubble this conditional out of here by doing a pre loop. */
                const float insample = (srcframe < 0) ? lpadding[((paddinglen + srcframe) * chans) + chan] : inbuf[(srcframe * chans) + chan];
                outsample += (float)(insample * (ResamplerFilter[filterindex1 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)] + (interpolation1 * ResamplerFilterDifference[filterindex1 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)])));
            }

            for (j = 0; (filterindex2 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)) < RESAMPLER_FILTER_SIZE; j++) {
                const int srcframe = srcindex + 1 + j;
                /* !!! FIXME: we can bubble this conditional out of here by doing a post loop. */
                const float insample = (srcframe >= inframes) ? rpadding[((srcframe - inframes) * chans) + chan] : inbuf[(srcframe * chans) + chan];
                outsample += (float)(insample * (ResamplerFilter[filterindex2 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)] + (interpolation2 * ResamplerFilterDifference[filterindex2 + (j * RESAMPLER_SAMPLES_PER_ZERO_CROSSING)])));
            }

            *(dst++) = outsample;
        }

        outtime += outtimeincr;
    }

    return outframes * chans * sizeof (float);
}

/* Run a prepared SDL_AudioCVT filter chain over cvt->buf. */
int
SDL_ConvertAudio(SDL_AudioCVT * cvt)
{
    /* !!! FIXME: (cvt) should be const; stack-copy it here. */
    /* !!! FIXME: (actually, we can't...len_cvt needs to be updated. Grr.) */

    /* Make sure there's data to convert */
    if (cvt->buf == NULL) {
        return SDL_SetError("No buffer allocated for conversion");
    }

    /* Return okay if no conversion is necessary */
    cvt->len_cvt = cvt->len;
    if (cvt->filters[0] == NULL) {
        return 0;
    }

    /* Set up the conversion and go!
*/ cvt->filter_index = 0; cvt->filters[0] (cvt, cvt->src_format); return 0; } static void SDLCALL SDL_Convert_Byteswap(SDL_AudioCVT *cvt, SDL_AudioFormat format) { #if DEBUG_CONVERT printf("Converting byte order\n"); #endif switch (SDL_AUDIO_BITSIZE(format)) { #define CASESWAP(b) \ case b: { \ Uint##b *ptr = (Uint##b *) cvt->buf; \ int i; \ for (i = cvt->len_cvt / sizeof (*ptr); i; --i, ++ptr) { \ *ptr = SDL_Swap##b(*ptr); \ } \ break; \ } CASESWAP(16); CASESWAP(32); CASESWAP(64); #undef CASESWAP default: SDL_assert(!"unhandled byteswap datatype!"); break; } if (cvt->filters[++cvt->filter_index]) { /* flip endian flag for data. */ if (format & SDL_AUDIO_MASK_ENDIAN) { format &= ~SDL_AUDIO_MASK_ENDIAN; } else { format |= SDL_AUDIO_MASK_ENDIAN; } cvt->filters[cvt->filter_index](cvt, format); } } static int SDL_AddAudioCVTFilter(SDL_AudioCVT *cvt, const SDL_AudioFilter filter) { if (cvt->filter_index >= SDL_AUDIOCVT_MAX_FILTERS) { return SDL_SetError("Too many filters needed for conversion, exceeded maximum of %d", SDL_AUDIOCVT_MAX_FILTERS); } if (filter == NULL) { return SDL_SetError("Audio filter pointer is NULL"); } cvt->filters[cvt->filter_index++] = filter; cvt->filters[cvt->filter_index] = NULL; /* Moving terminator */ return 0; } static int SDL_BuildAudioTypeCVTToFloat(SDL_AudioCVT *cvt, const SDL_AudioFormat src_fmt) { int retval = 0; /* 0 == no conversion necessary. */ if ((SDL_AUDIO_ISBIGENDIAN(src_fmt) != 0) == (SDL_BYTEORDER == SDL_LIL_ENDIAN)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert_Byteswap) < 0) { return -1; } retval = 1; /* added a converter. 
*/ } if (!SDL_AUDIO_ISFLOAT(src_fmt)) { const Uint16 src_bitsize = SDL_AUDIO_BITSIZE(src_fmt); const Uint16 dst_bitsize = 32; SDL_AudioFilter filter = NULL; switch (src_fmt & ~SDL_AUDIO_MASK_ENDIAN) { case AUDIO_S8: filter = SDL_Convert_S8_to_F32; break; case AUDIO_U8: filter = SDL_Convert_U8_to_F32; break; case AUDIO_S16: filter = SDL_Convert_S16_to_F32; break; case AUDIO_U16: filter = SDL_Convert_U16_to_F32; break; case AUDIO_S32: filter = SDL_Convert_S32_to_F32; break; default: SDL_assert(!"Unexpected audio format!"); break; } if (!filter) { return SDL_SetError("No conversion from source format to float available"); } if (SDL_AddAudioCVTFilter(cvt, filter) < 0) { return -1; } if (src_bitsize < dst_bitsize) { const int mult = (dst_bitsize / src_bitsize); cvt->len_mult *= mult; cvt->len_ratio *= mult; } else if (src_bitsize > dst_bitsize) { cvt->len_ratio /= (src_bitsize / dst_bitsize); } retval = 1; /* added a converter. */ } return retval; } static int SDL_BuildAudioTypeCVTFromFloat(SDL_AudioCVT *cvt, const SDL_AudioFormat dst_fmt) { int retval = 0; /* 0 == no conversion necessary. 
*/ if (!SDL_AUDIO_ISFLOAT(dst_fmt)) { const Uint16 dst_bitsize = SDL_AUDIO_BITSIZE(dst_fmt); const Uint16 src_bitsize = 32; SDL_AudioFilter filter = NULL; switch (dst_fmt & ~SDL_AUDIO_MASK_ENDIAN) { case AUDIO_S8: filter = SDL_Convert_F32_to_S8; break; case AUDIO_U8: filter = SDL_Convert_F32_to_U8; break; case AUDIO_S16: filter = SDL_Convert_F32_to_S16; break; case AUDIO_U16: filter = SDL_Convert_F32_to_U16; break; case AUDIO_S32: filter = SDL_Convert_F32_to_S32; break; default: SDL_assert(!"Unexpected audio format!"); break; } if (!filter) { return SDL_SetError("No conversion from float to format 0x%.4x available", dst_fmt); } if (SDL_AddAudioCVTFilter(cvt, filter) < 0) { return -1; } if (src_bitsize < dst_bitsize) { const int mult = (dst_bitsize / src_bitsize); cvt->len_mult *= mult; cvt->len_ratio *= mult; } else if (src_bitsize > dst_bitsize) { cvt->len_ratio /= (src_bitsize / dst_bitsize); } retval = 1; /* added a converter. */ } if ((SDL_AUDIO_ISBIGENDIAN(dst_fmt) != 0) == (SDL_BYTEORDER == SDL_LIL_ENDIAN)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert_Byteswap) < 0) { return -1; } retval = 1; /* added a converter. */ } return retval; } static void SDL_ResampleCVT(SDL_AudioCVT *cvt, const int chans, const SDL_AudioFormat format) { /* !!! FIXME in 2.1: there are ten slots in the filter list, and the theoretical maximum we use is six (seven with NULL terminator). !!! FIXME in 2.1: We need to store data for this resampler, because the cvt structure doesn't store the original sample rates, !!! FIXME in 2.1: so we steal the ninth and tenth slot. :( */ const int inrate = (int) (size_t) cvt->filters[SDL_AUDIOCVT_MAX_FILTERS-1]; const int outrate = (int) (size_t) cvt->filters[SDL_AUDIOCVT_MAX_FILTERS]; const float *src = (const float *) cvt->buf; const int srclen = cvt->len_cvt; /*float *dst = (float *) cvt->buf; const int dstlen = (cvt->len * cvt->len_mult);*/ /* !!! FIXME: remove this if we can get the resampler to work in-place again. 
*/
    /* Destination is the scratch area appended after the source data in cvt->buf;
       the resampler cannot (currently) run in place. */
    float *dst = (float *) (cvt->buf + srclen);
    const int dstlen = (cvt->len * cvt->len_mult) - srclen;
    const int requestedpadding = ResamplerPadding(inrate, outrate);
    int paddingsamples;
    float *padding;

    /* Guard the multiply against signed overflow; on overflow fall back to
       zero padding samples rather than computing a garbage (UB) value. */
    if (requestedpadding < SDL_MAX_SINT32 / chans) {
        paddingsamples = requestedpadding * chans;
    } else {
        paddingsamples = 0;
    }

    /* This CVT stage only ever runs on native-endian float32 data. */
    SDL_assert(format == AUDIO_F32SYS);

    /* we keep no streaming state here, so pad with silence on both ends. */
    /* calloc gives zeroed (silent) samples; the "? : 1" avoids a zero-size
       allocation when paddingsamples is 0. */
    padding = (float *) SDL_calloc(paddingsamples ? paddingsamples : 1, sizeof (float));
    if (!padding) {
        /* No way to report failure from a void filter; len_cvt is left as-is. */
        SDL_OutOfMemory();
        return;
    }

    /* Same silent buffer serves as both left and right padding. */
    cvt->len_cvt = SDL_ResampleAudio(chans, inrate, outrate, padding, padding, src, srclen, dst, dstlen);

    SDL_free(padding);

    /* Copy the resampled data back to the front of cvt->buf for the next filter.
       !!! FIXME: remove this if we can get the resampler to work in-place again. */
    SDL_memmove(cvt->buf, dst, cvt->len_cvt);

    /* Chain to the next filter in the CVT pipeline, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, format);
    }
}

/* !!! FIXME: We only have this macro salsa because SDL_AudioCVT doesn't
   !!! FIXME:  store channel info, so we have to have function entry
   !!! FIXME:  points for each supported channel count and multiple
   !!! FIXME:  vs arbitrary. When we rev the ABI, clean this up. */
/* Stamps out one SDL_AudioFilter-compatible wrapper per channel count,
   baking the channel count into the function name. */
#define RESAMPLER_FUNCS(chans) \
    static void SDLCALL \
    SDL_ResampleCVT_c##chans(SDL_AudioCVT *cvt, SDL_AudioFormat format) { \
        SDL_ResampleCVT(cvt, chans, format); \
    }
RESAMPLER_FUNCS(1)
RESAMPLER_FUNCS(2)
RESAMPLER_FUNCS(4)
RESAMPLER_FUNCS(6)
RESAMPLER_FUNCS(8)
#undef RESAMPLER_FUNCS

/* Map a destination channel count to its dedicated resampler wrapper,
   or NULL if the channel count is unsupported. */
static SDL_AudioFilter
ChooseCVTResampler(const int dst_channels)
{
    switch (dst_channels) {
        case 1: return SDL_ResampleCVT_c1;
        case 2: return SDL_ResampleCVT_c2;
        case 4: return SDL_ResampleCVT_c4;
        case 6: return SDL_ResampleCVT_c6;
        case 8: return SDL_ResampleCVT_c8;
        default: break;
    }

    return NULL;
}

/* Append a rate-conversion filter to (cvt) if src_rate != dst_rate.
   Returns 1 if a converter was added, 0 if none was needed, -1 on error. */
static int
SDL_BuildAudioResampleCVT(SDL_AudioCVT * cvt, const int dst_channels,
                          const int src_rate, const int dst_rate)
{
    SDL_AudioFilter filter;

    if (src_rate == dst_rate) {
        return 0;  /* no conversion necessary.
*/ } filter = ChooseCVTResampler(dst_channels); if (filter == NULL) { return SDL_SetError("No conversion available for these rates"); } if (SDL_PrepareResampleFilter() < 0) { return -1; } /* Update (cvt) with filter details... */ if (SDL_AddAudioCVTFilter(cvt, filter) < 0) { return -1; } /* !!! FIXME in 2.1: there are ten slots in the filter list, and the theoretical maximum we use is six (seven with NULL terminator). !!! FIXME in 2.1: We need to store data for this resampler, because the cvt structure doesn't store the original sample rates, !!! FIXME in 2.1: so we steal the ninth and tenth slot. :( */ if (cvt->filter_index >= (SDL_AUDIOCVT_MAX_FILTERS-2)) { return SDL_SetError("Too many filters needed for conversion, exceeded maximum of %d", SDL_AUDIOCVT_MAX_FILTERS-2); } cvt->filters[SDL_AUDIOCVT_MAX_FILTERS-1] = (SDL_AudioFilter) (uintptr_t) src_rate; cvt->filters[SDL_AUDIOCVT_MAX_FILTERS] = (SDL_AudioFilter) (uintptr_t) dst_rate; if (src_rate < dst_rate) { const double mult = ((double) dst_rate) / ((double) src_rate); cvt->len_mult *= (int) SDL_ceil(mult); cvt->len_ratio *= mult; } else { cvt->len_ratio /= ((double) src_rate) / ((double) dst_rate); } /* !!! FIXME: remove this if we can get the resampler to work in-place again. */ /* the buffer is big enough to hold the destination now, but we need it large enough to hold a separate scratch buffer. */ cvt->len_mult *= 2; return 1; /* added a converter. */ } static SDL_bool SDL_SupportedAudioFormat(const SDL_AudioFormat fmt) { switch (fmt) { case AUDIO_U8: case AUDIO_S8: case AUDIO_U16LSB: case AUDIO_S16LSB: case AUDIO_U16MSB: case AUDIO_S16MSB: case AUDIO_S32LSB: case AUDIO_S32MSB: case AUDIO_F32LSB: case AUDIO_F32MSB: return SDL_TRUE; /* supported. */ default: break; } return SDL_FALSE; /* unsupported. 
*/
}

/* Report whether (channels) is a channel count the converter pipeline
   knows how to up/downmix. */
static SDL_bool
SDL_SupportedChannelCount(const int channels)
{
    switch (channels) {
        case 1:  /* mono */
        case 2:  /* stereo */
        case 4:  /* quad */
        case 6:  /* 5.1 */
        case 7:  /* 6.1 */
        case 8:  /* 7.1 */
            return SDL_TRUE;  /* supported. */

        default:
            break;
    }

    return SDL_FALSE;  /* unsupported. */
}

/* Creates a set of audio filters to convert from one format to another.
   Returns 0 if no conversion is needed, 1 if the audio filter is set up,
   or -1 if an error like invalid parameter, unsupported format, etc. occurred.
*/

int
SDL_BuildAudioCVT(SDL_AudioCVT * cvt,
                  SDL_AudioFormat src_fmt, Uint8 src_channels, int src_rate,
                  SDL_AudioFormat dst_fmt, Uint8 dst_channels, int dst_rate)
{
    /* Sanity check target pointer */
    if (cvt == NULL) {
        return SDL_InvalidParamError("cvt");
    }

    /* Make sure we zero out the audio conversion before error checking */
    SDL_zerop(cvt);

    /* Validate every parameter before touching the filter list; each failure
       leaves (cvt) zeroed and returns -1 via SDL_SetError. */
    if (!SDL_SupportedAudioFormat(src_fmt)) {
        return SDL_SetError("Invalid source format");
    } else if (!SDL_SupportedAudioFormat(dst_fmt)) {
        return SDL_SetError("Invalid destination format");
    } else if (!SDL_SupportedChannelCount(src_channels)) {
        return SDL_SetError("Invalid source channels");
    } else if (!SDL_SupportedChannelCount(dst_channels)) {
        return SDL_SetError("Invalid destination channels");
    } else if (src_rate <= 0) {
        return SDL_SetError("Source rate is equal to or less than zero");
    } else if (dst_rate <= 0) {
        return SDL_SetError("Destination rate is equal to or less than zero");
    } else if (src_rate >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
        /* rates this high would overflow the resampler's internal math. */
        return SDL_SetError("Source rate is too high");
    } else if (dst_rate >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
        return SDL_SetError("Destination rate is too high");
    }

#if DEBUG_CONVERT
    printf("Build format %04x->%04x, channels %u->%u, rate %d->%d\n",
           src_fmt, dst_fmt, src_channels, dst_channels, src_rate, dst_rate);
#endif

    /* Start off with no conversion necessary */
    cvt->src_format = src_fmt;
    cvt->dst_format = dst_fmt;
    cvt->needed = 0;
cvt->filter_index = 0; SDL_zeroa(cvt->filters); cvt->len_mult = 1; cvt->len_ratio = 1.0; cvt->rate_incr = ((double) dst_rate) / ((double) src_rate); /* Make sure we've chosen audio conversion functions (MMX, scalar, etc.) */ SDL_ChooseAudioConverters(); /* Type conversion goes like this now: - byteswap to CPU native format first if necessary. - convert to native Float32 if necessary. - resample and change channel count if necessary. - convert back to native format. - byteswap back to foreign format if necessary. The expectation is we can process data faster in float32 (possibly with SIMD), and making several passes over the same buffer is likely to be CPU cache-friendly, avoiding the biggest performance hit in modern times. Previously we had (script-generated) custom converters for every data type and it was a bloat on SDL compile times and final library size. */ /* see if we can skip float conversion entirely. */ if (src_rate == dst_rate && src_channels == dst_channels) { if (src_fmt == dst_fmt) { return 0; } /* just a byteswap needed? */ if ((src_fmt & ~SDL_AUDIO_MASK_ENDIAN) == (dst_fmt & ~SDL_AUDIO_MASK_ENDIAN)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert_Byteswap) < 0) { return -1; } cvt->needed = 1; return 1; } } /* Convert data types, if necessary. Updates (cvt). */ if (SDL_BuildAudioTypeCVTToFloat(cvt, src_fmt) < 0) { return -1; /* shouldn't happen, but just in case... */ } /* Channel conversion */ if (src_channels < dst_channels) { /* Upmixing */ /* 6.1 -> 7.1 */ if (src_channels == 7) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert61To71) < 0) { return -1; } cvt->len_mult = (cvt->len_mult * 8 + 6) / 7; src_channels = 8; cvt->len_ratio = cvt->len_ratio * 8 / 7; } /* Mono -> Stereo [-> ...] 
*/ if ((src_channels == 1) && (dst_channels > 1)) { if (SDL_AddAudioCVTFilter(cvt, SDL_ConvertMonoToStereo) < 0) { return -1; } cvt->len_mult *= 2; src_channels = 2; cvt->len_ratio *= 2; } /* [Mono ->] Stereo -> 5.1 [-> 7.1] */ if ((src_channels == 2) && (dst_channels >= 6)) { if (SDL_AddAudioCVTFilter(cvt, SDL_ConvertStereoTo51) < 0) { return -1; } src_channels = 6; cvt->len_mult *= 3; cvt->len_ratio *= 3; } /* Quad -> 5.1 [-> 7.1] */ if ((src_channels == 4) && (dst_channels >= 6)) { if (SDL_AddAudioCVTFilter(cvt, SDL_ConvertQuadTo51) < 0) { return -1; } src_channels = 6; cvt->len_mult = (cvt->len_mult * 3 + 1) / 2; cvt->len_ratio *= 1.5; } /* 5.1 -> 6.1 */ if (src_channels == 6 && dst_channels == 7) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert51To61) < 0) { return -1; } src_channels = 7; cvt->len_mult = (cvt->len_mult * 7 + 5) / 6; cvt->len_ratio = cvt->len_ratio * 7 / 6; } /* [[Mono ->] Stereo ->] 5.1 -> 7.1 */ if ((src_channels == 6) && (dst_channels == 8)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert51To71) < 0) { return -1; } src_channels = 8; cvt->len_mult = (cvt->len_mult * 4 + 2) / 3; /* Should be numerically exact with every valid input to this function */ cvt->len_ratio = cvt->len_ratio * 4 / 3; } /* [Mono ->] Stereo -> Quad */ if ((src_channels == 2) && (dst_channels == 4)) { if (SDL_AddAudioCVTFilter(cvt, SDL_ConvertStereoToQuad) < 0) { return -1; } src_channels = 4; cvt->len_mult *= 2; cvt->len_ratio *= 2; } } else if (src_channels > dst_channels) { /* Downmixing */ /* 7.1 -> 6.1 */ if (src_channels == 8 && dst_channels == 7) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert71To61) < 0) { return -1; } src_channels = 7; cvt->len_ratio *= 7.0f / 8.0f; } /* 6.1 -> 5.1 [->...] 
*/ if (src_channels == 7 && dst_channels != 7) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert61To51) < 0) { return -1; } src_channels = 6; cvt->len_ratio *= 6.0f / 7.0f; } /* 7.1 -> 5.1 [-> Stereo [-> Mono]] */ /* 7.1 -> 5.1 [-> Quad] */ if ((src_channels == 8) && (dst_channels <= 6)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert71To51) < 0) { return -1; } src_channels = 6; cvt->len_ratio *= 0.75; } /* [7.1 ->] 5.1 -> Stereo [-> Mono] */ if ((src_channels == 6) && (dst_channels <= 2)) { SDL_AudioFilter filter = NULL; #if HAVE_AVX_INTRINSICS if (SDL_HasAVX()) { filter = SDL_Convert51ToStereo_AVX; } #endif #if HAVE_SSE_INTRINSICS if (!filter && SDL_HasSSE()) { filter = SDL_Convert51ToStereo_SSE; } #endif #if HAVE_NEON_INTRINSICS if (!filter && SDL_HasNEON()) { filter = SDL_Convert51ToStereo_NEON; } #endif if (!filter) { filter = SDL_Convert51ToStereo; } if (SDL_AddAudioCVTFilter(cvt, filter) < 0) { return -1; } src_channels = 2; cvt->len_ratio /= 3; } /* 5.1 -> Quad */ if ((src_channels == 6) && (dst_channels == 4)) { if (SDL_AddAudioCVTFilter(cvt, SDL_Convert51ToQuad) < 0) { return -1; } src_channels = 4; cvt->len_ratio = cvt->len_ratio * 2 / 3; } /* Quad -> Stereo [-> Mono] */ if ((src_channels == 4) && (dst_channels <= 2)) { if (SDL_AddAudioCVTFilter(cvt, SDL_ConvertQuadToStereo) < 0) { return -1; } src_channels = 2; cvt->len_ratio /= 2; } /* [... ->] Stereo -> Mono */ if ((src_channels == 2) && (dst_channels == 1)) { SDL_AudioFilter filter = NULL; #if HAVE_SSE3_INTRINSICS if (SDL_HasSSE3()) { filter = SDL_ConvertStereoToMono_SSE3; } #endif if (!filter) { filter = SDL_ConvertStereoToMono; } if (SDL_AddAudioCVTFilter(cvt, filter) < 0) { return -1; } src_channels = 1; cvt->len_ratio /= 2; } } if (src_channels != dst_channels) { /* All combinations of supported channel counts should have been handled by now, but let's be defensive */ return SDL_SetError("Invalid channel combination"); } /* Do rate conversion, if necessary. Updates (cvt). 
*/ if (SDL_BuildAudioResampleCVT(cvt, dst_channels, src_rate, dst_rate) < 0) { return -1; /* shouldn't happen, but just in case... */ } /* Move to final data type. */ if (SDL_BuildAudioTypeCVTFromFloat(cvt, dst_fmt) < 0) { return -1; /* shouldn't happen, but just in case... */ } cvt->needed = (cvt->filter_index != 0); return (cvt->needed); } typedef int (*SDL_ResampleAudioStreamFunc)(SDL_AudioStream *stream, const void *inbuf, const int inbuflen, void *outbuf, const int outbuflen); typedef void (*SDL_ResetAudioStreamResamplerFunc)(SDL_AudioStream *stream); typedef void (*SDL_CleanupAudioStreamResamplerFunc)(SDL_AudioStream *stream); struct _SDL_AudioStream { SDL_AudioCVT cvt_before_resampling; SDL_AudioCVT cvt_after_resampling; SDL_DataQueue *queue; SDL_bool first_run; Uint8 *staging_buffer; int staging_buffer_size; int staging_buffer_filled; Uint8 *work_buffer_base; /* maybe unaligned pointer from SDL_realloc(). */ int work_buffer_len; int src_sample_frame_size; SDL_AudioFormat src_format; Uint8 src_channels; int src_rate; int dst_sample_frame_size; SDL_AudioFormat dst_format; Uint8 dst_channels; int dst_rate; double rate_incr; Uint8 pre_resample_channels; int packetlen; int resampler_padding_samples; float *resampler_padding; void *resampler_state; SDL_ResampleAudioStreamFunc resampler_func; SDL_ResetAudioStreamResamplerFunc reset_resampler_func; SDL_CleanupAudioStreamResamplerFunc cleanup_resampler_func; }; static Uint8 * EnsureStreamBufferSize(SDL_AudioStream *stream, const int newlen) { Uint8 *ptr; size_t offset; if (stream->work_buffer_len >= newlen) { ptr = stream->work_buffer_base; } else { ptr = (Uint8 *) SDL_realloc(stream->work_buffer_base, newlen + 32); if (!ptr) { SDL_OutOfMemory(); return NULL; } /* Make sure we're aligned to 16 bytes for SIMD code. */ stream->work_buffer_base = ptr; stream->work_buffer_len = newlen; } offset = ((size_t) ptr) & 15; return offset ? 
ptr + (16 - offset) : ptr; } #ifdef HAVE_LIBSAMPLERATE_H static int SDL_ResampleAudioStream_SRC(SDL_AudioStream *stream, const void *_inbuf, const int inbuflen, void *_outbuf, const int outbuflen) { const float *inbuf = (const float *) _inbuf; float *outbuf = (float *) _outbuf; const int framelen = sizeof(float) * stream->pre_resample_channels; SRC_STATE *state = (SRC_STATE *)stream->resampler_state; SRC_DATA data; int result; SDL_assert(inbuf != ((const float *) outbuf)); /* SDL_AudioStreamPut() shouldn't allow in-place resamples. */ data.data_in = (float *)inbuf; /* Older versions of libsamplerate had a non-const pointer, but didn't write to it */ data.input_frames = inbuflen / framelen; data.input_frames_used = 0; data.data_out = outbuf; data.output_frames = outbuflen / framelen; data.end_of_input = 0; data.src_ratio = stream->rate_incr; result = SRC_src_process(state, &data); if (result != 0) { SDL_SetError("src_process() failed: %s", SRC_src_strerror(result)); return 0; } /* If this fails, we need to store them off somewhere */ SDL_assert(data.input_frames_used == data.input_frames); return data.output_frames_gen * (sizeof(float) * stream->pre_resample_channels); } static void SDL_ResetAudioStreamResampler_SRC(SDL_AudioStream *stream) { SRC_src_reset((SRC_STATE *)stream->resampler_state); } static void SDL_CleanupAudioStreamResampler_SRC(SDL_AudioStream *stream) { SRC_STATE *state = (SRC_STATE *)stream->resampler_state; if (state) { SRC_src_delete(state); } stream->resampler_state = NULL; stream->resampler_func = NULL; stream->reset_resampler_func = NULL; stream->cleanup_resampler_func = NULL; } static SDL_bool SetupLibSampleRateResampling(SDL_AudioStream *stream) { int result = 0; SRC_STATE *state = NULL; if (SRC_available) { state = SRC_src_new(SRC_converter, stream->pre_resample_channels, &result); if (!state) { SDL_SetError("src_new() failed: %s", SRC_src_strerror(result)); } } if (!state) { SDL_CleanupAudioStreamResampler_SRC(stream); return SDL_FALSE; } 
stream->resampler_state = state; stream->resampler_func = SDL_ResampleAudioStream_SRC; stream->reset_resampler_func = SDL_ResetAudioStreamResampler_SRC; stream->cleanup_resampler_func = SDL_CleanupAudioStreamResampler_SRC; return SDL_TRUE; } #endif /* HAVE_LIBSAMPLERATE_H */ static int SDL_ResampleAudioStream(SDL_AudioStream *stream, const void *_inbuf, const int inbuflen, void *_outbuf, const int outbuflen) { const Uint8 *inbufend = ((const Uint8 *) _inbuf) + inbuflen; const float *inbuf = (const float *) _inbuf; float *outbuf = (float *) _outbuf; const int chans = (int) stream->pre_resample_channels; const int inrate = stream->src_rate; const int outrate = stream->dst_rate; const int paddingsamples = stream->resampler_padding_samples; const int paddingbytes = paddingsamples * sizeof (float); float *lpadding = (float *) stream->resampler_state; const float *rpadding = (const float *) inbufend; /* we set this up so there are valid padding samples at the end of the input buffer. */ const int cpy = SDL_min(inbuflen, paddingbytes); int retval; SDL_assert(inbuf != ((const float *) outbuf)); /* SDL_AudioStreamPut() shouldn't allow in-place resamples. */ retval = SDL_ResampleAudio(chans, inrate, outrate, lpadding, rpadding, inbuf, inbuflen, outbuf, outbuflen); /* update our left padding with end of current input, for next run. */ SDL_memcpy((lpadding + paddingsamples) - (cpy / sizeof (float)), inbufend - cpy, cpy); return retval; } static void SDL_ResetAudioStreamResampler(SDL_AudioStream *stream) { /* set all the padding to silence. 
*/ const int len = stream->resampler_padding_samples; SDL_memset(stream->resampler_state, '\0', len * sizeof (float)); } static void SDL_CleanupAudioStreamResampler(SDL_AudioStream *stream) { SDL_free(stream->resampler_state); } SDL_AudioStream * SDL_NewAudioStream(const SDL_AudioFormat src_format, const Uint8 src_channels, const int src_rate, const SDL_AudioFormat dst_format, const Uint8 dst_channels, const int dst_rate) { const int packetlen = 4096; /* !!! FIXME: good enough for now. */ Uint8 pre_resample_channels; SDL_AudioStream *retval; retval = (SDL_AudioStream *) SDL_calloc(1, sizeof (SDL_AudioStream)); if (!retval) { return NULL; } /* If increasing channels, do it after resampling, since we'd just do more work to resample duplicate channels. If we're decreasing, do it first so we resample the interpolated data instead of interpolating the resampled data (!!! FIXME: decide if that works in practice, though!). */ pre_resample_channels = SDL_min(src_channels, dst_channels); retval->first_run = SDL_TRUE; retval->src_sample_frame_size = (SDL_AUDIO_BITSIZE(src_format) / 8) * src_channels; retval->src_format = src_format; retval->src_channels = src_channels; retval->src_rate = src_rate; retval->dst_sample_frame_size = (SDL_AUDIO_BITSIZE(dst_format) / 8) * dst_channels; retval->dst_format = dst_format; retval->dst_channels = dst_channels; retval->dst_rate = dst_rate; retval->pre_resample_channels = pre_resample_channels; retval->packetlen = packetlen; retval->rate_incr = ((double) dst_rate) / ((double) src_rate); retval->resampler_padding_samples = ResamplerPadding(retval->src_rate, retval->dst_rate) * pre_resample_channels; retval->resampler_padding = (float *) SDL_calloc(retval->resampler_padding_samples ? 
retval->resampler_padding_samples : 1, sizeof (float)); if (retval->resampler_padding == NULL) { SDL_FreeAudioStream(retval); SDL_OutOfMemory(); return NULL; } retval->staging_buffer_size = ((retval->resampler_padding_samples / retval->pre_resample_channels) * retval->src_sample_frame_size); if (retval->staging_buffer_size > 0) { retval->staging_buffer = (Uint8 *) SDL_malloc(retval->staging_buffer_size); if (retval->staging_buffer == NULL) { SDL_FreeAudioStream(retval); SDL_OutOfMemory(); return NULL; } } /* Not resampling? It's an easy conversion (and maybe not even that!) */ if (src_rate == dst_rate) { retval->cvt_before_resampling.needed = SDL_FALSE; if (SDL_BuildAudioCVT(&retval->cvt_after_resampling, src_format, src_channels, dst_rate, dst_format, dst_channels, dst_rate) < 0) { SDL_FreeAudioStream(retval); return NULL; /* SDL_BuildAudioCVT should have called SDL_SetError. */ } } else { /* Don't resample at first. Just get us to Float32 format. */ /* !!! FIXME: convert to int32 on devices without hardware float. */ if (SDL_BuildAudioCVT(&retval->cvt_before_resampling, src_format, src_channels, src_rate, AUDIO_F32SYS, pre_resample_channels, src_rate) < 0) { SDL_FreeAudioStream(retval); return NULL; /* SDL_BuildAudioCVT should have called SDL_SetError. */ } #ifdef HAVE_LIBSAMPLERATE_H SetupLibSampleRateResampling(retval); #endif if (!retval->resampler_func) { retval->resampler_state = SDL_calloc(retval->resampler_padding_samples, sizeof (float)); if (!retval->resampler_state) { SDL_FreeAudioStream(retval); SDL_OutOfMemory(); return NULL; } if (SDL_PrepareResampleFilter() < 0) { SDL_free(retval->resampler_state); retval->resampler_state = NULL; SDL_FreeAudioStream(retval); return NULL; } retval->resampler_func = SDL_ResampleAudioStream; retval->reset_resampler_func = SDL_ResetAudioStreamResampler; retval->cleanup_resampler_func = SDL_CleanupAudioStreamResampler; } /* Convert us to the final format after resampling. 
*/ if (SDL_BuildAudioCVT(&retval->cvt_after_resampling, AUDIO_F32SYS, pre_resample_channels, dst_rate, dst_format, dst_channels, dst_rate) < 0) { SDL_FreeAudioStream(retval); return NULL; /* SDL_BuildAudioCVT should have called SDL_SetError. */ } } retval->queue = SDL_NewDataQueue(packetlen, packetlen * 2); if (!retval->queue) { SDL_FreeAudioStream(retval); return NULL; /* SDL_NewDataQueue should have called SDL_SetError. */ } return retval; } static int SDL_AudioStreamPutInternal(SDL_AudioStream *stream, const void *buf, int len, int *maxputbytes) { int buflen = len; int workbuflen; Uint8 *workbuf; Uint8 *resamplebuf = NULL; int resamplebuflen = 0; int neededpaddingbytes; int paddingbytes; /* !!! FIXME: several converters can take advantage of SIMD, but only !!! FIXME: if the data is aligned to 16 bytes. EnsureStreamBufferSize() !!! FIXME: guarantees the buffer will align, but the !!! FIXME: converters will iterate over the data backwards if !!! FIXME: the output grows, and this means we won't align if buflen !!! FIXME: isn't a multiple of 16. In these cases, we should chop off !!! FIXME: a few samples at the end and convert them separately. */ /* no padding prepended on first run. */ neededpaddingbytes = stream->resampler_padding_samples * sizeof (float); paddingbytes = stream->first_run ? 0 : neededpaddingbytes; stream->first_run = SDL_FALSE; /* Make sure the work buffer can hold all the data we need at once... */ workbuflen = buflen; if (stream->cvt_before_resampling.needed) { workbuflen *= stream->cvt_before_resampling.len_mult; } if (stream->dst_rate != stream->src_rate) { /* resamples can't happen in place, so make space for second buf. 
*/ const int framesize = stream->pre_resample_channels * sizeof (float); const int frames = workbuflen / framesize; resamplebuflen = ((int) SDL_ceil(frames * stream->rate_incr)) * framesize; #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: will resample %d bytes to %d (ratio=%.6f)\n", workbuflen, resamplebuflen, stream->rate_incr); #endif workbuflen += resamplebuflen; } if (stream->cvt_after_resampling.needed) { /* !!! FIXME: buffer might be big enough already? */ workbuflen *= stream->cvt_after_resampling.len_mult; } workbuflen += neededpaddingbytes; #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: Putting %d bytes of preconverted audio, need %d byte work buffer\n", buflen, workbuflen); #endif workbuf = EnsureStreamBufferSize(stream, workbuflen); if (!workbuf) { return -1; /* probably out of memory. */ } resamplebuf = workbuf; /* default if not resampling. */ SDL_memcpy(workbuf + paddingbytes, buf, buflen); if (stream->cvt_before_resampling.needed) { stream->cvt_before_resampling.buf = workbuf + paddingbytes; stream->cvt_before_resampling.len = buflen; if (SDL_ConvertAudio(&stream->cvt_before_resampling) == -1) { return -1; /* uhoh! */ } buflen = stream->cvt_before_resampling.len_cvt; #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: After initial conversion we have %d bytes\n", buflen); #endif } if (stream->dst_rate != stream->src_rate) { /* save off some samples at the end; they are used for padding now so the resampler is coherent and then used at the start of the next put operation. Prepend last put operation's padding, too. */ /* prepend prior put's padding. :P */ if (paddingbytes) { SDL_memcpy(workbuf, stream->resampler_padding, paddingbytes); buflen += paddingbytes; } /* save off the data at the end for the next run. */ SDL_memcpy(stream->resampler_padding, workbuf + (buflen - neededpaddingbytes), neededpaddingbytes); resamplebuf = workbuf + buflen; /* skip to second piece of workbuf. 
*/ SDL_assert(buflen >= neededpaddingbytes); if (buflen > neededpaddingbytes) { buflen = stream->resampler_func(stream, workbuf, buflen - neededpaddingbytes, resamplebuf, resamplebuflen); } else { buflen = 0; } #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: After resampling we have %d bytes\n", buflen); #endif } if (stream->cvt_after_resampling.needed && (buflen > 0)) { stream->cvt_after_resampling.buf = resamplebuf; stream->cvt_after_resampling.len = buflen; if (SDL_ConvertAudio(&stream->cvt_after_resampling) == -1) { return -1; /* uhoh! */ } buflen = stream->cvt_after_resampling.len_cvt; #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: After final conversion we have %d bytes\n", buflen); #endif } #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: Final output is %d bytes\n", buflen); #endif if (maxputbytes) { const int maxbytes = *maxputbytes; if (buflen > maxbytes) buflen = maxbytes; *maxputbytes -= buflen; } /* resamplebuf holds the final output, even if we didn't resample. */ return buflen ? SDL_WriteToDataQueue(stream->queue, resamplebuf, buflen) : 0; } int SDL_AudioStreamPut(SDL_AudioStream *stream, const void *buf, int len) { /* !!! FIXME: several converters can take advantage of SIMD, but only !!! FIXME: if the data is aligned to 16 bytes. EnsureStreamBufferSize() !!! FIXME: guarantees the buffer will align, but the !!! FIXME: converters will iterate over the data backwards if !!! FIXME: the output grows, and this means we won't align if buflen !!! FIXME: isn't a multiple of 16. In these cases, we should chop off !!! FIXME: a few samples at the end and convert them separately. */ #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: wants to put %d preconverted bytes\n", buflen); #endif if (!stream) { return SDL_InvalidParamError("stream"); } else if (!buf) { return SDL_InvalidParamError("buf"); } else if (len == 0) { return 0; /* nothing to do. 
*/ } else if ((len % stream->src_sample_frame_size) != 0) { return SDL_SetError("Can't add partial sample frames"); } if (!stream->cvt_before_resampling.needed && (stream->dst_rate == stream->src_rate) && !stream->cvt_after_resampling.needed) { #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: no conversion needed at all, queueing %d bytes.\n", len); #endif return SDL_WriteToDataQueue(stream->queue, buf, len); } while (len > 0) { int amount; /* If we don't have a staging buffer or we're given enough data that we don't need to store it for later, skip the staging process. */ if (!stream->staging_buffer_filled && len >= stream->staging_buffer_size) { return SDL_AudioStreamPutInternal(stream, buf, len, NULL); } /* If there's not enough data to fill the staging buffer, just save it */ if ((stream->staging_buffer_filled + len) < stream->staging_buffer_size) { SDL_memcpy(stream->staging_buffer + stream->staging_buffer_filled, buf, len); stream->staging_buffer_filled += len; return 0; } /* Fill the staging buffer, process it, and continue */ amount = (stream->staging_buffer_size - stream->staging_buffer_filled); SDL_assert(amount > 0); SDL_memcpy(stream->staging_buffer + stream->staging_buffer_filled, buf, amount); stream->staging_buffer_filled = 0; if (SDL_AudioStreamPutInternal(stream, stream->staging_buffer, stream->staging_buffer_size, NULL) < 0) { return -1; } buf = (void *)((Uint8 *)buf + amount); len -= amount; } return 0; } int SDL_AudioStreamFlush(SDL_AudioStream *stream) { if (!stream) { return SDL_InvalidParamError("stream"); } #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: flushing! staging_buffer_filled=%d bytes\n", stream->staging_buffer_filled); #endif /* shouldn't use a staging buffer if we're not resampling. */ SDL_assert((stream->dst_rate != stream->src_rate) || (stream->staging_buffer_filled == 0)); if (stream->staging_buffer_filled > 0) { /* push the staging buffer + silence. 
We need to flush out not just the staging buffer, but the piece that the stream was saving off for right-side resampler padding. */ const SDL_bool first_run = stream->first_run; const int filled = stream->staging_buffer_filled; int actual_input_frames = filled / stream->src_sample_frame_size; if (!first_run) actual_input_frames += stream->resampler_padding_samples / stream->pre_resample_channels; if (actual_input_frames > 0) { /* don't bother if nothing to flush. */ /* This is how many bytes we're expecting without silence appended. */ int flush_remaining = ((int) SDL_ceil(actual_input_frames * stream->rate_incr)) * stream->dst_sample_frame_size; #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: flushing with padding to get max %d bytes!\n", flush_remaining); #endif SDL_memset(stream->staging_buffer + filled, '\0', stream->staging_buffer_size - filled); if (SDL_AudioStreamPutInternal(stream, stream->staging_buffer, stream->staging_buffer_size, &flush_remaining) < 0) { return -1; } /* we have flushed out (or initially filled) the pending right-side resampler padding, but we need to push more silence to guarantee the staging buffer is fully flushed out, too. */ SDL_memset(stream->staging_buffer, '\0', filled); if (SDL_AudioStreamPutInternal(stream, stream->staging_buffer, stream->staging_buffer_size, &flush_remaining) < 0) { return -1; } } } stream->staging_buffer_filled = 0; stream->first_run = SDL_TRUE; return 0; } /* get converted/resampled data from the stream */ int SDL_AudioStreamGet(SDL_AudioStream *stream, void *buf, int len) { #if DEBUG_AUDIOSTREAM printf("AUDIOSTREAM: want to get %d converted bytes\n", len); #endif if (!stream) { return SDL_InvalidParamError("stream"); } else if (!buf) { return SDL_InvalidParamError("buf"); } else if (len <= 0) { return 0; /* nothing to do. 
*/
    } else if ((len % stream->dst_sample_frame_size) != 0) {
        /* callers must request whole sample frames of the destination format. */
        return SDL_SetError("Can't request partial sample frames");
    }

    /* Everything in the queue is already fully converted; just hand it out. */
    return (int) SDL_ReadFromDataQueue(stream->queue, buf, len);
}

/* number of converted/resampled bytes available */
int
SDL_AudioStreamAvailable(SDL_AudioStream *stream)
{
    /* NULL stream is tolerated here and reports zero bytes available. */
    return stream ? (int) SDL_CountDataQueue(stream->queue) : 0;
}

/* Drop all queued data and reset conversion state, as if freshly created. */
void
SDL_AudioStreamClear(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
    } else {
        SDL_ClearDataQueue(stream->queue, stream->packetlen * 2);
        /* reset any resampler history so old audio can't bleed into new data. */
        if (stream->reset_resampler_func) {
            stream->reset_resampler_func(stream);
        }
        stream->first_run = SDL_TRUE;
        stream->staging_buffer_filled = 0;
    }
}

/* dispose of a stream */
void
SDL_FreeAudioStream(SDL_AudioStream *stream)
{
    if (stream) {
        /* resampler cleanup first: it may own resampler_state. */
        if (stream->cleanup_resampler_func) {
            stream->cleanup_resampler_func(stream);
        }
        SDL_FreeDataQueue(stream->queue);
        SDL_free(stream->staging_buffer);
        SDL_free(stream->work_buffer_base);
        SDL_free(stream->resampler_padding);
        SDL_free(stream);
    }
}

/* vi: set ts=4 sw=4 expandtab: */
173598.c
/********************************************************************* * * Filename: af_irda.c * Version: 0.9 * Description: IrDA sockets implementation * Status: Stable * Author: Dag Brattli <[email protected]> * Created at: Sun May 31 10:12:43 1998 * Modified at: Sat Dec 25 21:10:23 1999 * Modified by: Dag Brattli <[email protected]> * Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc. * * Copyright (c) 1999 Dag Brattli <[email protected]> * Copyright (c) 1999-2003 Jean Tourrilhes <[email protected]> * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Linux-IrDA now supports four different types of IrDA sockets: * * o SOCK_STREAM: TinyTP connections with SAR disabled. The * max SDU size is 0 for conn. of this type * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. 
TTP may
 *                      fragment the messages, but will preserve
 *                      the message boundaries
 *    o SOCK_DGRAM:     IRDAPROTO_UNITDATA: TinyTP connections with Unitdata
 *                      (unreliable) transfers
 *                      IRDAPROTO_ULTRA: Connectionless and unreliable data
 *
 ********************************************************************/

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/irda.h>
#include <linux/poll.h>

#include <asm/ioctls.h>		/* TIOCOUTQ, TIOCINQ */
#include <asm/uaccess.h>

#include <net/sock.h>
#include <net/tcp_states.h>

#include <net/irda/af_irda.h>

/* Forward declaration: the PF_IRDA socket-family create hook. */
static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);

/* Per-socket-type operation tables, defined later in this file. */
static const struct proto_ops irda_stream_ops;
static const struct proto_ops irda_seqpacket_ops;
static const struct proto_ops irda_dgram_ops;

#ifdef CONFIG_IRDA_ULTRA
static const struct proto_ops irda_ultra_ops;
#define ULTRA_MAX_DATA 382
#endif /* CONFIG_IRDA_ULTRA */

#define IRDA_MAX_HEADER (TTP_MAX_HEADER)

/*
 * Function irda_data_indication (instance, sap, skb)
 *
 *    Received some data from TinyTP. Just queue it on the receive queue
 *
 *    TinyTP callback: (instance) is the socket this data is for; on queue
 *    failure the error is returned so TTP can requeue the skb.
 */
static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
{
	struct irda_sock *self;
	struct sock *sk;
	int err;

	/* (instance) is the irda_sock, whose first member is the struct sock,
	 * so both casts alias the same object. */
	self = instance;
	sk = instance;

	err = sock_queue_rcv_skb(sk, skb);
	if (err) {
		pr_debug("%s(), error: no more mem!\n", __func__);
		/* receive buffer full: throttle the TTP flow until drained. */
		self->rx_flow = FLOW_STOP;

		/* When we return error, TTP will need to requeue the skb */
		return err;
	}

	return 0;
}

/*
 * Function irda_disconnect_indication (instance, sap, reason, skb)
 *
 *    Connection has been closed.
Check reason to find out why
 *
 */
static void irda_disconnect_indication(void *instance, void *sap,
				       LM_REASON reason, struct sk_buff *skb)
{
	struct irda_sock *self;
	struct sock *sk;

	self = instance;

	pr_debug("%s(%p)\n", __func__, self);

	/* Don't care about it, but let's not leak it */
	if (skb)
		dev_kfree_skb(skb);

	sk = instance;
	if (sk == NULL) {
		pr_debug("%s(%p) : BUG : sk is NULL\n", __func__, self);
		return;
	}

	/* Prevent race conditions with irda_release() and irda_shutdown() */
	bh_lock_sock(sk);
	if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
		sk->sk_state     = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);

		/* Close our TSAP.
		 * If we leave it open, IrLMP put it back into the list of
		 * unconnected LSAPs. The problem is that any incoming request
		 * can then be matched to this socket (and it will be, because
		 * it is at the head of the list). This would prevent any
		 * listening socket waiting on the same TSAP to get those
		 * requests. Some apps forget to close sockets, or hang to it
		 * a bit too long, so we may stay in this dead state long
		 * enough to be noticed...
		 * Note : all socket function do check sk->sk_state, so we are
		 * safe...
		 * Jean II
		 */
		if (self->tsap) {
			irttp_close_tsap(self->tsap);
			self->tsap = NULL;
		}
	}
	bh_unlock_sock(sk);

	/* Note : once we are there, there is not much you want to do
	 * with the socket anymore, apart from closing it.
	 * For example, bind() and connect() won't reset sk->sk_err,
	 * sk->sk_shutdown and sk->sk_flags to valid values...
	 * Jean II
	 */
}

/*
 * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb)
 *
 *    Connections has been confirmed by the remote device
 *
 * IrTTP callback; moves the socket to TCP_ESTABLISHED and records the
 * negotiated header/SDU sizes used by the sendmsg paths.
 */
static void irda_connect_confirm(void *instance, void *sap,
				 struct qos_info *qos,
				 __u32 max_sdu_size, __u8 max_header_size,
				 struct sk_buff *skb)
{
	struct irda_sock *self;
	struct sock *sk;

	self = instance;

	pr_debug("%s(%p)\n", __func__, self);

	sk = instance;
	if (sk == NULL) {
		dev_kfree_skb(skb);
		return;
	}

	/* The confirm skb is not passed to the receive queue; it is freed
	 * here (see the historical note kept below). */
	dev_kfree_skb(skb);
	// Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);

	/* How much header space do we need to reserve */
	self->max_header_size = max_header_size;

	/* IrTTP max SDU size in transmit direction */
	self->max_sdu_size_tx = max_sdu_size;

	/* Find out what the largest chunk of data that we can transmit is */
	switch (sk->sk_type) {
	case SOCK_STREAM:
		if (max_sdu_size != 0) {
			net_err_ratelimited("%s: max_sdu_size must be 0\n",
					    __func__);
			return;
		}
		self->max_data_size = irttp_get_max_seg_size(self->tsap);
		break;
	case SOCK_SEQPACKET:
		if (max_sdu_size == 0) {
			net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
					    __func__);
			return;
		}
		self->max_data_size = max_sdu_size;
		break;
	default:
		self->max_data_size = irttp_get_max_seg_size(self->tsap);
	}

	pr_debug("%s(), max_data_size=%d\n", __func__, self->max_data_size);

	memcpy(&self->qos_tx, qos, sizeof(struct qos_info));

	/* We are now connected! */
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_state_change(sk);
}

/*
 * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata)
 *
 *    Incoming connection
 *
 * IrTTP callback on a listening socket; the connect-request skb is queued
 * on the receive queue so irda_accept() can pick it up.
 */
static void irda_connect_indication(void *instance, void *sap,
				    struct qos_info *qos, __u32 max_sdu_size,
				    __u8 max_header_size, struct sk_buff *skb)
{
	struct irda_sock *self;
	struct sock *sk;

	self = instance;

	pr_debug("%s(%p)\n", __func__, self);

	sk = instance;
	if (sk == NULL) {
		dev_kfree_skb(skb);
		return;
	}

	/* How much header space do we need to reserve */
	self->max_header_size = max_header_size;

	/* IrTTP max SDU size in transmit direction */
	self->max_sdu_size_tx = max_sdu_size;

	/* Find out what the largest chunk of data that we can transmit is */
	switch (sk->sk_type) {
	case SOCK_STREAM:
		if (max_sdu_size != 0) {
			net_err_ratelimited("%s: max_sdu_size must be 0\n",
					    __func__);
			kfree_skb(skb);
			return;
		}
		self->max_data_size = irttp_get_max_seg_size(self->tsap);
		break;
	case SOCK_SEQPACKET:
		if (max_sdu_size == 0) {
			net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
					    __func__);
			kfree_skb(skb);
			return;
		}
		self->max_data_size = max_sdu_size;
		break;
	default:
		self->max_data_size = irttp_get_max_seg_size(self->tsap);
	}

	pr_debug("%s(), max_data_size=%d\n", __func__, self->max_data_size);

	memcpy(&self->qos_tx, qos, sizeof(struct qos_info));

	/* Ownership of the skb passes to the receive queue here */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_state_change(sk);
}

/*
 * Function irda_connect_response (handle)
 *
 *    Accept incoming connection
 *
 * Sends the TinyTP connect response; silently gives up if the response
 * skb cannot be allocated.
 */
static void irda_connect_response(struct irda_sock *self)
{
	struct sk_buff *skb;

	skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL);
	if (skb == NULL) {
		pr_debug("%s() Unable to allocate sk_buff!\n", __func__);
		return;
	}

	/* Reserve space for MUX_CONTROL and LAP header */
	skb_reserve(skb, IRDA_MAX_HEADER);

	irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
}

/*
 * Function irda_flow_indication (instance, sap, flow)
 *
 *    Used by TinyTP to tell us if it can accept more data or not
 *
 */
static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct irda_sock *self;
	struct sock *sk;

	self = instance;
	sk = instance;
	BUG_ON(sk == NULL);

	switch (flow) {
	case FLOW_STOP:
		pr_debug("%s(), IrTTP wants us to slow down\n", __func__);
		self->tx_flow = flow;
		break;
	case FLOW_START:
		self->tx_flow = flow;
		pr_debug("%s(), IrTTP wants us to start again\n", __func__);
		/* Senders block in irda_sendmsg() on this wait queue */
		wake_up_interruptible(sk_sleep(sk));
		break;
	default:
		pr_debug("%s(), Unknown flow command!\n", __func__);
		/* Unknown flow command, better stop */
		self->tx_flow = flow;
		break;
	}
}

/*
 * Function irda_getvalue_confirm (obj_id, value, priv)
 *
 *    Got answer from remote LM-IAS, just pass object to requester...
 *
 * Note : duplicate from above, but we need our own version that
 * doesn't touch the dtsap_sel and save the full value structure...
 */
static void irda_getvalue_confirm(int result, __u16 obj_id,
				  struct ias_value *value, void *priv)
{
	struct irda_sock *self;

	self = priv;
	if (!self) {
		net_warn_ratelimited("%s: lost myself!\n", __func__);
		return;
	}

	pr_debug("%s(%p)\n", __func__, self);

	/* We probably don't need to make any more queries */
	iriap_close(self->iriap);
	self->iriap = NULL;

	/* Check if request succeeded */
	if (result != IAS_SUCCESS) {
		pr_debug("%s(), IAS query failed! (%d)\n", __func__,
			 result);
		self->errno = result;	/* We really need it later */

		/* Wake up any processes waiting for result */
		wake_up_interruptible(&self->query_wait);
		return;
	}

	/* Pass the object to the caller (so the caller must delete it) */
	self->ias_result = value;
	self->errno = 0;

	/* Wake up any processes waiting for result */
	wake_up_interruptible(&self->query_wait);
}

/*
 * Function irda_selective_discovery_indication (discovery)
 *
 *    Got a selective discovery indication from IrLMP.
 *
 * IrLMP is telling us that this node is new and matching our hint bit
 * filter. Wake up any process waiting for answer...
 */
static void irda_selective_discovery_indication(discinfo_t *discovery,
						DISCOVERY_MODE mode,
						void *priv)
{
	struct irda_sock *self;

	self = priv;
	if (!self) {
		net_warn_ratelimited("%s: lost myself!\n", __func__);
		return;
	}

	/* Pass parameter to the caller */
	self->cachedaddr = discovery->daddr;

	/* Wake up process if its waiting for device to be discovered */
	wake_up_interruptible(&self->query_wait);
}

/*
 * Function irda_discovery_timeout (priv)
 *
 *    Timeout in the selective discovery process
 *
 * We were waiting for a node to be discovered, but nothing has come up
 * so far. Wake up the user and tell him that we failed...
 */
static void irda_discovery_timeout(u_long priv)
{
	struct irda_sock *self;

	self = (struct irda_sock *) priv;
	BUG_ON(self == NULL);

	/* Nothing for the caller */
	self->cachelog = NULL;
	self->cachedaddr = 0;
	self->errno = -ETIME;

	/* Wake up process if its still waiting... */
	wake_up_interruptible(&self->query_wait);
}

/*
 * Function irda_open_tsap (self)
 *
 *    Open local Transport Service Access Point (TSAP)
 *
 * Returns 0 on success, -EBUSY if a TSAP is already open on this socket,
 * or -ENOMEM if IrTTP cannot allocate one.
 */
static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
{
	notify_t notify;

	if (self->tsap) {
		pr_debug("%s: busy!\n", __func__);
		return -EBUSY;
	}

	/* Initialize callbacks to be used by the IrDA stack */
	irda_notify_init(&notify);
	notify.connect_confirm       = irda_connect_confirm;
	notify.connect_indication    = irda_connect_indication;
	notify.disconnect_indication = irda_disconnect_indication;
	notify.data_indication       = irda_data_indication;
	notify.udata_indication      = irda_data_indication;
	notify.flow_indication       = irda_flow_indication;
	notify.instance = self;
	/* NOTE(review): strncpy() does not NUL-terminate when 'name' is
	 * NOTIFY_MAX_NAME chars or longer - presumably notify.name has
	 * room for the terminator; confirm against the notify_t layout. */
	strncpy(notify.name, name, NOTIFY_MAX_NAME);

	self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
				     &notify);
	if (self->tsap == NULL) {
		pr_debug("%s(), Unable to allocate TSAP!\n", __func__);
		return -ENOMEM;
	}
	/* Remember which TSAP selector we actually got */
	self->stsap_sel = self->tsap->stsap_sel;

	return 0;
}

/*
 * Function irda_open_lsap (self)
 *
 *    Open local Link Service
Access Point (LSAP). Used for opening Ultra
 * sockets
 *
 * Returns 0 on success, -EBUSY if an LSAP is already open, or -ENOMEM
 * if IrLMP cannot allocate one.
 */
#ifdef CONFIG_IRDA_ULTRA
static int irda_open_lsap(struct irda_sock *self, int pid)
{
	notify_t notify;

	if (self->lsap) {
		net_warn_ratelimited("%s(), busy!\n", __func__);
		return -EBUSY;
	}

	/* Initialize callbacks to be used by the IrDA stack */
	irda_notify_init(&notify);
	/* Ultra is connectionless: only the unreliable-data callback is set */
	notify.udata_indication	= irda_data_indication;
	notify.instance = self;
	strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);

	self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
	if (self->lsap == NULL) {
		pr_debug("%s(), Unable to allocate LSAP!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irda_find_lsap_sel (self, name)
 *
 *    Try to lookup LSAP selector in remote LM-IAS
 *
 * Basically, we start a IAP query, and then go to sleep. When the query
 * return, irda_getvalue_confirm will wake us up, and we can examine the
 * result of the query...
 * Note that in some case, the query fail even before we go to sleep,
 * creating some races...
 */
static int irda_find_lsap_sel(struct irda_sock *self, char *name)
{
	pr_debug("%s(%p, %s)\n", __func__, self, name);

	if (self->iriap) {
		net_warn_ratelimited("%s(): busy with a previous query\n",
				     __func__);
		return -EBUSY;
	}

	/* irda_getvalue_confirm() will close the iriap handle and set
	 * self->errno / self->ias_result before waking us up. */
	self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
				 irda_getvalue_confirm);
	if (self->iriap == NULL)
		return -ENOMEM;

	/* Treat unexpected wakeup as disconnect */
	self->errno = -EHOSTUNREACH;

	/* Query remote LM-IAS */
	iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
				      name, "IrDA:TinyTP:LsapSel");

	/* Wait for answer, if not yet finished (or failed) */
	if (wait_event_interruptible(self->query_wait,
				     (self->iriap == NULL)))
		/* Treat signals as disconnect */
		return -EHOSTUNREACH;

	/* Check what happened */
	if (self->errno) {
		/* Requested object/attribute doesn't exist */
		if ((self->errno == IAS_CLASS_UNKNOWN) ||
		    (self->errno == IAS_ATTRIB_UNKNOWN))
			return -EADDRNOTAVAIL;
		else
			return -EHOSTUNREACH;
	}

	/* Get the remote TSAP selector */
	switch (self->ias_result->type) {
	case IAS_INTEGER:
		pr_debug("%s() int=%d\n",
			 __func__, self->ias_result->t.integer);

		if (self->ias_result->t.integer != -1)
			self->dtsap_sel = self->ias_result->t.integer;
		else
			self->dtsap_sel = 0;
		break;
	default:
		self->dtsap_sel = 0;
		pr_debug("%s(), bad type!\n", __func__);
		break;
	}
	/* The IAS value was handed to us by the confirm callback; we own
	 * it and must delete it here. */
	if (self->ias_result)
		irias_delete_value(self->ias_result);

	if (self->dtsap_sel)
		return 0;

	return -EADDRNOTAVAIL;
}

/*
 * Function irda_discover_daddr_and_lsap_sel (self, name)
 *
 *    This try to find a device with the requested service.
 *
 * It basically look into the discovery log. For each address in the list,
 * it queries the LM-IAS of the device to find if this device offer
 * the requested service.
 * If there is more than one node supporting the service, we complain
 * to the user (it should move devices around).
 * The, we set both the destination address and the lsap selector to point
 * on the service on the unique device we have found.
 *
 * Note : this function fails if there is more than one device in range,
 * because IrLMP doesn't disconnect the LAP when the last LSAP is closed.
 * Moreover, we would need to wait the LAP disconnection...
 */
static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self,
					    char *name)
{
	discinfo_t *discoveries;	/* Copy of the discovery log */
	int	number;			/* Number of nodes in the log */
	int	i;
	int	err = -ENETUNREACH;
	__u32	daddr = DEV_ADDR_ANY;	/* Address we found the service on */
	__u8	dtsap_sel = 0x0;	/* TSAP associated with it */

	pr_debug("%s(), name=%s\n", __func__, name);

	/* Ask lmp for the current discovery log
	 * Note : we have to use irlmp_get_discoveries(), as opposed
	 * to play with the cachelog directly, because while we are
	 * making our ias query, le log might change... */
	discoveries = irlmp_get_discoveries(&number, self->mask.word,
					    self->nslots);
	/* Check if the we got some results */
	if (discoveries == NULL)
		return -ENETUNREACH;	/* No nodes discovered */

	/*
	 * Now, check all discovered devices (if any), and connect
	 * client only about the services that the client is
	 * interested in...
	 */
	for (i = 0; i < number; i++) {
		/* Try the address in the log */
		self->daddr = discoveries[i].daddr;
		self->saddr = 0x0;
		pr_debug("%s(), trying daddr = %08x\n",
			 __func__, self->daddr);

		/* Query remote LM-IAS for this service */
		err = irda_find_lsap_sel(self, name);
		switch (err) {
		case 0:
			/* We found the requested service */
			if (daddr != DEV_ADDR_ANY) {
				pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n",
					 __func__, name);
				self->daddr = DEV_ADDR_ANY;
				kfree(discoveries);
				return -ENOTUNIQ;
			}
			/* First time we found that one, save it ! */
			daddr = self->daddr;
			dtsap_sel = self->dtsap_sel;
			break;
		case -EADDRNOTAVAIL:
			/* Requested service simply doesn't exist on this node */
			break;
		default:
			/* Something bad did happen :-( */
			pr_debug("%s(), unexpected IAS query failure\n",
				 __func__);
			self->daddr = DEV_ADDR_ANY;
			kfree(discoveries);
			return -EHOSTUNREACH;
		}
	}
	/* Cleanup our copy of the discovery log */
	kfree(discoveries);

	/* Check out what we found */
	if (daddr == DEV_ADDR_ANY) {
		pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n",
			 __func__, name);
		self->daddr = DEV_ADDR_ANY;
		return -EADDRNOTAVAIL;
	}

	/* Revert back to discovered device & service */
	self->daddr = daddr;
	self->saddr = 0x0;
	self->dtsap_sel = dtsap_sel;

	pr_debug("%s(), discovered requested service ''%s'' at address %08x\n",
		 __func__, name, self->daddr);

	return 0;
}

/*
 * Function irda_getname (sock, uaddr, uaddr_len, peer)
 *
 *    Return the our own, or peers socket address (sockaddr_irda)
 *
 */
static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_irda saddr;
	struct sock *sk = sock->sk;
	struct irda_sock *self = irda_sk(sk);

	/* Zero-fill so no uninitialised stack bytes reach user space via
	 * struct padding. */
	memset(&saddr, 0, sizeof(saddr));
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		saddr.sir_family = AF_IRDA;
		saddr.sir_lsap_sel = self->dtsap_sel;
		saddr.sir_addr = self->daddr;
	} else {
		saddr.sir_family = AF_IRDA;
		saddr.sir_lsap_sel = self->stsap_sel;
		saddr.sir_addr = self->saddr;
	}

	pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
	pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr);

	/* uaddr_len come to us uninitialised */
	*uaddr_len = sizeof (struct sockaddr_irda);
	memcpy(uaddr, &saddr, *uaddr_len);

	return 0;
}

/*
 * Function irda_listen (sock, backlog)
 *
 *    Just move to the listen state
 *
 */
static int irda_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EOPNOTSUPP;

	lock_sock(sk);

	if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
	    (sk->sk_type != SOCK_DGRAM))
		goto out;

	if (sk->sk_state != TCP_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;

		err = 0;
	}
out:
	release_sock(sk);

	return err;
}

/*
 * Function irda_bind (sock, uaddr, addr_len)
 *
 *    Used by servers to register their well known TSAP
 *
 */
static int irda_bind(struct socket *sock, struct sockaddr *uaddr,
		     int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
	struct irda_sock *self = irda_sk(sk);
	int err;

	pr_debug("%s(%p)\n", __func__, self);

	if (addr_len != sizeof(struct sockaddr_irda))
		return -EINVAL;

	lock_sock(sk);
#ifdef CONFIG_IRDA_ULTRA
	/* Special care for Ultra sockets */
	if ((sk->sk_type == SOCK_DGRAM) &&
	    (sk->sk_protocol == IRDAPROTO_ULTRA)) {
		/* For Ultra, sir_lsap_sel carries the PID, not a TSAP */
		self->pid = addr->sir_lsap_sel;
		err = -EOPNOTSUPP;
		if (self->pid & 0x80) {
			pr_debug("%s(), extension in PID not supp!\n",
				 __func__);
			goto out;
		}
		err = irda_open_lsap(self, self->pid);
		if (err < 0)
			goto out;

		/* Pretend we are connected */
		sock->state = SS_CONNECTED;
		sk->sk_state   = TCP_ESTABLISHED;
		err = 0;

		goto out;
	}
#endif /* CONFIG_IRDA_ULTRA */

	self->ias_obj = irias_new_object(addr->sir_name, jiffies);
	err = -ENOMEM;
	if (self->ias_obj == NULL)
		goto out;

	err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
	if (err < 0) {
		irias_delete_object(self->ias_obj);
		self->ias_obj = NULL;
		goto out;
	}

	/*  Register with LM-IAS */
	irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
				 self->stsap_sel, IAS_KERNEL_ATTR);
	irias_insert_object(self->ias_obj);

	err = 0;
out:
	release_sock(sk);
	return err;
}

/*
 * Function irda_accept (sock, newsock, flags)
 *
 *    Wait for incoming connection
 *
 */
static int irda_accept(struct socket *sock, struct socket *newsock,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct irda_sock *new, *self = irda_sk(sk);
	struct sock *newsk;
	struct sk_buff *skb;
	int err;

	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
	if (err)
		return err;

	err = -EINVAL;

	lock_sock(sk);
	if (sock->state != SS_UNCONNECTED)
		goto out;

	if ((sk = sock->sk) == NULL)
		goto out;

	err = -EOPNOTSUPP;
	if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
	    (sk->sk_type != SOCK_DGRAM))
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/*
	 *	The read queue this time is holding sockets ready to use
	 *	hooked into the SABM we saved
	 */

	/*
	 * We can perform the accept only if there is incoming data
	 * on the listening socket.
	 * So, we will block the caller until we receive any data.
	 * If the caller was waiting on select() or poll() before
	 * calling us, the data is waiting for us ;-)
	 * Jean II
	 */
	while (1) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		/* Non blocking operation */
		err = -EWOULDBLOCK;
		if (flags & O_NONBLOCK)
			goto out;

		err = wait_event_interruptible(*(sk_sleep(sk)),
					skb_peek(&sk->sk_receive_queue));
		if (err)
			goto out;
	}

	newsk = newsock->sk;
	err = -EIO;
	if (newsk == NULL)
		goto out;

	newsk->sk_state = TCP_ESTABLISHED;

	new = irda_sk(newsk);

	/* Now attach up the new socket */
	new->tsap = irttp_dup(self->tsap, new);
	err = -EPERM; /* value does not seem to make sense. -arnd */
	if (!new->tsap) {
		pr_debug("%s(), dup failed!\n", __func__);
		kfree_skb(skb);
		goto out;
	}

	new->stsap_sel = new->tsap->stsap_sel;
	new->dtsap_sel = new->tsap->dtsap_sel;
	new->saddr = irttp_get_saddr(new->tsap);
	new->daddr = irttp_get_daddr(new->tsap);

	new->max_sdu_size_tx = self->max_sdu_size_tx;
	new->max_sdu_size_rx = self->max_sdu_size_rx;
	new->max_data_size   = self->max_data_size;
	new->max_header_size = self->max_header_size;

	memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));

	/* Clean up the original one to keep it in listen state */
	irttp_listen(self->tsap);

	kfree_skb(skb);
	sk->sk_ack_backlog--;

	newsock->state = SS_CONNECTED;

	irda_connect_response(new);
	err = 0;
out:
	release_sock(sk);
	return err;
}

/*
 * Function irda_connect (sock, uaddr, addr_len, flags)
 *
 *    Connect to a IrDA device
 *
 * The main difference with a "standard" connect is that with IrDA we need
 * to resolve the service name into a TSAP selector (in TCP, port number
 * doesn't have to be resolved).
 * Because of this service name resolution, we can offer "auto-connect",
 * where we connect to a service without specifying a destination address.
 *
 * Note : by consulting "errno", the user space caller may learn the cause
 * of the failure.
Most of them are visible in the function, others may come * from subroutines called and are listed here : * o EBUSY : already processing a connect * o EHOSTUNREACH : bad addr->sir_addr argument * o EADDRNOTAVAIL : bad addr->sir_name argument * o ENOTUNIQ : more than one node has addr->sir_name (auto-connect) * o ENETUNREACH : no node found on the network (auto-connect) */ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; struct irda_sock *self = irda_sk(sk); int err; pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); /* Don't allow connect for Ultra sockets */ err = -ESOCKTNOSUPPORT; if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) goto out; if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; err = 0; goto out; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out; } err = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; err = -EINVAL; if (addr_len != sizeof(struct sockaddr_irda)) goto out; /* Check if user supplied any destination device address */ if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { pr_debug("%s(), auto-connect failed!\n", __func__); goto out; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; pr_debug("%s(), daddr = %08x\n", __func__, self->daddr); /* If we don't have a valid service name, we assume the * user want to connect on a specific LSAP. Prevent * the use of invalid LSAPs (IrLMP 1.1 p10). 
Jean II */ if((addr->sir_name[0] != '\0') || (addr->sir_lsap_sel >= 0x70)) { /* Query remote LM-IAS using service name */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { pr_debug("%s(), connect failed!\n", __func__); goto out; } } else { /* Directly connect to the remote LSAP * specified by the sir_lsap field. * Please use with caution, in IrDA LSAPs are * dynamic and there is no "well-known" LSAP. */ self->dtsap_sel = addr->sir_lsap_sel; } } /* Check if we have opened a local TSAP */ if (!self->tsap) irda_open_tsap(self, LSAP_ANY, addr->sir_name); /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; /* Connect to remote device */ err = irttp_connect_request(self->tsap, self->dtsap_sel, self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { pr_debug("%s(), connect failed!\n", __func__); goto out; } /* Now the loop */ err = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out; err = -ERESTARTSYS; if (wait_event_interruptible(*(sk_sleep(sk)), (sk->sk_state != TCP_SYN_SENT))) goto out; if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); if (!err) err = -ECONNRESET; goto out; } sock->state = SS_CONNECTED; /* At this point, IrLMP has assigned our source address */ self->saddr = irttp_get_saddr(self->tsap); err = 0; out: release_sock(sk); return err; } static struct proto irda_proto = { .name = "IRDA", .owner = THIS_MODULE, .obj_size = sizeof(struct irda_sock), }; /* * Function irda_create (sock, protocol) * * Create IrDA socket * */ static int irda_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct irda_sock *self; if (net != &init_net) return -EAFNOSUPPORT; /* Check for valid socket type */ switch (sock->type) { case SOCK_STREAM: /* For TTP connections with SAR disabled */ case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */ case SOCK_DGRAM: /* For 
TTP Unitdata or LMP Ultra transfers */ break; default: return -ESOCKTNOSUPPORT; } /* Allocate networking socket */ sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern); if (sk == NULL) return -ENOMEM; self = irda_sk(sk); pr_debug("%s() : self is %p\n", __func__, self); init_waitqueue_head(&self->query_wait); switch (sock->type) { case SOCK_STREAM: sock->ops = &irda_stream_ops; self->max_sdu_size_rx = TTP_SAR_DISABLE; break; case SOCK_SEQPACKET: sock->ops = &irda_seqpacket_ops; self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; case SOCK_DGRAM: switch (protocol) { #ifdef CONFIG_IRDA_ULTRA case IRDAPROTO_ULTRA: sock->ops = &irda_ultra_ops; /* Initialise now, because we may send on unbound * sockets. Jean II */ self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER; self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER; break; #endif /* CONFIG_IRDA_ULTRA */ case IRDAPROTO_UNITDATA: sock->ops = &irda_dgram_ops; /* We let Unitdata conn. be like seqpack conn. */ self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } /* Initialise networking socket struct */ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ sk->sk_family = PF_IRDA; sk->sk_protocol = protocol; /* Register as a client with IrLMP */ self->ckey = irlmp_register_client(0, NULL, NULL, NULL); self->mask.word = 0xffff; self->rx_flow = self->tx_flow = FLOW_START; self->nslots = DISCOVERY_DEFAULT_SLOTS; self->daddr = DEV_ADDR_ANY; /* Until we get connected */ self->saddr = 0x0; /* so IrLMP assign us any link */ return 0; } /* * Function irda_destroy_socket (self) * * Destroy socket * */ static void irda_destroy_socket(struct irda_sock *self) { pr_debug("%s(%p)\n", __func__, self); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); irlmp_unregister_service(self->skey); /* Unregister with LM-IAS */ if (self->ias_obj) { irias_delete_object(self->ias_obj); self->ias_obj = NULL; } if 
(self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } if (self->tsap) { irttp_disconnect_request(self->tsap, NULL, P_NORMAL); irttp_close_tsap(self->tsap); self->tsap = NULL; } #ifdef CONFIG_IRDA_ULTRA if (self->lsap) { irlmp_close_lsap(self->lsap); self->lsap = NULL; } #endif /* CONFIG_IRDA_ULTRA */ } /* * Function irda_release (sock) */ static int irda_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk == NULL) return 0; lock_sock(sk); sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); /* Destroy IrDA socket */ irda_destroy_socket(irda_sk(sk)); sock_orphan(sk); sock->sk = NULL; release_sock(sk); /* Purge queues (see sock_init_data()) */ skb_queue_purge(&sk->sk_receive_queue); /* Destroy networking socket if we are the last reference on it, * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ sock_put(sk); /* Notes on socket locking and deallocation... - Jean II * In theory we should put pairs of sock_hold() / sock_put() to * prevent the socket to be destroyed whenever there is an * outstanding request or outstanding incoming packet or event. * * 1) This may include IAS request, both in connect and getsockopt. * Unfortunately, the situation is a bit more messy than it looks, * because we close iriap and kfree(self) above. * * 2) This may include selective discovery in getsockopt. * Same stuff as above, irlmp registration and self are gone. * * Probably 1 and 2 may not matter, because it's all triggered * by a process and the socket layer already prevent the * socket to go away while a process is holding it, through * sockfd_put() and fput()... * * 3) This may include deferred TSAP closure. In particular, * we may receive a late irda_disconnect_indication() * Fortunately, (tsap_cb *)->close_pend should protect us * from that. * * I did some testing on SMP, and it looks solid. And the socket * memory leak is now gone... 
- Jean II */ return 0; } /* * Function irda_sendmsg (sock, msg, len) * * Send message down to TinyTP. This function is used for both STREAM and * SEQPACK services. This is possible since it forces the client to * fragment the message if necessary */ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err = -EPIPE; pr_debug("%s(), len=%zd\n", __func__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | MSG_NOSIGNAL)) { return -EINVAL; } lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) goto out_err; if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } self = irda_sk(sk); /* Check if IrTTP is wants us to slow down */ if (wait_event_interruptible(*(sk_sleep(sk)), (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { err = -ERESTARTSYS; goto out; } /* Check if we are still connected */ if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Check that we don't send out too big frames */ if (len > self->max_data_size) { pr_debug("%s(), Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_err; skb_reserve(skb, self->max_header_size + 16); skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out_err; } /* * Just send the message to TinyTP, and let it deal with possible * errors. 
No need to duplicate all that here */ err = irttp_data_request(self->tsap, skb); if (err) { pr_debug("%s(), err=%d\n", __func__, err); goto out_err; } release_sock(sk); /* Tell client how much data we actually sent */ return len; out_err: err = sk_stream_error(sk, msg->msg_flags, err); out: release_sock(sk); return err; } /* * Function irda_recvmsg_dgram (sock, msg, size, flags) * * Try to receive message and copy it to user. The frame is discarded * after being read, regardless of how much the user actually read */ static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct sk_buff *skb; size_t copied; int err; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) return err; skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { pr_debug("%s(), Received truncated frame (%zd < %zd)!\n", __func__, copied, size); copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); /* * Check if we have previously stopped IrTTP and we know * have more free space in our rx_queue. 
If so tell IrTTP * to start delivering frames again before our rx_queue gets * empty */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } } return copied; } /* * Function irda_recvmsg_stream (sock, msg, size, flags) */ static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); int noblock = flags & MSG_DONTWAIT; size_t copied = 0; int target, err; long timeo; if ((err = sock_error(sk)) < 0) return err; if (sock->flags & __SO_ACCEPTCON) return -EINVAL; err =-EOPNOTSUPP; if (flags & MSG_OOB) return -EOPNOTSUPP; err = 0; target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, noblock); do { int chunk; struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { DEFINE_WAIT(wait); err = 0; if (copied >= target) break; prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* * POSIX 1003.1g mandates this order. */ err = sock_error(sk); if (err) ; else if (sk->sk_shutdown & RCV_SHUTDOWN) ; else if (noblock) err = -EAGAIN; else if (signal_pending(current)) err = sock_intr_errno(timeo); else if (sk->sk_state != TCP_ESTABLISHED) err = -ENOTCONN; else if (skb_peek(&sk->sk_receive_queue) == NULL) /* Wait process until data arrives */ schedule(); finish_wait(sk_sleep(sk), &wait); if (err) return err; if (sk->sk_shutdown & RCV_SHUTDOWN) break; continue; } chunk = min_t(unsigned int, skb->len, size); if (memcpy_to_msg(msg, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (copied == 0) copied = -EFAULT; break; } copied += chunk; size -= chunk; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { skb_pull(skb, chunk); /* put the skb back if we didn't use it up.. 
*/ if (skb->len) { pr_debug("%s(), back on q!\n", __func__); skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { pr_debug("%s() questionable!?\n", __func__); /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); /* * Check if we have previously stopped IrTTP and we know * have more free space in our rx_queue. If so tell IrTTP * to start delivering frames again before our rx_queue gets * empty */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } } return copied; } /* * Function irda_sendmsg_dgram (sock, msg, len) * * Send message down to TinyTP for the unreliable sequenced * packet service... * */ static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err; pr_debug("%s(), len=%zd\n", __func__, len); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } err = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; self = irda_sk(sk); /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { pr_debug("%s(), Warning too much data! 
Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out; } /* * Just send the message to TinyTP, and let it deal with possible * errors. No need to duplicate all that here */ err = irttp_udata_request(self->tsap, skb); if (err) { pr_debug("%s(), err=%d\n", __func__, err); goto out; } release_sock(sk); return len; out: release_sock(sk); return err; } /* * Function irda_sendmsg_ultra (sock, msg, len) * * Send message down to IrLMP for the unreliable Ultra * packet service... */ #ifdef CONFIG_IRDA_ULTRA static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; __u8 pid = 0; int bound = 0; struct sk_buff *skb; int err; pr_debug("%s(), len=%zd\n", __func__, len); err = -EINVAL; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); err = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } self = irda_sk(sk); /* Check if an address was specified with sendto. Jean II */ if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_irda *, addr, msg->msg_name); err = -EINVAL; /* Check address, extract pid. Jean II */ if (msg->msg_namelen < sizeof(*addr)) goto out; if (addr->sir_family != AF_IRDA) goto out; pid = addr->sir_lsap_sel; if (pid & 0x80) { pr_debug("%s(), extension in PID not supp!\n", __func__); err = -EOPNOTSUPP; goto out; } } else { /* Check that the socket is properly bound to an Ultra * port. 
Jean II */ if ((self->lsap == NULL) || (sk->sk_state != TCP_ESTABLISHED)) { pr_debug("%s(), socket not bound to Ultra PID.\n", __func__); err = -ENOTCONN; goto out; } /* Use PID from socket */ bound = 1; } /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out; } err = irlmp_connless_data_request((bound ? self->lsap : NULL), skb, pid); if (err) pr_debug("%s(), err=%d\n", __func__, err); out: release_sock(sk); return err ? : len; } #endif /* CONFIG_IRDA_ULTRA */ /* * Function irda_shutdown (sk, how) */ static int irda_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); if (self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } if (self->tsap) { irttp_disconnect_request(self->tsap, NULL, P_NORMAL); irttp_close_tsap(self->tsap); self->tsap = NULL; } /* A few cleanup so the socket look as good as new... */ self->rx_flow = self->tx_flow = FLOW_START; /* needed ??? 
 */
	self->daddr = DEV_ADDR_ANY;	/* Until we get re-connected */
	self->saddr = 0x0;		/* so IrLMP assign us any link */
	release_sock(sk);

	return 0;
}

/*
 * Function irda_poll (file, sock, wait)
 *
 *    Poll/select handler: report which events (error, hangup, readable,
 *    writable) are currently pending on the socket.  Never blocks; it
 *    registers the caller on the socket wait queue via poll_wait() first.
 *    Writability additionally requires IrTTP flow control to be open
 *    (tx_flow == FLOW_START) for stream and seqpacket sockets.
 */
static unsigned int irda_poll(struct file * file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct irda_sock *self = irda_sk(sk);
	unsigned int mask;

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* Exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_debug("%s(), POLLHUP\n", __func__);
		mask |= POLLHUP;
	}

	/* Readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		pr_debug("Socket is readable\n");
		mask |= POLLIN | POLLRDNORM;
	}

	/* Connection-based need to check for termination and startup */
	switch (sk->sk_type) {
	case SOCK_STREAM:
		if (sk->sk_state == TCP_CLOSE) {
			pr_debug("%s(), POLLHUP\n", __func__);
			mask |= POLLHUP;
		}

		if (sk->sk_state == TCP_ESTABLISHED) {
			/* Writable only while IrTTP lets us transmit */
			if ((self->tx_flow == FLOW_START) &&
			    sock_writeable(sk))
			{
				mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
			}
		}
		break;
	case SOCK_SEQPACKET:
		if ((self->tx_flow == FLOW_START) &&
		    sock_writeable(sk))
		{
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
		}
		break;
	case SOCK_DGRAM:
		if (sock_writeable(sk))
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
		break;
	default:
		break;
	}

	return mask;
}

/*
 * Function irda_ioctl (sock, cmd, arg)
 *
 *    Socket ioctl handler: TIOCOUTQ/TIOCINQ queue queries, timestamping,
 *    and a pass-through list of interface ioctls (returned unhandled).
 */
static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err;

	pr_debug("%s(), cmd=%#x\n", __func__, cmd);

	err = -EINVAL;
	switch (cmd) {
	case TIOCOUTQ: {
		long amount;

		/* Free space remaining in the send buffer */
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (unsigned int __user *)arg);
		break;
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as
		 * only user tasks fiddle here
		 */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		err = put_user(amount, (unsigned int __user *)arg);
		break;
	}

	case SIOCGSTAMP:
		if (sk
!= NULL) err = sock_get_timestamp(sk, (struct timeval __user *)arg); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: break; default: pr_debug("%s(), doing device ioctl!\n", __func__); err = -ENOIOCTLCMD; } return err; } #ifdef CONFIG_COMPAT /* * Function irda_ioctl (sock, cmd, arg) */ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* * All IRDA's ioctl are standard ones. */ return -ENOIOCTLCMD; } #endif /* * Function irda_setsockopt (sock, level, optname, optval, optlen) * * Set some options for the socket * */ static int irda_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_ias_set *ias_opt; struct ias_object *ias_obj; struct ias_attrib * ias_attr; /* Attribute in IAS object */ int opt, free_ias = 0, err = 0; pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case IRLMP_IAS_SET: /* The user want to add an attribute to an existing IAS object * (in the IAS database) or to create a new object with this * attribute. * We first query IAS to know if the object exist, and then * create the right attribute... */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. 
This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') { if(self->ias_obj == NULL) { kfree(ias_opt); err = -EINVAL; goto out; } ias_obj = self->ias_obj; } else ias_obj = irias_find_object(ias_opt->irda_class_name); /* Only ROOT can mess with the global IAS database. * Users can only add attributes to the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* If the object doesn't exist, create it */ if(ias_obj == (struct ias_object *) NULL) { /* Create a new object */ ias_obj = irias_new_object(ias_opt->irda_class_name, jiffies); if (ias_obj == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } free_ias = 1; } /* Do we have the attribute already ? */ if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Look at the type */ switch(ias_opt->irda_attrib_type) { case IAS_INTEGER: /* Add an integer attribute */ irias_add_integer_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_int, IAS_USER_ATTR); break; case IAS_OCT_SEQ: /* Check length */ if(ias_opt->attribute.irda_attrib_octet_seq.len > IAS_MAX_OCTET_STRING) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Add an octet sequence attribute */ irias_add_octseq_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_octet_seq.octet_seq, ias_opt->attribute.irda_attrib_octet_seq.len, IAS_USER_ATTR); break; case IAS_STRING: /* Should check charset & co */ /* Check length */ /* The length is encoded in a __u8, and * IAS_MAX_STRING == 256, so there is no way * userspace can pass us a string too large. 
* Jean II */ /* NULL terminate the string (avoid troubles) */ ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0'; /* Add a string attribute */ irias_add_string_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_string.string, IAS_USER_ATTR); break; default : kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } irias_insert_object(ias_obj); kfree(ias_opt); break; case IRLMP_IAS_DEL: /* The user want to delete an object from our local IAS * database. We just need to query the IAS, check is the * object is not owned by the kernel and delete it. */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Only ROOT can mess with the global IAS database. 
* Users can only del attributes from the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { pr_debug("%s(), attempting to delete a kernel attribute\n", __func__); kfree(ias_opt); err = -EPERM; goto out; } /* Remove the attribute (and maybe the object) */ irias_delete_attrib(ias_obj, ias_attr, 1); kfree(ias_opt); break; case IRLMP_MAX_SDU_SIZE: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->sk_type != SOCK_SEQPACKET) { pr_debug("%s(), setting max_sdu_size = %d\n", __func__, opt); self->max_sdu_size_rx = opt; } else { net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n", __func__); err = -ENOPROTOOPT; goto out; } break; case IRLMP_HINTS_SET: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } /* The input is really a (__u8 hints[2]), easier as an int */ if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Unregister any old registration */ if (self->skey) irlmp_unregister_service(self->skey); self->skey = irlmp_register_service((__u16) opt); break; case IRLMP_HINT_MASK_SET: /* As opposed to the previous case which set the hint bits * that we advertise, this one set the filter we use when * making a discovery (nodes which don't match any hint * bit in the mask are not reported). 
 */
		if (optlen < sizeof(int)) {
			err = -EINVAL;
			goto out;
		}
		/* The input is really a (__u8 hints[2]), easier as an int */
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			goto out;
		}
		/* Set the new hint mask */
		self->mask.word = (__u16) opt;
		/* Mask out extension bits */
		self->mask.word &= 0x7f7f;
		/* Check if no bits: empty mask would match nothing, so
		 * fall back to matching everything */
		if(!self->mask.word)
			self->mask.word = 0xFFFF;

		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

out:
	release_sock(sk);

	return err;
}

/*
 * Function irda_extract_ias_value(ias_opt, ias_value)
 *
 *    Translate internal IAS value structure to the user space representation
 *
 * The external representation of IAS values, as we exchange them with
 * user space program is quite different from the internal representation,
 * as stored in the IAS database (because we need a flat structure for
 * crossing kernel boundary).
 * This function transform the former in the latter. We also check
 * that the value type is valid.
 *
 * Returns 0 on success, -EINVAL for IAS_MISSING or an unknown value type.
 * NOTE(review): assumes ias_value->len fits the fixed-size octet-seq /
 * string buffers in struct irda_ias_set (presumably enforced by the IAS
 * layer via IAS_MAX_OCTET_STRING / IAS_MAX_STRING) -- confirm before reuse.
 */
static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
				  struct ias_value *ias_value)
{
	/* Look at the type */
	switch (ias_value->type) {
	case IAS_INTEGER:
		/* Copy the integer */
		ias_opt->attribute.irda_attrib_int = ias_value->t.integer;
		break;
	case IAS_OCT_SEQ:
		/* Set length */
		ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len;
		/* Copy over */
		memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
		       ias_value->t.oct_seq, ias_value->len);
		break;
	case IAS_STRING:
		/* Set length */
		ias_opt->attribute.irda_attrib_string.len = ias_value->len;
		ias_opt->attribute.irda_attrib_string.charset = ias_value->charset;
		/* Copy over */
		memcpy(ias_opt->attribute.irda_attrib_string.string,
		       ias_value->t.string, ias_value->len);
		/* NULL terminate the string (avoid troubles) */
		ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0';
		break;
	case IAS_MISSING:
	default :
		return -EINVAL;
	}

	/* Copy type over */
	ias_opt->irda_attrib_type = ias_value->type;

	return 0;
}

/*
 * Function irda_getsockopt (sock, level, optname, optval, optlen)
 */
static int irda_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_device_list list; struct irda_device_info *discoveries; struct irda_ias_set * ias_opt; /* IAS get/query params */ struct ias_object * ias_obj; /* Object in IAS */ struct ias_attrib * ias_attr; /* Attribute in IAS object */ int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */ int val = 0; int len = 0; int err = 0; int offset, total; pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if(len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case IRLMP_ENUMDEVICES: /* Offset to first device entry */ offset = sizeof(struct irda_device_list) - sizeof(struct irda_device_info); if (len < offset) { err = -EINVAL; goto out; } /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&list.len, self->mask.word, self->nslots); /* Check if the we got some results */ if (discoveries == NULL) { err = -EAGAIN; goto out; /* Didn't find any devices */ } /* Write total list length back to client */ if (copy_to_user(optval, &list, offset)) err = -EFAULT; /* Copy the list itself - watch for overflow */ if (list.len > 2048) { err = -EINVAL; goto bed; } total = offset + (list.len * sizeof(struct irda_device_info)); if (total > len) total = len; if (copy_to_user(optval+offset, discoveries, total - offset)) err = -EFAULT; /* Write total number of bytes used back to client */ if (put_user(total, optlen)) err = -EFAULT; bed: /* Free up our buffer */ kfree(discoveries); break; case IRLMP_MAX_SDU_SIZE: val = self->max_data_size; len = sizeof(int); if (put_user(len, optlen)) { err = -EFAULT; goto out; } if (copy_to_user(optval, &val, len)) { err = -EFAULT; goto out; } break; case IRLMP_IAS_GET: /* The user want an object from our local IAS database. 
* We just need to query the IAS and return the value * that we found */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, ias_attr->value); if(err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_IAS_QUERY: /* The user want an object from a remote IAS database. * We need to use IAP to query the remote database and * then wait for the answer to come back. */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* At this point, there are two cases... 
* 1) the socket is connected - that's the easy case, we * just query the device we are connected to... * 2) the socket is not connected - the user doesn't want * to connect and/or may not have a valid service name * (so can't create a fake connection). In this case, * we assume that the user pass us a valid destination * address in the requesting structure... */ if(self->daddr != DEV_ADDR_ANY) { /* We are connected - reuse known daddr */ daddr = self->daddr; } else { /* We are not connected, we must specify a valid * destination address */ daddr = ias_opt->daddr; if((!daddr) || (daddr == DEV_ADDR_ANY)) { kfree(ias_opt); err = -EINVAL; goto out; } } /* Check that we can proceed with IAP */ if (self->iriap) { net_warn_ratelimited("%s: busy with a previous query\n", __func__); kfree(ias_opt); err = -EBUSY; goto out; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irda_getvalue_confirm); if (self->iriap == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } /* Treat unexpected wakeup as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->saddr, daddr, ias_opt->irda_class_name, ias_opt->irda_attrib_name); /* Wait for answer, if not yet finished (or failed) */ if (wait_event_interruptible(self->query_wait, (self->iriap == NULL))) { /* pending request uses copy of ias_opt-content * we can free it regardless! 
*/ kfree(ias_opt); /* Treat signals as disconnect */ err = -EHOSTUNREACH; goto out; } /* Check what happened */ if (self->errno) { kfree(ias_opt); /* Requested object/attribute doesn't exist */ if((self->errno == IAS_CLASS_UNKNOWN) || (self->errno == IAS_ATTRIB_UNKNOWN)) err = -EADDRNOTAVAIL; else err = -EHOSTUNREACH; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, self->ias_result); if (self->ias_result) irias_delete_value(self->ias_result); if (err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_WAITDEVICE: /* This function is just another way of seeing life ;-) * IRLMP_ENUMDEVICES assumes that you have a static network, * and that you just want to pick one of the devices present. * On the other hand, in here we assume that no device is * present and that at some point in the future a device will * come into range. When this device arrive, we just wake * up the caller, so that he has time to connect to it before * the device goes away... * Note : once the node has been discovered for more than a * few second, it won't trigger this function, unless it * goes away and come back changes its hint bits (so we * might call it IRLMP_WAITNEWDEVICE). 
*/ /* Check that the user is passing us an int */ if (len != sizeof(int)) { err = -EINVAL; goto out; } /* Get timeout in ms (max time we block the caller) */ if (get_user(val, (int __user *)optval)) { err = -EFAULT; goto out; } /* Tell IrLMP we want to be notified */ irlmp_update_client(self->ckey, self->mask.word, irda_selective_discovery_indication, NULL, (void *) self); /* Do some discovery (and also return cached results) */ irlmp_discovery_request(self->nslots); /* Wait until a node is discovered */ if (!self->cachedaddr) { pr_debug("%s(), nothing discovered yet, going to sleep...\n", __func__); /* Set watchdog timer to expire in <val> ms. */ self->errno = 0; setup_timer(&self->watchdog, irda_discovery_timeout, (unsigned long)self); mod_timer(&self->watchdog, jiffies + msecs_to_jiffies(val)); /* Wait for IR-LMP to call us back */ err = __wait_event_interruptible(self->query_wait, (self->cachedaddr != 0 || self->errno == -ETIME)); /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); pr_debug("%s(), ...waking up !\n", __func__); if (err != 0) goto out; } else pr_debug("%s(), found immediately !\n", __func__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask.word, NULL, NULL, NULL); /* Check if the we got some results */ if (!self->cachedaddr) { err = -EAGAIN; /* Didn't find any devices */ goto out; } daddr = self->cachedaddr; /* Cleanup */ self->cachedaddr = 0; /* We return the daddr of the device that trigger the * wakeup. As irlmp pass us only the new devices, we * are sure that it's not an old device. * If the user want more details, he should query * the whole discovery log and pick one device... 
*/ if (put_user(daddr, (int __user *)optval)) { err = -EFAULT; goto out; } break; default: err = -ENOPROTOOPT; } out: release_sock(sk); return err; } static const struct net_proto_family irda_family_ops = { .family = PF_IRDA, .create = irda_create, .owner = THIS_MODULE, }; static const struct proto_ops irda_stream_ops = { .family = PF_IRDA, .owner = THIS_MODULE, .release = irda_release, .bind = irda_bind, .connect = irda_connect, .socketpair = sock_no_socketpair, .accept = irda_accept, .getname = irda_getname, .poll = irda_poll, .ioctl = irda_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = irda_compat_ioctl, #endif .listen = irda_listen, .shutdown = irda_shutdown, .setsockopt = irda_setsockopt, .getsockopt = irda_getsockopt, .sendmsg = irda_sendmsg, .recvmsg = irda_recvmsg_stream, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops irda_seqpacket_ops = { .family = PF_IRDA, .owner = THIS_MODULE, .release = irda_release, .bind = irda_bind, .connect = irda_connect, .socketpair = sock_no_socketpair, .accept = irda_accept, .getname = irda_getname, .poll = datagram_poll, .ioctl = irda_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = irda_compat_ioctl, #endif .listen = irda_listen, .shutdown = irda_shutdown, .setsockopt = irda_setsockopt, .getsockopt = irda_getsockopt, .sendmsg = irda_sendmsg, .recvmsg = irda_recvmsg_dgram, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops irda_dgram_ops = { .family = PF_IRDA, .owner = THIS_MODULE, .release = irda_release, .bind = irda_bind, .connect = irda_connect, .socketpair = sock_no_socketpair, .accept = irda_accept, .getname = irda_getname, .poll = datagram_poll, .ioctl = irda_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = irda_compat_ioctl, #endif .listen = irda_listen, .shutdown = irda_shutdown, .setsockopt = irda_setsockopt, .getsockopt = irda_getsockopt, .sendmsg = irda_sendmsg_dgram, .recvmsg = irda_recvmsg_dgram, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; 
#ifdef CONFIG_IRDA_ULTRA
/* proto_ops for connectionless Ultra sockets: connect/accept/listen are
 * rejected via the sock_no_* stubs; send goes through irda_sendmsg_ultra. */
static const struct proto_ops irda_ultra_ops = {
	.family =	PF_IRDA,
	.owner =	THIS_MODULE,
	.release =	irda_release,
	.bind =		irda_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	irda_getname,
	.poll =		datagram_poll,
	.ioctl =	irda_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	irda_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	irda_shutdown,
	.setsockopt =	irda_setsockopt,
	.getsockopt =	irda_getsockopt,
	.sendmsg =	irda_sendmsg_ultra,
	.recvmsg =	irda_recvmsg_dgram,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irsock_init (pro)
 *
 *    Initialize IrDA protocol
 *
 *    Registers the IrDA proto and the PF_IRDA socket family.  Returns 0
 *    on success or the negative error from either registration call.
 */
int __init irsock_init(void)
{
	int rc = proto_register(&irda_proto, 0);

	if (rc == 0)
		rc = sock_register(&irda_family_ops);

	return rc;
}

/*
 * Function irsock_cleanup (void)
 *
 *    Remove IrDA protocol
 *
 *    Reverse of irsock_init(): unregister the socket family first, then
 *    the proto.
 */
void irsock_cleanup(void)
{
	sock_unregister(PF_IRDA);
	proto_unregister(&irda_proto);
}
/* ==== 948277.c — start of next concatenated source file (HDF5 H5D.c) ==== */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the files COPYING and Copyright.html. COPYING can be found at the root * * of the source code distribution tree; Copyright.html can be found at the * * root level of an installed copy of the electronic HDF5 document set and * * is linked from the top-level documents page. It can also be found at * * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * * access to either file, you may request a copy from [email protected]. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /****************/ /* Module Setup */ /****************/ #define H5D_PACKAGE /*suppress error about including H5Dpkg */ /* Interface initialization */ #define H5_INTERFACE_INIT_FUNC H5D__init_interface /***********/ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ #include "H5Dpkg.h" /* Datasets */ #include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ #include "H5FOprivate.h" /* File objects */ #include "H5Iprivate.h" /* IDs */ #include "H5Lprivate.h" /* Links */ #include "H5MMprivate.h" /* Memory management */ /****************/ /* Local Macros */ /****************/ /******************/ /* Local Typedefs */ /******************/ /* Struct for holding callback info during H5D_flush operation */ typedef struct { const H5F_t *f; /* Pointer to file being flushed */ hid_t dxpl_id; /* DXPL for I/O operations */ } H5D_flush_ud_t; /********************/ /* Local Prototypes */ /********************/ /* General stuff */ static herr_t H5D__get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache); static H5D_shared_t *H5D__new(hid_t dcpl_id, hbool_t 
creating, hbool_t vl_type); static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type); static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space); static herr_t H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id); static herr_t H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id); static herr_t H5D__init_storage(H5D_t *dataset, hbool_t full_overwrite, hsize_t old_dim[], hid_t dxpl_id); /*********************/ /* Package Variables */ /*********************/ /* Define a "default" dataset transfer property list cache structure to use for default DXPLs */ H5D_dxpl_cache_t H5D_def_dxpl_cache; /* Declare a free list to manage blocks of VL data */ H5FL_BLK_DEFINE(vlen_vl_buf); /* Declare a free list to manage other blocks of VL data */ H5FL_BLK_DEFINE(vlen_fl_buf); /*****************************/ /* Library Private Variables */ /*****************************/ /*******************/ /* Local Variables */ /*******************/ /* Declare a free list to manage the H5D_t and H5D_shared_t structs */ H5FL_DEFINE_STATIC(H5D_t); H5FL_DEFINE_STATIC(H5D_shared_t); /* Declare the external PQ free list for the sieve buffer information */ H5FL_BLK_EXTERN(sieve_buf); /* Declare the external free list to manage the H5D_chunk_info_t struct */ H5FL_EXTERN(H5D_chunk_info_t); /* Define a static "default" dataset structure to use to initialize new datasets */ static H5D_shared_t H5D_def_dset; /* Dataset ID class */ static const H5I_class_t H5I_DATASET_CLS[1] = {{ H5I_DATASET, /* ID class value */ 0, /* Class flags */ 64, /* Minimum hash size for class */ 0, /* # of reserved IDs for class */ (H5I_free_t)H5D_close /* Callback routine for closing objects of this class */ }}; /*------------------------------------------------------------------------- * Function: H5D_init * * Purpose: Initialize the interface from some other layer. 
 *
 * Return:	Success:	non-negative
 *
 *		Failure:	negative
 *
 * Programmer:	Quincey Koziol
 *		Saturday, March 4, 2000
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_init(void)
{
    herr_t ret_value = SUCCEED;   /* Return value */

    FUNC_ENTER_NOAPI(FAIL)
    /* FUNC_ENTER() does all the work: it triggers the one-time
     * H5D__init_interface() call below via the interface-init machinery. */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_init() */


/*--------------------------------------------------------------------------
NAME
   H5D__init_interface -- Initialize interface-specific information
USAGE
    herr_t H5D__init_interface()
RETURNS
    Non-negative on success/Negative on failure
DESCRIPTION
    Initializes any interface-specific data or routines: registers the
    dataset ID class, seeds the "default dataset" template from the default
    DCPL (layout, external file list, fill value, filter pipeline), and
    caches the default DXPL values.
NOTES
    Care must be taken when using the H5P functions, since they can cause
    a deadlock in the library when the library is attempting to terminate -QAK
--------------------------------------------------------------------------*/
static herr_t
H5D__init_interface(void)
{
    H5P_genplist_t *def_dcpl;           /* Default Dataset Creation Property list */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_STATIC

    /* Initialize the atom group for the dataset IDs */
    if(H5I_register_type(H5I_DATASET_CLS) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize interface")

    /* Reset the "default dataset" information */
    HDmemset(&H5D_def_dset, 0, sizeof(H5D_shared_t));

    /* Get the default dataset creation property list values and initialize the
     * default dataset with them.
     */
    if(NULL == (def_dcpl = (H5P_genplist_t *)H5I_object(H5P_LST_DATASET_CREATE_g)))
        HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "can't get default dataset creation property list")

    /* Get the default data storage layout */
    if(H5P_get(def_dcpl, H5D_CRT_LAYOUT_NAME, &H5D_def_dset.layout) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve layout")

    /* Get the default dataset creation properties */
    if(H5P_get(def_dcpl, H5D_CRT_EXT_FILE_LIST_NAME, &H5D_def_dset.dcpl_cache.efl) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve external file list")
    if(H5P_get(def_dcpl, H5D_CRT_FILL_VALUE_NAME, &H5D_def_dset.dcpl_cache.fill) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve fill value")
    if(H5P_get(def_dcpl, H5O_CRT_PIPELINE_NAME, &H5D_def_dset.dcpl_cache.pline) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve pipeline filter")

    /* Reset the "default DXPL cache" information */
    HDmemset(&H5D_def_dxpl_cache, 0, sizeof(H5D_dxpl_cache_t));

    /* Get the default DXPL cache information */
    if(H5D__get_dxpl_cache_real(H5P_DATASET_XFER_DEFAULT, &H5D_def_dxpl_cache) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve default DXPL info")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_interface() */


/*-------------------------------------------------------------------------
 * Function: H5D_term_interface
 *
 * Purpose:  Terminate this interface.
 *
 * Return:   Success: Positive if anything was done that might
 *                    affect other interfaces; zero otherwise.
 *
 *           Failure: Negative.
 *
 * Programmer: Robb Matzke
 *             Friday, November 20, 1998
 *
 *-------------------------------------------------------------------------
 */
int
H5D_term_interface(void)
{
    int n=0;

    FUNC_ENTER_NOAPI_NOINIT_NOERR

    if(H5_interface_initialize_g) {
        if((n=H5I_nmembers(H5I_DATASET))>0) {
            /* The dataset API uses the "force" flag set to true because it
             * is using the "file objects" (H5FO) API functions to track open
             * objects in the file.  Using the H5FO code means that dataset
             * IDs can have reference counts >1, when an existing dataset is
             * opened more than once.  However, the H5I code does not attempt
             * to close objects with reference counts>1 unless the "force" flag
             * is set to true.
             *
             * At some point (probably after the group and datatypes use the
             * the H5FO code), the H5FO code might need to be switched around
             * to storing pointers to the objects being tracked (H5D_t, H5G_t,
             * etc) and reference count those itself instead of relying on the
             * reference counting in the H5I layer.  Then, the "force" flag can
             * be put back to false.
             *
             * Setting the "force" flag to true for all the interfaces won't
             * work because the "file driver" (H5FD) APIs use the H5I reference
             * counting to avoid closing a file driver out from underneath an
             * open file...
             *
             * QAK - 5/13/03
             */
            H5I_clear_type(H5I_DATASET, TRUE, FALSE);
        } else {
            /* No dataset IDs left: destroy the ID type and mark the
             * interface as closed */
            H5I_dec_type_ref(H5I_DATASET);
            H5_interface_initialize_g = 0;
            n = 1; /*H5I*/
        }
    }
    FUNC_LEAVE_NOAPI(n)
} /* end H5D_term_interface() */


/*--------------------------------------------------------------------------
 NAME
    H5D__get_dxpl_cache_real
 PURPOSE
    Get all the values for the DXPL cache.
 USAGE
    herr_t H5D__get_dxpl_cache_real(dxpl_id, cache)
        hid_t dxpl_id;          IN: DXPL to query
        H5D_dxpl_cache_t *cache;IN/OUT: DXPL cache to fill with values
 RETURNS
    Non-negative on success/Negative on failure.
 DESCRIPTION
    Query all the values from a DXPL that are needed by internal routines
    within the library.
 GLOBAL VARIABLES
 COMMENTS, BUGS, ASSUMPTIONS
 EXAMPLES
 REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
H5D__get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache)
{
    H5P_genplist_t *dx_plist;   /* Data transfer property list */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Check args */
    HDassert(cache);

    /* Get the dataset transfer property list */
    if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")

    /* Get maximum temporary buffer size */
    if(H5P_get(dx_plist, H5D_XFER_MAX_TEMP_BUF_NAME, &cache->max_temp_buf) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve maximum temporary buffer size")

    /* Get temporary buffer pointer */
    if(H5P_get(dx_plist, H5D_XFER_TCONV_BUF_NAME, &cache->tconv_buf) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve temporary buffer pointer")

    /* Get background buffer pointer */
    if(H5P_get(dx_plist, H5D_XFER_BKGR_BUF_NAME, &cache->bkgr_buf) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve background buffer pointer")

    /* Get background buffer type */
    if(H5P_get(dx_plist, H5D_XFER_BKGR_BUF_TYPE_NAME, &cache->bkgr_buf_type) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve background buffer type")

    /* Get B-tree split ratios */
    if(H5P_get(dx_plist, H5D_XFER_BTREE_SPLIT_RATIO_NAME, &cache->btree_split_ratio) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve B-tree split ratios")

    /* Get I/O vector size */
    if(H5P_get(dx_plist, H5D_XFER_HYPER_VECTOR_SIZE_NAME, &cache->vec_size) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve I/O vector size")

#ifdef H5_HAVE_PARALLEL
    /* Collect Parallel I/O information for possible later use */
    if(H5P_get(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &cache->xfer_mode) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve parallel transfer method")
    if(H5P_get(dx_plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME, &cache->coll_opt_mode) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve collective transfer option")
#endif /* H5_HAVE_PARALLEL */

    /* Get error detection properties */
    if(H5P_get(dx_plist, H5D_XFER_EDC_NAME, &cache->err_detect) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve error detection info")

    /* Get filter callback function */
    if(H5P_get(dx_plist, H5D_XFER_FILTER_CB_NAME, &cache->filter_cb) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve filter callback function")

    /* Get the data transform property */
    if(H5P_get(dx_plist, H5D_XFER_XFORM_NAME, &cache->data_xform_prop) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve data transform info")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_dxpl_cache_real() */


/*--------------------------------------------------------------------------
 NAME
    H5D__get_dxpl_cache
 PURPOSE
    Get all the values for the DXPL cache.
 USAGE
    herr_t H5D__get_dxpl_cache(dxpl_id, cache)
        hid_t dxpl_id;          IN: DXPL to query
        H5D_dxpl_cache_t *cache;IN/OUT: DXPL cache to fill with values
 RETURNS
    Non-negative on success/Negative on failure.
 DESCRIPTION
    Query all the values from a DXPL that are needed by internal routines
    within the library.
 GLOBAL VARIABLES
 COMMENTS, BUGS, ASSUMPTIONS
    The CACHE pointer should point at already allocated memory to place
    non-default property list info.  If a default property list is used, the
    CACHE pointer will be changed to point at the default information.
EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ herr_t H5D__get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache) { herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* Check args */ assert(cache); /* Check for the default DXPL */ if(dxpl_id==H5P_DATASET_XFER_DEFAULT) *cache=&H5D_def_dxpl_cache; else if(H5D__get_dxpl_cache_real(dxpl_id,*cache) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "Can't retrieve DXPL values") done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__get_dxpl_cache() */ /*------------------------------------------------------------------------- * Function: H5D__create_named * * Purpose: Internal routine to create a new dataset. * * Return: Success: Non-NULL, pointer to new dataset object. * * Failure: NULL * * Programmer: Quincey Koziol * Thursday, April 5, 2007 * *------------------------------------------------------------------------- */ H5D_t * H5D__create_named(const H5G_loc_t *loc, const char *name, hid_t type_id, const H5S_t *space, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id) { H5O_obj_create_t ocrt_info; /* Information for object creation */ H5D_obj_create_t dcrt_info; /* Information for dataset creation */ H5D_t *ret_value; /* Return value */ FUNC_ENTER_PACKAGE /* Check arguments */ HDassert(loc); HDassert(name && *name); HDassert(type_id != H5P_DEFAULT); HDassert(space); HDassert(lcpl_id != H5P_DEFAULT); HDassert(dcpl_id != H5P_DEFAULT); HDassert(dapl_id != H5P_DEFAULT); HDassert(dxpl_id != H5P_DEFAULT); /* Set up dataset creation info */ dcrt_info.type_id = type_id; dcrt_info.space = space; dcrt_info.dcpl_id = dcpl_id; dcrt_info.dapl_id = dapl_id; /* Set up object creation information */ ocrt_info.obj_type = H5O_TYPE_DATASET; ocrt_info.crt_info = &dcrt_info; ocrt_info.new_obj = NULL; /* Create the new dataset and link it to its parent group */ if(H5L_link_object(loc, name, &ocrt_info, lcpl_id, dapl_id, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, 
NULL, "unable to create and link to dataset")
    HDassert(ocrt_info.new_obj);

    /* Set the return value */
    ret_value = (H5D_t *)ocrt_info.new_obj;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__create_named() */


/*-------------------------------------------------------------------------
 * Function: H5D__get_space_status
 *
 * Purpose:  Returns the status of data space allocation.
 *
 * Return:
 *           Success: Non-negative
 *
 *           Failure: Negative
 *
 * Programmer: Raymond Lu
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__get_space_status(H5D_t *dset, H5D_space_status_t *allocation, hid_t dxpl_id)
{
    H5S_t      *space;              /* Dataset's dataspace */
    hsize_t     space_allocated;    /* The number of bytes allocated for chunks */
    hssize_t    snelmts;            /* Temporary holder for number of elements in dataspace */
    hsize_t     nelmts;             /* Number of elements in dataspace */
    size_t      dt_size;            /* Size of datatype */
    hsize_t     full_size;          /* The number of bytes in the dataset when fully populated */
    herr_t      ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

    HDassert(dset);

    /* Get the dataset's dataspace */
    space = dset->shared->space;
    HDassert(space);

    /* Get the total number of elements in dataset's dataspace */
    if((snelmts = H5S_GET_EXTENT_NPOINTS(space)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve number of elements in dataspace")
    nelmts = (hsize_t)snelmts;

    /* Get the size of the dataset's datatype */
    if(0 == (dt_size = H5T_GET_SIZE(dset->shared->type)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of datatype")

    /* Compute the maximum size of the dataset in bytes */
    full_size = nelmts * dt_size;

    /* Check for overflow during multiplication */
    if(nelmts != (full_size / dt_size))
        HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed")

    /* Difficult to error check, since the error value is 0 and 0 is a valid value... :-/ */
    if(H5D__get_storage_size(dset, dxpl_id, &space_allocated) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get size of dataset's storage")

    /* Decide on how much of the space is allocated */
    if(space_allocated == 0)
        *allocation = H5D_SPACE_STATUS_NOT_ALLOCATED;
    else if(space_allocated == full_size)
        *allocation = H5D_SPACE_STATUS_ALLOCATED;
    else {
        /* Should only happen for chunked datasets currently */
        HDassert(dset->shared->layout.type == H5D_CHUNKED);

        *allocation = H5D_SPACE_STATUS_PART_ALLOCATED;
    } /* end else */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_space_status() */


/*-------------------------------------------------------------------------
 * Function: H5D__new
 *
 * Purpose:  Creates a new, empty dataset structure
 *
 * Return:   Success: Pointer to a new dataset descriptor.
 *
 *           Failure: NULL
 *
 * Programmer: Quincey Koziol
 *             Monday, October 12, 1998
 *
 *-------------------------------------------------------------------------
 */
static H5D_shared_t *
H5D__new(hid_t dcpl_id, hbool_t creating, hbool_t vl_type)
{
    H5D_shared_t    *new_dset = NULL;   /* New dataset object */
    H5P_genplist_t  *plist;             /* Property list created */
    H5D_shared_t    *ret_value;         /* Return value */

    FUNC_ENTER_STATIC

    /* Allocate new shared dataset structure */
    if(NULL == (new_dset = H5FL_MALLOC(H5D_shared_t)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    /* Copy the default dataset information */
    HDmemcpy(new_dset, &H5D_def_dset, sizeof(H5D_shared_t));

    /* If we are using the default dataset creation property list, during creation
     * don't bother to copy it, just increment the reference count
     * (VL-typed datasets always copy, since their fill-time may be changed later)
     */
    if(!vl_type && creating && dcpl_id == H5P_DATASET_CREATE_DEFAULT) {
        if(H5I_inc_ref(dcpl_id, FALSE) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment default DCPL ID")
        new_dset->dcpl_id = dcpl_id;
    } /* end if */
    else {
        /* Get the property list */
        if(NULL == (plist = (H5P_genplist_t *)H5I_object(dcpl_id)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list")

        new_dset->dcpl_id = H5P_copy_plist(plist, FALSE);
    } /* end else */

    /* Set return value */
    ret_value = new_dset;

done:
    if(ret_value == NULL)
        if(new_dset != NULL) {
            /* NOTE(review): the error text says "datatype ID" but this
             * decrements the DCPL ID copied/incremented above -- message
             * looks stale; confirm before changing */
            if(new_dset->dcpl_id != 0 && H5I_dec_ref(new_dset->dcpl_id) < 0)
                HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "can't decrement temporary datatype ID")
            new_dset = H5FL_FREE(H5D_shared_t, new_dset);
        } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__new() */


/*-------------------------------------------------------------------------
 * Function: H5D__init_type
 *
 * Purpose:  Copy a datatype for a dataset's use, performing all the
 *           necessary adjustments, etc.
 *
 * Return:   Success: SUCCEED
 *           Failure: FAIL
 *
 * Programmer: Quincey Koziol
 *             Thursday, June 24, 2004
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type)
{
    htri_t relocatable;         /* Flag whether the type is relocatable */
    htri_t immutable;           /* Flag whether the type is immutable */
    hbool_t use_latest_format;  /* Flag indicating the newest file format should be used */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checking */
    HDassert(file);
    HDassert(dset);
    HDassert(type);

    /* Check whether the datatype is relocatable */
    if((relocatable = H5T_is_relocatable(type)) < 0)
        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?")

    /* Check whether the datatype is immutable */
    if((immutable = H5T_is_immutable(type)) < 0)
        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?")

    /* Get the file's 'use the latest version of the format' flag */
    use_latest_format = H5F_USE_LATEST_FORMAT(file);

    /* Copy the datatype if it's a custom datatype or if it'll change when it's location is changed */
    if(!immutable || relocatable || use_latest_format) {
        /* Copy datatype for dataset */
        if((dset->shared->type = H5T_copy(type, H5T_COPY_ALL)) == NULL)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY,
FAIL, "can't copy datatype")

        /* Mark any datatypes as being on disk now */
        if(H5T_set_loc(dset->shared->type, file, H5T_LOC_DISK) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't set datatype location")

        /* Set the latest format, if requested */
        if(use_latest_format)
            if(H5T_set_latest_version(dset->shared->type) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest version of datatype")

        /* Get a datatype ID for the dataset's datatype */
        if((dset->shared->type_id = H5I_register(H5I_DATATYPE, dset->shared->type, FALSE)) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")
    } /* end if */
    /* Not a custom datatype, just use it directly */
    else {
        if(H5I_inc_ref(type_id, FALSE) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, FAIL, "Can't increment datatype ID")

        /* Use existing datatype */
        dset->shared->type_id = type_id;
        dset->shared->type = (H5T_t *)type; /* (Cast away const OK - QAK) */
    } /* end else */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_type() */


/*-------------------------------------------------------------------------
 * Function: H5D__init_space
 *
 * Purpose:  Copy a dataspace for a dataset's use, performing all the
 *           necessary adjustments, etc.
 *
 * Return:   Success: SUCCEED
 *           Failure: FAIL
 *
 * Programmer: Quincey Koziol
 *             Tuesday, July 24, 2007
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
{
    hbool_t use_latest_format;  /* Flag indicating the newest file format should be used */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checking */
    HDassert(file);
    HDassert(dset);
    HDassert(space);

    /* Get the file's 'use the latest version of the format' flag */
    use_latest_format = H5F_USE_LATEST_FORMAT(file);

    /* Copy dataspace for dataset */
    if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dataspace")

    /* Set the latest format, if requested */
    if(use_latest_format)
        if(H5S_set_latest_version(dset->shared->space) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest version of datatype")

    /* Set the dataset's dataspace to 'all' selection */
    if(H5S_select_all(dset->shared->space, TRUE) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set all selection")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_space() */


/*-------------------------------------------------------------------------
 * Function: H5D__update_oh_info
 *
 * Purpose:  Create and fill object header for dataset
 *
 * Return:   Success: SUCCEED
 *           Failure: FAIL
 *
 * Programmer: Bill Wendling
 *             Thursday, October 31, 2002
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id)
{
    H5O_t      *oh = NULL;      /* Pointer to dataset's object header */
    size_t      ohdr_size = H5D_MINHDR_SIZE;    /* Size of dataset's object header */
    H5O_loc_t  *oloc = NULL;    /* Dataset's object location */
    H5O_layout_t *layout;       /* Dataset's layout information */
    H5T_t      *type;           /* Dataset's datatype */
    hbool_t     use_latest_format;      /* Flag indicating the newest file
                                           format should be used */
    H5O_fill_t *fill_prop;      /* Pointer to dataset's fill value information */
    H5D_fill_value_t fill_status;       /* Fill value status */
    hbool_t     fill_changed = FALSE;   /* Flag indicating the fill value was changed */
    hbool_t     layout_init = FALSE;    /* Flag to indicate that chunk information was initialized */
    herr_t      ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checking */
    HDassert(file);
    HDassert(dset);

    /* Set some local variables, for convenience */
    oloc = &dset->oloc;
    layout = &dset->shared->layout;
    type = dset->shared->type;
    fill_prop = &dset->shared->dcpl_cache.fill;

    /* Get the file's 'use the latest version of the format' flag */
    use_latest_format = H5F_USE_LATEST_FORMAT(file);

    /* Retrieve "defined" status of fill value */
    if(H5P_is_fill_value_defined(fill_prop, &fill_status) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")

    /* Special case handling for variable-length types */
    if(H5T_detect_class(type, H5T_VLEN, FALSE)) {
        /* If the default fill value is chosen for variable-length types, always write it */
        if(fill_prop->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_DEFAULT) {
            /* Update dataset creation property */
            fill_prop->fill_time = H5D_FILL_TIME_ALLOC;

            /* Note that the fill value changed */
            fill_changed = TRUE;
        } /* end if */

        /* Don't allow never writing fill values with variable-length types */
        if(fill_prop->fill_time == H5D_FILL_TIME_NEVER)
            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Dataset doesn't support VL datatype when fill value is not defined")
    } /* end if */

    /* Determine whether fill value is defined or not */
    if(fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
        /* Convert fill value buffer to dataset's datatype */
        if(fill_prop->buf && fill_prop->size > 0 && H5O_fill_convert(fill_prop, type, &fill_changed, dxpl_id) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to convert fill value to dataset type")

        fill_prop->fill_defined = TRUE;
    } else if(fill_status == H5D_FILL_VALUE_UNDEFINED) {
        fill_prop->fill_defined = FALSE;
    } else
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to determine if fill value is defined")

    /* Check for invalid fill & allocation time setting */
    if(fill_prop->fill_defined == FALSE && fill_prop->fill_time == H5D_FILL_TIME_ALLOC)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "fill value writing on allocation set, but no fill value defined")

    /* Check if the fill value info changed */
    if(fill_changed) {
        H5P_genplist_t *dc_plist;       /* Dataset's creation property list */

        /* Get dataset's property list object */
        HDassert(dset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT);
        if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")

        /* Update dataset creation property */
        if(H5P_set(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set fill value info")
    } /* end if */

    /* Add the dataset's raw data size to the size of the header, if the raw data will be stored as compact */
    if(layout->type == H5D_COMPACT)
        ohdr_size += layout->storage.u.compact.size;

    /* Create an object header for the dataset */
    if(H5O_create(file, dxpl_id, ohdr_size, (size_t)1, dset->shared->dcpl_id, oloc/*out*/) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset object header")
    HDassert(file == dset->oloc.file);

    /* Pin the object header */
    if(NULL == (oh = H5O_pin(oloc, dxpl_id)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")

    /* Write the dataspace header message */
    if(H5S_append(file, dxpl_id, oh, dset->shared->space) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update dataspace header message")

    /* Write the datatype header message */
    if(H5O_msg_append_oh(file, dxpl_id, oh, H5O_DTYPE_ID, H5O_MSG_FLAG_CONSTANT, 0, type) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update datatype header message")

    /* Write new fill value message */
    if(H5O_msg_append_oh(file, dxpl_id, oh, H5O_FILL_NEW_ID, H5O_MSG_FLAG_CONSTANT, 0, fill_prop) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update new fill value header message")

    /* If there is valid information for the old fill value struct, add it */
    /* (only if we aren't trying to write the latest version of the file format) */
    if(fill_prop->buf && !use_latest_format) {
        H5O_fill_t old_fill_prop;       /* Copy of fill value property, for writing as "old" fill value */

        /* Shallow copy the fill value property */
        /* (we only want to make certain that the shared component isnt' modified) */
        HDmemcpy(&old_fill_prop, fill_prop, sizeof(old_fill_prop));

        /* Reset shared component info */
        H5O_msg_reset_share(H5O_FILL_ID, &old_fill_prop);

        /* Write old fill value */
        if(H5O_msg_append_oh(file, dxpl_id, oh, H5O_FILL_ID, H5O_MSG_FLAG_CONSTANT, 0, &old_fill_prop) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update old fill value header message")
    } /* end if */

    /* Update/create the layout (and I/O pipeline & EFL) messages */
    if(H5D__layout_oh_create(file, dxpl_id, oh, dset, dapl_id) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout/pline/efl header message")

    /* Indicate that the layout information was initialized */
    layout_init = TRUE;

#ifdef H5O_ENABLE_BOGUS
{
    H5P_genplist_t *dc_plist;           /* Dataset's creation property list */

    /* Get dataset's property list object */
    if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")

    /* Check whether to add a "bogus" message */
    if(H5P_exist_plist(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME) > 0) {
        uint8_t bogus_flags = 0;        /* Flags for creating "bogus" message */

        /* Retrieve "bogus" message flags */
        if(H5P_get(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME, &bogus_flags) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get bogus message options")

        /* Add a "bogus" message (for error testing). */
        if(H5O_bogus_oh(file, dxpl_id, oh, (unsigned)bogus_flags) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create 'bogus' message")
    } /* end if */
}
#endif /* H5O_ENABLE_BOGUS */

    /* Add a modification time message, if using older format. */
    /* (If using the latest format, the modification time is part of the object
     *  header and doesn't use a separate message -QAK)
     */
    if(!use_latest_format)
        if(H5O_touch_oh(file, dxpl_id, oh, TRUE) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update modification time message")

done:
    /* Release pointer to object header itself */
    if(oh != NULL)
        if(H5O_unpin(oh) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")

    /* Error cleanup */
    if(ret_value < 0) {
        if(dset->shared->layout.type == H5D_CHUNKED && layout_init) {
            if(H5D__chunk_dest(file, dxpl_id, dset) < 0)
                HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
        } /* end if */
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__update_oh_info() */


/*-------------------------------------------------------------------------
 * Function: H5D__create
 *
 * Purpose:  Creates a new dataset with name NAME in file F and associates
 *           with it a datatype TYPE for each element as stored in the
 *           file, dimensionality information or dataspace SPACE, and
 *           other miscellaneous properties CREATE_PARMS.  All arguments
 *           are deep-copied before being associated with the new dataset,
 *           so the caller is free to subsequently modify them without
 *           affecting the dataset.
 *
 * Return:   Success: Pointer to a new dataset
 *
 *           Failure: NULL
 *
 * Programmer: Robb Matzke
 *             Thursday, December 4, 1997
 *
 *-------------------------------------------------------------------------
 */
H5D_t *
H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
    hid_t dapl_id, hid_t dxpl_id)
{
    const H5T_t    *type;                   /* Datatype for dataset */
    H5D_t          *new_dset = NULL;
    H5P_genplist_t *dc_plist = NULL;        /* New Property list */
    hbool_t         has_vl_type = FALSE;    /* Flag to indicate a VL-type for dataset */
    hbool_t         layout_init = FALSE;    /* Flag to indicate that chunk information was initialized */
    H5G_loc_t       dset_loc;               /* Dataset location */
    H5D_t          *ret_value;              /* Return value */

    FUNC_ENTER_PACKAGE

    /* check args */
    HDassert(file);
    HDassert(H5I_DATATYPE == H5I_get_type(type_id));
    HDassert(space);
    HDassert(H5I_GENPROP_LST == H5I_get_type(dcpl_id));
    HDassert(H5I_GENPROP_LST == H5I_get_type(dxpl_id));

    /* Get the dataset's datatype */
    if(NULL == (type = (const H5T_t *)H5I_object(type_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a datatype")

    /* Check if the datatype is "sensible" for use in a dataset */
    if(H5T_is_sensible(type) != TRUE)
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "datatype is not sensible")

    /* Check if the datatype is/contains a VL-type */
    if(H5T_detect_class(type, H5T_VLEN, FALSE))
        has_vl_type = TRUE;

    /* Check if the dataspace has an extent set (or is NULL) */
    if(!H5S_has_extent(space))
        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "dataspace extent has not been set.")

    /* Initialize the dataset object */
    if(NULL == (new_dset = H5FL_CALLOC(H5D_t)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    /* Set up & reset dataset location */
    dset_loc.oloc = &(new_dset->oloc);
    dset_loc.path = &(new_dset->path);
    H5G_loc_reset(&dset_loc);

    /* Initialize the shared dataset space */
    if(NULL == (new_dset->shared = H5D__new(dcpl_id, TRUE, has_vl_type)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    /* Copy & initialize datatype for dataset */
    if(H5D__init_type(file, new_dset, type_id, type) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't copy datatype")

    /* Copy & initialize dataspace for dataset */
    if(H5D__init_space(file, new_dset, space) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't copy dataspace")

    /* Set the dataset's checked_filters flag to enable writing */
    new_dset->shared->checked_filters = TRUE;

    /* Check if the dataset has a non-default DCPL & get important values, if so */
    if(new_dset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT) {
        H5O_layout_t   *layout;         /* Dataset's layout information */
        H5O_pline_t    *pline;          /* Dataset's I/O pipeline information */
        H5O_fill_t     *fill;           /* Dataset's fill value info */

        /* Check if the filters in the DCPL can be applied to this dataset */
        if(H5Z_can_apply(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
            HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, NULL, "I/O filters can't operate on this dataset")

        /* Make the "set local" filter callbacks for this dataset */
        if(H5Z_set_local(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to set local filter parameters")

        /* Get new dataset's property list object */
        if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(new_dset->shared->dcpl_id)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "can't get dataset creation property list")

        /* Retrieve the properties we need */
        pline = &new_dset->shared->dcpl_cache.pline;
        if(H5P_get(dc_plist, H5O_CRT_PIPELINE_NAME, pline) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve pipeline filter")
        layout = &new_dset->shared->layout;
        if(H5P_get(dc_plist, H5D_CRT_LAYOUT_NAME, layout) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve layout")
        if(pline->nused > 0 && H5D_CHUNKED != layout->type)
            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "filters can only be used with chunked layout")
        fill = &new_dset->shared->dcpl_cache.fill;
        if(H5P_get(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve fill value info")

        /* Check if the alloc_time is the default and error out */
        if(fill->alloc_time == H5D_ALLOC_TIME_DEFAULT)
            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "invalid space allocation state")

        /* Don't allow compact datasets to allocate space later */
        if(layout->type == H5D_COMPACT && fill->alloc_time != H5D_ALLOC_TIME_EARLY)
            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "compact dataset must have early space allocation")

        /* If MPI VFD is used, no filter support yet. */
        if(IS_H5FD_MPI(file) && pline->nused > 0)
            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "Parallel I/O does not support filters yet")

        /* Get the dataset's external file list information */
        if(H5P_get(dc_plist, H5D_CRT_EXT_FILE_LIST_NAME, &new_dset->shared->dcpl_cache.efl) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve external file list")
    } /* end if */

    /* Set the latest version of the layout, pline & fill messages, if requested */
    if(H5F_USE_LATEST_FORMAT(file)) {
        /* Set the latest version for the I/O pipeline message */
        if(H5O_pline_set_latest_version(&new_dset->shared->dcpl_cache.pline) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline")

        /* Set the latest version for the fill value message */
        if(H5O_fill_set_latest_version(&new_dset->shared->dcpl_cache.fill) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value")
    } /* end if */

    /* Check if this dataset is going into a parallel file and set space allocation time */
    if(IS_H5FD_MPI(file))
        new_dset->shared->dcpl_cache.fill.alloc_time = H5D_ALLOC_TIME_EARLY;

    /* Set the dataset's I/O operations */
    if(H5D__layout_set_io_ops(new_dset) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize I/O operations")

    /* Create the layout information for the new dataset */
    if((new_dset->shared->layout.ops->construct)(file, new_dset) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT,
NULL, "unable to construct layout information") /* Update the dataset's object header info. */ if(H5D__update_oh_info(file, dxpl_id, new_dset, dapl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't update the metadata cache") /* Indicate that the layout information was initialized */ layout_init = TRUE; /* Add the dataset to the list of opened objects in the file */ if(H5FO_top_incr(new_dset->oloc.file, new_dset->oloc.addr) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't incr object ref. count") if(H5FO_insert(new_dset->oloc.file, new_dset->oloc.addr, new_dset->shared, TRUE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert dataset into list of open objects") new_dset->shared->fo_count = 1; /* Success */ ret_value = new_dset; done: if(!ret_value && new_dset && new_dset->shared) { if(new_dset->shared) { if(new_dset->shared->layout.type == H5D_CHUNKED && layout_init) { if(H5D__chunk_dest(file, dxpl_id, new_dset) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "unable to destroy chunk cache") } /* end if */ if(new_dset->shared->space && H5S_close(new_dset->shared->space) < 0) HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release dataspace") if(new_dset->shared->type && H5I_dec_ref(new_dset->shared->type_id) < 0) HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype") if(H5F_addr_defined(new_dset->oloc.addr)) { if(H5O_dec_rc_by_loc(&(new_dset->oloc), dxpl_id) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement refcount on newly created object") if(H5O_close(&(new_dset->oloc)) < 0) HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release object header") if(file) { if(H5O_delete(file, dxpl_id, new_dset->oloc.addr) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTDELETE, NULL, "unable to delete object header") } /* end if */ } /* end if */ if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement 
ref count on property list")

            new_dset->shared = H5FL_FREE(H5D_shared_t, new_dset->shared);
        } /* end if */
        new_dset->oloc.file = NULL;
        new_dset = H5FL_FREE(H5D_t, new_dset);
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__create() */


/*
 *-------------------------------------------------------------------------
 * Function:    H5D_open
 *
 * Purpose:     Checks if dataset is already open, or opens a dataset for
 *              access.
 *
 * Return:      Success:    Pointer to the opened dataset
 *              Failure:    NULL
 *              (NOTE(review): header previously said "Dataset ID"/"FAIL",
 *              which contradicts the H5D_t * return type)
 *
 * Programmer:  Quincey Koziol
 *              Friday, December 20, 2002
 *
 *-------------------------------------------------------------------------
 */
H5D_t *
H5D_open(const H5G_loc_t *loc, hid_t dapl_id, hid_t dxpl_id)
{
    H5D_shared_t *shared_fo = NULL;     /* Shared struct of an already-open dataset, if any */
    H5D_t *dataset = NULL;              /* Dataset wrapper being constructed */
    H5D_t *ret_value;                   /* Return value */

    FUNC_ENTER_NOAPI(NULL)

    /* check args */
    HDassert(loc);

    /* Allocate the dataset structure */
    if(NULL == (dataset = H5FL_CALLOC(H5D_t)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    /* Shallow copy (take ownership) of the object location object */
    if(H5O_loc_copy(&(dataset->oloc), loc->oloc, H5_COPY_SHALLOW) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, NULL, "can't copy object location")

    /* Shallow copy (take ownership) of the group hier. path */
    if(H5G_name_copy(&(dataset->path), loc->path, H5_COPY_SHALLOW) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, NULL, "can't copy path")

    /* Check if dataset was already open */
    if(NULL == (shared_fo = (H5D_shared_t *)H5FO_opened(dataset->oloc.file, dataset->oloc.addr))) {
        /* Clear any errors from H5FO_opened() */
        H5E_clear_stack(NULL);

        /* Open the dataset object */
        if(H5D__open_oid(dataset, dapl_id, dxpl_id) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, NULL, "not found")

        /* Add the dataset to the list of opened objects in the file */
        if(H5FO_insert(dataset->oloc.file, dataset->oloc.addr, dataset->shared, FALSE) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert dataset into list of open objects")

        /* Increment object count for the object in the top file */
        if(H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment object count")

        /* We're the first dataset to use the shared info */
        dataset->shared->fo_count = 1;
    } /* end if */
    else {
        /* Point to shared info */
        dataset->shared = shared_fo;

        /* Increment # of datasets using shared information */
        shared_fo->fo_count++;

        /* Check if the object has been opened through the top file yet */
        if(H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
            /* Open the object through this top file */
            if(H5O_open(&(dataset->oloc)) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "unable to open object header")
        } /* end if */

        /* Increment object count for the object in the top file */
        if(H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment object count")
    } /* end else */

    ret_value = dataset;

done:
    if(ret_value == NULL) {
        /* Free the location--casting away const*/
        if(dataset) {
            if(shared_fo == NULL)   /* Need to free shared fo */
                dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);

            H5O_loc_free(&(dataset->oloc));
            H5G_name_free(&(dataset->path));

            dataset = H5FL_FREE(H5D_t, dataset);
        } /* end if */
        /* Undo the share-count bump taken in the "already open" branch above */
        if(shared_fo)
            shared_fo->fo_count--;
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_open() */


/*-------------------------------------------------------------------------
 * Function:    H5D__open_oid
 *
 * Purpose:     Opens a dataset for access, reading its metadata (datatype,
 *              dataspace, layout, fill value) from the object header.
 *
 * Return:      Non-negative on success/Negative on failure
 *              (NOTE(review): header previously said "Dataset pointer on
 *              success, NULL on failure", which contradicts the herr_t
 *              return type and the SUCCEED/FAIL usage below)
 *
 * Programmer:  Quincey Koziol
 *              Monday, October 12, 1998
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id)
{
    H5P_genplist_t *plist;      /* Property list */
    H5O_fill_t *fill_prop;      /* Pointer to dataset's fill value info */
    unsigned alloc_time_state;  /* Allocation time state */
    htri_t msg_exists;          /* Whether a particular type of message exists */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* check args */
    HDassert(dataset);

    /* (Set the 'vl_type' parameter to FALSE since it doesn't matter from here) */
    if(NULL == (dataset->shared = H5D__new(H5P_DATASET_CREATE_DEFAULT, FALSE, FALSE)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")

    /* Open the dataset object */
    if(H5O_open(&(dataset->oloc)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open")

    /* Get the type and space */
    if(NULL == (dataset->shared->type = (H5T_t *)H5O_msg_read(&(dataset->oloc), H5O_DTYPE_ID, NULL, dxpl_id)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load type info from dataset header")

    if(H5T_set_loc(dataset->shared->type, dataset->oloc.file, H5T_LOC_DISK) < 0)
        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "invalid datatype location")

    if(NULL == (dataset->shared->space = H5S_read(&(dataset->oloc), dxpl_id)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load dataspace info from dataset header")

    /* Get a datatype ID for the dataset's datatype */
    if((dataset->shared->type_id = H5I_register(H5I_DATATYPE, dataset->shared->type, FALSE)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")

    /* Get dataset creation property list object */
    if(NULL == (plist = (H5P_genplist_t *)H5I_object(dataset->shared->dcpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")

    /* Get the layout/pline/efl message information */
    if(H5D__layout_oh_read(dataset, dxpl_id, dapl_id, plist) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get layout/pline/efl info")

    /* Point at dataset's copy, to cache it for later */
    fill_prop = &dataset->shared->dcpl_cache.fill;

    /* Try to get the new fill value message from the object header */
    if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_NEW_ID, dxpl_id)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists")
    if(msg_exists) {
        if(NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_NEW_ID, fill_prop, dxpl_id))
            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message")
    } /* end if */
    else {
        /* For backward compatibility, try to retrieve the old fill value message */
        if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_ID, dxpl_id)) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists")
        if(msg_exists) {
            if(NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_ID, fill_prop, dxpl_id))
                HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message")
        } /* end if */
        else {
            /* Set the space allocation time appropriately, based on the type of dataset storage */
            switch(dataset->shared->layout.type) {
                case H5D_COMPACT:
                    fill_prop->alloc_time = H5D_ALLOC_TIME_EARLY;
                    break;

                case H5D_CONTIGUOUS:
                    fill_prop->alloc_time = H5D_ALLOC_TIME_LATE;
                    break;

                case H5D_CHUNKED:
                    fill_prop->alloc_time = H5D_ALLOC_TIME_INCR;
                    break;

                case H5D_LAYOUT_ERROR:
                case H5D_NLAYOUTS:
                default:
                    HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "not implemented yet")
            } /* end switch */ /*lint !e788 All appropriate cases are covered */
        } /* end else */

        /* If "old" fill value size is 0 (undefined), map it to -1 */
        if(fill_prop->size == 0)
            fill_prop->size = (ssize_t)-1;
    } /* end if */

    /* Record whether the cached alloc time is the expected default for this layout */
    alloc_time_state = 0;
    if((dataset->shared->layout.type == H5D_COMPACT && fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY)
            || (dataset->shared->layout.type == H5D_CONTIGUOUS && fill_prop->alloc_time == H5D_ALLOC_TIME_LATE)
            || (dataset->shared->layout.type == H5D_CHUNKED && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR))
        alloc_time_state = 1;

    /* Set revised fill value properties, if they are different from the defaults */
    if(H5P_fill_value_cmp(&H5D_def_dset.dcpl_cache.fill, fill_prop, sizeof(H5O_fill_t))) {
        if(H5P_set(plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set fill value")
        if(H5P_set(plist, H5D_CRT_ALLOC_TIME_STATE_NAME, &alloc_time_state) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set allocation time state")
    } /* end if */

    /*
     * Make sure all storage is properly initialized.
     * This is important only for parallel I/O where the space must
     * be fully allocated before I/O can happen.
     */
    if((H5F_INTENT(dataset->oloc.file) & H5F_ACC_RDWR)
            && !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage)
            && IS_H5FD_MPI(dataset->oloc.file)) {
        if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_OPEN, FALSE, NULL) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file storage")
    } /* end if */

done:
    if(ret_value < 0) {
        /* Error cleanup: release the partially-opened object header and metadata */
        if(H5F_addr_defined(dataset->oloc.addr) && H5O_close(&(dataset->oloc)) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release object header")
        if(dataset->shared) {
            if(dataset->shared->space && H5S_close(dataset->shared->space) < 0)
                HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace")
            if(dataset->shared->type) {
                /* If the datatype was registered as an ID, release via the ID
                 * machinery; otherwise close the bare type object directly */
                if(dataset->shared->type_id > 0) {
                    if(H5I_dec_ref(dataset->shared->type_id) < 0)
                        HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
                } /* end if */
                else {
                    if(H5T_close(dataset->shared->type) < 0)
                        HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
                } /* end else */
            } /* end if */
        } /* end if */
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__open_oid() */


/*-------------------------------------------------------------------------
 * Function:    H5D_close
 *
 * Purpose:     Insures that all data has been saved to the file, closes the
 *              dataset object header, and frees all resources used by the
 *              descriptor.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Robb Matzke
 *              Thursday, December 4, 1997
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_close(H5D_t *dataset)
{
    unsigned free_failed = FALSE;   /* Set when a resource release fails; reported once at the end */
    herr_t ret_value = SUCCEED;     /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* check args */
    HDassert(dataset && dataset->oloc.file && dataset->shared);
    HDassert(dataset->shared->fo_count > 0);

    /* Dump debugging info */
#ifdef H5D_CHUNK_DEBUG
    H5D__chunk_stats(dataset, FALSE);
#endif /* H5D_CHUNK_DEBUG */

    /* Drop one share-count; only tear down the shared struct on the last close */
    dataset->shared->fo_count--;
    if(dataset->shared->fo_count == 0) {
        /* Flush the dataset's information.  Continue to close even if it fails. */
        if(H5D__flush_real(dataset, H5AC_dxpl_id) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")

        /* Free the data sieve buffer, if it's been allocated */
        if(dataset->shared->cache.contig.sieve_buf) {
            HDassert(dataset->shared->layout.type != H5D_COMPACT);      /* We should never have a sieve buffer for compact storage */

            dataset->shared->cache.contig.sieve_buf = (unsigned char *)H5FL_BLK_FREE(sieve_buf,dataset->shared->cache.contig.sieve_buf);
        } /* end if */

        /* Free cached information for each kind of dataset */
        switch(dataset->shared->layout.type) {
            case H5D_CONTIGUOUS:
                break;

            case H5D_CHUNKED:
                /* Check for skip list for iterating over chunks during I/O to close */
                if(dataset->shared->cache.chunk.sel_chunks) {
                    HDassert(H5SL_count(dataset->shared->cache.chunk.sel_chunks) == 0);
                    H5SL_close(dataset->shared->cache.chunk.sel_chunks);
                    dataset->shared->cache.chunk.sel_chunks = NULL;
                } /* end if */

                /* Check for cached single chunk dataspace */
                if(dataset->shared->cache.chunk.single_space) {
                    (void)H5S_close(dataset->shared->cache.chunk.single_space);
                    dataset->shared->cache.chunk.single_space = NULL;
                } /* end if */

                /* Check for cached single element chunk info */
                if(dataset->shared->cache.chunk.single_chunk_info) {
                    dataset->shared->cache.chunk.single_chunk_info = H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info);
                    dataset->shared->cache.chunk.single_chunk_info = NULL;
                } /* end if */

                /* Flush and destroy chunks in the cache. Continue to close even if
                 * it fails. */
                if(H5D__chunk_dest(dataset->oloc.file, H5AC_dxpl_id, dataset) < 0)
                    HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
                break;

            case H5D_COMPACT:
                /* Free the buffer for the raw data for compact datasets */
                dataset->shared->layout.storage.u.compact.buf = H5MM_xfree(dataset->shared->layout.storage.u.compact.buf);
                break;

            case H5D_LAYOUT_ERROR:
            case H5D_NLAYOUTS:
            default:
                HDassert("not implemented yet" && 0);
#ifdef NDEBUG
                HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
#endif /* NDEBUG */
        } /* end switch */ /*lint !e788 All appropriate cases are covered */

        /*
         * Release datatype, dataspace and creation property list -- there isn't
         * much we can do if one of these fails, so we just continue.
         */
        free_failed = (unsigned)(H5I_dec_ref(dataset->shared->type_id) < 0 || H5S_close(dataset->shared->space) < 0 ||
                H5I_dec_ref(dataset->shared->dcpl_id) < 0);

        /* Remove the dataset from the list of opened objects in the file */
        if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't decrement count for object")
        if(H5FO_delete(dataset->oloc.file, H5AC_dxpl_id, dataset->oloc.addr) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't remove dataset from list of open objects")

        /* Close the dataset object */
        /* (This closes the file, if this is the last object open) */
        if(H5O_close(&(dataset->oloc)) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release object header")

        /*
         * Free memory.  Before freeing the memory set the file pointer to NULL.
         * We always check for a null file pointer in other H5D functions to be
         * sure we're not accessing an already freed dataset (see the HDassert()
         * above).
         */
        dataset->oloc.file = NULL;

        dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);
    } /* end if */
    else {
        /* Decrement the ref. count for this object in the top file */
        if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't decrement count for object")

        /* Check reference count for this object in the top file */
        if(H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
            if(H5O_close(&(dataset->oloc)) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to close")
        } /* end if */
        else
            /* Free object location (i.e. "unhold" the file if appropriate) */
            if(H5O_loc_free(&(dataset->oloc)) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "problem attempting to free location")
    } /* end else */

    /* Release the dataset's path info */
    if(H5G_name_free(&(dataset->path)) < 0)
        free_failed = TRUE;

    /* Free the dataset's memory structure */
    dataset = H5FL_FREE(H5D_t, dataset);

    /* Check if anything failed in the middle... */
    if(free_failed)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't free a component of the dataset, but the dataset was freed anyway.")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_close() */


/*-------------------------------------------------------------------------
 * Function:    H5D_oloc
 *
 * Purpose:     Returns a pointer to the object location for a dataset.
 *
 * Return:      Success:    Ptr to location
 *              Failure:    NULL
 *
 * Programmer:  Robb Matzke
 *              Friday, April 24, 1998
 *
 *-------------------------------------------------------------------------
 */
H5O_loc_t *
H5D_oloc(H5D_t *dataset)
{
    /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
    FUNC_ENTER_NOAPI_NOINIT_NOERR

    FUNC_LEAVE_NOAPI(dataset ? &(dataset->oloc) : (H5O_loc_t *)NULL)
} /* end H5D_oloc() */


/*-------------------------------------------------------------------------
 * Function:    H5D_nameof
 *
 * Purpose:     Returns a pointer to the group hier. path for a dataset.
 *
 * Return:      Success:    Ptr to entry
 *              Failure:    NULL
 *
 * Programmer:  Quincey Koziol
 *              Monday, September 12, 2005
 *
 *-------------------------------------------------------------------------
 */
H5G_name_t *
H5D_nameof(H5D_t *dataset)
{
    /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
    FUNC_ENTER_NOAPI_NOINIT_NOERR

    FUNC_LEAVE_NOAPI(dataset ? &(dataset->path) : (H5G_name_t *)NULL)
} /* end H5D_nameof() */


/*-------------------------------------------------------------------------
 * Function:    H5D_typeof
 *
 * Purpose:     Returns a pointer to the dataset's datatype.  The datatype
 *              is not copied.
 *
 * Return:      Success:    Ptr to the dataset's datatype, uncopied.
 *              Failure:    NULL
 *
 * Programmer:  Robb Matzke
 *              Thursday, June 4, 1998
 *
 *-------------------------------------------------------------------------
 */
H5T_t *
H5D_typeof(const H5D_t *dset)
{
    /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
    FUNC_ENTER_NOAPI_NOINIT_NOERR

    HDassert(dset);
    HDassert(dset->shared);
    HDassert(dset->shared->type);

    FUNC_LEAVE_NOAPI(dset->shared->type)
} /* end H5D_typeof() */


/*-------------------------------------------------------------------------
 * Function:    H5D__alloc_storage
 *
 * Purpose:     Allocate storage for the raw data of a dataset.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Robb Matzke
 *              Friday, January 16, 1998
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_alloc,
    hbool_t full_overwrite, hsize_t old_dim[])
{
    H5F_t *f = dset->oloc.file;         /* The dataset's file pointer */
    H5O_layout_t *layout;               /* The dataset's layout information */
    hbool_t must_init_space = FALSE;    /* Flag to indicate that space should be initialized */
    hbool_t addr_set = FALSE;           /* Flag to indicate that the dataset's storage address was set */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    /* check args */
    HDassert(dset);
    HDassert(f);

    /* If the data is stored in external files, don't set an address for the layout
     * We assume that external storage is already
     * allocated by the caller, or at least will be before I/O is performed.
     */
    if(!(H5S_NULL == H5S_GET_EXTENT_TYPE(dset->shared->space) || dset->shared->dcpl_cache.efl.nused > 0)) {
        /* Get a pointer to the dataset's layout information */
        layout = &(dset->shared->layout);

        switch(layout->type) {
            case H5D_CONTIGUOUS:
                if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
                    /* Check if we have a zero-sized dataset */
                    if(layout->storage.u.contig.size > 0) {
                        /* Reserve space in the file for the entire array */
                        if(H5D__contig_alloc(f, dxpl_id, &layout->storage.u.contig/*out*/) < 0)
                            HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")

                        /* Indicate that we should initialize storage space */
                        must_init_space = TRUE;
                    } /* end if */
                    else
                        layout->storage.u.contig.addr = HADDR_UNDEF;

                    /* Indicate that we set the storage addr */
                    addr_set = TRUE;
                } /* end if */
                break;

            case H5D_CHUNKED:
                if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
                    /* Create the root of the B-tree that describes chunked storage */
                    if(H5D__chunk_create(dset /*in,out*/, dxpl_id) < 0)
                        HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")

                    /* Indicate that we set the storage addr */
                    addr_set = TRUE;

                    /* Indicate that we should initialize storage space */
                    must_init_space = TRUE;
                } /* end if */

                /* If space allocation is set to 'early' and we are extending
                 * the dataset, indicate that space should be allocated, so the
                 * B-tree gets expanded. -QAK
                 */
                if(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY
                        && time_alloc == H5D_ALLOC_EXTEND)
                    must_init_space = TRUE;
                break;

            case H5D_COMPACT:
                /* Check if space is already allocated */
                if(NULL == layout->storage.u.compact.buf) {
                    /* Reserve space in layout header message for the entire array.
                     * Starting from the 1.8.7 release, we allow dataspace to have
                     * zero dimension size.  So the storage size can be zero.
                     * SLU 2011/4/4 */
                    if(layout->storage.u.compact.size > 0) {
                        if(NULL == (layout->storage.u.compact.buf = H5MM_malloc(layout->storage.u.compact.size)))
                            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory for compact dataset")
                        if(!full_overwrite)
                            HDmemset(layout->storage.u.compact.buf, 0, layout->storage.u.compact.size);
                        layout->storage.u.compact.dirty = TRUE;

                        /* Indicate that we should initialize storage space */
                        must_init_space = TRUE;
                    }
                    else {
                        layout->storage.u.compact.dirty = FALSE;
                        must_init_space = FALSE;
                    }
                } /* end if */
                break;

            case H5D_LAYOUT_ERROR:
            case H5D_NLAYOUTS:
            default:
                HDassert("not implemented yet" && 0);
#ifdef NDEBUG
                HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
#endif /* NDEBUG */
        } /* end switch */ /*lint !e788 All appropriate cases are covered */

        /* Check if we need to initialize the space */
        if(must_init_space) {
            if(layout->type == H5D_CHUNKED) {
                /* If we are doing incremental allocation and the B-tree got
                 * created during a H5Dwrite call, don't initialize the storage
                 * now, wait for the actual writes to each block and let the
                 * low-level chunking routines handle initialize the fill-values.
                 * Otherwise, pass along the space initialization call and let
                 * the low-level chunking routines sort out whether to write
                 * fill values to the chunks they allocate space for.  Yes,
                 * this is icky. -QAK
                 */
                if(!(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR && time_alloc == H5D_ALLOC_WRITE))
                    if(H5D__init_storage(dset, full_overwrite, old_dim, dxpl_id) < 0)
                        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
            } /* end if */
            else {
                H5D_fill_value_t fill_status;       /* The fill value status */

                /* Check the dataset's fill-value status */
                if(H5P_is_fill_value_defined(&dset->shared->dcpl_cache.fill, &fill_status) < 0)
                    HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")

                /* If we are filling the dataset on allocation or "if set" and
                 * the fill value _is_ set, do that now */
                if(dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC ||
                        (dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
                    if(H5D__init_storage(dset, full_overwrite, old_dim, dxpl_id) < 0)
                        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
                } /* end if */
            } /* end else */
        } /* end if */

        /* If we set the address (and aren't in the middle of creating the
         * dataset), mark the layout header message for later writing to
         * the file.  (this improves forward compatibility).
         */
        /* (The layout message is already in the dataset's object header, this
         * operation just sets the address and makes it constant)
         */
        if(time_alloc != H5D_ALLOC_CREATE && addr_set)
            /* Mark the layout as dirty, for later writing to the file */
            if(H5D__mark(dset, dxpl_id, H5D_MARK_LAYOUT) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to mark dataspace as dirty")
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__alloc_storage() */


/*-------------------------------------------------------------------------
 * Function:    H5D__init_storage
 *
 * Purpose:     Initialize the data for a new dataset.  If a selection is
 *              defined for SPACE then initialize only that part of the
 *              dataset.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Robb Matzke
 *              Monday, October 5, 1998
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D__init_storage(H5D_t *dset, hbool_t full_overwrite, hsize_t old_dim[],
    hid_t dxpl_id)
{
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_STATIC

    HDassert(dset);

    switch (dset->shared->layout.type) {
        case H5D_COMPACT:
            /* If we will be immediately overwriting the values, don't bother to clear them */
            if(!full_overwrite) {
                /* Fill the compact dataset storage */
                if(H5D__compact_fill(dset, dxpl_id) < 0)
                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize compact dataset storage")
            } /* end if */
            break;

        case H5D_CONTIGUOUS:
            /* Don't write default fill values to external files */
            /* If we will be immediately overwriting the values, don't bother to clear them */
            if((dset->shared->dcpl_cache.efl.nused == 0 || dset->shared->dcpl_cache.fill.buf) && !full_overwrite)
                if(H5D__contig_fill(dset, dxpl_id) < 0)
                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
            break;

        case H5D_CHUNKED:
            /*
             * Allocate file space
             * for all chunks now and initialize each chunk with the fill value.
             */
            {
                hsize_t zero_dim[H5O_LAYOUT_NDIMS] = {0};

                /* Use zeros for old dimensions if not specified */
                if(old_dim == NULL)
                    old_dim = zero_dim;

                if(H5D__chunk_allocate(dset, dxpl_id, full_overwrite, old_dim) < 0)
                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
                break;
            } /* end block */

        case H5D_LAYOUT_ERROR:
        case H5D_NLAYOUTS:
        default:
            HDassert("not implemented yet" && 0);
#ifdef NDEBUG
            HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
#endif /* NDEBUG */
    } /* end switch */ /*lint !e788 All appropriate cases are covered */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_storage() */


/*-------------------------------------------------------------------------
 * Function:    H5D__get_storage_size
 *
 * Purpose:     Determines how much space has been reserved to store the raw
 *              data of a dataset.
 *
 * Return:      Non-negative on success, negative on failure
 *
 * Programmer:  Robb Matzke
 *              Wednesday, April 21, 1999
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__get_storage_size(H5D_t *dset, hid_t dxpl_id, hsize_t *storage_size)
{
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    switch(dset->shared->layout.type) {
        case H5D_CHUNKED:
            if((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
                if(H5D__chunk_allocated(dset, dxpl_id, storage_size) < 0)
                    HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve chunked dataset allocated size")
            } /* end if */
            else
                *storage_size = 0;
            break;

        case H5D_CONTIGUOUS:
            /* Datasets which are not allocated yet are using no space on disk */
            if((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
                *storage_size = dset->shared->layout.storage.u.contig.size;
            else
                *storage_size = 0;
            break;

        case H5D_COMPACT:
            *storage_size = dset->shared->layout.storage.u.compact.size;
            break;

        case H5D_LAYOUT_ERROR:
        case H5D_NLAYOUTS:
        default:
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset type")
    } /*lint !e788 All appropriate cases are covered */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_storage_size() */


/*-------------------------------------------------------------------------
 * Function:    H5D__get_offset
 *
 * Purpose:     Private function for H5Dget_offset.  Returns the address
 *              of dataset in file.
 *              (NOTE(review): header previously read "Private function for
 *              H5D__get_offset", i.e. a helper of itself; presumably the
 *              public H5Dget_offset API was meant)
 *
 * Return:      Success:    the address of dataset
 *
 *              Failure:    HADDR_UNDEF
 *
 * Programmer:  Raymond Lu
 *              November 6, 2002
 *
 *-------------------------------------------------------------------------
 */
haddr_t
H5D__get_offset(const H5D_t *dset)
{
    haddr_t ret_value = HADDR_UNDEF;

    FUNC_ENTER_PACKAGE

    HDassert(dset);

    switch(dset->shared->layout.type) {
        case H5D_CHUNKED:
        case H5D_COMPACT:
            break;

        case H5D_CONTIGUOUS:
            /* If dataspace hasn't been allocated or dataset is stored in
             * an external file, the value will be HADDR_UNDEF. */
            if(dset->shared->dcpl_cache.efl.nused == 0 || H5F_addr_defined(dset->shared->layout.storage.u.contig.addr))
                /* Return the absolute dataset offset from the beginning of file. */
                ret_value = dset->shared->layout.storage.u.contig.addr + H5F_BASE_ADDR(dset->oloc.file);
            break;

        case H5D_LAYOUT_ERROR:
        case H5D_NLAYOUTS:
        default:
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, HADDR_UNDEF, "unknown dataset layout type")
    } /*lint !e788 All appropriate cases are covered */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_offset() */


/*-------------------------------------------------------------------------
 * Function:    H5D__iterate
 *
 * Purpose:     Internal version of H5Diterate()
 *
 * Return:      Returns the return value of the last operator if it was non-zero,
 *              or zero if all elements were processed. Otherwise returns a
 *              negative value.
 *
 * Programmer:  Quincey Koziol
 *              Tuesday, November 22, 2005
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t op,
    void *operator_data)
{
    herr_t ret_value;

    FUNC_ENTER_PACKAGE_NOERR

    /* Check args */
    HDassert(buf);
    HDassert(H5I_DATATYPE == H5I_get_type(type_id));
    HDassert(space);
    HDassert(H5S_has_extent(space));
    HDassert(op);

    /* Delegate element iteration to the dataspace selection code */
    ret_value = H5S_select_iterate(buf, type_id, space, op, operator_data);

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__iterate() */


/*-------------------------------------------------------------------------
 * Function:    H5D_vlen_reclaim
 *
 * Purpose:     Frees the buffers allocated for storing variable-length data
 *              in memory.  Only frees the VL data in the selection defined in the
 *              dataspace.  The dataset transfer property list is required to find the
 *              correct allocation/free methods for the VL data in the buffer.
 *
 * Return:      Non-negative on success, negative on failure
 *
 * Programmer:  Quincey Koziol
 *              Tuesday, November 22, 2005
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_vlen_reclaim(hid_t type_id, H5S_t *space, hid_t plist_id, void *buf)
{
    H5T_vlen_alloc_info_t _vl_alloc_info;       /* VL allocation info buffer */
    H5T_vlen_alloc_info_t *vl_alloc_info = &_vl_alloc_info;   /* VL allocation info */
    herr_t ret_value;

    FUNC_ENTER_NOAPI(FAIL)

    /* Check args */
    HDassert(H5I_DATATYPE == H5I_get_type(type_id));
    HDassert(space);
    HDassert(H5P_isa_class(plist_id, H5P_DATASET_XFER));
    HDassert(buf);

    /* Get the allocation info */
    if(H5T_vlen_get_alloc_info(plist_id,&vl_alloc_info) < 0)
        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "unable to retrieve VL allocation info")

    /* Call H5D__iterate with args, etc. */
    ret_value = H5D__iterate(buf, type_id, space ,H5T_vlen_reclaim, vl_alloc_info);

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_vlen_reclaim() */


/*-------------------------------------------------------------------------
 * Function:    H5D__vlen_get_buf_size_alloc
 *
 * Purpose:     This routine makes certain there is enough space in the temporary
 *              buffer for the new data to read in.  All the VL data read in is actually
 *              placed in this buffer, overwriting the previous data.  Needless to say,
 *              this data is not actually usable.
 *
 * Return:      Non-negative on success, negative on failure
 *
 * Programmer:  Quincey Koziol
 *              Tuesday, August 17, 1999
 *
 *-------------------------------------------------------------------------
 */
void *
H5D__vlen_get_buf_size_alloc(size_t size, void *info)
{
    H5D_vlen_bufsize_t *vlen_bufsize = (H5D_vlen_bufsize_t *)info;
    void *ret_value;            /* Return value */

    FUNC_ENTER_PACKAGE_NOERR

    /* Get a temporary pointer to space for the VL data */
    /* (on success, also accumulate the requested size into the running total) */
    if((vlen_bufsize->vl_tbuf = H5FL_BLK_REALLOC(vlen_vl_buf, vlen_bufsize->vl_tbuf, size)) != NULL)
        vlen_bufsize->size += size;

    /* Set return value */
    ret_value = vlen_bufsize->vl_tbuf;

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size_alloc() */


/*-------------------------------------------------------------------------
 * Function:    H5D__vlen_get_buf_size
 *
 * Purpose:     This routine checks the number of bytes required to store a single
 *              element from a dataset in memory, creating a selection with just the
 *              single element selected to read in the element and using a custom memory
 *              allocator for any VL data encountered.
 *              The *size value is modified according to how many bytes are
 *              required to store the element in memory.
 *
 * Implementation: This routine actually performs the read with a custom
 *              memory manager which basically just counts the bytes requested and
 *              uses a temporary memory buffer (through the H5FL API) to make certain
 *              enough space is available to perform the read.  Then the temporary
 *              buffer is released and the number of bytes allocated is returned.
 *              Kinda kludgy, but easier than the other method of trying to figure out
 *              the sizes without actually reading the data in... - QAK
 *
 * Return:      Non-negative on success, negative on failure
 *
 * Programmer:  Quincey Koziol
 *              Tuesday, August 17, 1999
 *
 *-------------------------------------------------------------------------
 */
/* ARGSUSED */
herr_t
H5D__vlen_get_buf_size(void UNUSED *elem, hid_t type_id, unsigned UNUSED ndim,
    const hsize_t *point, void *op_data)
{
    H5D_vlen_bufsize_t *vlen_bufsize = (H5D_vlen_bufsize_t *)op_data;
    H5T_t *dt;                          /* Datatype for operation */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    HDassert(op_data);
    HDassert(H5I_DATATYPE == H5I_get_type(type_id));

    /* Check args */
    if(NULL == (dt = (H5T_t *)H5I_object(type_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")

    /* Make certain there is enough fixed-length buffer available */
    if(NULL == (vlen_bufsize->fl_tbuf = H5FL_BLK_REALLOC(vlen_fl_buf, vlen_bufsize->fl_tbuf, H5T_get_size(dt))))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't resize tbuf")

    /* Select point to read in */
    if(H5Sselect_elements(vlen_bufsize->fspace_id, H5S_SELECT_SET, (size_t)1, point) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't select point")

    /* Read in the point (with the custom VL memory allocator) */
    if(H5Dread(vlen_bufsize->dataset_id, type_id, vlen_bufsize->mspace_id, vlen_bufsize->fspace_id, vlen_bufsize->xfer_pid, vlen_bufsize->fl_tbuf) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read point")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size() */


/*-------------------------------------------------------------------------
 * Function:    H5D__check_filters
 *
 * Purpose:     Check if the filters have been initialized for the dataset
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              Thursday, October 11, 2007
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__check_filters(H5D_t *dataset)
{
    H5O_fill_t *fill;           /* Dataset's fill value */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Check args */
    HDassert(dataset);

    /* Check if the filters in the DCPL will need to encode, and if so, can they?
     *
     * Filters need encoding if fill value is defined and a fill policy is set
     * that requires writing on an extend.
     */
    fill = &dataset->shared->dcpl_cache.fill;
    if(!dataset->shared->checked_filters) {
        H5D_fill_value_t fill_status;   /* Whether the fill value is defined */

        /* Retrieve the "defined" status of the fill value */
        if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Couldn't retrieve fill value from dataset.")

        /* See if we can check the filter status */
        if(fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
            if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
                    (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
                /* Filters must have encoding enabled. Ensure that all filters can be applied */
                if(H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) < 0)
                    HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters")

                /* Remember the successful check so it only runs once per dataset */
                dataset->shared->checked_filters = TRUE;
            } /* end if */
        } /* end if */
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__check_filters() */


/*-------------------------------------------------------------------------
 * Function: H5D__set_extent
 *
 * Purpose: Based on H5D_extend, allows change to a lower dimension,
 *      calls H5S_set_extent and H5D__chunk_prune_by_extent instead
 *
 * Return: Non-negative on success, negative on failure
 *
 * Programmer: Pedro Vicente, [email protected]
 *      April 9, 2002
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
{
    H5S_t *space;                       /* Dataset's dataspace */
    int rank;                           /* Dataspace # of dimensions */
    hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
    htri_t changed;                     /* Whether the dataspace changed size */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    /* Check args */
    HDassert(dset);
    HDassert(size);

    /* Check if we are allowed to modify this file */
    if(0 == (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR))
        HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "no write intent on file")

    /* Check if we are allowed to modify the space; only datasets with chunked and external storage are allowed to be modified */
    if(H5D_COMPACT == dset->shared->layout.type)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "dataset has compact storage")
    if(H5D_CONTIGUOUS == dset->shared->layout.type && 0 == dset->shared->dcpl_cache.efl.nused)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "dataset has contiguous storage")

    /* Check if the filters in the DCPL will need to encode, and if so, can they? */
    if(H5D__check_filters(dset) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't apply filters")

    /* Get the data space */
    space = dset->shared->space;

    /* Check if we are shrinking or expanding any of the dimensions */
    if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")

    /* Modify the size of the data space */
    if((changed = H5S_set_extent(space, size)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of data space")

    /* Don't bother updating things, unless they've changed */
    if(changed) {
        hbool_t shrink = FALSE;         /* Flag to indicate a dimension has shrank */
        hbool_t expand = FALSE;         /* Flag to indicate a dimension has grown */
        unsigned u;                     /* Local index variable */

        /* Determine if we are shrinking and/or expanding any dimensions */
        for(u = 0; u < (unsigned)rank; u++) {
            if(size[u] < curr_dims[u])
                shrink = TRUE;
            if(size[u] > curr_dims[u])
                expand = TRUE;
        } /* end for */

        /*-------------------------------------------------------------------------
         * Modify the dataset storage
         *-------------------------------------------------------------------------
         */
        /* Update the index values for the cached chunks for this dataset */
        if(H5D_CHUNKED == dset->shared->layout.type) {
            if(H5D__chunk_set_info(dset) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
            if(H5D__chunk_update_cache(dset, dxpl_id) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
        } /* end if */

        /* Allocate space for the new parts of the dataset, if appropriate */
        if(expand && dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY)
            if(H5D__alloc_storage(dset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, curr_dims) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to extend dataset storage")

        /*-------------------------------------------------------------------------
         * Remove chunk information in the case of chunked datasets
         * This removal takes place only in case we are shrinking the dateset
         * and if the chunks are written
         *-------------------------------------------------------------------------
         */
        if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
                (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
            /* Remove excess chunks */
            if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
        } /* end if */

        /* Mark the dataspace as dirty, for later writing to the file */
        if(H5D__mark(dset, dxpl_id, H5D_MARK_SPACE) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to mark dataspace as dirty")
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__set_extent() */


/*-------------------------------------------------------------------------
 * Function: H5D__flush_sieve_buf
 *
 * Purpose: Flush any dataset sieve buffer info cached in memory
 *
 * Return: Success: Non-negative
 *         Failure: Negative
 *
 * Programmer: Quincey Koziol
 *      July 27, 2009
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__flush_sieve_buf(H5D_t *dataset, hid_t dxpl_id)
{
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    /* Check args */
    HDassert(dataset);

    /* Flush the raw data buffer, if we have a dirty one */
    if(dataset->shared->cache.contig.sieve_buf && dataset->shared->cache.contig.sieve_dirty) {
        HDassert(dataset->shared->layout.type != H5D_COMPACT);  /* We should never have a sieve buffer for compact storage */

        /* Write dirty data sieve buffer to file */
        if(H5F_block_write(dataset->oloc.file, H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc,
                dataset->shared->cache.contig.sieve_size, dxpl_id, dataset->shared->cache.contig.sieve_buf) < 0)
            HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed")

        /* Reset sieve buffer dirty flag */
        dataset->shared->cache.contig.sieve_dirty = FALSE;
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end
H5D__flush_sieve_buf() */


/*-------------------------------------------------------------------------
 * Function: H5D__flush_real
 *
 * Purpose: Flush any dataset information cached in memory
 *
 * Return: Success: Non-negative
 *         Failure: Negative
 *
 * Programmer: Quincey Koziol
 *      December 6, 2007
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__flush_real(H5D_t *dataset, hid_t dxpl_id)
{
    H5O_t *oh = NULL;                   /* Pointer to dataset's object header */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    /* Check args */
    HDassert(dataset);

    /* Check for metadata changes that will require updating the object's modification time */
    if(dataset->shared->layout_dirty || dataset->shared->space_dirty) {
        unsigned update_flags = H5O_UPDATE_TIME;    /* Modification time flag */

        /* Pin the object header */
        if(NULL == (oh = H5O_pin(&dataset->oloc, dxpl_id)))
            HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")

        /* Update the layout on disk, if it's been changed */
        if(dataset->shared->layout_dirty) {
            if(H5D__layout_oh_write(dataset, dxpl_id, oh, update_flags) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update layout/pline/efl info")
            dataset->shared->layout_dirty = FALSE;

            /* Reset the "update the modification time" flag, so we only do it once */
            update_flags = 0;
        } /* end if */

        /* Update the dataspace on disk, if it's been changed */
        if(dataset->shared->space_dirty) {
            if(H5S_write(dataset->oloc.file, dxpl_id, oh, update_flags, dataset->shared->space) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace")
            dataset->shared->space_dirty = FALSE;

            /* Reset the "update the modification time" flag, so we only do it once */
            update_flags = 0;
        } /* end if */

        /* _Somebody_ should have update the modification time! */
        HDassert(update_flags == 0);
    } /* end if */

    /* Flush cached raw data for each kind of dataset layout */
    if(dataset->shared->layout.ops->flush &&
            (dataset->shared->layout.ops->flush)(dataset, dxpl_id) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush raw data")

done:
    /* Release pointer to object header (even on the error path) */
    if(oh != NULL)
        if(H5O_unpin(oh) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__flush_real() */


/*-------------------------------------------------------------------------
 * Function: H5D__mark
 *
 * Purpose: Mark some aspect of a dataset as dirty
 *
 * Return: Success: Non-negative
 *         Failure: Negative
 *
 * Programmer: Quincey Koziol
 *      July 4, 2008
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__mark(H5D_t *dataset, hid_t dxpl_id, unsigned flags)
{
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE_NOERR

    /* Check args */
    HDassert(dataset);
    HDassert(!(flags & (unsigned)~(H5D_MARK_SPACE | H5D_MARK_LAYOUT)));

    /* Mark aspects of the dataset as dirty */
    if(flags & H5D_MARK_SPACE)
        dataset->shared->space_dirty = TRUE;
    if(flags & H5D_MARK_LAYOUT)
        dataset->shared->layout_dirty = TRUE;

    /* NOTE(review): no HGOTO_ERROR in this function reaches 'done'; the label
     * is only kept for symmetry with the FUNC_ENTER/LEAVE macro pattern. */
done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__mark() */


/*-------------------------------------------------------------------------
 * Function: H5D__flush_cb
 *
 * Purpose: Flush any dataset information cached in memory
 *
 * Return: Success: Non-negative
 *         Failure: Negative
 *
 * Programmer: Quincey Koziol
 *      November 8, 2007
 *
 *-------------------------------------------------------------------------
 */
static int
H5D__flush_cb(void *_dataset, hid_t UNUSED id, void *_udata)
{
    H5D_t *dataset = (H5D_t *)_dataset;                 /* Dataset pointer */
    H5D_flush_ud_t *udata = (H5D_flush_ud_t *)_udata;   /* User data for callback */
    int ret_value = H5_ITER_CONT;       /* Return value */

    FUNC_ENTER_STATIC

    /* Check args */
    HDassert(dataset);

    /* Check for dataset in same file */
    if(udata->f == dataset->oloc.file) {
        /* Flush the dataset's information */
        if(H5D__flush_real(dataset, udata->dxpl_id) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to flush cached dataset info")
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__flush_cb() */


/*-------------------------------------------------------------------------
 * Function: H5D_flush
 *
 * Purpose: Flush any dataset information cached in memory
 *
 * Return: Success: Non-negative
 *         Failure: Negative
 *
 * Programmer: Ray Lu
 *      August 14, 2002
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_flush(const H5F_t *f, hid_t dxpl_id)
{
    H5D_flush_ud_t udata;               /* User data for callback */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Check args */
    HDassert(f);

    /* Set user data for callback */
    udata.f = f;
    udata.dxpl_id = dxpl_id;

    /* Iterate over all the open datasets */
    if(H5I_iterate(H5I_DATASET, H5D__flush_cb, &udata, FALSE) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to flush cached dataset info")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_flush() */
866544.c
/* F I E L D C L C . C */
/* Routines needed to calculate fields. */

#ifdef DEBUG
#ifdef PCJ
/* #define SHOWCALC */
#endif /* PCJ */
#endif /* DEBUG */

#include "word.h"
DEBUGASSERTSZ /* WIN - bogus macro for assert string */
#include "props.h"
#include "doc.h"
#include "sel.h"
#include "ch.h"
#include "prompt.h"
#include "message.h"
#include "ourmath.h"
#include "format.h"
#include "inter.h"
#include "print.h"
#include "disp.h"
#include "dde.h"
#include "file.h"
#include "strtbl.h"
#define FINDNEXT
#include "search.h"
#include "error.h"
#define FIELDCMD
#include "field.h" /* includes rgstFldError */
#include "debug.h"
#include "opuscmd.h"

#ifdef PROTOTYPE
#include "fieldclc.cpt"
#endif /* PROTOTYPE */

/* E X T E R N A L S */
extern struct SEL selCur;
extern struct PREF vpref;
extern struct CA caAdjust; /* used while traversing fields during calc */
extern int docMac;
extern struct CA caPara;
extern struct CHP vchpFetch;
extern CHAR szError[];
extern struct MERR vmerr;
extern CP vcpFirstLayout;
extern CP vcpLimLayout;
extern HCURSOR vhcArrow;
extern struct DOD **mpdochdod [];
extern struct CA caSect;
extern int vfExtendSel;
extern int vwFieldCalc;
extern union RRU vrruCalc;
extern CHAR vrrut;
extern struct PMD *vppmd;
extern int wwCur;
extern struct PAP vpapFetch;
extern char rgchEop[];
extern struct WWD **hwwdCur;
extern struct DDES vddes;
extern BOOL vfDdeIdle;
extern struct UAB vuab;
extern ENV *penvMathError;
extern int vdocScratch;
extern CP CpMomFromHdr();


/* C M D C A L C S E L */
/* Calculate all fields within the current selection.
   Returns cmdError for block selections (which cannot be calculated),
   cmdOK otherwise; beeps when nothing was calculated. */
/* %%Function:CmdCalcSel %%Owner:peterj */
CMD CmdCalcSel (pcmb)
CMB *pcmb;
{
    BOOL fCalc = fFalse;
    BOOL fUndo = fTrue;
    int cFields;

    /* block (column) selections are not supported */
    if (selCur.fBlock)
    {
        Beep();
        return cmdError;
    }

    Profile( vpfi == pfiFieldRef ? StartProf(30) : 0);
    AcquireCaAdjust ();
    if (FSetPcaForCalc (&caAdjust, selCur.doc, selCur.cpFirst, selCur.cpLim, &cFields))
    {
        struct DOD *pdod = PdodDoc(selCur.doc);

        /* footnote/annotation docs may need special undo handling */
        if ((pdod->fFtn || pdod->fAtn) && !FCheckLargeEditFtn(&caAdjust, &fUndo))
            goto LCantUndo;
        if (fUndo && !FSetUndoB1(bcmCalcFields, uccPaste, &caAdjust))
            goto LCantUndo;

        StartLongOp ();
        TurnOffSel(&selCur); /* to avoid weirdness on the screen */
        Assert (vwFieldCalc == 0);
        vwFieldCalc = fclSel;
        fCalc = FCalcFields (&caAdjust, frmUser, (cFields>1), fTrue);
        Assert (vwFieldCalc == fclSel);
        vwFieldCalc = 0;
        EndLongOp (fFalse);
    }
    if (!fCalc)
        Beep ();
    else
    {
        DirtyDoc (selCur.doc);
        if (fUndo)
            SetUndoAfter(&caAdjust);
        else
            SetUndoNil();
    }
LCantUndo:
    ReleaseCaAdjust ();
    MakeSelCurVisi(fTrue /*fForceBlockToIp*/);
    Profile( vpfi == pfiFieldRef ? StopProf() : 0);
    return cmdOK;
}


/* F C A L C F I E L D S */
/* Calculate all fields enclosed in *pca. frm indicates the types of fields
   to be calculated. Returns true iff at least one field was calculated. */
/* %%Function:FCalcFields %%Owner:peterj */
FCalcFields (pca, frm, fPrompt, fClearDiffer)
struct CA * pca;
int frm;
BOOL fPrompt, fClearDiffer;
{
    int ifld, ifldFirst;
    long ifldNext = 0;
    BOOL fReturn = fFalse;
    int doc = pca->doc;
    struct PLC **hplcfld = PdodDoc (doc)->hplcfld;
    struct PPR **hppr;

    /* *pca must be an adjusted ca because the calculation function of a field
       may (and, in general, will) add characters to the document. */
    if (doc == docNil || pca->cpFirst >= pca->cpLim || vmerr.fMemFail || vmerr.fDiskFail)
        /* invalid calculation range. */
    {
        Assert (vmerr.fMemFail || vmerr.fDiskFail);
        return fFalse;
    }

    vwFieldCalc |= fclCa;
    Scribble (ispFieldCalc1, 'C');
    if (fPrompt)
        hppr = HpprStartProgressReport (mstCalcFields, NULL, nIncrPercent, fTrue);

    /* what field is cpFirst in? */
    ifldFirst = ifld = IfldNextField (doc, pca->cpFirst);
    Assert (ifld != ifldNil);

    /* if next field exists and is in our range, calculate it.
       Terminate at this level if we are having problems or user ESC.
 */
    while (ifld != ifldNil && CpFirstField (doc, ifld) < pca->cpLim &&
            !vmerr.fMemFail && !vmerr.fDiskFail &&
            ! FQueryAbortCheck () &&
            (vppmd == NULL || !vppmd->fAbortPass))
    {
        /* put up a % complete message */
        if (fPrompt && ifld >= (int)ifldNext)
            ProgressReportPercent (hppr, (long)ifldFirst,
                (long)IInPlcRef (hplcfld, pca->cpLim), (long)ifld, &ifldNext);

        fReturn |= FCalcFieldIfld (doc, ifld, frm, 0, fClearDiffer);
        ifld = IfldNextField (doc, CpLimField (doc, ifld));
    }

    Scribble (ispFieldCalc1, ' ');
    if (fPrompt)
    {
        ChangeProgressReport (hppr, 100);
        StopProgressReport (hppr, pdcAdvise);
    }

    /* FIELD CALCULATION STRUCTURES/TERMINATION */
    {
        extern struct STTB **hsttbSequenceCalc;

        /* release the SEQ-field bookkeeping built up during this pass */
        if (hsttbSequenceCalc != hNil)
            FreePhsttb (&hsttbSequenceCalc);
        vwFieldCalc &= ~fclCa;
    }

    return fReturn;
}


/* F C A L C F I E L D I F L D */
/* Calculate field ifld. First calculate any field enclosed in ifld's
   instruction range. Then calculate this field.
   NB: field calculation functions are assumed to be WELL BEHAVED. A well
   behaved calculation function is one that does not modify any portion of a
   document except its result portion. IF A CALC FUNCTION MODIFIES ANY PORTION
   OF A DOCUMENT OUTSIDE OF ITS RESULT, THE FOLLOWING TRAVERSAL MAY NOT
   FUNCTION PROPERLY. */
/* %%Function:FCalcFieldIfld %%Owner:peterj */
FCalcFieldIfld (doc, ifld, frm, cNestLevel, fClearDiffer)
int doc, ifld, frm, cNestLevel;
BOOL fClearDiffer;
{
    int ifldCalc;
    CP cp = CpFirstField (doc, ifld) + 1;
    struct EFLT eflt;
    BOOL fReturn = fFalse;
    struct FLCD flcd;

    Assert (ifld != ifldNil);
    GetIfldFlcd (doc, ifld, &flcd);

    /* if we are having technical problems or not allowed to refresh, don't go on */
    if (vmerr.fMemFail || vmerr.fDiskFail)
        return fFalse;

    /* check if we are beyond the nesting limit */
    if (++cNestLevel > cNestFieldMax)
    {
        ErrorEid (eidFields2DeepCalc, "FCalcFieldIfld");
        return fFalse;
    }

    Scribble (ispFieldCalc2, ('0'+cNestLevel-1));

    /* Calculate fields enclosed in instructions. Note that CpLimInstField for
       ifld may change.
 */
    while ((ifldCalc = IfldNextField (doc, cp)) != ifldNil &&
            CpFirstField (doc, ifldCalc) < CpLimInstField (doc, ifld))
    {
        fReturn |= FCalcFieldIfld (doc, ifldCalc, frm, cNestLevel, fClearDiffer);
        cp = CpLimField (doc, ifldCalc);
    }

    eflt = EfltFromFlt (flcd.flt);

    /* Calculate the field itself, if appropriate */
    if (eflt.frm & frm && !flcd.fLocked)
    {
#ifdef SHOWCALC
        CommSzNum(SzShared("FCalcFieldIfld: calculating "), ifld);
#endif /* SHOWCALC */
        fReturn = fTrue;
        /* ensure the field has a result section before calculating into it */
        if (FInsertFieldSeparator (doc, ifld))
            CallCalcFunc (doc, ifld, fClearDiffer);
    }

    /* Calculate fields enclosed in result. Note that CpLimField of ifld may change. */
    cp = CpLimInstField (doc, ifld);
    while ((ifldCalc = IfldNextField (doc, cp)) != ifldNil &&
            CpFirstField (doc, ifldCalc) < CpLimField (doc, ifld))
    {
        fReturn |= FCalcFieldIfld (doc, ifldCalc, frm, cNestLevel, fClearDiffer);
        cp = CpLimField (doc, ifldCalc);
    }

    Scribble (ispFieldCalc2, (cNestLevel>1 ? '0'+cNestLevel-2 : ' '));

    return fReturn;
}


/* C A L L C A L C F U N C */
/* Call the calculation function for field ifld.
 */
/* %%Function:CallCalcFunc %%Owner:peterj */
CallCalcFunc (doc, ifld, fClearDiffer)
int doc, ifld;
{
    int fcr;
    CP dcpPrevResult;
    BOOL fResultDirty;
    BOOL fResultEdited;
    BOOL fInTablePrev;
    BOOL fFldDirty;
    struct EFLT eflt;
    struct FLCD flcd;
    struct FFB ffb;
    struct CA ca;

    vwFieldCalc |= fclFunc;

    /* some callers are not going through normal command dispatch */
    InvalAgain();

    GetIfldFlcd (doc, ifld, &flcd);
    /* remember the old result length (minus the end-of-field char) and the
       dirty bits so they can be restored after the calc function runs */
    dcpPrevResult = CpMax (cp0, flcd.dcpResult - 1);
    fResultDirty = flcd.fResultDirty;
    fResultEdited = flcd.fResultEdited;
    fFldDirty = flcd.fDirty;
    eflt = EfltFromFlt (flcd.flt);
    Assert (eflt.pfnCalcFunc != NULL);
    Assert (flcd.dcpResult > 0);
    CachePara(doc, flcd.cpFirst + flcd.dcpInst - 1);
    fInTablePrev = vpapFetch.fInTable;

    /* initialize the field fetch block */
    InitFvb (&ffb.fvb);
    SetFfbIfld (&ffb, doc, ifld);

    /* call calc function */
    fcr = (*eflt.pfnCalcFunc) (doc, ifld, flcd.flt, flcd.cpFirst, flcd.cpFirst+flcd.dcpInst, &ffb);

    /* if this field is nested within a dead field, its result must be vanished.
 */
    SetFieldResultProps(doc, ifld, fInTablePrev);

    if (fcr >= fcrNormal)
    {
        /* assure all switches were fetched */
        ExhaustFieldText (&ffb);

        if (!fInTablePrev)
        {
            /* note: can use old cpFirst and dcpInst values--they don't change */
            CachePara(doc, flcd.cpFirst + flcd.dcpInst - 1);
            if (vpapFetch.fInTable)
            {
                /* the new result dragged the field into a table; insert an
                   Eop so the field separator stays outside the table */
                struct PAP pap;
                struct CHP chp;

                GetIfldFlcd (doc, ifld, &flcd);
                CachePara(doc, flcd.cpFirst + flcd.dcpInst + flcd.dcpResult - 1);
                pap = vpapFetch;
                Assert(!pap.fInTable);
                StandardChp(&chp);
                chp.fVanish = fFalse;
                FInsertRgch(doc, flcd.cpFirst + flcd.dcpInst, rgchEop, (int)ccpEop, &chp, &pap);
            }
        }
        if (ffb.fsfSys.c > 0)
        {
            /* apply the "system" switches to the result */
            GetIfldFlcd (doc, ifld, &flcd);
            DcpApplySysSwitches (&ffb, doc, flcd.cpFirst+flcd.dcpInst,
                flcd.dcpResult-dcpPrevResult-1, dcpPrevResult, fcr);
        }
    }
    if (fcr != fcrKeepOld)
    {
        /* remove old result */
        DeleteFieldResult (doc, ifld, flcd.flt!=fltDdeHot/*fSeparator*/, dcpPrevResult);
        /* no longer dirty */
        fResultDirty = fFalse;
        fResultEdited = fFalse;
    }

    /* reset the result dirtyness */
    GetIfldFlcd (doc, ifld, &flcd);
    flcd.fResultDirty = fResultDirty;
    flcd.fResultEdited = fResultEdited;
    flcd.fDirty = fFldDirty;
    if (fClearDiffer)
        flcd.fDiffer = fFalse;
    SetFlcdCh (doc, &flcd, 0);

    /* invalidate fields display */
    PcaSetDcp( &ca, doc, flcd.cpFirst, flcd.dcpInst + flcd.dcpResult );
    CheckInvalCpFirst (&ca);
    InvalCp (&ca);

    vwFieldCalc &= ~fclFunc;
}


/* F S E T P C A F O R C A L C */
/* Determines the cp range over which fields will be calculated for a given
   calculation range.
 */
/* %%Function:FSetPcaForCalc %%Owner:peterj */
FSetPcaForCalc (pca, doc, cpFirst, cpLim, pcFields)
struct CA * pca;
int doc;
CP cpFirst, cpLim;
int *pcFields;
{
    int ifld;
    CP cpT, dcpIns = cpFirst==cpLim;    /* dcpIns: 1 iff insertion point */
    struct FLCD flcd;

    pca->doc = docNil;
    if (mpdochdod[doc]==hNil)
        return fFalse;

    ifld = IfldFromDocCp (doc, cpFirst, fFalse);

    /* an insertion point immediately prior to a field should refresh the
       enclosing field, not just the field it preceeds */
    if (ifld != ifldNil && cpFirst == cpLim && CpFirstField (doc, ifld) == cpFirst)
        ifld = IfldEncloseIfld (doc, ifld);
    if (ifld == ifldNil)
        ifld = IfldNextField (doc, cpFirst);
    if (ifld != ifldNil)
    {
        GetIfldFlcd (doc, ifld, &flcd);
        pca->cpFirst = pca->cpLim = flcd.cpFirst;
    }
    else
        return fFalse;

    /* extend the range over every field starting before cpLim, re-parsing any
       field whose cached info is dirty before trusting its extents */
    *pcFields = 0;
    while (ifld != ifldNil && flcd.cpFirst < cpLim+dcpIns)
    {
        if (flcd.fDirty)
        {
            FltParseDocCp (doc, flcd.cpFirst, ifld, fTrue, fFalse);
            ifld = IfldNextField (doc, flcd.cpFirst);
            GetIfldFlcd (doc, ifld, &flcd);
            continue;
        }
        if ((cpT = flcd.cpFirst+flcd.dcpInst+flcd.dcpResult) > pca->cpLim)
            pca->cpLim = cpT;
        *pcFields += 1;
        ifld = IfldAfterFlcd (doc, ifld, &flcd);
    }

    pca->doc = doc;
    AssureLegalSel (pca);
    return pca->cpLim > pca->cpFirst;
}


/* A P P L Y S Y S S W I T C H E S */
/* Apply the "system" switches of a calculated field as a post operation on
   the result. The size of the result may change. Will not change the previous
   result in any way. May be applied to a "result" that is not part of any
   field.
 */
/* %%Function:DcpApplySysSwitches %%Owner:peterj */
CP DcpApplySysSwitches (pffb, doc, cpResult, dcpNew, dcpPrev, fcr)
struct FFB *pffb;
int doc;
CP cpResult, dcpNew, dcpPrev;
int fcr;
{
    CHAR chSw;
    BOOL fPicDefined = fTrue;

    pffb->fGroupInternal = fFalse;
    while ((chSw = ChFetchSwitch (pffb, fTrue /*fSys*/)) != chNil)
        /* above call sets pffb up to fetch argument */
        switch (chSw)
        {
    case chFldSwSysNumeric:
    case chFldSwSysDateTime:
        if (fPicDefined)
        {
            dcpNew = DcpApplyPictureSw (chSw, pffb, doc, cpResult, dcpNew, fcr);
            /* picture only meaningful if first formatting switch */
            fPicDefined = fFalse;
        }
        else
            /* a second picture switch replaces the result with an error */
            dcpNew = DcpReplaceWithError (doc, cpResult, dcpNew, istErrPicSwFirst);
        break;

    case chFldSwSysLock:
        /* lock all fields in the result */
        FSetFieldLocks (fTrue, doc, IfldNextField (doc, cpResult), cpResult+dcpNew);
        break;

    case chFldSwSysFormat:
        dcpNew = DcpApplyFormatSw (pffb, doc, cpResult, dcpNew, dcpPrev, fcr);
        /* picture only meaningful if first formatting switch */
        fPicDefined = fFalse;
        break;

#ifdef DEBUG
    default:
        Assert (fFalse);
        break;
#endif /* DEBUG */
        }
    return dcpNew;
}


/* S E T F I E L D R E S U L T P R O P S */
/* If field is nested within a dead field, make its result vanished.
 * If Revision Marking is on, set result according to field begin char.
 */
/* %%Function:SetFieldResultProps %%Owner:peterj */
SetFieldResultProps(doc, ifld, fInTable)
int doc, ifld;
BOOL fInTable;
{
    struct CA caField, caResult;

    PcaField( &caField, doc, ifld );
    /* nothing to do for an empty result */
    if (DcpCa(PcaFieldResult( &caResult, doc, ifld )) == cp0)
        return;

    /* props of the field-begin char govern the result's props */
    FetchCpAndParaCa(&caField, fcmProps);
    if (vchpFetch.fFldVanish)
        ApplyFFldVanish (&caResult, fTrue /* fVanish */);
    if (PdodMother(doc)->dop.fRevMarking)
        ApplyRevMarking(&caResult, vchpFetch.fRMark, vchpFetch.fStrike);
    if (fInTable)
        ApplyTableProps(&caResult, fTrue);
    else if (FTableMismatch(&caResult))
        ApplyTableProps(&caResult, fFalse);
}


/* G E T P D O C P C P M O T H E R F R O M D O C C P */
/* For a field being calculated at doc, cp, what is the doc, cp that is should
   consider itself to be at? (Some fields only are defined in the Mother
   document, if they exist in a sub document they must have a reference point
   in the mother). */
/* %%Function:GetPdocPcpMotherFromDocCp %%Owner:peterj */
GetPdocPcpMotherFromDocCp (doc, cp, pdoc, pcp)
int doc, *pdoc;
CP cp, *pcp;
{
    struct DOD * pdod = PdodDoc (doc);
    struct DRP *pdrp;

    *pdoc = DocMother (doc);
    if (*pdoc == doc)
        /* normal document case */
        *pcp = cp;
    else if (pdod->fHdr)
    {
        /* in header/footer at PRINT time, use cpLast of the page */
        *pcp = CpMax (cp0, vcpLimLayout - 1);
    }
    else if (pdod->fFtn || pdod->fAtn)
    {
        /* in footnote or annotation, use cp of reference */
        int ifnd = IInPlc (pdod->hplcfnd, cp);

        pdrp = ((int *)PdodDoc(*pdoc)) + (pdod->fFtn ? edcDrpFtn : edcDrpAtn);
        *pcp = CpPlc( pdrp->hplcRef, ifnd );
        Assert (*pcp <= CpMacDocEdit(*pdoc));
    }
    else if (pdod->fDispHdr)
    {
        /* in docHdrDisp */
        Assert (vcpFirstLayout == cpNil && vcpLimLayout == cpNil);
        Assert (pdod->doc == *pdoc);
        CacheSect(*pdoc, CpMomFromHdr(doc));
        *pcp = caSect.cpFirst;
    }
    else
    {
        /* unknown sub-document kind; should not happen */
        *pcp = cp0;
        Assert(fFalse);
    }
}


/* F I N S E R T F I E L D S E P E R A T O R */
/* Insert a chFieldSeparate into field ifld. Field separator goes into the
   result section of the field.
   If there is already a result section (and, therefore, a separator) do
   nothing. */
/* %%Function:FInsertFieldSeparator %%Owner:peterj */
FInsertFieldSeparator (doc, ifld)
int doc, ifld;
{
    CP cp;
    int ifldSeparate;
    struct PLC **hplcfld = PdodDoc (doc)->hplcfld;
    CHAR ch = chFieldSeparate;
    struct FLD fld;
    struct CHP chp;
    struct FLCD flcd;

    Assert (doc != docNil && hplcfld != hNil && ifld >= 0 && ifld < (*hplcfld)->iMac);
    GetIfldFlcd (doc, ifld, &flcd);
    if (flcd.dcpResult)
        /* already has a result */
        return fTrue;

    /* insert the separator char just before the field-end char */
    cp = flcd.cpFirst + flcd.dcpInst -1;
    SetFieldPchp (doc, cp, &chp, flcd.cpFirst);
    if (!FAssureHplcfld(doc, 1) || !FInsertRgch (doc, cp, &ch, 1, &chp, NULL))
        return fFalse;

    fld.ch = chFieldSeparate;
    flcd.bData = fld.bData = bDataInval;
    ifldSeparate = IInPlcRef (hplcfld, cp);
    Assert (ifldSeparate > ifld && ifldSeparate <= flcd.ifldChEnd);
    /* won't fail--assured above */
    AssertDo(FInsertInPlc (hplcfld, ifldSeparate, cp, &fld));

    /* update the fields structures to reflect separator */
    /* using the cached flt since field's will have been inval by InsertRgch */
    flcd.fResultDirty = fFalse;
    flcd.fResultEdited = fFalse;
    flcd.ifldChSeparate = ifldSeparate;
    flcd.ifldChEnd++;
    SetFlcdCh (doc, &flcd, 0);
    return fTrue;
}


/* D E L E T E F I E L D R E S U L T */
/* Deletes all or a portion of the result of a field. If fSeparator is
   specified will also delete separator. If dcp >= 0 then dcp is the number of
   result characters to delete (iff dcp < the number normally deleted if dcp
   was not given). THE LAST DCP RESULT CHARACTERS ARE DELETED, NOT THE FIRST
   DCP CHARACTERS!! */
/* %%Function:DeleteFieldResult %%Owner:peterj */
DeleteFieldResult (doc, ifld, fSeparator, dcp)
int doc, ifld;
BOOL fSeparator;
CP dcp;
{
    struct PLC **hplcfld = PdodDoc( doc )->hplcfld;
    CP dcpDelete;
    struct CA caDel;
    struct FLCD flcd;
    struct FLD fld;
    BOOL fSetCellBits;

    GetIfldFlcd (doc, ifld, &flcd);
    dcpDelete = flcd.dcpResult - (fSeparator ? 0 : 1);
    if (dcp >= 0 && dcp < (dcpDelete - (fSeparator ?
1 : 0)))
        dcpDelete = dcp;
    if (dcpDelete <= 0)
        return;

    if (dcpDelete == flcd.dcpResult)
    {
        /* must remove the separator from the plc */
        GetPlc(hplcfld, flcd.ifldChSeparate, &fld);
        FOpenPlc (hplcfld, flcd.ifldChSeparate, -1);
        FStretchPlc(hplcfld, 1);
    }

    /* delete the LAST dcpDelete result chars (just before the field-end) */
    PcaPoint( &caDel, doc, flcd.cpFirst+flcd.dcpInst+flcd.dcpResult - 1);
    caDel.cpFirst -= dcpDelete;

    /* adjust if selCur is forced out of a table it was in */
    fSetCellBits = (selCur.fWithinCell && selCur.doc == caDel.doc &&
        selCur.cpLim >= caDel.cpFirst && selCur.cpFirst <= caDel.cpLim);

    if (!FDelete(&caDel) && dcpDelete == flcd.dcpResult)
        /* delete failed, must restore the separator fld */
        FInsertInPlc (hplcfld, flcd.ifldChSeparate, flcd.cpFirst+flcd.dcpInst-1, &fld);

    if (fSetCellBits)
        SetSelCellBits(&selCur);

    /* must restore the field type, in case of invalidation by Replace */
    GetPlc( hplcfld, ifld, &fld );
    fld.flt = flcd.flt;
    PutPlcLast( hplcfld, ifld, &fld );
}


/* G E T R E S U L T C H P */
/* Returns the chp that should be used for a field's result (where not
   otherwise defined. */
/* %%Function:GetResultChp %%Owner:peterj */
GetResultChp (pffb, pchp)
struct FFB *pffb;
struct CHP *pchp;
{
    /* take the props of the char just after the field-begin char */
    GetPchpDocCpFIns (pchp, pffb->doc, pffb->cpField+1, fFalse, wwNil);
    pchp->fFldVanish = fFalse;
    Assert (pchp->fSpec == fFalse);
}


/* I N S E R T F I E L D E R R O R */
/* Insert the field calculation error message into doc at cp.
   Returns the total number of characters inserted (0 on failure). */
/* %%Function:CchInsertFieldError %%Owner:peterj */
CchInsertFieldError (doc, cp, istError)
int doc;
CP cp;
int istError;
{
    int cch;    /* NOTE(review): unused local; left for token fidelity */
    CHAR * stT = stErrorDef;
    CHAR stBuffer [cchMaxSz];
    struct CHP chp;

    /* error text is shown with bold toggled relative to surrounding text */
    GetPchpDocCpFIns (&chp, doc, cp, fTrue, wwNil);
    chp.fBold = !chp.fBold;

    /* general error message */
    if (!FInsertRgch (doc, cp, stT+1, *stT, &chp, NULL))
        return 0;

    if (istError != iNil)
    {
        bltbx ((CHAR FAR *)rgstFldError [istError], (CHAR FAR *)stBuffer, *rgstFldError [istError]+1);
        /* specific error message */
        if (!FInsertRgch (doc, cp+*stT, stBuffer+1, *stBuffer, &chp, NULL))
            return *stT;
    }
    else
        *stBuffer = 0;

    return *stT + *stBuffer;
}


/* D C P R E P L A C E W I T H E R R O R */
/* Replace cp, cp+dcp with the field error string and return the new dcp. */
/* %%Function:DcpReplaceWithError %%Owner:peterj */
CP DcpReplaceWithError (doc, cp, dcp, istErr)
int doc;
CP cp, dcp;
int istErr;
{
    struct CA ca;

    PcaSetDcp(&ca, doc, cp, dcp);
    /* delete old text */
    FDelete(&ca);
    /* insert new error string */
    return (CP) CchInsertFieldError (doc, cp, istErr);
}


/* F C R R E G I S T E R I N T R E S U L T */
/* Called by calculation functions which have a result which is an integer.
   Sets up the global result info for the numeric picture switch. */
/* %%Function:FcrRegisterIntResult %%Owner:peterj */
int FcrRegisterIntResult (n)
int n;
{
    vrruCalc.l = (long)n;
    vrrut = rrutLong;
    return fcrNumeric;
}


/* F C R R E G I S T E R L O N G R E S U L T */
/* Called by calculation functions which have a result which is a long.
   Sets up the global result info for the numeric picture switch. */
/* %%Function:FcrRegisterLongResult %%Owner:peterj */
int FcrRegisterLongResult (l)
long l;
{
    vrruCalc.l = l;
    vrrut = rrutLong;
    return fcrNumeric;
}


/* F C R R E G I S T E R R E A L R E S U L T */
/* Called by calculation functions which have a result which is a real number.
   Sets up the global result info for the numeric picture switch.
 */
/* %%Function:FcrRegisterRealResult %%Owner:peterj */
int FcrRegisterRealResult (pnum)
NUM *pnum;
{
    bltbyte(pnum, &vrruCalc.num, sizeof(NUM));
    vrrut = rrutNum;
    return fcrNumeric;
}


/* F C R R E G I S T E R D T T M R E S U L T */
/* Called by calculation functions which have a result which is a date or
   time. Sets up the global result info for the numeric picture switch. */
/* %%Function:FcrRegisterDttmResult %%Owner:peterj */
int FcrRegisterDttmResult (dttm)
struct DTTM dttm;
{
    vrruCalc.dttm = dttm;
    vrrut = rrutDttm;
    return fcrDateTime;
}


/* F F E T C H A R G U M E N T T E X T */
/* Fetch the text of the next argument into rgch. Does not preserve any other
   information. If fTruncate then if the next argument is too large to fit
   into rgch the portion that does not fit will be skipped over; subsequent
   calls will fetch subsequent arguments. If !fTruncate then if an argument is
   too long fOverflow will be set and subsequent calls will fetch subsequent
   portions of the argument. Fetches at most cchMax-1 characters into rgch and
   NULL terminates. Returns true if an argument was fetched. Argument may have
   had zero length ("") in which case cch = 0. */
/* %%Function:FFetchArgText %%Owner:peterj */
FFetchArgText (pffb, fTruncate)
struct FFB * pffb;
BOOL fTruncate;
{
    struct FBB fbb;
    int cch;

    /* save the fetch-buffer state so it can be restored on exit */
    fbb = pffb->fbb;
    pffb->ccrMax = 0;
    pffb->rgcr = NULL;
    pffb->cchMax--;     /* reserve room for the terminating NUL */
    FetchFromField (pffb, fTrue /* fArgument */, fFalse /* fRTF */);
    cch = pffb->cch;
    pffb->rgch [cch] = '\0';
    Assert (cch == pffb->cchMax || !pffb->fOverflow);
    if (fTruncate && pffb->fOverflow)
    {
        /* drain the rest of the oversized argument into a null buffer */
        pffb->cchMax = 0;
        pffb->rgch = NULL;
        FetchFromField (pffb, fTrue, fFalse);
        /* nothing to overflow!! */
        Assert (!pffb->fOverflow);
    }
    pffb->fbb = fbb;    /* restore orginal buffers */
    pffb->cch = cch;
    pffb->ccr = 0;
    return !pffb->fNoArg;
}


/* F F E T C H A R G U M E N T E X T E N T S */
/* Fetches all of one argument. Sets *pcpFirst and *pcpLim for the argument.
   Does not maintain any other information.
   Subsequent calls will fetch subsequent arguments. Does not use buffers
   provided by caller. Returns false if there was no argument fetched. Fetched
   argument may have had zero length in which case *pcpFirst = *pcpLim =
   pffb->cpFirst on return. */
/* %%Function:FFetchArgExtents %%Owner:peterj */
FFetchArgExtents (pffb, pcpFirst, pcpLim, fRTF)
struct FFB * pffb;
CP * pcpFirst, * pcpLim;
BOOL fRTF;
{
    struct FBB fbb;
    struct CR rgcr [ccrArgumentFetch];

    /* save the fetch-buffer state; fetch into a local cp-run array only */
    fbb = pffb->fbb;
    pffb->ccrMax = ccrArgumentFetch;
    pffb->rgcr = rgcr;
    pffb->cchMax = 0;
    pffb->rgch = NULL;
    FetchFromField (pffb, fTrue /* fArgument */, fRTF);
    if (pffb->ccr > 0)
    {
        *pcpFirst = rgcr [0].cp;
        /* keep fetching; later passes overwrite rgcr, so the last run seen
           supplies the limit cp */
        while (pffb->fOverflow)
            FetchFromField (pffb, fTrue, fFalse);
        Assert (pffb->ccr > 0);
        *pcpLim = rgcr [pffb->ccr-1].cp + rgcr [pffb->ccr-1].ccp;
    }
    else
        *pcpFirst = *pcpLim = pffb->cpFirst;
    pffb->fbb = fbb;
    pffb->cch = 0;
    pffb->ccr = 0;
    return !pffb->fNoArg;
}


/* S K I P A R G U M E N T */
/* Skips over one argument. */
/* %%Function:SkipArgument %%Owner:peterj */
SkipArgument (pffb)
struct FFB * pffb;
{
    int foc = pffb->foc;
    struct FBB fbb;

    /* fetch with all buffers zeroed: consumes the argument, stores nothing */
    fbb = pffb->fbb;
    SetBytes (&pffb->fbb, 0, sizeof (struct FBB));
    if (pffb->cpFirst == pffb->cpField+1)
        /* fetching keyword */
        pffb->foc = focNone;
    FetchFromField (pffb, fTrue /* fArg */, fFalse /* fRTF */);
    pffb->foc = foc;
    pffb->fbb = fbb;
}


/* D C P C O P Y A R G U M E N T */
/* Copies the text of the next argument to docDest, cpDest. Note that the text
   copied is that that would be fetched into an rgch (only results of fields,
   no vanished text). Returns 0 if there is not another argument, else the
   number of characters copied. (Warning: check vmerr.fMemFail). ccr and cch
   are undefined on return.
 */
/* %%Function:DcpCopyArgument %%Owner:peterj */
CP DcpCopyArgument (pffb, docDest, cpDest)
struct FFB *pffb;
int docDest;
CP cpDest;
{
    CP dcp = 0;
    int docSrc = pffb->doc;
    int docCopy = docDest;
    CP cpCopy = cpDest, cpCopyStart = cp0;
    struct CR rgcr [ccrArgumentFetch];
    struct CA caSrc, caDest;
    struct FBB fbb;
    struct EFLT eflt;
    struct CA caT;

    /* copying into the source doc would corrupt the fetch; route the copy
       through a scratch document in that case */
    if (docDest == docSrc)
        if ((docCopy = DocCreateScratch (docSrc)) == docNil)
            return cp0;
        else
            cpCopy = cp0;

    fbb = pffb->fbb;
    pffb->cchMax = 0;
    pffb->rgch = 0;
    pffb->ccrMax = ccrArgumentFetch;
    pffb->rgcr = rgcr;
    pffb->fOverflow = fFalse;
    do
    {
        CP dcpT;

        FetchFromField (pffb, fTrue/*fArg*/, fFalse /* fRTF */);
        if (pffb->ccr == 0)
            break;
        dcpT = DcpCopyIchCchPcr (docCopy, cpCopy, docSrc, rgcr, pffb->ccr, 0, -1);
        dcp += dcpT;
        cpCopy += dcpT;
    } while (pffb->fOverflow && !vmerr.fMemFail);

    /* get rid of fFldVanish property for args of dead fields */
    /* NOTE(review): when docDest != docSrc (no scratch doc), cpCopyStart stays
       cp0 while the copy begins at cpDest, so this range also covers text
       before cpDest -- looks suspicious; confirm against callers. */
    if (pffb->flt != fltNil)
    {
        eflt = EfltFromFlt(pffb->flt);
        if (eflt.fDead)
            ApplyFFldVanish (PcaSet(&caT, docCopy, cpCopyStart, cpCopy), fFalse);
    }
    pffb->fbb = fbb;
    if (docDest != docCopy)
    {
        if (dcp != 0)
        {
            /* move the scratch copy into the real destination */
            PcaSet( &caSrc, docCopy, cp0, dcp );
            ScratchBkmks (&caSrc);
            PcaPoint( &caDest, docDest, cpDest );
            FReplaceCps (&caDest, &caSrc);
        }
        ReleaseDocScratch();
        if (vmerr.fMemFail)
            return cp0;
    }
    return dcp;
}


/* D C P C O P Y I C H C C H P C R */
/* Uses information in pcr (ccr) to copy cch characters starting at ich.
   if cch == -1, copies all.
*/ /* %%Function:DcpCopyIchCchPcr %%Owner:peterj */ CP DcpCopyIchCchPcr (docDest, cp, docSrc, pcr, ccr, ich, cch) int docDest; CP cp; int docSrc; struct CR *pcr; int ccr, ich, cch; { struct CR *pcrMac = pcr+ccr; CP dcp = 0, ccp; struct CA caSrc, caDest; PcaPoint( &caSrc, docSrc, CpFromIchPcr (ich, pcr, ccr) ); PcaPoint( &caDest, docDest, cp ); while (pcr->cp + pcr->ccp <= caSrc.cpFirst) pcr++; Assert (pcr < pcrMac); do { if ((ccp = (pcr->ccp - (caSrc.cpFirst-pcr->cp))) == 0) goto LContinue; if (cch != -1) ccp = CpMin (ccp, (CP)(cch - dcp)); Assert (ccp > 0); caSrc.cpLim = caSrc.cpFirst + ccp; if (!FReplaceCps( &caDest, &caSrc )) /* failure, return number copied */ break; caDest.cpLim = caDest.cpFirst + DcpCa(&caSrc); if (caDest.doc != vdocScratch) CopyStylesFonts (docSrc, caDest.doc, caDest.cpFirst, DcpCa(&caDest)); if (vmerr.fMemFail) /* failure, return number copied */ break; dcp += DcpCa(&caSrc); caDest.cpFirst = caDest.cpLim; LContinue: pcr++; caSrc.cpFirst = pcr->cp; } while (pcr < pcrMac && (cch == -1 || dcp < cch)); return dcp; } /* E X H A U S T F I E L D T E X T */ /* Scan the remaining text of pffb without picking up any characters or CRs. Used to assure all switches in the field instructions have been read. */ /* %%Function:ExhaustFieldText %%Owner:peterj */ ExhaustFieldText (pffb) struct FFB *pffb; { struct FBB fbb; if (pffb->cpFirst < pffb->cpLim && pffb->foc <= focNormal) { Assert (pffb->foc == focNormal || pffb->foc == focNone); fbb = pffb->fbb; SetBytes (&pffb->fbb, 0, sizeof (struct FBB)); FetchFromField (pffb, fFalse/*fArg*/, fFalse /* fRTF */); pffb->fbb = fbb; Assert (pffb->cpFirst >= pffb->cpLim); } } /* C H F E T C H S W I T C H */ /* Iterates through the switches cached in pffb. Returns each ch in turn, and returns chNil when complete. If the switch has an argument, sets up pffb to fetch. Calls with fSys TRUE and FALSE may be interspersed. 
*/ /* %%Function:ChFetchSwitch %%Owner:peterj */ CHAR ChFetchSwitch (pffb, fSys) struct FFB *pffb; BOOL fSys; { int ich; int foc = pffb->foc; struct FSF *pfsf; Assert (pffb->cpFirst == pffb->cpLim || foc > focNormal); if (foc < focNormal) /* nothing (should be) cached */ return chNil; if (fSys) { ich = foc / (cSwitchMax+1); foc += cSwitchMax+1; pfsf = &pffb->fsfSys; } else { ich = foc % (cSwitchMax+1); foc++; pfsf = &pffb->fsfFlt; } pffb->foc = foc; if (ich >= pfsf->c) /* nothing left */ return chNil; if (pfsf->rgcp [ich] != cpNil) { /* set up to fetch the argument */ pffb->cpFirst = pfsf->rgcp [ich]; pffb->fOverflow = fFalse; } else /* no argument */ pffb->cpFirst = pffb->cpLim; return ChLower(pfsf->rgch [ich]); } /* C P L I M I N S T F I E L D */ /* Return the cpLim of the instruction portion of field ifld. Includes either chFieldSeparate or chFieldEnd */ /* %%Function:CpLimInstField %%Owner:peterj */ CP CpLimInstField (doc, ifld) int doc, ifld; { struct FLCD flcd; GetIfldFlcd (doc, ifld, &flcd); return flcd.cpFirst + flcd.dcpInst; } /* %%Function:PcaFieldResult %%Owner:peterj */ struct CA *PcaFieldResult(pca, doc, ifld) struct CA *pca; int doc; int ifld; { struct PLC **hplcfld = PdodDoc (doc)->hplcfld; struct FLCD flcd; GetIfldFlcd (doc, ifld, &flcd); return PcaSetDcp( pca, doc, flcd.cpFirst + flcd.dcpInst, flcd.dcpResult-1); } /* C P L I M F I E L D */ /* Returns the cpLim of the field */ /* %%Function:CpLimField %%Owner:peterj */ EXPORT CP CpLimField (doc, ifld) int doc, ifld; { struct FLCD flcd; GetIfldFlcd (doc, ifld, &flcd); return flcd.cpFirst + flcd.dcpInst + flcd.dcpResult; }
841202.c
/******************************************************************************* * The MIT License (MIT) * * Copyright (c) 2020 Jean-David Gadina - www.xs-labs.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. ******************************************************************************/ /*! 
* @file XSAutoreleasePoolAddObject.c * @copyright (c) 2020 - Jean-David Gadina - www.xs-labs.com * @author Jean-David Gadina - www.xs-labs.com * @abstract Definition for XSAutoreleasePoolAddObject */ #include <XS/XS.h> #include <XS/Private/Classes/XSAutoreleasePool.h> void XSAutoreleasePoolAddObject( XSAutoreleasePoolRef ap, const void * object ) { struct XSAutoreleasePoolStorage * storage; if( ap == NULL || object == NULL || XSRuntimeIsConstantObject( object ) ) { return; } storage = ap->storage; while( storage != NULL ) { if( storage->count < sizeof( storage->objects ) / sizeof( const void * ) ) { storage->objects[ storage->count++ ] = object; return; } if( storage->next == NULL ) { storage->next = XSAlloc( sizeof( struct XSAutoreleasePoolStorage ) ); if( storage->next == NULL ) { XSBadAlloc(); } storage->next->objects[ storage->next->count++ ] = object; return; } else { storage = storage->next; } } }
274933.c
/*
 * This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
 *
 * Copyright 2007-2019 Broadcom Inc. All rights reserved.
 * $All Rights Reserved.$
 *
 * TDM chip based calendar postprocessing filters
 */
#ifdef _TDM_STANDALONE
#include <tdm_top.h>
#else
#include <soc/tdm/core/tdm_top.h>
#endif

/* NOTE(review): GH2_TOKEN_CHECK(x) presumably expands to an `if` guarding
   "x is a front-panel port token" -- the dangling `else` branches below bind
   to it.  TODO confirm against the macro definition in tdm_top.h. */

/**
@name: tdm_gh2_filter_check_pipe_sister_min
@param: _tdm - TDM driver context
Scan the whole pipe calendar; return FAIL if any two slots closer than
VBS_MIN_SPACING belong to the same TSC (sister-port violation), else PASS.
*/
int
tdm_gh2_filter_check_pipe_sister_min( tdm_mod_t *_tdm )
{
    int i, j, idx0, tsc_i, tsc0, result=PASS,
        *tdm_pipe_main, tdm_pipe_main_len;

    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,tdm_pipe_main);
    tdm_pipe_main_len = _tdm->_chip_data.soc_pkg.lr_idx_limit + _tdm->_chip_data.soc_pkg.tvec_size;

    for (i=0; i<tdm_pipe_main_len; i++) {
        GH2_TOKEN_CHECK(tdm_pipe_main[i]) {
            tsc_i = tdm_gh2_scan_which_tsc(tdm_pipe_main[i],_tdm);
            for (j=1; j<VBS_MIN_SPACING; j++){
                /* wrap-around lookahead index */
                idx0 = ((i+j)<tdm_pipe_main_len)? (i+j): (i+j-tdm_pipe_main_len);
                GH2_TOKEN_CHECK(tdm_pipe_main[idx0]) {
                    tsc0 = tdm_gh2_scan_which_tsc(tdm_pipe_main[idx0],_tdm);
                    if ( tsc_i == tsc0){
                        result=FAIL;
                        break;
                    }
                }
            }
        }
        if (result==FAIL) {break;}
    }
    return result;
}

/**
@name: tdm_gh2_filter_check_port_sister_min
@param: _tdm - TDM driver context; port - port token to check
Same sister-port minimum-spacing check as above, restricted to slots
occupied by `port`.
*/
int
tdm_gh2_filter_check_port_sister_min( tdm_mod_t *_tdm, int port )
{
    int i, j, idx0, tsc_i, tsc0,result=PASS,
        *tdm_pipe_main, tdm_pipe_main_len;
    /* enum port_speed_e *tdm_port_speed; */

    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,tdm_pipe_main);
    tdm_pipe_main_len = _tdm->_core_data.vars_pkg.cap;
    /* tdm_port_speed = _tdm->_chip_data.soc_pkg.speed; */

    GH2_TOKEN_CHECK(port){
        tsc_i = tdm_gh2_scan_which_tsc(port,_tdm);
        for (i=0; i<tdm_pipe_main_len; i++) {
            if (port==tdm_pipe_main[i]){
                for (j=1; j<VBS_MIN_SPACING; j++){
                    idx0 = ((i+j)<tdm_pipe_main_len)? (i+j): (i+j-tdm_pipe_main_len);
                    GH2_TOKEN_CHECK(tdm_pipe_main[idx0]) {
                        tsc0 = tdm_gh2_scan_which_tsc(tdm_pipe_main[idx0],_tdm);
                        if ( tsc_i == tsc0){
                            result=FAIL;
                            break;
                        }
                    }
                }
            }
            if (result==FAIL) {break;}
        }
    }
    return result;
}

/**
@name: tdm_gh2_filter_check_migrate_lr_slot
@param:
Check whether a linerate slot may migrate from idx_src to idx_dst without
creating a sister-port spacing violation around idx_dst.
Returns PASS when the migration is legal.
*/
int
tdm_gh2_filter_check_migrate_lr_slot(int idx_src, int idx_dst, int *tdm_tbl, int tdm_tbl_len, int **tsc)
{
    int i=idx_src, j, filter_result=FAIL, check_pass=BOOL_TRUE,
        idx0, idx1, tsc0, tsc1, tsc_i;

    /* both indices must be inside the table */
    if ( !(idx_src>=0 && idx_src<tdm_tbl_len) ||
         !(idx_dst>=0 && idx_dst<tdm_tbl_len) ){
        check_pass = BOOL_FALSE;
    }
    if (check_pass==BOOL_TRUE){
        GH2_TOKEN_CHECK(tdm_tbl[idx_src]){
            GH2_TOKEN_CHECK(tdm_tbl[idx_dst]){
                tsc0 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx_src],tsc);
                tsc1 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx_dst],tsc);
                if ( tsc0 == tsc1 ){
                    check_pass = BOOL_FALSE;
                    TDM_PRINT6(" -------- sister port spacing violation, slots [#%d | #%d], ports [%d | %d], TSC [%d | %d]\n",
                               idx_src, idx_dst,
                               tdm_tbl[idx_src], tdm_tbl[idx_dst],
                               tsc0, tsc1);
                }
            }
        }
        /* else binds to the outer GH2_TOKEN_CHECK: src is not a port token */
        else {
            check_pass = BOOL_FALSE;
        }
    }
    /* Check sister port spacing */
    if (check_pass==BOOL_TRUE){
        i = idx_dst;
        tsc_i = tdm_gh2_legacy_which_tsc(tdm_tbl[i],tsc);
        for (j=1; j<VBS_MIN_SPACING; j++){
            idx0 = ((i+j)<tdm_tbl_len)? (i+j): (i+j-tdm_tbl_len);
            idx1 = ((i-j)>=0)? (i-j): (i-j+tdm_tbl_len);
            GH2_TOKEN_CHECK(tdm_tbl[idx0]){
                tsc0 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx0],tsc);
                if(tsc0==tsc_i){
                    check_pass = BOOL_FALSE;
                    TDM_PRINT6(" -------- sister port spacing violation, slots [#%d | #%d], ports [%d | %d], TSC [%d | %d]\n",
                               i, idx0, tdm_tbl[i], tdm_tbl[idx0], tsc_i, tsc0);
                    break;
                }
            }
            GH2_TOKEN_CHECK(tdm_tbl[idx1]){
                /* the slot being moved may legally see itself upstream */
                if (tdm_tbl[idx1]==tdm_tbl[i]){continue;}
                tsc1 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx1],tsc);
                if(tsc1==tsc_i){
                    check_pass = BOOL_FALSE;
                    TDM_PRINT6(" -------- sister port spacing violation, slots [#%d | #%d], ports [%d | %d], TSC [%d | %d]\n",
                               i, idx1, tdm_tbl[i], tdm_tbl[idx1], tsc_i, tsc1);
                    break;
                }
            }
        }
    }
    filter_result = (check_pass == BOOL_FALSE)? (FAIL): (PASS);
    return filter_result;
}

/**
@name: tdm_gh2_filter_chk_slot_shift_cond
@param:
Check whether shifting `slot` one position in direction `dir` keeps
sister-port spacing legal around both the source and destination slots.
*/
int
tdm_gh2_filter_chk_slot_shift_cond(int slot, int dir, int cal_len, int *cal_main, tdm_mod_t *_tdm)
{
    int k, src, dst, port, port_pm, idx, port_k, idx_k, result=PASS;
    int param_phy_lo, param_phy_hi, param_space_sister;

    param_phy_lo = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_space_sister= _tdm->_core_data.rule__prox_port_min;

    if (slot < cal_len) {
        /* determine src and dst index */
        src = slot;
        if (dir == TDM_DIR_UP) {
            dst = (src + cal_len - 1) % cal_len;
        } else {
            dst = (src + 1) % cal_len;
        }
        /* check src port spacing */
        idx = src;
        port = cal_main[idx];
        port_pm = tdm_gh2_filter_get_port_pm(port, _tdm);
        if (port >= param_phy_lo && port <= param_phy_hi) {
            /* sister port spacing */
            for (k=1; k<=param_space_sister; k++) {
                if (dir == TDM_DIR_UP) {
                    idx_k = (idx + cal_len - k) % cal_len;
                } else {
                    idx_k = (idx + k) % cal_len;
                }
                port_k = cal_main[idx_k];
                if (port_k >= param_phy_lo && port_k <= param_phy_hi) {
                    if (tdm_gh2_filter_get_port_pm(port_k, _tdm) == port_pm) {
                        result = FAIL;
                        break;
                    }
                }
            }
        }
        /* check dst port spacing */
        idx = dst;
        port = cal_main[idx];
        port_pm = tdm_gh2_filter_get_port_pm(port, _tdm);
        if (port >= param_phy_lo && port <= param_phy_hi) {
            /* sister port spacing (opposite scan direction vs. src check) */
            for (k=1; k<=param_space_sister; k++) {
                if (dir == TDM_DIR_UP) {
                    idx_k = (idx + k) % cal_len;
                } else {
                    idx_k = (idx + cal_len - k) % cal_len;
                }
                port_k = cal_main[idx_k];
                if (port_k >= param_phy_lo && port_k <= param_phy_hi) {
                    if (tdm_gh2_filter_get_port_pm(port_k, _tdm) == port_pm) {
                        result = FAIL;
                        break;
                    }
                }
            }
        }
    }
    return (result);
}

/**
@name: tdm_gh2_filter_calc_jitter
@param: speed - port speed; cal_len - calendar length;
        space_min/space_max - out: allowed same-port spacing window
Derive the min/max same-port slot spacing (jitter window) for a port of the
given speed in a calendar of cal_len slots.  Always returns PASS; outputs
are 0 when the speed maps to zero required slots.
*/
int
tdm_gh2_filter_calc_jitter(int speed, int cal_len, int *space_min, int *space_max)
{
    int slot_req, space_frac, jitter_range, jitter_min=0, jitter_max=0;

    /* number of calendar slots the speed requires (2.5G granularity) */
    if (speed<SPEED_10G) {
        slot_req = (speed/100)/25;
    } else {
        slot_req = ((speed/10000)*100)/25;
    }
    if (slot_req>0) {
        /* fixed-point (x10) rounding of cal_len/slot_req and the range */
        space_frac = ((cal_len*10)/slot_req)%10;
        jitter_range= (2*cal_len)/(slot_req*5);
        jitter_range= ((((2*cal_len*10)/(slot_req*5))%10)<5)? (jitter_range): (jitter_range+1);
        if (space_frac<5){
            jitter_min = cal_len/slot_req - jitter_range/2;
            jitter_max = cal_len/slot_req + jitter_range/2;
            jitter_max = ((jitter_range%2)==0)? (jitter_max): (jitter_max+1);
        } else {
            jitter_min = cal_len/slot_req - jitter_range/2;
            jitter_min = ((cal_len%slot_req)==0)? (jitter_min): (jitter_min+1);
            jitter_min = ((jitter_range%2)==0)? (jitter_min): (jitter_min-1);
            jitter_max = cal_len/slot_req + jitter_range/2;
            jitter_max = ((cal_len%slot_req)==0)? (jitter_max): (jitter_max+1);
        }
        /* clamp to at least 1 slot */
        jitter_min = (jitter_min<1)? (1): (jitter_min);
        jitter_max = (jitter_max<1)? (1): (jitter_max);
    }
    *space_min = jitter_min;
    *space_max = jitter_max;
    return (PASS);
}

/**
@name: tdm_gh2_filter_get_same_port_dist
@param:
Distance (in slots, wrapping) from `slot` to the nearest other slot holding
the same port, scanning UP or DOWN; 0 if none found or slot out of range.
*/
int
tdm_gh2_filter_get_same_port_dist(int slot, int dir, int *cal_main, int cal_len)
{
    int n, k, dist = 0;

    if (slot < cal_len && cal_len > 0) {
        if (dir == TDM_DIR_UP) {
            /* UP direction */
            for (n = 1; n < cal_len; n++) {
                k = (slot + cal_len - n) % cal_len;
                if (cal_main[k] == cal_main[slot]) {
                    dist = n;
                    break;
                }
            }
        } else {
            /* DOWN direction */
            for (n = 1; n < cal_len; n++) {
                k = (slot + n) % cal_len;
                if (cal_main[k] == cal_main[slot]) {
                    dist = n;
                    break;
                }
            }
        }
    }
    return (dist);
}

/**
@name: tdm_gh2_filter_get_port_pm
@param:
Return Port Macro number of the given port; out-of-range tokens map to
pm_num_phy_modules (an invalid PM, so they never match a real one).
*/
int
tdm_gh2_filter_get_port_pm(int port_token, tdm_mod_t *_tdm)
{
    int port_pm;

    if (_tdm->_chip_data.soc_pkg.pmap_num_lanes > 0 &&
        port_token >=_tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo &&
        port_token <=_tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi) {
        /* ports are 1-based; lanes per PM gives the PM index */
        port_pm = (port_token - 1) / _tdm->_chip_data.soc_pkg.pmap_num_lanes;
    } else {
        port_pm = _tdm->_chip_data.soc_pkg.pm_num_phy_modules;
    }
    return (port_pm);
    /* _tdm->_core_data.vars_pkg.port = port_token;
    return (_tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm)); */
}

/**
@name: tdm_gh2_filter_get_port_speed
@param:
Speed of the given front-panel port token; SPEED_0 for non-port tokens.
*/
int
tdm_gh2_filter_get_port_speed(int port_token, tdm_mod_t *_tdm)
{
    if (port_token >= _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo &&
        port_token <= _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi) {
        return (_tdm->_chip_data.soc_pkg.speed[port_token]);
    }
    return (SPEED_0);
}

/**
@name: tdm_gh2_filter_migrate_lr_slot
@param:
Migrate Linerate slot from src to dst in an array.  Legality is verified by
tdm_gh2_filter_check_migrate_lr_slot first; on success the slot is either
rotated into place (dst holds a port token) or swapped (dst is non-port).
*/
int
tdm_gh2_filter_migrate_lr_slot(int idx_src, int idx_dst, int *tdm_tbl, int tdm_tbl_len, int **tsc)
{
    int j, port=GH2_NUM_EXT_PORTS, filter_result=FAIL, check_pass=BOOL_TRUE;

    if (PASS==tdm_gh2_filter_check_migrate_lr_slot(idx_src, idx_dst, tdm_tbl, tdm_tbl_len, tsc)){
        /* NOTE(review): check_pass is never cleared here, so this branch is
           always taken; kept as-is */
        if (check_pass==BOOL_TRUE){
            GH2_TOKEN_CHECK(tdm_tbl[idx_dst]){
                /* rotate the span between src and dst by one slot */
                port = tdm_tbl[idx_src];
                if (idx_src<idx_dst){
                    for (j=idx_src; j<idx_dst; j++){
                        tdm_tbl[j] = tdm_tbl[j+1];
                    }
                    tdm_tbl[idx_dst] = port;
                }
                else if (idx_src>idx_dst){
                    for (j=idx_src; j>idx_dst; j--){
                        tdm_tbl[j] = tdm_tbl[j-1];
                    }
                    tdm_tbl[idx_dst] = port;
                }
                TDM_PRINT3("Filter applied: Linerate Slot Migration, port %3d from index #%03d to index #%03d \n",
                           port, idx_src, idx_dst);
            }
            /* else binds to GH2_TOKEN_CHECK: dst is a non-port token, swap */
            else{
                port = tdm_tbl[idx_src];
                tdm_tbl[idx_src] = tdm_tbl[idx_dst];
                tdm_tbl[idx_dst] = port;
                /* NOTE(review): tdm_tbl[idx_src] is printed AFTER the swap,
                   so the message shows the token now at idx_src, not the
                   original occupant of idx_dst -- presumably intended to
                   show the dst token; verify */
                TDM_PRINT4("Filter applied: Linerate Slot Migration, port %3d index #%03d, swap with, port %3d index #%03d \n",
                           port, idx_src, tdm_tbl[idx_src], idx_dst);
            }
            filter_result = PASS;
        }
    }
    return filter_result;
}

/**
@name: tdm_gh2_filter_migrate_lr_slot_up
@param:
Resolve sister-port violations by migrating the FIRST slot of each
violating pair upward (earlier in the calendar).  Returns migration count.
*/
int
tdm_gh2_filter_migrate_lr_slot_up(tdm_mod_t *_tdm)
{
    int i, j, k, idx0, tsc_i, tsc0, idx_dst, mig_cnt=0,
        *tdm_pipe_main, tdm_pipe_main_len, **tdm_port_pmap;

    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,tdm_pipe_main);
    tdm_pipe_main_len = _tdm->_chip_data.soc_pkg.lr_idx_limit + _tdm->_chip_data.soc_pkg.tvec_size;
    tdm_port_pmap = _tdm->_chip_data.soc_pkg.pmap;

    for (i=0; i<tdm_pipe_main_len; i++){
        tsc_i = tdm_gh2_scan_which_tsc(tdm_pipe_main[i],_tdm);
        GH2_TOKEN_CHECK(tdm_pipe_main[i]) {
            for (j=1; j<VBS_MIN_SPACING; j++){
                idx0 = ((i+j)<tdm_pipe_main_len)? (i+j): (i+j-tdm_pipe_main_len);
                GH2_TOKEN_CHECK(tdm_pipe_main[idx0]) {
                    tsc0 = tdm_gh2_scan_which_tsc(tdm_pipe_main[idx0],_tdm);
                    if (tsc_i == tsc0){
                        /* first candidate target far enough upstream */
                        idx_dst = ((i-(VBS_MIN_SPACING-j))>=0)? (i-(VBS_MIN_SPACING-j)): (i-(VBS_MIN_SPACING-j)+tdm_pipe_main_len);
                        for (k=0; k<VBS_MIN_SPACING; k++){
                            idx_dst = ((idx_dst-k)>=0)? (idx_dst-k): (idx_dst-k+tdm_pipe_main_len);
                            if (PASS==tdm_gh2_filter_check_migrate_lr_slot(i, idx_dst, tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap)){
                                tdm_gh2_filter_migrate_lr_slot(i, idx_dst, tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap);
                                mig_cnt++;
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
    return mig_cnt;
}

/**
@name: tdm_gh2_filter_migrate_lr_slot_dn
@param:
Resolve sister-port violations by migrating the SECOND slot of each
violating pair downward (later in the calendar).  Returns migration count.
*/
int
tdm_gh2_filter_migrate_lr_slot_dn(tdm_mod_t *_tdm)
{
    int i, j, k, idx0, tsc_i, tsc0, idx_dst, mig_cnt=0,
        *tdm_pipe_main, tdm_pipe_main_len, **tdm_port_pmap;

    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,tdm_pipe_main);
    tdm_pipe_main_len = _tdm->_chip_data.soc_pkg.lr_idx_limit + _tdm->_chip_data.soc_pkg.tvec_size;
    tdm_port_pmap = _tdm->_chip_data.soc_pkg.pmap;

    for (i=0; i<tdm_pipe_main_len; i++){
        tsc_i = tdm_gh2_scan_which_tsc(tdm_pipe_main[i],_tdm);
        GH2_TOKEN_CHECK(tdm_pipe_main[i]) {
            for (j=1; j<VBS_MIN_SPACING; j++){
                idx0 = ((i+j)<tdm_pipe_main_len)? (i+j): (i+j-tdm_pipe_main_len);
                GH2_TOKEN_CHECK(tdm_pipe_main[idx0]) {
                    tsc0 = tdm_gh2_scan_which_tsc(tdm_pipe_main[idx0],_tdm);
                    if (tsc_i == tsc0){
                        /* first candidate target far enough downstream */
                        idx_dst = ((idx0+VBS_MIN_SPACING-j)<tdm_pipe_main_len)? (idx0+VBS_MIN_SPACING-j): (idx0+VBS_MIN_SPACING-j-tdm_pipe_main_len);
                        for (k=0; k<VBS_MIN_SPACING; k++){
                            idx_dst = ((idx_dst+k)<tdm_pipe_main_len)? (idx_dst+k): (idx_dst+k-tdm_pipe_main_len);
                            if (PASS==tdm_gh2_filter_check_migrate_lr_slot(idx0, idx_dst, tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap)){
                                tdm_gh2_filter_migrate_lr_slot(idx0, idx_dst, tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap);
                                mig_cnt++;
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
    return mig_cnt;
}

/**
@name: tdm_gh2_filter_sister_min
@param: _tdm - TDM driver context
Repeatedly migrate slots downward (bounded by a timeout) until the pipe
passes the sister-port minimum-spacing check.  Returns PASS/FAIL.
*/
int
tdm_gh2_filter_sister_min( tdm_mod_t *_tdm )
{
    int min_space_check, mig_cnt=0, timeout, result=FAIL;

    /* DOWN */
    timeout = 20;
    do{
        min_space_check = tdm_gh2_filter_check_pipe_sister_min(_tdm);
        if (min_space_check==FAIL){
            mig_cnt = tdm_gh2_filter_migrate_lr_slot_dn(_tdm);
        }
    } while ( (min_space_check==FAIL) && (mig_cnt>0) && (timeout--)>0 );

    /* UP */
    /* timeout = 10;
    do{
        min_space_check = tdm_gh2_filter_check_pipe_sister_min(_tdm);
        if (min_space_check==FAIL){
            mig_cnt = tdm_gh2_filter_migrate_lr_slot_up(_tdm);
        }
    } while ( (min_space_check==FAIL) && (mig_cnt>0) && (timeout--)>0 ); */

    if (tdm_gh2_filter_check_pipe_sister_min(_tdm)==PASS) {result = PASS;}
    return result;
}

/**
@name: tdm_gh2_filter_dither
@param:
Move one OVSB slot out of the largest oversub clump and re-insert it next
to the largest linerate clump (quantization correction).  Returns the
number of shifts performed (0 or 1).
NOTE(review): the inner shift loops use hard-coded bound 255 rather than
lr_idx_limit+accessories -- presumably the physical table size; verify.
*/
int
tdm_gh2_filter_dither(int *tdm_tbl, int lr_idx_limit, int accessories, int **tsc, int threshold, enum port_speed_e *speed)
{
    int g, i, j, k=1, l=GH2_NUM_EXT_PORTS, slice_idx, shift_cnt=0;
    unsigned short dither_shift_done=BOOL_FALSE, dither_done=BOOL_FALSE, dither_slice_counter=0;

    /* Get index of the OVSB slot with the largest clump size */
    dither_shift_done=BOOL_FALSE;
    for (i=0; i<(lr_idx_limit+accessories); i++) {
        if (tdm_tbl[i]==GH2_OVSB_TOKEN && tdm_gh2_scan_slice_size_local(i,tdm_tbl,(lr_idx_limit+accessories), &slice_idx)==tdm_gh2_slice_size(GH2_OVSB_TOKEN,tdm_tbl,(lr_idx_limit+accessories))) {
            g=i;
            while( (tdm_tbl[g]==GH2_OVSB_TOKEN) && (g<(lr_idx_limit+accessories)) ) {g++;}
            if ( tdm_gh2_slice_prox_up(g,tdm_tbl,tsc,speed) &&
                 tdm_gh2_slice_prox_dn((tdm_gh2_slice_idx(GH2_OVSB_TOKEN,tdm_tbl,(lr_idx_limit+accessories))-1),tdm_tbl,(lr_idx_limit+accessories),tsc,speed) ) {
                /* remember removed token and removal point for rollback */
                l=tdm_tbl[i];
                for (j=i; j<255; j++) {
                    tdm_tbl[j]=tdm_tbl[j+1];
                }
                k=i;
                dither_shift_done=BOOL_TRUE;
                break;
            }
        }
    }
    /* Get index of the LINERATE slot with the largest clump size, then
       insert OVSB slot. */
    dither_done=BOOL_FALSE;
    if (dither_shift_done) {
        for (i=1; i<(lr_idx_limit+accessories); i++) {
            dither_slice_counter=0;
            while (tdm_tbl[i]!=GH2_OVSB_TOKEN && tdm_gh2_scan_slice_size_local(i,tdm_tbl,(lr_idx_limit+accessories),&slice_idx)==tdm_gh2_slice_size(1,tdm_tbl,(lr_idx_limit+accessories)) && i<(lr_idx_limit+accessories) ) {
                if (++dither_slice_counter>=threshold && tdm_tbl[i-1]==GH2_OVSB_TOKEN) {
                    for (j=255; j>i; j--) {
                        tdm_tbl[j]=tdm_tbl[j-1];
                    }
                    tdm_tbl[i+1]=GH2_OVSB_TOKEN;
                    dither_done=BOOL_TRUE;
                    break;
                }
                i++;
                if (tdm_tbl[i]==GH2_OVSB_TOKEN) {i++;}
            }
            if (dither_done) {
                break;
            }
        }
        /* rollback: re-insert the removed token at its old position */
        if (!dither_done) {
            for (j=255; j>k; j--) {
                tdm_tbl[j]=tdm_tbl[j-1];
            }
            tdm_tbl[k]=l;
        }
        else {
            shift_cnt++;
            TDM_PRINT0("Filter applied: Dither (quantization correction)\n");
        }
    }
    return shift_cnt;
}

/**
@name: tdm_gh2_filter_fine_dither
@param:
Remove one isolated slot (at a run of four single-slot slices with good
TSC proximity) and re-insert an OVSB token at index `port`.
Returns PASS if a suitable removal point was found, else FAIL.
*/
int
tdm_gh2_filter_fine_dither(int port, int *tdm_tbl, int lr_idx_limit, int accessories, int **tsc)
{
    int i, j, k, min_prox=11,slice_idx;
    unsigned short fine_dither_done;

    i=port;
    fine_dither_done=BOOL_FALSE;
    for (j=2; j<(lr_idx_limit+accessories-4); j++) {
        /* pattern: four consecutive singleton slices, none OVSB, with
           sufficient local TSC proximity */
        if ( tdm_tbl[j]!=GH2_OVSB_TOKEN && tdm_tbl[j-2]!=GH2_OVSB_TOKEN && tdm_tbl[j+2]!=GH2_OVSB_TOKEN && tdm_tbl[j+4]!=GH2_OVSB_TOKEN &&
             tdm_gh2_scan_slice_size_local((j-2), tdm_tbl, (lr_idx_limit+accessories), &slice_idx)==1 &&
             tdm_gh2_scan_slice_size_local( j, tdm_tbl, (lr_idx_limit+accessories), &slice_idx)==1 &&
             tdm_gh2_scan_slice_size_local((j+2), tdm_tbl, (lr_idx_limit+accessories), &slice_idx)==1 &&
             tdm_gh2_scan_slice_size_local((j+4), tdm_tbl, (lr_idx_limit+accessories), &slice_idx)==1 &&
             tdm_gh2_slice_prox_local( j, tdm_tbl, (lr_idx_limit+accessories), tsc)>min_prox ) {
            fine_dither_done=BOOL_TRUE;
            break;
        }
    }
    if (fine_dither_done) {
        TDM_PRINT1("Filter applied: Fine dithering (normal), index %0d\n",port);
        /* close the gap at j+1, then open a slot at i for the OVSB token */
        for (k=(j+1); k<(lr_idx_limit+accessories); k++) {
            tdm_tbl[k]=tdm_tbl[k+1];
        }
        for (k=255; k>i; k--) {
            tdm_tbl[k]=tdm_tbl[k-1];
        }
        tdm_tbl[i]=GH2_OVSB_TOKEN;
        return PASS;
    }
    else {
        return FAIL;
    }
}

/**
@name: tdm_gh2_filter_shift_lr_port
@param:
Shifts all slots of the given linerate port up or down, and returns the
total number of shifting occurence.
*/
int
tdm_gh2_filter_shift_lr_port(unsigned short port, int *tdm_tbl, int tdm_tbl_len, int dir)
{
    int i, shift_cnt=0, port_swap;

    GH2_TOKEN_CHECK(port) {
        if (dir==DN) {
            for (i=0; i<(tdm_tbl_len-1); i++) {
                if (tdm_tbl[i]==port) {
                    port_swap=tdm_tbl[i+1];
                    tdm_tbl[i+1]=port;
                    tdm_tbl[i]=port_swap;
                    shift_cnt++;
                    i++; /* skip the slot the port was just moved into */
                }
            }
        }
        else if (dir==UP) {
            for (i=1; i<tdm_tbl_len; i++) {
                if (tdm_tbl[i]==port) {
                    port_swap=tdm_tbl[i-1];
                    tdm_tbl[i-1]=port;
                    tdm_tbl[i]=port_swap;
                    shift_cnt++;
                }
            }
        }
    }
    return shift_cnt;
}

/**
@name: tdm_gh2_filter_migrate_os_slot
@param:
Migrate OVSB slot from src to dst in an array, provided removing the slot
does not create sister-port or same-port spacing violations.
Returns PASS on migration, FAIL otherwise.
*/
int
tdm_gh2_filter_migrate_os_slot(int idx_src, int idx_dst, int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed)
{
    int i=idx_src, j, token_tmp, filter_result=FAIL, check_pass=BOOL_TRUE,
        idx0, idx1, tsc0, tsc1;

    if ( !(idx_src>=0 && idx_src<tdm_tbl_len) ||
         !(idx_dst>=0 && idx_dst<tdm_tbl_len) ){
        check_pass = BOOL_FALSE;
    }
    /* only OVSB/ANCL slots may be migrated */
    if ( tdm_tbl[idx_src]!=GH2_OVSB_TOKEN && tdm_tbl[idx_src]!=GH2_ANCL_TOKEN) {
        check_pass = BOOL_FALSE;
    }
    /* Check sister port spacing (after the src slot is removed) */
    if (check_pass==BOOL_TRUE){
        i = idx_src;
        for (j=1; j<VBS_MIN_SPACING; j++){
            idx0 = ((i+j)<tdm_tbl_len)? (i+j): (i+j-tdm_tbl_len);
            idx1 = ((idx0-VBS_MIN_SPACING)>=0) ? (idx0-VBS_MIN_SPACING) : (idx0-VBS_MIN_SPACING+tdm_tbl_len);
            GH2_TOKEN_CHECK(tdm_tbl[idx0]){
                GH2_TOKEN_CHECK(tdm_tbl[idx1]){
                    tsc0 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx0],tsc);
                    tsc1 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx1],tsc);
                    if(tsc0==tsc1){
                        check_pass = BOOL_FALSE;
                        break;
                    }
                }
            }
        }
    }
    /* Check same port spacing */
    if (check_pass==BOOL_TRUE){
        for (j=1; j<LLS_MIN_SPACING; j++){
            idx0 = ((i+j)<tdm_tbl_len)? (i+j): (i+j-tdm_tbl_len);
            idx1 = ((idx0-LLS_MIN_SPACING)>=0)? (idx0-LLS_MIN_SPACING): (idx0-LLS_MIN_SPACING+tdm_tbl_len);
            GH2_TOKEN_CHECK(tdm_tbl[idx0]){
                GH2_TOKEN_CHECK(tdm_tbl[idx1]){
                    if ( speed[tdm_tbl[idx0]]<=SPEED_42G_HG2 && tdm_tbl[idx0]==tdm_tbl[idx1]){
                        check_pass = BOOL_FALSE;
                        break;
                    }
                }
            }
        }
    }
    /* Migrate OVSB/ANCL slot from src to dst */
    if (check_pass==BOOL_TRUE){
        token_tmp = tdm_tbl[idx_src];
        /* removal below shifts everything after src left by one */
        idx_dst = (idx_src>idx_dst)? (idx_dst): (idx_dst-1);
        for (j=idx_src; j<tdm_tbl_len; j++){
            tdm_tbl[j] = tdm_tbl[j+1];
        }
        for (j=(tdm_tbl_len-1); j>idx_dst; j--){
            tdm_tbl[j] = tdm_tbl[j-1];
        }
        tdm_tbl[idx_dst] = token_tmp;
        filter_result = PASS;
        TDM_PRINT2("Filter applied: OVSB Slot Migration, from index #%03d to index #%03d \n",
                   idx_src, idx_dst);
    }
    return filter_result;
}

/**
@name: tdm_gh2_filter_smooth_idle_slice
@param:
Smooth MMU TDM calendar by migrating IDLE slots: take one slot out of the
largest idle clump (size >= 2) and drop it into the middle of the largest
linerate clump, when sister-port spacing allows.  Returns moves performed.
*/
int
tdm_gh2_filter_smooth_idle_slice(int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed)
{
    int i, k, idle1_token, idle2_token, pos_cnt, pos_step, pos,
        idle_slot_cnt=0, idle_slice_max, idle_slice_max_idx,
        lr_slice_max, lr_slice_max_idx,
        idx_x, idx_y, idx1, idx2, tsc_x, tsc_y, tsc1, tsc2,
        slot_token, slot_dst_idx, filter_done, filter_cnt=0;

    idle1_token = GH2_IDL1_TOKEN;
    idle2_token = GH2_IDL2_TOKEN;
    for (i=0; i<tdm_tbl_len; i++){
        if ( tdm_tbl[i]==idle1_token || tdm_tbl[i]==idle2_token ){
            idle_slot_cnt++;
        }
    }
    if(idle_slot_cnt>0){
        pos_cnt = 0;
        pos_step= tdm_tbl_len/4; /* rotate scan start through 4 quadrants */
        for (k=0; k<idle_slot_cnt; k++){
            filter_done= BOOL_TRUE;
            pos = pos_step*pos_cnt;
            pos_cnt= (pos_cnt>=3)?(0):(pos_cnt+1);
            idle_slice_max = tdm_gh2_scan_slice_max(idle1_token, tdm_tbl,tdm_tbl_len, &idle_slice_max_idx,0);
            lr_slice_max = tdm_gh2_scan_mix_slice_max(1,tdm_tbl,tdm_tbl_len, &lr_slice_max_idx,pos);
            /* Find idle clump with minimum size 2: ..._y_x_IDLE_IDLE_... */
            if ( (idle_slice_max>1 && lr_slice_max>0) &&
                 (idle_slice_max_idx<tdm_tbl_len && idle_slice_max_idx>1) &&
                 ((lr_slice_max_idx<tdm_tbl_len) && ((lr_slice_max_idx>0) || (lr_slice_max_idx==0 && lr_slice_max>1))) ){
                /* Check sister port spacing */
                idx_x = idle_slice_max_idx-1;
                idx_y = idle_slice_max_idx-2;
                idx1 = ((idle_slice_max_idx+2)<tdm_tbl_len) ? (idle_slice_max_idx+2) : ((idle_slice_max_idx+2)-tdm_tbl_len);
                idx2 = ((idle_slice_max_idx+3)<tdm_tbl_len) ? (idle_slice_max_idx+3) : ((idle_slice_max_idx+3)-tdm_tbl_len);
                tsc_x = tdm_gh2_legacy_which_tsc(tdm_tbl[idx_x],tsc);
                tsc_y = tdm_gh2_legacy_which_tsc(tdm_tbl[idx_y],tsc);
                tsc1 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx1],tsc);
                tsc2 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx2],tsc);
                if(tsc_x!=tsc1 && tsc_x!=tsc2 && tsc_y!=tsc1){
                    slot_token = tdm_tbl[idle_slice_max_idx];
                    slot_dst_idx= lr_slice_max_idx+(lr_slice_max/2);
                    for (i=idle_slice_max_idx; i<tdm_tbl_len; i++){
                        tdm_tbl[i] = tdm_tbl[i+1];
                    }
                    for (i=(tdm_tbl_len-1); i>slot_dst_idx; i--){
                        tdm_tbl[i] = tdm_tbl[i-1];
                    }
                    tdm_tbl[slot_dst_idx] = slot_token;
                    filter_done = BOOL_FALSE; /* progress made, keep going */
                    filter_cnt++;
                    TDM_PRINT2("Filter applied: IDLE slot moving from index #%03d to index #%03d\n",
                               idle_slice_max_idx, slot_dst_idx);
                }
            }
            if(filter_done == BOOL_TRUE){
                break;
            }
        }
    }
    return filter_cnt;
}

/**
@name: tdm_gh2_filter_smooth_os_slice
@param:
Smooth MMU TDM calendar by shifting linerate ports Downward/Upward
    Downward: x_ovsb___x_ovsb___x_ovsb -> ovsb_x___ovsb_x___ovsb_x
    Upward  : ovsb_x___ovsb_x___ovsb_x -> x_ovsb___x_ovsb___x_ovsb
*/
int
tdm_gh2_filter_smooth_os_slice(int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed, int dir)
{
    int i, slice_idx, shift_dir, shift_cnt=0,
        lr_clump_max, lr_clump_min, filter_port=0;
    const char *str_dir;

    str_dir = (dir==UP) ? "UPWARD": "DOWNWARD";
    shift_dir = (dir==UP) ? (UP) : (DN);
    for (i=1; i<tdm_tbl_len; i++) {
        filter_port=tdm_tbl[i];
        if (filter_port==tdm_tbl[0]) {continue;}
        lr_clump_max = tdm_gh2_scan_mix_slice_max(1,tdm_tbl,tdm_tbl_len, &slice_idx, 0);
        lr_clump_min = tdm_gh2_scan_mix_slice_min(1,tdm_tbl,tdm_tbl_len, &slice_idx, 0);
        /* stop once the calendar is already maximally smooth */
        if ( (lr_clump_max<=1) || (lr_clump_max==2 && lr_clump_min==1)){
            break;
        }
        if ( FAIL==tdm_gh2_check_shift_cond_pattern(filter_port, tdm_tbl, tdm_tbl_len, tsc, dir) ) {
            continue;
        }
        else if ( FAIL==tdm_gh2_check_shift_cond_local_slice(filter_port, tdm_tbl, tdm_tbl_len, tsc, dir) ) {
            continue;
        }
        else {
            tdm_gh2_filter_shift_lr_port(filter_port, tdm_tbl, tdm_tbl_len, shift_dir);
            TDM_PRINT3("Filter applied: Vector shift %8s, port %3d, beginning index #%03d \n",
                       str_dir, filter_port, i);
            shift_cnt++;
        }
    }
    return shift_cnt;
}

/**
@name: tdm_gh2_filter_smooth_os_slice_fine
@param:
Smooth MMU TDM calendar by migrating OVSB slots from the largest OVSB
clump into the smallest, subject to sister-port and same-port spacing.
*/
int
tdm_gh2_filter_smooth_os_slice_fine(int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed)
{
    int j, k, ovsb_token, pos, pos_step, pos_cnt, pos_cnt_max,
        slice_max, slice_min, slice_max_idx, slice_min_idx,
        pp_prox, idx0, idx1, tsc0, tsc1, slot_tmp,
        idx_dst, idx_src, filter_cnt=0;

    ovsb_token = GH2_OVSB_TOKEN;
    pos = 0;
    pos_cnt = 0;
    pos_cnt_max = 16; /* rotate min-clump scan start over 16 positions */
    pos_step = tdm_tbl_len/pos_cnt_max + 1;

    for (k=0; k<tdm_tbl_len; k++){
        pos = pos_cnt*pos_step;
        pos_cnt = ((pos_cnt+1)<pos_cnt_max)? (pos_cnt+1): (0);
        slice_max= tdm_gh2_scan_slice_max(ovsb_token,tdm_tbl,tdm_tbl_len, &slice_max_idx,k);
        slice_min= tdm_gh2_scan_slice_min(ovsb_token,tdm_tbl,tdm_tbl_len, &slice_min_idx,pos);
        if (slice_max_idx < k) {break;}
        if ( (slice_max-slice_min)<2 ||
             !(slice_max_idx>0 && slice_max_idx<tdm_tbl_len) ||
             !(slice_min_idx>0 && slice_min_idx<tdm_tbl_len) ){
            break;
        }
        pp_prox = PASS;
        idx_src = slice_max_idx;
        idx_dst = slice_min_idx;
        /* Check sister port spacing */
        idx0 = idx_src-1;
        idx1 = ((idx0+VBS_MIN_SPACING)<tdm_tbl_len) ? (idx0+VBS_MIN_SPACING) : (idx0+VBS_MIN_SPACING-tdm_tbl_len);
        GH2_TOKEN_CHECK(tdm_tbl[idx0]){
            GH2_TOKEN_CHECK(tdm_tbl[idx1]){
                tsc0 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx0],tsc);
                tsc1 = tdm_gh2_legacy_which_tsc(tdm_tbl[idx1],tsc);
                if ( tsc0==tsc1 ){
                    pp_prox=FAIL;
                }
            }
        }
        /* Check same port spacing */
        for (j=1; j<LLS_MIN_SPACING; j++){
            idx0 = ((idx_src-j)>=0)? (idx_src-j): (idx_src-j+tdm_tbl_len);
            idx1 = ((idx0+LLS_MIN_SPACING)<tdm_tbl_len)? (idx0+LLS_MIN_SPACING): (idx0+LLS_MIN_SPACING-tdm_tbl_len);
            GH2_TOKEN_CHECK(tdm_tbl[idx0]){
                GH2_TOKEN_CHECK(tdm_tbl[idx1]){
                    if ( speed[tdm_tbl[idx0]]<=SPEED_42G_HG2 && tdm_tbl[idx0]==tdm_tbl[idx1]){
                        pp_prox=FAIL;
                        break;
                    }
                }
            }
        }
        /* Migrate ovsb slot to from src to dst */
        if (pp_prox==PASS) {
            slot_tmp = tdm_tbl[idx_src];
            /* removal shifts everything after src left by one */
            idx_dst = (idx_src>idx_dst)? (idx_dst): (idx_dst-1);
            for (j=idx_src; j<tdm_tbl_len; j++){
                tdm_tbl[j] = tdm_tbl[j+1];
            }
            for (j=(tdm_tbl_len-1); j>idx_dst; j--){
                tdm_tbl[j] = tdm_tbl[j-1];
            }
            tdm_tbl[idx_dst] = slot_tmp;
            filter_cnt++;
            TDM_PRINT2("Filter applied: OVSB slot migrating from index #%03d to index #%03d\n",
                       idx_src, idx_dst);
        }
        else{
            k = slice_max_idx+slice_max; /* skip past the blocked clump */
        }
    }
    return filter_cnt;
}

/**
@name: tdm_gh2_filter_smooth_os_os_up
@param:
Smooth MMU TDM calendar by transforming:
    _x_y_ovsb_ovsb_ -> _x_ovsb_y_ovsb_
*/
int
tdm_gh2_filter_smooth_os_os_up(int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed)
{
    int i, slot_tmp, filter_cnt=0;

    for (i=2; i<(tdm_tbl_len-1); i++) {
        if (tdm_tbl[i  ]==GH2_OVSB_TOKEN &&
            tdm_tbl[i+1]==GH2_OVSB_TOKEN ){
            GH2_TOKEN_CHECK(tdm_tbl[i-1]){
                GH2_TOKEN_CHECK(tdm_tbl[i-2]){
                    if ( PASS==tdm_gh2_check_slot_swap_cond((i-1), tdm_tbl, tdm_tbl_len, tsc, speed) ){
                        /* swap y with the first OVSB slot */
                        slot_tmp     = tdm_tbl[i-1];
                        tdm_tbl[i-1] = tdm_tbl[i];
                        tdm_tbl[i  ] = slot_tmp;
                        filter_cnt++;
                        i += 3;
                        /* NOTE(review): i was already advanced, so the index
                           printed is past the swap point; kept as-is */
                        TDM_PRINT1("Filter applied: Local OVSB slot UP, index #%03d\n", i);
                    }
                }
            }
        }
    }
    return filter_cnt;
}

/** @name: tdm_gh2_filter_smooth_os_os_dn @param: Smooth MMU TDM calendar by transforming: --- _ovsb_ovsb_x_y_ ->
_ovsb_x_ovsb_y_ */ int tdm_gh2_filter_smooth_os_os_dn(int *tdm_tbl, int tdm_tbl_len, int **tsc, enum port_speed_e *speed) { int i, slot_tmp, filter_cnt=0; for (i=1; i<(tdm_tbl_len-3); i++) { if (tdm_tbl[i ]==GH2_OVSB_TOKEN && tdm_tbl[i+1]==GH2_OVSB_TOKEN ){ GH2_TOKEN_CHECK(tdm_tbl[i+2]){ GH2_TOKEN_CHECK(tdm_tbl[i+3]){ if ( PASS==tdm_gh2_check_slot_swap_cond((i+1), tdm_tbl, tdm_tbl_len, tsc, speed) ){ slot_tmp = tdm_tbl[i+1]; tdm_tbl[i+1] = tdm_tbl[i+2]; tdm_tbl[i+2] = slot_tmp; filter_cnt++; i += 3; TDM_PRINT1("Filter applied: Local OVSB slot UP, index #%03d\n", i); } } } } } return filter_cnt; } /** @name: tdm_gh2_filter_smooth_ancl @param: Smooth MMU TDM calendar with ANCILLARY slots */ int tdm_gh2_filter_smooth_ancl(int ancl_token, int *tdm_tbl, int tdm_tbl_len, int ancl_space_min) { int i, j, g, l, pool, ancl_num=0, ancl_dist_up, ancl_dist_dn, lr_slice_size, lr_slice_min, slice_idx, timeout; lr_slice_min = tdm_gh2_scan_slice_min(1, tdm_tbl, tdm_tbl_len, &slice_idx,0); /* Retrace ancillary slots */ for (i=0; i<tdm_tbl_len; i++) { if (tdm_tbl[i]==ancl_token) { for (j=i; j<tdm_tbl_len; j++) { tdm_tbl[j]=tdm_tbl[j+1]; } i--; ancl_num++; } } /* Smooth TDM table with ancillary slots */ pool = ancl_num; timeout=tdm_tbl_len; while( (pool>0) && ((--timeout)>0) ){ for (i=1; i<tdm_tbl_len; i++) { if (pool<1){break;} GH2_TOKEN_CHECK(tdm_tbl[i]) { lr_slice_size = tdm_gh2_scan_slice_size_local(i,tdm_tbl,tdm_tbl_len,&slice_idx); ancl_dist_up = tdm_gh2_check_same_port_dist_up_port(ancl_token,i,tdm_tbl,tdm_tbl_len); ancl_dist_dn = tdm_gh2_check_same_port_dist_dn_port(ancl_token,i,tdm_tbl,tdm_tbl_len); if ( lr_slice_size<=lr_slice_min && ancl_dist_up>ancl_space_min && ancl_dist_dn>ancl_space_min ){ for (j=tdm_tbl_len; j>i; j--) { tdm_tbl[j]=tdm_tbl[j-1]; } tdm_tbl[i]=ancl_token; pool--; TDM_PRINT1("Filter applied: Ancillary filter smoothing at index %0d\n",i); } } } lr_slice_min++; } /* Roll back if filter failed */ if(pool>0 && ancl_num>0 ){ TDM_PRINT0("Filter failed: Ancillary 
Filter failed, MMU TDM table rolling back\n"); for (i=0; i<tdm_tbl_len; i++) { if (tdm_tbl[i]==ancl_token) { for (j=i; j<tdm_tbl_len; j++) { tdm_tbl[j]=tdm_tbl[j+1]; } } } for (j=1; j<=ancl_num; j++) { g=tdm_PQ((((10*(tdm_tbl_len-ancl_num))/ancl_num)*j))+(j-1); for (l=255; l>g; l--) { tdm_tbl[l]=tdm_tbl[l-1]; } tdm_tbl[g]=ancl_token; } } return PASS; } /** @name: tdm_gh2_filter_chain @param: */ int tdm_gh2_filter_tdm5( tdm_mod_t *_tdm ) { int i, timeout=TIMEOUT, filter_result, filter_cnt, slice_idx, *tdm_pipe_main, tdm_pipe_main_len, **tdm_port_pmap, ovsb_token; int k, max_size, min_size, max_size_k, min_size_k, max_slice_idx, min_slice_idx; enum port_speed_e *tdm_port_speed; TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,tdm_pipe_main); tdm_pipe_main_len = _tdm->_chip_data.soc_pkg.lr_idx_limit + _tdm->_chip_data.soc_pkg.tvec_size; tdm_port_pmap = _tdm->_chip_data.soc_pkg.pmap; tdm_port_speed= _tdm->_chip_data.soc_pkg.speed; ovsb_token = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; /* tdm_gh2_print_tbl(_tdm); */ TDM_BIG_BAR TDM_PRINT0("Filters Applied to smooth MMU/IDB TDM calendar\n"); TDM_SML_BAR /* Sister port space */ if (_tdm->_core_data.vars_pkg.lr_enable==BOOL_TRUE){ filter_result = tdm_gh2_filter_sister_min(_tdm ); if (filter_result==PASS){ TDM_PRINT0("\nFilter done: ---SISTER MIN SPACING FILTER, PASS\n"); } else { TDM_WARN1("\nFilter done: ---SISTER MIN SPACING FILTER, FAIL, min space violation in Pipe %d\n", _tdm->_core_data.vars_pkg.cal_id); } TDM_SML_BAR } /* Case 1: linerate only */ if(_tdm->_core_data.vars_pkg.lr_enable && !(_tdm->_core_data.vars_pkg.os_enable) && !(_tdm->_core_data.vars_pkg.refactor_done)){ filter_result = tdm_gh2_filter_smooth_idle_slice(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed); TDM_PRINT1("\nFilter done: ---IDLE SLOT SMOOTH, filter applied <%d> times\n", filter_result); TDM_SML_BAR } /* Case 2: linerate and oversub mixed */ if (_tdm->_core_data.vars_pkg.lr_enable && _tdm->_core_data.vars_pkg.os_enable && 
!(_tdm->_core_data.vars_pkg.refactor_done)) { /* Smooth the extremely unbalanced ovsb clump */ filter_cnt= 0; max_size = tdm_gh2_scan_mix_slice_max(ovsb_token,tdm_pipe_main,tdm_pipe_main_len, &max_slice_idx, 0); min_size = tdm_gh2_scan_mix_slice_min(ovsb_token,tdm_pipe_main,tdm_pipe_main_len, &min_slice_idx, 0); if( (max_size-min_size)>=6 ){ for (k=1; k<max_size-min_size; k++){ filter_result = tdm_gh2_filter_migrate_os_slot(max_slice_idx, min_slice_idx, tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed ); if(filter_result==FAIL){break;} filter_cnt++; max_size_k = tdm_gh2_scan_mix_slice_max(ovsb_token,tdm_pipe_main,tdm_pipe_main_len, &max_slice_idx, 0); min_size_k = tdm_gh2_scan_mix_slice_min(ovsb_token,tdm_pipe_main,tdm_pipe_main_len, &min_slice_idx, 0); if ((max_size_k-min_size_k)<=1) {break;} } } TDM_PRINT1("\nFilter done: ---OVSB MAX/MIN SLICE BALANCE, filter applied <%d> times\n", filter_cnt); TDM_SML_BAR /* Smooth oversub distribution */ filter_cnt = 0; timeout = _tdm->_chip_data.soc_pkg.lr_idx_limit; do{ filter_result = tdm_gh2_filter_smooth_os_slice(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed, DN); TDM_PRINT2("\nFilter done: ---OVSB MAX SLICE REDUCTION (1) Shift Down (%2d), filter applied <%d> times\n", ++filter_cnt, filter_result); TDM_SML_BAR } while ( filter_result>0 && (timeout--)>0 ); filter_cnt = 0; timeout = _tdm->_chip_data.soc_pkg.lr_idx_limit; do{ filter_result = tdm_gh2_filter_smooth_os_slice(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed, UP); TDM_PRINT2("\nFilter done: ---OVSB MAX SLICE REDUCTION (2) Shift Up (%2d), filter applied <%d> times\n", ++filter_cnt, filter_result); TDM_SML_BAR } while ( filter_result>0 && (timeout--)>0 ); /* Smooth oversub distribution */ filter_result = tdm_gh2_filter_smooth_os_os_up(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed); TDM_PRINT1("\nFilter done: ---OVSB LOCAL SLICE REDUCTION (1) slot UP, X_Y_OVSB_OVSB, filter applied <%d> times \n", 
filter_result); TDM_SML_BAR filter_result = tdm_gh2_filter_smooth_os_os_dn(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed); TDM_PRINT1("\nFilter done: ---OVSB LOCAL SLICE REDUCTION (2) slot DOWN, OVSB_OVSB_X_Y, filter applied <%d> times \n", filter_result); TDM_SML_BAR /* Smooth oversub distribution */ filter_result = tdm_gh2_filter_smooth_os_slice_fine(tdm_pipe_main, tdm_pipe_main_len, tdm_port_pmap, tdm_port_speed); TDM_PRINT1("\nFilter done: ---OVSB SLOT MIGRATION, filter applied <%d> times\n", filter_result); TDM_SML_BAR /* Smooth oversub distribution */ filter_result = 0; timeout=DITHER_PASS; while ( tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>1 && tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>=tdm_gh2_slice_size(1,tdm_pipe_main,tdm_pipe_main_len) && ((--timeout)>0) ) { filter_result += tdm_gh2_filter_dither(tdm_pipe_main,_tdm->_chip_data.soc_pkg.lr_idx_limit,_tdm->_chip_data.soc_pkg.tvec_size,tdm_port_pmap,DITHER_THRESHOLD,tdm_port_speed); } timeout=DITHER_PASS; while ( tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>1 && tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>=tdm_gh2_slice_size(1,tdm_pipe_main,tdm_pipe_main_len) && ((--timeout)>0) ) { filter_result += tdm_gh2_filter_dither(tdm_pipe_main,_tdm->_chip_data.soc_pkg.lr_idx_limit,_tdm->_chip_data.soc_pkg.tvec_size,tdm_port_pmap,(DITHER_THRESHOLD-DITHER_SUBPASS_STEP_SIZE),tdm_port_speed); } timeout=DITHER_PASS; while ( tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>1 && tdm_gh2_slice_size(ovsb_token,tdm_pipe_main,tdm_pipe_main_len)>=tdm_gh2_slice_size(1,tdm_pipe_main,tdm_pipe_main_len) && ((--timeout)>0) ) { filter_result += tdm_gh2_filter_dither(tdm_pipe_main,_tdm->_chip_data.soc_pkg.lr_idx_limit,_tdm->_chip_data.soc_pkg.tvec_size,tdm_port_pmap,(DITHER_THRESHOLD-DITHER_SUBPASS_STEP_SIZE-DITHER_SUBPASS_STEP_SIZE),tdm_port_speed); } TDM_PRINT1("Filter done: ---DITHER (1) filter finished, applied times %d\n", 
filter_result); TDM_SML_BAR /* Smooth oversub distribution */ filter_result = 0; if (tdm_gh2_slice_size(1, tdm_pipe_main, tdm_pipe_main_len)==2) { for (i=3; i<tdm_pipe_main_len; i++) { if (tdm_pipe_main[i-3]!=ovsb_token && tdm_pipe_main[i ]!=ovsb_token && tdm_pipe_main[i+3]!=ovsb_token && tdm_pipe_main[i+6]!=ovsb_token && tdm_gh2_scan_slice_size_local((i-3), tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2 && tdm_gh2_scan_slice_size_local( i, tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2 && tdm_gh2_scan_slice_size_local((i+3), tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2 && tdm_gh2_scan_slice_size_local((i+6), tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2) { if (tdm_gh2_filter_fine_dither(i,tdm_pipe_main,_tdm->_chip_data.soc_pkg.lr_idx_limit,_tdm->_chip_data.soc_pkg.tvec_size,tdm_port_pmap)==FAIL) { break; } filter_result++; } } for (i=3; i<tdm_pipe_main_len; i++) { if (tdm_pipe_main[i-3]!=ovsb_token && tdm_pipe_main[i ]!=ovsb_token && tdm_pipe_main[i+3]!=ovsb_token && tdm_gh2_scan_slice_size_local((i-3), tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2 && tdm_gh2_scan_slice_size_local( i, tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2 && tdm_gh2_scan_slice_size_local((i+3), tdm_pipe_main, tdm_pipe_main_len, &slice_idx)==2) { if (tdm_gh2_filter_fine_dither(i,tdm_pipe_main,_tdm->_chip_data.soc_pkg.lr_idx_limit,_tdm->_chip_data.soc_pkg.tvec_size,tdm_port_pmap)==FAIL) { break; } filter_result++; } } } TDM_PRINT1("\nFilter done: ---DITHER (2) filter finished, applied times %d\n", filter_result); TDM_SML_BAR } return ( _tdm->_chip_exec[TDM_CHIP_EXEC__PARSE]( _tdm ) ); } /** @name: tdm_gh2_filter_ovsb_p1 @param: */ int tdm_gh2_filter_ovsb_p1(tdm_mod_t *_tdm) { int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0, x, y, port_x, port_y, tsc_x, tsc_y, sister_prox; int param_lr_limit, param_ancl_num, param_cal_len, param_space_sister, param_token_ovsb, param_token_ancl, param_phy_lo, param_phy_hi, param_lr_en, param_os_en; int *param_cal_main; param_lr_limit = 
_tdm->_chip_data.soc_pkg.lr_idx_limit; param_ancl_num = _tdm->_chip_data.soc_pkg.tvec_size; param_cal_len = param_lr_limit + param_ancl_num; param_phy_lo = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo; param_phy_hi = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi; param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; param_token_ancl = _tdm->_chip_data.soc_pkg.soc_vars.ancl_token; param_space_sister= _tdm->_core_data.rule__prox_port_min; param_lr_en = _tdm->_core_data.vars_pkg.lr_enable; param_os_en = _tdm->_core_data.vars_pkg.os_enable; TDM_PRINT0("Smooth dual OVSB pattern: Z_Y_OVSB_X_OVSB_OVSB \n\n"); TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main); if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){ lr_slot_cnt = 0; os_slot_cnt = 0; for (i=0; i<param_cal_len; i++){ if (param_cal_main[i]==param_token_ovsb){ os_slot_cnt++; } else { lr_slot_cnt++; } } lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0); if (os_slot_cnt>0 && lr_slot_cnt>0 && 2*os_slot_cnt>lr_slot_cnt){ for (i=0; i<(param_cal_len-6); i++){ /* z_y_ovsb_x_ovsb_ovsb -> z_ovsb_y_ovsb_x_ovsb */ if ((param_cal_main[i] !=param_token_ovsb && param_cal_main[i] !=param_token_ancl) && (param_cal_main[i+1]!=param_token_ovsb && param_cal_main[i+1]!=param_token_ancl) && (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) && (param_cal_main[i+3]!=param_token_ovsb && param_cal_main[i+3]!=param_token_ancl) && (param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl) && (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) ){ sister_prox = PASS; x = i+3; y = (x+param_space_sister)%param_cal_len; port_x = param_cal_main[x]; port_y = param_cal_main[y]; if (param_space_sister>0 && port_x>=param_phy_lo && port_x<=param_phy_hi && port_y>=param_phy_lo && port_y<=param_phy_hi){ _tdm->_core_data.vars_pkg.port = port_x; tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); 
_tdm->_core_data.vars_pkg.port = port_y; tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); if (tsc_x==tsc_y){ sister_prox = FAIL; } } if (sister_prox==PASS){ param_cal_main[x] = param_cal_main[x+1]; param_cal_main[x+1]= port_x; x = i+1; port_x = param_cal_main[x]; param_cal_main[x] = param_cal_main[x+1]; param_cal_main[x+1]= port_x; filter_cnt++; TDM_PRINT4("Shift OVSB slot UP (1)from %03d to %03d (2)from %03d to %03d\n", i+4, i+3, i+2, i+1); } } } } } return filter_cnt; } /** @name: tdm_gh2_filter_ovsb_5x @param: */ int tdm_gh2_filter_ovsb_5x(tdm_mod_t *_tdm) { int i, j, k, lr_slot_cnt, os_slot_cnt, filter_cnt=0, x, port_x, idx, idx_up, idx_dn, dist_up, dist_dn; int param_lr_limit, param_ancl_num, param_cal_len, param_token_ovsb, param_token_ancl, param_lr_en, param_os_en; int *param_cal_main; param_lr_limit = _tdm->_chip_data.soc_pkg.lr_idx_limit; param_ancl_num = _tdm->_chip_data.soc_pkg.tvec_size; param_cal_len = param_lr_limit + param_ancl_num; param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; param_token_ancl = _tdm->_chip_data.soc_pkg.soc_vars.ancl_token; param_lr_en = _tdm->_core_data.vars_pkg.lr_enable; param_os_en = _tdm->_core_data.vars_pkg.os_enable; TDM_PRINT0("Smooth quadrant OVSB pattern: OVSB_OVSB_OVSB_OVSB_OVSB \n\n"); TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main); if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){ lr_slot_cnt = 0; os_slot_cnt = 0; for (i=0; i<param_cal_len; i++){ if (param_cal_main[i]==param_token_ovsb){ os_slot_cnt++; } else { lr_slot_cnt++; } } lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0); if (os_slot_cnt>0 && lr_slot_cnt>0 && os_slot_cnt>lr_slot_cnt && os_slot_cnt<2*lr_slot_cnt){ /* smooth ovsb_ovsb_ovsb_ovsb_ovsb_ovsb by ANCL */ for (i=0; (i+4)<param_cal_len; i++){ if ((param_cal_main[i] ==param_token_ovsb) && (param_cal_main[i+1]==param_token_ovsb) && (param_cal_main[i+2]==param_token_ovsb) && (param_cal_main[i+3]==param_token_ovsb) && 
(param_cal_main[i+4]==param_token_ovsb)){ idx_up = param_cal_len; idx_dn = param_cal_len; idx = param_cal_len; for (j=1; j<param_cal_len; j++){ k = (param_cal_len+i-j)%param_cal_len; if (param_cal_main[k]==param_token_ancl){ idx_up = k; break; } } for (j=1; j<param_cal_len; j++){ k = (i+j)%param_cal_len; if (param_cal_main[k]==param_token_ancl){ idx_dn = k; break; } } if (idx_up<param_cal_len || idx_dn<param_cal_len){ if (idx_up<param_cal_len && idx_dn<param_cal_len){ dist_up = (idx_up<i)?(i-idx_up):(i+param_cal_len-idx_up); dist_dn = (idx_dn>i)?(idx_dn-i):(param_cal_len-i+idx_dn); idx = (dist_up<dist_dn)?(idx_up):(idx_dn); } else if (idx_up<param_cal_len) { idx = idx_up; } else if (idx_dn<param_cal_len) { idx = idx_dn; } } if (idx>=0 && idx<param_cal_len){ x = i+2; port_x = param_cal_main[x]; param_cal_main[x] = param_cal_main[idx]; param_cal_main[idx]= port_x; filter_cnt++; TDM_PRINT2("(1)Swap ANCL with OVSB from %03d to %03d\n", idx, x); } } else if ((param_cal_main[i] ==param_token_ovsb || param_cal_main[i] ==param_token_ancl) && (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) && (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) && (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) && (param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl)){ idx = param_cal_len; for (j=0; j<=4; j++){ if(param_cal_main[i+j]==param_token_ancl){ idx = i+j; break; } } if (idx>=0 && idx<param_cal_len){ x = i+2; port_x = param_cal_main[x]; param_cal_main[x] = param_cal_main[idx]; param_cal_main[idx]= port_x; filter_cnt++; TDM_PRINT2("(2)Swap ANCL with OVSB from %03d to %03d\n", idx, x); } } } } if (filter_cnt>0){ TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt); } } TDM_SML_BAR return filter_cnt; } /** @name: tdm_gh2_filter_ovsb_4x @param: */ int tdm_gh2_filter_ovsb_4x(tdm_mod_t *_tdm) { int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0, x, y, 
port_x, port_y, tsc_x, tsc_y, sister_prox; int param_lr_limit, param_ancl_num, param_cal_len, param_token_ovsb, param_token_ancl, param_space_sister, param_phy_lo, param_phy_hi, param_lr_en, param_os_en; int *param_cal_main; param_lr_limit = _tdm->_chip_data.soc_pkg.lr_idx_limit; param_ancl_num = _tdm->_chip_data.soc_pkg.tvec_size; param_cal_len = param_lr_limit + param_ancl_num; param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; param_token_ancl = _tdm->_chip_data.soc_pkg.soc_vars.ancl_token; param_space_sister= _tdm->_core_data.rule__prox_port_min; param_phy_lo = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo; param_phy_hi = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi; param_lr_en = _tdm->_core_data.vars_pkg.lr_enable; param_os_en = _tdm->_core_data.vars_pkg.os_enable; TDM_PRINT0("Smooth quadrant OVSB pattern: OVSB_OVSB_OVSB_OVSB \n\n"); TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main); if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){ lr_slot_cnt = 0; os_slot_cnt = 0; for (i=0; i<param_cal_len; i++){ if (param_cal_main[i]==param_token_ovsb){ os_slot_cnt++; } else { lr_slot_cnt++; } } lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0); if (os_slot_cnt>0 && lr_slot_cnt>0 && os_slot_cnt>lr_slot_cnt && os_slot_cnt<2*lr_slot_cnt){ /* ovsb_ovsb_ovsb_ovsb_x */ for (i=0; i<(param_cal_len-6); i++){ /* ovsb_ovsb_ovsb_ovsb_x_ovsb_y -> ovsb_ovsb_ovsb_ovsb_x_ovsb_ovsb_y */ if ((param_cal_main[i] ==param_token_ovsb || param_cal_main[i] ==param_token_ancl) && (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) && (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) && (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) && (param_cal_main[i+4]!=param_token_ovsb && param_cal_main[i+4]!=param_token_ancl) && (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) && (param_cal_main[i+6]!=param_token_ovsb && 
param_cal_main[i+6]!=param_token_ancl) ){ x = i+4; port_x = param_cal_main[x]; param_cal_main[x] = param_cal_main[x-1]; param_cal_main[x-1]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x); } /* ovsb_ovsb_ovsb_ovsb_x_y -> ovsb_ovsb_x_ovsb_ovsb_y */ else if ((param_cal_main[i] ==param_token_ovsb || param_cal_main[i] ==param_token_ancl) && (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) && (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) && (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) && (param_cal_main[i+4]!=param_token_ovsb && param_cal_main[i+4]!=param_token_ancl) && (param_cal_main[i+5]!=param_token_ovsb && param_cal_main[i+5]!=param_token_ancl) ){ sister_prox = PASS; x = i+4; y = ((x-2)>=(param_space_sister-1))? (x-2-param_space_sister+1): (param_cal_len+x-2-param_space_sister+1); port_x = param_cal_main[x]; port_y = param_cal_main[y]; if (param_space_sister>0 && port_x>=param_phy_lo && port_x<=param_phy_hi && port_y>=param_phy_lo && port_y<=param_phy_hi){ _tdm->_core_data.vars_pkg.port = port_x; tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); _tdm->_core_data.vars_pkg.port = port_y; tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); if (tsc_x==tsc_y){ sister_prox = FAIL; } } if (sister_prox==PASS){ param_cal_main[x] = param_cal_main[x-2]; param_cal_main[x-2]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-2, x); } } } /* x_ovsb_ovsb_ovsb_ovsb */ for (i=0; i<(param_cal_len-6); i++){ /* y_ovsb_x_ovsb_ovsb_ovsb_ovsb -> y_ovsb_ovsb_x_ovsb_ovsb_ovsb */ if ((param_cal_main[i] !=param_token_ovsb && param_cal_main[i] !=param_token_ancl) && (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) && (param_cal_main[i+2]!=param_token_ovsb && param_cal_main[i+2]!=param_token_ancl) && (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) && 
(param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl) && (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) && (param_cal_main[i+6]==param_token_ovsb || param_cal_main[i+6]==param_token_ancl) ){ x = i+2; port_x = param_cal_main[x]; param_cal_main[x] = param_cal_main[x+1]; param_cal_main[x+1]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x, x+1); } /* y_x_ovsb_ovsb_ovsb_ovsb -> y_ovsb_ovsb_x_ovsb_ovsb */ else if ((param_cal_main[i] !=param_token_ovsb && param_cal_main[i] !=param_token_ancl) && (param_cal_main[i+1]!=param_token_ovsb && param_cal_main[i+1]!=param_token_ancl) && (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) && (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) && (param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl) && (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) ){ sister_prox = PASS; x = i+1; y = ((x+2)+(param_space_sister-1))%param_cal_len; port_x = param_cal_main[x]; port_y = param_cal_main[y]; if (param_space_sister>0 && port_x>=param_phy_lo && port_x<=param_phy_hi && port_y>=param_phy_lo && port_y<=param_phy_hi){ _tdm->_core_data.vars_pkg.port = port_x; tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); _tdm->_core_data.vars_pkg.port = port_y; tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); if (tsc_x==tsc_y){ sister_prox = FAIL; } } if (sister_prox==PASS){ param_cal_main[x] = param_cal_main[x+2]; param_cal_main[x+2]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x, x+2); } } } } if (filter_cnt>0){ TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt); } } TDM_SML_BAR return filter_cnt; } /** @name: tdm_gh2_filter_ovsb_3x @param: */ int tdm_gh2_filter_ovsb_3x(tdm_mod_t *_tdm) { int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0, x, y, port_x, port_y, tsc_x, tsc_y, sister_prox; int 
param_lr_limit, param_ancl_num, param_cal_len, param_token_ovsb, param_space_sister, param_phy_lo, param_phy_hi, param_lr_en, param_os_en; int *param_cal_main; param_lr_limit = _tdm->_chip_data.soc_pkg.lr_idx_limit; param_ancl_num = _tdm->_chip_data.soc_pkg.tvec_size; param_cal_len = param_lr_limit + param_ancl_num; param_phy_lo = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo; param_phy_hi = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi; param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; param_space_sister= _tdm->_core_data.rule__prox_port_min; param_lr_en = _tdm->_core_data.vars_pkg.lr_enable; param_os_en = _tdm->_core_data.vars_pkg.os_enable; TDM_PRINT0("Smooth triple OVSB pattern: OVSB_OVSB_OVSB \n\n"); TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main); if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){ lr_slot_cnt = 0; os_slot_cnt = 0; for (i=0; i<param_cal_len; i++){ if (param_cal_main[i]==param_token_ovsb){ os_slot_cnt++; } else { lr_slot_cnt++; } } lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0); /* ovsb_ovsb_ovsb_x */ if (os_slot_cnt>0 && lr_slot_cnt>0 && os_slot_cnt>lr_slot_cnt && os_slot_cnt<2*lr_slot_cnt){ /* y_ovsb_ovsb_ovsb_x_ovsb_z -> y_ovsb_ovsb_x_ovsb_ovsb_z */ for (i=0; i<(param_cal_len-6); i++){ if (param_cal_main[i] !=param_token_ovsb && param_cal_main[i+1]==param_token_ovsb && param_cal_main[i+2]==param_token_ovsb && param_cal_main[i+3]==param_token_ovsb && param_cal_main[i+4]!=param_token_ovsb && /* param_cal_main[i+5]==param_token_ovsb && */ !(param_cal_main[i+5]>=param_phy_lo && param_cal_main[i+5]<=param_phy_hi ) && param_cal_main[i+6]!=param_token_ovsb ){ sister_prox = PASS; x = i+4; y = i; port_x = param_cal_main[x]; port_y = param_cal_main[y]; if (param_space_sister>0 && port_x>=param_phy_lo && port_x<=param_phy_hi && port_y>=param_phy_lo && port_y<=param_phy_hi){ _tdm->_core_data.vars_pkg.port = port_x; tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); 
_tdm->_core_data.vars_pkg.port = port_y; tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); if (tsc_x==tsc_y){ sister_prox = FAIL; } } if (sister_prox==PASS){ param_cal_main[x] = param_cal_main[x-1]; param_cal_main[x-1]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x); } } } /* y_ovsb_x_ovsb_ovsb_ovsb_z -> y_ovsb_ovsb_x_ovsb_ovsb_z */ for (i=0; i<(param_cal_len-6); i++){ if (param_cal_main[i] !=param_token_ovsb && /* param_cal_main[i+1]==param_token_ovsb && */ !(param_cal_main[i+1]>=param_phy_lo && param_cal_main[i+1]<=param_phy_hi ) && param_cal_main[i+2]!=param_token_ovsb && param_cal_main[i+3]==param_token_ovsb && param_cal_main[i+4]==param_token_ovsb && param_cal_main[i+5]==param_token_ovsb && param_cal_main[i+6]!=param_token_ovsb ){ sister_prox = PASS; x = i+2; y = i+6; port_x = param_cal_main[x]; port_y = param_cal_main[y]; if (param_space_sister>0 && port_x>=param_phy_lo && port_x<=param_phy_hi && port_y>=param_phy_lo && port_y<=param_phy_hi){ _tdm->_core_data.vars_pkg.port = port_x; tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); _tdm->_core_data.vars_pkg.port = port_y; tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm); if (tsc_x==tsc_y){ sister_prox = FAIL; } } if (sister_prox==PASS){ param_cal_main[x] = param_cal_main[x+1]; param_cal_main[x+1]= port_x; filter_cnt++; TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x); } } } } if (filter_cnt>0){ TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt); } } TDM_SML_BAR return filter_cnt; } /** @name: tdm_gh2_filter_ovsb_2x @param: */ int tdm_gh2_filter_ovsb_2x(tdm_mod_t *_tdm) { int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0, x, y, port_x, port_y, tsc_x, tsc_y, sister_prox; int param_lr_limit, param_ancl_num, param_cal_len, param_space_sister, param_token_ovsb, param_phy_lo, param_phy_hi, param_lr_en, param_os_en; int *param_cal_main; param_lr_limit = _tdm->_chip_data.soc_pkg.lr_idx_limit; param_ancl_num = 
_tdm->_chip_data.soc_pkg.tvec_size; param_cal_len = param_lr_limit + param_ancl_num; param_phy_lo = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo; param_phy_hi = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi; param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token; param_space_sister= _tdm->_core_data.rule__prox_port_min; param_lr_en = _tdm->_core_data.vars_pkg.lr_enable; param_os_en = _tdm->_core_data.vars_pkg.os_enable; TDM_PRINT0("Smooth dual OVSB pattern: OVSB_OVSB \n\n"); TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main); if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){ lr_slot_cnt = 0; os_slot_cnt = 0; for (i=0; i<param_cal_len; i++){ if (param_cal_main[i]==param_token_ovsb){ os_slot_cnt++; } else { lr_slot_cnt++; } } lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0); /* ovsb_ovsb_x_z */ if (os_slot_cnt>0 && lr_slot_cnt>0 && os_slot_cnt<lr_slot_cnt){ /* ovsb_ovsb_x_z -> ovsb_x_ovsb_z */ for (i=0; i<(param_cal_len-3); i++){ if (param_cal_main[i] ==param_token_ovsb && param_cal_main[i+1]==param_token_ovsb && param_cal_main[i+2]!=param_token_ovsb && param_cal_main[i+3]!=param_token_ovsb){ sister_prox = PASS; x = i+2; y = (x>=param_space_sister)? 
/* NOTE(review): the lines below are the continuation of the dual-OVSB filter
 * whose start lies before this chunk; its tokens are preserved unchanged.
 * This tail handles the ovsb_ovsb_x_z -> ovsb_x_ovsb_z and
 * z_x_ovsb_ovsb -> z_ovsb_x_ovsb swaps. */
                    (x-param_space_sister): (param_cal_len+x-param_space_sister);
                port_x = param_cal_main[x];
                port_y = param_cal_main[y];
                /* sister-port check: PM_SCAN appears to map vars_pkg.port to its
                 * port-macro (TSC); same TSC within min spacing blocks the swap
                 * -- TODO confirm PM_SCAN semantics against core exec table */
                if (param_space_sister>0 &&
                    port_x>=param_phy_lo && port_x<=param_phy_hi &&
                    port_y>=param_phy_lo && port_y<=param_phy_hi){
                    _tdm->_core_data.vars_pkg.port = port_x;
                    tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                    _tdm->_core_data.vars_pkg.port = port_y;
                    tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                    if (tsc_x==tsc_y){
                        sister_prox = FAIL;
                    }
                }
                if (sister_prox==PASS){
                    /* swap the line-rate port one slot earlier (OVSB moves down) */
                    param_cal_main[x] = param_cal_main[x-1];
                    param_cal_main[x-1]= port_x;
                    filter_cnt++;
                    TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                }
            }
        }
        /* z_x_ovsb_ovsb -> z_ovsb_x_ovsb */
        for (i=0; i<(param_cal_len-3); i++){
            if (param_cal_main[i] !=param_token_ovsb &&
                param_cal_main[i+1]!=param_token_ovsb &&
                param_cal_main[i+2]==param_token_ovsb &&
                param_cal_main[i+3]==param_token_ovsb){
                sister_prox = PASS;
                x = i+1;
                /* y: slot at the minimum sister spacing, wrapped around the calendar */
                y = (x+param_space_sister<param_cal_len)?
                    (x+param_space_sister): (x+param_space_sister-param_cal_len);
                port_x = param_cal_main[x];
                port_y = param_cal_main[y];
                if (param_space_sister>0 &&
                    port_x>=param_phy_lo && port_x<=param_phy_hi &&
                    port_y>=param_phy_lo && port_y<=param_phy_hi){
                    _tdm->_core_data.vars_pkg.port = port_x;
                    tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                    _tdm->_core_data.vars_pkg.port = port_y;
                    tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                    if (tsc_x==tsc_y){
                        sister_prox = FAIL;
                    }
                }
                if (sister_prox==PASS){
                    param_cal_main[x] = param_cal_main[x+1];
                    param_cal_main[x+1]= port_x;
                    filter_cnt++;
                    TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                }
            }
        }
    }
    if (filter_cnt>0){
        TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt);
    }
}
TDM_SML_BAR
return filter_cnt;
}

/**
 * @name: tdm_gh2_filter_ovsb_1x
 * @param: _tdm - chip/core state; operates in place on the calendar selected
 *         by vars_pkg.cal_id.
 *
 * Smooth single OVSB slots when line-rate slots dominate (2X unbalanced
 * neighbouring runs): each isolated OVSB slot is walked toward the midpoint
 * of its surrounding line-rate runs, one swap at a time, subject to the
 * sister-port (same-TSC) spacing check.  Returns the number of swaps applied.
 */
int tdm_gh2_filter_ovsb_1x(tdm_mod_t *_tdm)
{
    int i, j, k, lr_slot_cnt, os_slot_cnt, filter_cnt=0,
        x, y, port_x, port_y, tsc_x, tsc_y, sister_prox,
        dist_up, dist_dn, dist_range;
    int param_lr_limit, param_ancl_num, param_cal_len, param_space_sister,
        param_token_ovsb, param_phy_lo, param_phy_hi,
        param_lr_en, param_os_en;
    int *param_cal_main;

    param_lr_limit    = _tdm->_chip_data.soc_pkg.lr_idx_limit;
    param_ancl_num    = _tdm->_chip_data.soc_pkg.tvec_size;
    param_cal_len     = param_lr_limit + param_ancl_num;
    param_phy_lo      = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi      = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_token_ovsb  = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token;
    param_space_sister= _tdm->_core_data.rule__prox_port_min;
    param_lr_en       = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en       = _tdm->_core_data.vars_pkg.os_enable;

    TDM_PRINT0("Smooth single OVSB pattern: 2X unbalanced neighbouring slots \n\n");
    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main);
    if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){
        /* count oversub vs line-rate slots; ancillary slots are counted as
         * line-rate first and subtracted afterwards */
        lr_slot_cnt = 0;
        os_slot_cnt = 0;
        for (i=0; i<param_cal_len; i++){
            if (param_cal_main[i]==param_token_ovsb){
                os_slot_cnt++;
            } else {
                lr_slot_cnt++;
            }
        }
        lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0);
        /* 2X unbalanced neighbouring slots */
        if (os_slot_cnt>0 && lr_slot_cnt>0 && 2*os_slot_cnt<=lr_slot_cnt){
            for (i=1; i<param_cal_len; i++){
                if (param_cal_main[i]==param_token_ovsb){
                    /* distance to the previous / next OVSB slot */
                    dist_up = 0;
                    dist_dn = 0;
                    for (j=i; j>0; j--){
                        if (param_cal_main[j-1]!=param_token_ovsb){
                            dist_up++;
                        } else{
                            break;
                        }
                    }
                    for (j=i+1; j<i+1+param_cal_len; j++){
                        k = j%param_cal_len;
                        if (param_cal_main[k]!=param_token_ovsb){
                            dist_dn++;
                        } else {
                            break;
                        }
                    }
                    if (dist_up>=2*dist_dn){
                        /* upstream run is twice as long: walk this OVSB slot up */
                        dist_range = (dist_up - dist_dn)/2;
                        for (j=i; j>(i-dist_range) && j>2; j--){
                            sister_prox = PASS;
                            x = j-1;
                            y = ((x+param_space_sister)<param_cal_len)?
                                (x+param_space_sister): (x+param_space_sister-param_cal_len);
                            port_x = param_cal_main[x];
                            port_y = param_cal_main[y];
                            if (param_space_sister>0 &&
                                port_x>=param_phy_lo && port_x<=param_phy_hi &&
                                port_y>=param_phy_lo && port_y<=param_phy_hi){
                                _tdm->_core_data.vars_pkg.port = port_x;
                                tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                                _tdm->_core_data.vars_pkg.port = port_y;
                                tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                                if (tsc_x==tsc_y){
                                    sister_prox = FAIL;
                                }
                            }
                            if (sister_prox==PASS){
                                param_cal_main[x] = param_cal_main[x+1];
                                param_cal_main[x+1]= port_x;
                                filter_cnt++;
                                TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                            } else {
                                break;
                            }
                        }
                    }
                    else if (dist_dn>=2*dist_up){
                        /* downstream run is twice as long: walk this OVSB slot down */
                        dist_range = (dist_dn - dist_up)/2;
                        for (j=i; j<(i+dist_range) && j<(param_cal_len-1); j++){
                            sister_prox = PASS;
                            x = j+1;
                            y = (x>=param_space_sister)?
                                (x-param_space_sister): (param_cal_len+x-param_space_sister);
                            port_x = param_cal_main[x];
                            port_y = param_cal_main[y];
                            if (param_space_sister>0 &&
                                port_x>=param_phy_lo && port_x<=param_phy_hi &&
                                port_y>=param_phy_lo && port_y<=param_phy_hi){
                                _tdm->_core_data.vars_pkg.port = port_x;
                                tsc_x = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                                _tdm->_core_data.vars_pkg.port = port_y;
                                tsc_y = _tdm->_core_exec[TDM_CORE_EXEC__PM_SCAN](_tdm);
                                if (tsc_x==tsc_y){
                                    sister_prox = FAIL;
                                }
                            }
                            if (sister_prox==PASS){
                                param_cal_main[x] = param_cal_main[x-1];
                                param_cal_main[x-1]= port_x;
                                filter_cnt++;
                                TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                            } else {
                                break;
                            }
                        }
                    }
                }
            }
        }
        if (filter_cnt>0){
            TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt);
        }
    }
    TDM_SML_BAR
    return filter_cnt;
}

/**
 * @name: tdm_gh2_filter_ovsb
 * @param: _tdm
 *
 * Run the whole oversubscription smoothing pipeline, from the widest OVSB
 * cluster pattern down to single slots; the 1x pass is re-run once if it
 * made any change.  Always returns PASS.
 */
int tdm_gh2_filter_ovsb(tdm_mod_t *_tdm)
{
    int filter_cnt;
    /* Z_Y_OVSB_X_OVSB_OVSB */
    tdm_gh2_filter_ovsb_p1(_tdm);
    /* OVSB_OVSB_OVSB_OVSB_OVSB */
    tdm_gh2_filter_ovsb_5x(_tdm);
    /* OVSB_OVSB_OVSB_OVSB */
    tdm_gh2_filter_ovsb_4x(_tdm);
    /* OVSB_OVSB_OVSB */
    tdm_gh2_filter_ovsb_3x(_tdm);
    /* OVSB_OVSB */
    tdm_gh2_filter_ovsb_2x(_tdm);
    /* OVSB */
    filter_cnt = tdm_gh2_filter_ovsb_1x(_tdm);
    if (filter_cnt>0){
        tdm_gh2_filter_ovsb_1x(_tdm);
    }
    return PASS;
}

/**
 * @name: tdm_gh2_filter
 * @param: _tdm
 *
 * Top-level filter entry: applies the OVSB smoothing to both the IDB and MMU
 * calendars of the pipe selected by cal_id, restores cal_id, then chains to
 * the chip parser.
 */
int tdm_gh2_filter( tdm_mod_t *_tdm )
{
    int param_lr_en, param_os_en, param_cal_id;

    param_lr_en  = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en  = _tdm->_core_data.vars_pkg.os_enable;
    param_cal_id = _tdm->_core_data.vars_pkg.cal_id;

    TDM_BIG_BAR
    TDM_PRINT0("TDM: Filters Applied to smooth TDM calendar\n\n");
    TDM_SML_BAR
    if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){
        switch(param_cal_id){
            case 0: case 4:
                TDM_PRINT0("TDM: <IDB PIPE 0> \n");
                _tdm->_core_data.vars_pkg.cal_id = 0;
                tdm_gh2_filter_ovsb(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 0> \n");
                _tdm->_core_data.vars_pkg.cal_id = 4;
                tdm_gh2_filter_ovsb(_tdm);
                break;
            case 1: case 5:
                TDM_PRINT0("TDM: <IDB PIPE 1> \n");
                _tdm->_core_data.vars_pkg.cal_id = 1;
                tdm_gh2_filter_ovsb(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 1> \n");
                _tdm->_core_data.vars_pkg.cal_id = 5;
                tdm_gh2_filter_ovsb(_tdm);
                break;
            case 2: case 6:
                TDM_PRINT0("TDM: <IDB PIPE 2> \n");
                _tdm->_core_data.vars_pkg.cal_id = 2;
                tdm_gh2_filter_ovsb(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 2> \n");
                _tdm->_core_data.vars_pkg.cal_id = 6;
                tdm_gh2_filter_ovsb(_tdm);
                break;
            case 3: case 7:
                TDM_PRINT0("TDM: <IDB PIPE 3> \n");
                _tdm->_core_data.vars_pkg.cal_id = 3;
                tdm_gh2_filter_ovsb(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 3> \n");
                _tdm->_core_data.vars_pkg.cal_id = 7;
                tdm_gh2_filter_ovsb(_tdm);
                break;
            default:
                break;
        }
        /* restore the caller's calendar selection */
        _tdm->_core_data.vars_pkg.cal_id = param_cal_id;
    }
    return (_tdm->_chip_exec[TDM_CHIP_EXEC__PARSE](_tdm));
}

/**
 * @name: tdm_gh2_filter_lr_jitter
 * @param: _tdm      - chip/core state
 * @param: min_speed - only ports at or above this speed are smoothed
 *
 * Reduce same-port spacing jitter for line-rate ports: for every slot of a
 * qualifying port whose up/down distance to the port's neighbouring slots
 * violates the [space_min, space_max] window, shift the slot toward the
 * midpoint one swap at a time, as long as the shift conditions hold and
 * neither involved port is >= 100G.  Returns the number of swaps applied.
 */
int tdm_gh2_filter_lr_jitter(tdm_mod_t *_tdm, int min_speed)
{
    int i, j, k, filter_cnt=0, x, y, chk_x, chk_y, spd_x, spd_y;
    int m, port, port_speed, idx_curr, space_min, space_max,
        dist_up, dist_dn, dist_mv;
    int port_bmp[GH2_NUM_EXT_PORTS];
    int param_lr_limit, param_ancl_num, param_cal_len,
        param_phy_lo, param_phy_hi, param_lr_en;
    int *param_cal_main;
    enum port_speed_e *param_speeds;

    param_lr_limit = _tdm->_chip_data.soc_pkg.lr_idx_limit;
    param_ancl_num = _tdm->_chip_data.soc_pkg.tvec_size;
    param_cal_len  = param_lr_limit + param_ancl_num;
    param_phy_lo   = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi   = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_lr_en    = _tdm->_core_data.vars_pkg.lr_enable;
    param_speeds   = _tdm->_chip_data.soc_pkg.speed;

    for (i=0; i<GH2_NUM_EXT_PORTS; i++) {
        port_bmp[i] = 0;   /* ports already processed */
    }
    TDM_PRINT0("Smooth LR port jitter\n\n");
    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id, param_cal_main);
    if (param_lr_en==BOOL_TRUE && min_speed>=SPEED_10G) {
        for (i=0; i<param_cal_len; i++) {
            port = param_cal_main[i];
            if (port >= param_phy_lo && port <= param_phy_hi &&
                port < GH2_NUM_EXT_PORTS) {
                port_speed = param_speeds[port];
                if (port_speed<min_speed || port_bmp[port]==1) {
                    continue;
                }
                port_bmp[port] = 1;
                tdm_gh2_filter_calc_jitter(port_speed, param_cal_len,
                                           &space_min, &space_max);
                /* walk every slot of this port (scanning backwards from i) */
                for (j=0; j<param_cal_len; j++) {
                    k = (i+param_cal_len-j)%param_cal_len;
                    if (param_cal_main[k]==port) {
                        idx_curr = k;
                        dist_up = tdm_gh2_filter_get_same_port_dist(idx_curr,
                                      TDM_DIR_UP, param_cal_main, param_cal_len);
                        dist_dn = tdm_gh2_filter_get_same_port_dist(idx_curr,
                                      TDM_DIR_DN, param_cal_main, param_cal_len);
                        /* filter port if space violation is detected: up */
                        if (dist_up>space_max || dist_dn<space_min) {
                            TDM_PRINT7("%s, port %d, slot %d, dist_up %d > %d, dist_dn %d < %d\n",
                                       "TDM: [Linerate jitter 1]", port, idx_curr,
                                       dist_up, space_max, dist_dn, space_min);
                            dist_mv = (dist_up-dist_dn)/2;
                            for (m=1; m<=dist_mv; m++) {
                                /* check spacing for neighbor and target port */
                                x = (idx_curr + param_cal_len - m) % param_cal_len;
                                y = (x + 1) % param_cal_len;
                                chk_x = tdm_gh2_filter_chk_slot_shift_cond(x,
                                            TDM_DIR_DN, param_cal_len, param_cal_main, _tdm);
                                chk_y = tdm_gh2_filter_chk_slot_shift_cond(y,
                                            TDM_DIR_UP, param_cal_len, param_cal_main, _tdm);
                                spd_x = tdm_gh2_filter_get_port_speed(param_cal_main[x], _tdm);
                                spd_y = tdm_gh2_filter_get_port_speed(param_cal_main[y], _tdm);
                                /* shift port */
                                if (chk_x == PASS && chk_y == PASS &&
                                    spd_x < SPEED_100G && spd_y < SPEED_100G) {
                                    param_cal_main[y] = param_cal_main[x];
                                    param_cal_main[x] = port;
                                    filter_cnt++;
                                    TDM_PRINT3("\t Shift port %d UP from slot %d to slot %d\n",
                                               port, y, x);
                                } else {
                                    break;
                                }
                            }
                        }
                        /* filter port if space violation is detected: down */
                        else if (dist_up<space_min || dist_dn>space_max) {
                            TDM_PRINT7("%s port %d, slot %d, dist_up %d < %d, dist_dn %d > %d\n",
                                       "TDM: [Linerate jitter 2]", port, idx_curr,
                                       dist_up, space_min, dist_dn, space_max);
                            dist_mv = (dist_dn-dist_up)/2;
                            for (m=1; m<=dist_mv; m++) {
                                /* check spacing for neighbor and target port */
                                x = (idx_curr + m) % param_cal_len;
                                y = (x + param_cal_len - 1) % param_cal_len;
                                chk_x = tdm_gh2_filter_chk_slot_shift_cond(x,
                                            TDM_DIR_UP, param_cal_len, param_cal_main, _tdm);
                                chk_y = tdm_gh2_filter_chk_slot_shift_cond(y,
                                            TDM_DIR_DN, param_cal_len, param_cal_main, _tdm);
                                spd_x = tdm_gh2_filter_get_port_speed(param_cal_main[x], _tdm);
                                spd_y = tdm_gh2_filter_get_port_speed(param_cal_main[y], _tdm);
                                /* shift port */
                                if (chk_x == PASS && chk_y == PASS &&
                                    spd_x < SPEED_100G && spd_y < SPEED_100G) {
                                    param_cal_main[y] = param_cal_main[x];
                                    param_cal_main[x] = port;
                                    filter_cnt++;
                                    TDM_PRINT3("\t Shift port %d DOWN from slot %d to slot %d\n",
                                               port, y, x);
                                } else {
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    TDM_SML_BAR
    return (filter_cnt);
}

/**
 * @name: tdm_gh2_filter_lr
 * @param: _tdm
 *
 * Apply line-rate jitter smoothing at increasing speed thresholds; stop as
 * soon as a pass makes no change.  Always returns PASS.
 */
int tdm_gh2_filter_lr(tdm_mod_t *_tdm)
{
    if (tdm_gh2_filter_lr_jitter(_tdm, SPEED_10G) == 0){
        return PASS;
    }
    if (tdm_gh2_filter_lr_jitter(_tdm, SPEED_20G) == 0) {
        return PASS;
    }
    if (tdm_gh2_filter_lr_jitter(_tdm, SPEED_40G) == 0) {
        return PASS;
    }
    return PASS;
}

/**
 * @name: tdm_gh2_filter_ovsb_4x_mix
 * @param: _tdm
 *
 * Smooth 4-in-a-row OVSB clusters in mixed calendars where OVSB slots
 * outnumber line-rate slots (lr < os < 2*lr).  OVSB and ancillary tokens are
 * treated alike when matching the cluster.  Returns the number of swaps.
 */
int tdm_gh2_filter_ovsb_4x_mix(tdm_mod_t *_tdm)
{
    int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0,
        x, y, port_x, port_y, tsc_x, tsc_y, spd_x, spd_y, sister_prox;
    int param_lr_limit, param_ancl_num, param_cal_len, param_token_ovsb,
        param_token_ancl, param_phy_lo, param_phy_hi,
        param_lr_en, param_os_en;
    int *param_cal_main;

    param_lr_limit   = _tdm->_chip_data.soc_pkg.lr_idx_limit;
    param_ancl_num   = _tdm->_chip_data.soc_pkg.tvec_size;
    param_cal_len    = param_lr_limit + param_ancl_num;
    param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token;
    param_token_ancl = _tdm->_chip_data.soc_pkg.soc_vars.ancl_token;
    param_phy_lo     = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi     = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_lr_en      = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en      = _tdm->_core_data.vars_pkg.os_enable;

    TDM_PRINT0("Smooth quadrant OVSB pattern: OVSB_OVSB_OVSB_OVSB \n\n");
    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main);
    if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){
        lr_slot_cnt = 0;
        os_slot_cnt = 0;
        for (i=0; i<param_cal_len; i++){
            if (param_cal_main[i]==param_token_ovsb){
                os_slot_cnt++;
            } else {
                lr_slot_cnt++;
            }
        }
        lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0);
        if (os_slot_cnt>0 && lr_slot_cnt>0 &&
            os_slot_cnt>lr_slot_cnt && os_slot_cnt<2*lr_slot_cnt){
            for (i=0; i<(param_cal_len-6); i++){
                /* ovsb_ovsb_ovsb_ovsb_x_ovsb_y -> ovsb_ovsb_ovsb_ovsb_x_ovsb_ovsb_y */
                if ((param_cal_main[i]  ==param_token_ovsb || param_cal_main[i]  ==param_token_ancl) &&
                    (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) &&
                    (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) &&
                    (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) &&
                    (param_cal_main[i+4]!=param_token_ovsb && param_cal_main[i+4]!=param_token_ancl) &&
                    (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) &&
                    (param_cal_main[i+6]!=param_token_ovsb && param_cal_main[i+6]!=param_token_ancl) ){
                    x = i + 4;
                    port_x = param_cal_main[x];
                    param_cal_main[x]  = param_cal_main[x-1];
                    param_cal_main[x-1]= port_x;
                    filter_cnt++;
                    TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                }
                /* ovsb_ovsb_ovsb_ovsb_x_y -> ovsb_ovsb_x_ovsb_ovsb_y */
                else if ((param_cal_main[i]  ==param_token_ovsb || param_cal_main[i]  ==param_token_ancl) &&
                         (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) &&
                         (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) &&
                         (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) &&
                         (param_cal_main[i+4]!=param_token_ovsb && param_cal_main[i+4]!=param_token_ancl) &&
                         (param_cal_main[i+5]!=param_token_ovsb && param_cal_main[i+5]!=param_token_ancl) ){
                    sister_prox = PASS;
                    x = i + 4;
                    y = (i + param_cal_len - 1) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi) {
                        spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                        spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        if (tsc_x == tsc_y ||
                            spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x-2];
                        param_cal_main[x-2]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-2, x);
                    }
                }
                /* y_ovsb_x_ovsb_ovsb_ovsb_ovsb -> y_ovsb_ovsb_x_ovsb_ovsb_ovsb */
                else if ((param_cal_main[i]  !=param_token_ovsb && param_cal_main[i]  !=param_token_ancl) &&
                         (param_cal_main[i+1]==param_token_ovsb || param_cal_main[i+1]==param_token_ancl) &&
                         (param_cal_main[i+2]!=param_token_ovsb && param_cal_main[i+2]!=param_token_ancl) &&
                         (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) &&
                         (param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl) &&
                         (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) &&
                         (param_cal_main[i+6]==param_token_ovsb || param_cal_main[i+6]==param_token_ancl) ){
                    x = i + 2;
                    port_x = param_cal_main[x];
                    param_cal_main[x]  = param_cal_main[x+1];
                    param_cal_main[x+1]= port_x;
                    filter_cnt++;
                    TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x, x+1);
                }
                /* y_x_ovsb_ovsb_ovsb_ovsb -> y_ovsb_ovsb_x_ovsb_ovsb */
                else if ((param_cal_main[i]  !=param_token_ovsb && param_cal_main[i]  !=param_token_ancl) &&
                         (param_cal_main[i+1]!=param_token_ovsb && param_cal_main[i+1]!=param_token_ancl) &&
                         (param_cal_main[i+2]==param_token_ovsb || param_cal_main[i+2]==param_token_ancl) &&
                         (param_cal_main[i+3]==param_token_ovsb || param_cal_main[i+3]==param_token_ancl) &&
                         (param_cal_main[i+4]==param_token_ovsb || param_cal_main[i+4]==param_token_ancl) &&
                         (param_cal_main[i+5]==param_token_ovsb || param_cal_main[i+5]==param_token_ancl) ){
                    sister_prox = PASS;
                    x = i + 1;
                    y = (i + 6) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi) {
                        spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                        spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        if (tsc_x == tsc_y ||
                            spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x+2];
                        param_cal_main[x+2]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x, x+2);
                    }
                }
            }
        }
        if (filter_cnt>0){
            TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt);
        }
    }
    TDM_SML_BAR
    return filter_cnt;
}

/**
 * @name: tdm_gh2_filter_ovsb_3x_mix
 * @param: _tdm
 *
 * Smooth 3-in-a-row OVSB clusters in mixed calendars (lr < os < 2*lr),
 * moving a neighbouring line-rate port into the cluster when the sister-port
 * check passes and neither involved port is >= 100G.  Returns swap count.
 */
int tdm_gh2_filter_ovsb_3x_mix(tdm_mod_t *_tdm)
{
    int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0,
        x, y, port_x, port_y, tsc_x, tsc_y, spd_x, spd_y, sister_prox;
    int param_lr_limit, param_ancl_num, param_cal_len, param_token_ovsb,
        param_phy_lo, param_phy_hi, param_lr_en, param_os_en;
    int *param_cal_main;

    param_lr_limit   = _tdm->_chip_data.soc_pkg.lr_idx_limit;
    param_ancl_num   = _tdm->_chip_data.soc_pkg.tvec_size;
    param_cal_len    = param_lr_limit + param_ancl_num;
    param_phy_lo     = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi     = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_token_ovsb = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token;
    param_lr_en      = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en      = _tdm->_core_data.vars_pkg.os_enable;

    TDM_PRINT0("Smooth triple OVSB pattern: OVSB_OVSB_OVSB \n\n");
    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main);
    if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){
        lr_slot_cnt = 0;
        os_slot_cnt = 0;
        for (i=0; i<param_cal_len; i++){
            if (param_cal_main[i]==param_token_ovsb){
                os_slot_cnt++;
            } else {
                lr_slot_cnt++;
            }
        }
        lr_slot_cnt = (lr_slot_cnt>param_ancl_num)?(lr_slot_cnt-param_ancl_num):(0);
        /* ovsb_ovsb_ovsb_x */
        if (os_slot_cnt>0 && lr_slot_cnt>0 &&
            os_slot_cnt>lr_slot_cnt && os_slot_cnt<2*lr_slot_cnt){
            for (i=0; i<(param_cal_len-6); i++){
                /* y_ovsb_ovsb_ovsb_x_ovsb_z -> y_ovsb_ovsb_x_ovsb_ovsb_z */
                if (param_cal_main[i]  !=param_token_ovsb &&
                    param_cal_main[i+1]==param_token_ovsb &&
                    param_cal_main[i+2]==param_token_ovsb &&
                    param_cal_main[i+3]==param_token_ovsb &&
                    param_cal_main[i+4]!=param_token_ovsb &&
                    !(param_cal_main[i+5]>=param_phy_lo &&
                      param_cal_main[i+5]<=param_phy_hi ) &&
                    param_cal_main[i+6]!=param_token_ovsb ){
                    sister_prox = PASS;
                    x = i+4;
                    y = i;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                    if (spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                        continue;
                    }
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi){
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y){
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x-1];
                        param_cal_main[x-1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                    }
                }
                /* y_ovsb_x_ovsb_ovsb_ovsb_z -> y_ovsb_ovsb_x_ovsb_ovsb_z */
                else if (param_cal_main[i]  !=param_token_ovsb &&
                         /* param_cal_main[i+1]==param_token_ovsb && */
                         !(param_cal_main[i+1]>=param_phy_lo &&
                           param_cal_main[i+1]<=param_phy_hi ) &&
                         param_cal_main[i+2]!=param_token_ovsb &&
                         param_cal_main[i+3]==param_token_ovsb &&
                         param_cal_main[i+4]==param_token_ovsb &&
                         param_cal_main[i+5]==param_token_ovsb &&
                         param_cal_main[i+6]!=param_token_ovsb) {
                    sister_prox = PASS;
                    x = i+2;
                    y = i+6;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                    if (spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                        continue;
                    }
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi){
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y){
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x+1];
                        param_cal_main[x+1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                    }
                }
                /* y_x_ovsb_ovsb_ovsb_z -> y_ovsb_x_ovsb_ovsb_z */
                else if (param_cal_main[i]  !=param_token_ovsb &&
                         param_cal_main[i+1]!=param_token_ovsb &&
                         param_cal_main[i+2]==param_token_ovsb &&
                         param_cal_main[i+3]==param_token_ovsb &&
                         param_cal_main[i+4]==param_token_ovsb &&
                         param_cal_main[i+5]!=param_token_ovsb) {
                    sister_prox = PASS;
                    x = i+1;
                    y = i+5;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                    if (spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                        continue;
                    }
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi){
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y){
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x+1];
                        param_cal_main[x+1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                    }
                }
                /* y_ovsb_ovsb_ovsb_x_z -> y_ovsb_ovsb_x_ovsb_z */
                else if (param_cal_main[i]  !=param_token_ovsb &&
                         param_cal_main[i+1]==param_token_ovsb &&
                         param_cal_main[i+2]==param_token_ovsb &&
                         param_cal_main[i+3]==param_token_ovsb &&
                         param_cal_main[i+4]!=param_token_ovsb &&
                         param_cal_main[i+5]!=param_token_ovsb) {
                    sister_prox = PASS;
                    x = i+4;
                    y = i;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    spd_y = tdm_gh2_filter_get_port_speed(port_y, _tdm);
                    if (spd_x >= SPEED_100G || spd_y >= SPEED_100G) {
                        continue;
                    }
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi){
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y){
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox==PASS){
                        param_cal_main[x]  = param_cal_main[x-1];
                        param_cal_main[x-1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                    }
                }
            }
        }
        if (filter_cnt>0){
            TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt);
        }
    }
    TDM_SML_BAR
    return filter_cnt;
}

/**
 * @name: tdm_gh2_filter_ovsb_2x_mix
 * @param: _tdm
 *
 * Smooth 2-in-a-row OVSB clusters in mixed calendars, first on 4-slot
 * windows and then on 6-slot windows.
 * NOTE(review): the two 6-slot branches perform double swaps but do NOT
 * increment filter_cnt -- looks like a possible omission in the original;
 * behavior kept as-is.  Returns the number of counted swaps.
 */
int tdm_gh2_filter_ovsb_2x_mix(tdm_mod_t *_tdm)
{
    int i, lr_slot_cnt, os_slot_cnt, filter_cnt=0,
        x, y, port_x, port_y, tsc_x, tsc_y, spd_x, sister_prox;
    int param_lr_limit, param_ancl_num, param_cal_len, param_space_sister,
        param_token_ovsb, param_phy_lo, param_phy_hi,
        param_lr_en, param_os_en;
    int *param_cal_main;

    param_lr_limit    = _tdm->_chip_data.soc_pkg.lr_idx_limit;
    param_ancl_num    = _tdm->_chip_data.soc_pkg.tvec_size;
    param_cal_len     = param_lr_limit + param_ancl_num;
    param_phy_lo      = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_lo;
    param_phy_hi      = _tdm->_chip_data.soc_pkg.soc_vars.fp_port_hi;
    param_token_ovsb  = _tdm->_chip_data.soc_pkg.soc_vars.ovsb_token;
    param_space_sister= _tdm->_core_data.rule__prox_port_min;
    param_lr_en       = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en       = _tdm->_core_data.vars_pkg.os_enable;

    TDM_PRINT0("Smooth dual OVSB pattern: OVSB_OVSB \n\n");
    TDM_SEL_CAL(_tdm->_core_data.vars_pkg.cal_id,param_cal_main);
    if (param_os_en == BOOL_TRUE && param_lr_en == BOOL_TRUE) {
        lr_slot_cnt = 0;
        os_slot_cnt = 0;
        for (i = 0; i < param_cal_len; i++){
            if (param_cal_main[i] == param_token_ovsb) {
                os_slot_cnt++;
            } else {
                lr_slot_cnt++;
            }
        }
        lr_slot_cnt = (lr_slot_cnt > param_ancl_num) ?
                      (lr_slot_cnt - param_ancl_num) : 0;
        /* x_ovsb_ovsb_y */
        if (os_slot_cnt > 0 && lr_slot_cnt > 0) {
            /* 4-slot pattern */
            for (i = 0; i < (param_cal_len - 3); i++) {
                /* ovsb_ovsb_x_z -> ovsb_x_ovsb_z */
                if (param_cal_main[i]   == param_token_ovsb &&
                    param_cal_main[i+1] == param_token_ovsb &&
                    param_cal_main[i+2] != param_token_ovsb &&
                    param_cal_main[i+3] != param_token_ovsb){
                    sister_prox = PASS;
                    x = i + 2;
                    y = (x + param_cal_len - param_space_sister) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    if (spd_x >= SPEED_100G) {
                        continue;
                    }
                    if (port_x >= param_phy_lo && port_x <= param_phy_hi &&
                        port_y >= param_phy_lo && port_y <= param_phy_hi){
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x == tsc_y) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox == PASS) {
                        param_cal_main[x]  = param_cal_main[x-1];
                        param_cal_main[x-1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                    }
                }
                /* z_x_ovsb_ovsb -> z_ovsb_x_ovsb */
                else if (param_cal_main[i]   != param_token_ovsb &&
                         param_cal_main[i+1] != param_token_ovsb &&
                         param_cal_main[i+2] == param_token_ovsb &&
                         param_cal_main[i+3] == param_token_ovsb){
                    sister_prox = PASS;
                    x = i + 1;
                    y = (x + param_space_sister) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    if (spd_x >= SPEED_100G) {
                        continue;
                    }
                    if (port_x >= param_phy_lo && port_x <= param_phy_hi &&
                        port_y >= param_phy_lo && port_y <= param_phy_hi) {
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x == tsc_y) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox == PASS) {
                        param_cal_main[x]  = param_cal_main[x+1];
                        param_cal_main[x+1]= port_x;
                        filter_cnt++;
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                    }
                }
            }
            /* 6-slot pattern */
            for (i = 0; i < (param_cal_len - 5); i++) {
                /* z_y_ovsb_x_ovsb_ovsb -> z_ovsb_y_ovsb_x_ovsb */
                if (param_cal_main[i]   != param_token_ovsb &&
                    param_cal_main[i+1] != param_token_ovsb &&
                    param_cal_main[i+2] == param_token_ovsb &&
                    param_cal_main[i+3] != param_token_ovsb &&
                    param_cal_main[i+4] == param_token_ovsb &&
                    param_cal_main[i+5] == param_token_ovsb) {
                    sister_prox = PASS;
                    x = i + 3;
                    y = (x + param_space_sister) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    /* coverity[returned_value : FALSE] */
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    /* if (spd_x >= SPEED_100G) { continue; } */
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi) {
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox == PASS) {
                        x = i + 3;
                        y = i + 1;
                        port_x = param_cal_main[x];
                        port_y = param_cal_main[y];
                        param_cal_main[x]  = param_cal_main[x+1];
                        param_cal_main[x+1]= port_x;
                        param_cal_main[y]  = param_cal_main[y+1];
                        param_cal_main[y+1]= port_y;
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", y+1, y);
                        TDM_PRINT2("Shift OVSB slot UP from %03d to %03d\n", x+1, x);
                    }
                }
                /* ovsb_ovsb_x_ovsb_y_z -> ovsb_x_ovsb_y_ovsb_z */
                else if (param_cal_main[i]   == param_token_ovsb &&
                         param_cal_main[i+1] == param_token_ovsb &&
                         param_cal_main[i+2] != param_token_ovsb &&
                         param_cal_main[i+3] == param_token_ovsb &&
                         param_cal_main[i+4] != param_token_ovsb &&
                         param_cal_main[i+5] != param_token_ovsb) {
                    sister_prox = PASS;
                    x = i + 2;
                    y = (x + param_cal_len - param_space_sister) % param_cal_len;
                    port_x = param_cal_main[x];
                    port_y = param_cal_main[y];
                    /* coverity[returned_value : FALSE] */
                    spd_x = tdm_gh2_filter_get_port_speed(port_x, _tdm);
                    if (spd_x >= SPEED_100G) {
                        continue;
                    }
                    if (port_x>=param_phy_lo && port_x<=param_phy_hi &&
                        port_y>=param_phy_lo && port_y<=param_phy_hi) {
                        tsc_x = tdm_gh2_filter_get_port_pm(port_x, _tdm);
                        tsc_y = tdm_gh2_filter_get_port_pm(port_y, _tdm);
                        if (tsc_x==tsc_y) {
                            sister_prox = FAIL;
                        }
                    }
                    if (sister_prox == PASS) {
                        x = i + 2;
                        y = i + 4;
                        port_x = param_cal_main[x];
                        port_y = param_cal_main[y];
                        param_cal_main[x]  = param_cal_main[x-1];
                        param_cal_main[x-1]= port_x;
                        param_cal_main[y]  = param_cal_main[y-1];
                        param_cal_main[y-1]= port_y;
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", x-1, x);
                        TDM_PRINT2("Shift OVSB slot DOWN from %03d to %03d\n", y-1, y);
                    }
                }
            }
        }
        if (filter_cnt>0){
            TDM_PRINT1("\nFilter done: --- filter applied <%d> times\n", filter_cnt);
        }
    }
    TDM_SML_BAR
    return filter_cnt;
}

/**
 * @name: tdm_gh2_filter_ovsb_mix
 * @param: _tdm
 *
 * Run the mixed-calendar OVSB smoothing passes (4x, 3x, 2x).  Returns PASS.
 * (original doc stub said tdm_gh2_filter_ovsb -- corrected)
 */
int tdm_gh2_filter_ovsb_mix(tdm_mod_t *_tdm)
{
    tdm_gh2_filter_ovsb_4x_mix(_tdm);
    tdm_gh2_filter_ovsb_3x_mix(_tdm);
    tdm_gh2_filter_ovsb_2x_mix(_tdm);
    return PASS;
}

/**
 * @name: tdm_gh2_filter_mix
 * @param: _tdm
 *
 * Top-level mixed-calendar filter: OVSB smoothing per IDB/MMU pipe pair,
 * then line-rate jitter smoothing, then chain to the chip parser.
 * (original doc stub said tdm_gh2_filter -- corrected)
 */
int tdm_gh2_filter_mix( tdm_mod_t *_tdm )
{
    int param_lr_en, param_os_en, param_cal_id;

    param_lr_en  = _tdm->_core_data.vars_pkg.lr_enable;
    param_os_en  = _tdm->_core_data.vars_pkg.os_enable;
    param_cal_id = _tdm->_core_data.vars_pkg.cal_id;

    TDM_BIG_BAR
    TDM_PRINT0("TDM: Filters Applied to smooth TDM calendar\n\n");
    TDM_SML_BAR
    if (param_os_en==BOOL_TRUE && param_lr_en==BOOL_TRUE){
        switch(param_cal_id){
            case 0: case 4:
                TDM_PRINT0("TDM: <IDB PIPE 0> \n");
                _tdm->_core_data.vars_pkg.cal_id = 0;
                tdm_gh2_filter_ovsb_mix(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 0> \n");
                _tdm->_core_data.vars_pkg.cal_id = 4;
                tdm_gh2_filter_ovsb_mix(_tdm);
                break;
            case 1: case 5:
                TDM_PRINT0("TDM: <IDB PIPE 1> \n");
                _tdm->_core_data.vars_pkg.cal_id = 1;
                tdm_gh2_filter_ovsb_mix(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 1> \n");
                _tdm->_core_data.vars_pkg.cal_id = 5;
                tdm_gh2_filter_ovsb_mix(_tdm);
                break;
            case 2: case 6:
                TDM_PRINT0("TDM: <IDB PIPE 2> \n");
                _tdm->_core_data.vars_pkg.cal_id = 2;
                tdm_gh2_filter_ovsb_mix(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 2> \n");
                _tdm->_core_data.vars_pkg.cal_id = 6;
                tdm_gh2_filter_ovsb_mix(_tdm);
                break;
            case 3: case 7:
                TDM_PRINT0("TDM: <IDB PIPE 3> \n");
                _tdm->_core_data.vars_pkg.cal_id = 3;
                tdm_gh2_filter_ovsb_mix(_tdm);
                TDM_PRINT0("TDM: <MMU PIPE 3> \n");
                _tdm->_core_data.vars_pkg.cal_id = 7;
                tdm_gh2_filter_ovsb_mix(_tdm);
                break;
            default:
                break;
        }
        /* restore the caller's calendar selection */
        _tdm->_core_data.vars_pkg.cal_id = param_cal_id;
    }
    if (param_lr_en==BOOL_TRUE) {
        tdm_gh2_filter_lr(_tdm);
    }
    return (_tdm->_chip_exec[TDM_CHIP_EXEC__PARSE](_tdm));
}
/* ==== original file boundary: 794428.c ==== */
/* * Spin to C/C++ translator * Copyright 2011-2021 Total Spectrum Software Inc. * * +-------------------------------------------------------------------- * ¦ TERMS OF USE: MIT License * +-------------------------------------------------------------------- * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files * (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* +-------------------------------------------------------------------- */ #include <stdlib.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "common.h" #include "becommon.h" static AST *make_methodptr; AST * BuildMethodPointer(AST *ast) { Symbol *sym; AST *objast; AST *funcaddr; AST *result; AST *call; Function *func; sym = FindCalledFuncSymbol(ast, &objast, 0); if (!sym || sym->kind != SYM_FUNCTION) { if (sym) { ERROR(ast, "%s is not a function", sym->user_name); } else { ERROR(ast, "Internal error, unable to find function address"); } return ast; } func = (Function *)sym->val; if (func->is_static) { objast = AstInteger(0); } else if (objast == NULL) { objast = NewAST(AST_SELF, NULL, NULL); } else { objast = NewAST(AST_ADDROF, objast, NULL); } func->used_as_ptr = 1; if (func->callSites == 0) { MarkUsed(func, "func pointer"); } // save off the current @ node funcaddr = NewAST(AST_ADDROF, ast->left, ast->right); // create a call if (!make_methodptr) { make_methodptr = AstIdentifier("_make_methodptr"); } call = NewAST(AST_EXPRLIST, funcaddr, NULL); call = NewAST(AST_EXPRLIST, objast, call); result = NewAST(AST_FUNCCALL, make_methodptr, call); return result; } // find base for result variables static int resultOffset(Function *F, int offset) { switch(gl_interp_kind) { case INTERP_KIND_P1ROM: if (offset == 0) return 0; return 4+F->numparams*4 + (offset-1)*4; case INTERP_KIND_NUCODE: return offset + (F->numparams+4)*LONG_SIZE ; default: return offset; } } // number of results for Spin1 bytecode purposes static int BCGetNumResults(Function *F) { int n = F->numresults; return (n<=1) ? 
1 : n; } // number of results for Spin2/Nu code bytecode purposes static int DefaultGetNumResults(Function *F) { int n = F->numresults; return n; } // find base for parameter variables static int paramOffset(Function *F, int offset) { switch(gl_interp_kind) { case INTERP_KIND_P1ROM: return 4 + offset; // always one result pushed onto stack case INTERP_KIND_NUCODE: return offset; default: return offset + DefaultGetNumResults(F)*4; } } // find base for local variables static int localOffset(Function *F, int offset) { switch(gl_interp_kind) { case INTERP_KIND_P1ROM: return offset + (BCGetNumResults(F)+F->numparams)*4; // FIXME small variables case INTERP_KIND_NUCODE: return offset + (DefaultGetNumResults(F)+F->numparams+4)*4; default: return offset + (DefaultGetNumResults(F) + F->numparams)*4; } } // // normalize offsets for Spin compatibility // In Spin, function variables must be laid out with // results first, then parameters, then locals. // This function resets all variable offsets to give the // correct values. 
// static int resetOffsets(Symbol *sym, void *arg) { Function *F = (Function *)arg; switch (sym->kind) { case SYM_RESULT: sym->offset = resultOffset(F, sym->offset); break; case SYM_PARAMETER: sym->offset = paramOffset(F, sym->offset); break; case SYM_LOCALVAR: case SYM_TEMPVAR: sym->offset = localOffset(F, sym->offset); break; default: /* nothing to do */ break; } return 1; } /* convert offsets in local variables to their canonical values, depending on * how the interpreter wants them laid out */ void NormalizeVarOffsets(Function *F) { IterateOverSymbols(&F->localsyms, resetOffsets, (void *)F); } /* * evaluate any constant expressions within a string */ AST * EvalStringConst(AST *expr) { if (!expr) { return expr; } switch (expr->kind) { case AST_EXPRLIST: return NewAST(AST_EXPRLIST, EvalStringConst(expr->left), EvalStringConst(expr->right)); case AST_STRING: case AST_INTEGER: return expr; default: if (IsConstExpr(expr)) { return AstInteger(EvalConstExpr(expr)); } else { return expr; } } } static void StringAppend(Flexbuf *fb,AST *expr) { if(!expr) return; switch (expr->kind) { case AST_INTEGER: { int i = expr->d.ival; if (i < 0 || i>255) ERROR(expr,"Character out of range!"); flexbuf_putc(i,fb); } break; case AST_STRING: { flexbuf_addstr(fb,expr->d.string); } break; case AST_EXPRLIST: { if (expr->left) StringAppend(fb,expr->left); if (expr->right) StringAppend(fb,expr->right); } break; default: { ERROR(expr,"Unhandled AST kind %d in string expression",expr->kind); } break; } } void StringBuildBuffer(Flexbuf *fb, AST *expr) { StringAppend(fb, expr); flexbuf_addchar(fb, 0); } // Printf that auto-allocates some space (and never frees it, lol) char *auto_printf(size_t max,const char *format,...) { char *buffer = malloc(max); va_list args; va_start(args, format); vsnprintf(buffer,max,format,args); va_end(args); return buffer; } /* find the backend name for a symbol */ const char *BackendNameForSymbol(Symbol *sym) { Module *Q = sym->module ? 
sym->module : NULL; if (NuBytecodeOutput()) { return NuCodeSymbolName(sym); } return IdentifierModuleName(Q, sym->our_name); } /* * utility function for visiting modules and doing things to them (used by back ends primarily) */ int VisitRecursive(void *vptr, Module *P, VisitorFunc func, unsigned visitval) { Module *Q; AST *subobj; Module *save = current; Function *savecurf = curfunc; int change = 0; if (P->all_visitflags & visitval) return change; // already visited this one current = P; P->all_visitflags |= visitval; P->visitFlag = visitval; change |= (*func)(vptr, P); // compile intermediate code for submodules for (subobj = P->objblock; subobj; subobj = subobj->right) { if (subobj->kind != AST_OBJECT) { ERROR(subobj, "Internal Error: Expecting object AST"); break; } Q = (Module *)subobj->d.ptr; change |= VisitRecursive(vptr, Q, func, visitval); } // and for sub-submodules for (Q = P->subclasses; Q; Q = Q->subclasses) { change |= VisitRecursive(vptr, Q, func, visitval); } current = save; curfunc = savecurf; return change; }
65782.c
/*++ Copyright (c) 1995-1999 Microsoft Corporation Module Name: file.c Abstract: Domain Name System (DNS) Server Database file utility routines. Author: Jim Gilroy (jamesg) March 1995 Revision History: --*/ #include "dnssrv.h" // // File directory globals // // Initialized in srvcfg.c when directory info loaded. // PWSTR g_pFileDirectoryAppend; DWORD g_FileDirectoryAppendLength; PWSTR g_pFileBackupDirectoryAppend; DWORD g_FileBackupDirectoryAppendLength; // // Simplified file mapping routines // DNS_STATUS copyAnsiStringToUnicode( OUT LPWSTR pszUnicode, IN LPSTR pszAnsi ) /*++ Routine Description: Copy ANSI string to UNICODE. Assumes adequate length. Arguments: pszUnicode -- buffer to receive unicode string pszAnsi -- incoming ANSI string Return Value: ERROR_SUCCESS if successful. ErrorCode on errors. --*/ { DNS_STATUS status; ANSI_STRING ansiString; UNICODE_STRING unicodeString; RtlInitAnsiString( & ansiString, pszAnsi ); unicodeString.Length = 0; unicodeString.MaximumLength = MAX_PATH; unicodeString.Buffer = pszUnicode; status = RtlAnsiStringToUnicodeString( & unicodeString, & ansiString, FALSE // no allocation ); ASSERT( status == ERROR_SUCCESS ); return status; } DNS_STATUS OpenAndMapFileForReadW( IN PWSTR pwsFilePath, IN OUT PMAPPED_FILE pmfFile, IN BOOL fMustFind ) /*++ Routine Description: Opens and maps file. Note, does not log error for FILE_NOT_FOUND condition if fMustFind is not set -- no file is legitimate for secondary file. Arguments: pwsFilePath - name/path of file pmfFile - ptr to file mapping struct to hold results fMustFind - file must be found Return Value: ERROR_SUCCESS if file opened and mapped. ERROR_FILE_NOT_FOUND if file not found. ErrorCode on errors. 
--*/ { HANDLE hfile = NULL; HANDLE hmapping = NULL; PVOID pvdata; DWORD fileSizeLow; DWORD fileSizeHigh; DWORD status; // // Open the file // hfile = CreateFileW( pwsFilePath, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ); if ( hfile == INVALID_HANDLE_VALUE ) { status = GetLastError(); DNS_DEBUG( INIT, ( "Could not open file: %S\n", pwsFilePath )); if ( fMustFind || status != ERROR_FILE_NOT_FOUND ) { DNS_LOG_EVENT( DNS_EVENT_FILE_OPEN_ERROR, 1, (LPSTR *) &pwsFilePath, NULL, 0 ); } return status; } // // Get file size // fileSizeLow = GetFileSize( hfile, &fileSizeHigh ); if ( ( fileSizeLow == 0xFFFFFFFF ) && ( ( status = GetLastError() ) != NO_ERROR ) ) { DNS_DEBUG( INIT, ( "Map of file %S failed. Invalid file size: %d\n", pwsFilePath, status )); goto Failed; } hmapping = CreateFileMapping( hfile, NULL, PAGE_READONLY | SEC_COMMIT, 0, 0, NULL ); if ( hmapping == NULL ) { status = GetLastError(); DNS_DEBUG( INIT, ( "CreateFileMapping() failed for %S. Error = %d\n", pwsFilePath, status )); goto Failed; } pvdata = MapViewOfFile( hmapping, FILE_MAP_READ, 0, 0, 0 ); if ( pvdata == NULL ) { status = GetLastError(); DNS_DEBUG( INIT, ( "MapViewOfFile() failed for %s. Error = %d.\n", pwsFilePath, status )); goto Failed; } // // If we somehow mapped a file larger than 4GB, it must be RNT // = really new technology. // ASSERT( fileSizeHigh == 0 ); pmfFile->hFile = hfile; pmfFile->hMapping = hmapping; pmfFile->pvFileData = pvdata; pmfFile->cbFileBytes = fileSizeLow; return ERROR_SUCCESS; Failed: DNS_LOG_EVENT( DNS_EVENT_FILE_NOT_MAPPED, 1, (LPSTR *) &pwsFilePath, NULL, status ); if ( hmapping ) { CloseHandle( hmapping ); } if ( hfile ) { CloseHandle( hfile ); } return status; } DNS_STATUS OpenAndMapFileForReadA( IN LPSTR pwsFilePath, IN OUT PMAPPED_FILE pmfFile, IN BOOL fMustFind ) /*++ Routine Description: Opens and maps file. 
Note, does not log error for FILE_NOT_FOUND condition if fMustFind is not set -- no file is legitimate for secondary file. Arguments: pwsFilePath - name/path of file pmfFile - ptr to file mapping struct to hold results fMustFind - file must be found Return Value: ERROR_SUCCESS if file opened and mapped. ERROR_FILE_NOT_FOUND if file not found. ErrorCode on errors. --*/ { DNS_STATUS status; WCHAR szunicode[ MAX_PATH ]; status = copyAnsiStringToUnicode( szunicode, pwsFilePath ); if ( status != ERROR_SUCCESS ) { return status; } return OpenAndMapFileForReadW( szunicode, pmfFile, fMustFind ); } VOID CloseMappedFile( IN PMAPPED_FILE pmfFile ) /*++ Routine Description: Closes mapped file. Arguments: hmapfile - ptr to mapped file struct Return Value: None. --*/ { UnmapViewOfFile( pmfFile->pvFileData ); CloseHandle( pmfFile->hMapping ); CloseHandle( pmfFile->hFile ); } // // File writing // HANDLE OpenWriteFileExW( IN PWSTR pwsFileName, IN BOOLEAN fAppend ) /*++ Routine Description: Open file for write. Arguments: pwsFileName -- path to file to write fAppend -- if TRUE append; if FALSE overwrite Return Value: Handle to file, if successful. NULL otherwise. --*/ { HANDLE hfile; // // open file for write // hfile = CreateFileW( pwsFileName, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ, // let folks use "list.exe" NULL, // no security fAppend ? OPEN_ALWAYS : CREATE_ALWAYS, 0, NULL ); if ( hfile == INVALID_HANDLE_VALUE ) { DWORD status = GetLastError(); PVOID parg = pwsFileName; DNS_LOG_EVENT( DNS_EVENT_FILE_NOT_OPENED_FOR_WRITE, 1, & parg, NULL, status ); DNS_DEBUG( ANY, ( "ERROR: Unable to open file %S for write.\n", pwsFileName )); hfile = NULL; } return( hfile ); } HANDLE OpenWriteFileExA( IN LPSTR pwsFileName, IN BOOLEAN fAppend ) /*++ Routine Description: Open file for write. Arguments: pwsFileName -- path to file to write fAppend -- if TRUE append; if FALSE overwrite Return Value: Handle to file, if successful. NULL otherwise. 
--*/ { DNS_STATUS status; WCHAR szunicode[MAX_PATH]; status = copyAnsiStringToUnicode( szunicode, pwsFileName ); if ( status != ERROR_SUCCESS ) { return( NULL ); } return OpenWriteFileExW( szunicode, fAppend ); } BOOL FormattedWriteFile( IN HANDLE hfile, IN PCHAR pszFormat, ... ) /*++ Routine Description: Write formatted string to file. Arguments: pszFormat -- standard C format string ... -- standard arg list Return Value: TRUE if successful write. FALSE on error. --*/ { DNS_STATUS status; va_list arglist; CHAR OutputBuffer[ 1024 ]; ULONG length; BOOL ret; // // print format string to buffer // va_start( arglist, pszFormat ); status = StringCchVPrintfA( OutputBuffer, sizeofarray( OutputBuffer ), pszFormat, arglist ); va_end( arglist ); if ( FAILED( status ) ) { return FALSE; } // // write resulting buffer to file // length = strlen( OutputBuffer ); ret = WriteFile( hfile, (LPVOID) OutputBuffer, length, &length, NULL ); if ( !ret ) { DWORD st = GetLastError(); DNS_LOG_EVENT( DNS_EVENT_WRITE_FILE_FAILURE, 0, NULL, NULL, st ); DNS_DEBUG( ANY, ( "ERROR: WriteFile failed, err = 0x%08lx.\n", st )); } return ret; } // FormattedWriteFile VOID ConvertUnixFilenameToNt( IN OUT LPSTR pwsFileName ) /*++ Routine Description: Replace UNIX slash, with NT backslash. Arguments: pszFilename -- filename to convert, must be NULL terminated Return Value: None. --*/ { if ( ! pwsFileName ) { return; } while ( *pwsFileName ) { if ( *pwsFileName == '/' ) { *pwsFileName = '\\'; } pwsFileName++; } } DWORD WriteMessageToFile( IN HANDLE hFile, IN DWORD dwMessageId, ... ) /*++ Routine Description: Write message to file. Arguments: hFile -- handle to file dwMessageId -- message id to write ... -- argument strings Return Value: Number of bytes written. Zero if failure. 
--*/ { DWORD writeLength; PVOID messageBuffer; va_list arglist; // // write formatted message to buffer // - call allocates message buffer // va_start( arglist, dwMessageId ); writeLength = FormatMessageW( FORMAT_MESSAGE_ALLOCATE_BUFFER // allocate msg buffer | FORMAT_MESSAGE_FROM_HMODULE, NULL, // message table in this module dwMessageId, 0, // default country ID. (LPTSTR) &messageBuffer, 0, &arglist ); // // write formatted message to file // - note, using unicode version, so write length is twice // message length in chars // - free formatted message buffer // if ( writeLength ) { writeLength *= 2; WriteFile( hFile, messageBuffer, writeLength, & writeLength, NULL ); LocalFree( messageBuffer ); } return( writeLength ); } // // File buffer routines // BOOL WriteBufferToFile( IN PBUFFER pBuffer ) /*++ Routine Description: Write buffer to file. Arguments: pBuffer -- ptr to buffer struct containing data to write Return Value: TRUE if successful write. FALSE on error. --*/ { ULONG length; BOOL ret; DNS_DEBUG( WRITE, ( "Writing buffer to file.\n" " handle = %d\n" " length = %d\n", pBuffer->hFile, (pBuffer->pchCurrent - pBuffer->pchStart) )); // // write current data in buffer to file // ret = WriteFile( pBuffer->hFile, (PVOID) pBuffer->pchStart, (DWORD)(pBuffer->pchCurrent - pBuffer->pchStart), &length, NULL ); if ( !ret ) { DWORD status = GetLastError(); DNS_LOG_EVENT( DNS_EVENT_WRITE_FILE_FAILURE, 0, NULL, NULL, status ); DNS_DEBUG( ANY, ( "ERROR: WriteFile failed, err = 0x%08lx.\n", status )); } RESET_BUFFER( pBuffer ); return ret; } // WriteBufferToFile BOOL FormattedWriteToFileBuffer( IN PBUFFER pBuffer, IN PCHAR pszFormat, ... ) /*++ Routine Description: Write formatted string to file buffer. Arguments: pszFormat -- standard C format string ... -- standard arg list Return Value: TRUE if successful write. FALSE on error. 
--*/ { va_list arglist; ULONG length; // // if buffer approaching full, write it // length = (ULONG)(pBuffer->pchCurrent - pBuffer->pchStart); if ( (INT)(pBuffer->cchLength - length) < MAX_FORMATTED_BUFFER_WRITE ) { WriteBufferToFile( pBuffer ); ASSERT( IS_EMPTY_BUFFER(pBuffer) ); } // // print format string into buffer // va_start( arglist, pszFormat ); vsprintf( pBuffer->pchCurrent, pszFormat, arglist ); va_end( arglist ); // // reset buffer for write // length = strlen( pBuffer->pchCurrent ); pBuffer->pchCurrent += length; ASSERT( pBuffer->pchCurrent < pBuffer->pchEnd ); return TRUE; } // FormattedWriteToFileBuffer VOID FASTCALL InitializeFileBuffer( IN PBUFFER pBuffer, IN PCHAR pData, IN DWORD dwLength, IN HANDLE hFile ) /*++ Routine Description: Initialize file buffer. Arguments: pBuffer -- ptr to buffer struct containing data to write Return Value: None. --*/ { pBuffer->cchLength = dwLength; pBuffer->cchBytesLeft = dwLength; pBuffer->pchStart = pData; pBuffer->pchCurrent = pData; pBuffer->pchEnd = pData + dwLength; pBuffer->hFile = hFile; pBuffer->dwLineCount = 0; } VOID CleanupNonFileBuffer( IN PBUFFER pBuffer ) /*++ Routine Description: Cleanup non-file buffer if has heap data. Arguments: pBuffer -- ptr to buffer struct containing data to write Return Value: None. --*/ { if ( pBuffer->hFile == BUFFER_NONFILE_HEAP ) { FREE_HEAP( pBuffer->pchStart ); pBuffer->pchStart = NULL; pBuffer->hFile = NULL; } } // // DNS specific file utilities // BOOL File_CreateDatabaseFilePath( IN OUT PWCHAR pwFileBuffer, IN OUT PWCHAR pwBackupBuffer, OPTIONAL IN PWSTR pwsFileName ) /*++ Routine Description: Creates full path name to database file. 
Arguments: pwFileBuffer -- buffer to hold file path name - it is assumed that this buffer will be able to hold MAX_PATH characters pwBackupBuffer -- buffer to hold backup file path name, pwszFileName -- database file name Return Value: TRUE -- if successful FALSE -- on error; filename, directory or full path invalid; if full backup path invalid, simply return empty string --*/ { INT lengthFileName; ASSERT( SrvCfg_pwsDatabaseDirectory ); ASSERT( g_pFileDirectoryAppend ); ASSERT( g_pFileBackupDirectoryAppend ); DNS_DEBUG( INIT2, ( "File_CreateDatabaseFilePath()\n" " SrvCfg directory = %S\n" " file name = %S\n", SrvCfg_pwsDatabaseDirectory, pwsFileName )); // // Initialize output buffers (makes PREFIX happy). // if ( pwFileBuffer ) { *pwFileBuffer = L'\0'; } if ( pwBackupBuffer ) { *pwBackupBuffer = L'\0'; } // // get directory, verify name suitability // if ( !pwsFileName || !SrvCfg_pwsDatabaseDirectory ) { return FALSE; } lengthFileName = wcslen( pwsFileName ); if ( g_FileDirectoryAppendLength + lengthFileName >= MAX_PATH ) { PVOID argArray[2]; argArray[0] = pwsFileName; argArray[1] = SrvCfg_pwsDatabaseDirectory; DNS_LOG_EVENT( DNS_EVENT_FILE_PATH_TOO_LONG, 2, argArray, NULL, 0 ); DNS_DEBUG( ANY, ( "Could not create path for database file %S\n" " in current directory %S.\n", pwsFileName, SrvCfg_pwsDatabaseDirectory )); return FALSE; } // // build file path name // - copy append directory name // - copy file name // wcscpy( pwFileBuffer, g_pFileDirectoryAppend ); wcscat( pwFileBuffer, pwsFileName ); // // if no backup path -- done // if ( ! 
pwBackupBuffer ) { return TRUE; } // // check backup path length // - note backup subdir string has both directory separators // (i.e "\\backup\\") so no extra bytes for separator needed // if ( !g_pFileBackupDirectoryAppend || g_FileBackupDirectoryAppendLength + lengthFileName >= MAX_PATH ) { *pwBackupBuffer = 0; return TRUE; } wcscpy( pwBackupBuffer, g_pFileBackupDirectoryAppend ); wcscat( pwBackupBuffer, pwsFileName ); return TRUE; } BOOL File_CheckDatabaseFilePath( IN PWCHAR pwFileName, IN DWORD cFileNameLength OPTIONAL ) /*++ Routine Description: Checks validity of file path. Arguments: pwFileName -- database file name cFileNameLength -- optional specification of file name length, name assumed to be string if zero Return Value: TRUE if file path valid FALSE on error --*/ { // // basic validity check // if ( !pwFileName || !SrvCfg_pwsDatabaseDirectory ) { DNS_DEBUG( ANY, ( "ERROR: Missing %S to check path!\n", pwFileName ? "file" : "directory" )); return FALSE; } // // get file name length // if ( ! cFileNameLength ) { cFileNameLength = wcslen( pwFileName ); } // // verify name suitability // if ( g_FileDirectoryAppendLength + cFileNameLength >= MAX_PATH ) { DNS_DEBUG( INIT, ( "Filename %.*S exceeds MAX file path length\n" " with current directory %S.\n", cFileNameLength, pwFileName, SrvCfg_pwsDatabaseDirectory )); return FALSE; } return TRUE; } BOOL File_MoveToBackupDirectory( IN PWSTR pwsFileName ) /*++ Routine Description: Move file to backup directory. Arguments: pwsFileName -- file to move Return Value: TRUE -- if successful FALSE -- otherwise --*/ { WCHAR wsfile[ MAX_PATH ]; WCHAR wsbackup[ MAX_PATH ]; // // secondaries may not have file // if ( !pwsFileName ) { return FALSE; } // // create path to file and backup directory // if ( ! 
File_CreateDatabaseFilePath( wsfile, wsbackup, pwsFileName ) ) { // should have checked all names when read in boot file // or entered by admin ASSERT( FALSE ); return FALSE; } return MoveFileEx( wsfile, wsbackup, MOVEFILE_REPLACE_EXISTING ); } // // End of file.c //
172610.c
#define xBUFFER_CACHE_SIZE 10 #define xMAX_FAULT_INJECTION_RATE 15 #define xMIN_FAULT_INJECTION_RATE 3 #define xNUM_FAULT_TYPES 1 static NetworkBufferDescriptor_t * xNetworkBufferCache[ xBUFFER_CACHE_SIZE ] = { 0 }; #define xFAULT_LOG_SIZE 2048 uint32_t ulInjectedFault[ xFAULT_LOG_SIZE ]; uint32_t ulFaultLogIndex = 0; static BaseType_t prvCachePacket( NetworkBufferDescriptor_t * pxNetworkBufferIn ) { BaseType_t x, xReturn = pdFALSE; for( x = 0; x < xBUFFER_CACHE_SIZE; x++ ) { if( xNetworkBufferCache[ x ] == NULL ) { xNetworkBufferCache[ x ] = pxNetworkBufferIn; xReturn = pdTRUE; break; } } return xReturn; } /*-----------------------------------------------------------*/ static NetworkBufferDescriptor_t * prvGetCachedPacket( void ) { BaseType_t x; NetworkBufferDescriptor_t * pxReturn = NULL; for( x = ( xBUFFER_CACHE_SIZE - 1 ); x >= 0; x-- ) { if( xNetworkBufferCache[ x ] != NULL ) { pxReturn = xNetworkBufferCache[ x ]; xNetworkBufferCache[ x ] = NULL; break; } } return pxReturn; } /*-----------------------------------------------------------*/ static NetworkBufferDescriptor_t * prvDuplicatePacket( NetworkBufferDescriptor_t * pxOriginalPacket, const uint8_t * pucPacketData ) { NetworkBufferDescriptor_t * pxReturn; /* Obtain a new descriptor. */ pxReturn = pxGetNetworkBufferWithDescriptor( pxOriginalPacket->xDataLength, 0 ); if( pxReturn != NULL ) { /* Copy in the packet data. 
*/ pxReturn->xDataLength = pxOriginalPacket->xDataLength; memcpy( pxReturn->pucEthernetBuffer, pucPacketData, pxOriginalPacket->xDataLength ); } return pxReturn; } /*-----------------------------------------------------------*/ static NetworkBufferDescriptor_t * prvRxFaultInjection( NetworkBufferDescriptor_t * pxNetworkBufferIn, const uint8_t * pucPacketData ) { static uint32_t ulCallCount = 0, ulNextFaultCallCount = 0; NetworkBufferDescriptor_t * pxReturn = pxNetworkBufferIn; IPStackEvent_t xRxEvent = { eNetworkRxEvent, NULL }; uint32_t ulFault; return pxNetworkBufferIn; ulCallCount++; if( ulCallCount > ulNextFaultCallCount ) { xApplicationGetRandomNumber( &( ulNextFaultCallCount ) ); ulNextFaultCallCount = ulNextFaultCallCount % xMAX_FAULT_INJECTION_RATE; if( ulNextFaultCallCount < xMIN_FAULT_INJECTION_RATE ) { ulNextFaultCallCount = xMIN_FAULT_INJECTION_RATE; } ulCallCount = 0; xApplicationGetRandomNumber( &( ulFault ) ); ulFault = ulFault % xNUM_FAULT_TYPES; if( ulFaultLogIndex < xFAULT_LOG_SIZE ) { ulInjectedFault[ ulFaultLogIndex ] = ulFault; ulFaultLogIndex++; } switch( ulFault ) { case 0: /* Just drop the packet. */ vReleaseNetworkBufferAndDescriptor( pxNetworkBufferIn ); pxReturn = NULL; break; case 1: /* Store the packet in the cache for later. */ if( prvCachePacket( pxNetworkBufferIn ) == pdTRUE ) { /* The packet may get sent later, it is not being sent * now. */ pxReturn = NULL; } break; case 2: /* Send a cached packet. */ pxReturn = prvGetCachedPacket(); if( pxReturn != NULL ) { /* A cached packet was obtained so drop the original * packet. */ vReleaseNetworkBufferAndDescriptor( pxNetworkBufferIn ); } else { /* Could not obtain a packet from the cache so just return * the packet that was passed in. */ pxReturn = pxNetworkBufferIn; } break; case 4: /* Send a duplicate of the packet right away. */ pxReturn = prvDuplicatePacket( pxNetworkBufferIn, pucPacketData ); /* Send the original packet to the stack. 
*/ xRxEvent.pvData = ( void * ) pxNetworkBufferIn; if( xSendEventStructToIPTask( &xRxEvent, ( TickType_t ) 0 ) == pdFAIL ) { vReleaseNetworkBufferAndDescriptor( pxNetworkBufferIn ); } break; case 5: /* Send both a cached packet and the current packet. */ xRxEvent.pvData = ( void * ) prvGetCachedPacket(); if( xRxEvent.pvData != NULL ) { if( xSendEventStructToIPTask( &xRxEvent, ( TickType_t ) 0 ) == pdFAIL ) { vReleaseNetworkBufferAndDescriptor( pxNetworkBufferIn ); } } break; case 6: case 7: case 8: /* Store the packet in the cache for later. */ if( prvCachePacket( pxNetworkBufferIn ) == pdTRUE ) { /* The packet may get sent later, it is not being sent * now. */ pxReturn = NULL; } break; } } return pxReturn; } /*-----------------------------------------------------------*/
155156.c
/* * rawnetarch.c - rawnetarch.c wrapper for the SDL UI. * * Written by * Marco van den Heuvel <[email protected]> * * This file is part of VICE, the Versatile Commodore Emulator. * See README for copyright notice. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA. * */ #include <stdint.h> #include "vice.h" #ifdef HAVE_RAWNET #ifdef UNIX_COMPILE #include "../gtk3/rawnetarch_unix.c" #endif #ifdef WIN32_COMPILE #include "../gtk3/rawnetarch_win32.c" #endif #endif
315304.c
/* * Copyright (c) 2007, Kohsuke Ohtani * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <unistd.h> #include <errno.h> gid_t getgid(void) { return (uid_t)1; }
920835.c
n = 1; while (n < 10) { if (n % 3) { echo n *n; } else { echo n; } n = n + 1; }
332442.c
/****************************************************************************/ /* index v2.2.1 */ /* */ /* Copyright (c) 2012-2017 Texas Instruments Incorporated */ /* http://www.ti.com/ */ /* */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* */ /* Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* */ /* Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* */ /* Neither the name of Texas Instruments Incorporated nor the names */ /* of its contributors may be used to endorse or promote products */ /* derived from this software without specific prior written */ /* permission. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */ /* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */ /* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ /* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ /* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ /* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */ /* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */ /* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* */ /****************************************************************************/ #undef _INLINE #define _STRINGS_IMPLEMENTATION #define _INDEX #include <strings.h>
886002.c
/* ELF linking support for BFD. Copyright 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include "bfd.h" #include "bfdlink.h" #include "libbfd.h" #define ARCH_SIZE 0 #include "elf-bfd.h" #include "safe-ctype.h" #include "libiberty.h" #include "objalloc.h" /* Define a symbol in a dynamic linkage section. */ struct elf_link_hash_entry * _bfd_elf_define_linkage_sym (bfd *abfd, struct bfd_link_info *info, asection *sec, const char *name) { struct elf_link_hash_entry *h; struct bfd_link_hash_entry *bh; const struct elf_backend_data *bed; h = elf_link_hash_lookup (elf_hash_table (info), name, FALSE, FALSE, FALSE); if (h != NULL) { /* Zap symbol defined in an as-needed lib that wasn't linked. This is a symptom of a larger problem: Absolute symbols defined in shared libraries can't be overridden, because we lose the link to the bfd which is via the symbol section. 
*/ h->root.type = bfd_link_hash_new; } bh = &h->root; if (!_bfd_generic_link_add_one_symbol (info, abfd, name, BSF_GLOBAL, sec, 0, NULL, FALSE, get_elf_backend_data (abfd)->collect, &bh)) return NULL; h = (struct elf_link_hash_entry *) bh; h->def_regular = 1; h->type = STT_OBJECT; h->other = (h->other & ~ELF_ST_VISIBILITY (-1)) | STV_HIDDEN; bed = get_elf_backend_data (abfd); (*bed->elf_backend_hide_symbol) (info, h, TRUE); return h; } bfd_boolean _bfd_elf_create_got_section (bfd *abfd, struct bfd_link_info *info) { flagword flags; asection *s; struct elf_link_hash_entry *h; const struct elf_backend_data *bed = get_elf_backend_data (abfd); int ptralign; /* This function may be called more than once. */ s = bfd_get_section_by_name (abfd, ".got"); if (s != NULL && (s->flags & SEC_LINKER_CREATED) != 0) return TRUE; switch (bed->s->arch_size) { case 32: ptralign = 2; break; case 64: ptralign = 3; break; default: bfd_set_error (bfd_error_bad_value); return FALSE; } flags = bed->dynamic_sec_flags; s = bfd_make_section_with_flags (abfd, ".got", flags); if (s == NULL || !bfd_set_section_alignment (abfd, s, ptralign)) return FALSE; if (bed->want_got_plt) { s = bfd_make_section_with_flags (abfd, ".got.plt", flags); if (s == NULL || !bfd_set_section_alignment (abfd, s, ptralign)) return FALSE; } if (bed->want_got_sym) { /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got (or .got.plt) section. We don't do this in the linker script because we don't want to define the symbol if we are not creating a global offset table. */ h = _bfd_elf_define_linkage_sym (abfd, info, s, "_GLOBAL_OFFSET_TABLE_"); elf_hash_table (info)->hgot = h; if (h == NULL) return FALSE; } /* The first bit of the global offset table is the header. */ s->size += bed->got_header_size; return TRUE; } /* Create a strtab to hold the dynamic symbol names. 
*/

/* Make sure the hash table has a dynamic object (DYNOBJ) and a dynamic
   string table; create them lazily if this is the first caller.  ABFD
   becomes the dynobj only when none has been chosen yet.  */

static bfd_boolean
_bfd_elf_link_create_dynstrtab (bfd *abfd, struct bfd_link_info *info)
{
  struct elf_link_hash_table *hash_table;

  hash_table = elf_hash_table (info);
  if (hash_table->dynobj == NULL)
    hash_table->dynobj = abfd;

  if (hash_table->dynstr == NULL)
    {
      hash_table->dynstr = _bfd_elf_strtab_init ();
      if (hash_table->dynstr == NULL)
	return FALSE;
    }
  return TRUE;
}

/* Create some sections which will be filled in with dynamic linking
   information.  ABFD is an input file which requires dynamic sections
   to be created.  The dynamic sections take up virtual memory space
   when the final executable is run, so we need to create them before
   addresses are assigned to the output sections.  We work out the
   actual contents and size of these sections later.  */

bfd_boolean
_bfd_elf_link_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
{
  flagword flags;
  register asection *s;
  const struct elf_backend_data *bed;

  if (! is_elf_hash_table (info->hash))
    return FALSE;

  /* Idempotent: only the first call does any work.  */
  if (elf_hash_table (info)->dynamic_sections_created)
    return TRUE;

  if (!_bfd_elf_link_create_dynstrtab (abfd, info))
    return FALSE;

  /* All dynamic sections live in the dynobj, which may differ from the
     ABFD we were handed.  */
  abfd = elf_hash_table (info)->dynobj;
  bed = get_elf_backend_data (abfd);

  flags = bed->dynamic_sec_flags;

  /* A dynamically linked executable has a .interp section, but a
     shared library does not.  */
  if (info->executable)
    {
      s = bfd_make_section_with_flags (abfd, ".interp",
				       flags | SEC_READONLY);
      if (s == NULL)
	return FALSE;
    }

  /* Create sections to hold version information.  These are removed
     if they are not needed.  */
  s = bfd_make_section_with_flags (abfd, ".gnu.version_d",
				   flags | SEC_READONLY);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
    return FALSE;

  s = bfd_make_section_with_flags (abfd, ".gnu.version",
				   flags | SEC_READONLY);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, 1))
    return FALSE;

  s = bfd_make_section_with_flags (abfd, ".gnu.version_r",
				   flags | SEC_READONLY);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
    return FALSE;

  s = bfd_make_section_with_flags (abfd, ".dynsym",
				   flags | SEC_READONLY);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
    return FALSE;

  /* NB: .dynstr needs no particular alignment.  */
  s = bfd_make_section_with_flags (abfd, ".dynstr",
				   flags | SEC_READONLY);
  if (s == NULL)
    return FALSE;

  s = bfd_make_section_with_flags (abfd, ".dynamic", flags);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
    return FALSE;

  /* The special symbol _DYNAMIC is always set to the start of the
     .dynamic section.  We could set _DYNAMIC in a linker script, but we
     only want to define it if we are, in fact, creating a .dynamic
     section.  We don't want to define it if there is no .dynamic
     section, since on some ELF platforms the start up code examines it
     to decide how to initialize the process.  */
  if (!_bfd_elf_define_linkage_sym (abfd, info, s, "_DYNAMIC"))
    return FALSE;

  if (info->emit_hash)
    {
      s = bfd_make_section_with_flags (abfd, ".hash", flags | SEC_READONLY);
      if (s == NULL
	  || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
	return FALSE;
      elf_section_data (s)->this_hdr.sh_entsize = bed->s->sizeof_hash_entry;
    }

  if (info->emit_gnu_hash)
    {
      s = bfd_make_section_with_flags (abfd, ".gnu.hash",
				       flags | SEC_READONLY);
      if (s == NULL
	  || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
	return FALSE;
      /* For 64-bit ELF, .gnu.hash is a non-uniform entity size
	 section: 4 32-bit words followed by variable count of 64-bit
	 words, then variable count of 32-bit words.  */
      if (bed->s->arch_size == 64)
	elf_section_data (s)->this_hdr.sh_entsize = 0;
      else
	elf_section_data (s)->this_hdr.sh_entsize = 4;
    }

  /* Let the backend create the rest of the sections.  This lets the
     backend set the right flags.  The backend will normally create
     the .got and .plt sections.  */
  if (! (*bed->elf_backend_create_dynamic_sections) (abfd, info))
    return FALSE;

  elf_hash_table (info)->dynamic_sections_created = TRUE;

  return TRUE;
}

/* Create dynamic sections when linking against a dynamic object.  */

bfd_boolean
_bfd_elf_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
{
  flagword flags, pltflags;
  struct elf_link_hash_entry *h;
  asection *s;
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);

  /* We need to create .plt, .rel[a].plt, .got, .got.plt, .dynbss, and
     .rel[a].bss sections.  */
  flags = bed->dynamic_sec_flags;

  pltflags = flags;
  if (bed->plt_not_loaded)
    /* We do not clear SEC_ALLOC here because we still want the OS to
       allocate space for the section; it's just that there's nothing
       to read in from the object file.  */
    pltflags &= ~ (SEC_CODE | SEC_LOAD | SEC_HAS_CONTENTS);
  else
    pltflags |= SEC_ALLOC | SEC_CODE | SEC_LOAD;
  if (bed->plt_readonly)
    pltflags |= SEC_READONLY;

  s = bfd_make_section_with_flags (abfd, ".plt", pltflags);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->plt_alignment))
    return FALSE;

  /* Define the symbol _PROCEDURE_LINKAGE_TABLE_ at the start of the
     .plt section.  */
  if (bed->want_plt_sym)
    {
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
				       "_PROCEDURE_LINKAGE_TABLE_");
      elf_hash_table (info)->hplt = h;
      if (h == NULL)
	return FALSE;
    }

  s = bfd_make_section_with_flags (abfd,
				   (bed->default_use_rela_p
				    ? ".rela.plt" : ".rel.plt"),
				   flags | SEC_READONLY);
  if (s == NULL
      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
    return FALSE;

  if (! _bfd_elf_create_got_section (abfd, info))
    return FALSE;

  if (bed->want_dynbss)
    {
      /* The .dynbss section is a place to put symbols which are defined
	 by dynamic objects, are referenced by regular objects, and are
	 not functions.  We must allocate space for them in the process
	 image and use a R_*_COPY reloc to tell the dynamic linker to
	 initialize them at run time.  The linker script puts the .dynbss
	 section into the .bss section of the final image.  */
      s = bfd_make_section_with_flags (abfd, ".dynbss",
				       (SEC_ALLOC
					| SEC_LINKER_CREATED));
      if (s == NULL)
	return FALSE;

      /* The .rel[a].bss section holds copy relocs.  This section is not
	 normally needed.  We need to create it here, though, so that the
	 linker will map it to an output section.  We can't just create it
	 only if we need it, because we will not know whether we need it
	 until we have seen all the input files, and the first time the
	 main linker code calls BFD after examining all the input files
	 (size_dynamic_sections) the input sections have already been
	 mapped to the output sections.  If the section turns out not to
	 be needed, we can discard it later.  We will never need this
	 section when generating a shared object, since they do not use
	 copy relocs.  */
      if (! info->shared)
	{
	  s = bfd_make_section_with_flags (abfd,
					   (bed->default_use_rela_p
					    ? ".rela.bss" : ".rel.bss"),
					   flags | SEC_READONLY);
	  if (s == NULL
	      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
	    return FALSE;
	}
    }

  return TRUE;
}

/* Record a new dynamic symbol.  We record the dynamic symbols as we
   read the input files, since we need to have a list of all of them
   before we can determine the final sizes of the output sections.
   Note that we may actually call this function even though we are not
   going to output any dynamic symbols; in some cases we know that a
   symbol should be in the dynamic symbol table, but only if there is
   one.  */

bfd_boolean
bfd_elf_link_record_dynamic_symbol (struct bfd_link_info *info,
				    struct elf_link_hash_entry *h)
{
  if (h->dynindx == -1)
    {
      struct elf_strtab_hash *dynstr;
      char *p;
      const char *name;
      bfd_size_type indx;

      /* XXX: The ABI draft says the linker must turn hidden and
	 internal symbols into STB_LOCAL symbols when producing the
	 DSO.  However, if ld.so honors st_other in the dynamic table,
	 this would not be necessary.  */
      switch (ELF_ST_VISIBILITY (h->other))
	{
	case STV_INTERNAL:
	case STV_HIDDEN:
	  if (h->root.type != bfd_link_hash_undefined
	      && h->root.type != bfd_link_hash_undefweak)
	    {
	      h->forced_local = 1;
	      if (!elf_hash_table (info)->is_relocatable_executable)
		return TRUE;
	    }
	  /* Fall through.  */
	default:
	  break;
	}

      h->dynindx = elf_hash_table (info)->dynsymcount;
      ++elf_hash_table (info)->dynsymcount;

      dynstr = elf_hash_table (info)->dynstr;
      if (dynstr == NULL)
	{
	  /* Create a strtab to hold the dynamic symbol names.  */
	  elf_hash_table (info)->dynstr = dynstr = _bfd_elf_strtab_init ();
	  if (dynstr == NULL)
	    return FALSE;
	}

      /* We don't put any version information in the dynamic string
	 table.  Temporarily chop the name at ELF_VER_CHR, add the
	 unversioned name, then restore the byte.  */
      name = h->root.root.string;
      p = strchr (name, ELF_VER_CHR);
      if (p != NULL)
	/* We know that the p points into writable memory.  In fact,
	   there are only a few symbols that have read-only names, being
	   those like _GLOBAL_OFFSET_TABLE_ that are created specially
	   by the backends.  Most symbols will have names pointing into
	   an ELF string table read from a file, or to objalloc
	   memory.  */
	*p = 0;

      indx = _bfd_elf_strtab_add (dynstr, name, p != NULL);

      if (p != NULL)
	*p = ELF_VER_CHR;

      if (indx == (bfd_size_type) -1)
	return FALSE;
      h->dynstr_index = indx;
    }

  return TRUE;
}

/* Mark a symbol dynamic.  SYM may be NULL, e.g. when this is called
   for a symbol created by a linker script assignment.  */

void
bfd_elf_link_mark_dynamic_symbol (struct bfd_link_info *info,
				  struct elf_link_hash_entry *h,
				  Elf_Internal_Sym *sym)
{
  struct bfd_elf_dynamic_list *d = info->dynamic_list;

  /* It may be called more than once on the same H.  */
  if(h->dynamic || info->relocatable)
    return;

  if ((info->dynamic_data
       && (h->type == STT_OBJECT
	   || (sym != NULL
	       && ELF_ST_TYPE (sym->st_info) == STT_OBJECT)))
      || (d != NULL
	  && h->root.type == bfd_link_hash_new
	  && (*d->match) (&d->head, NULL, h->root.root.string)))
    h->dynamic = 1;
}

/* Record an assignment to a symbol made by a linker script.  We need
   this in case some dynamic object refers to this symbol.
*/

bfd_boolean
bfd_elf_record_link_assignment (bfd *output_bfd,
				struct bfd_link_info *info,
				const char *name,
				bfd_boolean provide,
				bfd_boolean hidden)
{
  struct elf_link_hash_entry *h;
  struct elf_link_hash_table *htab;

  if (!is_elf_hash_table (info->hash))
    return TRUE;

  htab = elf_hash_table (info);
  /* For PROVIDE we only look the symbol up (no creation); a missing
     entry is then not an error.  */
  h = elf_link_hash_lookup (htab, name, !provide, TRUE, FALSE);
  if (h == NULL)
    return provide;

  /* Since we're defining the symbol, don't let it seem to have not
     been defined.  record_dynamic_symbol and size_dynamic_sections
     may depend on this.  */
  if (h->root.type == bfd_link_hash_undefweak
      || h->root.type == bfd_link_hash_undefined)
    {
      h->root.type = bfd_link_hash_new;
      if (h->root.u.undef.next != NULL || htab->root.undefs_tail == &h->root)
	bfd_link_repair_undef_list (&htab->root);
    }
  else if (h->root.type == bfd_link_hash_new)
    {
      bfd_elf_link_mark_dynamic_symbol (info, h, NULL);
      h->non_elf = 0;
    }
  else if (h->root.type == bfd_link_hash_indirect)
    {
      /* Swap the direction of the indirection: the script-assigned
	 name becomes the real (undefined, about to be defined) symbol
	 and the old target now points at it.  */
      const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
      struct elf_link_hash_entry *hv = h;
      do
	hv = (struct elf_link_hash_entry *) hv->root.u.i.link;
      while (hv->root.type == bfd_link_hash_indirect
	     || hv->root.type == bfd_link_hash_warning);
      h->root.type = bfd_link_hash_undefined;
      hv->root.type = bfd_link_hash_indirect;
      hv->root.u.i.link = (struct bfd_link_hash_entry *) h;
      (*bed->elf_backend_copy_indirect_symbol) (info, h, hv);
    }
  else if (h->root.type == bfd_link_hash_warning)
    {
      abort ();
    }

  /* If this symbol is being provided by the linker script, and it is
     currently defined by a dynamic object, but not by a regular
     object, then mark it as undefined so that the generic linker will
     force the correct value.  */
  if (provide
      && h->def_dynamic
      && !h->def_regular)
    h->root.type = bfd_link_hash_undefined;

  /* If this symbol is not being provided by the linker script, and it is
     currently defined by a dynamic object, but not by a regular object,
     then clear out any version information because the symbol will not be
     associated with the dynamic object any more.  */
  if (!provide
      && h->def_dynamic
      && !h->def_regular)
    h->verinfo.verdef = NULL;

  h->def_regular = 1;

  if (provide && hidden)
    {
      const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);

      h->other = (h->other & ~ELF_ST_VISIBILITY (-1)) | STV_HIDDEN;
      (*bed->elf_backend_hide_symbol) (info, h, TRUE);
    }

  /* STV_HIDDEN and STV_INTERNAL symbols must be STB_LOCAL in shared
     objects and executables.  */
  if (!info->relocatable
      && h->dynindx != -1
      && (ELF_ST_VISIBILITY (h->other) == STV_HIDDEN
	  || ELF_ST_VISIBILITY (h->other) == STV_INTERNAL))
    h->forced_local = 1;

  if ((h->def_dynamic
       || h->ref_dynamic
       || info->shared
       || (info->executable
	   && elf_hash_table (info)->is_relocatable_executable))
      && h->dynindx == -1)
    {
      if (! bfd_elf_link_record_dynamic_symbol (info, h))
	return FALSE;

      /* If this is a weak defined symbol, and we know a corresponding
	 real symbol from the same dynamic object, make sure the real
	 symbol is also made into a dynamic symbol.  */
      if (h->u.weakdef != NULL
	  && h->u.weakdef->dynindx == -1)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h->u.weakdef))
	    return FALSE;
	}
    }

  return TRUE;
}

/* Record a new local dynamic symbol.  Returns 0 on failure, 1 on
   success, and 2 on a failure caused by attempting to record a symbol
   in a discarded section, eg. a discarded link-once section symbol.
*/

int
bfd_elf_link_record_local_dynamic_symbol (struct bfd_link_info *info,
					  bfd *input_bfd,
					  long input_indx)
{
  bfd_size_type amt;
  struct elf_link_local_dynamic_entry *entry;
  struct elf_link_hash_table *eht;
  struct elf_strtab_hash *dynstr;
  unsigned long dynstr_index;
  char *name;
  Elf_External_Sym_Shndx eshndx;
  char esym[sizeof (Elf64_External_Sym)];

  if (! is_elf_hash_table (info->hash))
    return 0;

  /* See if the entry exists already.  */
  for (entry = elf_hash_table (info)->dynlocal; entry ; entry = entry->next)
    if (entry->input_bfd == input_bfd && entry->input_indx == input_indx)
      return 1;

  amt = sizeof (*entry);
  entry = bfd_alloc (input_bfd, amt);
  if (entry == NULL)
    return 0;

  /* Go find the symbol, so that we can find its name.  */
  if (!bfd_elf_get_elf_syms (input_bfd, &elf_tdata (input_bfd)->symtab_hdr,
			     1, input_indx, &entry->isym, esym, &eshndx))
    {
      bfd_release (input_bfd, entry);
      return 0;
    }

  if (entry->isym.st_shndx != SHN_UNDEF
      && (entry->isym.st_shndx < SHN_LORESERVE
	  || entry->isym.st_shndx > SHN_HIRESERVE))
    {
      asection *s;

      s = bfd_section_from_elf_index (input_bfd, entry->isym.st_shndx);
      if (s == NULL || bfd_is_abs_section (s->output_section))
	{
	  /* We can still bfd_release here as nothing has done another
	     bfd_alloc.  We can't do this later in this function.  */
	  bfd_release (input_bfd, entry);
	  return 2;
	}
    }

  name = (bfd_elf_string_from_elf_section
	  (input_bfd, elf_tdata (input_bfd)->symtab_hdr.sh_link,
	   entry->isym.st_name));

  dynstr = elf_hash_table (info)->dynstr;
  if (dynstr == NULL)
    {
      /* Create a strtab to hold the dynamic symbol names.  */
      elf_hash_table (info)->dynstr = dynstr = _bfd_elf_strtab_init ();
      if (dynstr == NULL)
	return 0;
    }

  dynstr_index = _bfd_elf_strtab_add (dynstr, name, FALSE);
  if (dynstr_index == (unsigned long) -1)
    return 0;
  entry->isym.st_name = dynstr_index;

  eht = elf_hash_table (info);

  /* Link the new entry onto the head of the dynlocal list.  */
  entry->next = eht->dynlocal;
  eht->dynlocal = entry;
  entry->input_bfd = input_bfd;
  entry->input_indx = input_indx;
  eht->dynsymcount++;

  /* Whatever binding the symbol had before, it's now local.  */
  entry->isym.st_info
    = ELF_ST_INFO (STB_LOCAL, ELF_ST_TYPE (entry->isym.st_info));

  /* The dynindx will be set at the end of size_dynamic_sections.  */

  return 1;
}

/* Return the dynindex of a local dynamic symbol, or -1 if it has not
   been recorded.  */

long
_bfd_elf_link_lookup_local_dynindx (struct bfd_link_info *info,
				    bfd *input_bfd,
				    long input_indx)
{
  struct elf_link_local_dynamic_entry *e;

  for (e = elf_hash_table (info)->dynlocal; e ; e = e->next)
    if (e->input_bfd == input_bfd && e->input_indx == input_indx)
      return e->dynindx;
  return -1;
}

/* This function is used to renumber the dynamic symbols, if some of
   them are removed because they are marked as local.  This is called
   via elf_link_hash_traverse.  DATA points to the running dynsym
   counter.  */

static bfd_boolean
elf_link_renumber_hash_table_dynsyms (struct elf_link_hash_entry *h,
				      void *data)
{
  size_t *count = data;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  if (h->forced_local)
    return TRUE;

  if (h->dynindx != -1)
    h->dynindx = ++(*count);

  return TRUE;
}

/* Like elf_link_renumber_hash_table_dynsyms, but just number symbols with
   STB_LOCAL binding (i.e. those forced local).  */

static bfd_boolean
elf_link_renumber_local_hash_table_dynsyms (struct elf_link_hash_entry *h,
					    void *data)
{
  size_t *count = data;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  if (!h->forced_local)
    return TRUE;

  if (h->dynindx != -1)
    h->dynindx = ++(*count);

  return TRUE;
}

/* Return true if the dynamic symbol for a given section should be
   omitted when creating a shared library.  */

bfd_boolean
_bfd_elf_link_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
				   struct bfd_link_info *info,
				   asection *p)
{
  struct elf_link_hash_table *htab;

  switch (elf_section_data (p)->this_hdr.sh_type)
    {
    case SHT_PROGBITS:
    case SHT_NOBITS:
      /* If sh_type is yet undecided, assume it could be
	 SHT_PROGBITS/SHT_NOBITS.  */
    case SHT_NULL:
      htab = elf_hash_table (info);
      if (p == htab->tls_sec)
	return FALSE;

      /* If the backend nominated representative text/data sections,
	 only those two keep their dynsyms.  */
      if (htab->text_index_section != NULL)
	return p != htab->text_index_section && p != htab->data_index_section;

      if (strcmp (p->name, ".got") == 0
	  || strcmp (p->name, ".got.plt") == 0
	  || strcmp (p->name, ".plt") == 0)
	{
	  asection *ip;

	  if (htab->dynobj != NULL
	      && (ip = bfd_get_section_by_name (htab->dynobj, p->name)) != NULL
	      && (ip->flags & SEC_LINKER_CREATED)
	      && ip->output_section == p)
	    return TRUE;
	}
      return FALSE;

      /* There shouldn't be section relative relocations
	 against any other section.  */
    default:
      return TRUE;
    }
}

/* Assign dynsym indices.  In a shared library we generate a section
   symbol for each output section, which come first.  Next come symbols
   which have been forced to local binding.  Then all of the back-end
   allocated local dynamic syms, followed by the rest of the global
   symbols.  */

static unsigned long
_bfd_elf_link_renumber_dynsyms (bfd *output_bfd,
				struct bfd_link_info *info,
				unsigned long *section_sym_count)
{
  unsigned long dynsymcount = 0;

  if (info->shared || elf_hash_table (info)->is_relocatable_executable)
    {
      const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
      asection *p;
      for (p = output_bfd->sections; p ; p = p->next)
	if ((p->flags & SEC_EXCLUDE) == 0
	    && (p->flags & SEC_ALLOC) != 0
	    && !(*bed->elf_backend_omit_section_dynsym) (output_bfd, info, p))
	  elf_section_data (p)->dynindx = ++dynsymcount;
	else
	  elf_section_data (p)->dynindx = 0;
    }
  *section_sym_count = dynsymcount;

  /* Forced-local symbols next.  */
  elf_link_hash_traverse (elf_hash_table (info),
			  elf_link_renumber_local_hash_table_dynsyms,
			  &dynsymcount);

  if (elf_hash_table (info)->dynlocal)
    {
      struct elf_link_local_dynamic_entry *p;
      for (p = elf_hash_table (info)->dynlocal; p ; p = p->next)
	p->dynindx = ++dynsymcount;
    }

  /* Finally, the remaining global dynamic symbols.  */
  elf_link_hash_traverse (elf_hash_table (info),
			  elf_link_renumber_hash_table_dynsyms,
			  &dynsymcount);

  /* There is an unused NULL entry at the head of the table which
     we must account for in our count.  Unless there weren't any
     symbols, which means we'll have no table at all.  */
  if (dynsymcount != 0)
    ++dynsymcount;

  elf_hash_table (info)->dynsymcount = dynsymcount;
  return dynsymcount;
}

/* This function is called when we want to define a new symbol.  It
   handles the various cases which arise when we find a definition in
   a dynamic object, or when there is already a definition in a
   dynamic object.  The new symbol is described by NAME, SYM, PSEC,
   and PVALUE.  We set SYM_HASH to the hash table entry.  We set
   OVERRIDE if the old symbol is overriding a new definition.  We set
   TYPE_CHANGE_OK if it is OK for the type to change.  We set
   SIZE_CHANGE_OK if it is OK for the size to change.  By OK to
   change, we mean that we shouldn't warn if the type or size does
   change.  We set POLD_ALIGNMENT if an old common symbol in a dynamic
   object is overridden by a regular object.
*/ bfd_boolean _bfd_elf_merge_symbol (bfd *abfd, struct bfd_link_info *info, const char *name, Elf_Internal_Sym *sym, asection **psec, bfd_vma *pvalue, unsigned int *pold_alignment, struct elf_link_hash_entry **sym_hash, bfd_boolean *skip, bfd_boolean *override, bfd_boolean *type_change_ok, bfd_boolean *size_change_ok) { asection *sec, *oldsec; struct elf_link_hash_entry *h; struct elf_link_hash_entry *flip; int bind; bfd *oldbfd; bfd_boolean newdyn, olddyn, olddef, newdef, newdyncommon, olddyncommon; bfd_boolean newweak, oldweak; const struct elf_backend_data *bed; *skip = FALSE; *override = FALSE; sec = *psec; bind = ELF_ST_BIND (sym->st_info); /* Silently discard TLS symbols from --just-syms. There's no way to combine a static TLS block with a new TLS block for this executable. */ if (ELF_ST_TYPE (sym->st_info) == STT_TLS && sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS) { *skip = TRUE; return TRUE; } if (! bfd_is_und_section (sec)) h = elf_link_hash_lookup (elf_hash_table (info), name, TRUE, FALSE, FALSE); else h = ((struct elf_link_hash_entry *) bfd_wrapped_link_hash_lookup (abfd, info, name, TRUE, FALSE, FALSE)); if (h == NULL) return FALSE; *sym_hash = h; bed = get_elf_backend_data (abfd); /* This code is for coping with dynamic objects, and is only useful if we are doing an ELF link. */ if (!(*bed->relocs_compatible) (abfd->xvec, info->hash->creator)) return TRUE; /* For merging, we only care about real symbols. */ while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* We have to check it for every instance since the first few may be refereences and not all compilers emit symbol type for undefined symbols. */ bfd_elf_link_mark_dynamic_symbol (info, h, sym); /* If we just created the symbol, mark it as being an ELF symbol. Other than that, there is nothing to do--there is no merge issue with a newly defined symbol--so we just return. 
*/ if (h->root.type == bfd_link_hash_new) { h->non_elf = 0; return TRUE; } /* OLDBFD and OLDSEC are a BFD and an ASECTION associated with the existing symbol. */ switch (h->root.type) { default: oldbfd = NULL; oldsec = NULL; break; case bfd_link_hash_undefined: case bfd_link_hash_undefweak: oldbfd = h->root.u.undef.abfd; oldsec = NULL; break; case bfd_link_hash_defined: case bfd_link_hash_defweak: oldbfd = h->root.u.def.section->owner; oldsec = h->root.u.def.section; break; case bfd_link_hash_common: oldbfd = h->root.u.c.p->section->owner; oldsec = h->root.u.c.p->section; break; } /* In cases involving weak versioned symbols, we may wind up trying to merge a symbol with itself. Catch that here, to avoid the confusion that results if we try to override a symbol with itself. The additional tests catch cases like _GLOBAL_OFFSET_TABLE_, which are regular symbols defined in a dynamic object, which we do want to handle here. */ if (abfd == oldbfd && ((abfd->flags & DYNAMIC) == 0 || !h->def_regular)) return TRUE; /* NEWDYN and OLDDYN indicate whether the new or old symbol, respectively, is from a dynamic object. */ newdyn = (abfd->flags & DYNAMIC) != 0; olddyn = FALSE; if (oldbfd != NULL) olddyn = (oldbfd->flags & DYNAMIC) != 0; else if (oldsec != NULL) { /* This handles the special SHN_MIPS_{TEXT,DATA} section indices used by MIPS ELF. */ olddyn = (oldsec->symbol->flags & BSF_DYNAMIC) != 0; } /* NEWDEF and OLDDEF indicate whether the new or old symbol, respectively, appear to be a definition rather than reference. */ newdef = !bfd_is_und_section (sec) && !bfd_is_com_section (sec); olddef = (h->root.type != bfd_link_hash_undefined && h->root.type != bfd_link_hash_undefweak && h->root.type != bfd_link_hash_common); /* When we try to create a default indirect symbol from the dynamic definition with the default version, we skip it if its type and the type of existing regular definition mismatch. We only do it if the existing regular definition won't be dynamic. 
*/ if (pold_alignment == NULL && !info->shared && !info->export_dynamic && !h->ref_dynamic && newdyn && newdef && !olddyn && (olddef || h->root.type == bfd_link_hash_common) && ELF_ST_TYPE (sym->st_info) != h->type && ELF_ST_TYPE (sym->st_info) != STT_NOTYPE && h->type != STT_NOTYPE && !(bed->is_function_type (ELF_ST_TYPE (sym->st_info)) && bed->is_function_type (h->type))) { *skip = TRUE; return TRUE; } /* Check TLS symbol. We don't check undefined symbol introduced by "ld -u". */ if ((ELF_ST_TYPE (sym->st_info) == STT_TLS || h->type == STT_TLS) && ELF_ST_TYPE (sym->st_info) != h->type && oldbfd != NULL) { bfd *ntbfd, *tbfd; bfd_boolean ntdef, tdef; asection *ntsec, *tsec; if (h->type == STT_TLS) { ntbfd = abfd; ntsec = sec; ntdef = newdef; tbfd = oldbfd; tsec = oldsec; tdef = olddef; } else { ntbfd = oldbfd; ntsec = oldsec; ntdef = olddef; tbfd = abfd; tsec = sec; tdef = newdef; } if (tdef && ntdef) (*_bfd_error_handler) (_("%s: TLS definition in %B section %A mismatches non-TLS definition in %B section %A"), tbfd, tsec, ntbfd, ntsec, h->root.root.string); else if (!tdef && !ntdef) (*_bfd_error_handler) (_("%s: TLS reference in %B mismatches non-TLS reference in %B"), tbfd, ntbfd, h->root.root.string); else if (tdef) (*_bfd_error_handler) (_("%s: TLS definition in %B section %A mismatches non-TLS reference in %B"), tbfd, tsec, ntbfd, h->root.root.string); else (*_bfd_error_handler) (_("%s: TLS reference in %B mismatches non-TLS definition in %B section %A"), tbfd, ntbfd, ntsec, h->root.root.string); bfd_set_error (bfd_error_bad_value); return FALSE; } /* We need to remember if a symbol has a definition in a dynamic object or is weak in all dynamic objects. Internal and hidden visibility will make it unavailable to dynamic objects. */ if (newdyn && !h->dynamic_def) { if (!bfd_is_und_section (sec)) h->dynamic_def = 1; else { /* Check if this symbol is weak in all dynamic objects. If it is the first time we see it in a dynamic object, we mark if it is weak. 
Otherwise, we clear it. */ if (!h->ref_dynamic) { if (bind == STB_WEAK) h->dynamic_weak = 1; } else if (bind != STB_WEAK) h->dynamic_weak = 0; } } /* If the old symbol has non-default visibility, we ignore the new definition from a dynamic object. */ if (newdyn && ELF_ST_VISIBILITY (h->other) != STV_DEFAULT && !bfd_is_und_section (sec)) { *skip = TRUE; /* Make sure this symbol is dynamic. */ h->ref_dynamic = 1; /* A protected symbol has external availability. Make sure it is recorded as dynamic. FIXME: Should we check type and size for protected symbol? */ if (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) return bfd_elf_link_record_dynamic_symbol (info, h); else return TRUE; } else if (!newdyn && ELF_ST_VISIBILITY (sym->st_other) != STV_DEFAULT && h->def_dynamic) { /* If the new symbol with non-default visibility comes from a relocatable file and the old definition comes from a dynamic object, we remove the old definition. */ if ((*sym_hash)->root.type == bfd_link_hash_indirect) { /* Handle the case where the old dynamic definition is default versioned. We need to copy the symbol info from the symbol with default version to the normal one if it was referenced before. */ if (h->ref_regular) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); struct elf_link_hash_entry *vh = *sym_hash; vh->root.type = h->root.type; h->root.type = bfd_link_hash_indirect; (*bed->elf_backend_copy_indirect_symbol) (info, vh, h); /* Protected symbols will override the dynamic definition with default version. */ if (ELF_ST_VISIBILITY (sym->st_other) == STV_PROTECTED) { h->root.u.i.link = (struct bfd_link_hash_entry *) vh; vh->dynamic_def = 1; vh->ref_dynamic = 1; } else { h->root.type = vh->root.type; vh->ref_dynamic = 0; /* We have to hide it here since it was made dynamic global with extra bits when the symbol info was copied from the old dynamic definition. 
*/ (*bed->elf_backend_hide_symbol) (info, vh, TRUE); } h = vh; } else h = *sym_hash; } if ((h->root.u.undef.next || info->hash->undefs_tail == &h->root) && bfd_is_und_section (sec)) { /* If the new symbol is undefined and the old symbol was also undefined before, we need to make sure _bfd_generic_link_add_one_symbol doesn't mess up the linker hash table undefs list. Since the old definition came from a dynamic object, it is still on the undefs list. */ h->root.type = bfd_link_hash_undefined; h->root.u.undef.abfd = abfd; } else { h->root.type = bfd_link_hash_new; h->root.u.undef.abfd = NULL; } if (h->def_dynamic) { h->def_dynamic = 0; h->ref_dynamic = 1; h->dynamic_def = 1; } /* FIXME: Should we check type and size for protected symbol? */ h->size = 0; h->type = 0; return TRUE; } /* Differentiate strong and weak symbols. */ newweak = bind == STB_WEAK; oldweak = (h->root.type == bfd_link_hash_defweak || h->root.type == bfd_link_hash_undefweak); /* If a new weak symbol definition comes from a regular file and the old symbol comes from a dynamic library, we treat the new one as strong. Similarly, an old weak symbol definition from a regular file is treated as strong when the new symbol comes from a dynamic library. Further, an old weak symbol from a dynamic library is treated as strong if the new symbol is from a dynamic library. This reflects the way glibc's ld.so works. Do this before setting *type_change_ok or *size_change_ok so that we warn properly when dynamic library symbols are overridden. */ if (newdef && !newdyn && olddyn) newweak = FALSE; if (olddef && newdyn) oldweak = FALSE; /* Allow changes between different types of funciton symbol. */ if (bed->is_function_type (ELF_ST_TYPE (sym->st_info)) && bed->is_function_type (h->type)) *type_change_ok = TRUE; /* It's OK to change the type if either the existing symbol or the new symbol is weak. A type change is also OK if the old symbol is undefined and the new symbol is defined. 
*/ if (oldweak || newweak || (newdef && h->root.type == bfd_link_hash_undefined)) *type_change_ok = TRUE; /* It's OK to change the size if either the existing symbol or the new symbol is weak, or if the old symbol is undefined. */ if (*type_change_ok || h->root.type == bfd_link_hash_undefined) *size_change_ok = TRUE; /* NEWDYNCOMMON and OLDDYNCOMMON indicate whether the new or old symbol, respectively, appears to be a common symbol in a dynamic object. If a symbol appears in an uninitialized section, and is not weak, and is not a function, then it may be a common symbol which was resolved when the dynamic object was created. We want to treat such symbols specially, because they raise special considerations when setting the symbol size: if the symbol appears as a common symbol in a regular object, and the size in the regular object is larger, we must make sure that we use the larger size. This problematic case can always be avoided in C, but it must be handled correctly when using Fortran shared libraries. Note that if NEWDYNCOMMON is set, NEWDEF will be set, and likewise for OLDDYNCOMMON and OLDDEF. Note that this test is just a heuristic, and that it is quite possible to have an uninitialized symbol in a shared object which is really a definition, rather than a common symbol. This could lead to some minor confusion when the symbol really is a common symbol in some regular object. However, I think it will be harmless. 
*/ if (newdyn && newdef && !newweak && (sec->flags & SEC_ALLOC) != 0 && (sec->flags & SEC_LOAD) == 0 && sym->st_size > 0 && !bed->is_function_type (ELF_ST_TYPE (sym->st_info))) newdyncommon = TRUE; else newdyncommon = FALSE; if (olddyn && olddef && h->root.type == bfd_link_hash_defined && h->def_dynamic && (h->root.u.def.section->flags & SEC_ALLOC) != 0 && (h->root.u.def.section->flags & SEC_LOAD) == 0 && h->size > 0 && !bed->is_function_type (h->type)) olddyncommon = TRUE; else olddyncommon = FALSE; /* We now know everything about the old and new symbols. We ask the backend to check if we can merge them. */ if (bed->merge_symbol && !bed->merge_symbol (info, sym_hash, h, sym, psec, pvalue, pold_alignment, skip, override, type_change_ok, size_change_ok, &newdyn, &newdef, &newdyncommon, &newweak, abfd, &sec, &olddyn, &olddef, &olddyncommon, &oldweak, oldbfd, &oldsec)) return FALSE; /* If both the old and the new symbols look like common symbols in a dynamic object, set the size of the symbol to the larger of the two. */ if (olddyncommon && newdyncommon && sym->st_size != h->size) { /* Since we think we have two common symbols, issue a multiple common warning if desired. Note that we only warn if the size is different. If the size is the same, we simply let the old symbol override the new one as normally happens with symbols defined in dynamic objects. */ if (! ((*info->callbacks->multiple_common) (info, h->root.root.string, oldbfd, bfd_link_hash_common, h->size, abfd, bfd_link_hash_common, sym->st_size))) return FALSE; if (sym->st_size > h->size) h->size = sym->st_size; *size_change_ok = TRUE; } /* If we are looking at a dynamic object, and we have found a definition, we need to see if the symbol was already defined by some other object. If so, we want to use the existing definition, and we do not want to report a multiple symbol definition error; we do this by clobbering *PSEC to be bfd_und_section_ptr. 
We treat a common symbol as a definition if the symbol in the shared library is a function, since common symbols always represent variables; this can cause confusion in principle, but any such confusion would seem to indicate an erroneous program or shared library. We also permit a common symbol in a regular object to override a weak symbol in a shared object. */ if (newdyn && newdef && (olddef || (h->root.type == bfd_link_hash_common && (newweak || bed->is_function_type (ELF_ST_TYPE (sym->st_info)))))) { *override = TRUE; newdef = FALSE; newdyncommon = FALSE; *psec = sec = bfd_und_section_ptr; *size_change_ok = TRUE; /* If we get here when the old symbol is a common symbol, then we are explicitly letting it override a weak symbol or function in a dynamic object, and we don't want to warn about a type change. If the old symbol is a defined symbol, a type change warning may still be appropriate. */ if (h->root.type == bfd_link_hash_common) *type_change_ok = TRUE; } /* Handle the special case of an old common symbol merging with a new symbol which looks like a common symbol in a shared object. We change *PSEC and *PVALUE to make the new symbol look like a common symbol, and let _bfd_generic_link_add_one_symbol do the right thing. */ if (newdyncommon && h->root.type == bfd_link_hash_common) { *override = TRUE; newdef = FALSE; newdyncommon = FALSE; *pvalue = sym->st_size; *psec = sec = bed->common_section (oldsec); *size_change_ok = TRUE; } /* Skip weak definitions of symbols that are already defined. */ if (newdef && olddef && newweak) *skip = TRUE; /* If the old symbol is from a dynamic object, and the new symbol is a definition which is not from a dynamic object, then the new symbol overrides the old symbol. Symbols from regular files always take precedence over symbols from dynamic objects, even if they are defined after the dynamic object in the link. 
As above, we again permit a common symbol in a regular object to override a definition in a shared object if the shared object symbol is a function or is weak. */ flip = NULL; if (!newdyn && (newdef || (bfd_is_com_section (sec) && (oldweak || bed->is_function_type (h->type)))) && olddyn && olddef && h->def_dynamic) { /* Change the hash table entry to undefined, and let _bfd_generic_link_add_one_symbol do the right thing with the new definition. */ h->root.type = bfd_link_hash_undefined; h->root.u.undef.abfd = h->root.u.def.section->owner; *size_change_ok = TRUE; olddef = FALSE; olddyncommon = FALSE; /* We again permit a type change when a common symbol may be overriding a function. */ if (bfd_is_com_section (sec)) *type_change_ok = TRUE; if ((*sym_hash)->root.type == bfd_link_hash_indirect) flip = *sym_hash; else /* This union may have been set to be non-NULL when this symbol was seen in a dynamic object. We must force the union to be NULL, so that it is correct for a regular symbol. */ h->verinfo.vertree = NULL; } /* Handle the special case of a new common symbol merging with an old symbol that looks like it might be a common symbol defined in a shared object. Note that we have already handled the case in which a new common symbol should simply override the definition in the shared library. */ if (! newdyn && bfd_is_com_section (sec) && olddyncommon) { /* It would be best if we could set the hash table entry to a common symbol, but we don't know what to use for the section or the alignment. */ if (! ((*info->callbacks->multiple_common) (info, h->root.root.string, oldbfd, bfd_link_hash_common, h->size, abfd, bfd_link_hash_common, sym->st_size))) return FALSE; /* If the presumed common symbol in the dynamic object is larger, pretend that the new symbol has its size. */ if (h->size > *pvalue) *pvalue = h->size; /* We need to remember the alignment required by the symbol in the dynamic object. 
*/
      BFD_ASSERT (pold_alignment);
      *pold_alignment = h->root.u.def.section->alignment_power;

      olddef = FALSE;
      olddyncommon = FALSE;
      h->root.type = bfd_link_hash_undefined;
      h->root.u.undef.abfd = h->root.u.def.section->owner;
      *size_change_ok = TRUE;
      *type_change_ok = TRUE;

      if ((*sym_hash)->root.type == bfd_link_hash_indirect)
	flip = *sym_hash;
      else
	h->verinfo.vertree = NULL;
    }

  if (flip != NULL)
    {
      /* Handle the case where we had a versioned symbol in a dynamic
	 library and now find a definition in a normal object.  In this
	 case, we make the versioned symbol point to the normal one.  */
      const struct elf_backend_data *bed = get_elf_backend_data (abfd);
      flip->root.type = h->root.type;
      flip->root.u.undef.abfd = h->root.u.undef.abfd;
      h->root.type = bfd_link_hash_indirect;
      h->root.u.i.link = (struct bfd_link_hash_entry *) flip;
      (*bed->elf_backend_copy_indirect_symbol) (info, flip, h);
      if (h->def_dynamic)
	{
	  h->def_dynamic = 0;
	  flip->ref_dynamic = 1;
	}
    }

  return TRUE;
}

/* This function is called to create an indirect symbol from the
   default for the symbol with the default version if needed.  The
   symbol is described by H, NAME, SYM, PSEC, VALUE, and OVERRIDE.  We
   set DYNSYM if the new indirect symbol is dynamic.  */

bfd_boolean
_bfd_elf_add_default_symbol (bfd *abfd,
			     struct bfd_link_info *info,
			     struct elf_link_hash_entry *h,
			     const char *name,
			     Elf_Internal_Sym *sym,
			     asection **psec,
			     bfd_vma *value,
			     bfd_boolean *dynsym,
			     bfd_boolean override)
{
  bfd_boolean type_change_ok;
  bfd_boolean size_change_ok;
  bfd_boolean skip;
  char *shortname;
  struct elf_link_hash_entry *hi;
  struct bfd_link_hash_entry *bh;
  const struct elf_backend_data *bed;
  bfd_boolean collect;
  bfd_boolean dynamic;
  char *p;
  size_t len, shortlen;
  asection *sec;

  /* If this symbol has a version, and it is the default version, we
     create an indirect symbol from the default name to the fully
     decorated name.  This will cause external references which do not
     specify a version to be bound to this version of the symbol.  */
  p = strchr (name, ELF_VER_CHR);
  if (p == NULL || p[1] != ELF_VER_CHR)
    /* No version, or a hidden (single '@') version: nothing to do.  */
    return TRUE;

  if (override)
    {
      /* We are overridden by an old definition. We need to check if we
	 need to create the indirect symbol from the default name.  */
      hi = elf_link_hash_lookup (elf_hash_table (info), name,
				 TRUE, FALSE, FALSE);
      BFD_ASSERT (hi != NULL);
      if (hi == h)
	return TRUE;
      while (hi->root.type == bfd_link_hash_indirect
	     || hi->root.type == bfd_link_hash_warning)
	{
	  hi = (struct elf_link_hash_entry *) hi->root.u.i.link;
	  if (hi == h)
	    return TRUE;
	}
    }

  bed = get_elf_backend_data (abfd);
  collect = bed->collect;
  dynamic = (abfd->flags & DYNAMIC) != 0;

  /* SHORTNAME is the name with the '@@VERSION' suffix stripped.  */
  shortlen = p - name;
  shortname = bfd_hash_allocate (&info->hash->table, shortlen + 1);
  if (shortname == NULL)
    return FALSE;
  memcpy (shortname, name, shortlen);
  shortname[shortlen] = '\0';

  /* We are going to create a new symbol.  Merge it with any existing
     symbol with this name.  For the purposes of the merge, act as
     though we were defining the symbol we just defined, although we
     actually going to define an indirect symbol.  */
  type_change_ok = FALSE;
  size_change_ok = FALSE;
  sec = *psec;
  if (!_bfd_elf_merge_symbol (abfd, info, shortname, sym, &sec, value,
			      NULL, &hi, &skip, &override,
			      &type_change_ok, &size_change_ok))
    return FALSE;

  if (skip)
    goto nondefault;

  if (! override)
    {
      bh = &hi->root;
      if (! (_bfd_generic_link_add_one_symbol
	     (info, abfd, shortname, BSF_INDIRECT, bfd_ind_section_ptr,
	      0, name, FALSE, collect, &bh)))
	return FALSE;
      hi = (struct elf_link_hash_entry *) bh;
    }
  else
    {
      /* In this case the symbol named SHORTNAME is overriding the
	 indirect symbol we want to add.  We were planning on making
	 SHORTNAME an indirect symbol referring to NAME.  SHORTNAME
	 is the name without a version.  NAME is the fully versioned
	 name, and it is the default version.

	 Overriding means that we already saw a definition for the
	 symbol SHORTNAME in a regular object, and it is overriding
	 the symbol defined in the dynamic object.

	 When this happens, we actually want to change NAME, the
	 symbol we just added, to refer to SHORTNAME.  This will cause
	 references to NAME in the shared object to become references
	 to SHORTNAME in the regular object.  This is what we expect
	 when we override a function in a shared object: that the
	 references in the shared object will be mapped to the
	 definition in the regular object.  */

      while (hi->root.type == bfd_link_hash_indirect
	     || hi->root.type == bfd_link_hash_warning)
	hi = (struct elf_link_hash_entry *) hi->root.u.i.link;

      h->root.type = bfd_link_hash_indirect;
      h->root.u.i.link = (struct bfd_link_hash_entry *) hi;
      if (h->def_dynamic)
	{
	  h->def_dynamic = 0;
	  hi->ref_dynamic = 1;
	  if (hi->ref_regular
	      || hi->def_regular)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, hi))
		return FALSE;
	    }
	}

      /* Now set HI to H, so that the following code will set the
	 other fields correctly.  */
      hi = h;
    }

  /* Check if HI is a warning symbol.  */
  if (hi->root.type == bfd_link_hash_warning)
    hi = (struct elf_link_hash_entry *) hi->root.u.i.link;

  /* If there is a duplicate definition somewhere, then HI may not
     point to an indirect symbol.  We will have reported an error to
     the user in that case.  */

  if (hi->root.type == bfd_link_hash_indirect)
    {
      struct elf_link_hash_entry *ht;

      ht = (struct elf_link_hash_entry *) hi->root.u.i.link;
      (*bed->elf_backend_copy_indirect_symbol) (info, ht, hi);

      /* See if the new flags lead us to realize that the symbol must
	 be dynamic.  */
      if (! *dynsym)
	{
	  if (! dynamic)
	    {
	      if (info->shared
		  || hi->ref_dynamic)
		*dynsym = TRUE;
	    }
	  else
	    {
	      if (hi->ref_regular)
		*dynsym = TRUE;
	    }
	}
    }

  /* We also need to define an indirection from the nondefault version
     of the symbol.  */

nondefault:
  /* Build "name@version" (one '@' instead of '@@'): SHORTLEN leading
     bytes, then the rest of NAME starting after the first '@' -- the
     copied byte counts sum to LEN including the terminating NUL.  */
  len = strlen (name);
  shortname = bfd_hash_allocate (&info->hash->table, len);
  if (shortname == NULL)
    return FALSE;
  memcpy (shortname, name, shortlen);
  memcpy (shortname + shortlen, p + 1, len - shortlen);

  /* Once again, merge with any existing symbol.  */
  type_change_ok = FALSE;
  size_change_ok = FALSE;
  sec = *psec;
  if (!_bfd_elf_merge_symbol (abfd, info, shortname, sym, &sec, value,
			      NULL, &hi, &skip, &override,
			      &type_change_ok, &size_change_ok))
    return FALSE;

  if (skip)
    return TRUE;

  if (override)
    {
      /* Here SHORTNAME is a versioned name, so we don't expect to see
	 the type of override we do in the case above unless it is
	 overridden by a versioned definition.  */
      if (hi->root.type != bfd_link_hash_defined
	  && hi->root.type != bfd_link_hash_defweak)
	(*_bfd_error_handler)
	  (_("%B: unexpected redefinition of indirect versioned symbol `%s'"),
	   abfd, shortname);
    }
  else
    {
      bh = &hi->root;
      if (! (_bfd_generic_link_add_one_symbol
	     (info, abfd, shortname, BSF_INDIRECT,
	      bfd_ind_section_ptr, 0, name, FALSE, collect, &bh)))
	return FALSE;
      hi = (struct elf_link_hash_entry *) bh;

      /* If there is a duplicate definition somewhere, then HI may not
	 point to an indirect symbol.  We will have reported an error
	 to the user in that case.  */

      if (hi->root.type == bfd_link_hash_indirect)
	{
	  (*bed->elf_backend_copy_indirect_symbol) (info, h, hi);

	  /* See if the new flags lead us to realize that the symbol
	     must be dynamic.  */
	  if (! *dynsym)
	    {
	      if (! dynamic)
		{
		  if (info->shared
		      || hi->ref_dynamic)
		    *dynsym = TRUE;
		}
	      else
		{
		  if (hi->ref_regular)
		    *dynsym = TRUE;
		}
	    }
	}
    }

  return TRUE;
}

/* This routine is used to export all defined symbols into the dynamic
   symbol table.  It is called via elf_link_hash_traverse.  */

bfd_boolean
_bfd_elf_export_symbol (struct elf_link_hash_entry *h, void *data)
{
  struct elf_info_failed *eif = data;

  /* Ignore this if we won't export it.  */
  if (!eif->info->export_dynamic && !h->dynamic)
    return TRUE;

  /* Ignore indirect symbols.  These are added by the versioning code.
*/ if (h->root.type == bfd_link_hash_indirect) return TRUE; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if (h->dynindx == -1 && (h->def_regular || h->ref_regular)) { struct bfd_elf_version_tree *t; struct bfd_elf_version_expr *d; for (t = eif->verdefs; t != NULL; t = t->next) { if (t->globals.list != NULL) { d = (*t->match) (&t->globals, NULL, h->root.root.string); if (d != NULL) goto doit; } if (t->locals.list != NULL) { d = (*t->match) (&t->locals, NULL, h->root.root.string); if (d != NULL) return TRUE; } } if (!eif->verdefs) { doit: if (! bfd_elf_link_record_dynamic_symbol (eif->info, h)) { eif->failed = TRUE; return FALSE; } } } return TRUE; } /* Look through the symbols which are defined in other shared libraries and referenced here. Update the list of version dependencies. This will be put into the .gnu.version_r section. This function is called via elf_link_hash_traverse. */ bfd_boolean _bfd_elf_link_find_version_dependencies (struct elf_link_hash_entry *h, void *data) { struct elf_find_verdep_info *rinfo = data; Elf_Internal_Verneed *t; Elf_Internal_Vernaux *a; bfd_size_type amt; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* We only care about symbols defined in shared objects with version information. */ if (!h->def_dynamic || h->def_regular || h->dynindx == -1 || h->verinfo.verdef == NULL) return TRUE; /* See if we already know about this version. */ for (t = elf_tdata (rinfo->output_bfd)->verref; t != NULL; t = t->vn_nextref) { if (t->vn_bfd != h->verinfo.verdef->vd_bfd) continue; for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr) if (a->vna_nodename == h->verinfo.verdef->vd_nodename) return TRUE; break; } /* This is a new version. Add it to tree we are building. 
*/ if (t == NULL) { amt = sizeof *t; t = bfd_zalloc (rinfo->output_bfd, amt); if (t == NULL) { rinfo->failed = TRUE; return FALSE; } t->vn_bfd = h->verinfo.verdef->vd_bfd; t->vn_nextref = elf_tdata (rinfo->output_bfd)->verref; elf_tdata (rinfo->output_bfd)->verref = t; } amt = sizeof *a; a = bfd_zalloc (rinfo->output_bfd, amt); /* Note that we are copying a string pointer here, and testing it above. If bfd_elf_string_from_elf_section is ever changed to discard the string data when low in memory, this will have to be fixed. */ a->vna_nodename = h->verinfo.verdef->vd_nodename; a->vna_flags = h->verinfo.verdef->vd_flags; a->vna_nextptr = t->vn_auxptr; h->verinfo.verdef->vd_exp_refno = rinfo->vers; ++rinfo->vers; a->vna_other = h->verinfo.verdef->vd_exp_refno + 1; t->vn_auxptr = a; return TRUE; } /* Figure out appropriate versions for all the symbols. We may not have the version number script until we have read all of the input files, so until that point we don't know which symbols should be local. This function is called via elf_link_hash_traverse. */ bfd_boolean _bfd_elf_link_assign_sym_version (struct elf_link_hash_entry *h, void *data) { struct elf_assign_sym_version_info *sinfo; struct bfd_link_info *info; const struct elf_backend_data *bed; struct elf_info_failed eif; char *p; bfd_size_type amt; sinfo = data; info = sinfo->info; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* Fix the symbol flags. */ eif.failed = FALSE; eif.info = info; if (! _bfd_elf_fix_symbol_flags (h, &eif)) { if (eif.failed) sinfo->failed = TRUE; return FALSE; } /* We only need version numbers for symbols defined in regular objects. 
*/
  if (!h->def_regular)
    return TRUE;

  bed = get_elf_backend_data (sinfo->output_bfd);
  p = strchr (h->root.root.string, ELF_VER_CHR);
  if (p != NULL && h->verinfo.vertree == NULL)
    {
      struct bfd_elf_version_tree *t;
      bfd_boolean hidden;

      hidden = TRUE;

      /* There are two consecutive ELF_VER_CHR characters if this is
	 not a hidden symbol.  */
      ++p;
      if (*p == ELF_VER_CHR)
	{
	  hidden = FALSE;
	  ++p;
	}

      /* If there is no version string, we can just return out.  */
      if (*p == '\0')
	{
	  if (hidden)
	    h->hidden = 1;
	  return TRUE;
	}

      /* Look for the version.  If we find it, it is no longer weak.  */
      for (t = sinfo->verdefs; t != NULL; t = t->next)
	{
	  if (strcmp (t->name, p) == 0)
	    {
	      size_t len;
	      char *alc;
	      struct bfd_elf_version_expr *d;

	      /* ALC gets the symbol name with the version suffix (and
		 any trailing ELF_VER_CHR) stripped, for matching
		 against the version script patterns.  */
	      len = p - h->root.root.string;
	      alc = bfd_malloc (len);
	      if (alc == NULL)
		return FALSE;
	      memcpy (alc, h->root.root.string, len - 1);
	      alc[len - 1] = '\0';
	      if (alc[len - 2] == ELF_VER_CHR)
		alc[len - 2] = '\0';

	      h->verinfo.vertree = t;
	      t->used = TRUE;
	      d = NULL;

	      if (t->globals.list != NULL)
		d = (*t->match) (&t->globals, NULL, alc);

	      /* See if there is anything to force this symbol to
		 local scope.  */
	      if (d == NULL && t->locals.list != NULL)
		{
		  d = (*t->match) (&t->locals, NULL, alc);
		  if (d != NULL
		      && h->dynindx != -1
		      && ! info->export_dynamic)
		    (*bed->elf_backend_hide_symbol) (info, h, TRUE);
		}

	      free (alc);
	      break;
	    }
	}

      /* If we are building an application, we need to create a
	 version node for this version.  */
      if (t == NULL && info->executable)
	{
	  struct bfd_elf_version_tree **pp;
	  int version_index;

	  /* If we aren't going to export this symbol, we don't need
	     to worry about it.  */
	  if (h->dynindx == -1)
	    return TRUE;

	  amt = sizeof *t;
	  t = bfd_zalloc (sinfo->output_bfd, amt);
	  if (t == NULL)
	    {
	      sinfo->failed = TRUE;
	      return FALSE;
	    }

	  t->name = p;
	  t->name_indx = (unsigned int) -1;
	  t->used = TRUE;

	  version_index = 1;
	  /* Don't count anonymous version tag.  */
	  if (sinfo->verdefs != NULL && sinfo->verdefs->vernum == 0)
	    version_index = 0;
	  for (pp = &sinfo->verdefs; *pp != NULL; pp = &(*pp)->next)
	    ++version_index;
	  t->vernum = version_index;

	  *pp = t;

	  h->verinfo.vertree = t;
	}
      else if (t == NULL)
	{
	  /* We could not find the version for a symbol when
	     generating a shared archive.  Return an error.  */
	  (*_bfd_error_handler)
	    (_("%B: version node not found for symbol %s"),
	     sinfo->output_bfd, h->root.root.string);
	  bfd_set_error (bfd_error_bad_value);
	  sinfo->failed = TRUE;
	  return FALSE;
	}

      if (hidden)
	h->hidden = 1;
    }

  /* If we don't have a version for this symbol, see if we can find
     something.  */
  if (h->verinfo.vertree == NULL && sinfo->verdefs != NULL)
    {
      struct bfd_elf_version_tree *t;
      struct bfd_elf_version_tree *local_ver;
      struct bfd_elf_version_expr *d;

      /* See if can find what version this symbol is in.  If the
	 symbol is supposed to be local, then don't actually register
	 it.  */
      local_ver = NULL;
      for (t = sinfo->verdefs; t != NULL; t = t->next)
	{
	  if (t->globals.list != NULL)
	    {
	      bfd_boolean matched;

	      matched = FALSE;
	      d = NULL;
	      while ((d = (*t->match) (&t->globals, d,
				       h->root.root.string)) != NULL)
		if (d->symver)
		  matched = TRUE;
		else
		  {
		    /* There is a version without definition.  Make
		       the symbol the default definition for this
		       version.  */
		    h->verinfo.vertree = t;
		    local_ver = NULL;
		    d->script = 1;
		    break;
		  }
	      if (d != NULL)
		break;
	      else if (matched)
		/* There is no undefined version for this symbol. Hide the
		   default one.  */
		(*bed->elf_backend_hide_symbol) (info, h, TRUE);
	    }

	  if (t->locals.list != NULL)
	    {
	      d = NULL;
	      while ((d = (*t->match) (&t->locals, d,
				       h->root.root.string)) != NULL)
		{
		  local_ver = t;
		  /* If the match is "*", keep looking for a more
		     explicit, perhaps even global, match.
		     XXX: Shouldn't this be !d->wildcard instead?  */
		  if (d->pattern[0] != '*' || d->pattern[1] != '\0')
		    break;
		}
	      if (d != NULL)
		break;
	    }
	}

      if (local_ver != NULL)
	{
	  h->verinfo.vertree = local_ver;
	  if (h->dynindx != -1
	      && ! info->export_dynamic)
	    {
	      (*bed->elf_backend_hide_symbol) (info, h, TRUE);
	    }
	}
    }

  return TRUE;
}

/* Read and swap the relocs from the section indicated by SHDR.  This
   may be either a REL or a RELA section.  The relocations are
   translated into RELA relocations and stored in INTERNAL_RELOCS,
   which should have already been allocated to contain enough space.
   The EXTERNAL_RELOCS are a buffer where the external form of the
   relocations should be stored.

   Returns FALSE if something goes wrong.  */

static bfd_boolean
elf_link_read_relocs_from_section (bfd *abfd,
				   asection *sec,
				   Elf_Internal_Shdr *shdr,
				   void *external_relocs,
				   Elf_Internal_Rela *internal_relocs)
{
  const struct elf_backend_data *bed;
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  const bfd_byte *erela;
  const bfd_byte *erelaend;
  Elf_Internal_Rela *irela;
  Elf_Internal_Shdr *symtab_hdr;
  size_t nsyms;

  /* Position ourselves at the start of the section.  */
  if (bfd_seek (abfd, shdr->sh_offset, SEEK_SET) != 0)
    return FALSE;

  /* Read the relocations.  */
  if (bfd_bread (external_relocs, shdr->sh_size, abfd) != shdr->sh_size)
    return FALSE;

  /* NOTE(review): sh_entsize comes from the input file; a zero value
     would divide by zero here -- presumably rejected earlier, but
     worth confirming for hostile inputs.  */
  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  nsyms = symtab_hdr->sh_size / symtab_hdr->sh_entsize;

  bed = get_elf_backend_data (abfd);

  /* Convert the external relocations to the internal format.
*/ if (shdr->sh_entsize == bed->s->sizeof_rel) swap_in = bed->s->swap_reloc_in; else if (shdr->sh_entsize == bed->s->sizeof_rela) swap_in = bed->s->swap_reloca_in; else { bfd_set_error (bfd_error_wrong_format); return FALSE; } erela = external_relocs; erelaend = erela + shdr->sh_size; irela = internal_relocs; while (erela < erelaend) { bfd_vma r_symndx; (*swap_in) (abfd, erela, irela); r_symndx = ELF32_R_SYM (irela->r_info); if (bed->s->arch_size == 64) r_symndx >>= 24; if ((size_t) r_symndx >= nsyms) { (*_bfd_error_handler) (_("%B: bad reloc symbol index (0x%lx >= 0x%lx)" " for offset 0x%lx in section `%A'"), abfd, sec, (unsigned long) r_symndx, (unsigned long) nsyms, irela->r_offset); bfd_set_error (bfd_error_bad_value); return FALSE; } irela += bed->s->int_rels_per_ext_rel; erela += shdr->sh_entsize; } return TRUE; } /* Read and swap the relocs for a section O. They may have been cached. If the EXTERNAL_RELOCS and INTERNAL_RELOCS arguments are not NULL, they are used as buffers to read into. They are known to be large enough. If the INTERNAL_RELOCS relocs argument is NULL, the return value is allocated using either malloc or bfd_alloc, according to the KEEP_MEMORY argument. If O has two relocation sections (both REL and RELA relocations), then the REL_HDR relocations will appear first in INTERNAL_RELOCS, followed by the REL_HDR2 relocations. 
*/ Elf_Internal_Rela * _bfd_elf_link_read_relocs (bfd *abfd, asection *o, void *external_relocs, Elf_Internal_Rela *internal_relocs, bfd_boolean keep_memory) { Elf_Internal_Shdr *rel_hdr; void *alloc1 = NULL; Elf_Internal_Rela *alloc2 = NULL; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (elf_section_data (o)->relocs != NULL) return elf_section_data (o)->relocs; if (o->reloc_count == 0) return NULL; rel_hdr = &elf_section_data (o)->rel_hdr; if (internal_relocs == NULL) { bfd_size_type size; size = o->reloc_count; size *= bed->s->int_rels_per_ext_rel * sizeof (Elf_Internal_Rela); if (keep_memory) internal_relocs = bfd_alloc (abfd, size); else internal_relocs = alloc2 = bfd_malloc (size); if (internal_relocs == NULL) goto error_return; } if (external_relocs == NULL) { bfd_size_type size = rel_hdr->sh_size; if (elf_section_data (o)->rel_hdr2) size += elf_section_data (o)->rel_hdr2->sh_size; alloc1 = bfd_malloc (size); if (alloc1 == NULL) goto error_return; external_relocs = alloc1; } if (!elf_link_read_relocs_from_section (abfd, o, rel_hdr, external_relocs, internal_relocs)) goto error_return; if (elf_section_data (o)->rel_hdr2 && (!elf_link_read_relocs_from_section (abfd, o, elf_section_data (o)->rel_hdr2, ((bfd_byte *) external_relocs) + rel_hdr->sh_size, internal_relocs + (NUM_SHDR_ENTRIES (rel_hdr) * bed->s->int_rels_per_ext_rel)))) goto error_return; /* Cache the results for next time, if we can. */ if (keep_memory) elf_section_data (o)->relocs = internal_relocs; if (alloc1 != NULL) free (alloc1); /* Don't free alloc2, since if it was allocated we are passing it back (under the name of internal_relocs). */ return internal_relocs; error_return: if (alloc1 != NULL) free (alloc1); if (alloc2 != NULL) free (alloc2); return NULL; } /* Compute the size of, and allocate space for, REL_HDR which is the section header for a section containing relocations for O. 
*/ bfd_boolean _bfd_elf_link_size_reloc_section (bfd *abfd, Elf_Internal_Shdr *rel_hdr, asection *o) { bfd_size_type reloc_count; bfd_size_type num_rel_hashes; /* Figure out how many relocations there will be. */ if (rel_hdr == &elf_section_data (o)->rel_hdr) reloc_count = elf_section_data (o)->rel_count; else reloc_count = elf_section_data (o)->rel_count2; num_rel_hashes = o->reloc_count; if (num_rel_hashes < reloc_count) num_rel_hashes = reloc_count; /* That allows us to calculate the size of the section. */ rel_hdr->sh_size = rel_hdr->sh_entsize * reloc_count; /* The contents field must last into write_object_contents, so we allocate it with bfd_alloc rather than malloc. Also since we cannot be sure that the contents will actually be filled in, we zero the allocated space. */ rel_hdr->contents = bfd_zalloc (abfd, rel_hdr->sh_size); if (rel_hdr->contents == NULL && rel_hdr->sh_size != 0) return FALSE; /* We only allocate one set of hash entries, so we only do it the first time we are called. */ if (elf_section_data (o)->rel_hashes == NULL && num_rel_hashes) { struct elf_link_hash_entry **p; p = bfd_zmalloc (num_rel_hashes * sizeof (struct elf_link_hash_entry *)); if (p == NULL) return FALSE; elf_section_data (o)->rel_hashes = p; } return TRUE; } /* Copy the relocations indicated by the INTERNAL_RELOCS (which originated from the section given by INPUT_REL_HDR) to the OUTPUT_BFD. 
*/ bfd_boolean _bfd_elf_link_output_relocs (bfd *output_bfd, asection *input_section, Elf_Internal_Shdr *input_rel_hdr, Elf_Internal_Rela *internal_relocs, struct elf_link_hash_entry **rel_hash ATTRIBUTE_UNUSED) { Elf_Internal_Rela *irela; Elf_Internal_Rela *irelaend; bfd_byte *erel; Elf_Internal_Shdr *output_rel_hdr; asection *output_section; unsigned int *rel_countp = NULL; const struct elf_backend_data *bed; void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *); output_section = input_section->output_section; output_rel_hdr = NULL; if (elf_section_data (output_section)->rel_hdr.sh_entsize == input_rel_hdr->sh_entsize) { output_rel_hdr = &elf_section_data (output_section)->rel_hdr; rel_countp = &elf_section_data (output_section)->rel_count; } else if (elf_section_data (output_section)->rel_hdr2 && (elf_section_data (output_section)->rel_hdr2->sh_entsize == input_rel_hdr->sh_entsize)) { output_rel_hdr = elf_section_data (output_section)->rel_hdr2; rel_countp = &elf_section_data (output_section)->rel_count2; } else { (*_bfd_error_handler) (_("%B: relocation size mismatch in %B section %A"), output_bfd, input_section->owner, input_section); bfd_set_error (bfd_error_wrong_object_format); return FALSE; } bed = get_elf_backend_data (output_bfd); if (input_rel_hdr->sh_entsize == bed->s->sizeof_rel) swap_out = bed->s->swap_reloc_out; else if (input_rel_hdr->sh_entsize == bed->s->sizeof_rela) swap_out = bed->s->swap_reloca_out; else abort (); erel = output_rel_hdr->contents; erel += *rel_countp * input_rel_hdr->sh_entsize; irela = internal_relocs; irelaend = irela + (NUM_SHDR_ENTRIES (input_rel_hdr) * bed->s->int_rels_per_ext_rel); while (irela < irelaend) { (*swap_out) (output_bfd, irela, erel); irela += bed->s->int_rels_per_ext_rel; erel += input_rel_hdr->sh_entsize; } /* Bump the counter, so that we know where to add the next set of relocations. */ *rel_countp += NUM_SHDR_ENTRIES (input_rel_hdr); return TRUE; } /* Make weak undefined symbols in PIE dynamic. 
*/ bfd_boolean _bfd_elf_link_hash_fixup_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *h) { if (info->pie && h->dynindx == -1 && h->root.type == bfd_link_hash_undefweak) return bfd_elf_link_record_dynamic_symbol (info, h); return TRUE; } /* Fix up the flags for a symbol. This handles various cases which can only be fixed after all the input files are seen. This is currently called by both adjust_dynamic_symbol and assign_sym_version, which is unnecessary but perhaps more robust in the face of future changes. */ bfd_boolean _bfd_elf_fix_symbol_flags (struct elf_link_hash_entry *h, struct elf_info_failed *eif) { const struct elf_backend_data *bed = NULL; /* If this symbol was mentioned in a non-ELF file, try to set DEF_REGULAR and REF_REGULAR correctly. This is the only way to permit a non-ELF file to correctly refer to a symbol defined in an ELF dynamic object. */ if (h->non_elf) { while (h->root.type == bfd_link_hash_indirect) h = (struct elf_link_hash_entry *) h->root.u.i.link; if (h->root.type != bfd_link_hash_defined && h->root.type != bfd_link_hash_defweak) { h->ref_regular = 1; h->ref_regular_nonweak = 1; } else { if (h->root.u.def.section->owner != NULL && (bfd_get_flavour (h->root.u.def.section->owner) == bfd_target_elf_flavour)) { h->ref_regular = 1; h->ref_regular_nonweak = 1; } else h->def_regular = 1; } if (h->dynindx == -1 && (h->def_dynamic || h->ref_dynamic)) { if (! bfd_elf_link_record_dynamic_symbol (eif->info, h)) { eif->failed = TRUE; return FALSE; } } } else { /* Unfortunately, NON_ELF is only correct if the symbol was first seen in a non-ELF file. Fortunately, if the symbol was first seen in an ELF file, we're probably OK unless the symbol was defined in a non-ELF file. Catch that case here. FIXME: We're still in trouble if the symbol was first seen in a dynamic object, and then later in a non-ELF regular object. 
*/ if ((h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak) && !h->def_regular && (h->root.u.def.section->owner != NULL ? (bfd_get_flavour (h->root.u.def.section->owner) != bfd_target_elf_flavour) : (bfd_is_abs_section (h->root.u.def.section) && !h->def_dynamic))) h->def_regular = 1; } /* Backend specific symbol fixup. */ if (elf_hash_table (eif->info)->dynobj) { bed = get_elf_backend_data (elf_hash_table (eif->info)->dynobj); if (bed->elf_backend_fixup_symbol && !(*bed->elf_backend_fixup_symbol) (eif->info, h)) return FALSE; } /* If this is a final link, and the symbol was defined as a common symbol in a regular object file, and there was no definition in any dynamic object, then the linker will have allocated space for the symbol in a common section but the DEF_REGULAR flag will not have been set. */ if (h->root.type == bfd_link_hash_defined && !h->def_regular && h->ref_regular && !h->def_dynamic && (h->root.u.def.section->owner->flags & DYNAMIC) == 0) h->def_regular = 1; /* If -Bsymbolic was used (which means to bind references to global symbols to the definition within the shared object), and this symbol was defined in a regular object, then it actually doesn't need a PLT entry. Likewise, if the symbol has non-default visibility. If the symbol has hidden or internal visibility, we will force it local. */ if (h->needs_plt && eif->info->shared && is_elf_hash_table (eif->info->hash) && (SYMBOLIC_BIND (eif->info, h) || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) && h->def_regular) { bfd_boolean force_local; force_local = (ELF_ST_VISIBILITY (h->other) == STV_INTERNAL || ELF_ST_VISIBILITY (h->other) == STV_HIDDEN); (*bed->elf_backend_hide_symbol) (eif->info, h, force_local); } /* If a weak undefined symbol has non-default visibility, we also hide it from the dynamic linker. 
*/
  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
      && h->root.type == bfd_link_hash_undefweak)
    {
      const struct elf_backend_data *bed;
      bed = get_elf_backend_data (elf_hash_table (eif->info)->dynobj);
      (*bed->elf_backend_hide_symbol) (eif->info, h, TRUE);
    }

  /* If this is a weak defined symbol in a dynamic object, and we know
     the real definition in the dynamic object, copy interesting flags
     over to the real definition.  */
  if (h->u.weakdef != NULL)
    {
      struct elf_link_hash_entry *weakdef;

      weakdef = h->u.weakdef;
      if (h->root.type == bfd_link_hash_indirect)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      BFD_ASSERT (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak);
      BFD_ASSERT (weakdef->root.type == bfd_link_hash_defined
		  || weakdef->root.type == bfd_link_hash_defweak);
      BFD_ASSERT (weakdef->def_dynamic);

      /* If the real definition is defined by a regular object file,
	 don't do anything special.  See the longer description in
	 _bfd_elf_adjust_dynamic_symbol, below.  */
      if (weakdef->def_regular)
	h->u.weakdef = NULL;
      else
	/* NOTE(review): BED here is the function-level pointer, which
	   is NULL when no dynobj exists -- confirm a weakdef implies a
	   dynobj before relying on this path.  */
	(*bed->elf_backend_copy_indirect_symbol) (eif->info, weakdef,
						  h);
    }

  return TRUE;
}

/* Make the backend pick a good value for a dynamic symbol.  This is
   called via elf_link_hash_traverse, and also calls itself
   recursively.  */

bfd_boolean
_bfd_elf_adjust_dynamic_symbol (struct elf_link_hash_entry *h, void *data)
{
  struct elf_info_failed *eif = data;
  bfd *dynobj;
  const struct elf_backend_data *bed;

  if (! is_elf_hash_table (eif->info->hash))
    return FALSE;

  if (h->root.type == bfd_link_hash_warning)
    {
      h->got = elf_hash_table (eif->info)->init_got_offset;
      h->plt = elf_hash_table (eif->info)->init_plt_offset;

      /* When warning symbols are created, they **replace** the "real"
	 entry in the hash table, thus we never get to see the real
	 symbol in a hash traversal.  So look at it now.  */
      h = (struct elf_link_hash_entry *) h->root.u.i.link;
    }

  /* Ignore indirect symbols.  These are added by the versioning code.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  /* Fix the symbol flags.  */
  if (! _bfd_elf_fix_symbol_flags (h, eif))
    return FALSE;

  /* If this symbol does not require a PLT entry, and it is not
     defined by a dynamic object, or is not referenced by a regular
     object, ignore it.  We do have to handle a weak defined symbol,
     even if no regular object refers to it, if we decided to add it
     to the dynamic symbol table.  FIXME: Do we normally need to worry
     about symbols which are defined by one dynamic object and
     referenced by another one?  */
  if (!h->needs_plt
      && (h->def_regular
	  || !h->def_dynamic
	  || (!h->ref_regular
	      && (h->u.weakdef == NULL || h->u.weakdef->dynindx == -1))))
    {
      h->plt = elf_hash_table (eif->info)->init_plt_offset;
      return TRUE;
    }

  /* If we've already adjusted this symbol, don't do it again.  This
     can happen via a recursive call.  */
  if (h->dynamic_adjusted)
    return TRUE;

  /* Don't look at this symbol again.  Note that we must set this
     after checking the above conditions, because we may look at a
     symbol once, decide not to do anything, and then get called
     recursively later after REF_REGULAR is set below.  */
  h->dynamic_adjusted = 1;

  /* If this is a weak definition, and we know a real definition, and
     the real symbol is not itself defined by a regular object file,
     then get a good value for the real definition.  We handle the
     real symbol first, for the convenience of the backend routine.

     Note that there is a confusing case here.  If the real definition
     is defined by a regular object file, we don't get the real symbol
     from the dynamic object, but we do get the weak symbol.  If the
     processor backend uses a COPY reloc, then if some routine in the
     dynamic object changes the real symbol, we will not see that
     change in the corresponding weak symbol.  This is the way other
     ELF linkers work as well, and seems to be a result of the shared
     library model.

     I will clarify this issue.  Most SVR4 shared libraries define the
     variable _timezone and define timezone as a weak synonym.  The
     tzset call changes _timezone.  If you write
       extern int timezone;
       int _timezone = 5;
       int main () { tzset (); printf ("%d %d\n", timezone, _timezone); }
     you might expect that, since timezone is a synonym for _timezone,
     the same number will print both times.  However, if the processor
     backend uses a COPY reloc, then actually timezone will be copied
     into your process image, and, since you define _timezone
     yourself, _timezone will not.  Thus timezone and _timezone will
     wind up at different memory locations.  The tzset call will set
     _timezone, leaving timezone unchanged.  */

  if (h->u.weakdef != NULL)
    {
      /* If we get to this point, we know there is an implicit
	 reference by a regular object file via the weak symbol H.
	 FIXME: Is this really true?  What if the traversal finds
	 H->U.WEAKDEF before it finds H?  */
      h->u.weakdef->ref_regular = 1;

      if (! _bfd_elf_adjust_dynamic_symbol (h->u.weakdef, eif))
	return FALSE;
    }

  /* If a symbol has no type and no size and does not require a PLT
     entry, then we are probably about to do the wrong thing here: we
     are probably going to create a COPY reloc for an empty object.
     This case can arise when a shared object is built with assembly
     code, and the assembly code fails to set the symbol type.  */
  if (h->size == 0
      && h->type == STT_NOTYPE
      && !h->needs_plt)
    (*_bfd_error_handler)
      (_("warning: type and size of dynamic symbol `%s' are not defined"),
       h->root.root.string);

  dynobj = elf_hash_table (eif->info)->dynobj;
  bed = get_elf_backend_data (dynobj);
  if (! (*bed->elf_backend_adjust_dynamic_symbol) (eif->info, h))
    {
      eif->failed = TRUE;
      return FALSE;
    }

  return TRUE;
}

/* Adjust the dynamic symbol, H, for copy in the dynamic bss section,
   DYNBSS.
*/

bfd_boolean
_bfd_elf_adjust_dynamic_copy (struct elf_link_hash_entry *h,
			      asection *dynbss)
{
  unsigned int p2align;
  bfd_vma align_mask;
  asection *defsec = h->root.u.def.section;

  /* The section alignment of the definition is the maximum alignment
     requirement of any symbol defined in that section.  We do not know
     this particular symbol's own requirement, so start from the section
     alignment and shrink it until it matches the low bits of the
     symbol's address.  */
  p2align = bfd_get_section_alignment (defsec->owner, defsec);
  align_mask = ((bfd_vma) 1 << p2align) - 1;
  while ((h->root.u.def.value & align_mask) != 0)
    {
      align_mask >>= 1;
      --p2align;
    }

  if (p2align > bfd_get_section_alignment (dynbss->owner, dynbss))
    {
      /* Raise DYNBSS's alignment so the copied object stays aligned.  */
      if (! bfd_set_section_alignment (dynbss->owner, dynbss, p2align))
	return FALSE;
    }

  /* Pad DYNBSS so the symbol itself lands on an aligned boundary.  */
  dynbss->size = BFD_ALIGN (dynbss->size, align_mask + 1);

  /* Redefine the symbol as living at the current end of DYNBSS.  */
  h->root.u.def.section = dynbss;
  h->root.u.def.value = dynbss->size;

  /* Reserve room in DYNBSS for the symbol's data.  */
  dynbss->size += h->size;

  return TRUE;
}

/* Adjust all external symbols pointing into SEC_MERGE sections
   to reflect the object merging within the sections.
*/

bfd_boolean
_bfd_elf_link_sec_merge_syms (struct elf_link_hash_entry *h, void *data)
{
  asection *sec;

  /* Warning symbols proxy the real entry; follow the link.  */
  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && ((sec = h->root.u.def.section)->flags & SEC_MERGE)
      && sec->sec_info_type == ELF_INFO_TYPE_MERGE)
    {
      bfd *output_bfd = data;

      /* Remap the symbol's value (and possibly its section) to the
	 post-merge location of the entity it points at.  */
      h->root.u.def.value =
	_bfd_merged_section_offset (output_bfd,
				    &h->root.u.def.section,
				    elf_section_data (sec)->sec_info,
				    h->root.u.def.value);
    }

  return TRUE;
}

/* Returns false if the symbol referred to by H should be considered
   to resolve local to the current module, and true if it should be
   considered to bind dynamically.  */

bfd_boolean
_bfd_elf_dynamic_symbol_p (struct elf_link_hash_entry *h,
			   struct bfd_link_info *info,
			   bfd_boolean ignore_protected)
{
  bfd_boolean binding_stays_local_p;
  const struct elf_backend_data *bed;
  struct elf_link_hash_table *hash_table;

  if (h == NULL)
    return FALSE;

  /* Resolve indirection and warning proxies to the real entry.  */
  while (h->root.type == bfd_link_hash_indirect
	 || h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* If it was forced local, then clearly it's not dynamic.  */
  if (h->dynindx == -1)
    return FALSE;
  if (h->forced_local)
    return FALSE;

  /* Identify the cases where name binding rules say that a
     visible symbol resolves locally.  */
  binding_stays_local_p = info->executable || SYMBOLIC_BIND (info, h);

  switch (ELF_ST_VISIBILITY (h->other))
    {
    case STV_INTERNAL:
    case STV_HIDDEN:
      return FALSE;

    case STV_PROTECTED:
      hash_table = elf_hash_table (info);
      if (!is_elf_hash_table (hash_table))
	return FALSE;

      bed = get_elf_backend_data (hash_table->dynobj);

      /* Proper resolution for function pointer equality may require
	 that these symbols perhaps be resolved dynamically, even though
	 we should be resolving them to the current module.  */
      if (!ignore_protected || !bed->is_function_type (h->type))
	binding_stays_local_p = TRUE;
      break;

    default:
      break;
    }

  /* If it isn't defined locally, then clearly it's dynamic.  */
  if (!h->def_regular)
    return TRUE;

  /* Otherwise, the symbol is dynamic if binding rules don't tell
     us that it remains local.  */
  return !binding_stays_local_p;
}

/* Return true if the symbol referred to by H should be considered
   to resolve local to the current module, and false otherwise.  Differs
   from (the inverse of) _bfd_elf_dynamic_symbol_p in the treatment
   of undefined symbols and weak symbols.  */

bfd_boolean
_bfd_elf_symbol_refs_local_p (struct elf_link_hash_entry *h,
			      struct bfd_link_info *info,
			      bfd_boolean local_protected)
{
  const struct elf_backend_data *bed;
  struct elf_link_hash_table *hash_table;

  /* If it's a local sym, of course we resolve locally.  */
  if (h == NULL)
    return TRUE;

  /* Common symbols that become definitions don't get the DEF_REGULAR
     flag set, so test it first, and don't bail out.  */
  if (ELF_COMMON_DEF_P (h))
    /* Do nothing.  */;
  /* If we don't have a definition in a regular file, then we can't
     resolve locally.  The sym is either undefined or dynamic.  */
  else if (!h->def_regular)
    return FALSE;

  /* Forced local symbols resolve locally.  */
  if (h->forced_local)
    return TRUE;

  /* As do non-dynamic symbols.  */
  if (h->dynindx == -1)
    return TRUE;

  /* At this point, we know the symbol is defined and dynamic.  In an
     executable it must resolve locally, likewise when building symbolic
     shared libraries.  */
  if (info->executable || SYMBOLIC_BIND (info, h))
    return TRUE;

  /* Now deal with defined dynamic symbols in shared libraries.  Ones
     with default visibility might not resolve locally.  */
  if (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    return FALSE;

  /* However, STV_HIDDEN or STV_INTERNAL ones must be local.  */
  if (ELF_ST_VISIBILITY (h->other) != STV_PROTECTED)
    return TRUE;

  hash_table = elf_hash_table (info);
  if (!is_elf_hash_table (hash_table))
    return TRUE;

  bed = get_elf_backend_data (hash_table->dynobj);

  /* STV_PROTECTED non-function symbols are local.  */
  if (!bed->is_function_type (h->type))
    return TRUE;

  /* Function pointer equality tests may require that STV_PROTECTED
     symbols be treated as dynamic symbols, even when we know that the
     dynamic linker will resolve them locally.  */
  return local_protected;
}

/* Caches some TLS segment info, and ensures that the TLS segment vma is
   aligned.  Returns the first TLS output section.  */

struct bfd_section *
_bfd_elf_tls_setup (bfd *obfd, struct bfd_link_info *info)
{
  struct bfd_section *sec, *tls;
  unsigned int align = 0;

  /* Find the first thread-local output section; TLS sections are
     assumed to be contiguous in the output section list.  */
  for (sec = obfd->sections; sec != NULL; sec = sec->next)
    if ((sec->flags & SEC_THREAD_LOCAL) != 0)
      break;
  tls = sec;

  /* Compute the maximum alignment over the contiguous TLS run.  */
  for (; sec != NULL && (sec->flags & SEC_THREAD_LOCAL) != 0;
       sec = sec->next)
    if (sec->alignment_power > align)
      align = sec->alignment_power;

  elf_hash_table (info)->tls_sec = tls;

  /* Ensure the alignment of the first section is the largest alignment,
     so that the tls segment starts aligned.  */
  if (tls != NULL)
    tls->alignment_power = align;

  return tls;
}

/* Return TRUE iff this is a non-common, definition of a non-function symbol.  */

static bfd_boolean
is_global_data_symbol_definition (bfd *abfd ATTRIBUTE_UNUSED,
				  Elf_Internal_Sym *sym)
{
  const struct elf_backend_data *bed;

  /* Local symbols do not count, but target specific ones might.  */
  if (ELF_ST_BIND (sym->st_info) != STB_GLOBAL
      && ELF_ST_BIND (sym->st_info) < STB_LOOS)
    return FALSE;

  bed = get_elf_backend_data (abfd);
  /* Function symbols do not count.  */
  if (bed->is_function_type (ELF_ST_TYPE (sym->st_info)))
    return FALSE;

  /* If the section is undefined, then so is the symbol.  */
  if (sym->st_shndx == SHN_UNDEF)
    return FALSE;

  /* If the symbol is defined in the common section, then
     it is a common definition and so does not count.  */
  if (bed->common_definition (sym))
    return FALSE;

  /* If the symbol is in a target specific section then we
     must rely upon the backend to tell us what it is.  */
  if (sym->st_shndx >= SHN_LORESERVE && sym->st_shndx < SHN_ABS)
    /* FIXME - this function is not coded yet:

       return _bfd_is_global_symbol_definition (abfd, sym);

       Instead for now assume that the definition is not global,
       Even if this is wrong, at least the linker will behave
       in the same way that it used to do.  */
    return FALSE;

  return TRUE;
}

/* Search the symbol table of the archive element of the archive ABFD
   whose archive map contains a mention of SYMDEF, and determine if
   the symbol is defined in this element.  */

static bfd_boolean
elf_link_is_defined_archive_symbol (bfd * abfd, carsym * symdef)
{
  Elf_Internal_Shdr * hdr;
  bfd_size_type symcount;
  bfd_size_type extsymcount;
  bfd_size_type extsymoff;
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Sym *isym;
  Elf_Internal_Sym *isymend;
  bfd_boolean result;

  abfd = _bfd_get_elt_at_filepos (abfd, symdef->file_offset);
  if (abfd == NULL)
    return FALSE;

  if (! bfd_check_format (abfd, bfd_object))
    return FALSE;

  /* If we have already included the element containing this symbol in the
     link then we do not need to include it again.  Just claim that any symbol
     it contains is not a definition, so that our caller will not decide to
     (re)include this element.  */
  if (abfd->archive_pass)
    return FALSE;

  /* Select the appropriate symbol table.  */
  if ((abfd->flags & DYNAMIC) == 0 || elf_dynsymtab (abfd) == 0)
    hdr = &elf_tdata (abfd)->symtab_hdr;
  else
    hdr = &elf_tdata (abfd)->dynsymtab_hdr;

  symcount = hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;

  /* The sh_info field of the symtab header tells us where the
     external symbols start.  We don't care about the local symbols.  */
  if (elf_bad_symtab (abfd))
    {
      extsymcount = symcount;
      extsymoff = 0;
    }
  else
    {
      extsymcount = symcount - hdr->sh_info;
      extsymoff = hdr->sh_info;
    }

  if (extsymcount == 0)
    return FALSE;

  /* Read in the symbol table.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, extsymcount, extsymoff,
				  NULL, NULL, NULL);
  if (isymbuf == NULL)
    return FALSE;

  /* Scan the symbol table looking for SYMDEF.  */
  result = FALSE;
  for (isym = isymbuf, isymend = isymbuf + extsymcount;
       isym < isymend;
       isym++)
    {
      const char *name;

      name = bfd_elf_string_from_elf_section (abfd, hdr->sh_link,
					      isym->st_name);
      if (name == NULL)
	break;

      if (strcmp (name, symdef->name) == 0)
	{
	  result = is_global_data_symbol_definition (abfd, isym);
	  break;
	}
    }

  free (isymbuf);

  return result;
}

/* Add an entry to the .dynamic table.  */

bfd_boolean
_bfd_elf_add_dynamic_entry (struct bfd_link_info *info,
			    bfd_vma tag,
			    bfd_vma val)
{
  struct elf_link_hash_table *hash_table;
  const struct elf_backend_data *bed;
  asection *s;
  bfd_size_type newsize;
  bfd_byte *newcontents;
  Elf_Internal_Dyn dyn;

  hash_table = elf_hash_table (info);
  if (! is_elf_hash_table (hash_table))
    return FALSE;

  bed = get_elf_backend_data (hash_table->dynobj);
  s = bfd_get_section_by_name (hash_table->dynobj, ".dynamic");
  BFD_ASSERT (s != NULL);

  /* Grow the section contents by one dynamic entry and append the
     new tag/value pair in target byte order.  */
  newsize = s->size + bed->s->sizeof_dyn;
  newcontents = bfd_realloc (s->contents, newsize);
  if (newcontents == NULL)
    return FALSE;

  dyn.d_tag = tag;
  dyn.d_un.d_val = val;
  bed->s->swap_dyn_out (hash_table->dynobj, &dyn, newcontents + s->size);

  s->size = newsize;
  s->contents = newcontents;

  return TRUE;
}

/* Add a DT_NEEDED entry for this dynamic object if DO_IT is true,
   otherwise just check whether one already exists.  Returns -1 on error,
   1 if a DT_NEEDED tag already exists, and 0 on success.
*/ static int elf_add_dt_needed_tag (bfd *abfd, struct bfd_link_info *info, const char *soname, bfd_boolean do_it) { struct elf_link_hash_table *hash_table; bfd_size_type oldsize; bfd_size_type strindex; if (!_bfd_elf_link_create_dynstrtab (abfd, info)) return -1; hash_table = elf_hash_table (info); oldsize = _bfd_elf_strtab_size (hash_table->dynstr); strindex = _bfd_elf_strtab_add (hash_table->dynstr, soname, FALSE); if (strindex == (bfd_size_type) -1) return -1; if (oldsize == _bfd_elf_strtab_size (hash_table->dynstr)) { asection *sdyn; const struct elf_backend_data *bed; bfd_byte *extdyn; bed = get_elf_backend_data (hash_table->dynobj); sdyn = bfd_get_section_by_name (hash_table->dynobj, ".dynamic"); if (sdyn != NULL) for (extdyn = sdyn->contents; extdyn < sdyn->contents + sdyn->size; extdyn += bed->s->sizeof_dyn) { Elf_Internal_Dyn dyn; bed->s->swap_dyn_in (hash_table->dynobj, extdyn, &dyn); if (dyn.d_tag == DT_NEEDED && dyn.d_un.d_val == strindex) { _bfd_elf_strtab_delref (hash_table->dynstr, strindex); return 1; } } } if (do_it) { if (!_bfd_elf_link_create_dynamic_sections (hash_table->dynobj, info)) return -1; if (!_bfd_elf_add_dynamic_entry (info, DT_NEEDED, strindex)) return -1; } else /* We were just checking for existence of the tag. */ _bfd_elf_strtab_delref (hash_table->dynstr, strindex); return 0; } /* Sort symbol by value and section. */ static int elf_sort_symbol (const void *arg1, const void *arg2) { const struct elf_link_hash_entry *h1; const struct elf_link_hash_entry *h2; bfd_signed_vma vdiff; h1 = *(const struct elf_link_hash_entry **) arg1; h2 = *(const struct elf_link_hash_entry **) arg2; vdiff = h1->root.u.def.value - h2->root.u.def.value; if (vdiff != 0) return vdiff > 0 ? 1 : -1; else { long sdiff = h1->root.u.def.section->id - h2->root.u.def.section->id; if (sdiff != 0) return sdiff > 0 ? 1 : -1; } return 0; } /* This function is used to adjust offsets into .dynstr for dynamic symbols. This is called via elf_link_hash_traverse. 
*/

static bfd_boolean
elf_adjust_dynstr_offsets (struct elf_link_hash_entry *h, void *data)
{
  struct elf_strtab_hash *dynstr = data;

  /* Warning symbols proxy the real entry; follow the link.  */
  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Only symbols in the dynamic symbol table carry .dynstr indices.  */
  if (h->dynindx != -1)
    h->dynstr_index = _bfd_elf_strtab_offset (dynstr, h->dynstr_index);
  return TRUE;
}

/* Assign string offsets in .dynstr, update all structures referencing
   them.  */

static bfd_boolean
elf_finalize_dynstr (bfd *output_bfd, struct bfd_link_info *info)
{
  struct elf_link_hash_table *hash_table = elf_hash_table (info);
  struct elf_link_local_dynamic_entry *entry;
  struct elf_strtab_hash *dynstr = hash_table->dynstr;
  bfd *dynobj = hash_table->dynobj;
  asection *sdyn;
  bfd_size_type size;
  const struct elf_backend_data *bed;
  bfd_byte *extdyn;

  /* Lay out the string table; after this, stored indices must be
     mapped to final offsets via _bfd_elf_strtab_offset.  */
  _bfd_elf_strtab_finalize (dynstr);
  size = _bfd_elf_strtab_size (dynstr);

  bed = get_elf_backend_data (dynobj);
  sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
  BFD_ASSERT (sdyn != NULL);

  /* Update all .dynamic entries referencing .dynstr strings.  */
  for (extdyn = sdyn->contents;
       extdyn < sdyn->contents + sdyn->size;
       extdyn += bed->s->sizeof_dyn)
    {
      Elf_Internal_Dyn dyn;

      bed->s->swap_dyn_in (dynobj, extdyn, &dyn);
      switch (dyn.d_tag)
	{
	case DT_STRSZ:
	  dyn.d_un.d_val = size;
	  break;
	case DT_NEEDED:
	case DT_SONAME:
	case DT_RPATH:
	case DT_RUNPATH:
	case DT_FILTER:
	case DT_AUXILIARY:
	  /* These tags hold string-table indices; remap to offsets.  */
	  dyn.d_un.d_val = _bfd_elf_strtab_offset (dynstr, dyn.d_un.d_val);
	  break;
	default:
	  /* Entry does not reference .dynstr; no need to swap it back.  */
	  continue;
	}
      bed->s->swap_dyn_out (dynobj, &dyn, extdyn);
    }

  /* Now update local dynamic symbols.  */
  for (entry = hash_table->dynlocal; entry ; entry = entry->next)
    entry->isym.st_name = _bfd_elf_strtab_offset (dynstr,
						  entry->isym.st_name);

  /* And the rest of dynamic symbols.  */
  elf_link_hash_traverse (hash_table, elf_adjust_dynstr_offsets, dynstr);

  /* Adjust version definitions.  */
  if (elf_tdata (output_bfd)->cverdefs)
    {
      asection *s;
      bfd_byte *p;
      bfd_size_type i;
      Elf_Internal_Verdef def;
      Elf_Internal_Verdaux defaux;

      s = bfd_get_section_by_name (dynobj, ".gnu.version_d");
      p = s->contents;
      /* Walk the on-disk verdef chain, remapping each vda_name.  */
      do
	{
	  _bfd_elf_swap_verdef_in (output_bfd, (Elf_External_Verdef *) p,
				   &def);
	  p += sizeof (Elf_External_Verdef);
	  if (def.vd_aux != sizeof (Elf_External_Verdef))
	    continue;
	  for (i = 0; i < def.vd_cnt; ++i)
	    {
	      _bfd_elf_swap_verdaux_in (output_bfd,
					(Elf_External_Verdaux *) p, &defaux);
	      defaux.vda_name = _bfd_elf_strtab_offset (dynstr,
							defaux.vda_name);
	      _bfd_elf_swap_verdaux_out (output_bfd,
					 &defaux, (Elf_External_Verdaux *) p);
	      p += sizeof (Elf_External_Verdaux);
	    }
	}
      while (def.vd_next);
    }

  /* Adjust version references.  */
  if (elf_tdata (output_bfd)->verref)
    {
      asection *s;
      bfd_byte *p;
      bfd_size_type i;
      Elf_Internal_Verneed need;
      Elf_Internal_Vernaux needaux;

      s = bfd_get_section_by_name (dynobj, ".gnu.version_r");
      p = s->contents;
      /* Walk the on-disk verneed chain, remapping vn_file and each
	 auxiliary entry's vna_name.  */
      do
	{
	  _bfd_elf_swap_verneed_in (output_bfd, (Elf_External_Verneed *) p,
				    &need);
	  need.vn_file = _bfd_elf_strtab_offset (dynstr, need.vn_file);
	  _bfd_elf_swap_verneed_out (output_bfd, &need,
				     (Elf_External_Verneed *) p);
	  p += sizeof (Elf_External_Verneed);
	  for (i = 0; i < need.vn_cnt; ++i)
	    {
	      _bfd_elf_swap_vernaux_in (output_bfd,
					(Elf_External_Vernaux *) p, &needaux);
	      needaux.vna_name = _bfd_elf_strtab_offset (dynstr,
							 needaux.vna_name);
	      _bfd_elf_swap_vernaux_out (output_bfd,
					 &needaux,
					 (Elf_External_Vernaux *) p);
	      p += sizeof (Elf_External_Vernaux);
	    }
	}
      while (need.vn_next);
    }

  return TRUE;
}

/* Return TRUE iff relocations for INPUT are compatible with OUTPUT.
   The default is to only match when the INPUT and OUTPUT are exactly
   the same target.  */

bfd_boolean
_bfd_elf_default_relocs_compatible (const bfd_target *input,
				    const bfd_target *output)
{
  return input == output;
}

/* Return TRUE iff relocations for INPUT are compatible with OUTPUT.
   This version is used when different targets for the same architecture
   are virtually identical.
*/ bfd_boolean _bfd_elf_relocs_compatible (const bfd_target *input, const bfd_target *output) { const struct elf_backend_data *obed, *ibed; if (input == output) return TRUE; ibed = xvec_get_elf_backend_data (input); obed = xvec_get_elf_backend_data (output); if (ibed->arch != obed->arch) return FALSE; /* If both backends are using this function, deem them compatible. */ return ibed->relocs_compatible == obed->relocs_compatible; } /* Add symbols from an ELF object file to the linker hash table. */ static bfd_boolean elf_link_add_object_symbols (bfd *abfd, struct bfd_link_info *info) { Elf_Internal_Shdr *hdr; bfd_size_type symcount; bfd_size_type extsymcount; bfd_size_type extsymoff; struct elf_link_hash_entry **sym_hash; bfd_boolean dynamic; Elf_External_Versym *extversym = NULL; Elf_External_Versym *ever; struct elf_link_hash_entry *weaks; struct elf_link_hash_entry **nondeflt_vers = NULL; bfd_size_type nondeflt_vers_cnt = 0; Elf_Internal_Sym *isymbuf = NULL; Elf_Internal_Sym *isym; Elf_Internal_Sym *isymend; const struct elf_backend_data *bed; bfd_boolean add_needed; struct elf_link_hash_table *htab; bfd_size_type amt; void *alloc_mark = NULL; struct bfd_hash_entry **old_table = NULL; unsigned int old_size = 0; unsigned int old_count = 0; void *old_tab = NULL; void *old_hash; void *old_ent; struct bfd_link_hash_entry *old_undefs = NULL; struct bfd_link_hash_entry *old_undefs_tail = NULL; long old_dynsymcount = 0; size_t tabsize = 0; size_t hashsize = 0; htab = elf_hash_table (info); bed = get_elf_backend_data (abfd); if ((abfd->flags & DYNAMIC) == 0) dynamic = FALSE; else { dynamic = TRUE; /* You can't use -r against a dynamic object. Also, there's no hope of using a dynamic object which does not exactly match the format of the output file. 
*/ if (info->relocatable || !is_elf_hash_table (htab) || htab->root.creator != abfd->xvec) { if (info->relocatable) bfd_set_error (bfd_error_invalid_operation); else bfd_set_error (bfd_error_wrong_format); goto error_return; } } /* As a GNU extension, any input sections which are named .gnu.warning.SYMBOL are treated as warning symbols for the given symbol. This differs from .gnu.warning sections, which generate warnings when they are included in an output file. */ if (info->executable) { asection *s; for (s = abfd->sections; s != NULL; s = s->next) { const char *name; name = bfd_get_section_name (abfd, s); if (CONST_STRNEQ (name, ".gnu.warning.")) { char *msg; bfd_size_type sz; name += sizeof ".gnu.warning." - 1; /* If this is a shared object, then look up the symbol in the hash table. If it is there, and it is already been defined, then we will not be using the entry from this shared object, so we don't need to warn. FIXME: If we see the definition in a regular object later on, we will warn, but we shouldn't. The only fix is to keep track of what warnings we are supposed to emit, and then handle them all at the end of the link. */ if (dynamic) { struct elf_link_hash_entry *h; h = elf_link_hash_lookup (htab, name, FALSE, FALSE, TRUE); /* FIXME: What about bfd_link_hash_common? */ if (h != NULL && (h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak)) { /* We don't want to issue this warning. Clobber the section size so that the warning does not get copied into the output file. */ s->size = 0; continue; } } sz = s->size; msg = bfd_alloc (abfd, sz + 1); if (msg == NULL) goto error_return; if (! bfd_get_section_contents (abfd, s, msg, 0, sz)) goto error_return; msg[sz] = '\0'; if (! (_bfd_generic_link_add_one_symbol (info, abfd, name, BSF_WARNING, s, 0, msg, FALSE, bed->collect, NULL))) goto error_return; if (! info->relocatable) { /* Clobber the section size so that the warning does not get copied into the output file. 
*/ s->size = 0; /* Also set SEC_EXCLUDE, so that symbols defined in the warning section don't get copied to the output. */ s->flags |= SEC_EXCLUDE; } } } } add_needed = TRUE; if (! dynamic) { /* If we are creating a shared library, create all the dynamic sections immediately. We need to attach them to something, so we attach them to this BFD, provided it is the right format. FIXME: If there are no input BFD's of the same format as the output, we can't make a shared library. */ if (info->shared && is_elf_hash_table (htab) && htab->root.creator == abfd->xvec && !htab->dynamic_sections_created) { if (! _bfd_elf_link_create_dynamic_sections (abfd, info)) goto error_return; } } else if (!is_elf_hash_table (htab)) goto error_return; else { asection *s; const char *soname = NULL; struct bfd_link_needed_list *rpath = NULL, *runpath = NULL; int ret; /* ld --just-symbols and dynamic objects don't mix very well. ld shouldn't allow it. */ if ((s = abfd->sections) != NULL && s->sec_info_type == ELF_INFO_TYPE_JUST_SYMS) abort (); /* If this dynamic lib was specified on the command line with --as-needed in effect, then we don't want to add a DT_NEEDED tag unless the lib is actually used. Similary for libs brought in by another lib's DT_NEEDED. When --no-add-needed is used on a dynamic lib, we don't want to add a DT_NEEDED entry for any dynamic library in DT_NEEDED tags in the dynamic lib at all. 
*/ add_needed = (elf_dyn_lib_class (abfd) & (DYN_AS_NEEDED | DYN_DT_NEEDED | DYN_NO_NEEDED)) == 0; s = bfd_get_section_by_name (abfd, ".dynamic"); if (s != NULL) { bfd_byte *dynbuf; bfd_byte *extdyn; int elfsec; unsigned long shlink; if (!bfd_malloc_and_get_section (abfd, s, &dynbuf)) goto error_free_dyn; elfsec = _bfd_elf_section_from_bfd_section (abfd, s); if (elfsec == -1) goto error_free_dyn; shlink = elf_elfsections (abfd)[elfsec]->sh_link; for (extdyn = dynbuf; extdyn < dynbuf + s->size; extdyn += bed->s->sizeof_dyn) { Elf_Internal_Dyn dyn; bed->s->swap_dyn_in (abfd, extdyn, &dyn); if (dyn.d_tag == DT_SONAME) { unsigned int tagv = dyn.d_un.d_val; soname = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (soname == NULL) goto error_free_dyn; } if (dyn.d_tag == DT_NEEDED) { struct bfd_link_needed_list *n, **pn; char *fnm, *anm; unsigned int tagv = dyn.d_un.d_val; amt = sizeof (struct bfd_link_needed_list); n = bfd_alloc (abfd, amt); fnm = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (n == NULL || fnm == NULL) goto error_free_dyn; amt = strlen (fnm) + 1; anm = bfd_alloc (abfd, amt); if (anm == NULL) goto error_free_dyn; memcpy (anm, fnm, amt); n->name = anm; n->by = abfd; n->next = NULL; for (pn = &htab->needed; *pn != NULL; pn = &(*pn)->next) ; *pn = n; } if (dyn.d_tag == DT_RUNPATH) { struct bfd_link_needed_list *n, **pn; char *fnm, *anm; unsigned int tagv = dyn.d_un.d_val; amt = sizeof (struct bfd_link_needed_list); n = bfd_alloc (abfd, amt); fnm = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (n == NULL || fnm == NULL) goto error_free_dyn; amt = strlen (fnm) + 1; anm = bfd_alloc (abfd, amt); if (anm == NULL) goto error_free_dyn; memcpy (anm, fnm, amt); n->name = anm; n->by = abfd; n->next = NULL; for (pn = & runpath; *pn != NULL; pn = &(*pn)->next) ; *pn = n; } /* Ignore DT_RPATH if we have seen DT_RUNPATH. 
*/ if (!runpath && dyn.d_tag == DT_RPATH) { struct bfd_link_needed_list *n, **pn; char *fnm, *anm; unsigned int tagv = dyn.d_un.d_val; amt = sizeof (struct bfd_link_needed_list); n = bfd_alloc (abfd, amt); fnm = bfd_elf_string_from_elf_section (abfd, shlink, tagv); if (n == NULL || fnm == NULL) goto error_free_dyn; amt = strlen (fnm) + 1; anm = bfd_alloc (abfd, amt); if (anm == NULL) { error_free_dyn: free (dynbuf); goto error_return; } memcpy (anm, fnm, amt); n->name = anm; n->by = abfd; n->next = NULL; for (pn = & rpath; *pn != NULL; pn = &(*pn)->next) ; *pn = n; } } free (dynbuf); } /* DT_RUNPATH overrides DT_RPATH. Do _NOT_ bfd_release, as that frees all more recently bfd_alloc'd blocks as well. */ if (runpath) rpath = runpath; if (rpath) { struct bfd_link_needed_list **pn; for (pn = &htab->runpath; *pn != NULL; pn = &(*pn)->next) ; *pn = rpath; } /* We do not want to include any of the sections in a dynamic object in the output file. We hack by simply clobbering the list of sections in the BFD. This could be handled more cleanly by, say, a new section flag; the existing SEC_NEVER_LOAD flag is not the one we want, because that one still implies that the section takes up space in the output file. */ bfd_section_list_clear (abfd); /* Find the name to use in a DT_NEEDED entry that refers to this object. If the object has a DT_SONAME entry, we use it. Otherwise, if the generic linker stuck something in elf_dt_name, we use that. Otherwise, we just use the file name. */ if (soname == NULL || *soname == '\0') { soname = elf_dt_name (abfd); if (soname == NULL || *soname == '\0') soname = bfd_get_filename (abfd); } /* Save the SONAME because sometimes the linker emulation code will need to know it. */ elf_dt_name (abfd) = soname; ret = elf_add_dt_needed_tag (abfd, info, soname, add_needed); if (ret < 0) goto error_return; /* If we have already included this dynamic object in the link, just ignore it. 
There is no reason to include a particular dynamic object more than once. */ if (ret > 0) return TRUE; } /* If this is a dynamic object, we always link against the .dynsym symbol table, not the .symtab symbol table. The dynamic linker will only see the .dynsym symbol table, so there is no reason to look at .symtab for a dynamic object. */ if (! dynamic || elf_dynsymtab (abfd) == 0) hdr = &elf_tdata (abfd)->symtab_hdr; else hdr = &elf_tdata (abfd)->dynsymtab_hdr; symcount = hdr->sh_size / bed->s->sizeof_sym; /* The sh_info field of the symtab header tells us where the external symbols start. We don't care about the local symbols at this point. */ if (elf_bad_symtab (abfd)) { extsymcount = symcount; extsymoff = 0; } else { extsymcount = symcount - hdr->sh_info; extsymoff = hdr->sh_info; } sym_hash = NULL; if (extsymcount != 0) { isymbuf = bfd_elf_get_elf_syms (abfd, hdr, extsymcount, extsymoff, NULL, NULL, NULL); if (isymbuf == NULL) goto error_return; /* We store a pointer to the hash table entry for each external symbol. */ amt = extsymcount * sizeof (struct elf_link_hash_entry *); sym_hash = bfd_alloc (abfd, amt); if (sym_hash == NULL) goto error_free_sym; elf_sym_hashes (abfd) = sym_hash; } if (dynamic) { /* Read in any version definitions. */ if (!_bfd_elf_slurp_version_tables (abfd, info->default_imported_symver)) goto error_free_sym; /* Read in the symbol versions, but don't bother to convert them to internal format. */ if (elf_dynversym (abfd) != 0) { Elf_Internal_Shdr *versymhdr; versymhdr = &elf_tdata (abfd)->dynversym_hdr; extversym = bfd_malloc (versymhdr->sh_size); if (extversym == NULL) goto error_free_sym; amt = versymhdr->sh_size; if (bfd_seek (abfd, versymhdr->sh_offset, SEEK_SET) != 0 || bfd_bread (extversym, amt, abfd) != amt) goto error_free_vers; } } /* If we are loading an as-needed shared lib, save the symbol table state before we start adding symbols. If the lib turns out to be unneeded, restore the state. 
*/ if ((elf_dyn_lib_class (abfd) & DYN_AS_NEEDED) != 0) { unsigned int i; size_t entsize; for (entsize = 0, i = 0; i < htab->root.table.size; i++) { struct bfd_hash_entry *p; struct elf_link_hash_entry *h; for (p = htab->root.table.table[i]; p != NULL; p = p->next) { h = (struct elf_link_hash_entry *) p; entsize += htab->root.table.entsize; if (h->root.type == bfd_link_hash_warning) entsize += htab->root.table.entsize; } } tabsize = htab->root.table.size * sizeof (struct bfd_hash_entry *); hashsize = extsymcount * sizeof (struct elf_link_hash_entry *); old_tab = bfd_malloc (tabsize + entsize + hashsize); if (old_tab == NULL) goto error_free_vers; /* Remember the current objalloc pointer, so that all mem for symbols added can later be reclaimed. */ alloc_mark = bfd_hash_allocate (&htab->root.table, 1); if (alloc_mark == NULL) goto error_free_vers; /* Make a special call to the linker "notice" function to tell it that we are about to handle an as-needed lib. */ if (!(*info->callbacks->notice) (info, NULL, abfd, NULL, notice_as_needed)) return FALSE; /* Clone the symbol table and sym hashes. Remember some pointers into the symbol table, and dynamic symbol count. 
*/ old_hash = (char *) old_tab + tabsize; old_ent = (char *) old_hash + hashsize; memcpy (old_tab, htab->root.table.table, tabsize); memcpy (old_hash, sym_hash, hashsize); old_undefs = htab->root.undefs; old_undefs_tail = htab->root.undefs_tail; old_table = htab->root.table.table; old_size = htab->root.table.size; old_count = htab->root.table.count; old_dynsymcount = htab->dynsymcount; for (i = 0; i < htab->root.table.size; i++) { struct bfd_hash_entry *p; struct elf_link_hash_entry *h; for (p = htab->root.table.table[i]; p != NULL; p = p->next) { memcpy (old_ent, p, htab->root.table.entsize); old_ent = (char *) old_ent + htab->root.table.entsize; h = (struct elf_link_hash_entry *) p; if (h->root.type == bfd_link_hash_warning) { memcpy (old_ent, h->root.u.i.link, htab->root.table.entsize); old_ent = (char *) old_ent + htab->root.table.entsize; } } } } weaks = NULL; ever = extversym != NULL ? extversym + extsymoff : NULL; for (isym = isymbuf, isymend = isymbuf + extsymcount; isym < isymend; isym++, sym_hash++, ever = (ever != NULL ? ever + 1 : NULL)) { int bind; bfd_vma value; asection *sec, *new_sec; flagword flags; const char *name; struct elf_link_hash_entry *h; bfd_boolean definition; bfd_boolean size_change_ok; bfd_boolean type_change_ok; bfd_boolean new_weakdef; bfd_boolean override; bfd_boolean common; unsigned int old_alignment; bfd *old_bfd; override = FALSE; flags = BSF_NO_FLAGS; sec = NULL; value = isym->st_value; *sym_hash = NULL; common = bed->common_definition (isym); bind = ELF_ST_BIND (isym->st_info); if (bind == STB_LOCAL) { /* This should be impossible, since ELF requires that all global symbols follow all local symbols, and that sh_info point to the first global symbol. Unfortunately, Irix 5 screws this up. */ continue; } else if (bind == STB_GLOBAL) { if (isym->st_shndx != SHN_UNDEF && !common) flags = BSF_GLOBAL; } else if (bind == STB_WEAK) flags = BSF_WEAK; else { /* Leave it up to the processor backend. 
*/ } if (isym->st_shndx == SHN_UNDEF) sec = bfd_und_section_ptr; else if (isym->st_shndx < SHN_LORESERVE || isym->st_shndx > SHN_HIRESERVE) { sec = bfd_section_from_elf_index (abfd, isym->st_shndx); if (sec == NULL) sec = bfd_abs_section_ptr; else if (sec->kept_section) { /* Symbols from discarded section are undefined. We keep its visibility. */ sec = bfd_und_section_ptr; isym->st_shndx = SHN_UNDEF; } else if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) value -= sec->vma; } else if (isym->st_shndx == SHN_ABS) sec = bfd_abs_section_ptr; else if (isym->st_shndx == SHN_COMMON) { sec = bfd_com_section_ptr; /* What ELF calls the size we call the value. What ELF calls the value we call the alignment. */ value = isym->st_size; } else { /* Leave it up to the processor backend. */ } name = bfd_elf_string_from_elf_section (abfd, hdr->sh_link, isym->st_name); if (name == NULL) goto error_free_vers; if (isym->st_shndx == SHN_COMMON && ELF_ST_TYPE (isym->st_info) == STT_TLS && !info->relocatable) { asection *tcomm = bfd_get_section_by_name (abfd, ".tcommon"); if (tcomm == NULL) { tcomm = bfd_make_section_with_flags (abfd, ".tcommon", (SEC_ALLOC | SEC_IS_COMMON | SEC_LINKER_CREATED | SEC_THREAD_LOCAL)); if (tcomm == NULL) goto error_free_vers; } sec = tcomm; } else if (bed->elf_add_symbol_hook) { if (! (*bed->elf_add_symbol_hook) (abfd, info, isym, &name, &flags, &sec, &value)) goto error_free_vers; /* The hook function sets the name to NULL if this symbol should be skipped for some reason. */ if (name == NULL) continue; } /* Sanity check that all possibilities were handled. 
*/ if (sec == NULL) { bfd_set_error (bfd_error_bad_value); goto error_free_vers; } if (bfd_is_und_section (sec) || bfd_is_com_section (sec)) definition = FALSE; else definition = TRUE; size_change_ok = FALSE; type_change_ok = bed->type_change_ok; old_alignment = 0; old_bfd = NULL; new_sec = sec; if (is_elf_hash_table (htab)) { Elf_Internal_Versym iver; unsigned int vernum = 0; bfd_boolean skip; if (ever == NULL) { if (info->default_imported_symver) /* Use the default symbol version created earlier. */ iver.vs_vers = elf_tdata (abfd)->cverdefs; else iver.vs_vers = 0; } else _bfd_elf_swap_versym_in (abfd, ever, &iver); vernum = iver.vs_vers & VERSYM_VERSION; /* If this is a hidden symbol, or if it is not version 1, we append the version name to the symbol name. However, we do not modify a non-hidden absolute symbol if it is not a function, because it might be the version symbol itself. FIXME: What if it isn't? */ if ((iver.vs_vers & VERSYM_HIDDEN) != 0 || (vernum > 1 && (!bfd_is_abs_section (sec) || bed->is_function_type (ELF_ST_TYPE (isym->st_info))))) { const char *verstr; size_t namelen, verlen, newlen; char *newname, *p; if (isym->st_shndx != SHN_UNDEF) { if (vernum > elf_tdata (abfd)->cverdefs) verstr = NULL; else if (vernum > 1) verstr = elf_tdata (abfd)->verdef[vernum - 1].vd_nodename; else verstr = ""; if (verstr == NULL) { (*_bfd_error_handler) (_("%B: %s: invalid version %u (max %d)"), abfd, name, vernum, elf_tdata (abfd)->cverdefs); bfd_set_error (bfd_error_bad_value); goto error_free_vers; } } else { /* We cannot simply test for the number of entries in the VERNEED section since the numbers for the needed versions do not start at 0. 
*/ Elf_Internal_Verneed *t; verstr = NULL; for (t = elf_tdata (abfd)->verref; t != NULL; t = t->vn_nextref) { Elf_Internal_Vernaux *a; for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr) { if (a->vna_other == vernum) { verstr = a->vna_nodename; break; } } if (a != NULL) break; } if (verstr == NULL) { (*_bfd_error_handler) (_("%B: %s: invalid needed version %d"), abfd, name, vernum); bfd_set_error (bfd_error_bad_value); goto error_free_vers; } } namelen = strlen (name); verlen = strlen (verstr); newlen = namelen + verlen + 2; if ((iver.vs_vers & VERSYM_HIDDEN) == 0 && isym->st_shndx != SHN_UNDEF) ++newlen; newname = bfd_hash_allocate (&htab->root.table, newlen); if (newname == NULL) goto error_free_vers; memcpy (newname, name, namelen); p = newname + namelen; *p++ = ELF_VER_CHR; /* If this is a defined non-hidden version symbol, we add another @ to the name. This indicates the default version of the symbol. */ if ((iver.vs_vers & VERSYM_HIDDEN) == 0 && isym->st_shndx != SHN_UNDEF) *p++ = ELF_VER_CHR; memcpy (p, verstr, verlen + 1); name = newname; } if (!_bfd_elf_merge_symbol (abfd, info, name, isym, &sec, &value, &old_alignment, sym_hash, &skip, &override, &type_change_ok, &size_change_ok)) goto error_free_vers; if (skip) continue; if (override) definition = FALSE; h = *sym_hash; while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* Remember the old alignment if this is a common symbol, so that we don't reduce the alignment later on. We can't check later, because _bfd_generic_link_add_one_symbol will set a default for the alignment which we want to override. We also remember the old bfd where the existing definition comes from. 
*/ switch (h->root.type) { default: break; case bfd_link_hash_defined: case bfd_link_hash_defweak: old_bfd = h->root.u.def.section->owner; break; case bfd_link_hash_common: old_bfd = h->root.u.c.p->section->owner; old_alignment = h->root.u.c.p->alignment_power; break; } if (elf_tdata (abfd)->verdef != NULL && ! override && vernum > 1 && definition) h->verinfo.verdef = &elf_tdata (abfd)->verdef[vernum - 1]; } if (! (_bfd_generic_link_add_one_symbol (info, abfd, name, flags, sec, value, NULL, FALSE, bed->collect, (struct bfd_link_hash_entry **) sym_hash))) goto error_free_vers; h = *sym_hash; while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; *sym_hash = h; new_weakdef = FALSE; if (dynamic && definition && (flags & BSF_WEAK) != 0 && !bed->is_function_type (ELF_ST_TYPE (isym->st_info)) && is_elf_hash_table (htab) && h->u.weakdef == NULL) { /* Keep a list of all weak defined non function symbols from a dynamic object, using the weakdef field. Later in this function we will set the weakdef field to the correct value. We only put non-function symbols from dynamic objects on this list, because that happens to be the only time we need to know the normal symbol corresponding to a weak symbol, and the information is time consuming to figure out. If the weakdef field is not already NULL, then this symbol was already defined by some previous dynamic object, and we will be using that previous definition anyhow. */ h->u.weakdef = weaks; weaks = h; new_weakdef = TRUE; } /* Set the alignment of a common symbol. */ if ((common || bfd_is_com_section (sec)) && h->root.type == bfd_link_hash_common) { unsigned int align; if (common) align = bfd_log2 (isym->st_value); else { /* The new symbol is a common symbol in a shared object. We need to get the alignment from the section. 
*/ align = new_sec->alignment_power; } if (align > old_alignment /* Permit an alignment power of zero if an alignment of one is specified and no other alignments have been specified. */ || (isym->st_value == 1 && old_alignment == 0)) h->root.u.c.p->alignment_power = align; else h->root.u.c.p->alignment_power = old_alignment; } if (is_elf_hash_table (htab)) { bfd_boolean dynsym; /* Check the alignment when a common symbol is involved. This can change when a common symbol is overridden by a normal definition or a common symbol is ignored due to the old normal definition. We need to make sure the maximum alignment is maintained. */ if ((old_alignment || common) && h->root.type != bfd_link_hash_common) { unsigned int common_align; unsigned int normal_align; unsigned int symbol_align; bfd *normal_bfd; bfd *common_bfd; symbol_align = ffs (h->root.u.def.value) - 1; if (h->root.u.def.section->owner != NULL && (h->root.u.def.section->owner->flags & DYNAMIC) == 0) { normal_align = h->root.u.def.section->alignment_power; if (normal_align > symbol_align) normal_align = symbol_align; } else normal_align = symbol_align; if (old_alignment) { common_align = old_alignment; common_bfd = old_bfd; normal_bfd = abfd; } else { common_align = bfd_log2 (isym->st_value); common_bfd = abfd; normal_bfd = old_bfd; } if (normal_align < common_align) { /* PR binutils/2735 */ if (normal_bfd == NULL) (*_bfd_error_handler) (_("Warning: alignment %u of common symbol `%s' in %B" " is greater than the alignment (%u) of its section %A"), common_bfd, h->root.u.def.section, 1 << common_align, name, 1 << normal_align); else (*_bfd_error_handler) (_("Warning: alignment %u of symbol `%s' in %B" " is smaller than %u in %B"), normal_bfd, common_bfd, 1 << normal_align, name, 1 << common_align); } } /* Remember the symbol size if it isn't undefined. */ if ((isym->st_size != 0 && isym->st_shndx != SHN_UNDEF) && (definition || h->size == 0)) { if (h->size != 0 && h->size != isym->st_size && ! 
size_change_ok) (*_bfd_error_handler) (_("Warning: size of symbol `%s' changed" " from %lu in %B to %lu in %B"), old_bfd, abfd, name, (unsigned long) h->size, (unsigned long) isym->st_size); h->size = isym->st_size; } /* If this is a common symbol, then we always want H->SIZE to be the size of the common symbol. The code just above won't fix the size if a common symbol becomes larger. We don't warn about a size change here, because that is covered by --warn-common. Allow changed between different function types. */ if (h->root.type == bfd_link_hash_common) h->size = h->root.u.c.size; if (ELF_ST_TYPE (isym->st_info) != STT_NOTYPE && (definition || h->type == STT_NOTYPE)) { if (h->type != STT_NOTYPE && h->type != ELF_ST_TYPE (isym->st_info) && ! type_change_ok) (*_bfd_error_handler) (_("Warning: type of symbol `%s' changed" " from %d to %d in %B"), abfd, name, h->type, ELF_ST_TYPE (isym->st_info)); h->type = ELF_ST_TYPE (isym->st_info); } /* If st_other has a processor-specific meaning, specific code might be needed here. We never merge the visibility attribute with the one from a dynamic object. */ if (bed->elf_backend_merge_symbol_attribute) (*bed->elf_backend_merge_symbol_attribute) (h, isym, definition, dynamic); /* If this symbol has default visibility and the user has requested we not re-export it, then mark it as hidden. */ if (definition && !dynamic && (abfd->no_export || (abfd->my_archive && abfd->my_archive->no_export)) && ELF_ST_VISIBILITY (isym->st_other) != STV_INTERNAL) isym->st_other = (STV_HIDDEN | (isym->st_other & ~ELF_ST_VISIBILITY (-1))); if (ELF_ST_VISIBILITY (isym->st_other) != 0 && !dynamic) { unsigned char hvis, symvis, other, nvis; /* Only merge the visibility. Leave the remainder of the st_other field to elf_backend_merge_symbol_attribute. */ other = h->other & ~ELF_ST_VISIBILITY (-1); /* Combine visibilities, using the most constraining one. */ hvis = ELF_ST_VISIBILITY (h->other); symvis = ELF_ST_VISIBILITY (isym->st_other); if (! 
hvis) nvis = symvis; else if (! symvis) nvis = hvis; else nvis = hvis < symvis ? hvis : symvis; h->other = other | nvis; } /* Set a flag in the hash table entry indicating the type of reference or definition we just found. Keep a count of the number of dynamic symbols we find. A dynamic symbol is one which is referenced or defined by both a regular object and a shared object. */ dynsym = FALSE; if (! dynamic) { if (! definition) { h->ref_regular = 1; if (bind != STB_WEAK) h->ref_regular_nonweak = 1; } else h->def_regular = 1; if (! info->executable || h->def_dynamic || h->ref_dynamic) dynsym = TRUE; } else { if (! definition) h->ref_dynamic = 1; else h->def_dynamic = 1; if (h->def_regular || h->ref_regular || (h->u.weakdef != NULL && ! new_weakdef && h->u.weakdef->dynindx != -1)) dynsym = TRUE; } if (definition && (sec->flags & SEC_DEBUGGING)) { /* We don't want to make debug symbol dynamic. */ (*bed->elf_backend_hide_symbol) (info, h, TRUE); dynsym = FALSE; } /* Check to see if we need to add an indirect symbol for the default name. */ if (definition || h->root.type == bfd_link_hash_common) if (!_bfd_elf_add_default_symbol (abfd, info, h, name, isym, &sec, &value, &dynsym, override)) goto error_free_vers; if (definition && !dynamic) { char *p = strchr (name, ELF_VER_CHR); if (p != NULL && p[1] != ELF_VER_CHR) { /* Queue non-default versions so that .symver x, x@FOO aliases can be checked. */ if (!nondeflt_vers) { amt = ((isymend - isym + 1) * sizeof (struct elf_link_hash_entry *)); nondeflt_vers = bfd_malloc (amt); } nondeflt_vers[nondeflt_vers_cnt++] = h; } } if (dynsym && h->dynindx == -1) { if (! bfd_elf_link_record_dynamic_symbol (info, h)) goto error_free_vers; if (h->u.weakdef != NULL && ! 
new_weakdef && h->u.weakdef->dynindx == -1) { if (!bfd_elf_link_record_dynamic_symbol (info, h->u.weakdef)) goto error_free_vers; } } else if (dynsym && h->dynindx != -1) /* If the symbol already has a dynamic index, but visibility says it should not be visible, turn it into a local symbol. */ switch (ELF_ST_VISIBILITY (h->other)) { case STV_INTERNAL: case STV_HIDDEN: (*bed->elf_backend_hide_symbol) (info, h, TRUE); dynsym = FALSE; break; } if (!add_needed && definition && dynsym && h->ref_regular) { int ret; const char *soname = elf_dt_name (abfd); /* A symbol from a library loaded via DT_NEEDED of some other library is referenced by a regular object. Add a DT_NEEDED entry for it. Issue an error if --no-add-needed is used. */ if ((elf_dyn_lib_class (abfd) & DYN_NO_NEEDED) != 0) { bfd_boolean looks_soish; const char *print_name; int print_len; size_t len, lend = 0; looks_soish = FALSE; print_name = soname; print_len = strlen(soname); if (strncmp(soname, "lib", 3) == 0) { len = print_len; if (len > 5 && strcmp(soname + len - 2, ".a") == 0) lend = len - 5; else { while (len > 6 && (ISDIGIT(soname[len - 1]) || soname[len - 1] == '.')) len--; if (strncmp(soname + len - 3, ".so", 3) == 0) lend = len - 6; } if (lend != 0) { print_name = soname + 3; print_len = lend; looks_soish = TRUE; } } (*_bfd_error_handler) (_("undefined reference to symbol `%s' (try adding -l%s%.*s)"), name, looks_soish? "" : ":", print_len, print_name); bfd_set_error (bfd_error_bad_value); goto error_free_vers; } elf_dyn_lib_class (abfd) &= ~DYN_AS_NEEDED; add_needed = TRUE; ret = elf_add_dt_needed_tag (abfd, info, soname, add_needed); if (ret < 0) goto error_free_vers; BFD_ASSERT (ret == 0); } } } if (extversym != NULL) { free (extversym); extversym = NULL; } if (isymbuf != NULL) { free (isymbuf); isymbuf = NULL; } if ((elf_dyn_lib_class (abfd) & DYN_AS_NEEDED) != 0) { unsigned int i; /* Restore the symbol table. 
*/ if (bed->as_needed_cleanup) (*bed->as_needed_cleanup) (abfd, info); old_hash = (char *) old_tab + tabsize; old_ent = (char *) old_hash + hashsize; sym_hash = elf_sym_hashes (abfd); htab->root.table.table = old_table; htab->root.table.size = old_size; htab->root.table.count = old_count; memcpy (htab->root.table.table, old_tab, tabsize); memcpy (sym_hash, old_hash, hashsize); htab->root.undefs = old_undefs; htab->root.undefs_tail = old_undefs_tail; for (i = 0; i < htab->root.table.size; i++) { struct bfd_hash_entry *p; struct elf_link_hash_entry *h; for (p = htab->root.table.table[i]; p != NULL; p = p->next) { h = (struct elf_link_hash_entry *) p; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if (h->dynindx >= old_dynsymcount) _bfd_elf_strtab_delref (htab->dynstr, h->dynstr_index); memcpy (p, old_ent, htab->root.table.entsize); old_ent = (char *) old_ent + htab->root.table.entsize; h = (struct elf_link_hash_entry *) p; if (h->root.type == bfd_link_hash_warning) { memcpy (h->root.u.i.link, old_ent, htab->root.table.entsize); old_ent = (char *) old_ent + htab->root.table.entsize; } } } /* Make a special call to the linker "notice" function to tell it that symbols added for crefs may need to be removed. */ if (!(*info->callbacks->notice) (info, NULL, abfd, NULL, notice_not_needed)) return FALSE; free (old_tab); objalloc_free_block ((struct objalloc *) htab->root.table.memory, alloc_mark); if (nondeflt_vers != NULL) free (nondeflt_vers); return TRUE; } if (old_tab != NULL) { if (!(*info->callbacks->notice) (info, NULL, abfd, NULL, notice_needed)) return FALSE; free (old_tab); old_tab = NULL; } /* Now that all the symbols from this input file are created, handle .symver foo, foo@BAR such that any relocs against foo become foo@BAR. 
*/ if (nondeflt_vers != NULL) { bfd_size_type cnt, symidx; for (cnt = 0; cnt < nondeflt_vers_cnt; ++cnt) { struct elf_link_hash_entry *h = nondeflt_vers[cnt], *hi; char *shortname, *p; p = strchr (h->root.root.string, ELF_VER_CHR); if (p == NULL || (h->root.type != bfd_link_hash_defined && h->root.type != bfd_link_hash_defweak)) continue; amt = p - h->root.root.string; shortname = bfd_malloc (amt + 1); memcpy (shortname, h->root.root.string, amt); shortname[amt] = '\0'; hi = (struct elf_link_hash_entry *) bfd_link_hash_lookup (&htab->root, shortname, FALSE, FALSE, FALSE); if (hi != NULL && hi->root.type == h->root.type && hi->root.u.def.value == h->root.u.def.value && hi->root.u.def.section == h->root.u.def.section) { (*bed->elf_backend_hide_symbol) (info, hi, TRUE); hi->root.type = bfd_link_hash_indirect; hi->root.u.i.link = (struct bfd_link_hash_entry *) h; (*bed->elf_backend_copy_indirect_symbol) (info, h, hi); sym_hash = elf_sym_hashes (abfd); if (sym_hash) for (symidx = 0; symidx < extsymcount; ++symidx) if (sym_hash[symidx] == hi) { sym_hash[symidx] = h; break; } } free (shortname); } free (nondeflt_vers); nondeflt_vers = NULL; } /* Now set the weakdefs field correctly for all the weak defined symbols we found. The only way to do this is to search all the symbols. Since we only need the information for non functions in dynamic objects, that's the only time we actually put anything on the list WEAKS. We need this information so that if a regular object refers to a symbol defined weakly in a dynamic object, the real symbol in the dynamic object is also put in the dynamic symbols; we also must arrange for both symbols to point to the same memory location. We could handle the general case of symbol aliasing, but a general symbol alias can only be generated in assembler code, handling it correctly would be very time consuming, and other ELF linkers don't handle general aliasing either. 
*/ if (weaks != NULL) { struct elf_link_hash_entry **hpp; struct elf_link_hash_entry **hppend; struct elf_link_hash_entry **sorted_sym_hash; struct elf_link_hash_entry *h; size_t sym_count; /* Since we have to search the whole symbol list for each weak defined symbol, search time for N weak defined symbols will be O(N^2). Binary search will cut it down to O(NlogN). */ amt = extsymcount * sizeof (struct elf_link_hash_entry *); sorted_sym_hash = bfd_malloc (amt); if (sorted_sym_hash == NULL) goto error_return; sym_hash = sorted_sym_hash; hpp = elf_sym_hashes (abfd); hppend = hpp + extsymcount; sym_count = 0; for (; hpp < hppend; hpp++) { h = *hpp; if (h != NULL && h->root.type == bfd_link_hash_defined && !bed->is_function_type (h->type)) { *sym_hash = h; sym_hash++; sym_count++; } } qsort (sorted_sym_hash, sym_count, sizeof (struct elf_link_hash_entry *), elf_sort_symbol); while (weaks != NULL) { struct elf_link_hash_entry *hlook; asection *slook; bfd_vma vlook; long ilook; size_t i, j, idx; hlook = weaks; weaks = hlook->u.weakdef; hlook->u.weakdef = NULL; BFD_ASSERT (hlook->root.type == bfd_link_hash_defined || hlook->root.type == bfd_link_hash_defweak || hlook->root.type == bfd_link_hash_common || hlook->root.type == bfd_link_hash_indirect); slook = hlook->root.u.def.section; vlook = hlook->root.u.def.value; ilook = -1; i = 0; j = sym_count; while (i < j) { bfd_signed_vma vdiff; idx = (i + j) / 2; h = sorted_sym_hash [idx]; vdiff = vlook - h->root.u.def.value; if (vdiff < 0) j = idx; else if (vdiff > 0) i = idx + 1; else { long sdiff = slook->id - h->root.u.def.section->id; if (sdiff < 0) j = idx; else if (sdiff > 0) i = idx + 1; else { ilook = idx; break; } } } /* We didn't find a value/section match. */ if (ilook == -1) continue; for (i = ilook; i < sym_count; i++) { h = sorted_sym_hash [i]; /* Stop if value or section doesn't match. 
*/ if (h->root.u.def.value != vlook || h->root.u.def.section != slook) break; else if (h != hlook) { hlook->u.weakdef = h; /* If the weak definition is in the list of dynamic symbols, make sure the real definition is put there as well. */ if (hlook->dynindx != -1 && h->dynindx == -1) { if (! bfd_elf_link_record_dynamic_symbol (info, h)) goto error_return; } /* If the real definition is in the list of dynamic symbols, make sure the weak definition is put there as well. If we don't do this, then the dynamic loader might not merge the entries for the real definition and the weak definition. */ if (h->dynindx != -1 && hlook->dynindx == -1) { if (! bfd_elf_link_record_dynamic_symbol (info, hlook)) goto error_return; } break; } } } free (sorted_sym_hash); } if (bed->check_directives) (*bed->check_directives) (abfd, info); /* If this object is the same format as the output object, and it is not a shared library, then let the backend look through the relocs. This is required to build global offset table entries and to arrange for dynamic relocs. It is not required for the particular common case of linking non PIC code, even when linking against shared libraries, but unfortunately there is no way of knowing whether an object file has been compiled PIC or not. Looking through the relocs is not particularly time consuming. The problem is that we must either (1) keep the relocs in memory, which causes the linker to require additional runtime memory or (2) read the relocs twice from the input file, which wastes time. This would be a good case for using mmap. I have no idea how to handle linking PIC code into a file of a different format. It probably can't be done. */ if (! 
dynamic && is_elf_hash_table (htab) && bed->check_relocs != NULL && (*bed->relocs_compatible) (abfd->xvec, htab->root.creator)) { asection *o; for (o = abfd->sections; o != NULL; o = o->next) { Elf_Internal_Rela *internal_relocs; bfd_boolean ok; if ((o->flags & SEC_RELOC) == 0 || o->reloc_count == 0 || ((info->strip == strip_all || info->strip == strip_debugger) && (o->flags & SEC_DEBUGGING) != 0) || bfd_is_abs_section (o->output_section)) continue; internal_relocs = _bfd_elf_link_read_relocs (abfd, o, NULL, NULL, info->keep_memory); if (internal_relocs == NULL) goto error_return; ok = (*bed->check_relocs) (abfd, info, o, internal_relocs); if (elf_section_data (o)->relocs != internal_relocs) free (internal_relocs); if (! ok) goto error_return; } } /* If this is a non-traditional link, try to optimize the handling of the .stab/.stabstr sections. */ if (! dynamic && ! info->traditional_format && is_elf_hash_table (htab) && (info->strip != strip_all && info->strip != strip_debugger)) { asection *stabstr; stabstr = bfd_get_section_by_name (abfd, ".stabstr"); if (stabstr != NULL) { bfd_size_type string_offset = 0; asection *stab; for (stab = abfd->sections; stab; stab = stab->next) if (CONST_STRNEQ (stab->name, ".stab") && (!stab->name[5] || (stab->name[5] == '.' && ISDIGIT (stab->name[6]))) && (stab->flags & SEC_MERGE) == 0 && !bfd_is_abs_section (stab->output_section)) { struct bfd_elf_section_data *secdata; secdata = elf_section_data (stab); if (! _bfd_link_section_stabs (abfd, &htab->stab_info, stab, stabstr, &secdata->sec_info, &string_offset)) goto error_return; if (secdata->sec_info) stab->sec_info_type = ELF_INFO_TYPE_STABS; } } } if (is_elf_hash_table (htab) && add_needed) { /* Add this bfd to the loaded list. 
struct elf_link_loaded_list *n;

      /* Record this bfd on the hash table's list of loaded inputs.  */
      n = bfd_alloc (abfd, sizeof (struct elf_link_loaded_list));
      if (n == NULL)
        goto error_return;
      n->abfd = abfd;
      n->next = htab->loaded;
      htab->loaded = n;
    }

  return TRUE;

  /* Error exits: each label frees the buffers known to be live at the
     failure points that jump to it.  */
 error_free_vers:
  if (old_tab != NULL)
    free (old_tab);
  if (nondeflt_vers != NULL)
    free (nondeflt_vers);
  if (extversym != NULL)
    free (extversym);
 error_free_sym:
  if (isymbuf != NULL)
    free (isymbuf);
 error_return:
  return FALSE;
}

/* Return the linker hash table entry of a symbol that might be
   satisfied by an archive symbol.  Return -1 (cast to a pointer) on
   error.

   NAME is looked up as-is first; if that fails and NAME is a default
   versioned name ("sym@@VER"), it is retried as "sym@VER" and then as
   plain "sym", so that references with and without the version are
   matched by the default symbol in the archive.  */

struct elf_link_hash_entry *
_bfd_elf_archive_symbol_lookup (bfd *abfd,
                                struct bfd_link_info *info,
                                const char *name)
{
  struct elf_link_hash_entry *h;
  char *p, *copy;
  size_t len, first;

  h = elf_link_hash_lookup (elf_hash_table (info), name, FALSE, FALSE, FALSE);
  if (h != NULL)
    return h;

  /* If this is a default version (the name contains @@), look up the
     symbol again with only one `@' as well as without the version.
     The effect is that references to the symbol with and without the
     version will be matched by the default symbol in the archive.  */

  p = strchr (name, ELF_VER_CHR);
  if (p == NULL || p[1] != ELF_VER_CHR)
    return h;

  /* First check with only one `@'.  The copy drops one of the two
     ELF_VER_CHR characters, so LEN bytes (including the NUL copied
     from the tail of NAME) are enough.  */
  len = strlen (name);
  copy = bfd_alloc (abfd, len);
  if (copy == NULL)
    return (struct elf_link_hash_entry *)(intptr_t)-1;

  first = p - name + 1;
  memcpy (copy, name, first);
  memcpy (copy + first, name + first + 1, len - first);

  h = elf_link_hash_lookup (elf_hash_table (info), copy, FALSE, FALSE, FALSE);
  if (h == NULL)
    {
      /* We also need to check references to the symbol without the
         version.  Truncate at the first `@'.  */
      copy[first - 1] = '\0';
      h = elf_link_hash_lookup (elf_hash_table (info), copy,
                                FALSE, FALSE, FALSE);
    }

  bfd_release (abfd, copy);
  return h;
}

/* Add symbols from an ELF archive file to the linker hash table.  We
   don't use _bfd_generic_link_add_archive_symbols because of a
   problem which arises on UnixWare.
The UnixWare libc.so is an
   archive which includes an entry libc.so.1 which defines a bunch of
   symbols.  The libc.so archive also includes a number of other
   object files, which also define symbols, some of which are the same
   as those defined in libc.so.1.  Correct linking requires that we
   consider each object file in turn, and include it if it defines any
   symbols we need.  _bfd_generic_link_add_archive_symbols does not do
   this; it looks through the list of undefined symbols, and includes
   any object file which defines them.  When this algorithm is used on
   UnixWare, it winds up pulling in libc.so.1 early and defining a
   bunch of symbols.  This means that some of the other objects in the
   archive are not included in the link, which is incorrect since they
   precede libc.so.1 in the archive.

   Fortunately, ELF archive handling is simpler than that done by
   _bfd_generic_link_add_archive_symbols, which has to allow for a.out
   oddities.  In ELF, if we find a symbol in the archive map, and the
   symbol is currently undefined, we know that we must pull in that
   object file.

   Unfortunately, we do have to make multiple passes over the symbol
   table until nothing further is resolved.  */

static bfd_boolean
elf_link_add_archive_symbols (bfd *abfd, struct bfd_link_info *info)
{
  symindex c;
  bfd_boolean *defined = NULL;          /* Per-armap-entry: symbol resolved.  */
  bfd_boolean *included = NULL;         /* Per-armap-entry: element pulled in.  */
  carsym *symdefs;
  bfd_boolean loop;
  bfd_size_type amt;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry * (*archive_symbol_lookup)
    (bfd *, struct bfd_link_info *, const char *);

  if (! bfd_has_map (abfd))
    {
      /* An empty archive is a special case.  */
      if (bfd_openr_next_archived_file (abfd, NULL) == NULL)
        return TRUE;
      bfd_set_error (bfd_error_no_armap);
      return FALSE;
    }

  /* Keep track of all symbols we know to be already defined, and all
     files we know to be already included.  This is to speed up the
     second and subsequent passes.  */
  c = bfd_ardata (abfd)->symdef_count;
  if (c == 0)
    return TRUE;
  amt = c;
  amt *= sizeof (bfd_boolean);
  defined = bfd_zmalloc (amt);
  included = bfd_zmalloc (amt);
  if (defined == NULL || included == NULL)
    goto error_return;

  symdefs = bfd_ardata (abfd)->symdefs;
  bed = get_elf_backend_data (abfd);
  archive_symbol_lookup = bed->elf_backend_archive_symbol_lookup;

  /* Iterate to a fixed point: each pass may define new symbols that
     let further archive members be pulled in on the next pass.  */
  do
    {
      file_ptr last;
      symindex i;
      carsym *symdef;
      carsym *symdefend;

      loop = FALSE;
      last = -1;

      symdef = symdefs;
      symdefend = symdef + c;
      for (i = 0; symdef < symdefend; symdef++, i++)
        {
          struct elf_link_hash_entry *h;
          bfd *element;
          struct bfd_link_hash_entry *undefs_tail;
          symindex mark;

          if (defined[i] || included[i])
            continue;
          if (symdef->file_offset == last)
            {
              /* Same element as the one just included; mark and skip.  */
              included[i] = TRUE;
              continue;
            }

          h = archive_symbol_lookup (abfd, info, symdef->name);
          if (h == (struct elf_link_hash_entry *)(intptr_t)-1)
            goto error_return;

          if (h == NULL)
            continue;

          if (h->root.type == bfd_link_hash_common)
            {
              /* We currently have a common symbol.  The archive map contains
                 a reference to this symbol, so we may want to include it.  We
                 only want to include it however, if this archive element
                 contains a definition of the symbol, not just another common
                 declaration of it.

                 Unfortunately some archivers (including GNU ar) will put
                 declarations of common symbols into their archive maps, as
                 well as real definitions, so we cannot just go by the archive
                 map alone.  Instead we must read in the element's symbol
                 table and check that to see what kind of symbol definition
                 this is.  */
              if (! elf_link_is_defined_archive_symbol (abfd, symdef))
                continue;
            }
          else if (h->root.type != bfd_link_hash_undefined)
            {
              if (h->root.type != bfd_link_hash_undefweak)
                defined[i] = TRUE;
              continue;
            }

          /* We need to include this archive member.  */
          element = _bfd_get_elt_at_filepos (abfd, symdef->file_offset);
          if (element == NULL)
            goto error_return;

          if (! bfd_check_format (element, bfd_object))
            goto error_return;

          /* Doublecheck that we have not included this object
             already--it should be impossible, but there may be
             something wrong with the archive.  */
          if (element->archive_pass != 0)
            {
              bfd_set_error (bfd_error_bad_value);
              goto error_return;
            }
          element->archive_pass = 1;

          undefs_tail = info->hash->undefs_tail;

          if (! (*info->callbacks->add_archive_element) (info, element,
                                                         symdef->name))
            goto error_return;
          if (! bfd_link_add_symbols (element, info))
            goto error_return;

          /* If there are any new undefined symbols, we need to make
             another pass through the archive in order to see whether
             they can be defined.  FIXME: This isn't perfect, because
             common symbols wind up on undefs_tail and because an
             undefined symbol which is defined later on in this pass
             does not require another pass.  This isn't a bug, but it
             does make the code less efficient than it could be.  */
          if (undefs_tail != info->hash->undefs_tail)
            loop = TRUE;

          /* Look backward to mark all symbols from this object file
             which we have already seen in this pass.  */
          mark = i;
          do
            {
              included[mark] = TRUE;
              if (mark == 0)
                break;
              --mark;
            }
          while (symdefs[mark].file_offset == symdef->file_offset);

          /* We mark subsequent symbols from this object file as we go
             on through the loop.  */
          last = symdef->file_offset;
        }
    }
  while (loop);

  free (defined);
  free (included);

  return TRUE;

 error_return:
  if (defined != NULL)
    free (defined);
  if (included != NULL)
    free (included);
  return FALSE;
}

/* Given an ELF BFD, add symbols to the global hash table as
   appropriate.  Dispatches on the format of ABFD.  */

bfd_boolean
bfd_elf_link_add_symbols (bfd *abfd, struct bfd_link_info *info)
{
  switch (bfd_get_format (abfd))
    {
    case bfd_object:
      return elf_link_add_object_symbols (abfd, info);
    case bfd_archive:
      return elf_link_add_archive_symbols (abfd, info);
    default:
      bfd_set_error (bfd_error_wrong_format);
      return FALSE;
    }
}

/* This function will be called though elf_link_hash_traverse to store
   all hash value of the exported symbols in an array.
*/

static bfd_boolean
elf_collect_hash_codes (struct elf_link_hash_entry *h, void *data)
{
  /* DATA is a pointer to the cursor into the caller's hash-code
     array; the cursor is advanced as each value is stored.  */
  unsigned long **valuep = data;
  const char *name;
  char *p;
  unsigned long ha;
  char *alc = NULL;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Ignore indirect symbols.  These are added by the versioning code.  */
  if (h->dynindx == -1)
    return TRUE;

  name = h->root.root.string;
  p = strchr (name, ELF_VER_CHR);
  if (p != NULL)
    {
      /* Hash only the part of the name before the version separator.
         NOTE(review): the bfd_malloc result is not checked for NULL;
         a failed allocation would crash in memcpy.  */
      alc = bfd_malloc (p - name + 1);
      memcpy (alc, name, p - name);
      alc[p - name] = '\0';
      name = alc;
    }

  /* Compute the hash value.  */
  ha = bfd_elf_hash (name);

  /* Store the found hash value in the array given as the argument.  */
  *(*valuep)++ = ha;

  /* And store it in the struct so that we can put it in the hash table
     later.  */
  h->u.elf_hash_value = ha;

  if (alc != NULL)
    free (alc);

  return TRUE;
}

/* State shared by the .gnu.hash traversal callbacks below.  */
struct collect_gnu_hash_codes
{
  bfd *output_bfd;
  const struct elf_backend_data *bed;
  unsigned long int nsyms;              /* Number of hash codes collected.  */
  unsigned long int maskbits;
  unsigned long int *hashcodes;         /* Filled sequentially by collect.  */
  unsigned long int *hashval;           /* Indexed by old dynindx.  */
  unsigned long int *indx;
  unsigned long int *counts;            /* Remaining symbols per bucket.  */
  bfd_vma *bitmask;
  bfd_byte *contents;
  long int min_dynindx;                 /* Smallest dynindx of hashed syms.  */
  unsigned long int bucketcount;
  unsigned long int symindx;
  long int local_indx;
  long int shift1, shift2;
  unsigned long int mask;
};

/* This function will be called through elf_link_hash_traverse to store
   all hash value of the exported symbols in an array.  */

static bfd_boolean
elf_collect_gnu_hash_codes (struct elf_link_hash_entry *h, void *data)
{
  struct collect_gnu_hash_codes *s = data;
  const char *name;
  char *p;
  unsigned long ha;
  char *alc = NULL;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Ignore indirect symbols.  These are added by the versioning code.  */
  if (h->dynindx == -1)
    return TRUE;

  /* Ignore also local symbols and undefined symbols.  */
  if (! (*s->bed->elf_hash_symbol) (h))
    return TRUE;

  name = h->root.root.string;
  p = strchr (name, ELF_VER_CHR);
  if (p != NULL)
    {
      /* Hash only the part of the name before the version separator.
         NOTE(review): bfd_malloc result unchecked, as in
         elf_collect_hash_codes above.  */
      alc = bfd_malloc (p - name + 1);
      memcpy (alc, name, p - name);
      alc[p - name] = '\0';
      name = alc;
    }

  /* Compute the hash value.  */
  ha = bfd_elf_gnu_hash (name);

  /* Store the found hash value in the array for compute_bucket_count,
     and also for .dynsym reordering purposes.  */
  s->hashcodes[s->nsyms] = ha;
  s->hashval[h->dynindx] = ha;
  ++s->nsyms;
  if (s->min_dynindx < 0 || s->min_dynindx > h->dynindx)
    s->min_dynindx = h->dynindx;

  if (alc != NULL)
    free (alc);

  return TRUE;
}

/* This function will be called through elf_link_hash_traverse to do
   final dynamic symbol renumbering.  */

static bfd_boolean
elf_renumber_gnu_hash_syms (struct elf_link_hash_entry *h, void *data)
{
  struct collect_gnu_hash_codes *s = data;
  unsigned long int bucket;
  unsigned long int val;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Ignore indirect symbols.  */
  if (h->dynindx == -1)
    return TRUE;

  /* Ignore also local symbols and undefined symbols: give them fresh
     dynindx values below the hashed range.  */
  if (! (*s->bed->elf_hash_symbol) (h))
    {
      if (h->dynindx >= s->min_dynindx)
        h->dynindx = s->local_indx++;
      return TRUE;
    }

  bucket = s->hashval[h->dynindx] % s->bucketcount;
  /* Set the two filter bits derived from the hash value.  */
  val = (s->hashval[h->dynindx] >> s->shift1)
        & ((s->maskbits >> s->shift1) - 1);
  s->bitmask[val] |= ((bfd_vma) 1) << (s->hashval[h->dynindx] & s->mask);
  s->bitmask[val]
    |= ((bfd_vma) 1) << ((s->hashval[h->dynindx] >> s->shift2) & s->mask);
  val = s->hashval[h->dynindx] & ~(unsigned long int) 1;
  if (s->counts[bucket] == 1)
    /* Last element terminates the chain.  */
    val |= 1;
  bfd_put_32 (s->output_bfd, val,
              s->contents + (s->indx[bucket] - s->symindx) * 4);
  --s->counts[bucket];
  h->dynindx = s->indx[bucket]++;
  return TRUE;
}

/* Return TRUE if symbol should be hashed in the `.gnu.hash' section.
*/ bfd_boolean _bfd_elf_hash_symbol (struct elf_link_hash_entry *h) { return !(h->forced_local || h->root.type == bfd_link_hash_undefined || h->root.type == bfd_link_hash_undefweak || ((h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak) && h->root.u.def.section->output_section == NULL)); } /* Array used to determine the number of hash table buckets to use based on the number of symbols there are. If there are fewer than 3 symbols we use 1 bucket, fewer than 17 symbols we use 3 buckets, fewer than 37 we use 17 buckets, and so forth. We never use more than 32771 buckets. */ static const size_t elf_buckets[] = { 1, 3, 17, 37, 67, 97, 131, 197, 263, 521, 1031, 2053, 4099, 8209, 16411, 32771, 0 }; /* Compute bucket count for hashing table. We do not use a static set of possible tables sizes anymore. Instead we determine for all possible reasonable sizes of the table the outcome (i.e., the number of collisions etc) and choose the best solution. The weighting functions are not too simple to allow the table to grow without bounds. Instead one of the weighting factors is the size. Therefore the result is always a good payoff between few collisions (= short chain lengths) and table size. */ static size_t compute_bucket_count (struct bfd_link_info *info, unsigned long int *hashcodes, unsigned long int nsyms, int gnu_hash) { size_t dynsymcount = elf_hash_table (info)->dynsymcount; size_t best_size = 0; unsigned long int i; bfd_size_type amt; /* We have a problem here. The following code to optimize the table size requires an integer type with more the 32 bits. If BFD_HOST_U_64_BIT is set we know about such a type. 
*/
#ifdef BFD_HOST_U_64_BIT
  if (info->optimize)
    {
      size_t minsize;
      size_t maxsize;
      BFD_HOST_U_64_BIT best_chlen = ~((BFD_HOST_U_64_BIT) 0);
      bfd *dynobj = elf_hash_table (info)->dynobj;
      const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
      unsigned long int *counts;

      /* Possible optimization parameters: if we have NSYMS symbols we say
	 that the hashing table must at least have NSYMS/4 and at most
	 2*NSYMS buckets.  */
      minsize = nsyms / 4;
      if (minsize == 0)
	minsize = 1;
      best_size = maxsize = nsyms * 2;
      if (gnu_hash)
	{
	  /* .gnu.hash needs at least two buckets, and (see the loop
	     below) never uses a bucket count that is a multiple of 32,
	     so nudge the initial guess off such values.  */
	  if (minsize < 2)
	    minsize = 2;
	  if ((best_size & 31) == 0)
	    ++best_size;
	}

      /* Create array where we count the collisions in.  We must use
	 bfd_malloc since the size could be large.  */
      amt = maxsize;
      amt *= sizeof (unsigned long int);
      counts = bfd_malloc (amt);
      if (counts == NULL)
	return 0;

      /* Compute the "optimal" size for the hash table.  The criteria is
	 a minimal chain length.  The minor criteria is (of course) the
	 size of the table.  */
      for (i = minsize; i < maxsize; ++i)
	{
	  /* Walk through the array of hashcodes and count the
	     collisions.  */
	  BFD_HOST_U_64_BIT max;
	  unsigned long int j;
	  unsigned long int fact;

	  if (gnu_hash && (i & 31) == 0)
	    continue;

	  memset (counts, '\0', i * sizeof (unsigned long int));

	  /* Determine how often each hash bucket is used.  */
	  for (j = 0; j < nsyms; ++j)
	    ++counts[hashcodes[j] % i];

	  /* For the weight function we need some information about the
	     pagesize on the target.  This information need not be 100%
	     accurate.  Since this information is not available (so far)
	     we define it here to a reasonable default value.  If it is
	     crucial to have a better value some day simply define this
	     value.  */
# ifndef BFD_TARGET_PAGESIZE
# define BFD_TARGET_PAGESIZE (4096)
# endif

	  /* We in any case need 2 + DYNSYMCOUNT entries for the size
	     values and the chains.  */
	  max = (2 + dynsymcount) * bed->s->sizeof_hash_entry;

# if 1
	  /* Variant 1: optimize for short chains.  We add the squares
	     of all the chain lengths (which favors many small chain
	     over a few long chains).  */
	  for (j = 0; j < i; ++j)
	    max += counts[j] * counts[j];

	  /* This adds penalties for the overall size of the table.  */
	  fact = i / (BFD_TARGET_PAGESIZE / bed->s->sizeof_hash_entry) + 1;
	  max *= fact * fact;
# else
	  /* Variant 2: Optimize a lot more for small table.  Here we
	     also add squares of the size but we also add penalties for
	     empty slots (the +1 term).  */
	  for (j = 0; j < i; ++j)
	    max += (1 + counts[j]) * (1 + counts[j]);

	  /* The overall size of the table is considered, but not as
	     strong as in variant 1, where it is squared.  */
	  fact = i / (BFD_TARGET_PAGESIZE / bed->s->sizeof_hash_entry) + 1;
	  max *= fact;
# endif

	  /* Compare with current best results.  */
	  if (max < best_chlen)
	    {
	      best_chlen = max;
	      best_size = i;
	    }
	}

      free (counts);
    }
  else
#endif /* defined (BFD_HOST_U_64_BIT) */
    {
      /* This is the fallback solution if no 64bit type is available or
	 if we are not supposed to spend much time on optimizations.
	 We select the bucket count using a fixed set of numbers.  */
      for (i = 0; elf_buckets[i] != 0; i++)
	{
	  best_size = elf_buckets[i];
	  if (nsyms < elf_buckets[i + 1])
	    break;
	}
      if (gnu_hash && best_size < 2)
	best_size = 2;
    }

  return best_size;
}

/* Set up the sizes and contents of the ELF dynamic sections.  This is
   called by the ELF linker emulation before_allocation routine.  We
   must set the sizes of the sections before the linker sets the
   addresses of the various sections.
*/

bfd_boolean
bfd_elf_size_dynamic_sections (bfd *output_bfd,
			       const char *soname,
			       const char *rpath,
			       const char *filter_shlib,
			       const char * const *auxiliary_filters,
			       struct bfd_link_info *info,
			       asection **sinterpptr,
			       struct bfd_elf_version_tree *verdefs)
{
  bfd_size_type soname_indx;
  bfd *dynobj;
  const struct elf_backend_data *bed;
  struct elf_assign_sym_version_info asvinfo;

  *sinterpptr = NULL;

  soname_indx = (bfd_size_type) -1;

  if (!is_elf_hash_table (info->hash))
    return TRUE;

  bed = get_elf_backend_data (output_bfd);
  elf_tdata (output_bfd)->relro = info->relro;
  /* Determine the stack segment flags: explicit command-line requests
     win, otherwise scan the inputs' .note.GNU-stack sections.  */
  if (info->execstack)
    elf_tdata (output_bfd)->stack_flags = PF_R | PF_W | PF_X;
  else if (info->noexecstack)
    elf_tdata (output_bfd)->stack_flags = PF_R | PF_W;
  else
    {
      bfd *inputobj;
      asection *notesec = NULL;
      int exec = 0;

      for (inputobj = info->input_bfds;
	   inputobj;
	   inputobj = inputobj->link_next)
	{
	  asection *s;

	  if (inputobj->flags & (DYNAMIC | BFD_LINKER_CREATED))
	    continue;
	  s = bfd_get_section_by_name (inputobj, ".note.GNU-stack");
	  if (s)
	    {
	      if (s->flags & SEC_CODE)
		exec = PF_X;
	      notesec = s;
	    }
	  else if (bed->default_execstack)
	    exec = PF_X;
	}
      if (notesec)
	{
	  elf_tdata (output_bfd)->stack_flags = PF_R | PF_W | exec;
	  if (exec && info->relocatable
	      && notesec->output_section != bfd_abs_section_ptr)
	    notesec->output_section->flags |= SEC_CODE;
	}
    }

  /* Any syms created from now on start with -1 in
     got.refcount/offset and plt.refcount/offset.  */
  elf_hash_table (info)->init_got_refcount
    = elf_hash_table (info)->init_got_offset;
  elf_hash_table (info)->init_plt_refcount
    = elf_hash_table (info)->init_plt_offset;

  /* The backend may have to create some sections regardless of whether
     we're dynamic or not.  */
  if (bed->elf_backend_always_size_sections
      && ! (*bed->elf_backend_always_size_sections) (output_bfd, info))
    return FALSE;

  if (! _bfd_elf_maybe_strip_eh_frame_hdr (info))
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* If there were no dynamic objects in the link, there is nothing to
     do here.  */
  if (dynobj == NULL)
    return TRUE;

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      struct elf_info_failed eif;
      struct elf_link_hash_entry *h;
      asection *dynstr;
      struct bfd_elf_version_tree *t;
      struct bfd_elf_version_expr *d;
      asection *s;
      bfd_boolean all_defined;

      *sinterpptr = bfd_get_section_by_name (dynobj, ".interp");
      BFD_ASSERT (*sinterpptr != NULL || !info->executable);

      if (soname != NULL)
	{
	  soname_indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
					     soname, TRUE);
	  if (soname_indx == (bfd_size_type) -1
	      || !_bfd_elf_add_dynamic_entry (info, DT_SONAME, soname_indx))
	    return FALSE;
	}

      if (info->symbolic)
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_SYMBOLIC, 0))
	    return FALSE;
	  info->flags |= DF_SYMBOLIC;
	}

      if (rpath != NULL)
	{
	  bfd_size_type indx;

	  indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr, rpath,
				      TRUE);
	  if (indx == (bfd_size_type) -1
	      || !_bfd_elf_add_dynamic_entry (info, DT_RPATH, indx))
	    return FALSE;

	  /* With new-style dtags also emit DT_RUNPATH referencing the
	     same string (hence the extra strtab reference).  */
	  if (info->new_dtags)
	    {
	      _bfd_elf_strtab_addref (elf_hash_table (info)->dynstr, indx);
	      if (!_bfd_elf_add_dynamic_entry (info, DT_RUNPATH, indx))
		return FALSE;
	    }
	}

      if (filter_shlib != NULL)
	{
	  bfd_size_type indx;

	  indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
				      filter_shlib, TRUE);
	  if (indx == (bfd_size_type) -1
	      || !_bfd_elf_add_dynamic_entry (info, DT_FILTER, indx))
	    return FALSE;
	}

      if (auxiliary_filters != NULL)
	{
	  const char * const *p;

	  for (p = auxiliary_filters; *p != NULL; p++)
	    {
	      bfd_size_type indx;

	      indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
					  *p, TRUE);
	      if (indx == (bfd_size_type) -1
		  || !_bfd_elf_add_dynamic_entry (info, DT_AUXILIARY, indx))
		return FALSE;
	    }
	}

      eif.info = info;
      eif.verdefs = verdefs;
      eif.failed = FALSE;

      /* If we are supposed to export all symbols into the dynamic
	 symbol table (this is not the normal case), then do so.
*/
      if (info->export_dynamic
	  || (info->executable && info->dynamic))
	{
	  elf_link_hash_traverse (elf_hash_table (info),
				  _bfd_elf_export_symbol,
				  &eif);
	  if (eif.failed)
	    return FALSE;
	}

      /* Make all global versions with definition.  */
      for (t = verdefs; t != NULL; t = t->next)
	for (d = t->globals.list; d != NULL; d = d->next)
	  if (!d->symver && d->symbol)
	    {
	      const char *verstr, *name;
	      size_t namelen, verlen, newlen;
	      char *newname, *p;
	      struct elf_link_hash_entry *newh;

	      name = d->symbol;
	      namelen = strlen (name);
	      verstr = t->name;
	      verlen = strlen (verstr);
	      /* Room for NAME, up to two ELF_VER_CHR separators and
		 the terminating NUL.  */
	      newlen = namelen + verlen + 3;

	      newname = bfd_malloc (newlen);
	      if (newname == NULL)
		return FALSE;
	      memcpy (newname, name, namelen);

	      /* Check the hidden versioned definition.  */
	      p = newname + namelen;
	      *p++ = ELF_VER_CHR;
	      memcpy (p, verstr, verlen + 1);
	      newh = elf_link_hash_lookup (elf_hash_table (info),
					   newname, FALSE, FALSE,
					   FALSE);
	      if (newh == NULL
		  || (newh->root.type != bfd_link_hash_defined
		      && newh->root.type != bfd_link_hash_defweak))
		{
		  /* Check the default versioned definition.  */
		  *p++ = ELF_VER_CHR;
		  memcpy (p, verstr, verlen + 1);
		  newh = elf_link_hash_lookup (elf_hash_table (info),
					       newname, FALSE, FALSE,
					       FALSE);
		}
	      free (newname);

	      /* Mark this version if there is a definition and it is
		 not defined in a shared object.  */
	      if (newh != NULL
		  && !newh->def_dynamic
		  && (newh->root.type == bfd_link_hash_defined
		      || newh->root.type == bfd_link_hash_defweak))
		d->symver = 1;
	    }

      /* Attach all the symbols to their version information.  */
      asvinfo.output_bfd = output_bfd;
      asvinfo.info = info;
      asvinfo.verdefs = verdefs;
      asvinfo.failed = FALSE;

      elf_link_hash_traverse (elf_hash_table (info),
			      _bfd_elf_link_assign_sym_version,
			      &asvinfo);
      if (asvinfo.failed)
	return FALSE;

      if (!info->allow_undefined_version)
	{
	  /* Check if all global versions have a definition.  */
	  all_defined = TRUE;
	  for (t = verdefs; t != NULL; t = t->next)
	    for (d = t->globals.list; d != NULL; d = d->next)
	      if (!d->symver && !d->script)
		{
		  (*_bfd_error_handler)
		    (_("%s: undefined version: %s"),
		     d->pattern, t->name);
		  all_defined = FALSE;
		}

	  if (!all_defined)
	    {
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	}

      /* Find all symbols which were defined in a dynamic object and
	 make the backend pick a reasonable value for them.  */
      elf_link_hash_traverse (elf_hash_table (info),
			      _bfd_elf_adjust_dynamic_symbol,
			      &eif);
      if (eif.failed)
	return FALSE;

      /* Add some entries to the .dynamic section.  We fill in some of
	 the values later, in bfd_elf_final_link, but we must add the
	 entries now so that we know the final size of the .dynamic
	 section.  */

      /* If there are initialization and/or finalization functions to
	 call then add the corresponding DT_INIT/DT_FINI entries.  */
      h = (info->init_function
	   ? elf_link_hash_lookup (elf_hash_table (info),
				   info->init_function, FALSE,
				   FALSE, FALSE)
	   : NULL);
      if (h != NULL
	  && (h->ref_regular
	      || h->def_regular))
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_INIT, 0))
	    return FALSE;
	}
      h = (info->fini_function
	   ? elf_link_hash_lookup (elf_hash_table (info),
				   info->fini_function, FALSE,
				   FALSE, FALSE)
	   : NULL);
      if (h != NULL
	  && (h->ref_regular
	      || h->def_regular))
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_FINI, 0))
	    return FALSE;
	}

      s = bfd_get_section_by_name (output_bfd, ".preinit_array");
      if (s != NULL && s->linker_has_input)
	{
	  /* DT_PREINIT_ARRAY is not allowed in shared library.  */
	  if (!
info->executable)
	    {
	      bfd *sub;
	      asection *o;

	      /* Report which input object provided the offending
		 .preinit_array section.  */
	      for (sub = info->input_bfds; sub != NULL;
		   sub = sub->link_next)
		if (bfd_get_flavour (sub) == bfd_target_elf_flavour)
		  for (o = sub->sections; o != NULL; o = o->next)
		    if (elf_section_data (o)->this_hdr.sh_type
			== SHT_PREINIT_ARRAY)
		      {
			(*_bfd_error_handler)
			  (_("%B: .preinit_array section is not allowed in DSO"),
			   sub);
			break;
		      }

	      bfd_set_error (bfd_error_nonrepresentable_section);
	      return FALSE;
	    }

	  if (!_bfd_elf_add_dynamic_entry (info, DT_PREINIT_ARRAY, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_PREINIT_ARRAYSZ, 0))
	    return FALSE;
	}
      s = bfd_get_section_by_name (output_bfd, ".init_array");
      if (s != NULL && s->linker_has_input)
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_INIT_ARRAY, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_INIT_ARRAYSZ, 0))
	    return FALSE;
	}
      s = bfd_get_section_by_name (output_bfd, ".fini_array");
      if (s != NULL && s->linker_has_input)
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_FINI_ARRAY, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_FINI_ARRAYSZ, 0))
	    return FALSE;
	}

      dynstr = bfd_get_section_by_name (dynobj, ".dynstr");
      /* If .dynstr is excluded from the link, we don't want any of
	 these tags.  Strictly, we should be checking each section
	 individually;  This quick check covers for the case where
	 someone does a /DISCARD/ : { *(*) }.  */
      if (dynstr != NULL && dynstr->output_section != bfd_abs_section_ptr)
	{
	  bfd_size_type strsize;

	  strsize = _bfd_elf_strtab_size (elf_hash_table (info)->dynstr);
	  if ((info->emit_hash
	       && !_bfd_elf_add_dynamic_entry (info, DT_HASH, 0))
	      || (info->emit_gnu_hash
		  && !_bfd_elf_add_dynamic_entry (info, DT_GNU_HASH, 0))
	      || !_bfd_elf_add_dynamic_entry (info, DT_STRTAB, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_SYMTAB, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_STRSZ, strsize)
	      || !_bfd_elf_add_dynamic_entry (info, DT_SYMENT,
					      bed->s->sizeof_sym))
	    return FALSE;
	}
    }

  /* The backend must work out the sizes of all the other dynamic
     sections.  */
  if (bed->elf_backend_size_dynamic_sections
      && ! (*bed->elf_backend_size_dynamic_sections) (output_bfd, info))
    return FALSE;

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      unsigned long section_sym_count;
      asection *s;

      /* Set up the version definition section.  */
      s = bfd_get_section_by_name (dynobj, ".gnu.version_d");
      BFD_ASSERT (s != NULL);

      /* We may have created additional version definitions if we are
	 just linking a regular application.  */
      verdefs = asvinfo.verdefs;

      /* Skip anonymous version tag.  */
      if (verdefs != NULL && verdefs->vernum == 0)
	verdefs = verdefs->next;

      if (verdefs == NULL && !info->create_default_symver)
	s->flags |= SEC_EXCLUDE;
      else
	{
	  unsigned int cdefs;
	  bfd_size_type size;
	  struct bfd_elf_version_tree *t;
	  bfd_byte *p;
	  Elf_Internal_Verdef def;
	  Elf_Internal_Verdaux defaux;
	  struct bfd_link_hash_entry *bh;
	  struct elf_link_hash_entry *h;
	  const char *name;

	  cdefs = 0;
	  size = 0;

	  /* Make space for the base version.  */
	  size += sizeof (Elf_External_Verdef);
	  size += sizeof (Elf_External_Verdaux);
	  ++cdefs;

	  /* Make space for the default version.  */
	  if (info->create_default_symver)
	    {
	      size += sizeof (Elf_External_Verdef);
	      ++cdefs;
	    }

	  for (t = verdefs; t != NULL; t = t->next)
	    {
	      struct bfd_elf_version_deps *n;

	      size += sizeof (Elf_External_Verdef);
	      size += sizeof (Elf_External_Verdaux);
	      ++cdefs;

	      for (n = t->deps; n != NULL; n = n->next)
		size += sizeof (Elf_External_Verdaux);
	    }

	  s->size = size;
	  s->contents = bfd_alloc (output_bfd, s->size);
	  if (s->contents == NULL && s->size != 0)
	    return FALSE;

	  /* Fill in the version definition section.
*/
	  p = s->contents;

	  /* The base version describes the output object itself.  */
	  def.vd_version = VER_DEF_CURRENT;
	  def.vd_flags = VER_FLG_BASE;
	  def.vd_ndx = 1;
	  def.vd_cnt = 1;
	  if (info->create_default_symver)
	    {
	      def.vd_aux = 2 * sizeof (Elf_External_Verdef);
	      def.vd_next = sizeof (Elf_External_Verdef);
	    }
	  else
	    {
	      def.vd_aux = sizeof (Elf_External_Verdef);
	      def.vd_next = (sizeof (Elf_External_Verdef)
			     + sizeof (Elf_External_Verdaux));
	    }

	  if (soname_indx != (bfd_size_type) -1)
	    {
	      _bfd_elf_strtab_addref (elf_hash_table (info)->dynstr,
				      soname_indx);
	      def.vd_hash = bfd_elf_hash (soname);
	      defaux.vda_name = soname_indx;
	      name = soname;
	    }
	  else
	    {
	      bfd_size_type indx;

	      /* No soname; use the output file's basename instead.  */
	      name = lbasename (output_bfd->filename);
	      def.vd_hash = bfd_elf_hash (name);
	      indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
					  name, FALSE);
	      if (indx == (bfd_size_type) -1)
		return FALSE;
	      defaux.vda_name = indx;
	    }
	  defaux.vda_next = 0;

	  _bfd_elf_swap_verdef_out (output_bfd, &def,
				    (Elf_External_Verdef *) p);
	  p += sizeof (Elf_External_Verdef);
	  if (info->create_default_symver)
	    {
	      /* Add a symbol representing this version.  */
	      bh = NULL;
	      if (! (_bfd_generic_link_add_one_symbol
		     (info, dynobj, name, BSF_GLOBAL, bfd_abs_section_ptr,
		      0, NULL, FALSE,
		      get_elf_backend_data (dynobj)->collect, &bh)))
		return FALSE;
	      h = (struct elf_link_hash_entry *) bh;
	      h->non_elf = 0;
	      h->def_regular = 1;
	      h->type = STT_OBJECT;
	      h->verinfo.vertree = NULL;

	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;

	      /* Create a duplicate of the base version with the same
		 aux block, but different flags.  */
	      def.vd_flags = 0;
	      def.vd_ndx = 2;
	      def.vd_aux = sizeof (Elf_External_Verdef);
	      if (verdefs)
		def.vd_next = (sizeof (Elf_External_Verdef)
			       + sizeof (Elf_External_Verdaux));
	      else
		def.vd_next = 0;
	      _bfd_elf_swap_verdef_out (output_bfd, &def,
					(Elf_External_Verdef *) p);
	      p += sizeof (Elf_External_Verdef);
	    }
	  _bfd_elf_swap_verdaux_out (output_bfd, &defaux,
				     (Elf_External_Verdaux *) p);
	  p += sizeof (Elf_External_Verdaux);

	  /* Emit one Verdef (plus aux entries) per version node.  */
	  for (t = verdefs; t != NULL; t = t->next)
	    {
	      unsigned int cdeps;
	      struct bfd_elf_version_deps *n;

	      cdeps = 0;
	      for (n = t->deps; n != NULL; n = n->next)
		++cdeps;

	      /* Add a symbol representing this version.  */
	      bh = NULL;
	      if (! (_bfd_generic_link_add_one_symbol
		     (info, dynobj, t->name, BSF_GLOBAL, bfd_abs_section_ptr,
		      0, NULL, FALSE,
		      get_elf_backend_data (dynobj)->collect, &bh)))
		return FALSE;
	      h = (struct elf_link_hash_entry *) bh;
	      h->non_elf = 0;
	      h->def_regular = 1;
	      h->type = STT_OBJECT;
	      h->verinfo.vertree = t;

	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;

	      def.vd_version = VER_DEF_CURRENT;
	      def.vd_flags = 0;
	      if (t->globals.list == NULL
		  && t->locals.list == NULL
		  && ! t->used)
		def.vd_flags |= VER_FLG_WEAK;
	      def.vd_ndx = t->vernum + (info->create_default_symver ?
2 : 1);
	      def.vd_cnt = cdeps + 1;
	      def.vd_hash = bfd_elf_hash (t->name);
	      def.vd_aux = sizeof (Elf_External_Verdef);
	      def.vd_next = 0;
	      if (t->next != NULL)
		def.vd_next = (sizeof (Elf_External_Verdef)
			       + (cdeps + 1) * sizeof (Elf_External_Verdaux));

	      _bfd_elf_swap_verdef_out (output_bfd, &def,
					(Elf_External_Verdef *) p);
	      p += sizeof (Elf_External_Verdef);

	      defaux.vda_name = h->dynstr_index;
	      _bfd_elf_strtab_addref (elf_hash_table (info)->dynstr,
				      h->dynstr_index);
	      defaux.vda_next = 0;
	      if (t->deps != NULL)
		defaux.vda_next = sizeof (Elf_External_Verdaux);
	      t->name_indx = defaux.vda_name;

	      _bfd_elf_swap_verdaux_out (output_bfd, &defaux,
					 (Elf_External_Verdaux *) p);
	      p += sizeof (Elf_External_Verdaux);

	      for (n = t->deps; n != NULL; n = n->next)
		{
		  if (n->version_needed == NULL)
		    {
		      /* This can happen if there was an error in the
			 version script.  */
		      defaux.vda_name = 0;
		    }
		  else
		    {
		      defaux.vda_name = n->version_needed->name_indx;
		      _bfd_elf_strtab_addref (elf_hash_table (info)->dynstr,
					      defaux.vda_name);
		    }
		  if (n->next == NULL)
		    defaux.vda_next = 0;
		  else
		    defaux.vda_next = sizeof (Elf_External_Verdaux);

		  _bfd_elf_swap_verdaux_out (output_bfd, &defaux,
					     (Elf_External_Verdaux *) p);
		  p += sizeof (Elf_External_Verdaux);
		}
	    }

	  if (!_bfd_elf_add_dynamic_entry (info, DT_VERDEF, 0)
	      || !_bfd_elf_add_dynamic_entry (info, DT_VERDEFNUM, cdefs))
	    return FALSE;

	  elf_tdata (output_bfd)->cverdefs = cdefs;
	}

      if ((info->new_dtags && info->flags) || (info->flags & DF_STATIC_TLS))
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_FLAGS, info->flags))
	    return FALSE;
	}
      else if (info->flags & DF_BIND_NOW)
	{
	  if (!_bfd_elf_add_dynamic_entry (info, DT_BIND_NOW, 0))
	    return FALSE;
	}

      if (info->flags_1)
	{
	  /* These DF_1 flags are meaningless for an executable.  */
	  if (info->executable)
	    info->flags_1 &= ~ (DF_1_INITFIRST
				| DF_1_NODELETE
				| DF_1_NOOPEN);
	  if (!_bfd_elf_add_dynamic_entry (info, DT_FLAGS_1,
					   info->flags_1))
	    return FALSE;
	}

      /* Work out the size of the version reference section.  */
      s = bfd_get_section_by_name (dynobj, ".gnu.version_r");
      BFD_ASSERT (s != NULL);
      {
	struct elf_find_verdep_info sinfo;

	sinfo.output_bfd = output_bfd;
	sinfo.info = info;
	sinfo.vers = elf_tdata (output_bfd)->cverdefs;
	if (sinfo.vers == 0)
	  sinfo.vers = 1;
	sinfo.failed = FALSE;

	/* NOTE(review): sinfo.failed is never examined after this
	   traversal, so a failure inside
	   _bfd_elf_link_find_version_dependencies would go unnoticed
	   here.  Confirm against upstream binutils.  */
	elf_link_hash_traverse (elf_hash_table (info),
				_bfd_elf_link_find_version_dependencies,
				&sinfo);

	if (elf_tdata (output_bfd)->verref == NULL)
	  s->flags |= SEC_EXCLUDE;
	else
	  {
	    Elf_Internal_Verneed *t;
	    unsigned int size;
	    unsigned int crefs;
	    bfd_byte *p;

	    /* Build the version definition section.  */
	    size = 0;
	    crefs = 0;
	    for (t = elf_tdata (output_bfd)->verref;
		 t != NULL;
		 t = t->vn_nextref)
	      {
		Elf_Internal_Vernaux *a;

		size += sizeof (Elf_External_Verneed);
		++crefs;
		for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr)
		  size += sizeof (Elf_External_Vernaux);
	      }

	    s->size = size;
	    s->contents = bfd_alloc (output_bfd, s->size);
	    if (s->contents == NULL)
	      return FALSE;

	    p = s->contents;
	    for (t = elf_tdata (output_bfd)->verref;
		 t != NULL;
		 t = t->vn_nextref)
	      {
		unsigned int caux;
		Elf_Internal_Vernaux *a;
		bfd_size_type indx;

		caux = 0;
		for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr)
		  ++caux;

		t->vn_version = VER_NEED_CURRENT;
		t->vn_cnt = caux;
		/* Name the needed object by its DT_SONAME if it has
		   one, else by its file basename.  */
		indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
					    elf_dt_name (t->vn_bfd) != NULL
					    ? elf_dt_name (t->vn_bfd)
					    : lbasename (t->vn_bfd->filename),
					    FALSE);
		if (indx == (bfd_size_type) -1)
		  return FALSE;
		t->vn_file = indx;
		t->vn_aux = sizeof (Elf_External_Verneed);
		if (t->vn_nextref == NULL)
		  t->vn_next = 0;
		else
		  t->vn_next = (sizeof (Elf_External_Verneed)
				+ caux * sizeof (Elf_External_Vernaux));

		_bfd_elf_swap_verneed_out (output_bfd, t,
					   (Elf_External_Verneed *) p);
		p += sizeof (Elf_External_Verneed);

		for (a = t->vn_auxptr; a != NULL; a = a->vna_nextptr)
		  {
		    a->vna_hash = bfd_elf_hash (a->vna_nodename);
		    indx = _bfd_elf_strtab_add (elf_hash_table (info)->dynstr,
						a->vna_nodename, FALSE);
		    if (indx == (bfd_size_type) -1)
		      return FALSE;
		    a->vna_name = indx;
		    if (a->vna_nextptr == NULL)
		      a->vna_next = 0;
		    else
		      a->vna_next = sizeof (Elf_External_Vernaux);

		    _bfd_elf_swap_vernaux_out (output_bfd, a,
					       (Elf_External_Vernaux *) p);
		    p += sizeof (Elf_External_Vernaux);
		  }
	      }

	    if (!_bfd_elf_add_dynamic_entry (info, DT_VERNEED, 0)
		|| !_bfd_elf_add_dynamic_entry (info, DT_VERNEEDNUM, crefs))
	      return FALSE;

	    elf_tdata (output_bfd)->cverrefs = crefs;
	  }
      }

      /* Exclude .gnu.version when there is no versioning information
	 at all, or when there are no dynamic symbols.  */
      if ((elf_tdata (output_bfd)->cverrefs == 0
	   && elf_tdata (output_bfd)->cverdefs == 0)
	  || _bfd_elf_link_renumber_dynsyms (output_bfd, info,
					     &section_sym_count) == 0)
	{
	  s = bfd_get_section_by_name (dynobj, ".gnu.version");
	  s->flags |= SEC_EXCLUDE;
	}
    }
  return TRUE;
}

/* Find the first non-excluded output section.  We'll use its
   section symbol for some emitted relocs.  */

void
_bfd_elf_init_1_index_section (bfd *output_bfd, struct bfd_link_info *info)
{
  asection *s;

  for (s = output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & (SEC_EXCLUDE | SEC_ALLOC)) == SEC_ALLOC
	&& !_bfd_elf_link_omit_section_dynsym (output_bfd, info, s))
      {
	elf_hash_table (info)->text_index_section = s;
	break;
      }
}

/* Find two non-excluded output sections, one for code, one for data.
   We'll use their section symbols for some emitted relocs.
*/

void
_bfd_elf_init_2_index_sections (bfd *output_bfd, struct bfd_link_info *info)
{
  asection *s;

  /* First allocated read-only section becomes the text index
     section.  */
  for (s = output_bfd->sections; s != NULL; s = s->next)
    if (((s->flags & (SEC_EXCLUDE | SEC_ALLOC | SEC_READONLY))
	 == (SEC_ALLOC | SEC_READONLY))
	&& !_bfd_elf_link_omit_section_dynsym (output_bfd, info, s))
      {
	elf_hash_table (info)->text_index_section = s;
	break;
      }

  /* First allocated writable section becomes the data index
     section.  */
  for (s = output_bfd->sections; s != NULL; s = s->next)
    if (((s->flags & (SEC_EXCLUDE | SEC_ALLOC | SEC_READONLY))
	 == SEC_ALLOC)
	&& !_bfd_elf_link_omit_section_dynsym (output_bfd, info, s))
      {
	elf_hash_table (info)->data_index_section = s;
	break;
      }

  /* Fall back to the data section if no suitable text section was
     found.  */
  if (elf_hash_table (info)->text_index_section == NULL)
    elf_hash_table (info)->text_index_section
      = elf_hash_table (info)->data_index_section;
}

/* Size the dynamic symbol table, hash sections and .dynstr once the
   final set of dynamic symbols is known.  */

bfd_boolean
bfd_elf_size_dynsym_hash_dynstr (bfd *output_bfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;

  if (!is_elf_hash_table (info->hash))
    return TRUE;

  bed = get_elf_backend_data (output_bfd);
  (*bed->elf_backend_init_index_section) (output_bfd, info);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      bfd *dynobj;
      asection *s;
      bfd_size_type dynsymcount;
      unsigned long section_sym_count;
      unsigned int dtagcount;

      dynobj = elf_hash_table (info)->dynobj;

      /* Assign dynsym indices.  In a shared library we generate a
	 section symbol for each output section, which come first.
	 Next come all of the back-end allocated local dynamic syms,
	 followed by the rest of the global symbols.  */
      dynsymcount = _bfd_elf_link_renumber_dynsyms (output_bfd, info,
						    &section_sym_count);

      /* Work out the size of the symbol version section.
*/
      s = bfd_get_section_by_name (dynobj, ".gnu.version");
      BFD_ASSERT (s != NULL);
      if (dynsymcount != 0
	  && (s->flags & SEC_EXCLUDE) == 0)
	{
	  /* One Versym entry per dynamic symbol.  */
	  s->size = dynsymcount * sizeof (Elf_External_Versym);
	  s->contents = bfd_zalloc (output_bfd, s->size);
	  if (s->contents == NULL)
	    return FALSE;

	  if (!_bfd_elf_add_dynamic_entry (info, DT_VERSYM, 0))
	    return FALSE;
	}

      /* Set the size of the .dynsym and .hash sections.  We counted
	 the number of dynamic symbols in elf_link_add_object_symbols.
	 We will build the contents of .dynsym and .hash when we build
	 the final symbol table, because until then we do not know the
	 correct value to give the symbols.  We built the .dynstr
	 section as we went along in elf_link_add_object_symbols.  */
      s = bfd_get_section_by_name (dynobj, ".dynsym");
      BFD_ASSERT (s != NULL);
      s->size = dynsymcount * bed->s->sizeof_sym;

      if (dynsymcount != 0)
	{
	  s->contents = bfd_alloc (output_bfd, s->size);
	  if (s->contents == NULL)
	    return FALSE;

	  /* The first entry in .dynsym is a dummy symbol.
	     Clear all the section syms, in case we don't output
	     them all.  */
	  ++section_sym_count;
	  memset (s->contents, 0,
		  section_sym_count * bed->s->sizeof_sym);
	}

      elf_hash_table (info)->bucketcount = 0;

      /* Compute the size of the hashing table.  As a side effect this
	 computes the hash values for all the names we export.  */
      if (info->emit_hash)
	{
	  unsigned long int *hashcodes;
	  unsigned long int *hashcodesp;
	  bfd_size_type amt;
	  unsigned long int nsyms;
	  size_t bucketcount;
	  size_t hash_entry_size;

	  /* Compute the hash values for all exported symbols.  At the
	     same time store the values in an array so that we could
	     use them for optimizations.  */
	  amt = dynsymcount * sizeof (unsigned long int);
	  hashcodes = bfd_malloc (amt);
	  if (hashcodes == NULL)
	    return FALSE;
	  hashcodesp = hashcodes;

	  /* Put all hash values in HASHCODES.  */
	  elf_link_hash_traverse (elf_hash_table (info),
				  elf_collect_hash_codes, &hashcodesp);
	  nsyms = hashcodesp - hashcodes;

	  bucketcount
	    = compute_bucket_count (info, hashcodes, nsyms, 0);
	  free (hashcodes);

	  if (bucketcount == 0)
	    return FALSE;

	  elf_hash_table (info)->bucketcount = bucketcount;

	  s = bfd_get_section_by_name (dynobj, ".hash");
	  BFD_ASSERT (s != NULL);
	  hash_entry_size = elf_section_data (s)->this_hdr.sh_entsize;
	  s->size = ((2 + bucketcount + dynsymcount) * hash_entry_size);
	  s->contents = bfd_zalloc (output_bfd, s->size);
	  if (s->contents == NULL)
	    return FALSE;

	  /* The first two words of .hash hold the bucket and chain
	     counts.  */
	  bfd_put (8 * hash_entry_size, output_bfd, bucketcount,
		   s->contents);
	  bfd_put (8 * hash_entry_size, output_bfd, dynsymcount,
		   s->contents + hash_entry_size);
	}

      if (info->emit_gnu_hash)
	{
	  size_t i, cnt;
	  unsigned char *contents;
	  struct collect_gnu_hash_codes cinfo;
	  bfd_size_type amt;
	  size_t bucketcount;

	  memset (&cinfo, 0, sizeof (cinfo));

	  /* Compute the hash values for all exported symbols.  At the
	     same time store the values in an array so that we could
	     use them for optimizations.  */
	  amt = dynsymcount * 2 * sizeof (unsigned long int);
	  cinfo.hashcodes = bfd_malloc (amt);
	  if (cinfo.hashcodes == NULL)
	    return FALSE;

	  cinfo.hashval = cinfo.hashcodes + dynsymcount;
	  cinfo.min_dynindx = -1;
	  cinfo.output_bfd = output_bfd;
	  cinfo.bed = bed;

	  /* Put all hash values in HASHCODES.  */
	  elf_link_hash_traverse (elf_hash_table (info),
				  elf_collect_gnu_hash_codes, &cinfo);

	  bucketcount
	    = compute_bucket_count (info, cinfo.hashcodes, cinfo.nsyms, 1);

	  if (bucketcount == 0)
	    {
	      free (cinfo.hashcodes);
	      return FALSE;
	    }

	  s = bfd_get_section_by_name (dynobj, ".gnu.hash");
	  BFD_ASSERT (s != NULL);

	  if (cinfo.nsyms == 0)
	    {
	      /* Empty .gnu.hash section is special.  */
	      BFD_ASSERT (cinfo.min_dynindx == -1);
	      free (cinfo.hashcodes);
	      s->size = 5 * 4 + bed->s->arch_size / 8;
	      contents = bfd_zalloc (output_bfd, s->size);
	      if (contents == NULL)
		return FALSE;
	      s->contents = contents;
	      /* 1 empty bucket.
*/
	      bfd_put_32 (output_bfd, 1, contents);
	      /* SYMIDX above the special symbol 0.  */
	      bfd_put_32 (output_bfd, 1, contents + 4);
	      /* Just one word for bitmask.  */
	      bfd_put_32 (output_bfd, 1, contents + 8);
	      /* Only hash fn bloom filter.  */
	      bfd_put_32 (output_bfd, 0, contents + 12);
	      /* No hashes are valid - empty bitmask.  */
	      bfd_put (bed->s->arch_size, output_bfd, 0, contents + 16);
	      /* No hashes in the only bucket.  */
	      bfd_put_32 (output_bfd, 0,
			  contents + 16 + bed->s->arch_size / 8);
	    }
	  else
	    {
	      unsigned long int maskwords, maskbitslog2;
	      BFD_ASSERT (cinfo.min_dynindx != -1);

	      /* Choose the filter bitmask size (a power of two) scaled
		 to the number of hashed symbols.  */
	      maskbitslog2 = bfd_log2 (cinfo.nsyms) + 1;
	      if (maskbitslog2 < 3)
		maskbitslog2 = 5;
	      else if ((1 << (maskbitslog2 - 2)) & cinfo.nsyms)
		maskbitslog2 = maskbitslog2 + 3;
	      else
		maskbitslog2 = maskbitslog2 + 2;
	      /* SHIFT1 selects bits within one bitmask word (32 or 64
		 bits wide).  */
	      if (bed->s->arch_size == 64)
		{
		  if (maskbitslog2 == 5)
		    maskbitslog2 = 6;
		  cinfo.shift1 = 6;
		}
	      else
		cinfo.shift1 = 5;
	      cinfo.mask = (1 << cinfo.shift1) - 1;
	      cinfo.shift2 = maskbitslog2;
	      cinfo.maskbits = 1 << maskbitslog2;
	      maskwords = 1 << (maskbitslog2 - cinfo.shift1);
	      /* One allocation: bitmask words, then per-bucket counts,
		 then per-bucket start indices.  */
	      amt = bucketcount * sizeof (unsigned long int) * 2;
	      amt += maskwords * sizeof (bfd_vma);
	      cinfo.bitmask = bfd_malloc (amt);
	      if (cinfo.bitmask == NULL)
		{
		  free (cinfo.hashcodes);
		  return FALSE;
		}

	      cinfo.counts = (void *) (cinfo.bitmask + maskwords);
	      cinfo.indx = cinfo.counts + bucketcount;
	      cinfo.symindx = dynsymcount - cinfo.nsyms;
	      memset (cinfo.bitmask, 0, maskwords * sizeof (bfd_vma));

	      /* Determine how often each hash bucket is used.  */
	      memset (cinfo.counts, 0,
		      bucketcount * sizeof (cinfo.counts[0]));
	      for (i = 0; i < cinfo.nsyms; ++i)
		++cinfo.counts[cinfo.hashcodes[i] % bucketcount];

	      /* Assign each non-empty bucket its starting symbol
		 index.  */
	      for (i = 0, cnt = cinfo.symindx; i < bucketcount; ++i)
		if (cinfo.counts[i] != 0)
		  {
		    cinfo.indx[i] = cnt;
		    cnt += cinfo.counts[i];
		  }
	      BFD_ASSERT (cnt == dynsymcount);
	      cinfo.bucketcount = bucketcount;
	      cinfo.local_indx = cinfo.min_dynindx;

	      s->size = (4 + bucketcount + cinfo.nsyms) * 4;
	      s->size += cinfo.maskbits / 8;
	      contents = bfd_zalloc (output_bfd, s->size);
	      if (contents == NULL)
		{
		  free (cinfo.bitmask);
		  free (cinfo.hashcodes);
		  return FALSE;
		}

	      s->contents = contents;
	      /* Header: nbuckets, symindx, maskwords, shift2.  */
	      bfd_put_32 (output_bfd, bucketcount, contents);
	      bfd_put_32 (output_bfd, cinfo.symindx, contents + 4);
	      bfd_put_32 (output_bfd, maskwords, contents + 8);
	      bfd_put_32 (output_bfd, cinfo.shift2, contents + 12);
	      contents += 16 + cinfo.maskbits / 8;

	      /* Bucket array: start index of each chain, 0 if the
		 bucket is empty.  */
	      for (i = 0; i < bucketcount; ++i)
		{
		  if (cinfo.counts[i] == 0)
		    bfd_put_32 (output_bfd, 0, contents);
		  else
		    bfd_put_32 (output_bfd, cinfo.indx[i], contents);
		  contents += 4;
		}

	      cinfo.contents = contents;

	      /* Renumber dynamic symbols, populate .gnu.hash section.  */
	      elf_link_hash_traverse (elf_hash_table (info),
				      elf_renumber_gnu_hash_syms, &cinfo);

	      /* Finally write out the bitmask words, filled in during
		 the traversal above.  */
	      contents = s->contents + 16;
	      for (i = 0; i < maskwords; ++i)
		{
		  bfd_put (bed->s->arch_size, output_bfd, cinfo.bitmask[i],
			   contents);
		  contents += bed->s->arch_size / 8;
		}

	      free (cinfo.bitmask);
	      free (cinfo.hashcodes);
	    }
	}

      s = bfd_get_section_by_name (dynobj, ".dynstr");
      BFD_ASSERT (s != NULL);

      elf_finalize_dynstr (output_bfd, info);

      s->size = _bfd_elf_strtab_size (elf_hash_table (info)->dynstr);

      /* Terminate .dynamic with DT_NULL entries, leaving the requested
	 number of spare slots.  */
      for (dtagcount = 0; dtagcount <= info->spare_dynamic_tags; ++dtagcount)
	if (!_bfd_elf_add_dynamic_entry (info, DT_NULL, 0))
	  return FALSE;
    }

  return TRUE;
}

/* Final phase of ELF linker.  */

/* A structure we use to avoid passing large numbers of arguments.  */

struct elf_final_link_info
{
  /* General link information.  */
  struct bfd_link_info *info;
  /* Output BFD.
*/
  bfd *output_bfd;
  /* Symbol string table.  */
  struct bfd_strtab_hash *symstrtab;
  /* .dynsym section.  */
  asection *dynsym_sec;
  /* .hash section.  */
  asection *hash_sec;
  /* symbol version section (.gnu.version).  */
  asection *symver_sec;
  /* Buffer large enough to hold contents of any section.  */
  bfd_byte *contents;
  /* Buffer large enough to hold external relocs of any section.  */
  void *external_relocs;
  /* Buffer large enough to hold internal relocs of any section.  */
  Elf_Internal_Rela *internal_relocs;
  /* Buffer large enough to hold external local symbols of any input
     BFD.  */
  bfd_byte *external_syms;
  /* And a buffer for symbol section indices.  */
  Elf_External_Sym_Shndx *locsym_shndx;
  /* Buffer large enough to hold internal local symbols of any input
     BFD.  */
  Elf_Internal_Sym *internal_syms;
  /* Array large enough to hold a symbol index for each local symbol
     of any input BFD.  */
  long *indices;
  /* Array large enough to hold a section pointer for each local
     symbol of any input BFD.  */
  asection **sections;
  /* Buffer to hold swapped out symbols.  */
  bfd_byte *symbuf;
  /* And one for symbol section indices.  */
  Elf_External_Sym_Shndx *symshndxbuf;
  /* Number of swapped out symbols in buffer.  */
  size_t symbuf_count;
  /* Number of symbols which fit in symbuf.  */
  size_t symbuf_size;
  /* And same for symshndxbuf.  */
  size_t shndxbuf_size;
};

/* This struct is used to pass information to elf_link_output_extsym.  */

struct elf_outext_info
{
  bfd_boolean failed;
  bfd_boolean localsyms;
  struct elf_final_link_info *finfo;
};


/* Support for evaluating a complex relocation.

   Complex relocations are generalized, self-describing relocations.
   The implementation of them consists of two parts: complex symbols,
   and the relocations themselves.

   The relocations use a reserved elf-wide relocation type code (R_RELC
   external / BFD_RELOC_RELC internal) and an encoding of relocation
   field information (start bit, end bit, word width, etc) into the
   addend.
This information is extracted from CGEN-generated operand tables
   within gas.

   Complex symbols are mangled symbols (BSF_RELC external / STT_RELC
   internal) representing prefix-notation expressions, including but
   not limited to those sorts of expressions normally encoded as
   addends in the addend field.  The symbol mangling format is:

   <node> := <literal>
          |  <unary-operator> ':' <node>
          |  <binary-operator> ':' <node> ':' <node>
          ;

   <literal> := 's' <digits=N> ':' <N character symbol name>
             |  'S' <digits=N> ':' <N character section name>
             |  '#' <hexdigits>
             ;

   <binary-operator> := as in C
   <unary-operator> := as in C, plus "0-" for unambiguous negation.  */

/* Record VAL as the resolved value of the symbol at index SYMIDX in
   BFD_WITH_GLOBALS, turning it into an absolute symbol.  */

static void
set_symbol_value (bfd * bfd_with_globals,
                  struct elf_final_link_info * finfo,
                  int symidx,
                  bfd_vma val)
{
  bfd_boolean is_local;
  Elf_Internal_Sym * sym;
  struct elf_link_hash_entry ** sym_hashes;
  struct elf_link_hash_entry * h;

  sym_hashes = elf_sym_hashes (bfd_with_globals);
  sym = finfo->internal_syms + symidx;
  is_local = ELF_ST_BIND(sym->st_info) == STB_LOCAL;

  if (is_local)
    {
      /* It is a local symbol: move it to the
         "absolute" section and give it a value.  */
      sym->st_shndx = SHN_ABS;
      sym->st_value = val;
    }
  else
    {
      /* It is a global symbol: set its link type
         to "defined" and give it a value.
         NOTE(review): SYMIDX indexes sym_hashes directly, with no
         adjustment for the number of local symbols -- confirm callers
         pass indices laid out this way.  */
      h = sym_hashes [symidx];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.value = val;
      h->root.u.def.section = bfd_abs_section_ptr;
    }
}

/* Look NAME up first among INPUT_BFD's local symbols, then in the
   global linker hash table.  On success store the symbol's link-time
   value (section-relative values converted to output VMAs) in *RESULT
   and return TRUE.  */

static bfd_boolean
resolve_symbol (const char * name,
                bfd * input_bfd,
                struct elf_final_link_info * finfo,
                bfd_vma * result,
                size_t locsymcount)
{
  Elf_Internal_Sym * sym;
  struct bfd_link_hash_entry * global_entry;
  const char * candidate = NULL;
  Elf_Internal_Shdr * symtab_hdr;
  asection * sec = NULL;
  size_t i;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;

  for (i = 0; i < locsymcount; ++ i)
    {
      sym = finfo->internal_syms + i;
      sec = finfo->sections [i];

      if (ELF_ST_BIND (sym->st_info) != STB_LOCAL)
        continue;

      candidate = bfd_elf_string_from_elf_section (input_bfd,
                                                   symtab_hdr->sh_link,
                                                   sym->st_name);
#ifdef DEBUG
      printf ("Comparing string: '%s' vs. '%s' = 0x%x\n",
              name, candidate, (unsigned int)sym->st_value);
#endif
      if (candidate && strcmp (candidate, name) == 0)
        {
          * result = sym->st_value;

          /* Convert a section-relative value to an output VMA.  */
          if (sym->st_shndx > SHN_UNDEF &&
              sym->st_shndx < SHN_LORESERVE)
            {
#ifdef DEBUG
              printf ("adjusting for sec '%s' @ 0x%x + 0x%x\n",
                      sec->output_section->name,
                      (unsigned int)sec->output_section->vma,
                      (unsigned int)sec->output_offset);
#endif
              * result += sec->output_offset + sec->output_section->vma;
            }
#ifdef DEBUG
          printf ("Found symbol with effective value %8.8x\n",
                  (unsigned int)* result);
#endif
          return TRUE;
        }
    }

  /* Hmm, haven't found it yet. perhaps it is a global.  */
  global_entry = bfd_link_hash_lookup (finfo->info->hash, name,
                                       FALSE, FALSE, TRUE);
  if (!global_entry)
    return FALSE;

  if (global_entry->type == bfd_link_hash_defined
      || global_entry->type == bfd_link_hash_defweak)
    {
      * result = global_entry->u.def.value
        + global_entry->u.def.section->output_section->vma
        + global_entry->u.def.section->output_offset;
#ifdef DEBUG
      printf ("Found GLOBAL symbol '%s' with value %8.8x\n",
              global_entry->root.string, (unsigned int)*result);
#endif
      return TRUE;
    }

  if (global_entry->type == bfd_link_hash_common)
    {
      *result = global_entry->u.def.value +
        bfd_com_section_ptr->output_section->vma +
        bfd_com_section_ptr->output_offset;
#ifdef DEBUG
      printf ("Found COMMON symbol '%s' with value %8.8x\n",
              global_entry->root.string, (unsigned int)*result);
#endif
      return TRUE;
    }

  return FALSE;
}

/* Find section NAME on the SECTIONS list and return its vma in
   *RESULT.  Also understands the pseudo-name "<section>.end", which
   resolves to the end address of the section.  */

static bfd_boolean
resolve_section (const char * name,
                 asection * sections,
                 bfd_vma * result)
{
  asection * curr;
  unsigned int len;

  for (curr = sections; curr; curr = curr->next)
    if (strcmp (curr->name, name) == 0)
      {
        *result = curr->vma;
        return TRUE;
      }

  /* Hmm. still haven't found it. try pseudo-section names.  */
  for (curr = sections; curr; curr = curr->next)
    {
      len = strlen (curr->name);
      if (len > strlen (name))
        continue;

      if (strncmp (curr->name, name, len) == 0)
        {
          if (strncmp (".end", name + len, 4) == 0)
            {
              *result = curr->vma + curr->size;
              return TRUE;
            }

          /* Insert more pseudo-section names here, if you like.  */
        }
    }

  return FALSE;
}

/* Report an unresolved name encountered while evaluating a complex
   symbol.  REFTYPE is "symbol" or "section".  */

static void
undefined_reference (const char * reftype, const char * name)
{
  _bfd_error_handler (_("undefined %s reference in complex symbol: %s"),
                      reftype, name);
}

/* Recursive-descent evaluator for the prefix-notation expression in
   SYM (mangling grammar above).  On success stores the value in
   *RESULT and the first unconsumed character in *ADVANCED.  */

static bfd_boolean
eval_symbol (bfd_vma * result,
             char * sym,
             char ** advanced,
             bfd * input_bfd,
             struct elf_final_link_info * finfo,
             bfd_vma addr,
             bfd_vma section_offset,
             size_t locsymcount,
             int signed_p)
{
  int len;
  int symlen;
  bfd_vma a;
  bfd_vma b;
  const int bufsz = 4096;
  char symbuf [bufsz];
  const char * symend;
  bfd_boolean symbol_is_section = FALSE;

  len = strlen (sym);
  symend = sym + len;

  if (len < 1 || len > bufsz)
    {
      bfd_set_error (bfd_error_invalid_operation);
      return FALSE;
    }

  switch (* sym)
    {
    case '.':
      /* The location of the reloc.  */
      * result = addr + section_offset;
      * advanced = sym + 1;
      return TRUE;

    case '#':
      /* A hex literal.  */
      ++ sym;
      * result = strtoul (sym, advanced, 16);
      return TRUE;

    case 'S':
      symbol_is_section = TRUE;
      /* Fall through.  */
    case 's':
      ++ sym;
      symlen = strtol (sym, &sym, 10);
      ++ sym; /* Skip the trailing ':'.  */

      if ((symend < sym) || ((symlen + 1) > bufsz))
        {
          bfd_set_error (bfd_error_invalid_operation);
          return FALSE;
        }

      memcpy (symbuf, sym, symlen);
      symbuf [symlen] = '\0';
      * advanced = sym + symlen;

      /* Is it always possible, with complex symbols, that gas
         "mis-guessed" the symbol as a section, or vice-versa. so we're
         pretty liberal in our interpretation here; section means "try
         section first", not "must be a section", and likewise with
         symbol.  */

      if (symbol_is_section)
        {
          if ((resolve_section (symbuf, finfo->output_bfd->sections,
                                result) != TRUE)
              && (resolve_symbol (symbuf, input_bfd, finfo, result,
                                  locsymcount) != TRUE))
            {
              undefined_reference ("section", symbuf);
              return FALSE;
            }
        }
      else
        {
          if ((resolve_symbol (symbuf, input_bfd, finfo, result,
                               locsymcount) != TRUE)
              && (resolve_section (symbuf, finfo->output_bfd->sections,
                                   result) != TRUE))
            {
              undefined_reference ("symbol", symbuf);
              return FALSE;
            }
        }

      return TRUE;

      /* All that remains are operators.  */

#define UNARY_OP(op)                                                    \
  if (strncmp (sym, #op, strlen (#op)) == 0)                            \
    {                                                                   \
      sym += strlen (#op);                                              \
      if (* sym == ':')                                                 \
        ++ sym;                                                         \
      if (eval_symbol (& a, sym, & sym, input_bfd, finfo, addr,         \
                       section_offset, locsymcount, signed_p)           \
          != TRUE)                                                      \
        return FALSE;                                                   \
      if (signed_p)                                                     \
        * result = op ((signed)a);                                      \
      else                                                              \
        * result = op a;                                                \
      * advanced = sym;                                                 \
      return TRUE;                                                      \
    }

#define BINARY_OP(op)                                                   \
  if (strncmp (sym, #op, strlen (#op)) == 0)                            \
    {                                                                   \
      sym += strlen (#op);                                              \
      if (* sym == ':')                                                 \
        ++ sym;                                                         \
      if (eval_symbol (& a, sym, & sym, input_bfd, finfo, addr,         \
                       section_offset, locsymcount, signed_p)           \
          != TRUE)                                                      \
        return FALSE;                                                   \
      ++ sym;                                                           \
      if (eval_symbol (& b, sym, & sym, input_bfd, finfo, addr,         \
                       section_offset, locsymcount, signed_p)           \
          != TRUE)                                                      \
        return FALSE;                                                   \
      if (signed_p)                                                     \
        * result = ((signed) a) op ((signed) b);                        \
      else                                                              \
        * result = a op b;                                              \
      * advanced = sym;                                                 \
      return TRUE;                                                      \
    }

    default:
      /* Longer operator spellings must be tried before their
         prefixes (e.g. "<<" before "<").  */
      UNARY_OP  (0-);
      BINARY_OP (<<);
      BINARY_OP (>>);
      BINARY_OP (==);
      BINARY_OP (!=);
      BINARY_OP (<=);
      BINARY_OP (>=);
      BINARY_OP (&&);
      BINARY_OP (||);
      UNARY_OP  (~);
      UNARY_OP  (!);
      BINARY_OP (*);
      BINARY_OP (/);
      BINARY_OP (%);
      BINARY_OP (^);
      BINARY_OP (|);
      BINARY_OP (&);
      BINARY_OP (+);
      BINARY_OP (-);
      BINARY_OP (<);
      BINARY_OP (>);
#undef UNARY_OP
#undef BINARY_OP
      _bfd_error_handler (_("unknown operator '%c' in complex symbol"),
                          * sym);
      bfd_set_error (bfd_error_invalid_operation);
      return FALSE;
    }
}

/* Entry point to evaluator, called from elf_link_input_bfd.  */

static bfd_boolean
evaluate_complex_relocation_symbols (bfd * input_bfd,
                                     struct elf_final_link_info * finfo,
                                     size_t locsymcount)
{
  const struct elf_backend_data * bed;
  Elf_Internal_Shdr * symtab_hdr;
  struct elf_link_hash_entry ** sym_hashes;
  asection * reloc_sec;
  bfd_boolean result = TRUE;

  /* For each section, we're going to check and see if it has any
     complex relocations, and we're going to evaluate any of them
     we can.
*/

  if (finfo->info->relocatable)
    return TRUE;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  bed = get_elf_backend_data (input_bfd);

  for (reloc_sec = input_bfd->sections;
       reloc_sec;
       reloc_sec = reloc_sec->next)
    {
      Elf_Internal_Rela * internal_relocs;
      unsigned long i;

      /* This section was omitted from the link.  */
      if (! reloc_sec->linker_mark)
        continue;

      /* Only process sections containing relocs.  */
      if ((reloc_sec->flags & SEC_RELOC) == 0)
        continue;

      if (reloc_sec->reloc_count == 0)
        continue;

      /* Read in the relocs for this section.  */
      internal_relocs
        = _bfd_elf_link_read_relocs (input_bfd, reloc_sec, NULL,
                                     (Elf_Internal_Rela *) NULL,
                                     FALSE);
      if (internal_relocs == NULL)
        continue;

      for (i = reloc_sec->reloc_count; i--;)
        {
          Elf_Internal_Rela * rel;
          char * sym_name;
          bfd_vma index;
          Elf_Internal_Sym * sym;
          /* NOTE(review): this declaration shadows the function-level
             bfd_boolean `result', so the `result = FALSE' below only
             writes this inner copy and the function still returns
             TRUE on evaluation failure -- confirm intended.  */
          bfd_vma result;
          bfd_vma section_offset;
          bfd_vma addr;
          int signed_p = 0;

          rel = internal_relocs + i;
          section_offset = reloc_sec->output_section->vma
            + reloc_sec->output_offset;
          addr = rel->r_offset;

          index = ELF32_R_SYM (rel->r_info);
          /* NOTE(review): ELF64 r_info normally keeps the symbol index
             in the top 32 bits; a shift of 24 looks suspicious --
             verify against the backend's r_info layout.  */
          if (bed->s->arch_size == 64)
            index >>= 24;

          if (index == STN_UNDEF)
            continue;

          if (index < locsymcount)
            {
              /* The symbol is local.  */
              sym = finfo->internal_syms + index;

              /* We're only processing STT_RELC or STT_SRELC type
                 symbols.  */
              if ((ELF_ST_TYPE (sym->st_info) != STT_RELC) &&
                  (ELF_ST_TYPE (sym->st_info) != STT_SRELC))
                continue;

              sym_name = bfd_elf_string_from_elf_section
                (input_bfd, symtab_hdr->sh_link, sym->st_name);

              signed_p = (ELF_ST_TYPE (sym->st_info) == STT_SRELC);
            }
          else
            {
              /* The symbol is global.  */
              struct elf_link_hash_entry * h;

              if (elf_bad_symtab (input_bfd))
                continue;

              h = sym_hashes [index - locsymcount];

              while (h->root.type == bfd_link_hash_indirect
                     || h->root.type == bfd_link_hash_warning)
                h = (struct elf_link_hash_entry *) h->root.u.i.link;

              if (h->type != STT_RELC && h->type != STT_SRELC)
                continue;

              signed_p = (h->type == STT_SRELC);
              sym_name = (char *) h->root.root.string;
            }
#ifdef DEBUG
          printf ("Encountered a complex symbol!");
          printf (" (input_bfd %s, section %s, reloc %ld\n",
                  input_bfd->filename, reloc_sec->name, i);
          printf (" symbol: idx %8.8lx, name %s\n",
                  index, sym_name);
          printf (" reloc : info %8.8lx, addr %8.8lx\n",
                  rel->r_info, addr);
          printf (" Evaluating '%s' ...\n ", sym_name);
#endif
          if (eval_symbol (& result, sym_name, & sym_name, input_bfd,
                           finfo, addr, section_offset, locsymcount,
                           signed_p))
            /* Symbol evaluated OK.  Update to absolute value.  */
            set_symbol_value (input_bfd, finfo, index, result);
          else
            result = FALSE;
        }

      if (internal_relocs != elf_section_data (reloc_sec)->relocs)
        free (internal_relocs);
    }

  /* If nothing went wrong, then we adjusted
     everything we wanted to adjust.  */
  return result;
}

/* Write the low SIZE bytes of X at LOCATION, most-significant chunk
   first, in CHUNKSZ-byte target-endian pieces.  */

static void
put_value (bfd_vma        size,
           unsigned long  chunksz,
           bfd *          input_bfd,
           bfd_vma        x,
           bfd_byte *     location)
{
  location += (size - chunksz);

  for (; size; size -= chunksz, location -= chunksz, x >>= (chunksz * 8))
    {
      switch (chunksz)
        {
        default:
        case 0:
          abort ();
        case 1:
          bfd_put_8 (input_bfd, x, location);
          break;
        case 2:
          bfd_put_16 (input_bfd, x, location);
          break;
        case 4:
          bfd_put_32 (input_bfd, x, location);
          break;
        case 8:
#ifdef BFD64
          bfd_put_64 (input_bfd, x, location);
#else
          abort ();
#endif
          break;
        }
    }
}

/* Read SIZE bytes at LOCATION, most-significant chunk first, in
   CHUNKSZ-byte target-endian pieces, and return the combined value.  */

static bfd_vma
get_value (bfd_vma        size,
           unsigned long  chunksz,
           bfd *          input_bfd,
           bfd_byte *     location)
{
  bfd_vma x = 0;

  for (; size; size -= chunksz, location += chunksz)
    {
      switch (chunksz)
        {
        default:
        case 0:
          abort ();
        case 1:
          x = (x << (8 * chunksz)) | bfd_get_8 (input_bfd, location);
          break;
        case 2:
          x = (x << (8 * chunksz)) | bfd_get_16 (input_bfd, location);
          break;
        case 4:
          x = (x << (8 * chunksz)) | bfd_get_32 (input_bfd, location);
          break;
        case 8:
#ifdef BFD64
          x = (x << (8 * chunksz)) | bfd_get_64 (input_bfd, location);
#else
          abort ();
#endif
          break;
        }
    }
  return x;
}

/* Unpack the self-describing reloc fields packed into ENCODED
   (the complex reloc's addend).  */

static void
decode_complex_addend (unsigned long * start,   /* in bits */
                       unsigned long * oplen,   /* in bits */
                       unsigned long * len,     /* in bits */
                       unsigned long * wordsz,  /* in bytes */
                       unsigned long * chunksz, /* in bytes */
                       unsigned long * lsb0_p,
                       unsigned long * signed_p,
                       unsigned long * trunc_p,
                       unsigned long encoded)
{
  * start   =  encoded        & 0x3F;
  * len     = (encoded >>  6) & 0x3F;
  * oplen   = (encoded >> 12) & 0x3F;
  * wordsz  = (encoded >> 18) & 0xF;
  * chunksz = (encoded >> 22) & 0xF;
  * lsb0_p  = (encoded >> 27) & 1;
  * signed_p = (encoded >> 28) & 1;
  * trunc_p = (encoded >> 29) & 1;
}

void
bfd_elf_perform_complex_relocation (bfd * output_bfd ATTRIBUTE_UNUSED,
                                    struct bfd_link_info * info,
                                    bfd * input_bfd,
                                    asection * input_section,
                                    bfd_byte * contents,
                                    Elf_Internal_Rela * rel,
                                    Elf_Internal_Sym * local_syms,
                                    asection ** local_sections)
{
  const struct elf_backend_data * bed;
  Elf_Internal_Shdr * symtab_hdr;
  asection * sec;
  bfd_vma relocation = 0, shift, x;
  bfd_vma r_symndx;
  bfd_vma mask;
  unsigned long start, oplen, len, wordsz,
    chunksz, lsb0_p, signed_p, trunc_p;

  /* Perform this reloc, since it is complex.
     (this is not to say that it necessarily refers to a complex
     symbol; merely that it is a self-describing CGEN based reloc.
     i.e. the addend has the complete reloc information (bit start,
     end, word size, etc) encoded within it.).  */

  r_symndx = ELF32_R_SYM (rel->r_info);
  bed = get_elf_backend_data (input_bfd);
  /* NOTE(review): same >>= 24 question as in
     evaluate_complex_relocation_symbols above.  */
  if (bed->s->arch_size == 64)
    r_symndx >>= 24;

#ifdef DEBUG
  printf ("Performing complex relocation %ld...\n", r_symndx);
#endif

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  if (r_symndx < symtab_hdr->sh_info)
    {
      /* The symbol is local.  */
      Elf_Internal_Sym * sym;

      sym = local_syms + r_symndx;
      sec = local_sections [r_symndx];
      relocation = sym->st_value;

      if (sym->st_shndx > SHN_UNDEF &&
          sym->st_shndx < SHN_LORESERVE)
        relocation += (sec->output_offset + sec->output_section->vma);
    }
  else
    {
      /* The symbol is global.  */
      struct elf_link_hash_entry **sym_hashes;
      struct elf_link_hash_entry * h;

      sym_hashes = elf_sym_hashes (input_bfd);
      h = sym_hashes [r_symndx];

      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
        {
          sec = h->root.u.def.section;
          relocation = h->root.u.def.value;

          if (! bfd_is_abs_section (sec))
            relocation += (sec->output_section->vma
                           + sec->output_offset);
        }
      if (h->root.type == bfd_link_hash_undefined
          && !((*info->callbacks->undefined_symbol)
               (info, h->root.root.string, input_bfd,
                input_section, rel->r_offset,
                info->unresolved_syms_in_objects == RM_GENERATE_ERROR
                || ELF_ST_VISIBILITY (h->other))))
        return;
    }

  decode_complex_addend (& start, & oplen, & len, & wordsz,
                         & chunksz, & lsb0_p, & signed_p,
                         & trunc_p, rel->r_addend);

  /* LEN consecutive one bits.  */
  mask = (((1L << (len - 1)) - 1) << 1) | 1;

  if (lsb0_p)
    shift = (start + 1) - len;
  else
    shift = (8 * wordsz) - (start + len);

  x = get_value (wordsz, chunksz, input_bfd, contents + rel->r_offset);

#ifdef DEBUG
  printf ("Doing complex reloc: "
          "lsb0? %ld, signed? %ld, trunc? %ld, wordsz %ld, "
          "chunksz %ld, start %ld, len %ld, oplen %ld\n"
          " dest: %8.8lx, mask: %8.8lx, reloc: %8.8lx\n",
          lsb0_p, signed_p, trunc_p, wordsz, chunksz, start, len,
          oplen, x, mask, relocation);
#endif

  if (! trunc_p)
    {
      /* Now do an overflow check.  */
      if (bfd_check_overflow ((signed_p ?
                               complain_overflow_signed :
                               complain_overflow_unsigned),
                              len, 0, (8 * wordsz),
                              relocation)
          == bfd_reloc_overflow)
        (*_bfd_error_handler)
          ("%s (%s + 0x%lx): relocation overflow: 0x%lx %sdoes not fit "
           "within 0x%lx",
           input_bfd->filename, input_section->name, rel->r_offset,
           relocation, (signed_p ? "(signed) " : ""), mask);
    }

  /* Do the deed.  */
  x = (x & ~(mask << shift)) | ((relocation & mask) << shift);

#ifdef DEBUG
  printf (" relocation: %8.8lx\n"
          " shifted mask: %8.8lx\n"
          " shifted/masked reloc: %8.8lx\n"
          " result: %8.8lx\n",
          relocation, (mask << shift),
          ((relocation & mask) << shift), x);
#endif
  put_value (wordsz, chunksz, input_bfd, x, contents + rel->r_offset);
}

/* When performing a relocatable link, the input relocations are
   preserved.  But, if they reference global symbols, the indices
   referenced must be updated.  Update all the relocations in
   REL_HDR (there are COUNT of them), using the data in REL_HASH.
*/

static void
elf_link_adjust_relocs (bfd *abfd,
                        Elf_Internal_Shdr *rel_hdr,
                        unsigned int count,
                        struct elf_link_hash_entry **rel_hash)
{
  unsigned int i;
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  bfd_byte *erela;
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  bfd_vma r_type_mask;
  int r_sym_shift;

  /* Pick the swap routines matching this reloc section's entry size
     (Rel vs. Rela).  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  if (bed->s->int_rels_per_ext_rel > MAX_INT_RELS_PER_EXT_REL)
    abort ();

  /* r_info layout: ELF32 keeps the type in the low 8 bits, ELF64 in
     the low 32 bits; the symbol index occupies the rest.  */
  if (bed->s->arch_size == 32)
    {
      r_type_mask = 0xff;
      r_sym_shift = 8;
    }
  else
    {
      r_type_mask = 0xffffffff;
      r_sym_shift = 32;
    }

  erela = rel_hdr->contents;
  for (i = 0; i < count; i++, rel_hash++, erela += rel_hdr->sh_entsize)
    {
      Elf_Internal_Rela irela[MAX_INT_RELS_PER_EXT_REL];
      unsigned int j;

      /* A null entry means the reloc does not reference a global
         symbol and needs no adjustment.  */
      if (*rel_hash == NULL)
        continue;

      BFD_ASSERT ((*rel_hash)->indx >= 0);

      /* Rewrite the symbol index, keeping the reloc type bits.  */
      (*swap_in) (abfd, erela, irela);
      for (j = 0; j < bed->s->int_rels_per_ext_rel; j++)
        irela[j].r_info = ((bfd_vma) (*rel_hash)->indx << r_sym_shift
                           | (irela[j].r_info & r_type_mask));
      (*swap_out) (abfd, irela, erela);
    }
}

/* Sort key record for sorting the dynamic relocations.  */

struct elf_link_sort_rela
{
  union {
    /* Offset of the first reloc against this reloc's symbol (used by
       the second sort pass).  */
    bfd_vma offset;
    /* Mask selecting the symbol-index bits of r_info (used by the
       first sort pass).  */
    bfd_vma sym_mask;
  } u;
  enum elf_reloc_type_class type;
  /* We use this as an array of size int_rels_per_ext_rel.
*/
  Elf_Internal_Rela rela[1];
};

/* qsort comparator, pass 1: relative relocs first, then by symbol
   index, then by offset.  */

static int
elf_link_sort_cmp1 (const void *A, const void *B)
{
  const struct elf_link_sort_rela *a = A;
  const struct elf_link_sort_rela *b = B;
  int relativea, relativeb;

  relativea = a->type == reloc_class_relative;
  relativeb = b->type == reloc_class_relative;

  if (relativea < relativeb)
    return 1;
  if (relativea > relativeb)
    return -1;
  if ((a->rela->r_info & a->u.sym_mask)
      < (b->rela->r_info & b->u.sym_mask))
    return -1;
  if ((a->rela->r_info & a->u.sym_mask)
      > (b->rela->r_info & b->u.sym_mask))
    return 1;
  if (a->rela->r_offset < b->rela->r_offset)
    return -1;
  if (a->rela->r_offset > b->rela->r_offset)
    return 1;
  return 0;
}

/* qsort comparator, pass 2: by the offset recorded in u.offset, then
   copy/plt class, then reloc offset.  */

static int
elf_link_sort_cmp2 (const void *A, const void *B)
{
  const struct elf_link_sort_rela *a = A;
  const struct elf_link_sort_rela *b = B;
  int copya, copyb;

  if (a->u.offset < b->u.offset)
    return -1;
  if (a->u.offset > b->u.offset)
    return 1;
  copya = (a->type == reloc_class_copy) * 2
    + (a->type == reloc_class_plt);
  copyb = (b->type == reloc_class_copy) * 2
    + (b->type == reloc_class_plt);
  if (copya < copyb)
    return -1;
  if (copya > copyb)
    return 1;
  if (a->rela->r_offset < b->rela->r_offset)
    return -1;
  if (a->rela->r_offset > b->rela->r_offset)
    return 1;
  return 0;
}

/* Sort the dynamic relocations so that relative relocs come first
   (their count is the return value, for DT_RELCOUNT/DT_RELACOUNT) and
   relocs against the same symbol are grouped together.  On success
   stores the sorted reloc section in *PSEC; returns 0 on failure or
   when there is nothing to sort.  */

static size_t
elf_link_sort_relocs (bfd *abfd, struct bfd_link_info *info,
                      asection **psec)
{
  asection *dynamic_relocs;
  asection *rela_dyn;
  asection *rel_dyn;
  bfd_size_type count, size;
  size_t i, ret, sort_elt, ext_size;
  bfd_byte *sort, *s_non_relative, *p;
  struct elf_link_sort_rela *sq;
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  int i2e = bed->s->int_rels_per_ext_rel;
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  struct bfd_link_order *lo;
  bfd_vma r_sym_mask;
  bfd_boolean use_rela;

  /* Find a dynamic reloc section.  */
  rela_dyn = bfd_get_section_by_name (abfd, ".rela.dyn");
  rel_dyn = bfd_get_section_by_name (abfd, ".rel.dyn");
  if (rela_dyn != NULL && rela_dyn->size > 0
      && rel_dyn != NULL && rel_dyn->size > 0)
    {
      bfd_boolean use_rela_initialised = FALSE;

      /* This is just here to stop gcc from complaining.
         It's initialization checking code is not perfect.  */
      use_rela = TRUE;

      /* Both sections are present.  Examine the sizes
         of the indirect sections to help us choose.  */
      for (lo = rela_dyn->map_head.link_order; lo != NULL; lo = lo->next)
        if (lo->type == bfd_indirect_link_order)
          {
            asection *o = lo->u.indirect.section;

            if ((o->size % bed->s->sizeof_rela) == 0)
              {
                if ((o->size % bed->s->sizeof_rel) == 0)
                  /* Section size is divisible by both rel and rela
                     sizes.  It is of no help to us.  */
                  ;
                else
                  {
                    /* Section size is only divisible by rela.  */
                    if (use_rela_initialised && (use_rela == FALSE))
                      {
                        _bfd_error_handler
                          (_("%B: Unable to sort relocs - they are in more than one size"), abfd);
                        bfd_set_error (bfd_error_invalid_operation);
                        return 0;
                      }
                    else
                      {
                        use_rela = TRUE;
                        use_rela_initialised = TRUE;
                      }
                  }
              }
            else if ((o->size % bed->s->sizeof_rel) == 0)
              {
                /* Section size is only divisible by rel.  */
                if (use_rela_initialised && (use_rela == TRUE))
                  {
                    _bfd_error_handler
                      (_("%B: Unable to sort relocs - they are in more than one size"), abfd);
                    bfd_set_error (bfd_error_invalid_operation);
                    return 0;
                  }
                else
                  {
                    use_rela = FALSE;
                    use_rela_initialised = TRUE;
                  }
              }
            else
              {
                /* The section size is not divisible by either -
                   something is wrong.  */
                _bfd_error_handler
                  (_("%B: Unable to sort relocs - they are of an unknown size"), abfd);
                bfd_set_error (bfd_error_invalid_operation);
                return 0;
              }
          }

      /* Same divisibility test for the .rel.dyn side.  */
      for (lo = rel_dyn->map_head.link_order; lo != NULL; lo = lo->next)
        if (lo->type == bfd_indirect_link_order)
          {
            asection *o = lo->u.indirect.section;

            if ((o->size % bed->s->sizeof_rela) == 0)
              {
                if ((o->size % bed->s->sizeof_rel) == 0)
                  /* Section size is divisible by both rel and rela
                     sizes.  It is of no help to us.  */
                  ;
                else
                  {
                    /* Section size is only divisible by rela.  */
                    if (use_rela_initialised && (use_rela == FALSE))
                      {
                        _bfd_error_handler
                          (_("%B: Unable to sort relocs - they are in more than one size"), abfd);
                        bfd_set_error (bfd_error_invalid_operation);
                        return 0;
                      }
                    else
                      {
                        use_rela = TRUE;
                        use_rela_initialised = TRUE;
                      }
                  }
              }
            else if ((o->size % bed->s->sizeof_rel) == 0)
              {
                /* Section size is only divisible by rel.  */
                if (use_rela_initialised && (use_rela == TRUE))
                  {
                    _bfd_error_handler
                      (_("%B: Unable to sort relocs - they are in more than one size"), abfd);
                    bfd_set_error (bfd_error_invalid_operation);
                    return 0;
                  }
                else
                  {
                    use_rela = FALSE;
                    use_rela_initialised = TRUE;
                  }
              }
            else
              {
                /* The section size is not divisible by either -
                   something is wrong.  */
                _bfd_error_handler
                  (_("%B: Unable to sort relocs - they are of an unknown size"), abfd);
                bfd_set_error (bfd_error_invalid_operation);
                return 0;
              }
          }

      if (! use_rela_initialised)
        /* Make a guess.  */
        use_rela = TRUE;
    }
  else if (rela_dyn != NULL && rela_dyn->size > 0)
    use_rela = TRUE;
  else if (rel_dyn != NULL && rel_dyn->size > 0)
    use_rela = FALSE;
  else
    return 0;

  if (use_rela)
    {
      dynamic_relocs = rela_dyn;
      ext_size = bed->s->sizeof_rela;
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    {
      dynamic_relocs = rel_dyn;
      ext_size = bed->s->sizeof_rel;
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }

  /* The indirect input sections must account for the whole output
     section, or we cannot sort it as a unit.  */
  size = 0;
  for (lo = dynamic_relocs->map_head.link_order;
       lo != NULL;
       lo = lo->next)
    if (lo->type == bfd_indirect_link_order)
      size += lo->u.indirect.section->size;

  if (size != dynamic_relocs->size)
    return 0;

  sort_elt = (sizeof (struct elf_link_sort_rela)
              + (i2e - 1) * sizeof (Elf_Internal_Rela));

  count = dynamic_relocs->size / ext_size;
  sort = bfd_zmalloc (sort_elt * count);

  if (sort == NULL)
    {
      (*info->callbacks->warning)
        (info, _("Not enough memory to sort relocations"),
         0, abfd, 0, 0);
      return 0;
    }

  if (bed->s->arch_size == 32)
    r_sym_mask = ~(bfd_vma) 0xff;
  else
    r_sym_mask = ~(bfd_vma) 0xffffffff;

  /* Swap all the relocs into the sort buffer.  */
  for (lo = dynamic_relocs->map_head.link_order;
       lo != NULL;
       lo = lo->next)
    if (lo->type == bfd_indirect_link_order)
      {
        bfd_byte *erel, *erelend;
        asection *o = lo->u.indirect.section;

        if (o->contents == NULL && o->size != 0)
          {
            /* This is a reloc section that is being handled as a normal
               section.  See bfd_section_from_shdr.  We can't combine
               relocs in this case.  */
            free (sort);
            return 0;
          }
        erel = o->contents;
        erelend = o->contents + o->size;
        p = sort + o->output_offset / ext_size * sort_elt;

        while (erel < erelend)
          {
            struct elf_link_sort_rela *s
              = (struct elf_link_sort_rela *) p;

            (*swap_in) (abfd, erel, s->rela);
            s->type = (*bed->elf_backend_reloc_type_class) (s->rela);
            s->u.sym_mask = r_sym_mask;
            p += sort_elt;
            erel += ext_size;
          }
      }

  qsort (sort, count, sort_elt, elf_link_sort_cmp1);

  /* Count the relative relocs now at the front; that count is the
     function's return value.  */
  for (i = 0, p = sort; i < count; i++, p += sort_elt)
    {
      struct elf_link_sort_rela *s = (struct elf_link_sort_rela *) p;
      if (s->type != reloc_class_relative)
        break;
    }
  ret = i;
  s_non_relative = p;

  /* Tag each non-relative reloc with the offset of the first reloc
     against the same symbol, so pass 2 keeps same-symbol relocs
     adjacent.  */
  sq = (struct elf_link_sort_rela *) s_non_relative;
  for (; i < count; i++, p += sort_elt)
    {
      struct elf_link_sort_rela *sp = (struct elf_link_sort_rela *) p;
      if (((sp->rela->r_info ^ sq->rela->r_info) & r_sym_mask) != 0)
        sq = sp;
      sp->u.offset = sq->rela->r_offset;
    }

  qsort (s_non_relative, count - ret, sort_elt, elf_link_sort_cmp2);

  /* Swap the sorted relocs back out.  */
  for (lo = dynamic_relocs->map_head.link_order;
       lo != NULL;
       lo = lo->next)
    if (lo->type == bfd_indirect_link_order)
      {
        bfd_byte *erel, *erelend;
        asection *o = lo->u.indirect.section;

        erel = o->contents;
        erelend = o->contents + o->size;
        p = sort + o->output_offset / ext_size * sort_elt;
        while (erel < erelend)
          {
            struct elf_link_sort_rela *s
              = (struct elf_link_sort_rela *) p;
            (*swap_out) (abfd, s->rela, erel);
            p += sort_elt;
            erel += ext_size;
          }
      }

  free (sort);
  *psec = dynamic_relocs;
  return ret;
}

/* Flush the output symbols to the file.
*/

static bfd_boolean
elf_link_flush_output_syms (struct elf_final_link_info *finfo,
                            const struct elf_backend_data *bed)
{
  if (finfo->symbuf_count > 0)
    {
      Elf_Internal_Shdr *hdr;
      file_ptr pos;
      bfd_size_type amt;

      /* Append the buffered symbols at the current end of the symtab
         section and grow its recorded size.  */
      hdr = &elf_tdata (finfo->output_bfd)->symtab_hdr;
      pos = hdr->sh_offset + hdr->sh_size;
      amt = finfo->symbuf_count * bed->s->sizeof_sym;
      if (bfd_seek (finfo->output_bfd, pos, SEEK_SET) != 0
          || bfd_bwrite (finfo->symbuf, amt, finfo->output_bfd) != amt)
        return FALSE;

      hdr->sh_size += amt;
      finfo->symbuf_count = 0;
    }

  return TRUE;
}

/* Add a symbol to the output symbol table.  */

static bfd_boolean
elf_link_output_sym (struct elf_final_link_info *finfo,
                     const char *name,
                     Elf_Internal_Sym *elfsym,
                     asection *input_sec,
                     struct elf_link_hash_entry *h)
{
  bfd_byte *dest;
  Elf_External_Sym_Shndx *destshndx;
  bfd_boolean (*output_symbol_hook)
    (struct bfd_link_info *, const char *, Elf_Internal_Sym *,
     asection *, struct elf_link_hash_entry *);
  const struct elf_backend_data *bed;

  /* Give the backend a chance to veto or adjust the symbol.  */
  bed = get_elf_backend_data (finfo->output_bfd);
  output_symbol_hook = bed->elf_backend_link_output_symbol_hook;
  if (output_symbol_hook != NULL)
    {
      if (! (*output_symbol_hook) (finfo->info, name, elfsym,
                                   input_sec, h))
        return FALSE;
    }

  if (name == NULL || *name == '\0')
    elfsym->st_name = 0;
  else if (input_sec->flags & SEC_EXCLUDE)
    elfsym->st_name = 0;
  else
    {
      elfsym->st_name
        = (unsigned long) _bfd_stringtab_add (finfo->symstrtab,
                                              name, TRUE, FALSE);
      if (elfsym->st_name == (unsigned long) -1)
        return FALSE;
    }

  /* Flush the buffer to the file when it fills up.  */
  if (finfo->symbuf_count >= finfo->symbuf_size)
    {
      if (! elf_link_flush_output_syms (finfo, bed))
        return FALSE;
    }

  dest = finfo->symbuf + finfo->symbuf_count * bed->s->sizeof_sym;
  destshndx = finfo->symshndxbuf;
  if (destshndx != NULL)
    {
      /* Grow the SHN_XINDEX buffer to cover the new symbol's slot.  */
      if (bfd_get_symcount (finfo->output_bfd) >= finfo->shndxbuf_size)
        {
          bfd_size_type amt;

          amt = finfo->shndxbuf_size * sizeof (Elf_External_Sym_Shndx);
          finfo->symshndxbuf = destshndx
            = bfd_realloc (destshndx, amt * 2);
          if (destshndx == NULL)
            return FALSE;
          memset ((char *) destshndx + amt, 0, amt);
          finfo->shndxbuf_size *= 2;
        }
      destshndx += bfd_get_symcount (finfo->output_bfd);
    }

  bed->s->swap_symbol_out (finfo->output_bfd, elfsym, dest, destshndx);
  finfo->symbuf_count += 1;
  bfd_get_symcount (finfo->output_bfd) += 1;

  return TRUE;
}

/* Return TRUE if the dynamic symbol SYM in ABFD is supported.  */

static bfd_boolean
check_dynsym (bfd *abfd, Elf_Internal_Sym *sym)
{
  if (sym->st_shndx > SHN_HIRESERVE)
    {
      /* The gABI doesn't support dynamic symbols in output sections
         beyond 64k.  */
      (*_bfd_error_handler)
        (_("%B: Too many sections: %d (>= %d)"),
         abfd, bfd_count_sections (abfd), SHN_LORESERVE);
      bfd_set_error (bfd_error_nonrepresentable_section);
      return FALSE;
    }
  return TRUE;
}

/* For DSOs loaded in via a DT_NEEDED entry, emulate ld.so in
   allowing an unsatisfied unversioned symbol in the DSO to match a
   versioned symbol that would normally require an explicit version.
   We also handle the case that a DSO references a hidden symbol
   which may be satisfied by a versioned symbol in another DSO.
*/

static bfd_boolean
elf_link_check_versioned_symbol (struct bfd_link_info *info,
                                 const struct elf_backend_data *bed,
                                 struct elf_link_hash_entry *h)
{
  bfd *abfd;
  struct elf_link_loaded_list *loaded;

  if (!is_elf_hash_table (info->hash))
    return FALSE;

  /* Identify the BFD that owns the symbol.  */
  switch (h->root.type)
    {
    default:
      abfd = NULL;
      break;

    case bfd_link_hash_undefined:
    case bfd_link_hash_undefweak:
      abfd = h->root.u.undef.abfd;
      if ((abfd->flags & DYNAMIC) == 0
          || (elf_dyn_lib_class (abfd) & DYN_DT_NEEDED) == 0)
        return FALSE;
      break;

    case bfd_link_hash_defined:
    case bfd_link_hash_defweak:
      abfd = h->root.u.def.section->owner;
      break;

    case bfd_link_hash_common:
      abfd = h->root.u.c.p->section->owner;
      break;
    }
  BFD_ASSERT (abfd != NULL);

  for (loaded = elf_hash_table (info)->loaded;
       loaded != NULL;
       loaded = loaded->next)
    {
      bfd *input;
      Elf_Internal_Shdr *hdr;
      bfd_size_type symcount;
      bfd_size_type extsymcount;
      bfd_size_type extsymoff;
      Elf_Internal_Shdr *versymhdr;
      Elf_Internal_Sym *isym;
      Elf_Internal_Sym *isymend;
      Elf_Internal_Sym *isymbuf;
      Elf_External_Versym *ever;
      Elf_External_Versym *extversym;

      input = loaded->abfd;

      /* We check each DSO for a possible hidden versioned definition.  */
      if (input == abfd
          || (input->flags & DYNAMIC) == 0
          || elf_dynversym (input) == 0)
        continue;

      hdr = &elf_tdata (input)->dynsymtab_hdr;

      symcount = hdr->sh_size / bed->s->sizeof_sym;
      if (elf_bad_symtab (input))
        {
          extsymcount = symcount;
          extsymoff = 0;
        }
      else
        {
          /* Skip the leading local symbols; only globals can satisfy
             the reference.  */
          extsymcount = symcount - hdr->sh_info;
          extsymoff = hdr->sh_info;
        }

      if (extsymcount == 0)
        continue;

      isymbuf = bfd_elf_get_elf_syms (input, hdr,
                                      extsymcount, extsymoff,
                                      NULL, NULL, NULL);
      if (isymbuf == NULL)
        return FALSE;

      /* Read in any version definitions.  */
      versymhdr = &elf_tdata (input)->dynversym_hdr;
      extversym = bfd_malloc (versymhdr->sh_size);
      if (extversym == NULL)
        goto error_ret;

      if (bfd_seek (input, versymhdr->sh_offset, SEEK_SET) != 0
          || (bfd_bread (extversym, versymhdr->sh_size, input)
              != versymhdr->sh_size))
        {
          free (extversym);
        error_ret:
          free (isymbuf);
          return FALSE;
        }

      ever = extversym + extsymoff;
      isymend = isymbuf + extsymcount;
      for (isym = isymbuf; isym < isymend; isym++, ever++)
        {
          const char *name;
          Elf_Internal_Versym iver;
          unsigned short version_index;

          if (ELF_ST_BIND (isym->st_info) == STB_LOCAL
              || isym->st_shndx == SHN_UNDEF)
            continue;

          name = bfd_elf_string_from_elf_section (input,
                                                  hdr->sh_link,
                                                  isym->st_name);
          if (strcmp (name, h->root.root.string) != 0)
            continue;

          _bfd_elf_swap_versym_in (input, ever, &iver);

          if ((iver.vs_vers & VERSYM_HIDDEN) == 0)
            {
              /* If we have a non-hidden versioned sym, then it should
                 have provided a definition for the undefined sym.  */
              abort ();
            }

          version_index = iver.vs_vers & VERSYM_VERSION;
          if (version_index == 1 || version_index == 2)
            {
              /* This is the base or first version.  We can use it.  */
              free (extversym);
              free (isymbuf);
              return TRUE;
            }
        }

      free (extversym);
      free (isymbuf);
    }

  return FALSE;
}

/* Add an external symbol to the symbol table.  This is called from
   the hash table traversal routine.  When generating a shared object,
   we go through the symbol table twice.  The first time we output
   anything that might have been forced to local scope in a version
   script.  The second time we output the symbols that are still
   global symbols.
*/

/* Called once per hash-table entry (twice over the table: first with
   eoinfo->localsyms set for forced-local symbols, then clear for the
   rest).  Decides whether H is stripped, fills in an Elf_Internal_Sym
   for it, emits the .dynsym/.hash/.gnu.version entries when needed,
   and finally appends it to the regular symbol table.  On failure
   sets eoinfo->failed and returns FALSE to stop the traversal.  */

static bfd_boolean
elf_link_output_extsym (struct elf_link_hash_entry *h, void *data)
{
  struct elf_outext_info *eoinfo = data;
  struct elf_final_link_info *finfo = eoinfo->finfo;
  bfd_boolean strip;
  Elf_Internal_Sym sym;
  asection *input_sec;
  const struct elf_backend_data *bed;

  if (h->root.type == bfd_link_hash_warning)
    {
      h = (struct elf_link_hash_entry *) h->root.u.i.link;
      if (h->root.type == bfd_link_hash_new)
	return TRUE;
    }

  /* Decide whether to output this symbol in this pass.  */
  if (eoinfo->localsyms)
    {
      if (!h->forced_local)
	return TRUE;
    }
  else
    {
      if (h->forced_local)
	return TRUE;
    }

  bed = get_elf_backend_data (finfo->output_bfd);

  if (h->root.type == bfd_link_hash_undefined)
    {
      /* If we have an undefined symbol reference here then it must have
	 come from a shared library that is being linked in.  (Undefined
	 references in regular files have already been handled).  */
      bfd_boolean ignore_undef = FALSE;

      /* Some symbols may be special in that the fact that they're
	 undefined can be safely ignored - let backend determine that.  */
      if (bed->elf_backend_ignore_undef_symbol)
	ignore_undef = bed->elf_backend_ignore_undef_symbol (h);

      /* If we are reporting errors for this situation then do so now.  */
      if (ignore_undef == FALSE
	  && h->ref_dynamic
	  && ! h->ref_regular
	  && ! elf_link_check_versioned_symbol (finfo->info, bed, h)
	  && finfo->info->unresolved_syms_in_shared_libs != RM_IGNORE)
	{
	  if (! (finfo->info->callbacks->undefined_symbol
		 (finfo->info, h->root.root.string,
		  h->root.u.undef.abfd, NULL, 0,
		  finfo->info->unresolved_syms_in_shared_libs
		  == RM_GENERATE_ERROR)))
	    {
	      eoinfo->failed = TRUE;
	      return FALSE;
	    }
	}
    }

  /* We should also warn if a forced local symbol is referenced from
     shared libraries.  */
  if (! finfo->info->relocatable
      && (! finfo->info->shared)
      && h->forced_local
      && h->ref_dynamic
      && !h->dynamic_def
      && !h->dynamic_weak
      && ! elf_link_check_versioned_symbol (finfo->info, bed, h))
    {
      (*_bfd_error_handler)
	(_("%B: %s symbol `%s' in %B is referenced by DSO"),
	 finfo->output_bfd,
	 h->root.u.def.section == bfd_abs_section_ptr
	 ? finfo->output_bfd : h->root.u.def.section->owner,
	 ELF_ST_VISIBILITY (h->other) == STV_INTERNAL
	 ? "internal"
	 : ELF_ST_VISIBILITY (h->other) == STV_HIDDEN
	 ? "hidden" : "local",
	 h->root.root.string);
      eoinfo->failed = TRUE;
      return FALSE;
    }

  /* We don't want to output symbols that have never been mentioned by
     a regular file, or that we have been told to strip.  However, if
     h->indx is set to -2, the symbol is used by a reloc and we must
     output it.  */
  if (h->indx == -2)
    strip = FALSE;
  else if ((h->def_dynamic
	    || h->ref_dynamic
	    || h->root.type == bfd_link_hash_new)
	   && !h->def_regular
	   && !h->ref_regular)
    strip = TRUE;
  else if (finfo->info->strip == strip_all)
    strip = TRUE;
  else if (finfo->info->strip == strip_some
	   && bfd_hash_lookup (finfo->info->keep_hash,
			       h->root.root.string, FALSE, FALSE) == NULL)
    strip = TRUE;
  else if (finfo->info->strip_discarded
	   && (h->root.type == bfd_link_hash_defined
	       || h->root.type == bfd_link_hash_defweak)
	   && elf_discarded_section (h->root.u.def.section))
    strip = TRUE;
  else
    strip = FALSE;

  /* If we're stripping it, and it's not a dynamic symbol, there's
     nothing else to do unless it is a forced local symbol.  */
  if (strip
      && h->dynindx == -1
      && !h->forced_local)
    return TRUE;

  /* Build the output symbol image.  st_name is filled in later
     (dynstr_index for .dynsym below, or by elf_link_output_sym).  */
  sym.st_value = 0;
  sym.st_size = h->size;
  sym.st_other = h->other;
  if (h->forced_local)
    sym.st_info = ELF_ST_INFO (STB_LOCAL, h->type);
  else if (h->root.type == bfd_link_hash_undefweak
	   || h->root.type == bfd_link_hash_defweak)
    sym.st_info = ELF_ST_INFO (STB_WEAK, h->type);
  else
    sym.st_info = ELF_ST_INFO (STB_GLOBAL, h->type);

  switch (h->root.type)
    {
    default:
    case bfd_link_hash_new:
    case bfd_link_hash_warning:
      abort ();
      return FALSE;

    case bfd_link_hash_undefined:
    case bfd_link_hash_undefweak:
      input_sec = bfd_und_section_ptr;
      sym.st_shndx = SHN_UNDEF;
      break;

    case bfd_link_hash_defined:
    case bfd_link_hash_defweak:
      {
	input_sec = h->root.u.def.section;
	if (input_sec->output_section != NULL)
	  {
	    sym.st_shndx =
	      _bfd_elf_section_from_bfd_section (finfo->output_bfd,
						 input_sec->output_section);
	    if (sym.st_shndx == SHN_BAD)
	      {
		(*_bfd_error_handler)
		  (_("%B: could not find output section %A for input section %A"),
		   finfo->output_bfd, input_sec->output_section, input_sec);
		eoinfo->failed = TRUE;
		return FALSE;
	      }

	    /* ELF symbols in relocatable files are section relative,
	       but in nonrelocatable files they are virtual
	       addresses.  */
	    sym.st_value = h->root.u.def.value + input_sec->output_offset;
	    if (! finfo->info->relocatable)
	      {
		sym.st_value += input_sec->output_section->vma;
		if (h->type == STT_TLS)
		  {
		    /* STT_TLS symbols are relative to PT_TLS segment
		       base.  */
		    BFD_ASSERT (elf_hash_table (finfo->info)->tls_sec != NULL);
		    sym.st_value -= elf_hash_table (finfo->info)->tls_sec->vma;
		  }
	      }
	  }
	else
	  {
	    /* A defined symbol without an output section must come
	       from a dynamic object (or have no owner at all).  */
	    BFD_ASSERT (input_sec->owner == NULL
			|| (input_sec->owner->flags & DYNAMIC) != 0);
	    sym.st_shndx = SHN_UNDEF;
	    input_sec = bfd_und_section_ptr;
	  }
      }
      break;

    case bfd_link_hash_common:
      input_sec = h->root.u.c.p->section;
      sym.st_shndx = bed->common_section_index (input_sec);
      /* For a common symbol st_value holds the required alignment.
	 NOTE(review): `1 <<` is an int shift; presumably
	 alignment_power is always small enough here -- confirm.  */
      sym.st_value = 1 << h->root.u.c.p->alignment_power;
      break;

    case bfd_link_hash_indirect:
      /* These symbols are created by symbol versioning.  They point
	 to the decorated version of the name.  For example, if the
	 symbol foo@@GNU_1.2 is the default, which should be used when
	 foo is used with no version, then we add an indirect symbol
	 foo which points to foo@@GNU_1.2.  We ignore these symbols,
	 since the indirected symbol is already in the hash table.  */
      return TRUE;
    }

  /* Give the processor backend a chance to tweak the symbol value,
     and also to finish up anything that needs to be done for this
     symbol.  FIXME: Not calling elf_backend_finish_dynamic_symbol for
     forced local syms when non-shared is due to a historical quirk.  */
  if ((h->dynindx != -1
       || h->forced_local)
      && ((finfo->info->shared
	   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak))
	  || !h->forced_local)
      && elf_hash_table (finfo->info)->dynamic_sections_created)
    {
      if (! ((*bed->elf_backend_finish_dynamic_symbol)
	     (finfo->output_bfd, finfo->info, h, &sym)))
	{
	  eoinfo->failed = TRUE;
	  return FALSE;
	}
    }

  /* If we are marking the symbol as undefined, and there are no
     non-weak references to this symbol from a regular object, then
     mark the symbol as weak undefined; if there are non-weak
     references, mark the symbol as strong.  We can't do this earlier,
     because it might not be marked as undefined until the
     finish_dynamic_symbol routine gets through with it.  */
  if (sym.st_shndx == SHN_UNDEF
      && h->ref_regular
      && (ELF_ST_BIND (sym.st_info) == STB_GLOBAL
	  || ELF_ST_BIND (sym.st_info) == STB_WEAK))
    {
      int bindtype;

      if (h->ref_regular_nonweak)
	bindtype = STB_GLOBAL;
      else
	bindtype = STB_WEAK;
      sym.st_info = ELF_ST_INFO (bindtype, ELF_ST_TYPE (sym.st_info));
    }

  /* If a non-weak symbol with non-default visibility is not defined
     locally, it is a fatal error.  */
  if (! finfo->info->relocatable
      && ELF_ST_VISIBILITY (sym.st_other) != STV_DEFAULT
      && ELF_ST_BIND (sym.st_info) != STB_WEAK
      && h->root.type == bfd_link_hash_undefined
      && !h->def_regular)
    {
      (*_bfd_error_handler)
	(_("%B: %s symbol `%s' isn't defined"),
	 finfo->output_bfd,
	 ELF_ST_VISIBILITY (sym.st_other) == STV_PROTECTED
	 ? "protected"
	 : ELF_ST_VISIBILITY (sym.st_other) == STV_INTERNAL
	 ? "internal" : "hidden",
	 h->root.root.string);
      eoinfo->failed = TRUE;
      return FALSE;
    }

  /* If this symbol should be put in the .dynsym section, then put it
     there now.  We already know the symbol index.  We also fill in
     the entry in the .hash section.  */
  if (h->dynindx != -1
      && elf_hash_table (finfo->info)->dynamic_sections_created)
    {
      bfd_byte *esym;

      sym.st_name = h->dynstr_index;
      esym = finfo->dynsym_sec->contents + h->dynindx * bed->s->sizeof_sym;
      if (! check_dynsym (finfo->output_bfd, &sym))
	{
	  eoinfo->failed = TRUE;
	  return FALSE;
	}
      bed->s->swap_symbol_out (finfo->output_bfd, &sym, esym, 0);

      if (finfo->hash_sec != NULL)
	{
	  size_t hash_entry_size;
	  bfd_byte *bucketpos;
	  bfd_vma chain;
	  size_t bucketcount;
	  size_t bucket;

	  bucketcount = elf_hash_table (finfo->info)->bucketcount;
	  bucket = h->u.elf_hash_value % bucketcount;

	  hash_entry_size
	    = elf_section_data (finfo->hash_sec)->this_hdr.sh_entsize;
	  /* Push this symbol onto the front of its bucket's chain:
	     the old bucket head becomes this symbol's chain entry.
	     Layout is nbucket, nchain, buckets[], chains[] -- hence
	     the "+ 2" skips the two header words.  */
	  bucketpos = ((bfd_byte *) finfo->hash_sec->contents
		       + (bucket + 2) * hash_entry_size);
	  chain = bfd_get (8 * hash_entry_size, finfo->output_bfd, bucketpos);
	  bfd_put (8 * hash_entry_size, finfo->output_bfd, h->dynindx,
		   bucketpos);
	  bfd_put (8 * hash_entry_size, finfo->output_bfd, chain,
		   ((bfd_byte *) finfo->hash_sec->contents
		    + (bucketcount + 2 + h->dynindx) * hash_entry_size));
	}

      if (finfo->symver_sec != NULL && finfo->symver_sec->contents != NULL)
	{
	  Elf_Internal_Versym iversym;
	  Elf_External_Versym *eversym;

	  /* Fill in the .gnu.version entry for this dynamic symbol.  */
	  if (!h->def_regular)
	    {
	      if (h->verinfo.verdef == NULL)
		iversym.vs_vers = 0;
	      else
		iversym.vs_vers = h->verinfo.verdef->vd_exp_refno + 1;
	    }
	  else
	    {
	      if (h->verinfo.vertree == NULL)
		iversym.vs_vers = 1;
	      else
		iversym.vs_vers = h->verinfo.vertree->vernum + 1;
	      if (finfo->info->create_default_symver)
		iversym.vs_vers++;
	    }

	  if (h->hidden)
	    iversym.vs_vers |= VERSYM_HIDDEN;

	  eversym = (Elf_External_Versym *) finfo->symver_sec->contents;
	  eversym += h->dynindx;
	  _bfd_elf_swap_versym_out (finfo->output_bfd, &iversym, eversym);
	}
    }

  /* If we're stripping it, then it was just a dynamic symbol, and
     there's nothing else to do.  */
  if (strip || (input_sec->flags & SEC_EXCLUDE) != 0)
    return TRUE;

  h->indx = bfd_get_symcount (finfo->output_bfd);

  if (! elf_link_output_sym (finfo, h->root.root.string, &sym, input_sec, h))
    {
      eoinfo->failed = TRUE;
      return FALSE;
    }

  return TRUE;
}

/* Return TRUE if special handling is done for relocs in SEC against
   symbols defined in discarded sections.  */

static bfd_boolean
elf_section_ignore_discarded_relocs (asection *sec)
{
  const struct elf_backend_data *bed;

  /* Stabs and .eh_frame sections get their discarded-section relocs
     fixed up elsewhere.  */
  switch (sec->sec_info_type)
    {
    case ELF_INFO_TYPE_STABS:
    case ELF_INFO_TYPE_EH_FRAME:
      return TRUE;
    default:
      break;
    }

  bed = get_elf_backend_data (sec->owner);
  if (bed->elf_backend_ignore_discarded_relocs != NULL
      && (*bed->elf_backend_ignore_discarded_relocs) (sec))
    return TRUE;

  return FALSE;
}

/* Return a mask saying how ld should treat relocations in SEC against
   symbols defined in discarded sections.  If this function returns
   COMPLAIN set, ld will issue a warning message.  If this function
   returns PRETEND set, and the discarded section was link-once and the
   same size as the kept link-once section, ld will pretend that the
   symbol was actually defined in the kept section.  Otherwise ld will
   zero the reloc (at least that is the intent, but some cooperation by
   the target dependent code is needed, particularly for REL targets).
*/ unsigned int _bfd_elf_default_action_discarded (asection *sec) { if (sec->flags & SEC_DEBUGGING) return PRETEND; if (strcmp (".eh_frame", sec->name) == 0) return 0; if (strcmp (".gcc_except_table", sec->name) == 0) return 0; return COMPLAIN | PRETEND; } /* Find a match between a section and a member of a section group. */ static asection * match_group_member (asection *sec, asection *group, struct bfd_link_info *info) { asection *first = elf_next_in_group (group); asection *s = first; while (s != NULL) { if (bfd_elf_match_symbols_in_sections (s, sec, info)) return s; s = elf_next_in_group (s); if (s == first) break; } return NULL; } /* Check if the kept section of a discarded section SEC can be used to replace it. Return the replacement if it is OK. Otherwise return NULL. */ asection * _bfd_elf_check_kept_section (asection *sec, struct bfd_link_info *info) { asection *kept; kept = sec->kept_section; if (kept != NULL) { if ((kept->flags & SEC_GROUP) != 0) kept = match_group_member (sec, kept, info); if (kept != NULL && sec->size != kept->size) kept = NULL; sec->kept_section = kept; } return kept; } /* Link an input file into the linker output file. This function handles all the sections and relocations of the input file at once. This is so that we only have to read the local symbols once, and don't have to keep them in memory. 
*/

static bfd_boolean
elf_link_input_bfd (struct elf_final_link_info *finfo, bfd *input_bfd)
{
  int (*relocate_section)
    (bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
     Elf_Internal_Rela *, Elf_Internal_Sym *, asection **);
  bfd *output_bfd;
  Elf_Internal_Shdr *symtab_hdr;
  size_t locsymcount;
  size_t extsymoff;
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Sym *isym;
  Elf_Internal_Sym *isymend;
  long *pindex;
  asection **ppsection;
  asection *o;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;

  output_bfd = finfo->output_bfd;
  bed = get_elf_backend_data (output_bfd);
  relocate_section = bed->elf_backend_relocate_section;

  /* If this is a dynamic object, we don't want to do anything here:
     we don't want the local symbols, and we don't want the section
     contents.  */
  if ((input_bfd->flags & DYNAMIC) != 0)
    return TRUE;

  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  if (elf_bad_symtab (input_bfd))
    {
      /* With a bad symtab the local/global split is unreliable, so
	 treat every symbol as potentially local.  */
      locsymcount = symtab_hdr->sh_size / bed->s->sizeof_sym;
      extsymoff = 0;
    }
  else
    {
      locsymcount = symtab_hdr->sh_info;
      extsymoff = symtab_hdr->sh_info;
    }

  /* Read the local symbols.  */
  isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (isymbuf == NULL && locsymcount != 0)
    {
      isymbuf = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, locsymcount, 0,
				      finfo->internal_syms,
				      finfo->external_syms,
				      finfo->locsym_shndx);
      if (isymbuf == NULL)
	return FALSE;
    }
  /* evaluate_complex_relocation_symbols looks for symbols in
     finfo->internal_syms.  */
  else if (isymbuf != NULL && locsymcount != 0)
    {
      /* NOTE(review): the return value of this call is ignored;
	 presumably a failure is benign here because isymbuf already
	 points at the cached symtab_hdr->contents -- confirm.  */
      bfd_elf_get_elf_syms (input_bfd, symtab_hdr, locsymcount, 0,
			    finfo->internal_syms, finfo->external_syms,
			    finfo->locsym_shndx);
    }

  /* Find local symbol sections and adjust values of symbols in
     SEC_MERGE sections.  Write out those local symbols we know are
     going into the output file.  */
  isymend = isymbuf + locsymcount;
  for (isym = isymbuf, pindex = finfo->indices, ppsection = finfo->sections;
       isym < isymend;
       isym++, pindex++, ppsection++)
    {
      asection *isec;
      const char *name;
      Elf_Internal_Sym osym;

      /* -1 means "not output (yet)"; may be filled in later if a
	 reloc turns out to need this symbol.  */
      *pindex = -1;

      if (elf_bad_symtab (input_bfd))
	{
	  if (ELF_ST_BIND (isym->st_info) != STB_LOCAL)
	    {
	      *ppsection = NULL;
	      continue;
	    }
	}

      /* Map the symbol's st_shndx to a BFD section.  */
      if (isym->st_shndx == SHN_UNDEF)
	isec = bfd_und_section_ptr;
      else if (isym->st_shndx < SHN_LORESERVE
	       || isym->st_shndx > SHN_HIRESERVE)
	{
	  isec = bfd_section_from_elf_index (input_bfd, isym->st_shndx);
	  if (isec
	      && isec->sec_info_type == ELF_INFO_TYPE_MERGE
	      && ELF_ST_TYPE (isym->st_info) != STT_SECTION)
	    isym->st_value =
	      _bfd_merged_section_offset (output_bfd, &isec,
					  elf_section_data (isec)->sec_info,
					  isym->st_value);
	}
      else if (isym->st_shndx == SHN_ABS)
	isec = bfd_abs_section_ptr;
      else if (isym->st_shndx == SHN_COMMON)
	isec = bfd_com_section_ptr;
      else
	{
	  /* Don't attempt to output symbols with st_shnx in the
	     reserved range other than SHN_ABS and SHN_COMMON.  */
	  *ppsection = NULL;
	  continue;
	}

      *ppsection = isec;

      /* Don't output the first, undefined, symbol.  */
      if (ppsection == finfo->sections)
	continue;

      if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
	{
	  /* We never output section symbols.  Instead, we use the
	     section symbol of the corresponding section in the output
	     file.  */
	  continue;
	}

      /* If we are stripping all symbols, we don't want to output this
	 one.  */
      if (finfo->info->strip == strip_all)
	continue;

      /* If we are discarding all local symbols, we don't want to
	 output this one.  If we are generating a relocatable output
	 file, then some of the local symbols may be required by
	 relocs; we output them below as we discover that they are
	 needed.  */
      if (finfo->info->discard == discard_all)
	continue;

      /* If this symbol is defined in a section which we are
	 discarding, we don't need to keep it.  */
      if (isym->st_shndx != SHN_UNDEF
	  && (isym->st_shndx < SHN_LORESERVE || isym->st_shndx > SHN_HIRESERVE)
	  && (isec == NULL
	      || bfd_section_removed_from_list (output_bfd,
						isec->output_section)))
	continue;

      /* Get the name of the symbol.  */
      name = bfd_elf_string_from_elf_section (input_bfd, symtab_hdr->sh_link,
					      isym->st_name);
      if (name == NULL)
	return FALSE;

      /* See if we are discarding symbols with this name.  */
      if ((finfo->info->strip == strip_some
	   && (bfd_hash_lookup (finfo->info->keep_hash, name, FALSE, FALSE)
	       == NULL))
	  || (((finfo->info->discard == discard_sec_merge
		&& (isec->flags & SEC_MERGE) && ! finfo->info->relocatable)
	       || finfo->info->discard == discard_l)
	      && bfd_is_local_label_name (input_bfd, name)))
	continue;

      /* If we get here, we are going to output this symbol.  */
      osym = *isym;

      /* Adjust the section index for the output file.  */
      osym.st_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
							 isec->output_section);
      if (osym.st_shndx == SHN_BAD)
	return FALSE;

      *pindex = bfd_get_symcount (output_bfd);

      /* ELF symbols in relocatable files are section relative, but
	 in executable files they are virtual addresses.  Note that
	 this code assumes that all ELF sections have an associated
	 BFD section with a reasonable value for output_offset; below
	 we assume that they also have a reasonable value for
	 output_section.  Any special sections must be set up to meet
	 these requirements.  */
      osym.st_value += isec->output_offset;
      if (! finfo->info->relocatable)
	{
	  osym.st_value += isec->output_section->vma;
	  if (ELF_ST_TYPE (osym.st_info) == STT_TLS)
	    {
	      /* STT_TLS symbols are relative to PT_TLS segment base.  */
	      BFD_ASSERT (elf_hash_table (finfo->info)->tls_sec != NULL);
	      osym.st_value -= elf_hash_table (finfo->info)->tls_sec->vma;
	    }
	}

      if (! elf_link_output_sym (finfo, name, &osym, isec, NULL))
	return FALSE;
    }

  if (! evaluate_complex_relocation_symbols (input_bfd, finfo, locsymcount))
    return FALSE;

  /* Relocate the contents of each section.  */
  sym_hashes = elf_sym_hashes (input_bfd);
  for (o = input_bfd->sections; o != NULL; o = o->next)
    {
      bfd_byte *contents;

      if (! o->linker_mark)
	{
	  /* This section was omitted from the link.  */
	  continue;
	}

      if ((o->flags & SEC_HAS_CONTENTS) == 0
	  || (o->size == 0 && (o->flags & SEC_RELOC) == 0))
	continue;

      if ((o->flags & SEC_LINKER_CREATED) != 0)
	{
	  /* Section was created by _bfd_elf_link_create_dynamic_sections
	     or somesuch.  */
	  continue;
	}

      /* Get the contents of the section.  They have been cached by a
	 relaxation routine.  Note that o is a section in an input
	 file, so the contents field will not have been set by any of
	 the routines which work on output files.  */
      if (elf_section_data (o)->this_hdr.contents != NULL)
	contents = elf_section_data (o)->this_hdr.contents;
      else
	{
	  bfd_size_type amt = o->rawsize ? o->rawsize : o->size;

	  contents = finfo->contents;
	  if (! bfd_get_section_contents (input_bfd, o, contents, 0, amt))
	    return FALSE;
	}

      if ((o->flags & SEC_RELOC) != 0)
	{
	  Elf_Internal_Rela *internal_relocs;
	  bfd_vma r_type_mask;
	  int r_sym_shift;
	  int ret;

	  /* Get the swapped relocs.  */
	  internal_relocs =
	    _bfd_elf_link_read_relocs (input_bfd, o, finfo->external_relocs,
				       finfo->internal_relocs, FALSE);
	  if (internal_relocs == NULL
	      && o->reloc_count > 0)
	    return FALSE;

	  /* How r_info splits into symbol index and reloc type
	     depends on the ELF class.  */
	  if (bed->s->arch_size == 32)
	    {
	      r_type_mask = 0xff;
	      r_sym_shift = 8;
	    }
	  else
	    {
	      r_type_mask = 0xffffffff;
	      r_sym_shift = 32;
	    }

	  /* Run through the relocs looking for any against symbols
	     from discarded sections and section symbols from removed
	     link-once sections.  Complain about relocs against
	     discarded sections.  Zero relocs against removed
	     link-once sections.  */
	  if (!elf_section_ignore_discarded_relocs (o))
	    {
	      Elf_Internal_Rela *rel, *relend;
	      unsigned int action = (*bed->action_discarded) (o);

	      rel = internal_relocs;
	      relend = rel + o->reloc_count * bed->s->int_rels_per_ext_rel;
	      for ( ; rel < relend; rel++)
		{
		  unsigned long r_symndx = rel->r_info >> r_sym_shift;
		  asection **ps, *sec;
		  struct elf_link_hash_entry *h = NULL;
		  const char *sym_name;

		  if (r_symndx == STN_UNDEF)
		    continue;

		  if (r_symndx >= locsymcount
		      || (elf_bad_symtab (input_bfd)
			  && finfo->sections[r_symndx] == NULL))
		    {
		      h = sym_hashes[r_symndx - extsymoff];

		      /* Badly formatted input files can contain relocs that
			 reference non-existant symbols.  Check here so that
			 we do not seg fault.  */
		      if (h == NULL)
			{
			  char buffer [32];

			  sprintf_vma (buffer, rel->r_info);
			  (*_bfd_error_handler)
			    (_("error: %B contains a reloc (0x%s) for section %A "
			       "that references a non-existent global symbol"),
			     input_bfd, o, buffer);
			  bfd_set_error (bfd_error_bad_value);
			  return FALSE;
			}

		      while (h->root.type == bfd_link_hash_indirect
			     || h->root.type == bfd_link_hash_warning)
			h = (struct elf_link_hash_entry *) h->root.u.i.link;

		      if (h->root.type != bfd_link_hash_defined
			  && h->root.type != bfd_link_hash_defweak)
			continue;

		      ps = &h->root.u.def.section;
		      sym_name = h->root.root.string;
		    }
		  else
		    {
		      Elf_Internal_Sym *sym = isymbuf + r_symndx;
		      ps = &finfo->sections[r_symndx];
		      sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr,
						   sym, *ps);
		    }

		  /* Complain if the definition comes from a
		     discarded section.  */
		  if ((sec = *ps) != NULL && elf_discarded_section (sec))
		    {
		      BFD_ASSERT (r_symndx != 0);
		      if (action & COMPLAIN)
			(*finfo->info->callbacks->einfo)
			  (_("%X`%s' referenced in section `%A' of %B: "
			     "defined in discarded section `%A' of %B\n"),
			   sym_name, o, input_bfd, sec, sec->owner);

		      /* Try to do the best we can to support buggy old
			 versions of gcc.  Pretend that the symbol is
			 really defined in the kept linkonce section.
			 FIXME: This is quite broken.  Modifying the
			 symbol here means we will be changing all later
			 uses of the symbol, not just in this section.  */
		      if (action & PRETEND)
			{
			  asection *kept;

			  kept = _bfd_elf_check_kept_section (sec,
							      finfo->info);
			  if (kept != NULL)
			    {
			      *ps = kept;
			      continue;
			    }
			}
		    }
		}
	    }

	  /* Relocate the section by invoking a back end routine.

	     The back end routine is responsible for adjusting the
	     section contents as necessary, and (if using Rela relocs
	     and generating a relocatable output file) adjusting the
	     reloc addend as necessary.

	     The back end routine does not have to worry about setting
	     the reloc address or the reloc symbol index.

	     The back end routine is given a pointer to the swapped in
	     internal symbols, and can access the hash table entries
	     for the external symbols via elf_sym_hashes (input_bfd).

	     When generating relocatable output, the back end routine
	     must handle STB_LOCAL/STT_SECTION symbols specially.  The
	     output symbol is going to be a section symbol
	     corresponding to the output section, which will require
	     the addend to be adjusted.  */
	  ret = (*relocate_section) (output_bfd, finfo->info,
				     input_bfd, o, contents,
				     internal_relocs,
				     isymbuf,
				     finfo->sections);
	  if (!ret)
	    return FALSE;

	  if (ret == 2
	      || finfo->info->relocatable
	      || finfo->info->emitrelocations)
	    {
	      Elf_Internal_Rela *irela;
	      Elf_Internal_Rela *irelaend;
	      bfd_vma last_offset;
	      struct elf_link_hash_entry **rel_hash;
	      struct elf_link_hash_entry **rel_hash_list;
	      Elf_Internal_Shdr *input_rel_hdr, *input_rel_hdr2;
	      unsigned int next_erel;
	      bfd_boolean rela_normal;

	      input_rel_hdr = &elf_section_data (o)->rel_hdr;
	      rela_normal = (bed->rela_normal
			     && (input_rel_hdr->sh_entsize
				 == bed->s->sizeof_rela));

	      /* Adjust the reloc addresses and symbol indices.  */
	      irela = internal_relocs;
	      irelaend = irela + o->reloc_count * bed->s->int_rels_per_ext_rel;
	      rel_hash = (elf_section_data (o->output_section)->rel_hashes
			  + elf_section_data (o->output_section)->rel_count
			  + elf_section_data (o->output_section)->rel_count2);
	      rel_hash_list = rel_hash;
	      last_offset = o->output_offset;
	      if (!finfo->info->relocatable)
		last_offset += o->output_section->vma;
	      for (next_erel = 0; irela < irelaend; irela++, next_erel++)
		{
		  unsigned long r_symndx;
		  asection *sec;
		  Elf_Internal_Sym sym;

		  if (next_erel == bed->s->int_rels_per_ext_rel)
		    {
		      rel_hash++;
		      next_erel = 0;
		    }

		  irela->r_offset = _bfd_elf_section_offset (output_bfd,
							     finfo->info, o,
							     irela->r_offset);
		  if (irela->r_offset >= (bfd_vma) -2)
		    {
		      /* This is a reloc for a deleted entry or somesuch.
			 Turn it into an R_*_NONE reloc, at the same
			 offset as the last reloc.  elf_eh_frame.c and
			 bfd_elf_discard_info rely on reloc offsets
			 being ordered.  */
		      irela->r_offset = last_offset;
		      irela->r_info = 0;
		      irela->r_addend = 0;
		      continue;
		    }

		  irela->r_offset += o->output_offset;

		  /* Relocs in an executable have to be virtual
		     addresses.  */
		  if (!finfo->info->relocatable)
		    irela->r_offset += o->output_section->vma;

		  last_offset = irela->r_offset;

		  r_symndx = irela->r_info >> r_sym_shift;
		  if (r_symndx == STN_UNDEF)
		    continue;

		  if (r_symndx >= locsymcount
		      || (elf_bad_symtab (input_bfd)
			  && finfo->sections[r_symndx] == NULL))
		    {
		      struct elf_link_hash_entry *rh;
		      unsigned long indx;

		      /* This is a reloc against a global symbol.  We
			 have not yet output all the local symbols, so
			 we do not know the symbol index of any global
			 symbol.  We set the rel_hash entry for this
			 reloc to point to the global hash table entry
			 for this symbol.  The symbol index is then
			 set at the end of bfd_elf_final_link.  */
		      indx = r_symndx - extsymoff;
		      rh = elf_sym_hashes (input_bfd)[indx];
		      while (rh->root.type == bfd_link_hash_indirect
			     || rh->root.type == bfd_link_hash_warning)
			rh = (struct elf_link_hash_entry *) rh->root.u.i.link;

		      /* Setting the index to -2 tells
			 elf_link_output_extsym that this symbol is
			 used by a reloc.  */
		      BFD_ASSERT (rh->indx < 0);
		      rh->indx = -2;

		      *rel_hash = rh;

		      continue;
		    }

		  /* This is a reloc against a local symbol.  */
		  *rel_hash = NULL;
		  sym = isymbuf[r_symndx];
		  sec = finfo->sections[r_symndx];
		  if (ELF_ST_TYPE (sym.st_info) == STT_SECTION)
		    {
		      /* I suppose the backend ought to fill in the
			 section of any STT_SECTION symbol against a
			 processor specific section.  */
		      r_symndx = 0;
		      if (bfd_is_abs_section (sec))
			;
		      else if (sec == NULL || sec->owner == NULL)
			{
			  bfd_set_error (bfd_error_bad_value);
			  return FALSE;
			}
		      else
			{
			  asection *osec = sec->output_section;

			  /* If we have discarded a section, the output
			     section will be the absolute section.  In
			     case of discarded SEC_MERGE sections, use
			     the kept section.  relocate_section should
			     have already handled discarded linkonce
			     sections.  */
			  if (bfd_is_abs_section (osec)
			      && sec->kept_section != NULL
			      && sec->kept_section->output_section != NULL)
			    {
			      osec = sec->kept_section->output_section;
			      irela->r_addend -= osec->vma;
			    }

			  if (!bfd_is_abs_section (osec))
			    {
			      r_symndx = osec->target_index;
			      if (r_symndx == 0)
				{
				  /* Fall back to the per-output-bfd
				     .text/.data index sections when the
				     output section has no symbol of its
				     own.  */
				  struct elf_link_hash_table *htab;
				  asection *oi;

				  htab = elf_hash_table (finfo->info);
				  oi = htab->text_index_section;
				  if ((osec->flags & SEC_READONLY) == 0
				      && htab->data_index_section != NULL)
				    oi = htab->data_index_section;

				  if (oi != NULL)
				    {
				      irela->r_addend += osec->vma - oi->vma;
				      r_symndx = oi->target_index;
				    }
				}

			      BFD_ASSERT (r_symndx != 0);
			    }
			}

		      /* Adjust the addend according to where the
			 section winds up in the output section.  */
		      if (rela_normal)
			irela->r_addend += sec->output_offset;
		    }
		  else
		    {
		      if (finfo->indices[r_symndx] == -1)
			{
			  unsigned long shlink;
			  const char *name;
			  asection *osec;

			  if (finfo->info->strip == strip_all)
			    {
			      /* You can't do ld -r -s.  */
			      bfd_set_error (bfd_error_invalid_operation);
			      return FALSE;
			    }

			  /* This symbol was skipped earlier, but
			     since it is needed by a reloc, we
			     must output it now.  */
			  shlink = symtab_hdr->sh_link;
			  name = (bfd_elf_string_from_elf_section
				  (input_bfd, shlink, sym.st_name));
			  if (name == NULL)
			    return FALSE;

			  osec = sec->output_section;
			  sym.st_shndx =
			    _bfd_elf_section_from_bfd_section (output_bfd,
							       osec);
			  if (sym.st_shndx == SHN_BAD)
			    return FALSE;

			  sym.st_value += sec->output_offset;
			  if (! finfo->info->relocatable)
			    {
			      sym.st_value += osec->vma;
			      if (ELF_ST_TYPE (sym.st_info) == STT_TLS)
				{
				  /* STT_TLS symbols are relative to
				     PT_TLS segment base.  */
				  BFD_ASSERT (elf_hash_table (finfo->info)
					      ->tls_sec != NULL);
				  sym.st_value -= (elf_hash_table (finfo->info)
						   ->tls_sec->vma);
				}
			    }

			  finfo->indices[r_symndx]
			    = bfd_get_symcount (output_bfd);

			  if (! elf_link_output_sym (finfo, name, &sym, sec,
						     NULL))
			    return FALSE;
			}

		      r_symndx = finfo->indices[r_symndx];
		    }

		  irela->r_info = ((bfd_vma) r_symndx << r_sym_shift
				   | (irela->r_info & r_type_mask));
		}

	      /* Swap out the relocs.  */
	      if (input_rel_hdr->sh_size != 0
		  && !bed->elf_backend_emit_relocs (output_bfd, o,
						    input_rel_hdr,
						    internal_relocs,
						    rel_hash_list))
		return FALSE;

	      input_rel_hdr2 = elf_section_data (o)->rel_hdr2;
	      if (input_rel_hdr2 && input_rel_hdr2->sh_size != 0)
		{
		  /* Emit the second block of relocs (a section can
		     carry both REL and RELA headers).  */
		  internal_relocs += (NUM_SHDR_ENTRIES (input_rel_hdr)
				      * bed->s->int_rels_per_ext_rel);
		  rel_hash_list += NUM_SHDR_ENTRIES (input_rel_hdr);
		  if (!bed->elf_backend_emit_relocs (output_bfd, o,
						     input_rel_hdr2,
						     internal_relocs,
						     rel_hash_list))
		    return FALSE;
		}
	    }
	}

      /* Write out the modified section contents.  */
      if (bed->elf_backend_write_section
	  && (*bed->elf_backend_write_section) (output_bfd, finfo->info, o,
						contents))
	{
	  /* Section written out.  */
	}
      else switch (o->sec_info_type)
	{
	case ELF_INFO_TYPE_STABS:
	  if (! (_bfd_write_section_stabs
		 (output_bfd,
		  &elf_hash_table (finfo->info)->stab_info,
		  o, &elf_section_data (o)->sec_info, contents)))
	    return FALSE;
	  break;
	case ELF_INFO_TYPE_MERGE:
	  if (! _bfd_write_merged_section (output_bfd, o,
					   elf_section_data (o)->sec_info))
	    return FALSE;
	  break;
	case ELF_INFO_TYPE_EH_FRAME:
	  {
	    if (! _bfd_elf_write_section_eh_frame (output_bfd, finfo->info,
						   o, contents))
	      return FALSE;
	  }
	  break;
	default:
	  {
	    if (! (o->flags & SEC_EXCLUDE)
		&& ! bfd_set_section_contents (output_bfd, o->output_section,
					       contents,
					       (file_ptr) o->output_offset,
					       o->size))
	      return FALSE;
	  }
	  break;
	}
    }

  return TRUE;
}

/* Generate a reloc when linking an ELF file.  This is a reloc
   requested by the linker, and does not come from any input file.  This
   is used to build constructor and destructor tables when linking
   with -Ur.  */

static bfd_boolean
elf_reloc_link_order (bfd *output_bfd,
		      struct bfd_link_info *info,
		      asection *output_section,
		      struct bfd_link_order *link_order)
{
  reloc_howto_type *howto;
  long indx;
  bfd_vma offset;
  bfd_vma addend;
  struct elf_link_hash_entry **rel_hash_ptr;
  Elf_Internal_Shdr *rel_hdr;
  const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
  Elf_Internal_Rela irel[MAX_INT_RELS_PER_EXT_REL];
  bfd_byte *erel;
  unsigned int i;

  howto = bfd_reloc_type_lookup (output_bfd, link_order->u.reloc.p->reloc);
  if (howto == NULL)
    {
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  addend = link_order->u.reloc.p->addend;

  /* Figure out the symbol index.  */
  rel_hash_ptr = (elf_section_data (output_section)->rel_hashes
		  + elf_section_data (output_section)->rel_count
		  + elf_section_data (output_section)->rel_count2);
  if (link_order->type == bfd_section_reloc_link_order)
    {
      indx = link_order->u.reloc.p->u.section->target_index;
      BFD_ASSERT (indx != 0);
      *rel_hash_ptr = NULL;
    }
  else
    {
      struct elf_link_hash_entry *h;

      /* Treat a reloc against a defined symbol as though it were
	 actually against the section.  */
      h = ((struct elf_link_hash_entry *)
	   bfd_wrapped_link_hash_lookup (output_bfd, info,
					 link_order->u.reloc.p->u.name,
					 FALSE, FALSE, TRUE));
      if (h != NULL
	  && (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak))
	{
	  asection *section;

	  section = h->root.u.def.section;
	  indx = section->output_section->target_index;
	  *rel_hash_ptr = NULL;
	  /* It seems that we ought to add the symbol value to the
	     addend here, but in practice it has already been added
	     because it was passed to constructor_callback.  */
	  addend += section->output_section->vma + section->output_offset;
	}
      else if (h != NULL)
	{
	  /* Setting the index to -2 tells elf_link_output_extsym that
	     this symbol is used by a reloc.  */
	  h->indx = -2;
	  *rel_hash_ptr = h;
	  indx = 0;
	}
      else
	{
	  if (! ((*info->callbacks->unattached_reloc)
		 (info, link_order->u.reloc.p->u.name, NULL, NULL, 0)))
	    return FALSE;
	  indx = 0;
	}
    }

  /* If this is an inplace reloc, we must write the addend into the
     object file.  */
  if (howto->partial_inplace && addend != 0)
    {
      bfd_size_type size;
      bfd_reloc_status_type rstat;
      bfd_byte *buf;
      bfd_boolean ok;
      const char *sym_name;

      size = bfd_get_reloc_size (howto);
      buf = bfd_zmalloc (size);
      if (buf == NULL)
	return FALSE;
      rstat = _bfd_relocate_contents (howto, output_bfd, addend, buf);
      switch (rstat)
	{
	case bfd_reloc_ok:
	  break;

	default:
	case bfd_reloc_outofrange:
	  abort ();

	case bfd_reloc_overflow:
	  if (link_order->type == bfd_section_reloc_link_order)
	    sym_name = bfd_section_name (output_bfd,
					 link_order->u.reloc.p->u.section);
	  else
	    sym_name = link_order->u.reloc.p->u.name;
	  if (! ((*info->callbacks->reloc_overflow)
		 (info, NULL, sym_name, howto->name, addend, NULL,
		  NULL, (bfd_vma) 0)))
	    {
	      free (buf);
	      return FALSE;
	    }
	  break;
	}
      ok = bfd_set_section_contents (output_bfd, output_section, buf,
				     link_order->offset, size);
      free (buf);
      if (! ok)
	return FALSE;
    }

  /* The address of a reloc is relative to the section in a
     relocatable file, and is a virtual address in an executable
     file.  */
  offset = link_order->offset;
  if (! info->relocatable)
    offset += output_section->vma;

  for (i = 0; i < bed->s->int_rels_per_ext_rel; i++)
    {
      irel[i].r_offset = offset;
      irel[i].r_info = 0;
      irel[i].r_addend = 0;
    }
  if (bed->s->arch_size == 32)
    irel[0].r_info = ELF32_R_INFO (indx, howto->type);
  else
    irel[0].r_info = ELF64_R_INFO (indx, howto->type);

  rel_hdr = &elf_section_data (output_section)->rel_hdr;
  erel = rel_hdr->contents;
  if (rel_hdr->sh_type == SHT_REL)
    {
      erel += (elf_section_data (output_section)->rel_count
	       * bed->s->sizeof_rel);
      (*bed->s->swap_reloc_out) (output_bfd, irel, erel);
    }
  else
    {
      /* RELA relocs carry the addend in the reloc itself.  */
      irel[0].r_addend = addend;
      erel += (elf_section_data (output_section)->rel_count
	       * bed->s->sizeof_rela);
      (*bed->s->swap_reloca_out) (output_bfd, irel, erel);
    }

  ++elf_section_data (output_section)->rel_count;

  return TRUE;
}

/* Get the output vma of the section pointed to by the sh_link field.  */

static bfd_vma
elf_get_linked_section_vma (struct bfd_link_order *p)
{
  Elf_Internal_Shdr **elf_shdrp;
  asection *s;
  int elfsec;

  s = p->u.indirect.section;
  elf_shdrp = elf_elfsections (s->owner);
  elfsec = _bfd_elf_section_from_bfd_section (s->owner, s);
  elfsec = elf_shdrp[elfsec]->sh_link;
  /* PR 290:
     The Intel C compiler generates SHT_IA_64_UNWIND with
     SHF_LINK_ORDER.  But it doesn't set the sh_link or sh_info fields.
     Hence we could get the situation where elfsec is 0.  */
  if (elfsec == 0)
    {
      const struct elf_backend_data *bed = get_elf_backend_data (s->owner);
      if (bed->link_order_error_handler)
	bed->link_order_error_handler
	  (_("%B: warning: sh_link not set for section `%A'"), s->owner, s);
      return 0;
    }
  else
    {
      s = elf_shdrp[elfsec]->bfd_section;
      return s->output_section->vma + s->output_offset;
    }
}

/* Compare two sections based on the locations of the sections they are
   linked to.  Used by elf_fixup_link_order.
*/

static int
compare_link_order (const void * a, const void * b)
{
  bfd_vma apos;
  bfd_vma bpos;

  apos = elf_get_linked_section_vma (*(struct bfd_link_order **)a);
  bpos = elf_get_linked_section_vma (*(struct bfd_link_order **)b);
  if (apos < bpos)
    return -1;
  return apos > bpos;
}

/* Looks for sections with SHF_LINK_ORDER set.  Rearranges them into the
   same order as their linked sections.  Returns false if this could not
   be done because an output section includes both ordered and unordered
   sections.  Ideally we'd do this in the linker proper.  */

static bfd_boolean
elf_fixup_link_order (bfd *abfd, asection *o)
{
  int seen_linkorder;
  int seen_other;
  int n;
  struct bfd_link_order *p;
  bfd *sub;
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  unsigned elfsec;
  struct bfd_link_order **sections;
  asection *s, *other_sec, *linkorder_sec;
  bfd_vma offset;
  bfd_vma mask;

  /* First pass: classify each input section of O as link-ordered or
     not.  A mix of the two in one output section cannot be fixed up.  */
  other_sec = NULL;
  linkorder_sec = NULL;
  seen_other = 0;
  seen_linkorder = 0;
  for (p = o->map_head.link_order; p != NULL; p = p->next)
    {
      if (p->type == bfd_indirect_link_order)
	{
	  s = p->u.indirect.section;
	  sub = s->owner;
	  if (bfd_get_flavour (sub) == bfd_target_elf_flavour
	      && elf_elfheader (sub)->e_ident[EI_CLASS] == bed->s->elfclass
	      && (elfsec = _bfd_elf_section_from_bfd_section (sub, s))
	      && elfsec < elf_numsections (sub)
	      && elf_elfsections (sub)[elfsec]->sh_flags & SHF_LINK_ORDER)
	    {
	      seen_linkorder++;
	      linkorder_sec = s;
	    }
	  else
	    {
	      seen_other++;
	      other_sec = s;
	    }
	}
      else
	seen_other++;

      if (seen_other && seen_linkorder)
	{
	  if (other_sec && linkorder_sec)
	    (*_bfd_error_handler)
	      (_("%A has both ordered [`%A' in %B] and unordered [`%A' in %B] sections"),
	       o, linkorder_sec, linkorder_sec->owner,
	       other_sec, other_sec->owner);
	  else
	    (*_bfd_error_handler)
	      (_("%A has both ordered and unordered sections"), o);
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}
    }

  if (!seen_linkorder)
    return TRUE;

  /* Collect the link orders into an array so they can be sorted by the
     output address of the section each one is linked to.  */
  sections = (struct bfd_link_order **)
    xmalloc (seen_linkorder * sizeof (struct bfd_link_order *));
  seen_linkorder = 0;
  for (p = o->map_head.link_order; p != NULL; p = p->next)
    {
      sections[seen_linkorder++] = p;
    }
  /* Sort the input sections in the order of their linked section.  */
  qsort (sections, seen_linkorder, sizeof (struct bfd_link_order *),
	 compare_link_order);

  /* Change the offsets of the sections.  */
  offset = 0;
  for (n = 0; n < seen_linkorder; n++)
    {
      s = sections[n]->u.indirect.section;
      /* FIX: round OFFSET up to the section's alignment.  The previous
	 code masked the low bits off (rounding DOWN), which could move
	 a section backwards onto the tail of its predecessor.  The
	 shifted all-ones mask also avoids the undefined behaviour of
	 "1 << alignment_power" for alignment_power >= the bit width
	 of int.  This matches the later upstream binutils fix.  */
      mask = ~(bfd_vma) 0 << s->alignment_power;
      offset = (offset + ~mask) & mask;
      s->output_offset = offset;
      sections[n]->offset = offset;
      offset += sections[n]->size;
    }

  /* FIX: the sorted array was leaked on the successful path.  */
  free (sections);
  return TRUE;
}

/* Do the final step of an ELF link.  */

bfd_boolean
bfd_elf_final_link (bfd *abfd, struct bfd_link_info *info)
{
  bfd_boolean dynamic;
  bfd_boolean emit_relocs;
  bfd *dynobj;
  struct elf_final_link_info finfo;
  register asection *o;
  register struct bfd_link_order *p;
  register bfd *sub;
  bfd_size_type max_contents_size;
  bfd_size_type max_external_reloc_size;
  bfd_size_type max_internal_reloc_count;
  bfd_size_type max_sym_count;
  bfd_size_type max_sym_shndx_count;
  file_ptr off;
  Elf_Internal_Sym elfsym;
  unsigned int i;
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Shdr *symtab_shndx_hdr;
  Elf_Internal_Shdr *symstrtab_hdr;
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  struct elf_outext_info eoinfo;
  bfd_boolean merged;
  size_t relativecount = 0;
  asection *reldyn = 0;
  bfd_size_type amt;
  asection *attr_section = NULL;
  bfd_vma attr_size = 0;
  const char *std_attrs_section;

  if (! is_elf_hash_table (info->hash))
    return FALSE;

  if (info->shared)
    abfd->flags |= DYNAMIC;

  dynamic = elf_hash_table (info)->dynamic_sections_created;
  dynobj = elf_hash_table (info)->dynobj;

  emit_relocs = (info->relocatable
		 || info->emitrelocations);

  finfo.info = info;
  finfo.output_bfd = abfd;
  finfo.symstrtab = _bfd_elf_stringtab_init ();
  if (finfo.symstrtab == NULL)
    return FALSE;

  if (!
dynamic) { finfo.dynsym_sec = NULL; finfo.hash_sec = NULL; finfo.symver_sec = NULL; } else { finfo.dynsym_sec = bfd_get_section_by_name (dynobj, ".dynsym"); finfo.hash_sec = bfd_get_section_by_name (dynobj, ".hash"); BFD_ASSERT (finfo.dynsym_sec != NULL); finfo.symver_sec = bfd_get_section_by_name (dynobj, ".gnu.version"); /* Note that it is OK if symver_sec is NULL. */ } finfo.contents = NULL; finfo.external_relocs = NULL; finfo.internal_relocs = NULL; finfo.external_syms = NULL; finfo.locsym_shndx = NULL; finfo.internal_syms = NULL; finfo.indices = NULL; finfo.sections = NULL; finfo.symbuf = NULL; finfo.symshndxbuf = NULL; finfo.symbuf_count = 0; finfo.shndxbuf_size = 0; /* The object attributes have been merged. Remove the input sections from the link, and set the contents of the output secton. */ std_attrs_section = get_elf_backend_data (abfd)->obj_attrs_section; for (o = abfd->sections; o != NULL; o = o->next) { if ((std_attrs_section && strcmp (o->name, std_attrs_section) == 0) || strcmp (o->name, ".gnu.attributes") == 0) { for (p = o->map_head.link_order; p != NULL; p = p->next) { asection *input_section; if (p->type != bfd_indirect_link_order) continue; input_section = p->u.indirect.section; /* Hack: reset the SEC_HAS_CONTENTS flag so that elf_link_input_bfd ignores this section. */ input_section->flags &= ~SEC_HAS_CONTENTS; } attr_size = bfd_elf_obj_attr_size (abfd); if (attr_size) { bfd_set_section_size (abfd, o, attr_size); attr_section = o; /* Skip this section later on. */ o->map_head.link_order = NULL; } else o->flags |= SEC_EXCLUDE; } } /* Count up the number of relocations we will output for each output section, so that we know the sizes of the reloc sections. We also figure out some maximum sizes. 
*/ max_contents_size = 0; max_external_reloc_size = 0; max_internal_reloc_count = 0; max_sym_count = 0; max_sym_shndx_count = 0; merged = FALSE; for (o = abfd->sections; o != NULL; o = o->next) { struct bfd_elf_section_data *esdo = elf_section_data (o); o->reloc_count = 0; for (p = o->map_head.link_order; p != NULL; p = p->next) { unsigned int reloc_count = 0; struct bfd_elf_section_data *esdi = NULL; unsigned int *rel_count1; if (p->type == bfd_section_reloc_link_order || p->type == bfd_symbol_reloc_link_order) reloc_count = 1; else if (p->type == bfd_indirect_link_order) { asection *sec; sec = p->u.indirect.section; esdi = elf_section_data (sec); /* Mark all sections which are to be included in the link. This will normally be every section. We need to do this so that we can identify any sections which the linker has decided to not include. */ sec->linker_mark = TRUE; if (sec->flags & SEC_MERGE) merged = TRUE; if (info->relocatable || info->emitrelocations) reloc_count = sec->reloc_count; else if (bed->elf_backend_count_relocs) { Elf_Internal_Rela * relocs; relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL, info->keep_memory); if (relocs != NULL) { reloc_count = (*bed->elf_backend_count_relocs) (sec, relocs); if (elf_section_data (sec)->relocs != relocs) free (relocs); } } if (sec->rawsize > max_contents_size) max_contents_size = sec->rawsize; if (sec->size > max_contents_size) max_contents_size = sec->size; /* We are interested in just local symbols, not all symbols. 
*/ if (bfd_get_flavour (sec->owner) == bfd_target_elf_flavour && (sec->owner->flags & DYNAMIC) == 0) { size_t sym_count; if (elf_bad_symtab (sec->owner)) sym_count = (elf_tdata (sec->owner)->symtab_hdr.sh_size / bed->s->sizeof_sym); else sym_count = elf_tdata (sec->owner)->symtab_hdr.sh_info; if (sym_count > max_sym_count) max_sym_count = sym_count; if (sym_count > max_sym_shndx_count && elf_symtab_shndx (sec->owner) != 0) max_sym_shndx_count = sym_count; if ((sec->flags & SEC_RELOC) != 0) { size_t ext_size; ext_size = elf_section_data (sec)->rel_hdr.sh_size; if (ext_size > max_external_reloc_size) max_external_reloc_size = ext_size; if (sec->reloc_count > max_internal_reloc_count) max_internal_reloc_count = sec->reloc_count; } } } if (reloc_count == 0) continue; o->reloc_count += reloc_count; /* MIPS may have a mix of REL and RELA relocs on sections. To support this curious ABI we keep reloc counts in elf_section_data too. We must be careful to add the relocations from the input section to the right output count. FIXME: Get rid of one count. We have o->reloc_count == esdo->rel_count + esdo->rel_count2. */ rel_count1 = &esdo->rel_count; if (esdi != NULL) { bfd_boolean same_size; bfd_size_type entsize1; entsize1 = esdi->rel_hdr.sh_entsize; BFD_ASSERT (entsize1 == bed->s->sizeof_rel || entsize1 == bed->s->sizeof_rela); same_size = !o->use_rela_p == (entsize1 == bed->s->sizeof_rel); if (!same_size) rel_count1 = &esdo->rel_count2; if (esdi->rel_hdr2 != NULL) { bfd_size_type entsize2 = esdi->rel_hdr2->sh_entsize; unsigned int alt_count; unsigned int *rel_count2; BFD_ASSERT (entsize2 != entsize1 && (entsize2 == bed->s->sizeof_rel || entsize2 == bed->s->sizeof_rela)); rel_count2 = &esdo->rel_count2; if (!same_size) rel_count2 = &esdo->rel_count; /* The following is probably too simplistic if the backend counts output relocs unusually. 
*/ BFD_ASSERT (bed->elf_backend_count_relocs == NULL); alt_count = NUM_SHDR_ENTRIES (esdi->rel_hdr2); *rel_count2 += alt_count; reloc_count -= alt_count; } } *rel_count1 += reloc_count; } if (o->reloc_count > 0) o->flags |= SEC_RELOC; else { /* Explicitly clear the SEC_RELOC flag. The linker tends to set it (this is probably a bug) and if it is set assign_section_numbers will create a reloc section. */ o->flags &=~ SEC_RELOC; } /* If the SEC_ALLOC flag is not set, force the section VMA to zero. This is done in elf_fake_sections as well, but forcing the VMA to 0 here will ensure that relocs against these sections are handled correctly. */ if ((o->flags & SEC_ALLOC) == 0 && ! o->user_set_vma) o->vma = 0; } if (! info->relocatable && merged) elf_link_hash_traverse (elf_hash_table (info), _bfd_elf_link_sec_merge_syms, abfd); /* Figure out the file positions for everything but the symbol table and the relocs. We set symcount to force assign_section_numbers to create a symbol table. */ bfd_get_symcount (abfd) = info->strip == strip_all ? 0 : 1; BFD_ASSERT (! abfd->output_has_begun); if (! _bfd_elf_compute_section_file_positions (abfd, info)) goto error_return; /* Set sizes, and assign file positions for reloc sections. */ for (o = abfd->sections; o != NULL; o = o->next) { if ((o->flags & SEC_RELOC) != 0) { if (!(_bfd_elf_link_size_reloc_section (abfd, &elf_section_data (o)->rel_hdr, o))) goto error_return; if (elf_section_data (o)->rel_hdr2 && !(_bfd_elf_link_size_reloc_section (abfd, elf_section_data (o)->rel_hdr2, o))) goto error_return; } /* Now, reset REL_COUNT and REL_COUNT2 so that we can use them to count upwards while actually outputting the relocations. */ elf_section_data (o)->rel_count = 0; elf_section_data (o)->rel_count2 = 0; } _bfd_elf_assign_file_positions_for_relocs (abfd); /* We have now assigned file positions for all the sections except .symtab and .strtab. We start the .symtab section at the current file position, and write directly to it. 
We build the .strtab section in memory. */ bfd_get_symcount (abfd) = 0; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; /* sh_name is set in prep_headers. */ symtab_hdr->sh_type = SHT_SYMTAB; /* sh_flags, sh_addr and sh_size all start off zero. */ symtab_hdr->sh_entsize = bed->s->sizeof_sym; /* sh_link is set in assign_section_numbers. */ /* sh_info is set below. */ /* sh_offset is set just below. */ symtab_hdr->sh_addralign = 1 << bed->s->log_file_align; off = elf_tdata (abfd)->next_file_pos; off = _bfd_elf_assign_file_position_for_section (symtab_hdr, off, TRUE); /* Note that at this point elf_tdata (abfd)->next_file_pos is incorrect. We do not yet know the size of the .symtab section. We correct next_file_pos below, after we do know the size. */ /* Allocate a buffer to hold swapped out symbols. This is to avoid continuously seeking to the right position in the file. */ if (! info->keep_memory || max_sym_count < 20) finfo.symbuf_size = 20; else finfo.symbuf_size = max_sym_count; amt = finfo.symbuf_size; amt *= bed->s->sizeof_sym; finfo.symbuf = bfd_malloc (amt); if (finfo.symbuf == NULL) goto error_return; if (elf_numsections (abfd) > SHN_LORESERVE) { /* Wild guess at number of output symbols. realloc'd as needed. */ amt = 2 * max_sym_count + elf_numsections (abfd) + 1000; finfo.shndxbuf_size = amt; amt *= sizeof (Elf_External_Sym_Shndx); finfo.symshndxbuf = bfd_zmalloc (amt); if (finfo.symshndxbuf == NULL) goto error_return; } /* Start writing out the symbol table. The first symbol is always a dummy symbol. */ if (info->strip != strip_all || emit_relocs) { elfsym.st_value = 0; elfsym.st_size = 0; elfsym.st_info = 0; elfsym.st_other = 0; elfsym.st_shndx = SHN_UNDEF; if (! elf_link_output_sym (&finfo, NULL, &elfsym, bfd_und_section_ptr, NULL)) goto error_return; } /* Output a symbol for each section. We output these even if we are discarding local symbols, since they are used for relocs. These symbols have no names. 
We store the index of each one in the index field of the section, so that we can find it again when outputting relocs. */ if (info->strip != strip_all || emit_relocs) { elfsym.st_size = 0; elfsym.st_info = ELF_ST_INFO (STB_LOCAL, STT_SECTION); elfsym.st_other = 0; elfsym.st_value = 0; for (i = 1; i < elf_numsections (abfd); i++) { o = bfd_section_from_elf_index (abfd, i); if (o != NULL) { o->target_index = bfd_get_symcount (abfd); elfsym.st_shndx = i; if (!info->relocatable) elfsym.st_value = o->vma; if (!elf_link_output_sym (&finfo, NULL, &elfsym, o, NULL)) goto error_return; } if (i == SHN_LORESERVE - 1) i += SHN_HIRESERVE + 1 - SHN_LORESERVE; } } /* Allocate some memory to hold information read in from the input files. */ if (max_contents_size != 0) { finfo.contents = bfd_malloc (max_contents_size); if (finfo.contents == NULL) goto error_return; } if (max_external_reloc_size != 0) { finfo.external_relocs = bfd_malloc (max_external_reloc_size); if (finfo.external_relocs == NULL) goto error_return; } if (max_internal_reloc_count != 0) { amt = max_internal_reloc_count * bed->s->int_rels_per_ext_rel; amt *= sizeof (Elf_Internal_Rela); finfo.internal_relocs = bfd_malloc (amt); if (finfo.internal_relocs == NULL) goto error_return; } if (max_sym_count != 0) { amt = max_sym_count * bed->s->sizeof_sym; finfo.external_syms = bfd_malloc (amt); if (finfo.external_syms == NULL) goto error_return; amt = max_sym_count * sizeof (Elf_Internal_Sym); finfo.internal_syms = bfd_malloc (amt); if (finfo.internal_syms == NULL) goto error_return; amt = max_sym_count * sizeof (long); finfo.indices = bfd_malloc (amt); if (finfo.indices == NULL) goto error_return; amt = max_sym_count * sizeof (asection *); finfo.sections = bfd_malloc (amt); if (finfo.sections == NULL) goto error_return; } if (max_sym_shndx_count != 0) { amt = max_sym_shndx_count * sizeof (Elf_External_Sym_Shndx); finfo.locsym_shndx = bfd_malloc (amt); if (finfo.locsym_shndx == NULL) goto error_return; } if (elf_hash_table 
(info)->tls_sec) { bfd_vma base, end = 0; asection *sec; for (sec = elf_hash_table (info)->tls_sec; sec && (sec->flags & SEC_THREAD_LOCAL); sec = sec->next) { bfd_size_type size = sec->size; if (size == 0 && (sec->flags & SEC_HAS_CONTENTS) == 0) { struct bfd_link_order *o = sec->map_tail.link_order; if (o != NULL) size = o->offset + o->size; } end = sec->vma + size; } base = elf_hash_table (info)->tls_sec->vma; end = align_power (end, elf_hash_table (info)->tls_sec->alignment_power); elf_hash_table (info)->tls_size = end - base; } /* Reorder SHF_LINK_ORDER sections. */ for (o = abfd->sections; o != NULL; o = o->next) { if (!elf_fixup_link_order (abfd, o)) return FALSE; } /* Since ELF permits relocations to be against local symbols, we must have the local symbols available when we do the relocations. Since we would rather only read the local symbols once, and we would rather not keep them in memory, we handle all the relocations for a single input file at the same time. Unfortunately, there is no way to know the total number of local symbols until we have seen all of them, and the local symbol indices precede the global symbol indices. This means that when we are generating relocatable output, and we see a reloc against a global symbol, we can not know the symbol index until we have finished examining all the local symbols to see which ones we are going to output. To deal with this, we keep the relocations in memory, and don't output them until the end of the link. This is an unfortunate waste of memory, but I don't see a good way around it. Fortunately, it only happens when performing a relocatable link, which is not the common case. FIXME: If keep_memory is set we could write the relocs out and then read them again; I don't know how bad the memory loss will be. 
*/ for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) sub->output_has_begun = FALSE; for (o = abfd->sections; o != NULL; o = o->next) { for (p = o->map_head.link_order; p != NULL; p = p->next) { if (p->type == bfd_indirect_link_order && (bfd_get_flavour ((sub = p->u.indirect.section->owner)) == bfd_target_elf_flavour) && elf_elfheader (sub)->e_ident[EI_CLASS] == bed->s->elfclass) { if (! sub->output_has_begun) { if (! elf_link_input_bfd (&finfo, sub)) goto error_return; sub->output_has_begun = TRUE; } } else if (p->type == bfd_section_reloc_link_order || p->type == bfd_symbol_reloc_link_order) { if (! elf_reloc_link_order (abfd, info, o, p)) goto error_return; } else { if (! _bfd_default_link_order (abfd, info, o, p)) goto error_return; } } } /* Free symbol buffer if needed. */ if (!info->reduce_memory_overheads) { for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) if (bfd_get_flavour (sub) == bfd_target_elf_flavour && elf_tdata (sub)->symbuf) { free (elf_tdata (sub)->symbuf); elf_tdata (sub)->symbuf = NULL; } } /* Output any global symbols that got converted to local in a version script or due to symbol visibility. We do this in a separate step since ELF requires all local symbols to appear prior to any global symbols. FIXME: We should only do this if some global symbols were, in fact, converted to become local. FIXME: Will this work correctly with the Irix 5 linker? */ eoinfo.failed = FALSE; eoinfo.finfo = &finfo; eoinfo.localsyms = TRUE; elf_link_hash_traverse (elf_hash_table (info), elf_link_output_extsym, &eoinfo); if (eoinfo.failed) return FALSE; /* If backend needs to output some local symbols not present in the hash table, do it now. */ if (bed->elf_backend_output_arch_local_syms) { typedef bfd_boolean (*out_sym_func) (void *, const char *, Elf_Internal_Sym *, asection *, struct elf_link_hash_entry *); if (! 
((*bed->elf_backend_output_arch_local_syms) (abfd, info, &finfo, (out_sym_func) elf_link_output_sym))) return FALSE; } /* That wrote out all the local symbols. Finish up the symbol table with the global symbols. Even if we want to strip everything we can, we still need to deal with those global symbols that got converted to local in a version script. */ /* The sh_info field records the index of the first non local symbol. */ symtab_hdr->sh_info = bfd_get_symcount (abfd); if (dynamic && finfo.dynsym_sec->output_section != bfd_abs_section_ptr) { Elf_Internal_Sym sym; bfd_byte *dynsym = finfo.dynsym_sec->contents; long last_local = 0; /* Write out the section symbols for the output sections. */ if (info->shared || elf_hash_table (info)->is_relocatable_executable) { asection *s; sym.st_size = 0; sym.st_name = 0; sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_SECTION); sym.st_other = 0; for (s = abfd->sections; s != NULL; s = s->next) { int indx; bfd_byte *dest; long dynindx; dynindx = elf_section_data (s)->dynindx; if (dynindx <= 0) continue; indx = elf_section_data (s)->this_idx; BFD_ASSERT (indx > 0); sym.st_shndx = indx; if (! check_dynsym (abfd, &sym)) return FALSE; sym.st_value = s->vma; dest = dynsym + dynindx * bed->s->sizeof_sym; if (last_local < dynindx) last_local = dynindx; bed->s->swap_symbol_out (abfd, &sym, dest, 0); } } /* Write out the local dynsyms. */ if (elf_hash_table (info)->dynlocal) { struct elf_link_local_dynamic_entry *e; for (e = elf_hash_table (info)->dynlocal; e ; e = e->next) { asection *s; bfd_byte *dest; sym.st_size = e->isym.st_size; sym.st_other = e->isym.st_other; /* Copy the internal symbol as is. Note that we saved a word of storage and overwrote the original st_name with the dynstr_index. 
*/ sym = e->isym; if (e->isym.st_shndx != SHN_UNDEF && (e->isym.st_shndx < SHN_LORESERVE || e->isym.st_shndx > SHN_HIRESERVE)) { s = bfd_section_from_elf_index (e->input_bfd, e->isym.st_shndx); sym.st_shndx = elf_section_data (s->output_section)->this_idx; if (! check_dynsym (abfd, &sym)) return FALSE; sym.st_value = (s->output_section->vma + s->output_offset + e->isym.st_value); } if (last_local < e->dynindx) last_local = e->dynindx; dest = dynsym + e->dynindx * bed->s->sizeof_sym; bed->s->swap_symbol_out (abfd, &sym, dest, 0); } } elf_section_data (finfo.dynsym_sec->output_section)->this_hdr.sh_info = last_local + 1; } /* We get the global symbols from the hash table. */ eoinfo.failed = FALSE; eoinfo.localsyms = FALSE; eoinfo.finfo = &finfo; elf_link_hash_traverse (elf_hash_table (info), elf_link_output_extsym, &eoinfo); if (eoinfo.failed) return FALSE; /* If backend needs to output some symbols not present in the hash table, do it now. */ if (bed->elf_backend_output_arch_syms) { typedef bfd_boolean (*out_sym_func) (void *, const char *, Elf_Internal_Sym *, asection *, struct elf_link_hash_entry *); if (! ((*bed->elf_backend_output_arch_syms) (abfd, info, &finfo, (out_sym_func) elf_link_output_sym))) return FALSE; } /* Flush all symbols to the file. */ if (! elf_link_flush_output_syms (&finfo, bed)) return FALSE; /* Now we know the size of the symtab section. 
*/ off += symtab_hdr->sh_size; symtab_shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr; if (symtab_shndx_hdr->sh_name != 0) { symtab_shndx_hdr->sh_type = SHT_SYMTAB_SHNDX; symtab_shndx_hdr->sh_entsize = sizeof (Elf_External_Sym_Shndx); symtab_shndx_hdr->sh_addralign = sizeof (Elf_External_Sym_Shndx); amt = bfd_get_symcount (abfd) * sizeof (Elf_External_Sym_Shndx); symtab_shndx_hdr->sh_size = amt; off = _bfd_elf_assign_file_position_for_section (symtab_shndx_hdr, off, TRUE); if (bfd_seek (abfd, symtab_shndx_hdr->sh_offset, SEEK_SET) != 0 || (bfd_bwrite (finfo.symshndxbuf, amt, abfd) != amt)) return FALSE; } /* Finish up and write out the symbol string table (.strtab) section. */ symstrtab_hdr = &elf_tdata (abfd)->strtab_hdr; /* sh_name was set in prep_headers. */ symstrtab_hdr->sh_type = SHT_STRTAB; symstrtab_hdr->sh_flags = 0; symstrtab_hdr->sh_addr = 0; symstrtab_hdr->sh_size = _bfd_stringtab_size (finfo.symstrtab); symstrtab_hdr->sh_entsize = 0; symstrtab_hdr->sh_link = 0; symstrtab_hdr->sh_info = 0; /* sh_offset is set just below. */ symstrtab_hdr->sh_addralign = 1; off = _bfd_elf_assign_file_position_for_section (symstrtab_hdr, off, TRUE); elf_tdata (abfd)->next_file_pos = off; if (bfd_get_symcount (abfd) > 0) { if (bfd_seek (abfd, symstrtab_hdr->sh_offset, SEEK_SET) != 0 || ! _bfd_stringtab_emit (abfd, finfo.symstrtab)) return FALSE; } /* Adjust the relocs to have the correct symbol indices. */ for (o = abfd->sections; o != NULL; o = o->next) { if ((o->flags & SEC_RELOC) == 0) continue; elf_link_adjust_relocs (abfd, &elf_section_data (o)->rel_hdr, elf_section_data (o)->rel_count, elf_section_data (o)->rel_hashes); if (elf_section_data (o)->rel_hdr2 != NULL) elf_link_adjust_relocs (abfd, elf_section_data (o)->rel_hdr2, elf_section_data (o)->rel_count2, (elf_section_data (o)->rel_hashes + elf_section_data (o)->rel_count)); /* Set the reloc_count field to 0 to prevent write_relocs from trying to swap the relocs out itself. 
*/ o->reloc_count = 0; } if (dynamic && info->combreloc && dynobj != NULL) relativecount = elf_link_sort_relocs (abfd, info, &reldyn); /* If we are linking against a dynamic object, or generating a shared library, finish up the dynamic linking information. */ if (dynamic) { bfd_byte *dyncon, *dynconend; /* Fix up .dynamic entries. */ o = bfd_get_section_by_name (dynobj, ".dynamic"); BFD_ASSERT (o != NULL); dyncon = o->contents; dynconend = o->contents + o->size; for (; dyncon < dynconend; dyncon += bed->s->sizeof_dyn) { Elf_Internal_Dyn dyn; const char *name; unsigned int type; bed->s->swap_dyn_in (dynobj, dyncon, &dyn); switch (dyn.d_tag) { default: continue; case DT_NULL: if (relativecount > 0 && dyncon + bed->s->sizeof_dyn < dynconend) { switch (elf_section_data (reldyn)->this_hdr.sh_type) { case SHT_REL: dyn.d_tag = DT_RELCOUNT; break; case SHT_RELA: dyn.d_tag = DT_RELACOUNT; break; default: continue; } dyn.d_un.d_val = relativecount; relativecount = 0; break; } continue; case DT_INIT: name = info->init_function; goto get_sym; case DT_FINI: name = info->fini_function; get_sym: { struct elf_link_hash_entry *h; h = elf_link_hash_lookup (elf_hash_table (info), name, FALSE, FALSE, TRUE); if (h != NULL && (h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak)) { dyn.d_un.d_val = h->root.u.def.value; o = h->root.u.def.section; if (o->output_section != NULL) dyn.d_un.d_val += (o->output_section->vma + o->output_offset); else { /* The symbol is imported from another shared library and does not apply to this one. 
*/ dyn.d_un.d_val = 0; } break; } } continue; case DT_PREINIT_ARRAYSZ: name = ".preinit_array"; goto get_size; case DT_INIT_ARRAYSZ: name = ".init_array"; goto get_size; case DT_FINI_ARRAYSZ: name = ".fini_array"; get_size: o = bfd_get_section_by_name (abfd, name); if (o == NULL) { (*_bfd_error_handler) (_("%B: could not find output section %s"), abfd, name); goto error_return; } if (o->size == 0) (*_bfd_error_handler) (_("warning: %s section has zero size"), name); dyn.d_un.d_val = o->size; break; case DT_PREINIT_ARRAY: name = ".preinit_array"; goto get_vma; case DT_INIT_ARRAY: name = ".init_array"; goto get_vma; case DT_FINI_ARRAY: name = ".fini_array"; goto get_vma; case DT_HASH: name = ".hash"; goto get_vma; case DT_GNU_HASH: name = ".gnu.hash"; goto get_vma; case DT_STRTAB: name = ".dynstr"; goto get_vma; case DT_SYMTAB: name = ".dynsym"; goto get_vma; case DT_VERDEF: name = ".gnu.version_d"; goto get_vma; case DT_VERNEED: name = ".gnu.version_r"; goto get_vma; case DT_VERSYM: name = ".gnu.version"; get_vma: o = bfd_get_section_by_name (abfd, name); if (o == NULL) { (*_bfd_error_handler) (_("%B: could not find output section %s"), abfd, name); goto error_return; } dyn.d_un.d_ptr = o->vma; break; case DT_REL: case DT_RELA: case DT_RELSZ: case DT_RELASZ: if (dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ) type = SHT_REL; else type = SHT_RELA; dyn.d_un.d_val = 0; for (i = 1; i < elf_numsections (abfd); i++) { Elf_Internal_Shdr *hdr; hdr = elf_elfsections (abfd)[i]; if (hdr->sh_type == type && (hdr->sh_flags & SHF_ALLOC) != 0) { if (dyn.d_tag == DT_RELSZ || dyn.d_tag == DT_RELASZ) dyn.d_un.d_val += hdr->sh_size; else { if (dyn.d_un.d_val == 0 || hdr->sh_addr < dyn.d_un.d_val) dyn.d_un.d_val = hdr->sh_addr; } } } break; } bed->s->swap_dyn_out (dynobj, &dyn, dyncon); } } /* If we have created any dynamic sections, then output them. */ if (dynobj != NULL) { if (! 
(*bed->elf_backend_finish_dynamic_sections) (abfd, info)) goto error_return; /* Check for DT_TEXTREL (late, in case the backend removes it). */ if (info->warn_shared_textrel && info->shared) { bfd_byte *dyncon, *dynconend; /* Fix up .dynamic entries. */ o = bfd_get_section_by_name (dynobj, ".dynamic"); BFD_ASSERT (o != NULL); dyncon = o->contents; dynconend = o->contents + o->size; for (; dyncon < dynconend; dyncon += bed->s->sizeof_dyn) { Elf_Internal_Dyn dyn; bed->s->swap_dyn_in (dynobj, dyncon, &dyn); if (dyn.d_tag == DT_TEXTREL) { info->callbacks->einfo (_("%P: warning: creating a DT_TEXTREL in a shared object.\n")); break; } } } for (o = dynobj->sections; o != NULL; o = o->next) { if ((o->flags & SEC_HAS_CONTENTS) == 0 || o->size == 0 || o->output_section == bfd_abs_section_ptr) continue; if ((o->flags & SEC_LINKER_CREATED) == 0) { /* At this point, we are only interested in sections created by _bfd_elf_link_create_dynamic_sections. */ continue; } if (elf_hash_table (info)->stab_info.stabstr == o) continue; if (elf_hash_table (info)->eh_info.hdr_sec == o) continue; if ((elf_section_data (o->output_section)->this_hdr.sh_type != SHT_STRTAB) || strcmp (bfd_get_section_name (abfd, o), ".dynstr") != 0) { if (! bfd_set_section_contents (abfd, o->output_section, o->contents, (file_ptr) o->output_offset, o->size)) goto error_return; } else { /* The contents of the .dynstr section are actually in a stringtab. */ off = elf_section_data (o->output_section)->this_hdr.sh_offset; if (bfd_seek (abfd, off, SEEK_SET) != 0 || ! _bfd_elf_strtab_emit (abfd, elf_hash_table (info)->dynstr)) goto error_return; } } } if (info->relocatable) { bfd_boolean failed = FALSE; bfd_map_over_sections (abfd, bfd_elf_set_group_contents, &failed); if (failed) goto error_return; } /* If we have optimized stabs strings, output them. */ if (elf_hash_table (info)->stab_info.stabstr != NULL) { if (! 
_bfd_write_stab_strings (abfd, &elf_hash_table (info)->stab_info)) goto error_return; } if (info->eh_frame_hdr) { if (! _bfd_elf_write_section_eh_frame_hdr (abfd, info)) goto error_return; } if (finfo.symstrtab != NULL) _bfd_stringtab_free (finfo.symstrtab); if (finfo.contents != NULL) free (finfo.contents); if (finfo.external_relocs != NULL) free (finfo.external_relocs); if (finfo.internal_relocs != NULL) free (finfo.internal_relocs); if (finfo.external_syms != NULL) free (finfo.external_syms); if (finfo.locsym_shndx != NULL) free (finfo.locsym_shndx); if (finfo.internal_syms != NULL) free (finfo.internal_syms); if (finfo.indices != NULL) free (finfo.indices); if (finfo.sections != NULL) free (finfo.sections); if (finfo.symbuf != NULL) free (finfo.symbuf); if (finfo.symshndxbuf != NULL) free (finfo.symshndxbuf); for (o = abfd->sections; o != NULL; o = o->next) { if ((o->flags & SEC_RELOC) != 0 && elf_section_data (o)->rel_hashes != NULL) free (elf_section_data (o)->rel_hashes); } elf_tdata (abfd)->linker = TRUE; if (attr_section) { bfd_byte *contents = bfd_malloc (attr_size); if (contents == NULL) goto error_return; bfd_elf_set_obj_attr_contents (abfd, contents, attr_size); bfd_set_section_contents (abfd, attr_section, contents, 0, attr_size); free (contents); } return TRUE; error_return: if (finfo.symstrtab != NULL) _bfd_stringtab_free (finfo.symstrtab); if (finfo.contents != NULL) free (finfo.contents); if (finfo.external_relocs != NULL) free (finfo.external_relocs); if (finfo.internal_relocs != NULL) free (finfo.internal_relocs); if (finfo.external_syms != NULL) free (finfo.external_syms); if (finfo.locsym_shndx != NULL) free (finfo.locsym_shndx); if (finfo.internal_syms != NULL) free (finfo.internal_syms); if (finfo.indices != NULL) free (finfo.indices); if (finfo.sections != NULL) free (finfo.sections); if (finfo.symbuf != NULL) free (finfo.symbuf); if (finfo.symshndxbuf != NULL) free (finfo.symshndxbuf); for (o = abfd->sections; o != NULL; o = o->next) { if 
((o->flags & SEC_RELOC) != 0
	  && elf_section_data (o)->rel_hashes != NULL)
	free (elf_section_data (o)->rel_hashes);
    }

  return FALSE;
}

/* Garbage collect unused sections.  */

/* Default gc_mark_hook.  Maps a reloc to the section that defines the
   symbol it refers to: the defining section for a hash-table symbol,
   the common section for commons, or — for a local symbol — the
   section named by the symbol's st_shndx in the owning bfd.  */

asection *
_bfd_elf_gc_mark_hook (asection *sec,
		       struct bfd_link_info *info ATTRIBUTE_UNUSED,
		       Elf_Internal_Rela *rel ATTRIBUTE_UNUSED,
		       struct elf_link_hash_entry *h,
		       Elf_Internal_Sym *sym)
{
  if (h != NULL)
    {
      switch (h->root.type)
	{
	case bfd_link_hash_defined:
	case bfd_link_hash_defweak:
	  return h->root.u.def.section;

	case bfd_link_hash_common:
	  return h->root.u.c.p->section;

	default:
	  break;
	}
    }
  else
    return bfd_section_from_elf_index (sec->owner, sym->st_shndx);

  return NULL;
}

/* The mark phase of garbage collection.  For a given section, mark
   it and any sections in this section's group, and all the sections
   which define symbols to which it refers.  */

bfd_boolean
_bfd_elf_gc_mark (struct bfd_link_info *info,
		  asection *sec,
		  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd_boolean ret;
  bfd_boolean is_eh;
  asection *group_sec;

  sec->gc_mark = 1;

  /* Mark all the sections in the group.  */
  group_sec = elf_section_data (sec)->next_in_group;
  if (group_sec && !group_sec->gc_mark)
    if (!_bfd_elf_gc_mark (info, group_sec, gc_mark_hook))
      return FALSE;

  /* Look through the section relocs.  */
  ret = TRUE;
  is_eh = strcmp (sec->name, ".eh_frame") == 0;
  if ((sec->flags & SEC_RELOC) != 0 && sec->reloc_count > 0)
    {
      Elf_Internal_Rela *relstart, *rel, *relend;
      Elf_Internal_Shdr *symtab_hdr;
      struct elf_link_hash_entry **sym_hashes;
      size_t nlocsyms;
      size_t extsymoff;
      bfd *input_bfd = sec->owner;
      const struct elf_backend_data *bed = get_elf_backend_data (input_bfd);
      Elf_Internal_Sym *isym = NULL;
      int r_sym_shift;

      symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
      sym_hashes = elf_sym_hashes (input_bfd);

      /* Read the local symbols.
*/
      /* With a bad symtab every symbol is treated as potentially local;
	 otherwise sh_info gives the count of leading local symbols.  */
      if (elf_bad_symtab (input_bfd))
	{
	  nlocsyms = symtab_hdr->sh_size / bed->s->sizeof_sym;
	  extsymoff = 0;
	}
      else
	extsymoff = nlocsyms = symtab_hdr->sh_info;

      isym = (Elf_Internal_Sym *) symtab_hdr->contents;
      if (isym == NULL && nlocsyms != 0)
	{
	  isym = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, nlocsyms, 0,
				       NULL, NULL, NULL);
	  if (isym == NULL)
	    return FALSE;
	}

      /* Read the relocations.  */
      relstart = _bfd_elf_link_read_relocs (input_bfd, sec, NULL, NULL,
					    info->keep_memory);
      if (relstart == NULL)
	{
	  ret = FALSE;
	  goto out1;
	}
      relend = relstart + sec->reloc_count * bed->s->int_rels_per_ext_rel;

      /* ELF32 keeps the symbol index in the top 24 bits of r_info
	 (shift 8); ELF64 keeps it in the top 32 bits (shift 32).  */
      if (bed->s->arch_size == 32)
	r_sym_shift = 8;
      else
	r_sym_shift = 32;

      for (rel = relstart; rel < relend; rel++)
	{
	  unsigned long r_symndx;
	  asection *rsec;
	  struct elf_link_hash_entry *h;

	  r_symndx = rel->r_info >> r_sym_shift;
	  if (r_symndx == 0)
	    continue;

	  if (r_symndx >= nlocsyms
	      || ELF_ST_BIND (isym[r_symndx].st_info) != STB_LOCAL)
	    {
	      /* Global symbol: chase indirect/warning links before
		 asking the hook which section it pins down.  */
	      h = sym_hashes[r_symndx - extsymoff];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;
	      rsec = (*gc_mark_hook) (sec, info, rel, h, NULL);
	    }
	  else
	    {
	      rsec = (*gc_mark_hook) (sec, info, rel, NULL, &isym[r_symndx]);
	    }

	  if (rsec && !rsec->gc_mark)
	    {
	      /* References from .eh_frame are only recorded, not
		 propagated; a non-ELF section is simply marked.  */
	      if (bfd_get_flavour (rsec->owner) != bfd_target_elf_flavour)
		rsec->gc_mark = 1;
	      else if (is_eh)
		rsec->gc_mark_from_eh = 1;
	      else if (!_bfd_elf_gc_mark (info, rsec, gc_mark_hook))
		{
		  ret = FALSE;
		  goto out2;
		}
	    }
	}

    out2:
      if (elf_section_data (sec)->relocs != relstart)
	free (relstart);
    out1:
      /* Either release the symbols or cache them on the header,
	 mirroring how they were obtained above.  */
      if (isym != NULL && symtab_hdr->contents != (unsigned char *) isym)
	{
	  if (! info->keep_memory)
	    free (isym);
	  else
	    symtab_hdr->contents = (unsigned char *) isym;
	}
    }

  return ret;
}

/* Sweep symbols in swept sections.  Called via elf_link_hash_traverse.
*/ struct elf_gc_sweep_symbol_info { struct bfd_link_info *info; void (*hide_symbol) (struct bfd_link_info *, struct elf_link_hash_entry *, bfd_boolean); }; static bfd_boolean elf_gc_sweep_symbol (struct elf_link_hash_entry *h, void *data) { if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if ((h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak) && !h->root.u.def.section->gc_mark && !(h->root.u.def.section->owner->flags & DYNAMIC)) { struct elf_gc_sweep_symbol_info *inf = data; (*inf->hide_symbol) (inf->info, h, TRUE); } return TRUE; } /* The sweep phase of garbage collection. Remove all garbage sections. */ typedef bfd_boolean (*gc_sweep_hook_fn) (bfd *, struct bfd_link_info *, asection *, const Elf_Internal_Rela *); static bfd_boolean elf_gc_sweep (bfd *abfd, struct bfd_link_info *info) { bfd *sub; const struct elf_backend_data *bed = get_elf_backend_data (abfd); gc_sweep_hook_fn gc_sweep_hook = bed->gc_sweep_hook; unsigned long section_sym_count; struct elf_gc_sweep_symbol_info sweep_info; for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) { asection *o; if (bfd_get_flavour (sub) != bfd_target_elf_flavour) continue; for (o = sub->sections; o != NULL; o = o->next) { /* Keep debug and special sections. */ if ((o->flags & (SEC_DEBUGGING | SEC_LINKER_CREATED)) != 0 || elf_section_data (o)->this_hdr.sh_type == SHT_NOTE || (o->flags & (SEC_ALLOC | SEC_LOAD | SEC_RELOC)) == 0) o->gc_mark = 1; if (o->gc_mark) continue; /* Skip sweeping sections already excluded. */ if (o->flags & SEC_EXCLUDE) continue; /* Since this is early in the link process, it is simple to remove a section from the output. */ o->flags |= SEC_EXCLUDE; if (info->print_gc_sections && o->size != 0) _bfd_error_handler (_("Removing unused section '%s' in file '%B'"), sub, o->name); /* But we also have to update some of the relocation info we collected before. 
*/ if (gc_sweep_hook && (o->flags & SEC_RELOC) != 0 && o->reloc_count > 0 && !bfd_is_abs_section (o->output_section)) { Elf_Internal_Rela *internal_relocs; bfd_boolean r; internal_relocs = _bfd_elf_link_read_relocs (o->owner, o, NULL, NULL, info->keep_memory); if (internal_relocs == NULL) return FALSE; r = (*gc_sweep_hook) (o->owner, info, o, internal_relocs); if (elf_section_data (o)->relocs != internal_relocs) free (internal_relocs); if (!r) return FALSE; } } } /* Remove the symbols that were in the swept sections from the dynamic symbol table. GCFIXME: Anyone know how to get them out of the static symbol table as well? */ sweep_info.info = info; sweep_info.hide_symbol = bed->elf_backend_hide_symbol; elf_link_hash_traverse (elf_hash_table (info), elf_gc_sweep_symbol, &sweep_info); _bfd_elf_link_renumber_dynsyms (abfd, info, &section_sym_count); return TRUE; } /* Propagate collected vtable information. This is called through elf_link_hash_traverse. */ static bfd_boolean elf_gc_propagate_vtable_entries_used (struct elf_link_hash_entry *h, void *okp) { if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* Those that are not vtables. */ if (h->vtable == NULL || h->vtable->parent == NULL) return TRUE; /* Those vtables that do not have parents, we cannot merge. */ if (h->vtable->parent == (struct elf_link_hash_entry *) -1) return TRUE; /* If we've already been done, exit. */ if (h->vtable->used && h->vtable->used[-1]) return TRUE; /* Make sure the parent's table is up to date. */ elf_gc_propagate_vtable_entries_used (h->vtable->parent, okp); if (h->vtable->used == NULL) { /* None of this table's entries were referenced. Re-use the parent's table. */ h->vtable->used = h->vtable->parent->vtable->used; h->vtable->size = h->vtable->parent->vtable->size; } else { size_t n; bfd_boolean *cu, *pu; /* Or the parent's entries into ours. 
*/ cu = h->vtable->used; cu[-1] = TRUE; pu = h->vtable->parent->vtable->used; if (pu != NULL) { const struct elf_backend_data *bed; unsigned int log_file_align; bed = get_elf_backend_data (h->root.u.def.section->owner); log_file_align = bed->s->log_file_align; n = h->vtable->parent->vtable->size >> log_file_align; while (n--) { if (*pu) *cu = TRUE; pu++; cu++; } } } return TRUE; } static bfd_boolean elf_gc_smash_unused_vtentry_relocs (struct elf_link_hash_entry *h, void *okp) { asection *sec; bfd_vma hstart, hend; Elf_Internal_Rela *relstart, *relend, *rel; const struct elf_backend_data *bed; unsigned int log_file_align; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; /* Take care of both those symbols that do not describe vtables as well as those that are not loaded. */ if (h->vtable == NULL || h->vtable->parent == NULL) return TRUE; BFD_ASSERT (h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak); sec = h->root.u.def.section; hstart = h->root.u.def.value; hend = hstart + h->size; relstart = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL, TRUE); if (!relstart) return *(bfd_boolean *) okp = FALSE; bed = get_elf_backend_data (sec->owner); log_file_align = bed->s->log_file_align; relend = relstart + sec->reloc_count * bed->s->int_rels_per_ext_rel; for (rel = relstart; rel < relend; ++rel) if (rel->r_offset >= hstart && rel->r_offset < hend) { /* If the entry is in use, do nothing. */ if (h->vtable->used && (rel->r_offset - hstart) < h->vtable->size) { bfd_vma entry = (rel->r_offset - hstart) >> log_file_align; if (h->vtable->used[entry]) continue; } /* Otherwise, kill it. */ rel->r_offset = rel->r_info = rel->r_addend = 0; } return TRUE; } /* Mark sections containing dynamically referenced symbols. When building shared libraries, we must assume that any visible symbol is referenced. 
*/ bfd_boolean bfd_elf_gc_mark_dynamic_ref_symbol (struct elf_link_hash_entry *h, void *inf) { struct bfd_link_info *info = (struct bfd_link_info *) inf; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if ((h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak) && (h->ref_dynamic || (!info->executable && h->def_regular && ELF_ST_VISIBILITY (h->other) != STV_INTERNAL && ELF_ST_VISIBILITY (h->other) != STV_HIDDEN))) h->root.u.def.section->flags |= SEC_KEEP; return TRUE; } /* Do mark and sweep of unused sections. */ bfd_boolean bfd_elf_gc_sections (bfd *abfd, struct bfd_link_info *info) { bfd_boolean ok = TRUE; bfd *sub; elf_gc_mark_hook_fn gc_mark_hook; const struct elf_backend_data *bed = get_elf_backend_data (abfd); if (!bed->can_gc_sections || info->relocatable || info->emitrelocations || !is_elf_hash_table (info->hash)) { (*_bfd_error_handler)(_("Warning: gc-sections option ignored")); return TRUE; } /* Apply transitive closure to the vtable entry usage info. */ elf_link_hash_traverse (elf_hash_table (info), elf_gc_propagate_vtable_entries_used, &ok); if (!ok) return FALSE; /* Kill the vtable relocations that were not used. */ elf_link_hash_traverse (elf_hash_table (info), elf_gc_smash_unused_vtentry_relocs, &ok); if (!ok) return FALSE; /* Mark dynamically referenced symbols. */ if (elf_hash_table (info)->dynamic_sections_created) elf_link_hash_traverse (elf_hash_table (info), bed->gc_mark_dynamic_ref, info); /* Grovel through relocs to find out who stays ... */ gc_mark_hook = bed->gc_mark_hook; for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) { asection *o; if (bfd_get_flavour (sub) != bfd_target_elf_flavour) continue; for (o = sub->sections; o != NULL; o = o->next) if ((o->flags & (SEC_EXCLUDE | SEC_KEEP)) == SEC_KEEP && !o->gc_mark) if (!_bfd_elf_gc_mark (info, o, gc_mark_hook)) return FALSE; } /* Allow the backend to mark additional target specific sections. 
*/ if (bed->gc_mark_extra_sections) bed->gc_mark_extra_sections(info, gc_mark_hook); /* ... again for sections marked from eh_frame. */ for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) { asection *o; if (bfd_get_flavour (sub) != bfd_target_elf_flavour) continue; /* Keep .gcc_except_table.* if the associated .text.* (or the associated .gnu.linkonce.t.* if .text.* doesn't exist) is marked. This isn't very nice, but the proper solution, splitting .eh_frame up and using comdat doesn't pan out easily due to needing special relocs to handle the difference of two symbols in separate sections. Don't keep code sections referenced by .eh_frame. */ #define TEXT_PREFIX ".text." #define TEXT_PREFIX2 ".gnu.linkonce.t." #define GCC_EXCEPT_TABLE_PREFIX ".gcc_except_table." for (o = sub->sections; o != NULL; o = o->next) if (!o->gc_mark && o->gc_mark_from_eh && (o->flags & SEC_CODE) == 0) { if (CONST_STRNEQ (o->name, GCC_EXCEPT_TABLE_PREFIX)) { char *fn_name; const char *sec_name; asection *fn_text; unsigned o_name_prefix_len , fn_name_prefix_len, tmp; o_name_prefix_len = strlen (GCC_EXCEPT_TABLE_PREFIX); sec_name = o->name + o_name_prefix_len; fn_name_prefix_len = strlen (TEXT_PREFIX); tmp = strlen (TEXT_PREFIX2); if (tmp > fn_name_prefix_len) fn_name_prefix_len = tmp; fn_name = bfd_malloc (fn_name_prefix_len + strlen (sec_name) + 1); if (fn_name == NULL) return FALSE; /* Try the first prefix. */ sprintf (fn_name, "%s%s", TEXT_PREFIX, sec_name); fn_text = bfd_get_section_by_name (sub, fn_name); /* Try the second prefix. */ if (fn_text == NULL) { sprintf (fn_name, "%s%s", TEXT_PREFIX2, sec_name); fn_text = bfd_get_section_by_name (sub, fn_name); } free (fn_name); if (fn_text == NULL || !fn_text->gc_mark) continue; } /* If not using specially named exception table section, then keep whatever we are using. */ if (!_bfd_elf_gc_mark (info, o, gc_mark_hook)) return FALSE; } } /* ... and mark SEC_EXCLUDE for those that go. 
*/ return elf_gc_sweep (abfd, info); } /* Called from check_relocs to record the existence of a VTINHERIT reloc. */ bfd_boolean bfd_elf_gc_record_vtinherit (bfd *abfd, asection *sec, struct elf_link_hash_entry *h, bfd_vma offset) { struct elf_link_hash_entry **sym_hashes, **sym_hashes_end; struct elf_link_hash_entry **search, *child; bfd_size_type extsymcount; const struct elf_backend_data *bed = get_elf_backend_data (abfd); /* The sh_info field of the symtab header tells us where the external symbols start. We don't care about the local symbols at this point. */ extsymcount = elf_tdata (abfd)->symtab_hdr.sh_size / bed->s->sizeof_sym; if (!elf_bad_symtab (abfd)) extsymcount -= elf_tdata (abfd)->symtab_hdr.sh_info; sym_hashes = elf_sym_hashes (abfd); sym_hashes_end = sym_hashes + extsymcount; /* Hunt down the child symbol, which is in this section at the same offset as the relocation. */ for (search = sym_hashes; search != sym_hashes_end; ++search) { if ((child = *search) != NULL && (child->root.type == bfd_link_hash_defined || child->root.type == bfd_link_hash_defweak) && child->root.u.def.section == sec && child->root.u.def.value == offset) goto win; } (*_bfd_error_handler) ("%B: %A+%lu: No symbol found for INHERIT", abfd, sec, (unsigned long) offset); bfd_set_error (bfd_error_invalid_operation); return FALSE; win: if (!child->vtable) { child->vtable = bfd_zalloc (abfd, sizeof (*child->vtable)); if (!child->vtable) return FALSE; } if (!h) { /* This *should* only be the absolute section. It could potentially be that someone has defined a non-global vtable though, which would be bad. It isn't worth paging in the local symbols to be sure though; that case should simply be handled by the assembler. */ child->vtable->parent = (struct elf_link_hash_entry *) -1; } else child->vtable->parent = h; return TRUE; } /* Called from check_relocs to record the existence of a VTENTRY reloc. 
*/ bfd_boolean bfd_elf_gc_record_vtentry (bfd *abfd ATTRIBUTE_UNUSED, asection *sec ATTRIBUTE_UNUSED, struct elf_link_hash_entry *h, bfd_vma addend) { const struct elf_backend_data *bed = get_elf_backend_data (abfd); unsigned int log_file_align = bed->s->log_file_align; if (!h->vtable) { h->vtable = bfd_zalloc (abfd, sizeof (*h->vtable)); if (!h->vtable) return FALSE; } if (addend >= h->vtable->size) { size_t size, bytes, file_align; bfd_boolean *ptr = h->vtable->used; /* While the symbol is undefined, we have to be prepared to handle a zero size. */ file_align = 1 << log_file_align; if (h->root.type == bfd_link_hash_undefined) size = addend + file_align; else { size = h->size; if (addend >= size) { /* Oops! We've got a reference past the defined end of the table. This is probably a bug -- shall we warn? */ size = addend + file_align; } } size = (size + file_align - 1) & -file_align; /* Allocate one extra entry for use as a "done" flag for the consolidation pass. */ bytes = ((size >> log_file_align) + 1) * sizeof (bfd_boolean); if (ptr) { ptr = bfd_realloc (ptr - 1, bytes); if (ptr != NULL) { size_t oldbytes; oldbytes = (((h->vtable->size >> log_file_align) + 1) * sizeof (bfd_boolean)); memset (((char *) ptr) + oldbytes, 0, bytes - oldbytes); } } else ptr = bfd_zmalloc (bytes); if (ptr == NULL) return FALSE; /* And arrange for that done flag to be at index -1. */ h->vtable->used = ptr + 1; h->vtable->size = size; } h->vtable->used[addend >> log_file_align] = TRUE; return TRUE; } struct alloc_got_off_arg { bfd_vma gotoff; unsigned int got_elt_size; }; /* We need a special top-level link routine to convert got reference counts to real got offsets. 
*/ static bfd_boolean elf_gc_allocate_got_offsets (struct elf_link_hash_entry *h, void *arg) { struct alloc_got_off_arg *gofarg = arg; if (h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if (h->got.refcount > 0) { h->got.offset = gofarg->gotoff; gofarg->gotoff += gofarg->got_elt_size; } else h->got.offset = (bfd_vma) -1; return TRUE; } /* And an accompanying bit to work out final got entry offsets once we're done. Should be called from final_link. */ bfd_boolean bfd_elf_gc_common_finalize_got_offsets (bfd *abfd, struct bfd_link_info *info) { bfd *i; const struct elf_backend_data *bed = get_elf_backend_data (abfd); bfd_vma gotoff; unsigned int got_elt_size = bed->s->arch_size / 8; struct alloc_got_off_arg gofarg; if (! is_elf_hash_table (info->hash)) return FALSE; /* The GOT offset is relative to the .got section, but the GOT header is put into the .got.plt section, if the backend uses it. */ if (bed->want_got_plt) gotoff = 0; else gotoff = bed->got_header_size; /* Do the local .got entries first. */ for (i = info->input_bfds; i; i = i->link_next) { bfd_signed_vma *local_got; bfd_size_type j, locsymcount; Elf_Internal_Shdr *symtab_hdr; if (bfd_get_flavour (i) != bfd_target_elf_flavour) continue; local_got = elf_local_got_refcounts (i); if (!local_got) continue; symtab_hdr = &elf_tdata (i)->symtab_hdr; if (elf_bad_symtab (i)) locsymcount = symtab_hdr->sh_size / bed->s->sizeof_sym; else locsymcount = symtab_hdr->sh_info; for (j = 0; j < locsymcount; ++j) { if (local_got[j] > 0) { local_got[j] = gotoff; gotoff += got_elt_size; } else local_got[j] = (bfd_vma) -1; } } /* Then the global .got entries. .plt refcounts are handled by adjust_dynamic_symbol */ gofarg.gotoff = gotoff; gofarg.got_elt_size = got_elt_size; elf_link_hash_traverse (elf_hash_table (info), elf_gc_allocate_got_offsets, &gofarg); return TRUE; } /* Many folk need no more in the way of final link than this, once got entry reference counting is enabled. 
*/ bfd_boolean bfd_elf_gc_common_final_link (bfd *abfd, struct bfd_link_info *info) { if (!bfd_elf_gc_common_finalize_got_offsets (abfd, info)) return FALSE; /* Invoke the regular ELF backend linker to do all the work. */ return bfd_elf_final_link (abfd, info); } bfd_boolean bfd_elf_reloc_symbol_deleted_p (bfd_vma offset, void *cookie) { struct elf_reloc_cookie *rcookie = cookie; if (rcookie->bad_symtab) rcookie->rel = rcookie->rels; for (; rcookie->rel < rcookie->relend; rcookie->rel++) { unsigned long r_symndx; if (! rcookie->bad_symtab) if (rcookie->rel->r_offset > offset) return FALSE; if (rcookie->rel->r_offset != offset) continue; r_symndx = rcookie->rel->r_info >> rcookie->r_sym_shift; if (r_symndx == SHN_UNDEF) return TRUE; if (r_symndx >= rcookie->locsymcount || ELF_ST_BIND (rcookie->locsyms[r_symndx].st_info) != STB_LOCAL) { struct elf_link_hash_entry *h; h = rcookie->sym_hashes[r_symndx - rcookie->extsymoff]; while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; if ((h->root.type == bfd_link_hash_defined || h->root.type == bfd_link_hash_defweak) && elf_discarded_section (h->root.u.def.section)) return TRUE; else return FALSE; } else { /* It's not a relocation against a global symbol, but it could be a relocation against a local symbol for a discarded section. */ asection *isec; Elf_Internal_Sym *isym; /* Need to: get the symbol; get the section. */ isym = &rcookie->locsyms[r_symndx]; if (isym->st_shndx < SHN_LORESERVE || isym->st_shndx > SHN_HIRESERVE) { isec = bfd_section_from_elf_index (rcookie->abfd, isym->st_shndx); if (isec != NULL && elf_discarded_section (isec)) return TRUE; } } return FALSE; } return FALSE; } /* Discard unneeded references to discarded sections. Returns TRUE if any section's size was changed. */ /* This function assumes that the relocations are in sorted order, which is true for all known assemblers. 
*/ bfd_boolean bfd_elf_discard_info (bfd *output_bfd, struct bfd_link_info *info) { struct elf_reloc_cookie cookie; asection *stab, *eh; Elf_Internal_Shdr *symtab_hdr; const struct elf_backend_data *bed; bfd *abfd; unsigned int count; bfd_boolean ret = FALSE; if (info->traditional_format || !is_elf_hash_table (info->hash)) return FALSE; for (abfd = info->input_bfds; abfd != NULL; abfd = abfd->link_next) { if (bfd_get_flavour (abfd) != bfd_target_elf_flavour) continue; bed = get_elf_backend_data (abfd); if ((abfd->flags & DYNAMIC) != 0) continue; eh = NULL; if (!info->relocatable) { eh = bfd_get_section_by_name (abfd, ".eh_frame"); if (eh != NULL && (eh->size == 0 || bfd_is_abs_section (eh->output_section))) eh = NULL; } stab = bfd_get_section_by_name (abfd, ".stab"); if (stab != NULL && (stab->size == 0 || bfd_is_abs_section (stab->output_section) || stab->sec_info_type != ELF_INFO_TYPE_STABS)) stab = NULL; if (stab == NULL && eh == NULL && bed->elf_backend_discard_info == NULL) continue; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; cookie.abfd = abfd; cookie.sym_hashes = elf_sym_hashes (abfd); cookie.bad_symtab = elf_bad_symtab (abfd); if (cookie.bad_symtab) { cookie.locsymcount = symtab_hdr->sh_size / bed->s->sizeof_sym; cookie.extsymoff = 0; } else { cookie.locsymcount = symtab_hdr->sh_info; cookie.extsymoff = symtab_hdr->sh_info; } if (bed->s->arch_size == 32) cookie.r_sym_shift = 8; else cookie.r_sym_shift = 32; cookie.locsyms = (Elf_Internal_Sym *) symtab_hdr->contents; if (cookie.locsyms == NULL && cookie.locsymcount != 0) { cookie.locsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, cookie.locsymcount, 0, NULL, NULL, NULL); if (cookie.locsyms == NULL) { info->callbacks->einfo (_("%P%X: can not read symbols: %E\n")); return FALSE; } } if (stab != NULL) { cookie.rels = NULL; count = stab->reloc_count; if (count != 0) cookie.rels = _bfd_elf_link_read_relocs (abfd, stab, NULL, NULL, info->keep_memory); if (cookie.rels != NULL) { cookie.rel = cookie.rels; 
cookie.relend = cookie.rels; cookie.relend += count * bed->s->int_rels_per_ext_rel; if (_bfd_discard_section_stabs (abfd, stab, elf_section_data (stab)->sec_info, bfd_elf_reloc_symbol_deleted_p, &cookie)) ret = TRUE; if (elf_section_data (stab)->relocs != cookie.rels) free (cookie.rels); } } if (eh != NULL) { cookie.rels = NULL; count = eh->reloc_count; if (count != 0) cookie.rels = _bfd_elf_link_read_relocs (abfd, eh, NULL, NULL, info->keep_memory); cookie.rel = cookie.rels; cookie.relend = cookie.rels; if (cookie.rels != NULL) cookie.relend += count * bed->s->int_rels_per_ext_rel; if (_bfd_elf_discard_section_eh_frame (abfd, info, eh, bfd_elf_reloc_symbol_deleted_p, &cookie)) ret = TRUE; if (cookie.rels != NULL && elf_section_data (eh)->relocs != cookie.rels) free (cookie.rels); } if (bed->elf_backend_discard_info != NULL && (*bed->elf_backend_discard_info) (abfd, &cookie, info)) ret = TRUE; if (cookie.locsyms != NULL && symtab_hdr->contents != (unsigned char *) cookie.locsyms) { if (! info->keep_memory) free (cookie.locsyms); else symtab_hdr->contents = (unsigned char *) cookie.locsyms; } } if (info->eh_frame_hdr && !info->relocatable && _bfd_elf_discard_section_eh_frame_hdr (output_bfd, info)) ret = TRUE; return ret; } void _bfd_elf_section_already_linked (bfd *abfd, struct bfd_section *sec, struct bfd_link_info *info) { flagword flags; const char *name, *p; struct bfd_section_already_linked *l; struct bfd_section_already_linked_hash_entry *already_linked_list; if (sec->output_section == bfd_abs_section_ptr) return; flags = sec->flags; /* Return if it isn't a linkonce section. A comdat group section also has SEC_LINK_ONCE set. */ if ((flags & SEC_LINK_ONCE) == 0) return; /* Don't put group member sections on our list of already linked sections. They are handled as a group via their group section. 
*/ if (elf_sec_group (sec) != NULL) return; /* FIXME: When doing a relocatable link, we may have trouble copying relocations in other sections that refer to local symbols in the section being discarded. Those relocations will have to be converted somehow; as of this writing I'm not sure that any of the backends handle that correctly. It is tempting to instead not discard link once sections when doing a relocatable link (technically, they should be discarded whenever we are building constructors). However, that fails, because the linker winds up combining all the link once sections into a single large link once section, which defeats the purpose of having link once sections in the first place. Also, not merging link once sections in a relocatable link causes trouble for MIPS ELF, which relies on link once semantics to handle the .reginfo section correctly. */ name = bfd_get_section_name (abfd, sec); if (CONST_STRNEQ (name, ".gnu.linkonce.") && (p = strchr (name + sizeof (".gnu.linkonce.") - 1, '.')) != NULL) p++; else p = name; already_linked_list = bfd_section_already_linked_table_lookup (p); for (l = already_linked_list->entry; l != NULL; l = l->next) { /* We may have 2 different types of sections on the list: group sections and linkonce sections. Match like sections. */ if ((flags & SEC_GROUP) == (l->sec->flags & SEC_GROUP) && strcmp (name, l->sec->name) == 0 && bfd_coff_get_comdat_section (l->sec->owner, l->sec) == NULL) { /* The section has already been linked. See if we should issue a warning. 
*/ switch (flags & SEC_LINK_DUPLICATES) { default: abort (); case SEC_LINK_DUPLICATES_DISCARD: break; case SEC_LINK_DUPLICATES_ONE_ONLY: (*_bfd_error_handler) (_("%B: ignoring duplicate section `%A'"), abfd, sec); break; case SEC_LINK_DUPLICATES_SAME_SIZE: if (sec->size != l->sec->size) (*_bfd_error_handler) (_("%B: duplicate section `%A' has different size"), abfd, sec); break; case SEC_LINK_DUPLICATES_SAME_CONTENTS: if (sec->size != l->sec->size) (*_bfd_error_handler) (_("%B: duplicate section `%A' has different size"), abfd, sec); else if (sec->size != 0) { bfd_byte *sec_contents, *l_sec_contents = NULL; if (!bfd_malloc_and_get_section (abfd, sec, &sec_contents)) (*_bfd_error_handler) (_("%B: warning: could not read contents of section `%A'"), abfd, sec); else if (!bfd_malloc_and_get_section (l->sec->owner, l->sec, &l_sec_contents)) (*_bfd_error_handler) (_("%B: warning: could not read contents of section `%A'"), l->sec->owner, l->sec); else if (memcmp (sec_contents, l_sec_contents, sec->size) != 0) (*_bfd_error_handler) (_("%B: warning: duplicate section `%A' has different contents"), abfd, sec); if (sec_contents) free (sec_contents); if (l_sec_contents) free (l_sec_contents); } break; } /* Set the output_section field so that lang_add_section does not create a lang_input_section structure for this section. Since there might be a symbol in the section being discarded, we must retain a pointer to the section which we are really going to use. */ sec->output_section = bfd_abs_section_ptr; sec->kept_section = l->sec; if (flags & SEC_GROUP) { asection *first = elf_next_in_group (sec); asection *s = first; while (s != NULL) { s->output_section = bfd_abs_section_ptr; /* Record which group discards it. */ s->kept_section = l->sec; s = elf_next_in_group (s); /* These lists are circular. */ if (s == first) break; } } return; } } /* A single member comdat group section may be discarded by a linkonce section and vice versa. 
*/ if ((flags & SEC_GROUP) != 0) { asection *first = elf_next_in_group (sec); if (first != NULL && elf_next_in_group (first) == first) /* Check this single member group against linkonce sections. */ for (l = already_linked_list->entry; l != NULL; l = l->next) if ((l->sec->flags & SEC_GROUP) == 0 && bfd_coff_get_comdat_section (l->sec->owner, l->sec) == NULL && bfd_elf_match_symbols_in_sections (l->sec, first, info)) { first->output_section = bfd_abs_section_ptr; first->kept_section = l->sec; sec->output_section = bfd_abs_section_ptr; break; } } else /* Check this linkonce section against single member groups. */ for (l = already_linked_list->entry; l != NULL; l = l->next) if (l->sec->flags & SEC_GROUP) { asection *first = elf_next_in_group (l->sec); if (first != NULL && elf_next_in_group (first) == first && bfd_elf_match_symbols_in_sections (first, sec, info)) { sec->output_section = bfd_abs_section_ptr; sec->kept_section = first; break; } } /* This is the first section with this name. Record it. */ bfd_section_already_linked_table_insert (already_linked_list, sec); } bfd_boolean _bfd_elf_common_definition (Elf_Internal_Sym *sym) { return sym->st_shndx == SHN_COMMON; } unsigned int _bfd_elf_common_section_index (asection *sec ATTRIBUTE_UNUSED) { return SHN_COMMON; } asection * _bfd_elf_common_section (asection *sec ATTRIBUTE_UNUSED) { return bfd_com_section_ptr; }
829840.c
#include "curve25519-donna.h" #include "randombytes.h" #if !defined(CURVE25519_SUFFIX) #define CURVE25519_SUFFIX #endif #define CURVE25519_FN3(fn,suffix) fn##suffix #define CURVE25519_FN2(fn,suffix) CURVE25519_FN3(fn,suffix) #define CURVE25519_FN(fn) CURVE25519_FN2(fn,CURVE25519_SUFFIX) #ifdef PRIVATE_API static #endif void CURVE25519_FN(curve25519_donna) (curve25519_key mypublic, const curve25519_key secret, const curve25519_key basepoint) { curve25519_key e; size_t i; for (i = 0;i < 32;++i) e[i] = secret[i]; e[0] &= 0xf8; e[31] &= 0x7f; e[31] |= 0x40; curve25519_scalarmult_donna(mypublic, e, basepoint); } #ifdef PRIVATE_API static #endif void CURVE25519_FN(curve25519_donna_basepoint) (curve25519_key mypublic, const curve25519_key secret) { static const curve25519_key basepoint = {9}; CURVE25519_FN(curve25519_donna)(mypublic, secret, basepoint); } #ifdef PRIVATE_API static #endif void CURVE25519_FN(curve25519_donna_keypair) (curve25519_key mypublic, curve25519_key mysecret) { randombytes(mysecret, 32); curve25519_donna_basepoint(mypublic, mysecret); }
753628.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__wchar_t_listen_socket_execl_13.c
Label Definition File: CWE78_OS_Command_Injection.no_path.label.xml
Template File: sources-sink-13.tmpl.c
*/
/*
 * @description
 * CWE: 78 OS Command Injection
 * BadSource: listen_socket Read data using a listen socket (server side)
 * GoodSource: Benign input
 * Sink: execl
 *    BadSink : execute command with wexecl
 * Flow Variant: 13 Control flow: if(global_const_five==5) and if(global_const_five!=5)
 *
 * NOTE(review): This is a generated Juliet/SARD security testcase.  The
 * command-injection flaw in the bad() path is intentional and must NOT be
 * "fixed" -- static-analysis tools are scored on detecting it.
 * */

#include "std_testcase.h"

#include <wchar.h>

#ifdef _WIN32
# define COMMAND_INT_PATH L"%WINDIR%\\system32\\cmd.exe"
# define COMMAND_INT L"cmd.exe"
# define COMMAND_ARG1 L"/c"
# define COMMAND_ARG2 L"dir"
# define COMMAND_ARG3 data
#else /* NOT _WIN32 */
# define COMMAND_INT_PATH L"/bin/sh"
# define COMMAND_INT L"sh"
# define COMMAND_ARG1 L"ls"
# define COMMAND_ARG2 data
# define COMMAND_ARG3 NULL
#endif

#ifdef _WIN32
# include <winsock2.h>
# include <windows.h>
# include <direct.h>
# define PATH_SZ 100
# pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
# define CLOSE_SOCKET closesocket
#else
# define PATH_SZ PATH_MAX
# define INVALID_SOCKET -1
# define SOCKET_ERROR -1
# define CLOSE_SOCKET close
# define SOCKET int
#endif

#define TCP_PORT 27015
#define LISTEN_BACKLOG 5

#ifdef _WIN32
#include <process.h>
# define EXECL _wexecl
#else /* NOT _WIN32 */
# define EXECL wexecl
#endif

#ifndef OMITBAD

/* bad(): data is read from an accepted TCP connection (tainted source)
   and passed unvalidated to EXECL (command-injection sink).  */
void CWE78_OS_Command_Injection__wchar_t_listen_socket_execl_13_bad()
{
    wchar_t * data;
    wchar_t data_buf[100] = L"";
    data = data_buf;
    if(global_const_five==5)
    {
        {
#ifdef _WIN32
            WSADATA wsa_data;
            int wsa_data_init = 0;
#endif
            int recv_rv;
            struct sockaddr_in s_in;
            wchar_t *replace;
            SOCKET listen_socket = INVALID_SOCKET;
            SOCKET accept_socket = INVALID_SOCKET;
            size_t data_len = wcslen(data);
            do
            {
#ifdef _WIN32
                if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break;
                wsa_data_init = 1;
#endif
                listen_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
                if (listen_socket == INVALID_SOCKET) break;
                memset(&s_in, 0, sizeof(s_in));
                s_in.sin_family = AF_INET;
                s_in.sin_addr.s_addr = INADDR_ANY;
                s_in.sin_port = htons(TCP_PORT);
                if (bind(listen_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break;
                if (listen(listen_socket, LISTEN_BACKLOG) == SOCKET_ERROR) break;
                accept_socket = accept(listen_socket, NULL, NULL);
                if (accept_socket == SOCKET_ERROR) break;
                /* Abort on error or the connection was closed */
                /* NOTE(review): recv() writes raw BYTES into a wchar_t
                   buffer, yet recv_rv is then used as a wchar_t INDEX --
                   an incidental byte/wide-char mixup present in the
                   generated template; left as-is intentionally.  */
                recv_rv = recv(accept_socket, (char *)data+data_len, (int)(100-data_len-1), 0);
                if (recv_rv == SOCKET_ERROR || recv_rv == 0) break;
                /* Append null terminator */
                data[recv_rv] = L'\0';
                /* Eliminate CRLF */
                replace = wcschr(data, L'\r');
                if (replace) *replace = L'\0';
                replace = wcschr(data, L'\n');
                if (replace) *replace = L'\0';
            } while (0);
            if (listen_socket != INVALID_SOCKET) CLOSE_SOCKET(listen_socket);
            if (accept_socket != INVALID_SOCKET) CLOSE_SOCKET(accept_socket);
#ifdef _WIN32
            if (wsa_data_init) WSACleanup();
#endif
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        /* FIX: Benign input preventing command injection */
        wcscat(data, L"*.*");
    }
    /* wexecl - specify the path where the command is located */
    /* POSSIBLE FLAW: Execute command without validating input possibly leading to command injection */
    EXECL(COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B1() - use goodsource and badsink by changing the global_const_five==5 to global_const_five!=5 */
static void goodG2B1()
{
    wchar_t * data;
    wchar_t data_buf[100] = L"";
    data = data_buf;
    if(global_const_five!=5)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
#ifdef _WIN32
            WSADATA wsa_data;
            int wsa_data_init = 0;
#endif
            int recv_rv;
            struct sockaddr_in s_in;
            wchar_t *replace;
            SOCKET listen_socket = INVALID_SOCKET;
            SOCKET accept_socket = INVALID_SOCKET;
            size_t data_len = wcslen(data);
            do
            {
#ifdef _WIN32
                if
(WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break; wsa_data_init = 1; #endif listen_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listen_socket == INVALID_SOCKET) break; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = AF_INET; s_in.sin_addr.s_addr = INADDR_ANY; s_in.sin_port = htons(TCP_PORT); if (bind(listen_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break; if (listen(listen_socket, LISTEN_BACKLOG) == SOCKET_ERROR) break; accept_socket = accept(listen_socket, NULL, NULL); if (accept_socket == SOCKET_ERROR) break; /* Abort on error or the connection was closed */ recv_rv = recv(accept_socket, (char *)data+data_len, (int)(100-data_len-1), 0); if (recv_rv == SOCKET_ERROR || recv_rv == 0) break; /* Append null terminator */ data[recv_rv] = L'\0'; /* Eliminate CRLF */ replace = wcschr(data, L'\r'); if (replace) *replace = L'\0'; replace = wcschr(data, L'\n'); if (replace) *replace = L'\0'; } while (0); if (listen_socket != INVALID_SOCKET) CLOSE_SOCKET(listen_socket); if (accept_socket != INVALID_SOCKET) CLOSE_SOCKET(accept_socket); #ifdef _WIN32 if (wsa_data_init) WSACleanup(); #endif } } else { /* FIX: Benign input preventing command injection */ wcscat(data, L"*.*"); } /* wexecl - specify the path where the command is located */ /* POSSIBLE FLAW: Execute command without validating input possibly leading to command injection */ EXECL(COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL); } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { wchar_t * data; wchar_t data_buf[100] = L""; data = data_buf; if(global_const_five==5) { /* FIX: Benign input preventing command injection */ wcscat(data, L"*.*"); } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { #ifdef _WIN32 WSADATA wsa_data; int wsa_data_init = 0; #endif int recv_rv; struct sockaddr_in s_in; wchar_t *replace; SOCKET listen_socket = INVALID_SOCKET; 
SOCKET accept_socket = INVALID_SOCKET; size_t data_len = wcslen(data); do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break; wsa_data_init = 1; #endif listen_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listen_socket == INVALID_SOCKET) break; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = AF_INET; s_in.sin_addr.s_addr = INADDR_ANY; s_in.sin_port = htons(TCP_PORT); if (bind(listen_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break; if (listen(listen_socket, LISTEN_BACKLOG) == SOCKET_ERROR) break; accept_socket = accept(listen_socket, NULL, NULL); if (accept_socket == SOCKET_ERROR) break; /* Abort on error or the connection was closed */ recv_rv = recv(accept_socket, (char *)data+data_len, (int)(100-data_len-1), 0); if (recv_rv == SOCKET_ERROR || recv_rv == 0) break; /* Append null terminator */ data[recv_rv] = L'\0'; /* Eliminate CRLF */ replace = wcschr(data, L'\r'); if (replace) *replace = L'\0'; replace = wcschr(data, L'\n'); if (replace) *replace = L'\0'; } while (0); if (listen_socket != INVALID_SOCKET) CLOSE_SOCKET(listen_socket); if (accept_socket != INVALID_SOCKET) CLOSE_SOCKET(accept_socket); #ifdef _WIN32 if (wsa_data_init) WSACleanup(); #endif } } /* wexecl - specify the path where the command is located */ /* POSSIBLE FLAW: Execute command without validating input possibly leading to command injection */ EXECL(COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL); } void CWE78_OS_Command_Injection__wchar_t_listen_socket_execl_13_good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. 
*/ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE78_OS_Command_Injection__wchar_t_listen_socket_execl_13_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE78_OS_Command_Injection__wchar_t_listen_socket_execl_13_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
682511.c
/********************************************************************************
 * Copyright (c) 2020 AVL List GmbH and others
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache Software License 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * SPDX-License-Identifier: Apache-2.0
 ********************************************************************************/

#include "reader/task/ResultsInput.h"
#include "util/string.h"

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Release everything a ResultsInput owns: the output directory string and
 * the backends collection (which in turn destroys its backend entries). */
static void ResultsInputDestructor(ResultsInput * input) {
    if (input->outputDirectory) {
        mcx_free(input->outputDirectory);
    }
    if (input->backends) {
        object_destroy(input->backends);
    }
}

/* Bring a freshly allocated ResultsInput into a well-defined empty state. */
static ResultsInput * ResultsInputCreate(ResultsInput * input) {
    input->outputDirectory = NULL;
    OPTIONAL_UNSET(input->resultLevel);
    input->backends = NULL;
    return input;
}

OBJECT_CLASS(ResultsInput, InputElement);

/* Build a ResultsInput populated with project defaults: output directory
 * "results", synchronization-level storage, and a single CSV backend that
 * stores at runtime. Returns NULL on any allocation failure (all partial
 * state is destroyed before returning). */
ResultsInput * CreateDefaultResultsInput(InputType type) {
    ResultsInput * input = (ResultsInput*)object_create(ResultsInput);
    InputElement * base = (InputElement*)input;
    BackendInput * csv = NULL;

    if (!input) {
        return NULL;
    }

    base->type = type;
    base->context = NULL;

    input->outputDirectory = mcx_string_copy("results");
    OPTIONAL_SET(input->resultLevel, STORE_SYNCHRONIZATION);

    /* Use CSV as a default backend */
    input->backends = (BackendsInput*)object_create(BackendsInput);
    if (!input->backends) {
        goto cleanup;
    }

    csv = (BackendInput*)object_create(BackendInput);
    if (!csv) {
        goto cleanup;
    }
    OPTIONAL_SET(csv->storeAtRuntime, TRUE);
    csv->type = BACKEND_CSV;

    /* On success the backends list takes ownership of the CSV backend;
     * on PushBack failure it was not added, so destroy it separately. */
    if (input->backends->backends->PushBack(input->backends->backends, (Object *)csv) == RETURN_ERROR) {
        goto cleanup;
    }

    return input;

cleanup:
    object_destroy(input);
    object_destroy(csv);
    return NULL;
}

#ifdef __cplusplus
} /* closing brace for extern "C" */
#endif /* __cplusplus */
644499.c
#include "scenario.h"
#include "string_utilities.h"
#include <stdlib.h>

/*
 * Allocate and initialize a Scenario.
 * keyword and name are deep-copied; description, tags and steps are adopted
 * as-is and freed by Scenario_delete (ownership transfers to the Scenario).
 * Returns NULL if allocation fails; in that case the caller still owns
 * description/tags/steps.
 */
const Scenario* Scenario_new(Location location, const wchar_t* keyword, const wchar_t* name, const wchar_t* description, const Tags* tags, const Steps* steps) {
    Scenario* scenario = (Scenario*)malloc(sizeof(Scenario));
    if (!scenario) {
        /* FIX: the original dereferenced the result of malloc unchecked
         * (CERT MEM32-C: detect and handle memory allocation errors). */
        return 0;
    }
    scenario->scenario_delete = (item_delete_function)Scenario_delete;
    scenario->type = Gherkin_Scenario;
    scenario->location.line = location.line;
    scenario->location.column = location.column;
    scenario->keyword = 0;
    if (keyword) {
        scenario->keyword = StringUtilities_copy_string(keyword);
    }
    scenario->name = 0;
    if (name) {
        scenario->name = StringUtilities_copy_string(name);
    }
    /* NOTE(review): description/tags/steps are stored without copying while
     * keyword/name are copied — confirm callers always pass heap-owned
     * description (Scenario_delete frees it). */
    scenario->description = description;
    scenario->tags = tags;
    scenario->steps = steps;
    return scenario;
}

/*
 * Destroy a Scenario and everything it owns.
 * Safe to call with NULL.
 */
void Scenario_delete(const Scenario* scenario) {
    if (!scenario) {
        return;
    }
    if (scenario->keyword) {
        free((void*)scenario->keyword);
    }
    if (scenario->name) {
        free((void*)scenario->name);
    }
    if (scenario->description) {
        free((void*)scenario->description);
    }
    if (scenario->tags) {
        Tags_delete(scenario->tags);
    }
    if (scenario->steps) {
        Steps_delete(scenario->steps);
    }
    free((void*)scenario);
}

/*
 * Move all owned members of from_scenario into to_scenario, then delete
 * from_scenario. Pointer members are nulled in the source so deletion does
 * not free the transferred data.
 */
void Scenario_transfer(Scenario* to_scenario, Scenario* from_scenario) {
    to_scenario->type = from_scenario->type;
    to_scenario->location.line = from_scenario->location.line;
    to_scenario->location.column = from_scenario->location.column;
    to_scenario->keyword = from_scenario->keyword;
    from_scenario->keyword = 0;
    to_scenario->name = from_scenario->name;
    from_scenario->name = 0;
    to_scenario->description = from_scenario->description;
    from_scenario->description = 0;
    to_scenario->tags = from_scenario->tags;
    from_scenario->tags = 0;
    to_scenario->steps = from_scenario->steps;
    from_scenario->steps = 0;
    Scenario_delete(from_scenario);
}
543832.c
/******************************************************************** * * * THIS FILE IS PART OF THE libopusfile SOFTWARE CODEC SOURCE CODE. * * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS * * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE * * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. * * * * THE libopusfile SOURCE CODE IS (C) COPYRIGHT 1994-2020 * * by the Xiph.Org Foundation and contributors https://xiph.org/ * * * ******************************************************************** function: stdio-based convenience library for opening/seeking/decoding last mod: $Id: vorbisfile.c 17573 2010-10-27 14:53:59Z xiphmont $ ********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "internal.h" #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <limits.h> #include <string.h> #include <math.h> #include "opusfile.h" /*This implementation is largely based off of libvorbisfile. All of the Ogg bits work roughly the same, though I have made some "improvements" that have not been folded back there, yet.*/ /*A 'chained bitstream' is an Ogg Opus bitstream that contains more than one logical bitstream arranged end to end (the only form of Ogg multiplexing supported by this library. Grouping (parallel multiplexing) is not supported, except to the extent that if there are multiple logical Ogg streams in a single link of the chain, we will ignore all but the first Opus stream we find.*/ /*An Ogg Opus file can be played beginning to end (streamed) without worrying ahead of time about chaining (see opusdec from the opus-tools package). If we have the whole file, however, and want random access (seeking/scrubbing) or desire to know the total length/time of a file, we need to account for the possibility of chaining.*/ /*We can handle things a number of ways. We can determine the entire bitstream structure right off the bat, or find pieces on demand. 
This library determines and caches structure for the entire bitstream, but builds a virtual decoder on the fly when moving between links in the chain.*/ /*There are also different ways to implement seeking. Enough information exists in an Ogg bitstream to seek to sample-granularity positions in the output. Or, one can seek by picking some portion of the stream roughly in the desired area if we only want coarse navigation through the stream. We implement and expose both strategies.*/ /*The maximum number of bytes in a page (including the page headers).*/ #define OP_PAGE_SIZE_MAX (65307) /*The default amount to seek backwards per step when trying to find the previous page. This must be at least as large as the maximum size of a page.*/ #define OP_CHUNK_SIZE (65536) /*The maximum amount to seek backwards per step when trying to find the previous page.*/ #define OP_CHUNK_SIZE_MAX (1024*(opus_int32)1024) /*A smaller read size is needed for low-rate streaming.*/ #define OP_READ_SIZE (2048) int op_test(OpusHead *_head, const unsigned char *_initial_data,size_t _initial_bytes){ ogg_sync_state oy; char *data; int err; /*The first page of a normal Opus file will be at most 57 bytes (27 Ogg page header bytes + 1 lacing value + 21 Opus header bytes + 8 channel mapping bytes). It will be at least 47 bytes (27 Ogg page header bytes + 1 lacing value + 19 Opus header bytes using channel mapping family 0). If we don't have at least that much data, give up now.*/ if(_initial_bytes<47)return OP_FALSE; /*Only proceed if we start with the magic OggS string. 
This is to prevent us spending a lot of time allocating memory and looking for Ogg pages in non-Ogg files.*/ if(memcmp(_initial_data,"OggS",4)!=0)return OP_ENOTFORMAT; if(OP_UNLIKELY(_initial_bytes>(size_t)LONG_MAX))return OP_EFAULT; ogg_sync_init(&oy); data=ogg_sync_buffer(&oy,(long)_initial_bytes); if(data!=NULL){ ogg_stream_state os; ogg_page og; int ret; memcpy(data,_initial_data,_initial_bytes); ogg_sync_wrote(&oy,(long)_initial_bytes); ogg_stream_init(&os,-1); err=OP_FALSE; do{ ogg_packet op; ret=ogg_sync_pageout(&oy,&og); /*Ignore holes.*/ if(ret<0)continue; /*Stop if we run out of data.*/ if(!ret)break; ogg_stream_reset_serialno(&os,ogg_page_serialno(&og)); ogg_stream_pagein(&os,&og); /*Only process the first packet on this page (if it's a BOS packet, it's required to be the only one).*/ if(ogg_stream_packetout(&os,&op)==1){ if(op.b_o_s){ ret=opus_head_parse(_head,op.packet,op.bytes); /*If this didn't look like Opus, keep going.*/ if(ret==OP_ENOTFORMAT)continue; /*Otherwise we're done, one way or another.*/ err=ret; } /*We finished parsing the headers. There is no Opus to be found.*/ else err=OP_ENOTFORMAT; } } while(err==OP_FALSE); ogg_stream_clear(&os); } else err=OP_EFAULT; ogg_sync_clear(&oy); return err; } /*Many, many internal helpers. The intention is not to be confusing. Rampant duplication and monolithic function implementation (though we do have some large, omnibus functions still) would be harder to understand anyway. The high level functions are last. Begin grokking near the end of the file if you prefer to read things top-down.*/ /*The read/seek functions track absolute position within the stream.*/ /*Read a little more data from the file/pipe into the ogg_sync framer. _nbytes: The maximum number of bytes to read. 
Return: A positive number of bytes read on success, 0 on end-of-file, or a negative value on failure.*/ static int op_get_data(OggOpusFile *_of,int _nbytes){ unsigned char *buffer; int nbytes; OP_ASSERT(_nbytes>0); buffer=(unsigned char *)ogg_sync_buffer(&_of->oy,_nbytes); nbytes=(int)(*_of->callbacks.read)(_of->stream,buffer,_nbytes); OP_ASSERT(nbytes<=_nbytes); if(OP_LIKELY(nbytes>0))ogg_sync_wrote(&_of->oy,nbytes); return nbytes; } /*Save a tiny smidge of verbosity to make the code more readable.*/ static int op_seek_helper(OggOpusFile *_of,opus_int64 _offset){ if(_offset==_of->offset)return 0; if(_of->callbacks.seek==NULL ||(*_of->callbacks.seek)(_of->stream,_offset,SEEK_SET)){ return OP_EREAD; } _of->offset=_offset; ogg_sync_reset(&_of->oy); return 0; } /*Get the current position indicator of the underlying stream. This should be the same as the value reported by tell().*/ static opus_int64 op_position(const OggOpusFile *_of){ /*The current position indicator is _not_ simply offset. We may also have unprocessed, buffered data in the sync state.*/ return _of->offset+_of->oy.fill-_of->oy.returned; } /*From the head of the stream, get the next page. _boundary specifies if the function is allowed to fetch more data from the stream (and how much) or only use internally buffered data. _boundary: -1: Unbounded search. 0: Read no additional data. Use only cached data. n: Search for the start of a new page up to file position n. Return: n>=0: Found a page at absolute offset n. OP_FALSE: Hit the _boundary limit. OP_EREAD: An underlying read operation failed. 
OP_BADLINK: We hit end-of-file before reaching _boundary.*/ static opus_int64 op_get_next_page(OggOpusFile *_of,ogg_page *_og, opus_int64 _boundary){ while(_boundary<=0||_of->offset<_boundary){ int more; more=ogg_sync_pageseek(&_of->oy,_og); /*Skipped (-more) bytes.*/ if(OP_UNLIKELY(more<0))_of->offset-=more; else if(more==0){ int read_nbytes; int ret; /*Send more paramedics.*/ if(!_boundary)return OP_FALSE; if(_boundary<0)read_nbytes=OP_READ_SIZE; else{ opus_int64 position; position=op_position(_of); if(position>=_boundary)return OP_FALSE; read_nbytes=(int)OP_MIN(_boundary-position,OP_READ_SIZE); } ret=op_get_data(_of,read_nbytes); if(OP_UNLIKELY(ret<0))return OP_EREAD; if(OP_UNLIKELY(ret==0)){ /*Only fail cleanly on EOF if we didn't have a known boundary. Otherwise, we should have been able to reach that boundary, and this is a fatal error.*/ return OP_UNLIKELY(_boundary<0)?OP_FALSE:OP_EBADLINK; } } else{ /*Got a page. Return the page start offset and advance the internal offset past the page end.*/ opus_int64 page_offset; page_offset=_of->offset; _of->offset+=more; OP_ASSERT(page_offset>=0); return page_offset; } } return OP_FALSE; } static int op_add_serialno(const ogg_page *_og, ogg_uint32_t **_serialnos,int *_nserialnos,int *_cserialnos){ ogg_uint32_t *serialnos; int nserialnos; int cserialnos; ogg_uint32_t s; s=ogg_page_serialno(_og); serialnos=*_serialnos; nserialnos=*_nserialnos; cserialnos=*_cserialnos; if(OP_UNLIKELY(nserialnos>=cserialnos)){ if(OP_UNLIKELY(cserialnos>INT_MAX/(int)sizeof(*serialnos)-1>>1)){ return OP_EFAULT; } cserialnos=2*cserialnos+1; OP_ASSERT(nserialnos<cserialnos); serialnos=(ogg_uint32_t *)_ogg_realloc(serialnos, sizeof(*serialnos)*cserialnos); if(OP_UNLIKELY(serialnos==NULL))return OP_EFAULT; } serialnos[nserialnos++]=s; *_serialnos=serialnos; *_nserialnos=nserialnos; *_cserialnos=cserialnos; return 0; } /*Returns nonzero if found.*/ static int op_lookup_serialno(ogg_uint32_t _s, const ogg_uint32_t *_serialnos,int _nserialnos){ 
int i; for(i=0;i<_nserialnos&&_serialnos[i]!=_s;i++); return i<_nserialnos; } static int op_lookup_page_serialno(const ogg_page *_og, const ogg_uint32_t *_serialnos,int _nserialnos){ return op_lookup_serialno(ogg_page_serialno(_og),_serialnos,_nserialnos); } typedef struct OpusSeekRecord OpusSeekRecord; /*We use this to remember the pages we found while enumerating the links of a chained stream. We keep track of the starting and ending offsets, as well as the point we started searching from, so we know where to bisect. We also keep the serial number, so we can tell if the page belonged to the current link or not, as well as the granule position, to aid in estimating the start of the link.*/ struct OpusSeekRecord{ /*The earliest byte we know of such that reading forward from it causes capture to be regained at this page.*/ opus_int64 search_start; /*The offset of this page.*/ opus_int64 offset; /*The size of this page.*/ opus_int32 size; /*The serial number of this page.*/ ogg_uint32_t serialno; /*The granule position of this page.*/ ogg_int64_t gp; }; /*Find the last page beginning before _offset with a valid granule position. There is no '_boundary' parameter as it will always have to read more data. This is much dirtier than the above, as Ogg doesn't have any backward search linkage. This search prefers pages of the specified serial number. If a page of the specified serial number is spotted during the seek-back-and-read-forward, it will return the info of last page of the matching serial number, instead of the very last page, unless the very last page belongs to a different link than preferred serial number. If no page of the specified serial number is seen, it will return the info of the last page. [out] _sr: Returns information about the page that was found on success. _offset: The _offset before which to find a page. Any page returned will consist of data entirely before _offset. _serialno: The preferred serial number. 
If a page with this serial number is found, it will be returned even if another page in the same link is found closer to _offset. This is purely opportunistic: there is no guarantee such a page will be found if it exists. _serialnos: The list of serial numbers in the link that contains the preferred serial number. _nserialnos: The number of serial numbers in the current link. Return: 0 on success, or a negative value on failure. OP_EREAD: Failed to read more data (error or EOF). OP_EBADLINK: We couldn't find a page even after seeking back to the start of the stream.*/ static int op_get_prev_page_serial(OggOpusFile *_of,OpusSeekRecord *_sr, opus_int64 _offset,ogg_uint32_t _serialno, const ogg_uint32_t *_serialnos,int _nserialnos){ OpusSeekRecord preferred_sr; ogg_page og; opus_int64 begin; opus_int64 end; opus_int64 original_end; opus_int32 chunk_size; int preferred_found; original_end=end=begin=_offset; preferred_found=0; _offset=-1; chunk_size=OP_CHUNK_SIZE; do{ opus_int64 search_start; int ret; OP_ASSERT(chunk_size>=OP_PAGE_SIZE_MAX); begin=OP_MAX(begin-chunk_size,0); ret=op_seek_helper(_of,begin); if(OP_UNLIKELY(ret<0))return ret; search_start=begin; while(_of->offset<end){ opus_int64 llret; ogg_uint32_t serialno; llret=op_get_next_page(_of,&og,end); if(OP_UNLIKELY(llret<OP_FALSE))return (int)llret; else if(llret==OP_FALSE)break; serialno=ogg_page_serialno(&og); /*Save the information for this page. We're not interested in the page itself... 
just the serial number, byte offset, page size, and granule position.*/ _sr->search_start=search_start; _sr->offset=_offset=llret; _sr->serialno=serialno; OP_ASSERT(_of->offset-_offset>=0); OP_ASSERT(_of->offset-_offset<=OP_PAGE_SIZE_MAX); _sr->size=(opus_int32)(_of->offset-_offset); _sr->gp=ogg_page_granulepos(&og); /*If this page is from the stream we're looking for, remember it.*/ if(serialno==_serialno){ preferred_found=1; *&preferred_sr=*_sr; } if(!op_lookup_serialno(serialno,_serialnos,_nserialnos)){ /*We fell off the end of the link, which means we seeked back too far and shouldn't have been looking in that link to begin with. If we found the preferred serial number, forget that we saw it.*/ preferred_found=0; } search_start=llret+1; } /*We started from the beginning of the stream and found nothing. This should be impossible unless the contents of the stream changed out from under us after we read from it.*/ if(OP_UNLIKELY(!begin)&&OP_UNLIKELY(_offset<0))return OP_EBADLINK; /*Bump up the chunk size. This is mildly helpful when seeks are very expensive (http).*/ chunk_size=OP_MIN(2*chunk_size,OP_CHUNK_SIZE_MAX); /*Avoid quadratic complexity if we hit an invalid patch of the file.*/ end=OP_MIN(begin+OP_PAGE_SIZE_MAX-1,original_end); } while(_offset<0); if(preferred_found)*_sr=*&preferred_sr; return 0; } /*Find the last page beginning before _offset with the given serial number and a valid granule position. Unlike the above search, this continues until it finds such a page, but does not stray outside the current link. We could implement it (inefficiently) by calling op_get_prev_page_serial() repeatedly until it returned a page that had both our preferred serial number and a valid granule position, but doing it with a separate function allows us to avoid repeatedly re-scanning valid pages from other streams as we seek-back-and-read-forward. [out] _gp: Returns the granule position of the page that was found on success. 
_offset: The _offset before which to find a page. Any page returned will consist of data entirely before _offset. _serialno: The target serial number. _serialnos: The list of serial numbers in the link that contains the preferred serial number. _nserialnos: The number of serial numbers in the current link. Return: The offset of the page on success, or a negative value on failure. OP_EREAD: Failed to read more data (error or EOF). OP_EBADLINK: We couldn't find a page even after seeking back past the beginning of the link.*/ static opus_int64 op_get_last_page(OggOpusFile *_of,ogg_int64_t *_gp, opus_int64 _offset,ogg_uint32_t _serialno, const ogg_uint32_t *_serialnos,int _nserialnos){ ogg_page og; ogg_int64_t gp; opus_int64 begin; opus_int64 end; opus_int64 original_end; opus_int32 chunk_size; /*The target serial number must belong to the current link.*/ OP_ASSERT(op_lookup_serialno(_serialno,_serialnos,_nserialnos)); original_end=end=begin=_offset; _offset=-1; /*We shouldn't have to initialize gp, but gcc is too dumb to figure out that ret>=0 implies we entered the if(page_gp!=-1) block at least once.*/ gp=-1; chunk_size=OP_CHUNK_SIZE; do{ int left_link; int ret; OP_ASSERT(chunk_size>=OP_PAGE_SIZE_MAX); begin=OP_MAX(begin-chunk_size,0); ret=op_seek_helper(_of,begin); if(OP_UNLIKELY(ret<0))return ret; left_link=0; while(_of->offset<end){ opus_int64 llret; ogg_uint32_t serialno; llret=op_get_next_page(_of,&og,end); if(OP_UNLIKELY(llret<OP_FALSE))return llret; else if(llret==OP_FALSE)break; serialno=ogg_page_serialno(&og); if(serialno==_serialno){ ogg_int64_t page_gp; /*The page is from the right stream...*/ page_gp=ogg_page_granulepos(&og); if(page_gp!=-1){ /*And has a valid granule position. 
Let's remember it.*/ _offset=llret; gp=page_gp; } } else if(OP_UNLIKELY(!op_lookup_serialno(serialno, _serialnos,_nserialnos))){ /*We fell off the start of the link, which means we don't need to keep seeking any farther back.*/ left_link=1; } } /*We started from at or before the beginning of the link and found nothing. This should be impossible unless the contents of the stream changed out from under us after we read from it.*/ if((OP_UNLIKELY(left_link)||OP_UNLIKELY(!begin))&&OP_UNLIKELY(_offset<0)){ return OP_EBADLINK; } /*Bump up the chunk size. This is mildly helpful when seeks are very expensive (http).*/ chunk_size=OP_MIN(2*chunk_size,OP_CHUNK_SIZE_MAX); /*Avoid quadratic complexity if we hit an invalid patch of the file.*/ end=OP_MIN(begin+OP_PAGE_SIZE_MAX-1,original_end); } while(_offset<0); *_gp=gp; return _offset; } /*Uses the local ogg_stream storage in _of. This is important for non-streaming input sources.*/ static int op_fetch_headers_impl(OggOpusFile *_of,OpusHead *_head, OpusTags *_tags,ogg_uint32_t **_serialnos,int *_nserialnos, int *_cserialnos,ogg_page *_og){ ogg_packet op; int ret; if(_serialnos!=NULL)*_nserialnos=0; /*Extract the serialnos of all BOS pages plus the first set of Opus headers we see in the link.*/ while(ogg_page_bos(_og)){ if(_serialnos!=NULL){ if(OP_UNLIKELY(op_lookup_page_serialno(_og,*_serialnos,*_nserialnos))){ /*A dupe serialnumber in an initial header packet set==invalid stream.*/ return OP_EBADHEADER; } ret=op_add_serialno(_og,_serialnos,_nserialnos,_cserialnos); if(OP_UNLIKELY(ret<0))return ret; } if(_of->ready_state<OP_STREAMSET){ /*We don't have an Opus stream in this link yet, so begin prospective stream setup. We need a stream to get packets.*/ ogg_stream_reset_serialno(&_of->os,ogg_page_serialno(_og)); ogg_stream_pagein(&_of->os,_og); if(OP_LIKELY(ogg_stream_packetout(&_of->os,&op)>0)){ ret=opus_head_parse(_head,op.packet,op.bytes); /*Found a valid Opus header. 
Continue setup.*/ if(OP_LIKELY(ret>=0))_of->ready_state=OP_STREAMSET; /*If it's just a stream type we don't recognize, ignore it. Everything else is fatal.*/ else if(ret!=OP_ENOTFORMAT)return ret; } /*TODO: Should a BOS page with no packets be an error?*/ } /*Get the next page. No need to clamp the boundary offset against _of->end, as all errors become OP_ENOTFORMAT or OP_EBADHEADER.*/ if(OP_UNLIKELY(op_get_next_page(_of,_og, OP_ADV_OFFSET(_of->offset,OP_CHUNK_SIZE))<0)){ return _of->ready_state<OP_STREAMSET?OP_ENOTFORMAT:OP_EBADHEADER; } } if(OP_UNLIKELY(_of->ready_state!=OP_STREAMSET))return OP_ENOTFORMAT; /*If the first non-header page belonged to our Opus stream, submit it.*/ if(_of->os.serialno==ogg_page_serialno(_og))ogg_stream_pagein(&_of->os,_og); /*Loop getting packets.*/ for(;;){ switch(ogg_stream_packetout(&_of->os,&op)){ case 0:{ /*Loop getting pages.*/ for(;;){ /*No need to clamp the boundary offset against _of->end, as all errors become OP_EBADHEADER.*/ if(OP_UNLIKELY(op_get_next_page(_of,_og, OP_ADV_OFFSET(_of->offset,OP_CHUNK_SIZE))<0)){ return OP_EBADHEADER; } /*If this page belongs to the correct stream, go parse it.*/ if(_of->os.serialno==ogg_page_serialno(_og)){ ogg_stream_pagein(&_of->os,_og); break; } /*If the link ends before we see the Opus comment header, abort.*/ if(OP_UNLIKELY(ogg_page_bos(_og)))return OP_EBADHEADER; /*Otherwise, keep looking.*/ } }break; /*We shouldn't get a hole in the headers!*/ case -1:return OP_EBADHEADER; default:{ /*Got a packet. It should be the comment header.*/ ret=opus_tags_parse(_tags,op.packet,op.bytes); if(OP_UNLIKELY(ret<0))return ret; /*Make sure the page terminated at the end of the comment header. If there is another packet on the page, or part of a packet, then reject the stream. 
Otherwise seekable sources won't be able to seek back to the start properly.*/ ret=ogg_stream_packetout(&_of->os,&op); if(OP_UNLIKELY(ret!=0) ||OP_UNLIKELY(_og->header[_og->header_len-1]==255)){ /*If we fail, the caller assumes our tags are uninitialized.*/ opus_tags_clear(_tags); return OP_EBADHEADER; } return 0; } } } } static int op_fetch_headers(OggOpusFile *_of,OpusHead *_head, OpusTags *_tags,ogg_uint32_t **_serialnos,int *_nserialnos, int *_cserialnos,ogg_page *_og){ ogg_page og; int ret; if(!_og){ /*No need to clamp the boundary offset against _of->end, as all errors become OP_ENOTFORMAT.*/ if(OP_UNLIKELY(op_get_next_page(_of,&og, OP_ADV_OFFSET(_of->offset,OP_CHUNK_SIZE))<0)){ return OP_ENOTFORMAT; } _og=&og; } _of->ready_state=OP_OPENED; ret=op_fetch_headers_impl(_of,_head,_tags,_serialnos,_nserialnos, _cserialnos,_og); /*Revert back from OP_STREAMSET to OP_OPENED on failure, to prevent double-free of the tags in an unseekable stream.*/ if(OP_UNLIKELY(ret<0))_of->ready_state=OP_OPENED; return ret; } /*Granule position manipulation routines. A granule position is defined to be an unsigned 64-bit integer, with the special value -1 in two's complement indicating an unset or invalid granule position. We are not guaranteed to have an unsigned 64-bit type, so we construct the following routines that a) Properly order negative numbers as larger than positive numbers, and b) Check for underflow or overflow past the special -1 value. This lets us operate on the full, valid range of granule positions in a consistent and safe manner. This full range is organized into distinct regions: [ -1 (invalid) ][ 0 ... OP_INT64_MAX ][ OP_INT64_MIN ... -2 ][-1 (invalid) ] No one should actually use granule positions so large that they're negative, even if they are technically valid, as very little software handles them correctly (including most of Xiph.Org's). 
This library also refuses to support durations so large they won't fit in a signed 64-bit integer (to avoid exposing this mess to the application, and to simplify a good deal of internal arithmetic), so the only way to use them successfully is if pcm_start is very large. This means there isn't anything you can do with negative granule positions that you couldn't have done with purely non-negative ones. The main purpose of these routines is to allow us to think very explicitly about the possible failure cases of all granule position manipulations.*/ /*Safely adds a small signed integer to a valid (not -1) granule position. The result can use the full 64-bit range of values (both positive and negative), but will fail on overflow (wrapping past -1; wrapping past OP_INT64_MAX is explicitly okay). [out] _dst_gp: The resulting granule position. Only modified on success. _src_gp: The granule position to add to. This must not be -1. _delta: The amount to add. This is allowed to be up to 32 bits to support the maximum duration of a single Ogg page (255 packets * 120 ms per packet == 1,468,800 samples at 48 kHz). Return: 0 on success, or OP_EINVAL if the result would wrap around past -1.*/ static int op_granpos_add(ogg_int64_t *_dst_gp,ogg_int64_t _src_gp, opus_int32 _delta){ /*The code below handles this case correctly, but there's no reason we should ever be called with these values, so make sure we aren't.*/ OP_ASSERT(_src_gp!=-1); if(_delta>0){ /*Adding this amount to the granule position would overflow its 64-bit range.*/ if(OP_UNLIKELY(_src_gp<0)&&OP_UNLIKELY(_src_gp>=-1-_delta))return OP_EINVAL; if(OP_UNLIKELY(_src_gp>OP_INT64_MAX-_delta)){ /*Adding this amount to the granule position would overflow the positive half of its 64-bit range. 
Since signed overflow is undefined in C, do it in a way the compiler isn't allowed to screw up.*/ _delta-=(opus_int32)(OP_INT64_MAX-_src_gp)+1; _src_gp=OP_INT64_MIN; } } else if(_delta<0){ /*Subtracting this amount from the granule position would underflow its 64-bit range.*/ if(_src_gp>=0&&OP_UNLIKELY(_src_gp<-_delta))return OP_EINVAL; if(OP_UNLIKELY(_src_gp<OP_INT64_MIN-_delta)){ /*Subtracting this amount from the granule position would underflow the negative half of its 64-bit range. Since signed underflow is undefined in C, do it in a way the compiler isn't allowed to screw up.*/ _delta+=(opus_int32)(_src_gp-OP_INT64_MIN)+1; _src_gp=OP_INT64_MAX; } } *_dst_gp=_src_gp+_delta; return 0; } /*Safely computes the difference between two granule positions. The difference must fit in a signed 64-bit integer, or the function fails. It correctly handles the case where the granule position has wrapped around from positive values to negative ones. [out] _delta: The difference between the granule positions. Only modified on success. _gp_a: The granule position to subtract from. This must not be -1. _gp_b: The granule position to subtract. This must not be -1. 
Return: 0 on success, or OP_EINVAL if the result would not fit in a signed
           64-bit integer.*/
static int op_granpos_diff(ogg_int64_t *_delta,
 ogg_int64_t _gp_a,ogg_int64_t _gp_b){
  int gp_a_negative;
  int gp_b_negative;
  /*The code below handles these cases correctly, but there's no reason we
     should ever be called with these values, so make sure we aren't.*/
  OP_ASSERT(_gp_a!=-1);
  OP_ASSERT(_gp_b!=-1);
  gp_a_negative=OP_UNLIKELY(_gp_a<0);
  gp_b_negative=OP_UNLIKELY(_gp_b<0);
  if(OP_UNLIKELY(gp_a_negative^gp_b_negative)){
    /*Exactly one operand has wrapped negative: split the subtraction into
       distances from the type limits so no intermediate step overflows.*/
    ogg_int64_t da;
    ogg_int64_t db;
    if(gp_a_negative){
      /*_gp_a has wrapped to a negative value but _gp_b hasn't: the difference
         should be positive.*/
      /*Step 1: Handle wrapping.*/
      /*_gp_a < 0 => da < 0.*/
      da=(OP_INT64_MIN-_gp_a)-1;
      /*_gp_b >= 0 => db >= 0.*/
      db=OP_INT64_MAX-_gp_b;
      /*Step 2: Check for overflow.*/
      if(OP_UNLIKELY(OP_INT64_MAX+da<db))return OP_EINVAL;
      *_delta=db-da;
    }
    else{
      /*_gp_b has wrapped to a negative value but _gp_a hasn't: the difference
         should be negative.*/
      /*Step 1: Handle wrapping.*/
      /*_gp_a >= 0 => da <= 0*/
      da=_gp_a+OP_INT64_MIN;
      /*_gp_b < 0 => db <= 0*/
      db=OP_INT64_MIN-_gp_b;
      /*Step 2: Check for overflow.*/
      if(OP_UNLIKELY(da<OP_INT64_MIN-db))return OP_EINVAL;
      *_delta=da+db;
    }
  }
  /*Same sign: a plain subtraction cannot overflow.*/
  else *_delta=_gp_a-_gp_b;
  return 0;
}

static int op_granpos_cmp(ogg_int64_t _gp_a,ogg_int64_t _gp_b){
  /*The invalid granule position -1 should behave like NaN: neither greater
     than nor less than any other granule position, nor equal to any other
     granule position, including itself.
However, that means there isn't anything we could sensibly return from
     this function for it.*/
  OP_ASSERT(_gp_a!=-1);
  OP_ASSERT(_gp_b!=-1);
  /*Handle the wrapping cases.
    A granule position that has wrapped negative compares greater than any
     non-negative one.*/
  if(OP_UNLIKELY(_gp_a<0)){
    if(_gp_b>=0)return 1;
    /*Else fall through.*/
  }
  else if(OP_UNLIKELY(_gp_b<0))return -1;
  /*No wrapping case.*/
  return (_gp_a>_gp_b)-(_gp_b>_gp_a);
}

/*Returns the duration of the packet (in samples at 48 kHz), or a negative
   value on error.*/
static int op_get_packet_duration(const unsigned char *_data,int _len){
  int nframes;
  int frame_size;
  int nsamples;
  nframes=opus_packet_get_nb_frames(_data,_len);
  if(OP_UNLIKELY(nframes<0))return OP_EBADPACKET;
  frame_size=opus_packet_get_samples_per_frame(_data,48000);
  nsamples=nframes*frame_size;
  /*A single Opus packet may contain at most 120 ms of audio
     (120 ms * 48 samples/ms at 48 kHz).*/
  if(OP_UNLIKELY(nsamples>120*48))return OP_EBADPACKET;
  return nsamples;
}

/*This function more properly belongs in info.c, but we define it here to
   allow the static granule position manipulation functions to remain
   static.*/
ogg_int64_t opus_granule_sample(const OpusHead *_head,ogg_int64_t _gp){
  opus_int32 pre_skip;
  pre_skip=_head->pre_skip;
  /*Map the invalid position -1 through unchanged; on subtraction failure,
     also report -1.*/
  if(_gp!=-1&&op_granpos_add(&_gp,_gp,-pre_skip))_gp=-1;
  return _gp;
}

/*Grab all the packets currently in the stream state, and compute their
   durations.
  _of->op_count is set to the number of packets collected.
  [out] _durations: Returns the durations of the individual packets.
  Return: The total duration of all packets, or OP_HOLE if there was a
           hole.*/
static opus_int32 op_collect_audio_packets(OggOpusFile *_of,
 int _durations[255]){
  opus_int32 total_duration;
  int        op_count;
  /*Count the durations of all packets in the page.*/
  op_count=0;
  total_duration=0;
  for(;;){
    int ret;
    /*This takes advantage of undocumented libogg behavior that returned
       ogg_packet buffers are valid at least until the next page is
       submitted.
      Relying on this is not too terrible, as _none_ of the Ogg memory
       ownership/lifetime rules are well-documented.
But I can read its code and know this will work.*/
    ret=ogg_stream_packetout(&_of->os,_of->op+op_count);
    if(!ret)break;
    if(OP_UNLIKELY(ret<0)){
      /*We shouldn't get holes in the middle of pages.*/
      OP_ASSERT(op_count==0);
      /*Set the return value and break out of the loop.
        We want to make sure op_count gets set to 0, because we've ingested a
         page, so any previously loaded packets are now invalid.*/
      total_duration=OP_HOLE;
      break;
    }
    /*Unless libogg is broken, we can't get more than 255 packets from a
       single page.*/
    OP_ASSERT(op_count<255);
    _durations[op_count]=op_get_packet_duration(_of->op[op_count].packet,
     _of->op[op_count].bytes);
    if(OP_LIKELY(_durations[op_count]>0)){
      /*With at most 255 packets on a page (each at most 120*48 samples),
         this can't overflow a 32-bit total.*/
      total_duration+=_durations[op_count++];
    }
    /*Ignore packets with an invalid TOC sequence.*/
    else if(op_count>0){
      /*But save the granule position, if there was one.*/
      _of->op[op_count-1].granulepos=_of->op[op_count].granulepos;
    }
  }
  _of->op_pos=0;
  _of->op_count=op_count;
  return total_duration;
}

/*Starting from current cursor position, get the initial PCM offset of the
   next page.
  This also validates the granule position on the first page with a completed
   audio data packet, as required by the spec.
  If this link is completely empty (no pages with completed packets), then
   this function sets pcm_start=pcm_end=0 and returns the BOS page of the
   next link (if any).
  In the seekable case, we initialize pcm_end=-1 before calling this
   function, so that later we can detect that the link was empty before
   calling op_find_final_pcm_offset().
  [inout] _link: The link for which to find pcm_start.
  [out] _og:     Returns the BOS page of the next link if this link was
                  empty.
                 In the unseekable case, we can then feed this to
                  op_fetch_headers() to start the next link.
                 The caller may pass NULL (e.g., for seekable streams), in
                  which case this page will be discarded.
Return: 0 on success, 1 if there is a buffered BOS page available, or a
           negative value on unrecoverable error.*/
static int op_find_initial_pcm_offset(OggOpusFile *_of,
 OggOpusLink *_link,ogg_page *_og){
  ogg_page     og;
  opus_int64   page_offset;
  ogg_int64_t  pcm_start;
  ogg_int64_t  prev_packet_gp;
  ogg_int64_t  cur_page_gp;
  ogg_uint32_t serialno;
  opus_int32   total_duration;
  int          durations[255];
  int          cur_page_eos;
  int          op_count;
  int          pi;
  /*Use a local page buffer if the caller doesn't want the page back.*/
  if(_og==NULL)_og=&og;
  serialno=_of->os.serialno;
  op_count=0;
  /*We shouldn't have to initialize total_duration, but gcc is too dumb to
     figure out that op_count>0 implies we've been through the whole loop at
     least once.*/
  total_duration=0;
  do{
    page_offset=op_get_next_page(_of,_og,_of->end);
    /*We should get a page unless the file is truncated or mangled.
      Otherwise there are no audio data packets in the whole logical
       stream.*/
    if(OP_UNLIKELY(page_offset<0)){
      /*Fail if there was a read error.*/
      if(page_offset<OP_FALSE)return (int)page_offset;
      /*Fail if the pre-skip is non-zero, since it's asking us to skip more
         samples than exist.*/
      if(_link->head.pre_skip>0)return OP_EBADTIMESTAMP;
      _link->pcm_file_offset=0;
      /*Set pcm_end and end_offset so we can skip the call to
         op_find_final_pcm_offset().*/
      _link->pcm_start=_link->pcm_end=0;
      _link->end_offset=_link->data_offset;
      return 0;
    }
    /*Similarly, if we hit the next link in the chain, we've gone too far.*/
    if(OP_UNLIKELY(ogg_page_bos(_og))){
      if(_link->head.pre_skip>0)return OP_EBADTIMESTAMP;
      /*Set pcm_end and end_offset so we can skip the call to
         op_find_final_pcm_offset().*/
      _link->pcm_file_offset=0;
      _link->pcm_start=_link->pcm_end=0;
      _link->end_offset=_link->data_offset;
      /*Tell the caller we've got a buffered page for them.*/
      return 1;
    }
    /*Ignore pages from other streams (not strictly necessary, because of the
       checks in ogg_stream_pagein(), but saves some work).*/
    if(serialno!=(ogg_uint32_t)ogg_page_serialno(_og))continue;
    ogg_stream_pagein(&_of->os,_og);
    /*Bitrate tracking: add the header's bytes here.
      The body bytes are counted when we consume the packets.*/
    _of->bytes_tracked+=_og->header_len;
    /*Count the durations of all packets in the page.*/
    do total_duration=op_collect_audio_packets(_of,durations);
    /*Ignore holes.*/
    while(OP_UNLIKELY(total_duration<0));
    op_count=_of->op_count;
  }
  while(op_count<=0);
  /*We found the first page with a completed audio data packet: actually look
     at the granule position.
    RFC 3533 says, "A special value of -1 (in two's complement) indicates
     that no packets finish on this page," which does not say that a granule
     position that is NOT -1 indicates that some packets DO finish on that
     page (even though this was the intention, libogg itself violated this
     intention for years before we fixed it).
    The Ogg Opus specification only imposes its start-time requirements on
     the granule position of the first page with completed packets, so we
     ignore any set granule positions until then.*/
  cur_page_gp=_of->op[op_count-1].granulepos;
  /*But getting a packet without a valid granule position on the page is not
     okay.*/
  if(cur_page_gp==-1)return OP_EBADTIMESTAMP;
  cur_page_eos=_of->op[op_count-1].e_o_s;
  if(OP_LIKELY(!cur_page_eos)){
    /*The EOS flag wasn't set.
      Work backwards from the provided granule position to get the starting
       PCM offset.*/
    if(OP_UNLIKELY(op_granpos_add(&pcm_start,cur_page_gp,-total_duration)<0)){
      /*The starting granule position MUST not be smaller than the amount of
         audio on the first page with completed packets.*/
      return OP_EBADTIMESTAMP;
    }
  }
  else{
    /*The first page with completed packets was also the last.*/
    if(OP_LIKELY(op_granpos_add(&pcm_start,cur_page_gp,-total_duration)<0)){
      /*If there's less audio on the page than indicated by the granule
         position, then we're doing end-trimming, and the starting PCM offset
         is zero by spec mandate.*/
      pcm_start=0;
      /*However, the end-trimming MUST not ask us to trim more samples than
         exist after applying the pre-skip.*/
      if(OP_UNLIKELY(op_granpos_cmp(cur_page_gp,_link->head.pre_skip)<0)){
        return OP_EBADTIMESTAMP;
      }
    }
  }
  /*Timestamp the individual packets.*/
  prev_packet_gp=pcm_start;
  for(pi=0;pi<op_count;pi++){
    if(cur_page_eos){
      ogg_int64_t diff;
      OP_ALWAYS_TRUE(!op_granpos_diff(&diff,cur_page_gp,prev_packet_gp));
      diff=durations[pi]-diff;
      /*If we have samples to trim...*/
      if(diff>0){
        /*If we trimmed the entire packet, stop (the spec says encoders
           shouldn't do this, but we support it anyway).*/
        if(OP_UNLIKELY(diff>durations[pi]))break;
        _of->op[pi].granulepos=prev_packet_gp=cur_page_gp;
        /*Move the EOS flag to this packet, if necessary, so we'll trim the
           samples.*/
        _of->op[pi].e_o_s=1;
        continue;
      }
    }
    /*Update the granule position as normal.*/
    OP_ALWAYS_TRUE(!op_granpos_add(&_of->op[pi].granulepos,
     prev_packet_gp,durations[pi]));
    prev_packet_gp=_of->op[pi].granulepos;
  }
  /*Update the packet count after end-trimming.*/
  _of->op_count=pi;
  _of->cur_discard_count=_link->head.pre_skip;
  _link->pcm_file_offset=0;
  _of->prev_packet_gp=_link->pcm_start=pcm_start;
  _of->prev_page_offset=page_offset;
  return 0;
}

/*Starting from current cursor position, get the final PCM offset of the
   previous page.
This also validates the duration of the link, which, while not strictly
   required by the spec, we need to ensure duration calculations don't
   overflow.
  This is only done for seekable sources.
  We must validate that op_find_initial_pcm_offset() succeeded for this link
   before calling this function, otherwise it will scan the entire stream
   backwards until it reaches the start, and then fail.*/
static int op_find_final_pcm_offset(OggOpusFile *_of,
 const ogg_uint32_t *_serialnos,int _nserialnos,OggOpusLink *_link,
 opus_int64 _offset,ogg_uint32_t _end_serialno,ogg_int64_t _end_gp,
 ogg_int64_t *_total_duration){
  ogg_int64_t  total_duration;
  ogg_int64_t  duration;
  ogg_uint32_t cur_serialno;
  /*For the time being, fetch end PCM offset the simple way: scan backwards
     for the last page of this stream if the caller didn't already have it.*/
  cur_serialno=_link->serialno;
  if(_end_serialno!=cur_serialno||_end_gp==-1){
    _offset=op_get_last_page(_of,&_end_gp,_offset,
     cur_serialno,_serialnos,_nserialnos);
    if(OP_UNLIKELY(_offset<0))return (int)_offset;
  }
  /*At worst we should have found the first page with completed packets.*/
  if(OP_UNLIKELY(_offset<_link->data_offset))return OP_EBADLINK;
  /*This implementation requires that the difference between the first and
     last granule positions in each link be representable in a signed, 64-bit
     number, and that each link also have at least as many samples as the
     pre-skip requires.*/
  if(OP_UNLIKELY(op_granpos_diff(&duration,_end_gp,_link->pcm_start)<0)
   ||OP_UNLIKELY(duration<_link->head.pre_skip)){
    return OP_EBADTIMESTAMP;
  }
  /*We also require that the total duration be representable in a signed,
     64-bit number.*/
  duration-=_link->head.pre_skip;
  total_duration=*_total_duration;
  if(OP_UNLIKELY(OP_INT64_MAX-duration<total_duration))return OP_EBADTIMESTAMP;
  *_total_duration=total_duration+duration;
  _link->pcm_end=_end_gp;
  _link->end_offset=_offset;
  return 0;
}

/*Rescale the number _x from the range [0,_from] to [0,_to].
_from and _to must be positive.*/
static opus_int64 op_rescale64(opus_int64 _x,opus_int64 _from,opus_int64 _to){
  opus_int64 frac;
  opus_int64 ret;
  int        i;
  if(_x>=_from)return _to;
  if(_x<=0)return 0;
  /*Compute 63 bits of the binary fraction _x/_from, one bit at a time,
     then multiply it by _to bit-serially, all without overflow.*/
  frac=0;
  for(i=0;i<63;i++){
    frac<<=1;
    OP_ASSERT(_x<=_from);
    if(_x>=_from>>1){
      _x-=_from-_x;
      frac|=1;
    }
    else _x<<=1;
  }
  ret=0;
  for(i=0;i<63;i++){
    if(frac&1)ret=(ret&_to&1)+(ret>>1)+(_to>>1);
    else ret>>=1;
    frac>>=1;
  }
  return ret;
}

/*The minimum granule position spacing allowed for making predictions.
  This corresponds to about 1 second of audio at 48 kHz for both Opus and
   Vorbis, or one keyframe interval in Theora with the default keyframe
   spacing of 256.*/
#define OP_GP_SPACING_MIN (48000)

/*Try to estimate the location of the next link using the current seek
   records, assuming the initial granule position of any streams we've found
   is 0.*/
static opus_int64 op_predict_link_start(const OpusSeekRecord *_sr,int _nsr,
 opus_int64 _searched,opus_int64 _end_searched,opus_int32 _bias){
  opus_int64 bisect;
  int        sri;
  int        srj;
  /*Require that we be at least OP_CHUNK_SIZE from the end.
    We don't require that we be at least OP_CHUNK_SIZE from the beginning,
     because if we are we'll just scan forward without seeking.*/
  _end_searched-=OP_CHUNK_SIZE;
  if(_searched>=_end_searched)return -1;
  bisect=_end_searched;
  for(sri=0;sri<_nsr;sri++){
    ogg_int64_t  gp1;
    ogg_int64_t  gp2_min;
    ogg_uint32_t serialno1;
    opus_int64   offset1;
    /*If the granule position is negative, either it's invalid or we'd cause
       overflow.
      If it is larger than OP_INT64_MAX-OP_GP_SPACING_MIN, then no positive
       granule position would satisfy our minimum spacing requirements
       below.*/
    gp1=_sr[sri].gp;
    if(gp1<0||gp1>OP_INT64_MAX-OP_GP_SPACING_MIN)continue;
    /*We require some minimum distance between granule positions to make an
       estimate.
      We don't actually know what granule position scheme is being used,
       because we have no idea what kind of stream these came from.
Therefore we require a minimum spacing between them, with the expectation
       that while bitrates and granule position increments might vary locally
       in quite complex ways, they are globally smooth.*/
    gp2_min=gp1+OP_GP_SPACING_MIN;
    offset1=_sr[sri].offset;
    serialno1=_sr[sri].serialno;
    /*Scan the earlier (larger-offset) records for a usable second point.*/
    for(srj=sri;srj-->0;){
      ogg_int64_t gp2;
      opus_int64  offset2;
      opus_int64  num;
      ogg_int64_t den;
      ogg_int64_t ipart;
      gp2=_sr[srj].gp;
      if(gp2<gp2_min)continue;
      /*Oh, and also make sure these came from the same stream.*/
      if(_sr[srj].serialno!=serialno1)continue;
      offset2=_sr[srj].offset;
      /*For once, we can subtract with impunity.*/
      den=gp2-gp1;
      ipart=gp2/den;
      num=offset2-offset1;
      OP_ASSERT(num>0);
      /*Guard the upcoming ipart*num multiplication against overflow.*/
      if(ipart>0&&(offset2-_searched)/ipart<num)continue;
      offset2-=ipart*num;
      gp2-=ipart*den;
      offset2-=op_rescale64(gp2,den,num)-_bias;
      if(offset2<_searched)continue;
      bisect=OP_MIN(bisect,offset2);
      break;
    }
  }
  return bisect>=_end_searched?-1:bisect;
}

/*Finds each bitstream link, one at a time, using a bisection search.
  This has to begin by knowing the offset of the first link's initial page.*/
static int op_bisect_forward_serialno(OggOpusFile *_of,
 opus_int64 _searched,OpusSeekRecord *_sr,int _csr,
 ogg_uint32_t **_serialnos,int *_nserialnos,int *_cserialnos){
  ogg_page      og;
  OggOpusLink  *links;
  int           nlinks;
  int           clinks;
  ogg_uint32_t *serialnos;
  int           nserialnos;
  ogg_int64_t   total_duration;
  int           nsr;
  int           ret;
  links=_of->links;
  nlinks=clinks=_of->nlinks;
  total_duration=0;
  /*We start with one seek record, for the last page in the file.
    We build up a list of records for places we seek to during link
     enumeration.
    This list is kept sorted in reverse order.
We only care about seek locations that were _not_ in the current link,
     therefore we can add them one at a time to the end of the list as we
     improve the lower bound on the location where the next link starts.*/
  nsr=1;
  for(;;){
    opus_int64  end_searched;
    opus_int64  bisect;
    opus_int64  next;
    opus_int64  last;
    ogg_int64_t end_offset;
    ogg_int64_t end_gp;
    int         sri;
    serialnos=*_serialnos;
    nserialnos=*_nserialnos;
    /*Grow the links array geometrically when it is full.*/
    if(OP_UNLIKELY(nlinks>=clinks)){
      if(OP_UNLIKELY(clinks>INT_MAX-1>>1))return OP_EFAULT;
      clinks=2*clinks+1;
      OP_ASSERT(nlinks<clinks);
      links=(OggOpusLink *)_ogg_realloc(links,sizeof(*links)*clinks);
      if(OP_UNLIKELY(links==NULL))return OP_EFAULT;
      _of->links=links;
    }
    /*Invariants:
      We have the headers and serial numbers for the link beginning at
       'begin'.
      We have the offset and granule position of the last page in the file
       (potentially not a page we care about).*/
    /*Scan the seek records we already have to save us some bisection.*/
    for(sri=0;sri<nsr;sri++){
      if(op_lookup_serialno(_sr[sri].serialno,serialnos,nserialnos))break;
    }
    /*Is the last page in our current list of serial numbers?*/
    if(sri<=0)break;
    /*Last page wasn't found.
      We have at least one more link.*/
    last=-1;
    end_searched=_sr[sri-1].search_start;
    next=_sr[sri-1].offset;
    end_gp=-1;
    if(sri<nsr){
      _searched=_sr[sri].offset+_sr[sri].size;
      if(_sr[sri].serialno==links[nlinks-1].serialno){
        end_gp=_sr[sri].gp;
        end_offset=_sr[sri].offset;
      }
    }
    nsr=sri;
    bisect=-1;
    /*If we've already found the end of at least one link, try to pick the
       first bisection point at twice the average link size.
      This is a good choice for files with lots of links that are all about
       the same size.*/
    if(nlinks>1){
      opus_int64 last_offset;
      opus_int64 avg_link_size;
      opus_int64 upper_limit;
      last_offset=links[nlinks-1].offset;
      avg_link_size=last_offset/(nlinks-1);
      upper_limit=end_searched-OP_CHUNK_SIZE-avg_link_size;
      if(OP_LIKELY(last_offset>_searched-avg_link_size)
       &&OP_LIKELY(last_offset<upper_limit)){
        bisect=last_offset+avg_link_size;
        if(OP_LIKELY(bisect<upper_limit))bisect+=avg_link_size;
      }
    }
    /*We guard against garbage separating the last and first pages of two
       links below.*/
    while(_searched<end_searched){
      opus_int32 next_bias;
      /*If we don't have a better estimate, use simple bisection.*/
      if(bisect==-1)bisect=_searched+(end_searched-_searched>>1);
      /*If we're within OP_CHUNK_SIZE of the start, scan forward.*/
      if(bisect-_searched<OP_CHUNK_SIZE)bisect=_searched;
      /*Otherwise we're skipping data.
        Forget the end page, if we saw one, as we might miss a later one.*/
      else end_gp=-1;
      ret=op_seek_helper(_of,bisect);
      if(OP_UNLIKELY(ret<0))return ret;
      last=op_get_next_page(_of,&og,_sr[nsr-1].offset);
      if(OP_UNLIKELY(last<OP_FALSE))return (int)last;
      next_bias=0;
      if(last==OP_FALSE)end_searched=bisect;
      else{
        ogg_uint32_t serialno;
        ogg_int64_t  gp;
        serialno=ogg_page_serialno(&og);
        gp=ogg_page_granulepos(&og);
        if(!op_lookup_serialno(serialno,serialnos,nserialnos)){
          /*Unknown serial number: this page belongs to a later link, so the
             next link starts at or before it.*/
          end_searched=bisect;
          next=last;
          /*In reality we should always have enough room, but be paranoid.*/
          if(OP_LIKELY(nsr<_csr)){
            _sr[nsr].search_start=bisect;
            _sr[nsr].offset=last;
            OP_ASSERT(_of->offset-last>=0);
            OP_ASSERT(_of->offset-last<=OP_PAGE_SIZE_MAX);
            _sr[nsr].size=(opus_int32)(_of->offset-last);
            _sr[nsr].serialno=serialno;
            _sr[nsr].gp=gp;
            nsr++;
          }
        }
        else{
          _searched=_of->offset;
          next_bias=OP_CHUNK_SIZE;
          if(serialno==links[nlinks-1].serialno){
            /*This page was from the stream we want, remember it.
              If it's the last such page in the link, we won't have to go
               back looking for it later.*/
            end_gp=gp;
            end_offset=last;
          }
        }
      }
      bisect=op_predict_link_start(_sr,nsr,_searched,end_searched,next_bias);
    }
    /*Bisection point found.
      Get the final granule position of the previous link, assuming
       op_find_initial_pcm_offset() didn't already determine the link was
       empty.*/
    if(OP_LIKELY(links[nlinks-1].pcm_end==-1)){
      if(end_gp==-1){
        /*If we don't know where the end page is, we'll have to seek back and
           look for it, starting from the end of the link.*/
        end_offset=next;
        /*Also forget the last page we read.
          It won't be available after the seek.*/
        last=-1;
      }
      ret=op_find_final_pcm_offset(_of,serialnos,nserialnos,
       links+nlinks-1,end_offset,links[nlinks-1].serialno,end_gp,
       &total_duration);
      if(OP_UNLIKELY(ret<0))return ret;
    }
    if(last!=next){
      /*The last page we read was not the first page of the next link.
        Move the cursor position to the offset of that first page.
        This only performs an actual seek if the first page of the next link
         does not start at the end of the last page from the current Opus
         stream with a valid granule position.*/
      ret=op_seek_helper(_of,next);
      if(OP_UNLIKELY(ret<0))return ret;
    }
    ret=op_fetch_headers(_of,&links[nlinks].head,&links[nlinks].tags,
     _serialnos,_nserialnos,_cserialnos,last!=next?NULL:&og);
    if(OP_UNLIKELY(ret<0))return ret;
    /*Mark the current link count so it can be cleaned up on error.*/
    _of->nlinks=nlinks+1;
    links[nlinks].offset=next;
    links[nlinks].data_offset=_of->offset;
    links[nlinks].serialno=_of->os.serialno;
    links[nlinks].pcm_end=-1;
    /*This might consume a page from the next link, however the next
       bisection always starts with a seek.*/
    ret=op_find_initial_pcm_offset(_of,links+nlinks,NULL);
    if(OP_UNLIKELY(ret<0))return ret;
    links[nlinks].pcm_file_offset=total_duration;
    _searched=_of->offset;
    ++nlinks;
  }
  /*Last page is in the starting serialno list, so we've reached the last
     link.
    Now find the last granule position for it (if we didn't the first time we
     looked at the end of the stream, and if op_find_initial_pcm_offset()
     didn't already determine the link was empty).*/
  if(OP_LIKELY(links[nlinks-1].pcm_end==-1)){
    ret=op_find_final_pcm_offset(_of,serialnos,nserialnos,
     links+nlinks-1,_sr[0].offset,_sr[0].serialno,_sr[0].gp,&total_duration);
    if(OP_UNLIKELY(ret<0))return ret;
  }
  /*Trim back the links array if necessary.*/
  links=(OggOpusLink *)_ogg_realloc(links,sizeof(*links)*nlinks);
  if(OP_LIKELY(links!=NULL))_of->links=links;
  /*We also don't need these anymore.*/
  _ogg_free(*_serialnos);
  *_serialnos=NULL;
  *_cserialnos=*_nserialnos=0;
  return 0;
}

/*Apply the configured gain (header/track/album/absolute plus the user
   offset) to the decoder, if it is initialized.*/
static void op_update_gain(OggOpusFile *_of){
  OpusHead  *head;
  opus_int32 gain_q8;
  int        li;
  /*If decode isn't ready, then we'll apply the gain when we initialize the
     decoder.*/
  if(_of->ready_state<OP_INITSET)return;
  gain_q8=_of->gain_offset_q8;
  li=_of->seekable?_of->cur_link:0;
  head=&_of->links[li].head;
  /*We don't have to worry about overflow here because the header gain and
     track gain must lie in the range [-32768,32767], and the user-supplied
     offset has been pre-clamped to [-98302,98303].*/
  switch(_of->gain_type){
    case OP_ALBUM_GAIN:{
      int album_gain_q8;
      album_gain_q8=0;
      opus_tags_get_album_gain(&_of->links[li].tags,&album_gain_q8);
      gain_q8+=album_gain_q8;
      gain_q8+=head->output_gain;
    }break;
    case OP_TRACK_GAIN:{
      int track_gain_q8;
      track_gain_q8=0;
      opus_tags_get_track_gain(&_of->links[li].tags,&track_gain_q8);
      gain_q8+=track_gain_q8;
      gain_q8+=head->output_gain;
    }break;
    case OP_HEADER_GAIN:gain_q8+=head->output_gain;break;
    case OP_ABSOLUTE_GAIN:break;
    default:OP_ASSERT(0);
  }
  gain_q8=OP_CLAMP(-32768,gain_q8,32767);
  OP_ASSERT(_of->od!=NULL);
#if defined(OPUS_SET_GAIN)
  opus_multistream_decoder_ctl(_of->od,OPUS_SET_GAIN(gain_q8));
#else
/*A fallback that works with both float and fixed-point is a bunch of work,
   so just force people to use a sufficiently new version.
This is deployed well enough at this point that this shouldn't be a
   burden.*/
# error "libopus 1.0.1 or later required"
#endif
}

/*Create or reset the multistream decoder so it matches the current link's
   channel mapping, then apply the configured gain.
  Return: 0 on success, or OP_EFAULT if the stream is not ready or the
           decoder could not be created.*/
static int op_make_decode_ready(OggOpusFile *_of){
  const OpusHead *head;
  int             li;
  int             stream_count;
  int             coupled_count;
  int             channel_count;
  if(_of->ready_state>OP_STREAMSET)return 0;
  if(OP_UNLIKELY(_of->ready_state<OP_STREAMSET))return OP_EFAULT;
  li=_of->seekable?_of->cur_link:0;
  head=&_of->links[li].head;
  stream_count=head->stream_count;
  coupled_count=head->coupled_count;
  channel_count=head->channel_count;
  /*Check to see if the current decoder is compatible with the current
     link.*/
  if(_of->od!=NULL&&_of->od_stream_count==stream_count
   &&_of->od_coupled_count==coupled_count&&_of->od_channel_count==channel_count
   &&memcmp(_of->od_mapping,head->mapping,
   sizeof(*head->mapping)*channel_count)==0){
    opus_multistream_decoder_ctl(_of->od,OPUS_RESET_STATE);
  }
  else{
    int err;
    opus_multistream_decoder_destroy(_of->od);
    _of->od=opus_multistream_decoder_create(48000,channel_count,
     stream_count,coupled_count,head->mapping,&err);
    if(_of->od==NULL)return OP_EFAULT;
    _of->od_stream_count=stream_count;
    _of->od_coupled_count=coupled_count;
    _of->od_channel_count=channel_count;
    memcpy(_of->od_mapping,head->mapping,sizeof(*head->mapping)*channel_count);
  }
  _of->ready_state=OP_INITSET;
  _of->bytes_tracked=0;
  _of->samples_tracked=0;
#if !defined(OP_FIXED_POINT)
  _of->state_channel_count=0;
  /*Use the serial number for the PRNG seed to get repeatable output for
     straight play-throughs.*/
  _of->dither_seed=_of->links[li].serialno;
#endif
  op_update_gain(_of);
  return 0;
}

static int op_open_seekable2_impl(OggOpusFile *_of){
  /*64 seek records should be enough for anybody.
Actually, with a bisection search in a 63-bit range down to OP_CHUNK_SIZE
     granularity, much more than enough.*/
  OpusSeekRecord sr[64];
  opus_int64     data_offset;
  int            ret;
  /*We can seek, so set out learning all about this file.*/
  (*_of->callbacks.seek)(_of->stream,0,SEEK_END);
  _of->offset=_of->end=(*_of->callbacks.tell)(_of->stream);
  if(OP_UNLIKELY(_of->end<0))return OP_EREAD;
  data_offset=_of->links[0].data_offset;
  if(OP_UNLIKELY(_of->end<data_offset))return OP_EBADLINK;
  /*Get the offset of the last page of the physical bitstream, or, if we're
     lucky, the last Opus page of the first link, as most Ogg Opus files will
     contain a single logical bitstream.*/
  ret=op_get_prev_page_serial(_of,sr,_of->end,
   _of->links[0].serialno,_of->serialnos,_of->nserialnos);
  if(OP_UNLIKELY(ret<0))return ret;
  /*If there's any trailing junk, forget about it.*/
  _of->end=sr[0].offset+sr[0].size;
  if(OP_UNLIKELY(_of->end<data_offset))return OP_EBADLINK;
  /*Now enumerate the bitstream structure.*/
  return op_bisect_forward_serialno(_of,data_offset,sr,sizeof(sr)/sizeof(*sr),
   &_of->serialnos,&_of->nserialnos,&_of->cserialnos);
}

static int op_open_seekable2(OggOpusFile *_of){
  ogg_sync_state    oy_start;
  ogg_stream_state  os_start;
  ogg_packet       *op_start;
  opus_int64        prev_page_offset;
  opus_int64        start_offset;
  int               start_op_count;
  int               ret;
  /*We're partially open and have a first link header state in storage in
     _of.
    Save off that stream state so we can come back to it.
    It would be simpler to just dump all this state and seek back to
     links[0].data_offset when we're done.
    But we do the extra work to allow us to seek back to _exactly_ the same
     stream position we're at now.
    This allows, e.g., the HTTP backend to continue reading from the original
     connection (if it's still available), instead of opening a new one.
This means we can open and start playing a normal Opus file with a single
     link and reasonable packet sizes using only two HTTP requests.*/
  start_op_count=_of->op_count;
  /*This is a bit too large to put on the stack unconditionally.*/
  op_start=(ogg_packet *)_ogg_malloc(sizeof(*op_start)*start_op_count);
  if(op_start==NULL)return OP_EFAULT;
  /*Save off the current framing and stream state by value.*/
  *&oy_start=_of->oy;
  *&os_start=_of->os;
  prev_page_offset=_of->prev_page_offset;
  start_offset=_of->offset;
  memcpy(op_start,_of->op,sizeof(*op_start)*start_op_count);
  OP_ASSERT((*_of->callbacks.tell)(_of->stream)==op_position(_of));
  ogg_sync_init(&_of->oy);
  ogg_stream_init(&_of->os,-1);
  ret=op_open_seekable2_impl(_of);
  /*Restore the old stream state.*/
  ogg_stream_clear(&_of->os);
  ogg_sync_clear(&_of->oy);
  *&_of->oy=*&oy_start;
  *&_of->os=*&os_start;
  _of->offset=start_offset;
  _of->op_count=start_op_count;
  memcpy(_of->op,op_start,sizeof(*_of->op)*start_op_count);
  _ogg_free(op_start);
  _of->prev_packet_gp=_of->links[0].pcm_start;
  _of->prev_page_offset=prev_page_offset;
  _of->cur_discard_count=_of->links[0].head.pre_skip;
  if(OP_UNLIKELY(ret<0))return ret;
  /*And restore the position indicator.*/
  ret=(*_of->callbacks.seek)(_of->stream,op_position(_of),SEEK_SET);
  return OP_UNLIKELY(ret<0)?OP_EREAD:0;
}

/*Clear out the current logical bitstream decoder.*/
static void op_decode_clear(OggOpusFile *_of){
  /*We don't actually free the decoder.
We might be able to re-use it for the next link.*/
  _of->op_count=0;
  _of->od_buffer_size=0;
  _of->prev_packet_gp=-1;
  _of->prev_page_offset=-1;
  if(!_of->seekable){
    OP_ASSERT(_of->ready_state>=OP_INITSET);
    opus_tags_clear(&_of->links[0].tags);
  }
  _of->ready_state=OP_OPENED;
}

/*Release all resources owned by the OggOpusFile (decoder, links, tags,
   libogg state), and invoke the close callback if one was provided.
  Does not free the OggOpusFile structure itself.*/
static void op_clear(OggOpusFile *_of){
  OggOpusLink *links;
  _ogg_free(_of->od_buffer);
  if(_of->od!=NULL)opus_multistream_decoder_destroy(_of->od);
  links=_of->links;
  if(!_of->seekable){
    /*In the unseekable case only link 0's tags are ever populated.*/
    if(_of->ready_state>OP_OPENED||_of->ready_state==OP_PARTOPEN){
      opus_tags_clear(&links[0].tags);
    }
  }
  else if(OP_LIKELY(links!=NULL)){
    int nlinks;
    int link;
    nlinks=_of->nlinks;
    for(link=0;link<nlinks;link++)opus_tags_clear(&links[link].tags);
  }
  _ogg_free(links);
  _ogg_free(_of->serialnos);
  ogg_stream_clear(&_of->os);
  ogg_sync_clear(&_of->oy);
  if(_of->callbacks.close!=NULL)(*_of->callbacks.close)(_of->stream);
}

/*First stage of opening: read headers and find the initial PCM offset of
   the first (non-empty) link, leaving the file in the OP_PARTOPEN state.
  Return: 0 on success, or a negative value on error.*/
static int op_open1(OggOpusFile *_of,
 void *_stream,const OpusFileCallbacks *_cb,
 const unsigned char *_initial_data,size_t _initial_bytes){
  ogg_page  og;
  ogg_page *pog;
  int       seekable;
  int       ret;
  memset(_of,0,sizeof(*_of));
  if(OP_UNLIKELY(_initial_bytes>(size_t)LONG_MAX))return OP_EFAULT;
  _of->end=-1;
  _of->stream=_stream;
  *&_of->callbacks=*_cb;
  /*At a minimum, we need to be able to read data.*/
  if(OP_UNLIKELY(_of->callbacks.read==NULL))return OP_EREAD;
  /*Initialize the framing state.*/
  ogg_sync_init(&_of->oy);
  /*Perhaps some data was previously read into a buffer for testing against
     other stream types.
    Allow initialization from this previously read data (especially as we may
     be reading from a non-seekable stream).
    This requires copying it into a buffer allocated by ogg_sync_buffer() and
     doesn't support seeking, so this is not a good mechanism to use for
     decoding entire files from RAM.*/
  if(_initial_bytes>0){
    char *buffer;
    buffer=ogg_sync_buffer(&_of->oy,(long)_initial_bytes);
    memcpy(buffer,_initial_data,_initial_bytes*sizeof(*buffer));
    ogg_sync_wrote(&_of->oy,(long)_initial_bytes);
  }
  /*Can we seek?
Stevens suggests the seek test is portable.
    It's actually not for files on win32, but we address that by fixing it in
     our callback implementation (see stream.c).*/
  seekable=_cb->seek!=NULL&&(*_cb->seek)(_stream,0,SEEK_CUR)!=-1;
  /*If seek is implemented, tell must also be implemented.*/
  if(seekable){
    opus_int64 pos;
    if(OP_UNLIKELY(_of->callbacks.tell==NULL))return OP_EINVAL;
    pos=(*_of->callbacks.tell)(_of->stream);
    /*If the current position is not equal to the initial bytes consumed,
       absolute seeking will not work.*/
    if(OP_UNLIKELY(pos!=(opus_int64)_initial_bytes))return OP_EINVAL;
  }
  _of->seekable=seekable;
  /*Don't seek yet.
    Set up a 'single' (current) logical bitstream entry for partial open.*/
  _of->links=(OggOpusLink *)_ogg_malloc(sizeof(*_of->links));
  /*The serialno gets filled in later by op_fetch_headers().*/
  ogg_stream_init(&_of->os,-1);
  pog=NULL;
  for(;;){
    /*Fetch all BOS pages, store the Opus header and all seen serial numbers,
       and load subsequent Opus setup headers.*/
    ret=op_fetch_headers(_of,&_of->links[0].head,&_of->links[0].tags,
     &_of->serialnos,&_of->nserialnos,&_of->cserialnos,pog);
    if(OP_UNLIKELY(ret<0))break;
    _of->nlinks=1;
    _of->links[0].offset=0;
    _of->links[0].data_offset=_of->offset;
    _of->links[0].pcm_end=-1;
    _of->links[0].serialno=_of->os.serialno;
    /*Fetch the initial PCM offset.*/
    ret=op_find_initial_pcm_offset(_of,_of->links,&og);
    if(seekable||OP_LIKELY(ret<=0))break;
    /*This link was empty, but we already have the BOS page for the next one
       in og.
      We can't seek, so start processing the next link right now.*/
    opus_tags_clear(&_of->links[0].tags);
    _of->nlinks=0;
    if(!seekable)_of->cur_link++;
    pog=&og;
  }
  if(OP_LIKELY(ret>=0))_of->ready_state=OP_PARTOPEN;
  return ret;
}

/*Second stage of opening: enumerate the chain structure (seekable case) and
   bring the decoder up, leaving the file in the OP_INITSET state.
  On failure the structure is cleared (but the stream is not closed).*/
static int op_open2(OggOpusFile *_of){
  int ret;
  OP_ASSERT(_of->ready_state==OP_PARTOPEN);
  if(_of->seekable){
    _of->ready_state=OP_OPENED;
    ret=op_open_seekable2(_of);
  }
  else ret=0;
  if(OP_LIKELY(ret>=0)){
    /*We have buffered packets from op_find_initial_pcm_offset().
Move to OP_INITSET so we can use them.*/
    _of->ready_state=OP_STREAMSET;
    ret=op_make_decode_ready(_of);
    if(OP_LIKELY(ret>=0))return 0;
  }
  /*Don't auto-close the stream on failure.*/
  _of->callbacks.close=NULL;
  op_clear(_of);
  return ret;
}

OggOpusFile *op_test_callbacks(void *_stream,const OpusFileCallbacks *_cb,
 const unsigned char *_initial_data,size_t _initial_bytes,int *_error){
  OggOpusFile *of;
  int          ret;
  of=(OggOpusFile *)_ogg_malloc(sizeof(*of));
  ret=OP_EFAULT;
  if(OP_LIKELY(of!=NULL)){
    ret=op_open1(of,_stream,_cb,_initial_data,_initial_bytes);
    if(OP_LIKELY(ret>=0)){
      if(_error!=NULL)*_error=0;
      return of;
    }
    /*Don't auto-close the stream on failure.*/
    of->callbacks.close=NULL;
    op_clear(of);
    _ogg_free(of);
  }
  if(_error!=NULL)*_error=ret;
  return NULL;
}

OggOpusFile *op_open_callbacks(void *_stream,const OpusFileCallbacks *_cb,
 const unsigned char *_initial_data,size_t _initial_bytes,int *_error){
  OggOpusFile *of;
  /*Partial open first, then complete it; on op_open2() failure the
     structure has already been cleared, so only the allocation is freed.*/
  of=op_test_callbacks(_stream,_cb,_initial_data,_initial_bytes,_error);
  if(OP_LIKELY(of!=NULL)){
    int ret;
    ret=op_open2(of);
    if(OP_LIKELY(ret>=0))return of;
    if(_error!=NULL)*_error=ret;
    _ogg_free(of);
  }
  return NULL;
}

/*Convenience routine to clean up from failure for the open functions that
   create their own streams.*/
static OggOpusFile *op_open_close_on_failure(void *_stream,
 const OpusFileCallbacks *_cb,int *_error){
  OggOpusFile *of;
  if(OP_UNLIKELY(_stream==NULL)){
    if(_error!=NULL)*_error=OP_EFAULT;
    return NULL;
  }
  of=op_open_callbacks(_stream,_cb,NULL,0,_error);
  if(OP_UNLIKELY(of==NULL))(*_cb->close)(_stream);
  return of;
}

OggOpusFile *op_open_file(const char *_path,int *_error){
  OpusFileCallbacks cb;
  return op_open_close_on_failure(op_fopen(&cb,_path,"rb"),&cb,_error);
}

OggOpusFile *op_open_memory(const unsigned char *_data,size_t _size,
 int *_error){
  OpusFileCallbacks cb;
  return op_open_close_on_failure(op_mem_stream_create(&cb,_data,_size),&cb,
   _error);
}

/*Convenience routine to clean up from failure for the open functions that
   create their own streams.*/
static OggOpusFile *op_test_close_on_failure(void *_stream,
 const OpusFileCallbacks *_cb,int *_error){
  OggOpusFile *of;
  /*NULL stream (e.g., op_fopen() failed): report OP_EFAULT; nothing to
     close.*/
  if(OP_UNLIKELY(_stream==NULL)){
    if(_error!=NULL)*_error=OP_EFAULT;
    return NULL;
  }
  of=op_test_callbacks(_stream,_cb,NULL,0,_error);
  /*On failure, close the stream we created on the caller's behalf.*/
  if(OP_UNLIKELY(of==NULL))(*_cb->close)(_stream);
  return of;
}

/*Partially open the file at _path (binary mode); finish with
   op_test_open().*/
OggOpusFile *op_test_file(const char *_path,int *_error){
  OpusFileCallbacks cb;
  return op_test_close_on_failure(op_fopen(&cb,_path,"rb"),&cb,_error);
}

/*Partially open a stream backed by the caller's memory buffer.*/
OggOpusFile *op_test_memory(const unsigned char *_data,size_t _size,
 int *_error){
  OpusFileCallbacks cb;
  return op_test_close_on_failure(op_mem_stream_create(&cb,_data,_size),&cb,
   _error);
}

/*Finish opening a partially-opened (OP_PARTOPEN) file.
  Return: 0 on success, or a negative error code on failure.*/
int op_test_open(OggOpusFile *_of){
  int ret;
  if(OP_UNLIKELY(_of->ready_state!=OP_PARTOPEN))return OP_EINVAL;
  ret=op_open2(_of);
  /*op_open2() will clear this structure on failure.
    Reset its contents to prevent double-frees in op_free().*/
  if(OP_UNLIKELY(ret<0))memset(_of,0,sizeof(*_of));
  return ret;
}

/*Release all memory used by an OggOpusFile.
  Safe to call with NULL.*/
void op_free(OggOpusFile *_of){
  if(OP_LIKELY(_of!=NULL)){
    op_clear(_of);
    _ogg_free(_of);
  }
}

/*Whether the stream supports seeking (established when it was opened).*/
int op_seekable(const OggOpusFile *_of){
  return _of->seekable;
}

/*Number of chained links enumerated in the stream.*/
int op_link_count(const OggOpusFile *_of){
  return _of->nlinks;
}

/*Serial number of link _li; out-of-range _li is clamped, negative _li (or
   any _li for an unseekable stream) selects the current link.*/
opus_uint32 op_serialno(const OggOpusFile *_of,int _li){
  if(OP_UNLIKELY(_li>=_of->nlinks))_li=_of->nlinks-1;
  if(!_of->seekable)_li=0;
  return _of->links[_li<0?_of->cur_link:_li].serialno;
}

/*Channel count of link _li (same _li conventions as op_serialno()).*/
int op_channel_count(const OggOpusFile *_of,int _li){
  return op_head(_of,_li)->channel_count;
}

/*Compressed size in bytes of link _li, or of the whole stream if _li<0.
  Requires an opened, seekable stream; returns OP_EINVAL otherwise.*/
opus_int64 op_raw_total(const OggOpusFile *_of,int _li){
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED)
   ||OP_UNLIKELY(!_of->seekable)
   ||OP_UNLIKELY(_li>=_of->nlinks)){
    return OP_EINVAL;
  }
  if(_li<0)return _of->end;
  return (_li+1>=_of->nlinks?_of->end:_of->links[_li+1].offset)
   -(_li>0?_of->links[_li].offset:0);
}

/*PCM length (in 48 kHz samples, pre-skip excluded) of link _li, or of the
   whole stream if _li<0.
  Requires an opened, seekable stream; returns OP_EINVAL otherwise.*/
ogg_int64_t op_pcm_total(const OggOpusFile *_of,int _li){
  OggOpusLink *links;
  ogg_int64_t  pcm_total;
  ogg_int64_t  diff;
  int          nlinks;
  nlinks=_of->nlinks;
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED)
   ||OP_UNLIKELY(!_of->seekable)
   ||OP_UNLIKELY(_li>=nlinks)){
    return OP_EINVAL;
  }
  links=_of->links;
  /*We verify that the granule position differences are larger than the
     pre-skip and that the total duration does not overflow during link
     enumeration, so we don't have to check here.*/
  pcm_total=0;
  if(_li<0){
    /*Whole stream: samples before the last link, plus the last link's own
       duration computed below.*/
    pcm_total=links[nlinks-1].pcm_file_offset;
    _li=nlinks-1;
  }
  OP_ALWAYS_TRUE(!op_granpos_diff(&diff,
   links[_li].pcm_end,links[_li].pcm_start));
  return pcm_total+(diff-links[_li].head.pre_skip);
}

/*ID header of link _li (same _li conventions as op_serialno()).*/
const OpusHead *op_head(const OggOpusFile *_of,int _li){
  if(OP_UNLIKELY(_li>=_of->nlinks))_li=_of->nlinks-1;
  if(!_of->seekable)_li=0;
  return &_of->links[_li<0?_of->cur_link:_li].head;
}

/*Comment header of link _li, or NULL if an unseekable stream has not yet
   read the tags for the current link.*/
const OpusTags *op_tags(const OggOpusFile *_of,int _li){
  if(OP_UNLIKELY(_li>=_of->nlinks))_li=_of->nlinks-1;
  if(!_of->seekable){
    if(_of->ready_state<OP_STREAMSET&&_of->ready_state!=OP_PARTOPEN){
      return NULL;
    }
    _li=0;
  }
  else if(_li<0)_li=_of->ready_state>=OP_STREAMSET?_of->cur_link:0;
  return &_of->links[_li].tags;
}

/*Index of the link currently being decoded, or OP_EINVAL if not opened.*/
int op_current_link(const OggOpusFile *_of){
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  return _of->cur_link;
}

/*Compute an average bitrate given a byte and sample count.
  Return: The bitrate in bits per second.*/
static opus_int32 op_calc_bitrate(opus_int64 _bytes,ogg_int64_t _samples){
  if(OP_UNLIKELY(_samples<=0))return OP_INT32_MAX;
  /*These rates are absurd, but let's handle them anyway.*/
  if(OP_UNLIKELY(_bytes>(OP_INT64_MAX-(_samples>>1))/(48000*8))){
    ogg_int64_t den;
    if(OP_UNLIKELY(_bytes/(OP_INT32_MAX/(48000*8))>=_samples)){
      return OP_INT32_MAX;
    }
    den=_samples/(48000*8);
    return (opus_int32)((_bytes+(den>>1))/den);
  }
  /*This can't actually overflow in normal operation: even with a pre-skip of
     545 2.5 ms frames with 8 streams running at 1282*8+1 bytes per packet
     (1275 byte frames + Opus framing overhead + Ogg lacing values), that all
     produce a single sample of decoded output, we still don't top 45 Mbps.
    The only way to get bitrates larger than that is with excessive Opus
     padding, more encoded streams than output channels, or lots and lots of
     Ogg pages with no packets on them.*/
  return (opus_int32)OP_MIN((_bytes*48000*8+(_samples>>1))/_samples,
   OP_INT32_MAX);
}

/*Average bitrate of link _li, or of the whole stream if _li<0.
  Requires an opened, seekable stream; returns OP_EINVAL otherwise.*/
opus_int32 op_bitrate(const OggOpusFile *_of,int _li){
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED)||OP_UNLIKELY(!_of->seekable)
   ||OP_UNLIKELY(_li>=_of->nlinks)){
    return OP_EINVAL;
  }
  return op_calc_bitrate(op_raw_total(_of,_li),op_pcm_total(_of,_li));
}

/*Bitrate of the data decoded since the last call (resets the byte/sample
   counters).
  Returns OP_FALSE if no samples have been tracked yet.*/
opus_int32 op_bitrate_instant(OggOpusFile *_of){
  ogg_int64_t samples_tracked;
  opus_int32  ret;
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  samples_tracked=_of->samples_tracked;
  if(OP_UNLIKELY(samples_tracked==0))return OP_FALSE;
  ret=op_calc_bitrate(_of->bytes_tracked,samples_tracked);
  _of->bytes_tracked=0;
  _of->samples_tracked=0;
  return ret;
}

/*Given a serialno, find a link with a corresponding Opus stream, if it
   exists.
  Return: The index of the link to which the page belongs, or a negative
           number if it was not a desired Opus bitstream section.*/
static int op_get_link_from_serialno(const OggOpusFile *_of,int _cur_link,
 opus_int64 _page_offset,ogg_uint32_t _serialno){
  const OggOpusLink *links;
  int                nlinks;
  int                li_lo;
  int                li_hi;
  OP_ASSERT(_of->seekable);
  links=_of->links;
  nlinks=_of->nlinks;
  li_lo=0;
  /*Start off by guessing we're just a multiplexed page in the current
     link.*/
  li_hi=_cur_link+1<nlinks&&_page_offset<links[_cur_link+1].offset?
   _cur_link+1:nlinks;
  /*Binary search on the links' starting offsets.
    Note: the shift binds tighter than it reads; li_lo+((li_hi-li_lo)>>1) is
     what is computed, since >> has lower precedence than -.*/
  do{
    if(_page_offset>=links[_cur_link].offset)li_lo=_cur_link;
    else li_hi=_cur_link;
    _cur_link=li_lo+(li_hi-li_lo>>1);
  }
  while(li_hi-li_lo>1);
  /*We've identified the link that should contain this page.
    Make sure it's a page we care about.*/
  if(links[_cur_link].serialno!=_serialno)return OP_FALSE;
  return _cur_link;
}

/*Fetch and process a page.
  This handles the case where we're at a bitstream boundary and dumps the
   decoding machine.
  If the decoding machine is unloaded, it loads it.
It also keeps prev_packet_gp up to date (seek and read both use this).
  _og:           An optional page to process instead of reading one from the
                  stream (consumed; set to NULL after use).
  _page_offset:  The byte offset of _og, if it was provided, or -1.
  _spanp:        Whether to continue into the next link at a bitstream
                  boundary (if 0, returns OP_EOF instead).
  _ignore_holes: Whether to suppress reporting OP_HOLE for gaps in the page
                  sequence numbers.
  Return: <0) Error, OP_HOLE (lost packet), or OP_EOF.
           0) Got at least one audio data packet.*/
static int op_fetch_and_process_page(OggOpusFile *_of,
 ogg_page *_og,opus_int64 _page_offset,int _spanp,int _ignore_holes){
  OggOpusLink  *links;
  ogg_uint32_t  cur_serialno;
  int           seekable;
  int           cur_link;
  int           ret;
  /*We shouldn't get here if we have unprocessed packets.*/
  OP_ASSERT(_of->ready_state<OP_INITSET||_of->op_pos>=_of->op_count);
  seekable=_of->seekable;
  links=_of->links;
  cur_link=seekable?_of->cur_link:0;
  cur_serialno=links[cur_link].serialno;
  /*Handle one page.*/
  for(;;){
    ogg_page og;
    OP_ASSERT(_of->ready_state>=OP_OPENED);
    /*If we were given a page to use, use it.*/
    if(_og!=NULL){
      /*Struct copy (NOTE(review): the *& form is presumably deliberate to
         sidestep a compiler diagnostic — confirm before simplifying).*/
      *&og=*_og;
      _og=NULL;
    }
    /*Keep reading until we get a page with the correct serialno.*/
    else _page_offset=op_get_next_page(_of,&og,_of->end);
    /*EOF: Leave uninitialized.*/
    if(_page_offset<0)return _page_offset<OP_FALSE?(int)_page_offset:OP_EOF;
    if(OP_LIKELY(_of->ready_state>=OP_STREAMSET)
     &&cur_serialno!=(ogg_uint32_t)ogg_page_serialno(&og)){
      /*Two possibilities:
         1) Another stream is multiplexed into this logical section, or*/
      if(OP_LIKELY(!ogg_page_bos(&og)))continue;
      /* 2) Our decoding just traversed a bitstream boundary.*/
      if(!_spanp)return OP_EOF;
      if(OP_LIKELY(_of->ready_state>=OP_INITSET))op_decode_clear(_of);
    }
    /*Bitrate tracking: add the header's bytes here.
      The body bytes are counted when we consume the packets.*/
    else _of->bytes_tracked+=og.header_len;
    /*Do we need to load a new machine before submitting the page?
      This is different in the seekable and non-seekable cases.
      In the seekable case, we already have all the header information loaded
       and cached.
      We just initialize the machine with it and continue on our merry way.
      In the non-seekable (streaming) case, we'll only be at a boundary if we
       just left the previous logical bitstream, and we're now nominally at
       the header of the next bitstream.*/
    if(OP_UNLIKELY(_of->ready_state<OP_STREAMSET)){
      if(seekable){
        ogg_uint32_t serialno;
        serialno=ogg_page_serialno(&og);
        /*Match the serialno to bitstream section.*/
        OP_ASSERT(cur_link>=0&&cur_link<_of->nlinks);
        if(links[cur_link].serialno!=serialno){
          /*It wasn't a page from the current link.
            Is it from the next one?*/
          if(OP_LIKELY(cur_link+1<_of->nlinks&&links[cur_link+1].serialno==
           serialno)){
            cur_link++;
          }
          else{
            int new_link;
            new_link=
             op_get_link_from_serialno(_of,cur_link,_page_offset,serialno);
            /*Not a desired Opus bitstream section.
              Keep trying.*/
            if(new_link<0)continue;
            cur_link=new_link;
          }
        }
        cur_serialno=serialno;
        _of->cur_link=cur_link;
        ogg_stream_reset_serialno(&_of->os,serialno);
        _of->ready_state=OP_STREAMSET;
        /*If we're at the start of this link, initialize the granule position
           and pre-skip tracking.*/
        if(_page_offset<=links[cur_link].data_offset){
          _of->prev_packet_gp=links[cur_link].pcm_start;
          _of->prev_page_offset=-1;
          _of->cur_discard_count=links[cur_link].head.pre_skip;
          /*Ignore a hole at the start of a new link (this is common for
             streams joined in the middle) or after seeking.*/
          _ignore_holes=1;
        }
      }
      else{
        do{
          /*We're streaming.
            Fetch the two header packets, build the info struct.*/
          ret=op_fetch_headers(_of,&links[0].head,&links[0].tags,
           NULL,NULL,NULL,&og);
          if(OP_UNLIKELY(ret<0))return ret;
          /*op_find_initial_pcm_offset() will suppress any initial hole for
             us, so no need to set _ignore_holes.*/
          ret=op_find_initial_pcm_offset(_of,links,&og);
          if(OP_UNLIKELY(ret<0))return ret;
          _of->links[0].serialno=cur_serialno=_of->os.serialno;
          _of->cur_link++;
        }
        /*If the link was empty, keep going, because we already have the BOS
           page of the next one in og.*/
        while(OP_UNLIKELY(ret>0));
        /*If we didn't get any packets out of op_find_initial_pcm_offset(),
           keep going (this is possible if end-trimming trimmed them all).*/
        if(_of->op_count<=0)continue;
        /*Otherwise, we're done.
          TODO: This resets bytes_tracked, which misses the header bytes
           already processed by op_find_initial_pcm_offset().*/
        ret=op_make_decode_ready(_of);
        if(OP_UNLIKELY(ret<0))return ret;
        return 0;
      }
    }
    /*The buffered page is the data we want, and we're ready for it.
      Add it to the stream state.*/
    if(OP_UNLIKELY(_of->ready_state==OP_STREAMSET)){
      ret=op_make_decode_ready(_of);
      if(OP_UNLIKELY(ret<0))return ret;
    }
    /*Extract all the packets from the current page.*/
    ogg_stream_pagein(&_of->os,&og);
    if(OP_LIKELY(_of->ready_state>=OP_INITSET)){
      opus_int32 total_duration;
      int        durations[255];
      int        op_count;
      int        report_hole;
      report_hole=0;
      total_duration=op_collect_audio_packets(_of,durations);
      if(OP_UNLIKELY(total_duration<0)){
        /*libogg reported a hole (a gap in the page sequence numbers).
          Drain the packets from the page anyway.
          If we don't, they'll still be there when we fetch the next page.
          Then, when we go to pull out packets, we might get more than 255,
           which would overrun our packet buffer.
          We repeat this call until we get any actual packets, since we might
           have buffered multiple out-of-sequence pages with no packets on
           them.*/
        do total_duration=op_collect_audio_packets(_of,durations);
        while(total_duration<0);
        if(!_ignore_holes){
          /*Report the hole to the caller after we finish timestamping the
             packets.*/
          report_hole=1;
          /*We had lost or damaged pages, so reset our granule position
             tracking.
            This makes holes behave the same as a small raw seek.
            If the next page is the EOS page, we'll discard it (because we
             can't perform end trimming properly), and we'll always discard
             at least 80 ms of audio (to allow decoder state to re-converge).
            We could try to fill in the gap with PLC by looking at timestamps
             in the non-EOS case, but that's complicated and error prone and
             we can't rely on the timestamps being valid.*/
          _of->prev_packet_gp=-1;
        }
      }
      op_count=_of->op_count;
      /*If we found at least one audio data packet, compute per-packet
         granule positions for them.*/
      if(op_count>0){
        ogg_int64_t diff;
        ogg_int64_t prev_packet_gp;
        ogg_int64_t cur_packet_gp;
        ogg_int64_t cur_page_gp;
        int         cur_page_eos;
        int         pi;
        cur_page_gp=_of->op[op_count-1].granulepos;
        cur_page_eos=_of->op[op_count-1].e_o_s;
        prev_packet_gp=_of->prev_packet_gp;
        if(OP_UNLIKELY(prev_packet_gp==-1)){
          opus_int32 cur_discard_count;
          /*This is the first call after a raw seek.
            Try to reconstruct prev_packet_gp from scratch.*/
          OP_ASSERT(seekable);
          if(OP_UNLIKELY(cur_page_eos)){
            /*If the first page we hit after our seek was the EOS page, and
               we didn't start from data_offset or before, we don't have
               enough information to do end-trimming.
              Proceed to the next link, rather than risk playing back some
               samples that shouldn't have been played.*/
            _of->op_count=0;
            if(report_hole)return OP_HOLE;
            continue;
          }
          /*By default discard 80 ms of data after a seek, unless we seek
             into the pre-skip region.*/
          cur_discard_count=80*48;
          cur_page_gp=_of->op[op_count-1].granulepos;
          /*Try to initialize prev_packet_gp.
            If the current page had packets but didn't have a granule
             position, or the granule position it had was too small (both
             illegal), just use the starting granule position for the link.*/
          prev_packet_gp=links[cur_link].pcm_start;
          if(OP_LIKELY(cur_page_gp!=-1)){
            op_granpos_add(&prev_packet_gp,cur_page_gp,-total_duration);
          }
          if(OP_LIKELY(!op_granpos_diff(&diff,
           prev_packet_gp,links[cur_link].pcm_start))){
            opus_int32 pre_skip;
            /*If we start at the beginning of the pre-skip region, or we're
               at least 80 ms from the end of the pre-skip region, we discard
               to the end of the pre-skip region.
              Otherwise, we still use the 80 ms default, which will discard
               past the end of the pre-skip region.*/
            pre_skip=links[cur_link].head.pre_skip;
            if(diff>=0&&diff<=OP_MAX(0,pre_skip-80*48)){
              cur_discard_count=pre_skip-(int)diff;
            }
          }
          _of->cur_discard_count=cur_discard_count;
        }
        if(OP_UNLIKELY(cur_page_gp==-1)){
          /*This page had completed packets but didn't have a valid granule
             position.
            This is illegal, but we'll try to handle it by continuing to
             count forwards from the previous page.*/
          if(op_granpos_add(&cur_page_gp,prev_packet_gp,total_duration)<0){
            /*The timestamp for this page overflowed.*/
            cur_page_gp=links[cur_link].pcm_end;
          }
        }
        /*If we hit the last page, handle end-trimming.*/
        if(OP_UNLIKELY(cur_page_eos)
         &&OP_LIKELY(!op_granpos_diff(&diff,cur_page_gp,prev_packet_gp))
         &&OP_LIKELY(diff<total_duration)){
          cur_packet_gp=prev_packet_gp;
          for(pi=0;pi<op_count;pi++){
            /*Check for overflow.*/
            if(diff<0&&OP_UNLIKELY(OP_INT64_MAX+diff<durations[pi])){
              diff=durations[pi]+1;
            }
            else diff=durations[pi]-diff;
            /*If we have samples to trim...*/
            if(diff>0){
              /*If we trimmed the entire packet, stop (the spec says encoders
                 shouldn't do this, but we support it anyway).*/
              if(OP_UNLIKELY(diff>durations[pi]))break;
              cur_packet_gp=cur_page_gp;
              /*Move the EOS flag to this packet, if necessary, so we'll trim
                 the samples during decode.*/
              _of->op[pi].e_o_s=1;
            }
            else{
              /*Update the granule position as normal.*/
              OP_ALWAYS_TRUE(!op_granpos_add(&cur_packet_gp,
               cur_packet_gp,durations[pi]));
            }
            _of->op[pi].granulepos=cur_packet_gp;
            OP_ALWAYS_TRUE(!op_granpos_diff(&diff,cur_page_gp,cur_packet_gp));
          }
        }
        else{
          /*Propagate timestamps to earlier packets.
            op_granpos_add(&prev_packet_gp,prev_packet_gp,total_duration)
             should succeed and give prev_packet_gp==cur_page_gp.
            But we don't bother to check that, as there isn't much we can do
             if it's not true, and it actually will not be true on the first
             page after a seek, if there was a continued packet.
            The only thing we guarantee is that the start and end granule
             positions of the packets are valid, and that they are monotonic
             within a page.
            They might be completely out of range for this link (we'll check
             that elsewhere), or non-monotonic between pages.*/
          if(OP_UNLIKELY(op_granpos_add(&prev_packet_gp,
           cur_page_gp,-total_duration)<0)){
            /*The starting timestamp for the first packet on this page
               underflowed.
              This is illegal, but we ignore it.*/
            prev_packet_gp=0;
          }
          for(pi=0;pi<op_count;pi++){
            if(OP_UNLIKELY(op_granpos_add(&cur_packet_gp,
             cur_page_gp,-total_duration)<0)){
              /*The start timestamp for this packet underflowed.
                This is illegal, but we ignore it.*/
              cur_packet_gp=0;
            }
            total_duration-=durations[pi];
            OP_ASSERT(total_duration>=0);
            OP_ALWAYS_TRUE(!op_granpos_add(&cur_packet_gp,
             cur_packet_gp,durations[pi]));
            _of->op[pi].granulepos=cur_packet_gp;
          }
          OP_ASSERT(total_duration==0);
        }
        _of->prev_packet_gp=prev_packet_gp;
        _of->prev_page_offset=_page_offset;
        /*pi may be less than the original op_count if end-trimming dropped
           trailing packets.*/
        _of->op_count=op_count=pi;
      }
      if(report_hole)return OP_HOLE;
      /*If end-trimming didn't trim all the packets, we're done.*/
      if(op_count>0)return 0;
    }
  }
}

/*Seek to an absolute byte offset _pos in the stream and resume processing
   from the next complete page there.
  Requires an opened, seekable stream.
  Return: 0 on success, or a negative error code on failure.*/
int op_raw_seek(OggOpusFile *_of,opus_int64 _pos){
  int ret;
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  /*Don't dump the decoder state if we can't seek.*/
  if(OP_UNLIKELY(!_of->seekable))return OP_ENOSEEK;
  if(OP_UNLIKELY(_pos<0)||OP_UNLIKELY(_pos>_of->end))return OP_EINVAL;
  /*Clear out any buffered, decoded data.*/
  op_decode_clear(_of);
  _of->bytes_tracked=0;
  _of->samples_tracked=0;
  ret=op_seek_helper(_of,_pos);
  if(OP_UNLIKELY(ret<0))return OP_EREAD;
  ret=op_fetch_and_process_page(_of,NULL,-1,1,1);
  /*If we hit EOF, op_fetch_and_process_page() leaves us uninitialized.
    Instead, jump to the end.*/
  if(ret==OP_EOF){
    int cur_link;
    op_decode_clear(_of);
    cur_link=_of->nlinks-1;
    _of->cur_link=cur_link;
    _of->prev_packet_gp=_of->links[cur_link].pcm_end;
    _of->cur_discard_count=0;
    ret=0;
  }
  return ret;
}

/*Convert a PCM offset relative to the start of the whole stream to a granule
   position in an individual link.
  On success, *_li receives the index of that link.
  Returns -1 if _pcm_offset lies beyond the stream's total duration.*/
static ogg_int64_t op_get_granulepos(const OggOpusFile *_of,
 ogg_int64_t _pcm_offset,int *_li){
  const OggOpusLink *links;
  ogg_int64_t        duration;
  ogg_int64_t        pcm_start;
  opus_int32         pre_skip;
  int                nlinks;
  int                li_lo;
  int                li_hi;
  OP_ASSERT(_pcm_offset>=0);
  nlinks=_of->nlinks;
  links=_of->links;
  li_lo=0;
  li_hi=nlinks;
  /*Binary search for the link containing _pcm_offset.*/
  do{
    int li;
    li=li_lo+(li_hi-li_lo>>1);
    if(links[li].pcm_file_offset<=_pcm_offset)li_lo=li;
    else li_hi=li;
  }
  while(li_hi-li_lo>1);
  _pcm_offset-=links[li_lo].pcm_file_offset;
  pcm_start=links[li_lo].pcm_start;
  pre_skip=links[li_lo].head.pre_skip;
  OP_ALWAYS_TRUE(!op_granpos_diff(&duration,links[li_lo].pcm_end,pcm_start));
  duration-=pre_skip;
  if(_pcm_offset>=duration)return -1;
  _pcm_offset+=pre_skip;
  if(OP_UNLIKELY(pcm_start>OP_INT64_MAX-_pcm_offset)){
    /*Adding this amount to the granule position would overflow the positive
       half of its 64-bit range.
      Since signed overflow is undefined in C, do it in a way the compiler
       isn't allowed to screw up.*/
    _pcm_offset-=OP_INT64_MAX-pcm_start+1;
    pcm_start=OP_INT64_MIN;
  }
  pcm_start+=_pcm_offset;
  *_li=li_lo;
  return pcm_start;
}

/*A small helper to determine if an Ogg page contains data that continues
   onto a subsequent page.*/
static int op_page_continues(const ogg_page *_og){
  int nlacing;
  OP_ASSERT(_og->header_len>=27);
  nlacing=_og->header[26];
  OP_ASSERT(_og->header_len>=27+nlacing);
  /*This also correctly handles the (unlikely) case of nlacing==0, because
     0!=255.*/
  return _og->header[27+nlacing-1]==255;
}

/*A small helper to buffer the continued packet data from a page.*/
static void op_buffer_continued_data(OggOpusFile *_of,ogg_page *_og){
  ogg_packet op;
  ogg_stream_pagein(&_of->os,_og);
  /*Drain any packets that did end on this page (and ignore holes).
    We only care about the continued packet data.*/
  while(ogg_stream_packetout(&_of->os,&op));
}

/*This controls how close the target has to be to use the current stream
   position to subdivide the initial range.
  Two minutes seems to be a good default.*/
#define OP_CUR_TIME_THRESH (120*48*(opus_int32)1000)

/*Note: The OP_SMALL_FOOTPRINT #define doesn't (currently) save much code
   size, but it's meant to serve as documentation for portions of the seeking
   algorithm that are purely optional, to aid others learning from/porting
   this code to other contexts.*/
/*#define OP_SMALL_FOOTPRINT (1)*/

/*Search within link _li for the page with the highest granule position
   preceding (or equal to) _target_gp.
  There is a danger here: missing pages or incorrect frame number information
   in the bitstream could make our task impossible.
Account for that (and report it as an error condition).*/
static int op_pcm_seek_page(OggOpusFile *_of,
 ogg_int64_t _target_gp,int _li){
  const OggOpusLink *link;
  ogg_page           og;
  ogg_int64_t        pcm_pre_skip;
  ogg_int64_t        pcm_start;
  ogg_int64_t        pcm_end;
  ogg_int64_t        best_gp;
  ogg_int64_t        diff;
  ogg_uint32_t       serialno;
  opus_int32         pre_skip;
  opus_int64         begin;
  opus_int64         end;
  opus_int64         boundary;
  opus_int64         best;
  opus_int64         best_start;
  opus_int64         page_offset;
  opus_int64         d0;
  opus_int64         d1;
  opus_int64         d2;
  int                force_bisect;
  int                buffering;
  int                ret;
  _of->bytes_tracked=0;
  _of->samples_tracked=0;
  link=_of->links+_li;
  best_gp=pcm_start=link->pcm_start;
  pcm_end=link->pcm_end;
  serialno=link->serialno;
  best=best_start=begin=link->data_offset;
  page_offset=-1;
  buffering=0;
  /*We discard the first 80 ms of data after a seek, so seek back that much
     farther.
    If we can't, simply seek to the beginning of the link.*/
  if(OP_UNLIKELY(op_granpos_add(&_target_gp,_target_gp,-80*48)<0)
   ||OP_UNLIKELY(op_granpos_cmp(_target_gp,pcm_start)<0)){
    _target_gp=pcm_start;
  }
  /*Special case seeking to the start of the link.*/
  pre_skip=link->head.pre_skip;
  OP_ALWAYS_TRUE(!op_granpos_add(&pcm_pre_skip,pcm_start,pre_skip));
  if(op_granpos_cmp(_target_gp,pcm_pre_skip)<0)end=boundary=begin;
  else{
    end=boundary=link->end_offset;
#if !defined(OP_SMALL_FOOTPRINT)
    /*If we were decoding from this link, we can narrow the range a bit.*/
    if(_li==_of->cur_link&&_of->ready_state>=OP_INITSET){
      opus_int64 offset;
      int        op_count;
      op_count=_of->op_count;
      /*The offset can be out of range if we were reading through the stream
         and encountered a page with the granule position for another link
         outside of the data range identified during link enumeration when we
         were opening the file.
        We will just ignore the current position in that case.
        The only way the offset can be valid _and_ we can fail the granule
         position checks below is if someone changed the contents of the last
         page since we read it.
        We'd be within our rights to just return OP_EBADLINK, but instead
         we'll simply ignore the current position in that case, too.*/
      offset=_of->offset;
      if(op_count>0&&OP_LIKELY(begin<=offset&&offset<=end)){
        ogg_int64_t gp;
        /*Make sure the timestamp is valid.
          The granule position might be -1 if we collected the packets from a
           page without a granule position after reporting a hole.*/
        gp=_of->op[op_count-1].granulepos;
        if(OP_LIKELY(gp!=-1)&&OP_LIKELY(op_granpos_cmp(pcm_start,gp)<0)
         &&OP_LIKELY(op_granpos_cmp(pcm_end,gp)>0)){
          OP_ALWAYS_TRUE(!op_granpos_diff(&diff,gp,_target_gp));
          /*We only actually use the current time if either
             a) We can cut off at least half the range, or
             b) We're seeking sufficiently close to the current position that
                 it's likely to be informative.
            Otherwise it appears using the whole link range to estimate the
             first seek location gives better results, on average.*/
          if(diff<0){
            if(offset-begin>=end-begin>>1||diff>-OP_CUR_TIME_THRESH){
              best=begin=offset;
              best_gp=pcm_start=gp;
              /*If we have buffered data from a continued packet, remember
                 the offset of the previous page's start, so that if we do
                 wind up having to seek back here later, we can prime the
                 stream with the continued packet data.
                With no continued packet, we remember the end of the page.*/
              best_start=_of->os.body_returned<_of->os.body_fill?
               _of->prev_page_offset:best;
              /*If there's completed packets and data in the stream state,
                 prev_page_offset should always be set.*/
              OP_ASSERT(best_start>=0);
              /*Buffer any continued packet data starting from here.*/
              buffering=1;
            }
          }
          else{
            ogg_int64_t prev_page_gp;
            /*We might get lucky and already have the packet with the target
               buffered.
              Worth checking.
              For very small files (with all of the data in a single page,
               generally 1 second or less), we can loop them continuously
               without seeking at all.*/
            if(op_granpos_add(&prev_page_gp,_of->op[0].granulepos,
             -op_get_packet_duration(_of->op[0].packet,_of->op[0].bytes))<0)
            {
              /*We validate/sanitize the per-packet timestamps, so the only
                 way we should fail to calculate a granule position for the
                 previous page is if the first page with completed packets in
                 the stream is also the last, and end-trimming causes the
                 apparent granule position preceding the first sample in the
                 first packet to underflow.
                The starting PCM offset is then 0 by spec mandate (see also:
                 op_find_initial_pcm_offset()).*/
              OP_ASSERT(_of->op[0].e_o_s);
              prev_page_gp=0;
            }
            if(op_granpos_cmp(prev_page_gp,_target_gp)<=0){
              /*Don't call op_decode_clear(), because it will dump our
                 packets.*/
              _of->op_pos=0;
              _of->od_buffer_size=0;
              _of->prev_packet_gp=prev_page_gp;
              /*_of->prev_page_offset already points to the right place.*/
              _of->ready_state=OP_STREAMSET;
              return op_make_decode_ready(_of);
            }
            /*No such luck.
              Check if we can cut off at least half the range, though.*/
            if(offset-begin<=end-begin>>1||diff<OP_CUR_TIME_THRESH){
              /*We really want the page start here, but this will do.*/
              end=boundary=offset;
              pcm_end=gp;
            }
          }
        }
      }
    }
#endif
  }
  /*This code was originally based on the "new search algorithm by HB
     (Nicholas Vinen)" from libvorbisfile.
    It has been modified substantially since.*/
  op_decode_clear(_of);
  if(!buffering)ogg_stream_reset_serialno(&_of->os,serialno);
  _of->cur_link=_li;
  _of->ready_state=OP_STREAMSET;
  /*Initialize the interval size history.*/
  d2=d1=d0=end-begin;
  force_bisect=0;
  while(begin<end){
    opus_int64 bisect;
    opus_int64 next_boundary;
    opus_int32 chunk_size;
    if(end-begin<OP_CHUNK_SIZE)bisect=begin;
    else{
      /*Update the interval size history.*/
      d0=d1>>1;
      d1=d2>>1;
      d2=end-begin>>1;
      if(force_bisect)bisect=begin+(end-begin>>1);
      else{
        ogg_int64_t diff2;
        OP_ALWAYS_TRUE(!op_granpos_diff(&diff,_target_gp,pcm_start));
        OP_ALWAYS_TRUE(!op_granpos_diff(&diff2,pcm_end,pcm_start));
        /*Take a (pretty decent) guess.*/
        bisect=begin+op_rescale64(diff,diff2,end-begin)-OP_CHUNK_SIZE;
      }
      if(bisect-OP_CHUNK_SIZE<begin)bisect=begin;
      force_bisect=0;
    }
    if(bisect!=_of->offset){
      /*Discard any buffered continued packet data.*/
      if(buffering)ogg_stream_reset(&_of->os);
      buffering=0;
      page_offset=-1;
      ret=op_seek_helper(_of,bisect);
      if(OP_UNLIKELY(ret<0))return ret;
    }
    chunk_size=OP_CHUNK_SIZE;
    next_boundary=boundary;
    /*Now scan forward and figure out where we landed.
      In the ideal case, we will see a page with a granule position at or
       before our target, followed by a page with a granule position after
       our target (or the end of the search interval).
      Then we can just drop out and will have all of the data we need with no
       additional seeking.
      If we landed too far before, or after, we'll break out and do another
       bisection.*/
    while(begin<end){
      page_offset=op_get_next_page(_of,&og,boundary);
      if(page_offset<0){
        if(page_offset<OP_FALSE)return (int)page_offset;
        /*There are no more pages in our interval from our stream with a
           valid timestamp that start at position bisect or later.*/
        /*If we scanned the whole interval, we're done.*/
        if(bisect<=begin+1)end=begin;
        else{
          /*Otherwise, back up one chunk.
            First, discard any data from a continued packet.*/
          if(buffering)ogg_stream_reset(&_of->os);
          buffering=0;
          bisect=OP_MAX(bisect-chunk_size,begin);
          ret=op_seek_helper(_of,bisect);
          if(OP_UNLIKELY(ret<0))return ret;
          /*Bump up the chunk size.*/
          chunk_size=OP_MIN(2*chunk_size,OP_CHUNK_SIZE_MAX);
          /*If we did find a page from another stream or without a timestamp,
             don't read past it.*/
          boundary=next_boundary;
        }
      }
      else{
        ogg_int64_t gp;
        int         has_packets;
        /*Save the offset of the first page we found after the seek,
           regardless of the stream it came from or whether or not it has a
           timestamp.*/
        next_boundary=OP_MIN(page_offset,next_boundary);
        if(serialno!=(ogg_uint32_t)ogg_page_serialno(&og))continue;
        has_packets=ogg_page_packets(&og)>0;
        /*Force the gp to -1 (as it should be per spec) if no packets end on
           this page.
          Otherwise we might get confused when we try to pull out a packet
           with that timestamp and can't find it.*/
        gp=has_packets?ogg_page_granulepos(&og):-1;
        if(gp==-1){
          if(buffering){
            if(OP_LIKELY(!has_packets))ogg_stream_pagein(&_of->os,&og);
            else{
              /*If packets did end on this page, but we still didn't have a
                 valid granule position (in violation of the spec!), stop
                 buffering continued packet data.
                Otherwise we might continue past the packet we actually
                 wanted.*/
              ogg_stream_reset(&_of->os);
              buffering=0;
            }
          }
          continue;
        }
        if(op_granpos_cmp(gp,_target_gp)<0){
          /*We found a page that ends before our target.
            Advance to the raw offset of the next page.*/
          begin=_of->offset;
          if(OP_UNLIKELY(op_granpos_cmp(pcm_start,gp)>0)
           ||OP_UNLIKELY(op_granpos_cmp(pcm_end,gp)<0)){
            /*Don't let pcm_start get out of range!
              That could happen with an invalid timestamp.*/
            break;
          }
          /*Save the byte offset of the end of the page with this granule
             position.*/
          best=best_start=begin;
          /*Buffer any data from a continued packet, if necessary.
            This avoids the need to seek back here if the next timestamp we
             encounter while scanning forward lies after our target.*/
          if(buffering)ogg_stream_reset(&_of->os);
          if(op_page_continues(&og)){
            op_buffer_continued_data(_of,&og);
            /*If we have a continued packet, remember the offset of this
               page's start, so that if we do wind up having to seek back
               here later, we can prime the stream with the continued packet
               data.
              With no continued packet, we remember the end of the page.*/
            best_start=page_offset;
          }
          /*Then force buffering on, so that if a packet starts (but does not
             end) on the next page, we still avoid the extra seek back.*/
          buffering=1;
          best_gp=pcm_start=gp;
          OP_ALWAYS_TRUE(!op_granpos_diff(&diff,_target_gp,pcm_start));
          /*If we're more than a second away from our target, break out and
             do another bisection.*/
          if(diff>48000)break;
          /*Otherwise, keep scanning forward (do NOT use begin+1).*/
          bisect=begin;
        }
        else{
          /*We found a page that ends after our target.*/
          /*If we scanned the whole interval before we found it, we're
             done.*/
          if(bisect<=begin+1)end=begin;
          else{
            end=bisect;
            /*In later iterations, don't read past the first page we
               found.*/
            boundary=next_boundary;
            /*If we're not making much progress shrinking the interval size,
               start forcing straight bisection to limit the worst case.*/
            force_bisect=end-begin>d0*2;
            /*Don't let pcm_end get out of range!
              That could happen with an invalid timestamp.*/
            if(OP_LIKELY(op_granpos_cmp(pcm_end,gp)>0)
             &&OP_LIKELY(op_granpos_cmp(pcm_start,gp)<=0)){
              pcm_end=gp;
            }
            break;
          }
        }
      }
    }
  }
  /*Found our page.*/
  OP_ASSERT(op_granpos_cmp(best_gp,pcm_start)>=0);
  /*Seek, if necessary.
    If we were buffering data from a continued packet, we should be able to
     continue to scan forward to get the rest of the data (even if
     page_offset==-1).
    Otherwise, we need to seek back to best_start.*/
  if(!buffering){
    if(best_start!=page_offset){
      page_offset=-1;
      ret=op_seek_helper(_of,best_start);
      if(OP_UNLIKELY(ret<0))return ret;
    }
    if(best_start<best){
      /*Retrieve the page at best_start, if we do not already have it.*/
      if(page_offset<0){
        page_offset=op_get_next_page(_of,&og,link->end_offset);
        if(OP_UNLIKELY(page_offset<OP_FALSE))return (int)page_offset;
        if(OP_UNLIKELY(page_offset!=best_start))return OP_EBADLINK;
      }
      op_buffer_continued_data(_of,&og);
      page_offset=-1;
    }
  }
  /*Update prev_packet_gp to allow per-packet granule position assignment.*/
  _of->prev_packet_gp=best_gp;
  _of->prev_page_offset=best_start;
  ret=op_fetch_and_process_page(_of,page_offset<0?NULL:&og,page_offset,0,1);
  if(OP_UNLIKELY(ret<0))return OP_EBADLINK;
  /*Verify result.*/
  if(OP_UNLIKELY(op_granpos_cmp(_of->prev_packet_gp,_target_gp)>0)){
    return OP_EBADLINK;
  }
  /*Our caller will set cur_discard_count to handle pre-roll.*/
  return 0;
}

/*Seek to PCM offset _pcm_offset (in 48 kHz samples relative to the start of
   the whole stream), then skip/discard samples so decoding resumes exactly
   at the target.
  Requires an opened, seekable stream.
  Return: 0 on success, or a negative error code on failure.*/
int op_pcm_seek(OggOpusFile *_of,ogg_int64_t _pcm_offset){
  const OggOpusLink *link;
  ogg_int64_t        pcm_start;
  ogg_int64_t        target_gp;
  ogg_int64_t        prev_packet_gp;
  ogg_int64_t        skip;
  ogg_int64_t        diff;
  int                op_count;
  int                op_pos;
  int                ret;
  int                li;
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  if(OP_UNLIKELY(!_of->seekable))return OP_ENOSEEK;
  if(OP_UNLIKELY(_pcm_offset<0))return OP_EINVAL;
  target_gp=op_get_granulepos(_of,_pcm_offset,&li);
  if(OP_UNLIKELY(target_gp==-1))return OP_EINVAL;
  link=_of->links+li;
  pcm_start=link->pcm_start;
  /*From here on, _pcm_offset is relative to the start of link li.*/
  OP_ALWAYS_TRUE(!op_granpos_diff(&_pcm_offset,target_gp,pcm_start));
#if !defined(OP_SMALL_FOOTPRINT)
  /*For small (90 ms or less) forward seeks within the same link, just decode
     forward.
    This also optimizes the case of seeking to the current position.*/
  if(li==_of->cur_link&&_of->ready_state>=OP_INITSET){
    ogg_int64_t gp;
    gp=_of->prev_packet_gp;
    if(OP_LIKELY(gp!=-1)){
      ogg_int64_t discard_count;
      int         nbuffered;
      nbuffered=OP_MAX(_of->od_buffer_size-_of->od_buffer_pos,0);
      OP_ALWAYS_TRUE(!op_granpos_add(&gp,gp,-nbuffered));
      /*We do _not_ add cur_discard_count to gp.
        Otherwise the total amount to discard could grow without bound, and
         it would be better just to do a full seek.*/
      if(OP_LIKELY(!op_granpos_diff(&discard_count,target_gp,gp))){
        /*We use a threshold of 90 ms instead of 80, since 80 ms is the
           _minimum_ we would have discarded after a full seek.
          Assuming 20 ms frames (the default), we'd discard 90 ms on
           average.*/
        if(discard_count>=0&&OP_UNLIKELY(discard_count<90*48)){
          _of->cur_discard_count=(opus_int32)discard_count;
          return 0;
        }
      }
    }
  }
#endif
  ret=op_pcm_seek_page(_of,target_gp,li);
  if(OP_UNLIKELY(ret<0))return ret;
  /*Now skip samples until we actually get to our target.*/
  /*Figure out where we should skip to.*/
  if(_pcm_offset<=link->head.pre_skip)skip=0;
  else skip=OP_MAX(_pcm_offset-80*48,0);
  OP_ASSERT(_pcm_offset-skip>=0);
  OP_ASSERT(_pcm_offset-skip<OP_INT32_MAX-120*48);
  /*Skip packets until we find one with samples past our skip target.*/
  for(;;){
    op_count=_of->op_count;
    prev_packet_gp=_of->prev_packet_gp;
    for(op_pos=_of->op_pos;op_pos<op_count;op_pos++){
      ogg_int64_t cur_packet_gp;
      cur_packet_gp=_of->op[op_pos].granulepos;
      if(OP_LIKELY(!op_granpos_diff(&diff,cur_packet_gp,pcm_start))
       &&diff>skip){
        break;
      }
      prev_packet_gp=cur_packet_gp;
    }
    _of->prev_packet_gp=prev_packet_gp;
    _of->op_pos=op_pos;
    if(op_pos<op_count)break;
    /*We skipped all the packets on this page.
      Fetch another.*/
    ret=op_fetch_and_process_page(_of,NULL,-1,0,1);
    if(OP_UNLIKELY(ret<0))return OP_EBADLINK;
  }
  /*We skipped too far, or couldn't get within 2 billion samples of the
     target.
    Either the timestamps were illegal or there was a hole in the data.*/
  if(op_granpos_diff(&diff,prev_packet_gp,pcm_start)||diff>skip
   ||_pcm_offset-diff>=OP_INT32_MAX){
    return OP_EBADLINK;
  }
  /*TODO: If there are further holes/illegal timestamps, we still won't
     decode to the correct sample.
    However, at least op_pcm_tell() will report the correct value immediately
     after returning.*/
  _of->cur_discard_count=(opus_int32)(_pcm_offset-diff);
  return 0;
}

/*Current raw byte offset in the stream, or OP_EINVAL if not opened.*/
opus_int64 op_raw_tell(const OggOpusFile *_of){
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  return _of->offset;
}

/*Convert a granule position from a given link to a PCM offset relative to
   the start of the whole stream.
  For unseekable sources, this gets reset to 0 at the beginning of each
   link.*/
static ogg_int64_t op_get_pcm_offset(const OggOpusFile *_of,
 ogg_int64_t _gp,int _li){
  const OggOpusLink *links;
  ogg_int64_t        pcm_offset;
  links=_of->links;
  OP_ASSERT(_li>=0&&_li<_of->nlinks);
  pcm_offset=links[_li].pcm_file_offset;
  if(_of->seekable&&OP_UNLIKELY(op_granpos_cmp(_gp,links[_li].pcm_end)>0)){
    _gp=links[_li].pcm_end;
  }
  if(OP_LIKELY(op_granpos_cmp(_gp,links[_li].pcm_start)>0)){
    ogg_int64_t delta;
    if(OP_UNLIKELY(op_granpos_diff(&delta,_gp,links[_li].pcm_start)<0)){
      /*This means an unseekable stream claimed to have a page from more than
         2 billion days after we joined.*/
      OP_ASSERT(!_of->seekable);
      return OP_INT64_MAX;
    }
    if(delta<links[_li].head.pre_skip)delta=0;
    else delta-=links[_li].head.pre_skip;
    /*In the seekable case, _gp was limited by pcm_end.
In the unseekable case, pcm_offset should be 0.*/ OP_ASSERT(pcm_offset<=OP_INT64_MAX-delta); pcm_offset+=delta; } return pcm_offset; } ogg_int64_t op_pcm_tell(const OggOpusFile *_of){ ogg_int64_t gp; int nbuffered; int li; if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL; gp=_of->prev_packet_gp; if(gp==-1)return 0; nbuffered=OP_MAX(_of->od_buffer_size-_of->od_buffer_pos,0); OP_ALWAYS_TRUE(!op_granpos_add(&gp,gp,-nbuffered)); li=_of->seekable?_of->cur_link:0; if(op_granpos_add(&gp,gp,_of->cur_discard_count)<0){ gp=_of->links[li].pcm_end; } return op_get_pcm_offset(_of,gp,li); } void op_set_decode_callback(OggOpusFile *_of, op_decode_cb_func _decode_cb,void *_ctx){ _of->decode_cb=_decode_cb; _of->decode_cb_ctx=_ctx; } int op_set_gain_offset(OggOpusFile *_of, int _gain_type,opus_int32 _gain_offset_q8){ if(_gain_type!=OP_HEADER_GAIN&&_gain_type!=OP_ALBUM_GAIN &&_gain_type!=OP_TRACK_GAIN&&_gain_type!=OP_ABSOLUTE_GAIN){ return OP_EINVAL; } _of->gain_type=_gain_type; /*The sum of header gain and track gain lies in the range [-65536,65534]. These bounds allow the offset to set the final value to anywhere in the range [-32768,32767], which is what we'll clamp it to before applying.*/ _of->gain_offset_q8=OP_CLAMP(-98302,_gain_offset_q8,98303); op_update_gain(_of); return 0; } void op_set_dither_enabled(OggOpusFile *_of,int _enabled){ #if !defined(OP_FIXED_POINT) _of->dither_disabled=!_enabled; if(!_enabled)_of->dither_mute=65; #endif } /*Allocate the decoder scratch buffer. 
This is done lazily, since if the user provides large enough buffers, we'll
  never need it.*/
static int op_init_buffer(OggOpusFile *_of){
  int nchannels_max;
  if(_of->seekable){
    const OggOpusLink *links;
    int                nlinks;
    int                li;
    links=_of->links;
    nlinks=_of->nlinks;
    /*Size the buffer for the largest channel count found in any link, so a
       single allocation can serve the whole (possibly chained) stream.*/
    nchannels_max=1;
    for(li=0;li<nlinks;li++){
      nchannels_max=OP_MAX(nchannels_max,links[li].head.channel_count);
    }
  }
  /*Unseekable streams may encounter new links later, so assume the
     maximum.*/
  else nchannels_max=OP_NCHANNELS_MAX;
  /*120*48 samples per channel covers the longest possible packet duration
     (120 ms) at 48 kHz.*/
  _of->od_buffer=(op_sample *)_ogg_malloc(
   sizeof(*_of->od_buffer)*nchannels_max*120*48);
  if(_of->od_buffer==NULL)return OP_EFAULT;
  return 0;
}

/*Decode a single packet into the target buffer.
  _pcm:       The interleaved output buffer.
  _op:        The packet to decode.
  _nsamples:  The expected duration of the packet, in samples (per channel).
  _nchannels: The channel count of the current link.
  Return: The number of samples decoded, or OP_EBADPACKET on failure (or if
           the application's decode callback returned an invalid positive
           value).*/
static int op_decode(OggOpusFile *_of,op_sample *_pcm,
 const ogg_packet *_op,int _nsamples,int _nchannels){
  int ret;
  /*First we try using the application-provided decode callback.*/
  if(_of->decode_cb!=NULL){
#if defined(OP_FIXED_POINT)
    ret=(*_of->decode_cb)(_of->decode_cb_ctx,_of->od,_pcm,_op,
     _nsamples,_nchannels,OP_DEC_FORMAT_SHORT,_of->cur_link);
#else
    ret=(*_of->decode_cb)(_of->decode_cb_ctx,_of->od,_pcm,_op,
     _nsamples,_nchannels,OP_DEC_FORMAT_FLOAT,_of->cur_link);
#endif
  }
  else ret=OP_DEC_USE_DEFAULT;
  /*If the application didn't want to handle decoding, do it ourselves.*/
  if(ret==OP_DEC_USE_DEFAULT){
#if defined(OP_FIXED_POINT)
    ret=opus_multistream_decode(_of->od,
     _op->packet,_op->bytes,_pcm,_nsamples,0);
#else
    ret=opus_multistream_decode_float(_of->od,
     _op->packet,_op->bytes,_pcm,_nsamples,0);
#endif
    OP_ASSERT(ret<0||ret==_nsamples);
  }
  /*If the application returned a positive value other than 0 or
     OP_DEC_USE_DEFAULT, fail.*/
  else if(OP_UNLIKELY(ret>0))return OP_EBADPACKET;
  if(OP_UNLIKELY(ret<0))return OP_EBADPACKET;
  return ret;
}

/*Read more samples from the stream, using the same API as op_read() or
   op_read_float().
  Previously-buffered samples are returned first; otherwise the next buffered
   packet is decoded (directly into the user's buffer when it fits, or into
   the scratch buffer when it does not), and more pages are fetched as
   needed.*/
static int op_read_native(OggOpusFile *_of,
 op_sample *_pcm,int _buf_size,int *_li){
  if(OP_UNLIKELY(_of->ready_state<OP_OPENED))return OP_EINVAL;
  for(;;){
    int ret;
    if(OP_LIKELY(_of->ready_state>=OP_INITSET)){
      int nchannels;
      int od_buffer_pos;
      int nsamples;
      int op_pos;
      nchannels=_of->links[_of->seekable?_of->cur_link:0].head.channel_count;
      od_buffer_pos=_of->od_buffer_pos;
      nsamples=_of->od_buffer_size-od_buffer_pos;
      /*If we have buffered samples, return them.*/
      if(nsamples>0){
        if(nsamples*nchannels>_buf_size)nsamples=_buf_size/nchannels;
        OP_ASSERT(_pcm!=NULL||nsamples<=0);
        /*Check nsamples again so we don't pass NULL to memcpy() if _buf_size
           is zero.
          That would technically be undefined behavior, even if the number of
           bytes to copy were zero.*/
        if(nsamples>0){
          memcpy(_pcm,_of->od_buffer+nchannels*od_buffer_pos,
           sizeof(*_pcm)*nchannels*nsamples);
          od_buffer_pos+=nsamples;
          _of->od_buffer_pos=od_buffer_pos;
        }
        if(_li!=NULL)*_li=_of->cur_link;
        return nsamples;
      }
      /*If we have buffered packets, decode one.*/
      op_pos=_of->op_pos;
      if(OP_LIKELY(op_pos<_of->op_count)){
        const ogg_packet *pop;
        ogg_int64_t       diff;
        opus_int32        cur_discard_count;
        int               duration;
        int               trimmed_duration;
        pop=_of->op+op_pos++;
        _of->op_pos=op_pos;
        cur_discard_count=_of->cur_discard_count;
        duration=op_get_packet_duration(pop->packet,pop->bytes);
        /*We don't buffer packets with an invalid TOC sequence.*/
        OP_ASSERT(duration>0);
        trimmed_duration=duration;
        /*Perform end-trimming: the last packet of a link may contain fewer
           valid samples than its nominal duration.*/
        if(OP_UNLIKELY(pop->e_o_s)){
          if(OP_UNLIKELY(op_granpos_cmp(pop->granulepos,
           _of->prev_packet_gp)<=0)){
            trimmed_duration=0;
          }
          else if(OP_LIKELY(!op_granpos_diff(&diff,
           pop->granulepos,_of->prev_packet_gp))){
            trimmed_duration=(int)OP_MIN(diff,trimmed_duration);
          }
        }
        _of->prev_packet_gp=pop->granulepos;
        if(OP_UNLIKELY(duration*nchannels>_buf_size)){
          op_sample *buf;
          /*If the user's buffer is too small, decode into a scratch
             buffer.*/
          buf=_of->od_buffer;
          if(OP_UNLIKELY(buf==NULL)){
            ret=op_init_buffer(_of);
            if(OP_UNLIKELY(ret<0))return ret;
            buf=_of->od_buffer;
          }
          ret=op_decode(_of,buf,pop,duration,nchannels);
          if(OP_UNLIKELY(ret<0))return ret;
          /*Perform pre-skip/pre-roll by advancing the buffer read
             position.*/
          od_buffer_pos=(int)OP_MIN(trimmed_duration,cur_discard_count);
          cur_discard_count-=od_buffer_pos;
          _of->cur_discard_count=cur_discard_count;
          _of->od_buffer_pos=od_buffer_pos;
          _of->od_buffer_size=trimmed_duration;
          /*Update bitrate tracking based on the actual samples we used from
             what was decoded.*/
          _of->bytes_tracked+=pop->bytes;
          _of->samples_tracked+=trimmed_duration-od_buffer_pos;
        }
        else{
          OP_ASSERT(_pcm!=NULL);
          /*Otherwise decode directly into the user's buffer.*/
          ret=op_decode(_of,_pcm,pop,duration,nchannels);
          if(OP_UNLIKELY(ret<0))return ret;
          if(OP_LIKELY(trimmed_duration>0)){
            /*Perform pre-skip/pre-roll.*/
            od_buffer_pos=(int)OP_MIN(trimmed_duration,cur_discard_count);
            cur_discard_count-=od_buffer_pos;
            _of->cur_discard_count=cur_discard_count;
            trimmed_duration-=od_buffer_pos;
            /*Shift the samples we're keeping down over the discarded
               ones.*/
            if(OP_LIKELY(trimmed_duration>0)
             &&OP_UNLIKELY(od_buffer_pos>0)){
              memmove(_pcm,_pcm+od_buffer_pos*nchannels,
               sizeof(*_pcm)*trimmed_duration*nchannels);
            }
            /*Update bitrate tracking based on the actual samples we used
               from what was decoded.*/
            _of->bytes_tracked+=pop->bytes;
            _of->samples_tracked+=trimmed_duration;
            if(OP_LIKELY(trimmed_duration>0)){
              if(_li!=NULL)*_li=_of->cur_link;
              return trimmed_duration;
            }
          }
        }
        /*Don't grab another page yet.
          This one might have more packets, or might have buffered data
           now.*/
        continue;
      }
    }
    /*Suck in another page.*/
    ret=op_fetch_and_process_page(_of,NULL,-1,1,0);
    if(OP_UNLIKELY(ret==OP_EOF)){
      if(_li!=NULL)*_li=_of->cur_link;
      return 0;
    }
    if(OP_UNLIKELY(ret<0))return ret;
  }
}

/*A generic filter to apply to the decoded audio data.
  _src is non-const because we will destructively modify the contents of the
   source buffer that we consume in some cases.*/
typedef int (*op_read_filter_func)(OggOpusFile *_of,void *_dst,int _dst_sz,
 op_sample *_src,int _nsamples,int _nchannels);

/*Decode some samples and then apply a custom filter to them.
This is used to convert to different output formats.*/ static int op_filter_read_native(OggOpusFile *_of,void *_dst,int _dst_sz, op_read_filter_func _filter,int *_li){ int ret; /*Ensure we have some decoded samples in our buffer.*/ ret=op_read_native(_of,NULL,0,_li); /*Now apply the filter to them.*/ if(OP_LIKELY(ret>=0)&&OP_LIKELY(_of->ready_state>=OP_INITSET)){ int od_buffer_pos; od_buffer_pos=_of->od_buffer_pos; ret=_of->od_buffer_size-od_buffer_pos; if(OP_LIKELY(ret>0)){ int nchannels; nchannels=_of->links[_of->seekable?_of->cur_link:0].head.channel_count; ret=(*_filter)(_of,_dst,_dst_sz, _of->od_buffer+nchannels*od_buffer_pos,ret,nchannels); OP_ASSERT(ret>=0); OP_ASSERT(ret<=_of->od_buffer_size-od_buffer_pos); od_buffer_pos+=ret; _of->od_buffer_pos=od_buffer_pos; } } return ret; } #if !defined(OP_FIXED_POINT)||!defined(OP_DISABLE_FLOAT_API) /*Matrices for downmixing from the supported channel counts to stereo. The matrices with 5 or more channels are normalized to a total volume of 2.0, since most mixes sound too quiet if normalized to 1.0 (as there is generally little volume in the side/rear channels).*/ static const float OP_STEREO_DOWNMIX[OP_NCHANNELS_MAX-2][OP_NCHANNELS_MAX][2]={ /*3.0*/ { {0.5858F,0.0F},{0.4142F,0.4142F},{0.0F,0.5858F} }, /*quadrophonic*/ { {0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.336F} }, /*5.0*/ { {0.651F,0.0F},{0.46F,0.46F},{0.0F,0.651F},{0.5636F,0.3254F}, {0.3254F,0.5636F} }, /*5.1*/ { {0.529F,0.0F},{0.3741F,0.3741F},{0.0F,0.529F},{0.4582F,0.2645F}, {0.2645F,0.4582F},{0.3741F,0.3741F} }, /*6.1*/ { {0.4553F,0.0F},{0.322F,0.322F},{0.0F,0.4553F},{0.3943F,0.2277F}, {0.2277F,0.3943F},{0.2788F,0.2788F},{0.322F,0.322F} }, /*7.1*/ { {0.3886F,0.0F},{0.2748F,0.2748F},{0.0F,0.3886F},{0.3366F,0.1943F}, {0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F},{0.2748F,0.2748F} } }; #endif #if defined(OP_FIXED_POINT) /*Matrices for downmixing from the supported channel counts to stereo. 
The matrices with 5 or more channels are normalized to a total volume of 2.0,
   since most mixes sound too quiet if normalized to 1.0 (as there is
   generally little volume in the side/rear channels).
  Hence we keep the coefficients in Q14, so the downmix values won't overflow
   a 32-bit number.*/
static const opus_int16 OP_STEREO_DOWNMIX_Q14
 [OP_NCHANNELS_MAX-2][OP_NCHANNELS_MAX][2]={
  /*3.0*/
  {
    {9598,0},{6786,6786},{0,9598}
  },
  /*quadrophonic*/
  {
    {6924,0},{0,6924},{5996,3464},{3464,5996}
  },
  /*5.0*/
  {
    {10666,0},{7537,7537},{0,10666},{9234,5331},{5331,9234}
  },
  /*5.1*/
  {
    {8668,0},{6129,6129},{0,8668},{7507,4335},{4335,7507},{6129,6129}
  },
  /*6.1*/
  {
    {7459,0},{5275,5275},{0,7459},{6460,3731},{3731,6460},{4568,4568},
    {5275,5275}
  },
  /*7.1*/
  {
    {6368,0},{4502,4502},{0,6368},{5515,3183},{3183,5515},{5515,3183},
    {3183,5515},{4502,4502}
  }
};

/*In the fixed-point build, op_sample is already the 16-bit output format, so
   op_read() needs no conversion filter.*/
int op_read(OggOpusFile *_of,opus_int16 *_pcm,int _buf_size,int *_li){
  return op_read_native(_of,_pcm,_buf_size,_li);
}

/*Downmix/upmix the interleaved 16-bit samples in _src to stereo in _dst
   using the Q14 coefficient table above.
  Returns the number of (stereo) samples produced.*/
static int op_stereo_filter(OggOpusFile *_of,void *_dst,int _dst_sz,
 op_sample *_src,int _nsamples,int _nchannels){
  (void)_of;
  _nsamples=OP_MIN(_nsamples,_dst_sz>>1);
  if(_nchannels==2)memcpy(_dst,_src,_nsamples*2*sizeof(*_src));
  else{
    opus_int16 *dst;
    int         i;
    dst=(opus_int16 *)_dst;
    if(_nchannels==1){
      /*Mono: duplicate into both output channels.*/
      for(i=0;i<_nsamples;i++)dst[2*i+0]=dst[2*i+1]=_src[i];
    }
    else{
      for(i=0;i<_nsamples;i++){
        opus_int32 l;
        opus_int32 r;
        int        ci;
        l=r=0;
        for(ci=0;ci<_nchannels;ci++){
          opus_int32 s;
          s=_src[_nchannels*i+ci];
          l+=OP_STEREO_DOWNMIX_Q14[_nchannels-3][ci][0]*s;
          r+=OP_STEREO_DOWNMIX_Q14[_nchannels-3][ci][1]*s;
        }
        /*TODO: For 5 or more channels, we should do soft clipping here.*/
        /*Round (add 8192) and shift back out of Q14 before clamping.*/
        dst[2*i+0]=(opus_int16)OP_CLAMP(-32768,l+8192>>14,32767);
        dst[2*i+1]=(opus_int16)OP_CLAMP(-32768,r+8192>>14,32767);
      }
    }
  }
  return _nsamples;
}

int op_read_stereo(OggOpusFile *_of,opus_int16 *_pcm,int _buf_size){
  return op_filter_read_native(_of,_pcm,_buf_size,op_stereo_filter,NULL);
}

# if !defined(OP_DISABLE_FLOAT_API)

/*Convert 16-bit samples to floats in [-1,1), clamping the sample count to
   the destination size.*/
static int op_short2float_filter(OggOpusFile *_of,void *_dst,int _dst_sz,
 op_sample *_src,int _nsamples,int _nchannels){
  float *dst;
  int    i;
  (void)_of;
  dst=(float *)_dst;
  if(OP_UNLIKELY(_nsamples*_nchannels>_dst_sz))_nsamples=_dst_sz/_nchannels;
  _dst_sz=_nsamples*_nchannels;
  for(i=0;i<_dst_sz;i++)dst[i]=(1.0F/32768)*_src[i];
  return _nsamples;
}

int op_read_float(OggOpusFile *_of,float *_pcm,int _buf_size,int *_li){
  return op_filter_read_native(_of,_pcm,_buf_size,op_short2float_filter,_li);
}

/*Downmix to stereo and convert to float in one pass.*/
static int op_short2float_stereo_filter(OggOpusFile *_of,
 void *_dst,int _dst_sz,op_sample *_src,int _nsamples,int _nchannels){
  float *dst;
  int    i;
  dst=(float *)_dst;
  _nsamples=OP_MIN(_nsamples,_dst_sz>>1);
  if(_nchannels==1){
    /*Convert in place, then duplicate to both output channels.*/
    _nsamples=op_short2float_filter(_of,dst,_nsamples,_src,_nsamples,1);
    for(i=_nsamples;i-->0;)dst[2*i+0]=dst[2*i+1]=dst[i];
  }
  else if(_nchannels<5){
    /*For 3 or 4 channels, we can downmix in fixed point without risk of
       clipping.*/
    if(_nchannels>2){
      _nsamples=op_stereo_filter(_of,_src,_nsamples*2,
       _src,_nsamples,_nchannels);
    }
    return op_short2float_filter(_of,dst,_dst_sz,_src,_nsamples,2);
  }
  else{
    /*For 5 or more channels, we convert to floats and then downmix (so that
       we don't risk clipping).*/
    for(i=0;i<_nsamples;i++){
      float l;
      float r;
      int   ci;
      l=r=0;
      for(ci=0;ci<_nchannels;ci++){
        float s;
        s=(1.0F/32768)*_src[_nchannels*i+ci];
        l+=OP_STEREO_DOWNMIX[_nchannels-3][ci][0]*s;
        r+=OP_STEREO_DOWNMIX[_nchannels-3][ci][1]*s;
      }
      dst[2*i+0]=l;
      dst[2*i+1]=r;
    }
  }
  return _nsamples;
}

int op_read_float_stereo(OggOpusFile *_of,float *_pcm,int _buf_size){
  return op_filter_read_native(_of,_pcm,_buf_size,
   op_short2float_stereo_filter,NULL);
}

# endif

#else

# if defined(OP_HAVE_LRINTF)
#  include <math.h>
#  define op_float2int(_x) (lrintf(_x))
# else
#  define op_float2int(_x) ((int)((_x)+((_x)<0?-0.5F:0.5F)))
# endif

/*The dithering code here is adapted from opusdec, part of opus-tools.
It was originally written by Greg Maxwell.*/ static opus_uint32 op_rand(opus_uint32 _seed){ return _seed*96314165+907633515&0xFFFFFFFFU; } /*This implements 16-bit quantization with full triangular dither and IIR noise shaping. The noise shaping filters were designed by Sebastian Gesemann, and are based on the LAME ATH curves with flattening to limit their peak gain to 20 dB. Everyone else's noise shaping filters are mildly crazy. The 48 kHz version of this filter is just a warped version of the 44.1 kHz filter and probably could be improved by shifting the HF shelf up in frequency a little bit, since 48 kHz has a bit more room and being more conservative against bat-ears is probably more important than more noise suppression. This process can increase the peak level of the signal (in theory by the peak error of 1.5 +20 dB, though that is unobservably rare). To avoid clipping, the signal is attenuated by a couple thousandths of a dB. Initially, the approach taken here was to only attenuate by the 99.9th percentile, making clipping rare but not impossible (like SoX), but the limited gain of the filter means that the worst case was only two thousandths of a dB more, so this just uses the worst case. 
The attenuation is probably also helpful to prevent clipping in the DAC
   reconstruction filters or downstream resampling, in any case.*/

/*Slightly less than 32768 to apply the worst-case attenuation described
   above.*/
# define OP_GAIN (32753.0F)

# define OP_PRNG_GAIN (1.0F/(float)0xFFFFFFFF)

/*48 kHz noise shaping filter, sd=2.34.*/

static const float OP_FCOEF_B[4]={
  2.2374F,-0.7339F,-0.1251F,-0.6033F
};

static const float OP_FCOEF_A[4]={
  0.9030F,0.0116F,-0.5853F,-0.2571F
};

/*Convert float samples to 16-bit output, with optional soft clipping and
   (unless disabled) triangular dither plus IIR noise shaping.
  Per-channel filter state lives in _of->dither_a/_of->dither_b and is reset
   when the channel count changes or after a stretch of digital silence.*/
static int op_float2short_filter(OggOpusFile *_of,void *_dst,int _dst_sz,
 float *_src,int _nsamples,int _nchannels){
  opus_int16 *dst;
  int         ci;
  int         i;
  dst=(opus_int16 *)_dst;
  if(OP_UNLIKELY(_nsamples*_nchannels>_dst_sz))_nsamples=_dst_sz/_nchannels;
# if defined(OP_SOFT_CLIP)
  if(_of->state_channel_count!=_nchannels){
    for(ci=0;ci<_nchannels;ci++)_of->clip_state[ci]=0;
  }
  opus_pcm_soft_clip(_src,_nsamples,_nchannels,_of->clip_state);
# endif
  if(_of->dither_disabled){
    /*No dither requested: plain scale, round, and clamp.*/
    for(i=0;i<_nchannels*_nsamples;i++){
      dst[i]=op_float2int(OP_CLAMP(-32768,32768.0F*_src[i],32767));
    }
  }
  else{
    opus_uint32 seed;
    int         mute;
    seed=_of->dither_seed;
    mute=_of->dither_mute;
    if(_of->state_channel_count!=_nchannels)mute=65;
    /*In order to avoid replacing digital silence with quiet dither noise, we
       mute if the output has been silent for a while.*/
    if(mute>64)memset(_of->dither_a,0,sizeof(*_of->dither_a)*4*_nchannels);
    for(i=0;i<_nsamples;i++){
      int silent;
      silent=1;
      for(ci=0;ci<_nchannels;ci++){
        float r;
        float s;
        float err;
        int   si;
        int   j;
        s=_src[_nchannels*i+ci];
        silent&=s==0;
        s*=OP_GAIN;
        /*Run the IIR noise-shaping filter over this channel's history.*/
        err=0;
        for(j=0;j<4;j++){
          err+=OP_FCOEF_B[j]*_of->dither_b[ci*4+j]
           -OP_FCOEF_A[j]*_of->dither_a[ci*4+j];
        }
        /*Shift the per-channel filter history down one slot.*/
        for(j=3;j-->0;)_of->dither_a[ci*4+j+1]=_of->dither_a[ci*4+j];
        for(j=3;j-->0;)_of->dither_b[ci*4+j+1]=_of->dither_b[ci*4+j];
        _of->dither_a[ci*4]=err;
        s-=err;
        /*Triangular dither: the difference of two uniform variates.
          Suppressed entirely while muted.*/
        if(mute>16)r=0;
        else{
          seed=op_rand(seed);
          r=seed*OP_PRNG_GAIN;
          seed=op_rand(seed);
          r-=seed*OP_PRNG_GAIN;
        }
        /*Clamp in float out of paranoia that the input will be > 96 dBFS and
           wrap if the integer is clamped.*/
        si=op_float2int(OP_CLAMP(-32768,s+r,32767));
        dst[_nchannels*i+ci]=(opus_int16)si;
        /*Including clipping in the noise shaping is generally disastrous:
           the futile effort to restore the clipped energy results in more
           clipping.
          However, small amounts---at the level which could normally be
           created by dither and rounding---are harmless and can even reduce
           clipping somewhat due to the clipping sometimes reducing the
           dither + rounding error.*/
        _of->dither_b[ci*4]=mute>16?0:OP_CLAMP(-1.5F,si-s,1.5F);
      }
      mute++;
      if(!silent)mute=0;
    }
    _of->dither_mute=OP_MIN(mute,65);
    _of->dither_seed=seed;
  }
  _of->state_channel_count=_nchannels;
  return _nsamples;
}

int op_read(OggOpusFile *_of,opus_int16 *_pcm,int _buf_size,int *_li){
  return op_filter_read_native(_of,_pcm,_buf_size,op_float2short_filter,_li);
}

int op_read_float(OggOpusFile *_of,float *_pcm,int _buf_size,int *_li){
  /*Reading floats bypasses the dither filter, so invalidate its state.*/
  _of->state_channel_count=0;
  return op_read_native(_of,_pcm,_buf_size,_li);
}

/*Downmix/upmix the interleaved float samples in _src to stereo in _dst
   using the float coefficient table above.*/
static int op_stereo_filter(OggOpusFile *_of,void *_dst,int _dst_sz,
 op_sample *_src,int _nsamples,int _nchannels){
  (void)_of;
  _nsamples=OP_MIN(_nsamples,_dst_sz>>1);
  if(_nchannels==2)memcpy(_dst,_src,_nsamples*2*sizeof(*_src));
  else{
    float *dst;
    int    i;
    dst=(float *)_dst;
    if(_nchannels==1){
      /*Mono: duplicate into both output channels.*/
      for(i=0;i<_nsamples;i++)dst[2*i+0]=dst[2*i+1]=_src[i];
    }
    else{
      for(i=0;i<_nsamples;i++){
        float l;
        float r;
        int   ci;
        l=r=0;
        for(ci=0;ci<_nchannels;ci++){
          l+=OP_STEREO_DOWNMIX[_nchannels-3][ci][0]*_src[_nchannels*i+ci];
          r+=OP_STEREO_DOWNMIX[_nchannels-3][ci][1]*_src[_nchannels*i+ci];
        }
        dst[2*i+0]=l;
        dst[2*i+1]=r;
      }
    }
  }
  return _nsamples;
}

/*Downmix to stereo, then quantize to 16 bits with dither.*/
static int op_float2short_stereo_filter(OggOpusFile *_of,
 void *_dst,int _dst_sz,op_sample *_src,int _nsamples,int _nchannels){
  opus_int16 *dst;
  dst=(opus_int16 *)_dst;
  if(_nchannels==1){
    int i;
    /*Quantize in place, then duplicate to both output channels.*/
    _nsamples=op_float2short_filter(_of,dst,_dst_sz>>1,_src,_nsamples,1);
    for(i=_nsamples;i-->0;)dst[2*i+0]=dst[2*i+1]=dst[i];
  }
  else{
    if(_nchannels>2){
      _nsamples=OP_MIN(_nsamples,_dst_sz>>1);
      _nsamples=op_stereo_filter(_of,_src,_nsamples*2,
       _src,_nsamples,_nchannels);
    }
    _nsamples=op_float2short_filter(_of,dst,_dst_sz,_src,_nsamples,2);
  }
  return _nsamples;
}

int op_read_stereo(OggOpusFile *_of,opus_int16 *_pcm,int _buf_size){
  return op_filter_read_native(_of,_pcm,_buf_size,
   op_float2short_stereo_filter,NULL);
}

int op_read_float_stereo(OggOpusFile *_of,float *_pcm,int _buf_size){
  /*Reading floats bypasses the dither filter, so invalidate its state.*/
  _of->state_channel_count=0;
  return op_filter_read_native(_of,_pcm,_buf_size,op_stereo_filter,NULL);
}

#endif
879408.c
/****************************************************************************** * * Copyright (C) 2019 Xilinx, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * * ******************************************************************************/ #include "xpm_common.h" #include "xpm_pldomain.h" #include "xpm_device.h" #include "xpm_domain_iso.h" #include "xpm_regs.h" #include "xpm_reset.h" #include "xpm_bisr.h" #include "xpm_pmc.h" #include "xparameters.h" #include "sleep.h" #define XPM_NODEIDX_DEV_GT_MIN XPM_NODEIDX_DEV_GT_0 #define XPM_NODEIDX_DEV_GT_MAX XPM_NODEIDX_DEV_GT_10 //If TRIM_CRAM[31:0]=0 (FUSE not programmed). 
Then set rw_read_voltages to 0.61V + 0.625V #define CRAM_TRIM_RW_READ_VOLTAGE 0x185 XCframe CframeIns={0}; /* CFRAME Driver Instance */ XCfupmc CfupmcIns={0}; /* CFU Driver Instance */ u32 PlpdHouseCleanBypass = 0; static XStatus PldInitFinish(u32 *Args, u32 NumOfArgs) { XStatus Status = XST_SUCCESS; (void)Args; (void)NumOfArgs; if (XST_SUCCESS == XPmPower_CheckPower( PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCINT_RAM_MASK | PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCAUX_MASK)) { /* Remove vccaux-vccram domain isolation */ Status = XPmDomainIso_Control(XPM_NODEIDX_ISO_VCCAUX_VCCRAM, FALSE_IMMEDIATE); if (XST_SUCCESS != Status) { goto done; } } if (XST_SUCCESS == XPmPower_CheckPower( PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCINT_RAM_MASK | PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCINT_SOC_MASK)) { /* Remove vccaux-vccram domain isolation */ Status = XPmDomainIso_Control(XPM_NODEIDX_ISO_VCCRAM_SOC, FALSE_IMMEDIATE); if (XST_SUCCESS != Status) { goto done; } } XCfupmc_GlblSeqInit(&CfupmcIns); done: return Status; } static XStatus PldGtyMbist(u32 BaseAddress) { XStatus Status = XST_SUCCESS; PmOut32(BaseAddress + GTY_PCSR_MASK_OFFSET, GTY_PCSR_MEM_CLEAR_TRIGGER_MASK); PmOut32(BaseAddress + GTY_PCSR_CONTROL_OFFSET, GTY_PCSR_MEM_CLEAR_TRIGGER_MASK); Status = XPm_PollForMask(BaseAddress + GTY_PCSR_STATUS_OFFSET, GTY_PCSR_STATUS_MEM_CLEAR_DONE_MASK, XPM_POLL_TIMEOUT); if (XST_SUCCESS != Status) { goto done; } Status = XPm_PollForMask(BaseAddress + GTY_PCSR_STATUS_OFFSET, GTY_PCSR_STATUS_MEM_CLEAR_PASS_MASK, XPM_POLL_TIMEOUT); if (XST_SUCCESS != Status) { goto done; } /* Unwrite trigger bits */ PmOut32(BaseAddress + GTY_PCSR_MASK_OFFSET, GTY_PCSR_MEM_CLEAR_TRIGGER_MASK); PmOut32(BaseAddress + GTY_PCSR_CONTROL_OFFSET, 0); done: return Status; } static void PldApplyTrim(u32 TrimType) { u32 TrimVal; Xuint128 VggTrim={0}; XPm_Device *EfuseCache = XPmDevice_GetById(PM_DEV_EFUSE_CACHE); if (NULL == EfuseCache) { goto done; } /* Read the corresponding efuse registers for TRIM values */ switch (TrimType) { /* Read VGG trim 
efuse registers */ case XPM_PL_TRIM_VGG: { PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_CFRM_VGG_0_OFFSET, VggTrim.Word0); PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_CFRM_VGG_1_OFFSET, VggTrim.Word1); PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_CFRM_VGG_2_OFFSET, VggTrim.Word2); XCframe_VggTrim(&CframeIns, &VggTrim); } break; /* Read CRAM trim efuse registers */ case XPM_PL_TRIM_CRAM: { PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_CRAM_OFFSET, TrimVal); /* if eFUSE is not programmed, then set rw_read_voltages to 0.61V + 0.625V by writing */ if ((TrimVal == 0) && (PLATFORM_VERSION_SILICON == Platform) && (PLATFORM_VERSION_SILICON_ES1 == PlatformVersion)) TrimVal = CRAM_TRIM_RW_READ_VOLTAGE; XCframe_CramTrim(&CframeIns, TrimVal); } break; /* Read BRAM trim efuse registers */ case XPM_PL_TRIM_BRAM: { PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_BRAM_OFFSET, TrimVal); XCframe_BramTrim(&CframeIns, TrimVal); } break; /* Read URAM trim efuse registers */ case XPM_PL_TRIM_URAM: { PmIn32(EfuseCache->Node.BaseAddress + EFUSE_CACHE_TRIM_URAM_OFFSET, TrimVal); XCframe_UramTrim(&CframeIns, TrimVal); } break; default: { break; } } done: return; } XStatus PldCfuInit() { XStatus Status; XCfupmc_Config *Config; if(CfupmcIns.IsReady) { Status = XST_SUCCESS; goto done; } /* * Initialize the CFU driver so that it's ready to use * look up the configuration in the config table, * then initialize it. */ Config = XCfupmc_LookupConfig((u16)XPAR_XCFUPMC_0_DEVICE_ID); if (NULL == Config) { Status = XST_FAILURE; goto done; } Status = XCfupmc_CfgInitialize(&CfupmcIns, Config, Config->BaseAddress); if (Status != XST_SUCCESS) { goto done; } /* * Performs the self-test to check hardware build. 
*/ Status = XCfupmc_SelfTest(&CfupmcIns); if (Status != XST_SUCCESS) { goto done; } done: return Status; } static XStatus PldCframeInit() { XStatus Status; XCframe_Config *Config; if(CframeIns.IsReady) { Status = XST_SUCCESS; goto done; } /* * Initialize the Cframe driver so that it's ready to use * look up the configuration in the config table, * then initialize it. */ Config = XCframe_LookupConfig((u16)XPAR_XCFRAME_0_DEVICE_ID); if (NULL == Config) { Status = XST_FAILURE; goto done; } Status = XCframe_CfgInitialize(&CframeIns, Config, Config->BaseAddress); if (XST_SUCCESS != Status) { goto done; } /* * Performs the self-test to check hardware build. */ Status = XCframe_SelfTest(&CframeIns); if (XST_SUCCESS != Status) { goto done; } done: return Status; } static XStatus GtyHouseClean() { XStatus Status = XST_SUCCESS; unsigned int i; XPm_Device *Device; u32 GtyAddresses[XPM_NODEIDX_DEV_GT_MAX - XPM_NODEIDX_DEV_GT_MIN + 1]; for (i = 0; i < ARRAY_SIZE(GtyAddresses); i++) { Device = XPmDevice_GetById(GT_DEVID(XPM_NODEIDX_DEV_GT_MIN + i)); GtyAddresses[i] = Device->Node.BaseAddress; } for (i = 0; i < ARRAY_SIZE(GtyAddresses); i++) { PmOut32(GtyAddresses[i] + GTY_PCSR_LOCK_OFFSET, PCSR_UNLOCK_VAL); /* Deassert INITCTRL */ PmOut32(GtyAddresses[i] + GTY_PCSR_MASK_OFFSET, GTY_PCSR_INITCTRL_MASK); PmOut32(GtyAddresses[i] + GTY_PCSR_CONTROL_OFFSET, 0); PmOut32(GtyAddresses[i] + GTY_PCSR_LOCK_OFFSET, 1); } if(!PlpdHouseCleanBypass) { /* Bisr repair - Bisr should be triggered only for Addresses for which repair * data is found and so not calling in loop. Trigger is handled in below routine * */ Status = XPmBisr_Repair(GTY_TAG_ID); if (XST_SUCCESS != Status) { goto done; } for (i = 0; i < ARRAY_SIZE(GtyAddresses); i++) { PmOut32(GtyAddresses[i] + GTY_PCSR_LOCK_OFFSET, PCSR_UNLOCK_VAL); /* Mbist */ Status = PldGtyMbist(GtyAddresses[i]); if (XST_SUCCESS != Status) { /* Gt Mem clear is found to be failing on some parts. 
Just print message and return not to break execution */ PmInfo("ERROR: GT Mem clear Failed\r\n"); Status = XST_SUCCESS; PmOut32(GtyAddresses[i] + GTY_PCSR_LOCK_OFFSET, 1); goto done; } PmOut32(GtyAddresses[i] + GTY_PCSR_LOCK_OFFSET, 1); } } done: return Status; } static XStatus PldInitStart(u32 *Args, u32 NumOfArgs) { XStatus Status = XST_SUCCESS; XPm_Pmc *Pmc; u32 PlPowerUpTime=0; (void)Args; (void)NumOfArgs; /* Reset Bypass flag */ PlpdHouseCleanBypass = 0; /* Proceed only if vccint, vccaux, vccint_ram is 1 */ while (XST_SUCCESS != XPmPower_CheckPower(PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCINT_PL_MASK | PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCINT_RAM_MASK | PMC_GLOBAL_PWR_SUPPLY_STATUS_VCCAUX_MASK)) { /** Wait for PL power up */ usleep(10); PlPowerUpTime++; if (PlPowerUpTime > XPM_POLL_TIMEOUT) { XPlmi_Printf(DEBUG_GENERAL, "ERROR: PL Power Up TimeOut\n\r"); Status = XST_FAILURE; /* TODO: Request PMC to power up all required rails and wait for the acknowledgement.*/ goto done; } } /* Remove POR for PL */ Status = XPmReset_AssertbyId(PM_RST_PL_POR, PM_RESET_ACTION_RELEASE); /* Toggle PS POR */ if((PLATFORM_VERSION_SILICON == Platform) && (PLATFORM_VERSION_SILICON_ES1 == PlatformVersion)) { /* EDT-995767: Theres a bug with ES1, due to which a small percent (<2%) of device may miss pl_por_b during power, which could result CFRAME wait up in wrong state. The work around requires to toggle PL_POR twice after PL supplies is up. 
*/
	/* (continuation of the PL power-up sequence that begins before this chunk) */
	Status = XPmReset_AssertbyId(PM_RST_PL_POR, PM_RESET_ACTION_PULSE);
	}

	/* NOTE(review): stray double semicolon below — harmless, but worth removing */
	Pmc = (XPm_Pmc *)XPmDevice_GetById(PM_DEV_PMC_PROC);;
	if (NULL == Pmc) {
		Status = XST_FAILURE;
		goto done;
	}

	/* Check for PL PowerUp */
	Status = XPm_PollForMask(Pmc->PmcGlobalBaseAddr + PMC_GLOBAL_PL_STATUS_OFFSET,
				 PMC_GLOBAL_PL_STATUS_POR_PL_B_MASK, XPM_POLL_TIMEOUT);
	if (XST_SUCCESS != Status) {
		goto done;
	}

	/* Remove SRST for PL */
	/* NOTE(review): this Status is overwritten by the next call without being
	 * checked — confirm whether a failed SRST release should abort the sequence */
	Status = XPmReset_AssertbyId(PM_RST_PL_SRST, PM_RESET_ACTION_RELEASE);

	/* Remove PL-SOC isolation */
	Status = XPmDomainIso_Control(XPM_NODEIDX_ISO_PL_SOC, FALSE_IMMEDIATE);
	if (XST_SUCCESS != Status) {
		goto done;
	}
	/* Remove PMC-SOC isolation */
	Status = XPmDomainIso_Control(XPM_NODEIDX_ISO_PMC_SOC_NPI, FALSE_IMMEDIATE);
	if (XST_SUCCESS != Status) {
		goto done;
	}

	Status = PldCfuInit();
	if (XST_SUCCESS != Status) {
		goto done;
	}
	Status = PldCframeInit();
	if (XST_SUCCESS != Status) {
		goto done;
	}

	/* Enable the global signals */
	XCfupmc_SetGlblSigEn(&CfupmcIns, (u8 )TRUE);

done:
	return Status;
}

/*
 * PL power-domain housecleaning: GTY housecleaning, BISR repair of the hard
 * blocks, BRAM/URAM/VGG/CRAM trims, CFRAME houseclean and the hard-block
 * scan clear / MBIST, ending with CFU init_complete.
 * Args[0] == 1 requests that the houseclean body be bypassed.
 */
static XStatus PldHouseClean(u32 *Args, u32 NumOfArgs)
{
	XStatus Status = XST_SUCCESS;
	XPm_PlDomain *Pld;
	u32 Value = 0;

	/* If Arg0 is set, bypass houseclean */
	if (NumOfArgs && Args[0] == 1)
		PlpdHouseCleanBypass = 1;

	if (PLATFORM_VERSION_SILICON == Platform) {
		/*House clean GTY*/
		/* NOTE(review): a GTY housecleaning failure is logged but does not
		 * abort — confirm this best-effort behavior is intended */
		Status = GtyHouseClean();
		if (XST_SUCCESS != Status) {
			XPlmi_Printf(DEBUG_GENERAL,
				     "ERROR: %s : GTY HC failed", __func__);
		}
	}

	Status = XPmDomainIso_Control(XPM_NODEIDX_ISO_PMC_PL_CFRAME, FALSE_IMMEDIATE);
	if (XST_SUCCESS != Status) {
		goto done;
	}

	Pld = (XPm_PlDomain *)XPmPower_GetById(PM_POWER_PLD);
	if (NULL == Pld) {
		Status = XST_FAILURE;
		goto done;
	}

//#ifndef PLPD_HOUSECLEAN_BYPASS
	if (!PlpdHouseCleanBypass) {
		/* Enable ROWON */
		XCframe_WriteCmd(&CframeIns, XCFRAME_FRAME_BCAST,
				 XCFRAME_CMD_REG_ROWON);
		/* HCLEANR type 3,4,5,6 */
		XCframe_WriteCmd(&CframeIns, XCFRAME_FRAME_BCAST,
				 XCFRAME_CMD_REG_HCLEANR);

		/* HB BISR REPAIR */
		Status = XPmBisr_Repair(DCMAC_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}
		Status = XPmBisr_Repair(ILKN_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}
		Status = XPmBisr_Repair(MRMAC_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}
		Status = XPmBisr_Repair(SDFEC_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}

		/* BRAM/URAM TRIM */
		PldApplyTrim(XPM_PL_TRIM_BRAM);
		PldApplyTrim(XPM_PL_TRIM_URAM);

		/* BRAM/URAM repair */
		Status = XPmBisr_Repair(BRAM_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}
		Status = XPmBisr_Repair(URAM_TAG_ID);
		if (XST_SUCCESS != Status) {
			goto done;
		}

		/* HCLEAN type 0,1,2 */
		XCframe_WriteCmd(&CframeIns, XCFRAME_FRAME_BCAST,
				 XCFRAME_CMD_REG_HCLEAN);

		/* Poll for house clean completion */
		/* NOTE(review): the two busy-waits below have no timeout, unlike
		 * the XPm_PollForMask calls elsewhere — a stuck CFU hangs the PLM */
		XPlmi_Printf(DEBUG_INFO,
			     "INFO: %s : Waiitng for PL HC complete....", __func__);
		while ((Xil_In32(Pld->CfuApbBaseAddr + CFU_APB_CFU_STATUS_OFFSET) &
			CFU_APB_CFU_STATUS_HC_COMPLETE_MASK) !=
		       CFU_APB_CFU_STATUS_HC_COMPLETE_MASK);
		XPlmi_Printf(DEBUG_INFO, "Done\r\n");

		XPlmi_Printf(DEBUG_INFO,
			     "INFO: %s : CFRAME_BUSY to go low...", __func__);
		while ((Xil_In32(Pld->CfuApbBaseAddr + CFU_APB_CFU_STATUS_OFFSET) &
			CFU_APB_CFU_STATUS_CFI_CFRAME_BUSY_MASK) ==
		       CFU_APB_CFU_STATUS_CFI_CFRAME_BUSY_MASK);
		XPlmi_Printf(DEBUG_INFO, "Done\r\n");

		/* VGG TRIM */
		PldApplyTrim(XPM_PL_TRIM_VGG);
		/* CRAM TRIM */
		PldApplyTrim(XPM_PL_TRIM_CRAM);

		if (PLATFORM_VERSION_SILICON != Platform) {
			Status = XST_SUCCESS;
			goto done;
		}

		/* LAGUNA REPAIR - not needed for now */

		/* There is no status for Bisr done in hard ip. But we must ensure
		 * BISR is complete before scan clear */
		/*TBD - Wait for how long?? Wei to confirm with DFT guys */

		/* Fake read */
		/* each register is 128 bits long so issue 4 reads */
		XPlmi_Printf(DEBUG_INFO,
			     "INFO: %s : CFRAME Fake Read...", __func__);
		PmIn32(Pld->Cframe0RegBaseAddr + 0, Value);
		PmIn32(Pld->Cframe0RegBaseAddr + 4, Value);
		PmIn32(Pld->Cframe0RegBaseAddr + 8, Value);
		PmIn32(Pld->Cframe0RegBaseAddr + 12, Value);
		XPlmi_Printf(DEBUG_INFO, "Done\r\n");

		/* Unlock CFU writes */
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_PROTECT_OFFSET, 0);

		/* PL scan clear / MBIST */
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_MASK_OFFSET,
			CFU_APB_CFU_FGCR_SC_HBC_TRIGGER_MASK);
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_FGCR_OFFSET,
			CFU_APB_CFU_FGCR_SC_HBC_TRIGGER_MASK);

		/* Poll for status */
		XPlmi_Printf(DEBUG_INFO,
			     "INFO: %s : Wait for Hard Block Scan Clear / MBIST complete...",
			     __func__);
		Status = XPm_PollForMask(Pld->CfuApbBaseAddr + CFU_APB_CFU_STATUS_OFFSET,
					 CFU_APB_CFU_STATUS_SCAN_CLEAR_DONE_MASK,
					 XPM_POLL_TIMEOUT);
		if (XST_SUCCESS != Status) {
			XPlmi_Printf(DEBUG_INFO, "ERROR\r\n");
			/** HACK: Continuing even if CFI SC is not completed */
			Status = XST_SUCCESS;
			//Status = XST_FAILURE;
			//goto done;
		} else {
			XPlmi_Printf(DEBUG_INFO, "Done\r\n");
		}

		/* Check if Scan Clear Passed */
		if ((XPm_In32(Pld->CfuApbBaseAddr + CFU_APB_CFU_STATUS_OFFSET) &
		     CFU_APB_CFU_STATUS_SCAN_CLEAR_PASS_MASK) !=
		    CFU_APB_CFU_STATUS_SCAN_CLEAR_PASS_MASK) {
			XPlmi_Printf(DEBUG_GENERAL,
				     "ERROR: %s: Hard Block Scan Clear / MBIST FAILED\r\n",
				     __func__);
			/** HACK: Continuing even if CFI SC is not pass */
			Status = XST_SUCCESS;
			//Status = XST_FAILURE;
			//goto done;
		}

		/* Unwrite trigger bits for PL scan clear / MBIST */
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_MASK_OFFSET,
			CFU_APB_CFU_FGCR_SC_HBC_TRIGGER_MASK);
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_FGCR_OFFSET, 0);

		/* Lock CFU writes */
		PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_PROTECT_OFFSET, 1);
	}
//#endif /* PLPD_HOUSECLEAN_BYPASS */

	/* Unlock CFU writes */
	PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_PROTECT_OFFSET, 0);

	/* Set init_complete */
	PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_MASK_OFFSET,
		CFU_APB_CFU_FGCR_INIT_COMPLETE_MASK);
	PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_FGCR_OFFSET,
		CFU_APB_CFU_FGCR_INIT_COMPLETE_MASK);

	/* Lock CFU writes */
	PmOut32(Pld->CfuApbBaseAddr + CFU_APB_CFU_PROTECT_OFFSET, 1);

	/* Compilation warning fix */
	(void)Value;

done:
	return Status;
}

/* Power-domain operations table for the PL domain. */
struct XPm_PowerDomainOps PldOps = {
	.InitStart = PldInitStart,
	.InitFinish = PldInitFinish,
	.PlHouseclean = PldHouseClean,
};

/* Saved generic power-event handler, captured in XPmPlDomain_Init before the
 * node's handler is replaced with HandlePlDomainEvent.
 * NOTE(review): never invoked in the code visible here — confirm whether the
 * PL-specific handler should delegate to it for unhandled events. */
static XStatus (*HandlePowerEvent)(XPm_Node *Node, u32 Event);

/*
 * PL-domain power event handler: tracks UseCount and toggles the node state
 * between ON and OFF for PWR_UP / PWR_DOWN events.
 * NOTE(review): Status is declared u32 while the function returns XStatus —
 * harmless if the typedefs agree, but inconsistent with the rest of the file.
 */
static XStatus HandlePlDomainEvent(XPm_Node *Node, u32 Event)
{
	u32 Status = XST_FAILURE;
	XPm_Power *Power = (XPm_Power *)Node;

	PmDbg("State=%d, Event=%d\n\r", Node->State, Event);

	switch (Node->State) {
	case XPM_POWER_STATE_ON:
		if (XPM_POWER_EVENT_PWR_UP == Event) {
			Status = XST_SUCCESS;
			Power->UseCount++;
		} else if (XPM_POWER_EVENT_PWR_DOWN == Event) {
			Status = XST_SUCCESS;
			Power->UseCount--;
			Node->State = XPM_POWER_STATE_OFF;
		} else {
			Status = XST_FAILURE;
		}
		break;
	case XPM_POWER_STATE_OFF:
		if (XPM_POWER_EVENT_PWR_UP == Event) {
			Status = XST_SUCCESS;
			Power->UseCount++;
			Node->State = XPM_POWER_STATE_ON;
		} else if (XPM_POWER_EVENT_PWR_DOWN == Event) {
			Status = XST_SUCCESS;
			Power->UseCount--;
		} else {
			Status = XST_FAILURE;
		}
		break;
	default:
		PmWarn("Wrong state %d for event %d\n", Node->State, Event);
		break;
	}

	return Status;
}

/*
 * Initializes a PL power-domain node: base power-domain init, initial state
 * OFF with UseCount 1, installs the PL-specific event handler (saving the
 * generic one), and records the CFU APB and CFRAME0 register base addresses.
 * Returns XST_FAILURE when fewer than 2 extra base addresses are supplied
 * (note: the node state/handler changes above have already been applied).
 */
XStatus XPmPlDomain_Init(XPm_PlDomain *PlDomain, u32 Id, u32 BaseAddress,
			 XPm_Power *Parent, u32 *OtherBaseAddresses,
			 u32 OtherBaseAddressCnt)
{
	XStatus Status = XST_SUCCESS;

	XPmPowerDomain_Init(&PlDomain->Domain, Id, BaseAddress, Parent, &PldOps);
	PlDomain->Domain.Power.Node.State = XPM_POWER_STATE_OFF;
	PlDomain->Domain.Power.UseCount = 1;

	HandlePowerEvent = PlDomain->Domain.Power.Node.HandleEvent;
	PlDomain->Domain.Power.Node.HandleEvent = HandlePlDomainEvent;

	/* Make sure enough base addresses are being passed */
	if (2 <= OtherBaseAddressCnt) {
		PlDomain->CfuApbBaseAddr = OtherBaseAddresses[0];
		PlDomain->Cframe0RegBaseAddr = OtherBaseAddresses[1];
	} else {
		Status = XST_FAILURE;
	}

	return Status;
}
34842.c
/*
>>~~ ACM PROBLEM ~~<<
ID: 369
Name: Combinations
Author: Arash Shakery
Email: [email protected]
Language: C
*/
#include <stdio.h>
#include <string.h> /* fix: memset was used without a declaration */

/* First 25 primes: sufficient to factor every integer <= 100
 * (the problem's input bound on n). */
static const int PRIMES[25] = {
    2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
    53, 59, 61, 67, 71, 73, 79, 83, 89, 97
};

/*
 * Computes C(n, m) exactly via prime factorization: accumulate the prime
 * exponents of n*(n-1)*...*(n-m+1), subtract those of m!, then multiply
 * the surviving factors. Avoids intermediate overflow for n <= 100.
 * Requires 0 <= m <= n <= 100. Returns the exact binomial coefficient.
 */
static long long binomial(int n, int m)
{
    int exp[25];
    long long result = 1;

    memset(exp, 0, sizeof(exp));

    /* C(n, m) == C(n, n - m); use the smaller m to do less work */
    if (m > n / 2)
        m = n - m;

    /* numerator: n * (n-1) * ... * (n-m+1) */
    for (int i = n; i > n - m; i--) {
        int t = i;
        for (int j = 0; j < 25; j++)
            while (t % PRIMES[j] == 0) {
                exp[j]++;
                t /= PRIMES[j];
            }
    }
    /* denominator: m! */
    for (int i = m; i > 0; i--) {
        int t = i;
        for (int j = 0; j < 25; j++)
            while (t % PRIMES[j] == 0) {
                exp[j]--;
                t /= PRIMES[j];
            }
    }

    for (int j = 0; j < 25; j++)
        while (exp[j]-- > 0)
            result *= PRIMES[j];

    return result;
}

int main(void)
{
    int n, m;

    /* fix: the original looped on != EOF, which spins forever on malformed
     * input (scanf returns 0, not EOF); require both numbers to be read */
    while (scanf("%d%d", &n, &m) == 2) {
        if (n == 0 && m == 0)
            break;
        /* fix: res is long long; the original printed it with %d (UB) */
        printf("%d things taken %d at a time is %lld exactly.\n",
               n, m, binomial(n, m));
    }
    return 0;
}
662057.c
#include "sd.h"
#include "uart.h"

#include <stdint.h> /* fix: fixed-width types were used without a portable declaration */

#ifndef PITON_SD_BASE_ADDR
#define PITON_SD_BASE_ADDR 0xf000000000L
#endif

#ifndef PITON_SD_LENGTH
#define PITON_SD_LENGTH 0xff0300000L
#endif

/* Number of 64-bit words per 512-byte SD block (512 / 8). */
#define SD_WORDS_PER_BLOCK 64

/*
 * Initializes the SD interface. Currently only announces itself on the
 * UART; the mapped window at PITON_SD_BASE_ADDR needs no further setup.
 * Always returns 0.
 */
int init_sd(void)
{
    print_uart("initializing SD... \r\n");
    return 0;
}

/*
 * Copies `size` 512-byte blocks, starting at logical block address
 * `src_lba`, from the memory-mapped SD window at PITON_SD_BASE_ADDR
 * into `dst`. `dst` must hold at least size * 512 bytes and be 8-byte
 * aligned (copy is done in 64-bit words). Prints progress every 100
 * blocks. Always returns 0.
 */
int sd_copy(void *dst, uint32_t src_lba, uint32_t size)
{
    uint64_t raw_addr = PITON_SD_BASE_ADDR;
    raw_addr += ((uint64_t)src_lba) << 9; /* LBA * 512 */

    /* volatile: the source is the fixed device window, not ordinary RAM —
     * reads must not be merged or reordered by the compiler */
    volatile const uint64_t *addr = (volatile const uint64_t *)raw_addr;
    volatile uint64_t *p = (uint64_t *)dst;

    for (uint32_t blk = 0; blk < size; blk++) {
        if (blk % 100 == 0) {
            print_uart("copying block ");
            print_uart_dec(blk, 1);
            print_uart(" of ");
            print_uart_dec(size, 1);
            print_uart(" blocks (");
            /* fix: blk * 100 overflows uint32_t once blk > ~42.9M blocks
             * (~21 GiB), which PITON_SD_LENGTH (~68 GB) permits; compute
             * the percentage in 64 bits */
            print_uart_dec((uint32_t)(((uint64_t)blk * 100) / size), 1);
            print_uart(" %)\r\n");
        }
        for (uint32_t offset = 0; offset < SD_WORDS_PER_BLOCK; offset++) {
            *(p++) = *(addr++);
        }
    }
    return 0;
}
645711.c
/* * Copyright 2004-2009, Axel Dörfler, [email protected]. * Distributed under the terms of the MIT License. */ #include <stdio.h> #include <errno.h> #include <errno_private.h> #include <syscalls.h> int remove(const char* path) { // TODO: find a better way that does not require two syscalls for directories int status = _kern_unlink(-1, path); if (status == B_IS_A_DIRECTORY) status = _kern_remove_dir(-1, path); if (status != B_OK) { __set_errno(status); return -1; } return status; }
405543.c
#include "sockaddr_conv.h" #include "conversions.h" #include "sendrecvmsg.h" /* for ancillary registry */ #ifdef PHP_WIN32 # include "windows_common.h" #endif #include <Zend/zend_llist.h> #include <zend_smart_str.h> #ifndef PHP_WIN32 # include <sys/types.h> # include <sys/socket.h> # include <arpa/inet.h> # include <netinet/in.h> # include <sys/un.h> # include <sys/ioctl.h> # include <net/if.h> #else # include <win32/php_stdint.h> #endif #include <limits.h> #include <stdarg.h> #include <stddef.h> #ifdef PHP_WIN32 typedef unsigned short sa_family_t; # define msghdr _WSAMSG /* struct _WSAMSG { LPSOCKADDR name; //void *msg_name INT namelen; //socklen_t msg_namelen LPWSABUF lpBuffers; //struct iovec *msg_iov ULONG dwBufferCount; //size_t msg_iovlen WSABUF Control; //void *msg_control, size_t msg_controllen DWORD dwFlags; //int msg_flags } struct __WSABUF { u_long len; //size_t iov_len (2nd member) char FAR *buf; //void *iov_base (1st member) } struct _WSACMSGHDR { UINT cmsg_len; //socklen_t cmsg_len INT cmsg_level; //int cmsg_level INT cmsg_type; //int cmsg_type; followed by UCHAR cmsg_data[] } */ # define msg_name name # define msg_namelen namelen # define msg_iov lpBuffers # define msg_iovlen dwBufferCount # define msg_control Control.buf # define msg_controllen Control.len # define msg_flags dwFlags # define iov_base buf # define iov_len len # define cmsghdr _WSACMSGHDR # ifdef CMSG_DATA # undef CMSG_DATA # endif # define CMSG_DATA WSA_CMSG_DATA #endif #define MAX_USER_BUFF_SIZE ((size_t)(100*1024*1024)) #define DEFAULT_BUFF_SIZE 8192 struct _ser_context { HashTable params; /* stores pointers; has to be first */ struct err_s err; zend_llist keys, /* common part to res_context ends here */ allocations; php_socket *sock; }; struct _res_context { HashTable params; /* stores pointers; has to be first */ struct err_s err; zend_llist keys; }; typedef struct { /* zval info */ const char *name; unsigned name_size; int required; /* structure info */ size_t field_offset; /* 0 
to pass full structure, e.g. when more than one field is to be changed; in that case the callbacks need to know the name of the fields */ /* callbacks */ from_zval_write_field *from_zval; to_zval_read_field *to_zval; } field_descriptor; #define KEY_FILL_SOCKADDR "fill_sockaddr" #define KEY_RECVMSG_RET "recvmsg_ret" #define KEY_CMSG_LEN "cmsg_len" const struct key_value empty_key_value_list[] = {{0}}; /* PARAMETERS */ static int param_get_bool(void *ctx, const char *key, int def) { int *elem; if ((elem = zend_hash_str_find_ptr(ctx, key, strlen(key))) != NULL) { return *elem; } else { return def; } } /* MEMORY */ static inline void *accounted_emalloc(size_t alloc_size, ser_context *ctx) { void *ret = emalloc(alloc_size); zend_llist_add_element(&ctx->allocations, &ret); return ret; } static inline void *accounted_ecalloc(size_t nmemb, size_t alloc_size, ser_context *ctx) { void *ret = ecalloc(nmemb, alloc_size); zend_llist_add_element(&ctx->allocations, &ret); return ret; } static inline void *accounted_safe_ecalloc(size_t nmemb, size_t alloc_size, size_t offset, ser_context *ctx) { void *ret = safe_emalloc(nmemb, alloc_size, offset); memset(ret, '\0', nmemb * alloc_size + offset); zend_llist_add_element(&ctx->allocations, &ret); return ret; } /* ERRORS */ static void do_from_to_zval_err(struct err_s *err, zend_llist *keys, const char *what_conv, const char *fmt, va_list ap) { smart_str path = {0}; const char **node; char *user_msg; int user_msg_size; zend_llist_position pos; if (err->has_error) { return; } for (node = zend_llist_get_first_ex(keys, &pos); node != NULL; node = zend_llist_get_next_ex(keys, &pos)) { smart_str_appends(&path, *node); smart_str_appends(&path, " > "); } if (path.s && ZSTR_LEN(path.s) > 3) { ZSTR_LEN(path.s) -= 3; } smart_str_0(&path); user_msg_size = vspprintf(&user_msg, 0, fmt, ap); err->has_error = 1; err->level = E_WARNING; spprintf(&err->msg, 0, "error converting %s data (path: %s): %.*s", what_conv, path.s && *ZSTR_VAL(path.s) != '\0' ? 
ZSTR_VAL(path.s) : "unavailable", user_msg_size, user_msg); err->should_free = 1; efree(user_msg); smart_str_free(&path); } ZEND_ATTRIBUTE_FORMAT(printf, 2 ,3) static void do_from_zval_err(ser_context *ctx, const char *fmt, ...) { va_list ap; va_start(ap, fmt); do_from_to_zval_err(&ctx->err, &ctx->keys, "user", fmt, ap); va_end(ap); } ZEND_ATTRIBUTE_FORMAT(printf, 2 ,3) static void do_to_zval_err(res_context *ctx, const char *fmt, ...) { va_list ap; va_start(ap, fmt); do_from_to_zval_err(&ctx->err, &ctx->keys, "native", fmt, ap); va_end(ap); } void err_msg_dispose(struct err_s *err) { if (err->msg != NULL) { php_error_docref0(NULL, err->level, "%s", err->msg); if (err->should_free) { efree(err->msg); } } } void allocations_dispose(zend_llist **allocations) { zend_llist_destroy(*allocations); efree(*allocations); *allocations = NULL; } static unsigned from_array_iterate(const zval *arr, void (*func)(zval *elem, unsigned i, void **args, ser_context *ctx), void **args, ser_context *ctx) { unsigned i; zval *elem; char buf[sizeof("element #4294967295")]; char *bufp = buf; /* Note i starts at 1, not 0! 
*/ i = 1; ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(arr), elem) { if (snprintf(buf, sizeof(buf), "element #%u", i) >= sizeof(buf)) { memcpy(buf, "element", sizeof("element")); } zend_llist_add_element(&ctx->keys, &bufp); func(elem, i, args, ctx); zend_llist_remove_tail(&ctx->keys); if (ctx->err.has_error) { break; } i++; } ZEND_HASH_FOREACH_END(); return i -1; } /* Generic Aggregated conversions */ static void from_zval_write_aggregation(const zval *container, char *structure, const field_descriptor *descriptors, ser_context *ctx) { const field_descriptor *descr; zval *elem; if (Z_TYPE_P(container) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); } for (descr = descriptors; descr->name != NULL && !ctx->err.has_error; descr++) { if ((elem = zend_hash_str_find(Z_ARRVAL_P(container), descr->name, descr->name_size - 1)) != NULL) { if (descr->from_zval == NULL) { do_from_zval_err(ctx, "No information on how to convert value " "of key '%s'", descr->name); break; } zend_llist_add_element(&ctx->keys, (void*)&descr->name); descr->from_zval(elem, ((char*)structure) + descr->field_offset, ctx); zend_llist_remove_tail(&ctx->keys); } else if (descr->required) { do_from_zval_err(ctx, "The key '%s' is required", descr->name); break; } } } static void to_zval_read_aggregation(const char *structure, zval *zarr, /* initialized array */ const field_descriptor *descriptors, res_context *ctx) { const field_descriptor *descr; assert(Z_TYPE_P(zarr) == IS_ARRAY); assert(Z_ARRVAL_P(zarr) != NULL); for (descr = descriptors; descr->name != NULL && !ctx->err.has_error; descr++) { zval *new_zv, tmp; if (descr->to_zval == NULL) { do_to_zval_err(ctx, "No information on how to convert native " "field into value for key '%s'", descr->name); break; } ZVAL_NULL(&tmp); new_zv = zend_symtable_str_update(Z_ARRVAL_P(zarr), descr->name, descr->name_size - 1, &tmp); zend_llist_add_element(&ctx->keys, (void*)&descr->name); descr->to_zval(structure + descr->field_offset, new_zv, ctx); 
zend_llist_remove_tail(&ctx->keys); } } /* CONVERSIONS for integers */ static zend_long from_zval_integer_common(const zval *arr_value, ser_context *ctx) { zend_long ret = 0; zval lzval; ZVAL_NULL(&lzval); if (Z_TYPE_P(arr_value) != IS_LONG) { ZVAL_COPY(&lzval, (zval *)arr_value); arr_value = &lzval; } switch (Z_TYPE_P(arr_value)) { case IS_LONG: long_case: ret = Z_LVAL_P(arr_value); break; /* if not long we're operating on lzval */ case IS_DOUBLE: double_case: convert_to_long(&lzval); goto long_case; case IS_OBJECT: case IS_STRING: { zend_long lval; double dval; convert_to_string(&lzval); switch (is_numeric_string(Z_STRVAL(lzval), Z_STRLEN(lzval), &lval, &dval, 0)) { case IS_DOUBLE: zval_dtor(&lzval); ZVAL_DOUBLE(&lzval, dval); goto double_case; case IS_LONG: zval_dtor(&lzval); ZVAL_LONG(&lzval, lval); goto long_case; } /* if we get here, we don't have a numeric string */ do_from_zval_err(ctx, "expected an integer, but got a non numeric " "string (possibly from a converted object): '%s'", Z_STRVAL_P(arr_value)); break; } default: do_from_zval_err(ctx, "%s", "expected an integer, either of a PHP " "integer type or of a convertible type"); break; } zval_dtor(&lzval); return ret; } void from_zval_write_int(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; int ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } if (lval > INT_MAX || lval < INT_MIN) { do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for a native int"); return; } ival = (int)lval; memcpy(field, &ival, sizeof(ival)); } static void from_zval_write_uint32(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; uint32_t ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } if (sizeof(zend_long) > sizeof(uint32_t) && (lval < 0 || lval > 0xFFFFFFFF)) { do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for an unsigned 32-bit integer"); return; } ival = 
(uint32_t)lval; memcpy(field, &ival, sizeof(ival)); } static void from_zval_write_net_uint16(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; uint16_t ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } if (lval < 0 || lval > 0xFFFF) { do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for an unsigned 16-bit integer"); return; } ival = htons((uint16_t)lval); memcpy(field, &ival, sizeof(ival)); } static void from_zval_write_sa_family(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; sa_family_t ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } if (lval < 0 || lval > (sa_family_t)-1) { /* sa_family_t is unsigned */ do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for a sa_family_t value"); return; } ival = (sa_family_t)lval; memcpy(field, &ival, sizeof(ival)); } static void from_zval_write_pid_t(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; pid_t ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } if (lval < 0 || (pid_t)lval != lval) { /* pid_t is signed */ do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for a pid_t value"); return; } ival = (pid_t)lval; memcpy(field, &ival, sizeof(ival)); } static void from_zval_write_uid_t(const zval *arr_value, char *field, ser_context *ctx) { zend_long lval; uid_t ival; lval = from_zval_integer_common(arr_value, ctx); if (ctx->err.has_error) { return; } /* uid_t can be signed or unsigned (generally unsigned) */ if ((uid_t)-1 > (uid_t)0) { if (sizeof(zend_long) > sizeof(uid_t) && (lval < 0 || (uid_t)lval != lval)) { do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for a uid_t value"); return; } } else { if (sizeof(zend_long) > sizeof(uid_t) && (uid_t)lval != lval) { do_from_zval_err(ctx, "%s", "given PHP integer is out of bounds " "for a uid_t value"); return; } } ival = 
(uid_t)lval; memcpy(field, &ival, sizeof(ival)); } void to_zval_read_int(const char *data, zval *zv, res_context *ctx) { int ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } static void to_zval_read_unsigned(const char *data, zval *zv, res_context *ctx) { unsigned ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } static void to_zval_read_net_uint16(const char *data, zval *zv, res_context *ctx) { uint16_t ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ntohs(ival)); } static void to_zval_read_uint32(const char *data, zval *zv, res_context *ctx) { uint32_t ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } static void to_zval_read_sa_family(const char *data, zval *zv, res_context *ctx) { sa_family_t ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } static void to_zval_read_pid_t(const char *data, zval *zv, res_context *ctx) { pid_t ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } static void to_zval_read_uid_t(const char *data, zval *zv, res_context *ctx) { uid_t ival; memcpy(&ival, data, sizeof(ival)); ZVAL_LONG(zv, (zend_long)ival); } /* CONVERSIONS for sockaddr */ static void from_zval_write_sin_addr(const zval *zaddr_str, char *inaddr, ser_context *ctx) { int res; struct sockaddr_in saddr = {0}; zend_string *addr_str; addr_str = zval_get_string((zval *) zaddr_str); res = php_set_inet_addr(&saddr, ZSTR_VAL(addr_str), ctx->sock); if (res) { memcpy(inaddr, &saddr.sin_addr, sizeof saddr.sin_addr); } else { /* error already emitted, but let's emit another more relevant */ do_from_zval_err(ctx, "could not resolve address '%s' to get an AF_INET " "address", ZSTR_VAL(addr_str)); } zend_string_release(addr_str); } static void to_zval_read_sin_addr(const char *data, zval *zv, res_context *ctx) { const struct in_addr *addr = (const struct in_addr *)data; socklen_t size = INET_ADDRSTRLEN; zend_string *str = zend_string_alloc(size - 
1, 0); memset(ZSTR_VAL(str), '\0', size); ZVAL_NEW_STR(zv, str); if (inet_ntop(AF_INET, addr, Z_STRVAL_P(zv), size) == NULL) { do_to_zval_err(ctx, "could not convert IPv4 address to string " "(errno %d)", errno); return; } Z_STRLEN_P(zv) = strlen(Z_STRVAL_P(zv)); } static const field_descriptor descriptors_sockaddr_in[] = { {"family", sizeof("family"), 0, offsetof(struct sockaddr_in, sin_family), from_zval_write_sa_family, to_zval_read_sa_family}, {"addr", sizeof("addr"), 0, offsetof(struct sockaddr_in, sin_addr), from_zval_write_sin_addr, to_zval_read_sin_addr}, {"port", sizeof("port"), 0, offsetof(struct sockaddr_in, sin_port), from_zval_write_net_uint16, to_zval_read_net_uint16}, {0} }; static void from_zval_write_sockaddr_in(const zval *container, char *sockaddr, ser_context *ctx) { from_zval_write_aggregation(container, sockaddr, descriptors_sockaddr_in, ctx); } static void to_zval_read_sockaddr_in(const char *data, zval *zv, res_context *ctx) { to_zval_read_aggregation(data, zv, descriptors_sockaddr_in, ctx); } #if HAVE_IPV6 static void from_zval_write_sin6_addr(const zval *zaddr_str, char *addr6, ser_context *ctx) { int res; struct sockaddr_in6 saddr6 = {0}; zend_string *addr_str; addr_str = zval_get_string((zval *) zaddr_str); res = php_set_inet6_addr(&saddr6, ZSTR_VAL(addr_str), ctx->sock); if (res) { memcpy(addr6, &saddr6.sin6_addr, sizeof saddr6.sin6_addr); } else { /* error already emitted, but let's emit another more relevant */ do_from_zval_err(ctx, "could not resolve address '%s' to get an AF_INET6 " "address", Z_STRVAL_P(zaddr_str)); } zend_string_release(addr_str); } static void to_zval_read_sin6_addr(const char *data, zval *zv, res_context *ctx) { const struct in6_addr *addr = (const struct in6_addr *)data; socklen_t size = INET6_ADDRSTRLEN; zend_string *str = zend_string_alloc(size - 1, 0); memset(ZSTR_VAL(str), '\0', size); ZVAL_NEW_STR(zv, str); if (inet_ntop(AF_INET6, addr, Z_STRVAL_P(zv), size) == NULL) { do_to_zval_err(ctx, "could not 
convert IPv6 address to string " "(errno %d)", errno); return; } Z_STRLEN_P(zv) = strlen(Z_STRVAL_P(zv)); } static const field_descriptor descriptors_sockaddr_in6[] = { {"family", sizeof("family"), 0, offsetof(struct sockaddr_in6, sin6_family), from_zval_write_sa_family, to_zval_read_sa_family}, {"addr", sizeof("addr"), 0, offsetof(struct sockaddr_in6, sin6_addr), from_zval_write_sin6_addr, to_zval_read_sin6_addr}, {"port", sizeof("port"), 0, offsetof(struct sockaddr_in6, sin6_port), from_zval_write_net_uint16, to_zval_read_net_uint16}, {"flowinfo", sizeof("flowinfo"), 0, offsetof(struct sockaddr_in6, sin6_flowinfo), from_zval_write_uint32, to_zval_read_uint32}, {"scope_id", sizeof("scope_id"), 0, offsetof(struct sockaddr_in6, sin6_scope_id), from_zval_write_uint32, to_zval_read_uint32}, {0} }; static void from_zval_write_sockaddr_in6(const zval *container, char *sockaddr6, ser_context *ctx) { from_zval_write_aggregation(container, sockaddr6, descriptors_sockaddr_in6, ctx); } static void to_zval_read_sockaddr_in6(const char *data, zval *zv, res_context *ctx) { to_zval_read_aggregation(data, zv, descriptors_sockaddr_in6, ctx); } #endif /* HAVE_IPV6 */ static void from_zval_write_sun_path(const zval *path, char *sockaddr_un_c, ser_context *ctx) { zend_string *path_str; struct sockaddr_un *saddr = (struct sockaddr_un*)sockaddr_un_c; path_str = zval_get_string((zval *) path); /* code in this file relies on the path being nul terminated, even though * this is not required, at least on linux for abstract paths. 
It also * assumes that the path is not empty */ if (ZSTR_LEN(path_str) == 0) { do_from_zval_err(ctx, "%s", "the path is cannot be empty"); return; } if (ZSTR_LEN(path_str) >= sizeof(saddr->sun_path)) { do_from_zval_err(ctx, "the path is too long, the maximum permitted " "length is %ld", sizeof(saddr->sun_path) - 1); return; } memcpy(&saddr->sun_path, ZSTR_VAL(path_str), ZSTR_LEN(path_str)); saddr->sun_path[ZSTR_LEN(path_str)] = '\0'; zend_string_release(path_str); } static void to_zval_read_sun_path(const char *data, zval *zv, res_context *ctx) { struct sockaddr_un *saddr = (struct sockaddr_un*)data; char *nul_pos; nul_pos = memchr(&saddr->sun_path, '\0', sizeof(saddr->sun_path)); if (nul_pos == NULL) { do_to_zval_err(ctx, "could not find a NUL in the path"); return; } ZVAL_STRINGL(zv, saddr->sun_path, nul_pos - (char*)&saddr->sun_path); } static const field_descriptor descriptors_sockaddr_un[] = { {"family", sizeof("family"), 0, offsetof(struct sockaddr_un, sun_family), from_zval_write_sa_family, to_zval_read_sa_family}, {"path", sizeof("path"), 0, 0, from_zval_write_sun_path, to_zval_read_sun_path}, {0} }; static void from_zval_write_sockaddr_un(const zval *container, char *sockaddr, ser_context *ctx) { from_zval_write_aggregation(container, sockaddr, descriptors_sockaddr_un, ctx); } static void to_zval_read_sockaddr_un(const char *data, zval *zv, res_context *ctx) { to_zval_read_aggregation(data, zv, descriptors_sockaddr_un, ctx); } static void from_zval_write_sockaddr_aux(const zval *container, struct sockaddr **sockaddr_ptr, socklen_t *sockaddr_len, ser_context *ctx) { int family; zval *elem; int fill_sockaddr; if (Z_TYPE_P(container) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); return; } fill_sockaddr = param_get_bool(ctx, KEY_FILL_SOCKADDR, 1); if ((elem = zend_hash_str_find(Z_ARRVAL_P(container), "family", sizeof("family") - 1)) != NULL && Z_TYPE_P(elem) != IS_NULL) { const char *node = "family"; zend_llist_add_element(&ctx->keys, 
&node); from_zval_write_int(elem, (char*)&family, ctx); zend_llist_remove_tail(&ctx->keys); } else { family = ctx->sock->type; } switch (family) { case AF_INET: /* though not all OSes support sockaddr_in used in IPv6 sockets */ if (ctx->sock->type != AF_INET && ctx->sock->type != AF_INET6) { do_from_zval_err(ctx, "the specified family (number %d) is not " "supported on this socket", family); return; } *sockaddr_ptr = accounted_ecalloc(1, sizeof(struct sockaddr_in), ctx); *sockaddr_len = sizeof(struct sockaddr_in); if (fill_sockaddr) { from_zval_write_sockaddr_in(container, (char*)*sockaddr_ptr, ctx); (*sockaddr_ptr)->sa_family = AF_INET; } break; #if HAVE_IPV6 case AF_INET6: if (ctx->sock->type != AF_INET6) { do_from_zval_err(ctx, "the specified family (AF_INET6) is not " "supported on this socket"); return; } *sockaddr_ptr = accounted_ecalloc(1, sizeof(struct sockaddr_in6), ctx); *sockaddr_len = sizeof(struct sockaddr_in6); if (fill_sockaddr) { from_zval_write_sockaddr_in6(container, (char*)*sockaddr_ptr, ctx); (*sockaddr_ptr)->sa_family = AF_INET6; } break; #endif /* HAVE_IPV6 */ case AF_UNIX: if (ctx->sock->type != AF_UNIX) { do_from_zval_err(ctx, "the specified family (AF_UNIX) is not " "supported on this socket"); return; } *sockaddr_ptr = accounted_ecalloc(1, sizeof(struct sockaddr_un), ctx); if (fill_sockaddr) { struct sockaddr_un *sock_un = (struct sockaddr_un*)*sockaddr_ptr; from_zval_write_sockaddr_un(container, (char*)*sockaddr_ptr, ctx); (*sockaddr_ptr)->sa_family = AF_UNIX; /* calculating length is more complicated here. Giving the size of * struct sockaddr_un here and relying on the nul termination of * sun_path does not work for paths in the abstract namespace. Note * that we always assume the path is not empty and nul terminated */ *sockaddr_len = offsetof(struct sockaddr_un, sun_path) + (sock_un->sun_path[0] == '\0' ? 
(1 + strlen(&sock_un->sun_path[1])) : strlen(sock_un->sun_path)); } else { *sockaddr_len = sizeof(struct sockaddr_un); } break; default: do_from_zval_err(ctx, "%s", "the only families currently supported are " "AF_INET, AF_INET6 and AF_UNIX"); break; } } static void to_zval_read_sockaddr_aux(const char *sockaddr_c, zval *zv, res_context *ctx) { const struct sockaddr *saddr = (struct sockaddr *)sockaddr_c; if (saddr->sa_family == 0) { ZVAL_NULL(zv); return; } array_init(zv); switch (saddr->sa_family) { case AF_INET: to_zval_read_sockaddr_in(sockaddr_c, zv, ctx); break; #if HAVE_IPV6 case AF_INET6: to_zval_read_sockaddr_in6(sockaddr_c, zv, ctx); break; #endif /* HAVE_IPV6 */ case AF_UNIX: to_zval_read_sockaddr_un(sockaddr_c, zv, ctx); break; default: do_to_zval_err(ctx, "cannot read struct sockaddr with family %d; " "not supported", (int)saddr->sa_family); break; } } /* CONVERSIONS for cmsghdr */ /* * [ level => , type => , data => [],] * struct cmsghdr { * socklen_t cmsg_len; // data byte count, including header * int cmsg_level; // originating protocol * int cmsg_type; // protocol-specific type * // followed by unsigned char cmsg_data[]; * }; */ static void from_zval_write_control(const zval *arr, void **control_buf, zend_llist_element *alloc, size_t *control_len, size_t *offset, ser_context *ctx) { struct cmsghdr *cmsghdr; int level, type; size_t data_len, req_space, space_left; ancillary_reg_entry *entry; static const field_descriptor descriptor_level[] = { {"level", sizeof("level"), 0, 0, from_zval_write_int, 0}, {0} }; static const field_descriptor descriptor_type[] = { {"type", sizeof("type"), 0, 0, from_zval_write_int, 0}, {0} }; field_descriptor descriptor_data[] = { {"data", sizeof("data"), 0, 0, 0, 0}, {0} }; from_zval_write_aggregation(arr, (char *)&level, descriptor_level, ctx); if (ctx->err.has_error) { return; } from_zval_write_aggregation(arr, (char *)&type, descriptor_type, ctx); if (ctx->err.has_error) { return; } entry = 
get_ancillary_reg_entry(level, type); if (entry == NULL) { do_from_zval_err(ctx, "cmsghdr with level %d and type %d not supported", level, type); return; } if (entry->calc_space) { zval *data_elem; /* arr must be an array at this point */ if ((data_elem = zend_hash_str_find(Z_ARRVAL_P(arr), "data", sizeof("data") - 1)) == NULL) { do_from_zval_err(ctx, "cmsghdr should have a 'data' element here"); return; } data_len = entry->calc_space(data_elem, ctx); if (ctx->err.has_error) { return; } } else { data_len = entry->size; } req_space = CMSG_SPACE(data_len); space_left = *control_len - *offset; assert(*control_len >= *offset); if (space_left < req_space) { *control_buf = safe_erealloc(*control_buf, 2, req_space, *control_len); *control_len += 2 * req_space; memset((char *)*control_buf + *offset, '\0', *control_len - *offset); memcpy(&alloc->data, control_buf, sizeof *control_buf); } cmsghdr = (struct cmsghdr*)(((char*)*control_buf) + *offset); cmsghdr->cmsg_level = level; cmsghdr->cmsg_type = type; cmsghdr->cmsg_len = CMSG_LEN(data_len); descriptor_data[0].from_zval = entry->from_array; from_zval_write_aggregation(arr, (char*)CMSG_DATA(cmsghdr), descriptor_data, ctx); *offset += req_space; } static void from_zval_write_control_array(const zval *arr, char *msghdr_c, ser_context *ctx) { char buf[sizeof("element #4294967295")]; char *bufp = buf; zval *elem; uint32_t i = 0; int num_elems; void *control_buf; zend_llist_element *alloc; size_t control_len, cur_offset; struct msghdr *msg = (struct msghdr*)msghdr_c; if (Z_TYPE_P(arr) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); return; } num_elems = zend_hash_num_elements(Z_ARRVAL_P(arr)); if (num_elems == 0) { return; } /* estimate each message at 20 bytes */ control_buf = accounted_safe_ecalloc(num_elems, CMSG_SPACE(20), 0, ctx); alloc = ctx->allocations.tail; control_len = (size_t)num_elems * CMSG_SPACE(20); cur_offset = 0; ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(arr), elem) { if (ctx->err.has_error) { 
break; } if (snprintf(buf, sizeof(buf), "element #%u", (unsigned)i++) >= sizeof(buf)) { memcpy(buf, "element", sizeof("element")); } zend_llist_add_element(&ctx->keys, &bufp); from_zval_write_control(elem, &control_buf, alloc, &control_len, &cur_offset, ctx); zend_llist_remove_tail(&ctx->keys); } ZEND_HASH_FOREACH_END(); msg->msg_control = control_buf; msg->msg_controllen = cur_offset; /* not control_len, which may be larger */ } static void to_zval_read_cmsg_data(const char *cmsghdr_c, zval *zv, res_context *ctx) { const struct cmsghdr *cmsg = (const struct cmsghdr *)cmsghdr_c; ancillary_reg_entry *entry; size_t len, *len_p = &len; entry = get_ancillary_reg_entry(cmsg->cmsg_level, cmsg->cmsg_type); if (entry == NULL) { do_to_zval_err(ctx, "cmsghdr with level %d and type %d not supported", cmsg->cmsg_level, cmsg->cmsg_type); return; } if (CMSG_LEN(entry->size) > cmsg->cmsg_len) { do_to_zval_err(ctx, "the cmsghdr structure is unexpectedly small; " "expected a length of at least %pd, but got %pd", (zend_long)CMSG_LEN(entry->size), (zend_long)cmsg->cmsg_len); return; } len = (size_t)cmsg->cmsg_len; /* use another var because type of cmsg_len varies */ if (zend_hash_str_add_ptr(&ctx->params, KEY_CMSG_LEN, sizeof(KEY_CMSG_LEN) - 1, len_p) == NULL) { do_to_zval_err(ctx, "%s", "could not set parameter " KEY_CMSG_LEN); return; } entry->to_array((const char *)CMSG_DATA(cmsg), zv, ctx); zend_hash_str_del(&ctx->params, KEY_CMSG_LEN, sizeof(KEY_CMSG_LEN) - 1); } static void to_zval_read_control(const char *cmsghdr_c, zval *zv, res_context *ctx) { /* takes a cmsghdr, not a msghdr like from_zval_write_control */ static const field_descriptor descriptors[] = { {"level", sizeof("level"), 0, offsetof(struct cmsghdr, cmsg_level), 0, to_zval_read_int}, {"type", sizeof("type"), 0, offsetof(struct cmsghdr, cmsg_type), 0, to_zval_read_int}, {"data", sizeof("data"), 0, 0 /* cmsghdr passed */, 0, to_zval_read_cmsg_data}, {0} }; array_init_size(zv, 3); to_zval_read_aggregation(cmsghdr_c, 
zv, descriptors, ctx); } static void to_zval_read_control_array(const char *msghdr_c, zval *zv, res_context *ctx) { struct msghdr *msg = (struct msghdr *)msghdr_c; struct cmsghdr *cmsg; char buf[sizeof("element #4294967295")]; char *bufp = buf; uint32_t i = 1; /*if (msg->msg_flags & MSG_CTRUNC) { php_error_docref0(NULL, E_WARNING, "The MSG_CTRUNC flag is present; will not " "attempt to read control messages"); ZVAL_FALSE(zv); return; }*/ array_init(zv); for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL && !ctx->err.has_error; cmsg = CMSG_NXTHDR(msg, cmsg)) { zval *elem, tmp; ZVAL_NULL(&tmp); elem = zend_hash_next_index_insert(Z_ARRVAL_P(zv), &tmp); if (snprintf(buf, sizeof(buf), "element #%u", (unsigned)i++) >= sizeof(buf)) { memcpy(buf, "element", sizeof("element")); } zend_llist_add_element(&ctx->keys, &bufp); to_zval_read_control((const char *)cmsg, elem, ctx); zend_llist_remove_tail(&ctx->keys); } } /* CONVERSIONS for msghdr */ static void from_zval_write_name(const zval *zname_arr, char *msghdr_c, ser_context *ctx) { struct sockaddr *sockaddr; socklen_t sockaddr_len; struct msghdr *msghdr = (struct msghdr *)msghdr_c; from_zval_write_sockaddr_aux(zname_arr, &sockaddr, &sockaddr_len, ctx); msghdr->msg_name = sockaddr; msghdr->msg_namelen = sockaddr_len; } static void to_zval_read_name(const char *sockaddr_p, zval *zv, res_context *ctx) { void *name = (void*)*(void**)sockaddr_p; if (name == NULL) { ZVAL_NULL(zv); } else { to_zval_read_sockaddr_aux(name, zv, ctx); } } static void from_zval_write_msghdr_buffer_size(const zval *elem, char *msghdr_c, ser_context *ctx) { zend_long lval; struct msghdr *msghdr = (struct msghdr *)msghdr_c; lval = from_zval_integer_common(elem, ctx); if (ctx->err.has_error) { return; } if (lval < 0 || lval > MAX_USER_BUFF_SIZE) { do_from_zval_err(ctx, "the buffer size must be between 1 and %pd; " "given %pd", (zend_long)MAX_USER_BUFF_SIZE, lval); return; } msghdr->msg_iovlen = 1; msghdr->msg_iov = accounted_emalloc(sizeof(*msghdr->msg_iov) * 
1, ctx); msghdr->msg_iov[0].iov_base = accounted_emalloc((size_t)lval, ctx); msghdr->msg_iov[0].iov_len = (size_t)lval; } static void from_zval_write_iov_array_aux(zval *elem, unsigned i, void **args, ser_context *ctx) { struct msghdr *msg = args[0]; size_t len; if (Z_REFCOUNTED_P(elem)) { Z_ADDREF_P(elem); } convert_to_string_ex(elem); len = Z_STRLEN_P(elem); msg->msg_iov[i - 1].iov_base = accounted_emalloc(len, ctx); msg->msg_iov[i - 1].iov_len = len; memcpy(msg->msg_iov[i - 1].iov_base, Z_STRVAL_P(elem), len); zval_ptr_dtor(elem); } static void from_zval_write_iov_array(const zval *arr, char *msghdr_c, ser_context *ctx) { int num_elem; struct msghdr *msg = (struct msghdr*)msghdr_c; if (Z_TYPE_P(arr) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); return; } num_elem = zend_hash_num_elements(Z_ARRVAL_P(arr)); if (num_elem == 0) { return; } msg->msg_iov = accounted_safe_ecalloc(num_elem, sizeof *msg->msg_iov, 0, ctx); msg->msg_iovlen = (size_t)num_elem; from_array_iterate(arr, from_zval_write_iov_array_aux, (void**)&msg, ctx); } static void from_zval_write_controllen(const zval *elem, char *msghdr_c, ser_context *ctx) { struct msghdr *msghdr = (struct msghdr *)msghdr_c; uint32_t len; /* controllen should be an unsigned with at least 32-bit. 
Let's assume * this least common denominator */ from_zval_write_uint32(elem, (char*)&len, ctx); if (!ctx->err.has_error && len == 0) { do_from_zval_err(ctx, "controllen cannot be 0"); return; } msghdr->msg_control = accounted_emalloc(len, ctx); msghdr->msg_controllen = len; } void from_zval_write_msghdr_send(const zval *container, char *msghdr_c, ser_context *ctx) { static const field_descriptor descriptors[] = { {"name", sizeof("name"), 0, 0, from_zval_write_name, 0}, {"iov", sizeof("iov"), 0, 0, from_zval_write_iov_array, 0}, {"control", sizeof("control"), 0, 0, from_zval_write_control_array, 0}, {0} }; from_zval_write_aggregation(container, msghdr_c, descriptors, ctx); } void from_zval_write_msghdr_recv(const zval *container, char *msghdr_c, ser_context *ctx) { /* zval to struct msghdr, version for recvmsg(). It differs from the version * for sendmsg() in that it: * - has a buffer_size instead of an iov array; * - has no control element; has a controllen element instead * struct msghdr { * void *msg_name; * socklen_t msg_namelen; * struct iovec *msg_iov; * size_t msg_iovlen; * void *msg_control; * size_t msg_controllen; //can also be socklen_t * int msg_flags; * }; */ static const field_descriptor descriptors[] = { {"name", sizeof("name"), 0, 0, from_zval_write_name, 0}, {"buffer_size", sizeof("buffer_size"), 0, 0, from_zval_write_msghdr_buffer_size, 0}, {"controllen", sizeof("controllen"), 1, 0, from_zval_write_controllen, 0}, {0} }; struct msghdr *msghdr = (struct msghdr *)msghdr_c; const int falsev = 0, *falsevp = &falsev; if (zend_hash_str_add_ptr(&ctx->params, KEY_FILL_SOCKADDR, sizeof(KEY_FILL_SOCKADDR) - 1, (void *)falsevp) == NULL) { do_from_zval_err(ctx, "could not add fill_sockaddr; this is a bug"); return; } from_zval_write_aggregation(container, msghdr_c, descriptors, ctx); zend_hash_str_del(&ctx->params, KEY_FILL_SOCKADDR, sizeof(KEY_FILL_SOCKADDR) - 1); if (ctx->err.has_error) { return; } if (msghdr->msg_iovlen == 0) { msghdr->msg_iovlen = 1; 
msghdr->msg_iov = accounted_emalloc(sizeof(*msghdr->msg_iov) * 1, ctx); msghdr->msg_iov[0].iov_base = accounted_emalloc((size_t)DEFAULT_BUFF_SIZE, ctx); msghdr->msg_iov[0].iov_len = (size_t)DEFAULT_BUFF_SIZE; } } static void to_zval_read_iov(const char *msghdr_c, zval *zv, res_context *ctx) { const struct msghdr *msghdr = (const struct msghdr *)msghdr_c; size_t iovlen = msghdr->msg_iovlen; ssize_t *recvmsg_ret, bytes_left; uint i; if (iovlen > UINT_MAX) { do_to_zval_err(ctx, "unexpectedly large value for iov_len: %lu", (unsigned long)iovlen); } array_init_size(zv, (uint)iovlen); if ((recvmsg_ret = zend_hash_str_find_ptr(&ctx->params, KEY_RECVMSG_RET, sizeof(KEY_RECVMSG_RET) - 1)) == NULL) { do_to_zval_err(ctx, "recvmsg_ret not found in params. This is a bug"); return; } bytes_left = *recvmsg_ret; for (i = 0; bytes_left > 0 && i < (uint)iovlen; i++) { zval elem; size_t len = MIN(msghdr->msg_iov[i].iov_len, (size_t)bytes_left); zend_string *buf = zend_string_alloc(len, 0); memcpy(ZSTR_VAL(buf), msghdr->msg_iov[i].iov_base, ZSTR_LEN(buf)); ZSTR_VAL(buf)[ZSTR_LEN(buf)] = '\0'; ZVAL_NEW_STR(&elem, buf); add_next_index_zval(zv, &elem); bytes_left -= len; } } void to_zval_read_msghdr(const char *msghdr_c, zval *zv, res_context *ctx) { static const field_descriptor descriptors[] = { {"name", sizeof("name"), 0, offsetof(struct msghdr, msg_name), 0, to_zval_read_name}, {"control", sizeof("control"), 0, 0, 0, to_zval_read_control_array}, {"iov", sizeof("iov"), 0, 0, 0, to_zval_read_iov}, {"flags", sizeof("flags"), 0, offsetof(struct msghdr, msg_flags), 0, to_zval_read_int}, {0} }; array_init_size(zv, 4); to_zval_read_aggregation(msghdr_c, zv, descriptors, ctx); } /* CONVERSIONS for if_index */ static void from_zval_write_ifindex(const zval *zv, char *uinteger, ser_context *ctx) { unsigned ret = 0; if (Z_TYPE_P(zv) == IS_LONG) { if (Z_LVAL_P(zv) < 0 || Z_LVAL_P(zv) > UINT_MAX) { /* allow 0 (unspecified interface) */ do_from_zval_err(ctx, "the interface index cannot be negative 
or " "larger than %u; given %pd", UINT_MAX, Z_LVAL_P(zv)); } else { ret = (unsigned)Z_LVAL_P(zv); } } else { zend_string *str; str = zval_get_string((zval *) zv); #if HAVE_IF_NAMETOINDEX ret = if_nametoindex(ZSTR_VAL(str)); if (ret == 0) { do_from_zval_err(ctx, "no interface with name \"%s\" could be found", ZSTR_VAL(str)); } #elif defined(SIOCGIFINDEX) { struct ifreq ifr; if (strlcpy(ifr.ifr_name, ZSTR_VAL(str), sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) { do_from_zval_err(ctx, "the interface name \"%s\" is too large ", ZSTR_VAL(str)); } else if (ioctl(ctx->sock->bsd_socket, SIOCGIFINDEX, &ifr) < 0) { if (errno == ENODEV) { do_from_zval_err(ctx, "no interface with name \"%s\" could be " "found", ZSTR_VAL(str)); } else { do_from_zval_err(ctx, "error fetching interface index for " "interface with name \"%s\" (errno %d)", ZSTR_VAL(str), errno); } } else { ret = (unsigned)ifr.ifr_ifindex; } } #else do_from_zval_err(ctx, "this platform does not support looking up an interface by " "name, an integer interface index must be supplied instead"); #endif zend_string_release(str); } if (!ctx->err.has_error) { memcpy(uinteger, &ret, sizeof(ret)); } } /* CONVERSIONS for struct in6_pktinfo */ #if defined(IPV6_PKTINFO) && HAVE_IPV6 static const field_descriptor descriptors_in6_pktinfo[] = { {"addr", sizeof("addr"), 1, offsetof(struct in6_pktinfo, ipi6_addr), from_zval_write_sin6_addr, to_zval_read_sin6_addr}, {"ifindex", sizeof("ifindex"), 1, offsetof(struct in6_pktinfo, ipi6_ifindex), from_zval_write_ifindex, to_zval_read_unsigned}, {0} }; void from_zval_write_in6_pktinfo(const zval *container, char *in6_pktinfo_c, ser_context *ctx) { from_zval_write_aggregation(container, in6_pktinfo_c, descriptors_in6_pktinfo, ctx); } void to_zval_read_in6_pktinfo(const char *data, zval *zv, res_context *ctx) { array_init_size(zv, 2); to_zval_read_aggregation(data, zv, descriptors_in6_pktinfo, ctx); } #endif /* CONVERSIONS for struct ucred */ #ifdef SO_PASSCRED static const 
field_descriptor descriptors_ucred[] = { {"pid", sizeof("pid"), 1, offsetof(struct ucred, pid), from_zval_write_pid_t, to_zval_read_pid_t}, {"uid", sizeof("uid"), 1, offsetof(struct ucred, uid), from_zval_write_uid_t, to_zval_read_uid_t}, /* assume the type gid_t is the same as uid_t: */ {"gid", sizeof("gid"), 1, offsetof(struct ucred, gid), from_zval_write_uid_t, to_zval_read_uid_t}, {0} }; void from_zval_write_ucred(const zval *container, char *ucred_c, ser_context *ctx) { from_zval_write_aggregation(container, ucred_c, descriptors_ucred, ctx); } void to_zval_read_ucred(const char *data, zval *zv, res_context *ctx) { array_init_size(zv, 3); to_zval_read_aggregation(data, zv, descriptors_ucred, ctx); } #endif /* CONVERSIONS for SCM_RIGHTS */ #ifdef SCM_RIGHTS size_t calculate_scm_rights_space(const zval *arr, ser_context *ctx) { int num_elems; if (Z_TYPE_P(arr) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); return (size_t)-1; } num_elems = zend_hash_num_elements(Z_ARRVAL_P(arr)); if (num_elems == 0) { do_from_zval_err(ctx, "%s", "expected at least one element in this array"); return (size_t)-1; } return zend_hash_num_elements(Z_ARRVAL_P(arr)) * sizeof(int); } static void from_zval_write_fd_array_aux(zval *elem, unsigned i, void **args, ser_context *ctx) { int *iarr = args[0]; if (Z_TYPE_P(elem) == IS_RESOURCE) { php_stream *stream; php_socket *sock; sock = (php_socket *)zend_fetch_resource_ex(elem, NULL, php_sockets_le_socket()); if (sock) { iarr[i] = sock->bsd_socket; return; } stream = (php_stream *)zend_fetch_resource2_ex(elem, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream == NULL) { do_from_zval_err(ctx, "resource is not a stream or a socket"); return; } if (php_stream_cast(stream, PHP_STREAM_AS_FD, (void **)&iarr[i - 1], REPORT_ERRORS) == FAILURE) { do_from_zval_err(ctx, "cast stream to file descriptor failed"); return; } } else { do_from_zval_err(ctx, "expected a resource variable"); } } void 
from_zval_write_fd_array(const zval *arr, char *int_arr, ser_context *ctx) { if (Z_TYPE_P(arr) != IS_ARRAY) { do_from_zval_err(ctx, "%s", "expected an array here"); return; } from_array_iterate(arr, &from_zval_write_fd_array_aux, (void**)&int_arr, ctx); } void to_zval_read_fd_array(const char *data, zval *zv, res_context *ctx) { size_t *cmsg_len; int num_elems, i; struct cmsghdr *dummy_cmsg = 0; size_t data_offset; data_offset = (unsigned char *)CMSG_DATA(dummy_cmsg) - (unsigned char *)dummy_cmsg; if ((cmsg_len = zend_hash_str_find_ptr(&ctx->params, KEY_CMSG_LEN, sizeof(KEY_CMSG_LEN) - 1)) == NULL) { do_to_zval_err(ctx, "could not get value of parameter " KEY_CMSG_LEN); return; } if (*cmsg_len < data_offset) { do_to_zval_err(ctx, "length of cmsg is smaller than its data member " "offset (%pd vs %pd)", (zend_long)*cmsg_len, (zend_long)data_offset); return; } num_elems = (*cmsg_len - data_offset) / sizeof(int); array_init_size(zv, num_elems); for (i = 0; i < num_elems; i++) { zval elem; int fd; struct stat statbuf; fd = *((int *)data + i); /* determine whether we have a socket */ if (fstat(fd, &statbuf) == -1) { do_to_zval_err(ctx, "error creating resource for received file " "descriptor %d: fstat() call failed with errno %d", fd, errno); return; } if (S_ISSOCK(statbuf.st_mode)) { php_socket *sock = socket_import_file_descriptor(fd); ZVAL_RES(&elem, zend_register_resource(sock, php_sockets_le_socket())); } else { php_stream *stream = php_stream_fopen_from_fd(fd, "rw", NULL); php_stream_to_zval(stream, &elem); } add_next_index_zval(zv, &elem); } } #endif /* ENTRY POINT for conversions */ static void free_from_zval_allocation(void *alloc_ptr_ptr) { efree(*(void**)alloc_ptr_ptr); } void *from_zval_run_conversions(const zval *container, php_socket *sock, from_zval_write_field *writer, size_t struct_size, const char *top_name, zend_llist **allocations /* out */, struct err_s *err /* in/out */) { ser_context ctx; char *structure; *allocations = NULL; if (err->has_error) { 
return NULL; } memset(&ctx, 0, sizeof(ctx)); zend_hash_init(&ctx.params, 8, NULL, NULL, 0); zend_llist_init(&ctx.keys, sizeof(const char *), NULL, 0); zend_llist_init(&ctx.allocations, sizeof(void *), &free_from_zval_allocation, 0); ctx.sock = sock; structure = ecalloc(1, struct_size); zend_llist_add_element(&ctx.keys, &top_name); zend_llist_add_element(&ctx.allocations, &structure); /* main call */ writer(container, structure, &ctx); if (ctx.err.has_error) { zend_llist_destroy(&ctx.allocations); /* deallocates structure as well */ structure = NULL; *err = ctx.err; } else { *allocations = emalloc(sizeof **allocations); **allocations = ctx.allocations; } zend_llist_destroy(&ctx.keys); zend_hash_destroy(&ctx.params); return structure; } zval *to_zval_run_conversions(const char *structure, to_zval_read_field *reader, const char *top_name, const struct key_value *key_value_pairs, struct err_s *err, zval *zv) { res_context ctx; const struct key_value *kv; if (err->has_error) { return NULL; } memset(&ctx, 0, sizeof(ctx)); zend_llist_init(&ctx.keys, sizeof(const char *), NULL, 0); zend_llist_add_element(&ctx.keys, &top_name); zend_hash_init(&ctx.params, 8, NULL, NULL, 0); for (kv = key_value_pairs; kv->key != NULL; kv++) { zend_hash_str_update_ptr(&ctx.params, kv->key, kv->key_size - 1, kv->value); } ZVAL_NULL(zv); /* main call */ reader(structure, zv, &ctx); if (ctx.err.has_error) { zval_ptr_dtor(zv); ZVAL_UNDEF(zv); *err = ctx.err; } zend_llist_destroy(&ctx.keys); zend_hash_destroy(&ctx.params); return Z_ISUNDEF_P(zv)? NULL : zv; }
422205.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE253_Incorrect_Check_of_Return_Value__char_w32CreateNamedPipe_12.c Label Definition File: CWE253_Incorrect_Check_of_Return_Value.string.label.xml Template File: point-flaw-12.tmpl.c */ /* * @description * CWE: 253 Incorrect Check of Return Value * Sinks: w32CreateNamedPipe * GoodSink: Correctly check if CreateNamedPipeA() failed * BadSink : Incorrectly check if CreateNamedPipeA() failed * Flow Variant: 12 Control flow: if(global_returns_t_or_f()) * * */ #include "std_testcase.h" #include <windows.h> #define BUFSIZE 1024 #ifndef OMITBAD void CWE253_Incorrect_Check_of_Return_Value__char_w32CreateNamedPipe_12_bad() { if(global_returns_t_or_f()) { { char * pipeName = "\\\\.\\pipe\\mypipe"; HANDLE hPipe = INVALID_HANDLE_VALUE; BOOL fConnected = FALSE; hPipe = CreateNamedPipeA( pipeName, FILE_FLAG_FIRST_PIPE_INSTANCE, /* FILE_FLAG_FIRST_PIPE_INSTANCE - this flag must be set */ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, NMPWAIT_USE_DEFAULT_WAIT, NULL); /* FLAW: If CreateNamedPipeA() failed, the return value will be INVALID_HANDLE_VALUE, but we are checking to see if the return value is NULL */ if (hPipe == NULL) { exit(1); } /* FLAW: If CreateNamedPipeA() failed, GetLastError() could return ERROR_ACCESS_DENIED but we are checking to see if the return value is negative */ if (GetLastError() == -1) { exit(1); } fConnected = ConnectNamedPipe(hPipe, NULL) ? 
TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); /* We'll leave out most of the implementation since it has nothing to do with the CWE * and since the checkers are looking for certain function calls anyway */ CloseHandle(hPipe); } } else { { char * pipeName = "\\\\.\\pipe\\mypipe"; HANDLE hPipe = INVALID_HANDLE_VALUE; BOOL fConnected = FALSE; hPipe = CreateNamedPipeA( pipeName, FILE_FLAG_FIRST_PIPE_INSTANCE, /* FILE_FLAG_FIRST_PIPE_INSTANCE - this flag must be set */ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, NMPWAIT_USE_DEFAULT_WAIT, NULL); /* FIX: check for the correct return value */ if (hPipe == INVALID_HANDLE_VALUE) { exit(1); } /* FIX: check for the correct return value */ if (GetLastError() == ERROR_ACCESS_DENIED) { exit(1); } fConnected = ConnectNamedPipe(hPipe, NULL) ? TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); /* We'll leave out most of the implementation since it has nothing to do with the CWE * and since the checkers are looking for certain function calls anyway */ CloseHandle(hPipe); } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* good1() uses the GoodSink on both sides of the "if" statement */ static void good1() { if(global_returns_t_or_f()) { { char * pipeName = "\\\\.\\pipe\\mypipe"; HANDLE hPipe = INVALID_HANDLE_VALUE; BOOL fConnected = FALSE; hPipe = CreateNamedPipeA( pipeName, FILE_FLAG_FIRST_PIPE_INSTANCE, /* FILE_FLAG_FIRST_PIPE_INSTANCE - this flag must be set */ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, NMPWAIT_USE_DEFAULT_WAIT, NULL); /* FIX: check for the correct return value */ if (hPipe == INVALID_HANDLE_VALUE) { exit(1); } /* FIX: check for the correct return value */ if (GetLastError() == ERROR_ACCESS_DENIED) { exit(1); } fConnected = ConnectNamedPipe(hPipe, NULL) ? 
TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); /* We'll leave out most of the implementation since it has nothing to do with the CWE * and since the checkers are looking for certain function calls anyway */ CloseHandle(hPipe); } } else { { char * pipeName = "\\\\.\\pipe\\mypipe"; HANDLE hPipe = INVALID_HANDLE_VALUE; BOOL fConnected = FALSE; hPipe = CreateNamedPipeA( pipeName, FILE_FLAG_FIRST_PIPE_INSTANCE, /* FILE_FLAG_FIRST_PIPE_INSTANCE - this flag must be set */ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, NMPWAIT_USE_DEFAULT_WAIT, NULL); /* FIX: check for the correct return value */ if (hPipe == INVALID_HANDLE_VALUE) { exit(1); } /* FIX: check for the correct return value */ if (GetLastError() == ERROR_ACCESS_DENIED) { exit(1); } fConnected = ConnectNamedPipe(hPipe, NULL) ? TRUE : (GetLastError() == ERROR_PIPE_CONNECTED); /* We'll leave out most of the implementation since it has nothing to do with the CWE * and since the checkers are looking for certain function calls anyway */ CloseHandle(hPipe); } } } void CWE253_Incorrect_Check_of_Return_Value__char_w32CreateNamedPipe_12_good() { good1(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE253_Incorrect_Check_of_Return_Value__char_w32CreateNamedPipe_12_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE253_Incorrect_Check_of_Return_Value__char_w32CreateNamedPipe_12_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
683699.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE122_Heap_Based_Buffer_Overflow__c_CWE805_int64_t_memcpy_17.c Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_CWE805.label.xml Template File: sources-sink-17.tmpl.c */ /* * @description * CWE: 122 Heap Based Buffer Overflow * BadSource: Allocate using malloc() and set data pointer to a small buffer * GoodSource: Allocate using malloc() and set data pointer to a large buffer * Sink: memcpy * BadSink : Copy int64_t array to data using memcpy * Flow Variant: 17 Control flow: for loops * * */ #include "std_testcase.h" #ifndef OMITBAD void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_int64_t_memcpy_17_bad() { int i; int64_t * data; data = NULL; for(i = 0; i < 1; i++) { /* FLAW: Allocate and point data to a small buffer that is smaller than the large buffer used in the sinks */ data = (int64_t *)malloc(50*sizeof(int64_t)); } { int64_t source[100] = {0}; /* fill with 0's */ /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ memcpy(data, source, 100*sizeof(int64_t)); printLongLongLine(data[0]); free(data); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B() - use goodsource and badsink by changing the conditions on the for statements */ static void goodG2B() { int h; int64_t * data; data = NULL; for(h = 0; h < 1; h++) { /* FIX: Allocate and point data to a large buffer that is at least as large as the large buffer used in the sink */ data = (int64_t *)malloc(100*sizeof(int64_t)); } { int64_t source[100] = {0}; /* fill with 0's */ /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ memcpy(data, source, 100*sizeof(int64_t)); printLongLongLine(data[0]); free(data); } } void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_int64_t_memcpy_17_good() { goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. 
It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE122_Heap_Based_Buffer_Overflow__c_CWE805_int64_t_memcpy_17_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE122_Heap_Based_Buffer_Overflow__c_CWE805_int64_t_memcpy_17_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
878780.c
/* { dg-do compile } */ /* { dg-options "-O2 -fdump-ipa-icf" } */ #include <math.h> __attribute__ ((noinline)) float foo() { return sin(12.4f); } __attribute__ ((noinline)) float bar() { return sin(12.4f); } int main() { foo(); bar(); return 0; } /* { dg-final { scan-ipa-dump "Semantic equality hit:foo->bar" "icf" } } */ /* { dg-final { scan-ipa-dump "Equal symbols: 1" "icf" } } */
726075.c
/* * This file is a part of libacars * * Copyright (c) 2018-2021 Tomasz Lemiech <[email protected]> */ #include <sys/time.h> // struct timeval #include <string.h> // strdup #include <libacars/macros.h> // la_assert #include <libacars/hash.h> // la_hash #include <libacars/list.h> // la_list #include <libacars/util.h> // LA_XCALLOC, LA_XFREE, la_octet_string #include <libacars/reassembly.h> typedef struct la_reasm_table_s { void const *key; /* a pointer identifying the protocol owning this reasm_table (la_type_descriptor can be used for this purpose). Due to small number of protocols, hash would be an overkill here. */ la_hash *fragment_table; /* keyed with packet identifiers, values are la_reasm_table_entries */ la_reasm_table_funcs funcs; /* protocol-specific callbacks */ int cleanup_interval; /* expire old entries every cleanup_interval number of processed fragments */ int frag_cnt; /* counts added fragments (up to cleanup_interval) */ } la_reasm_table; struct la_reasm_ctx_s { la_list *rtables; /* list of reasm_tables, one per protocol */ }; // the header of the fragment list typedef struct { int prev_seq_num; /* sequence number of previous fragment */ int frags_collected_total_len; /* sum of msg_data_len for all fragments received */ int total_pdu_len; /* total length of the reassembled message (copied from la_reasm_fragment_info of the 1st fragment) */ struct timeval first_frag_rx_time; /* time of arrival of the first fragment */ struct timeval reasm_timeout; /* reassembly timeout to be applied to this message */ la_list *fragment_list; /* payloads of all fragments gathered so far */ } la_reasm_table_entry; la_reasm_ctx *la_reasm_ctx_new() { LA_NEW(la_reasm_ctx, rctx); return rctx; } static void la_reasm_table_entry_destroy(void *rt_ptr) { if(rt_ptr == NULL) { return; } la_reasm_table_entry *rt_entry = rt_ptr; la_list_free_full(rt_entry->fragment_list, la_octet_string_destroy); LA_XFREE(rt_entry); } static void la_reasm_table_destroy(void *table) { if(table == 
NULL) { return; } la_reasm_table *rtable = table; la_hash_destroy(rtable->fragment_table); LA_XFREE(rtable); } void la_reasm_ctx_destroy(void *ctx) { if(ctx == NULL) { return; } la_reasm_ctx *rctx = ctx; la_list_free_full(rctx->rtables, la_reasm_table_destroy); LA_XFREE(rctx); } la_reasm_table *la_reasm_table_lookup(la_reasm_ctx *rctx, void const *table_id) { la_assert(rctx != NULL); la_assert(table_id != NULL); for(la_list *l = rctx->rtables; l != NULL; l = la_list_next(l)) { la_reasm_table *rt = l->data; if(rt->key == table_id) { return rt; } } return NULL; } #define LA_REASM_DEFAULT_CLEANUP_INTERVAL 100 la_reasm_table *la_reasm_table_new(la_reasm_ctx *rctx, void const *table_id, la_reasm_table_funcs funcs, int cleanup_interval) { la_assert(rctx != NULL); la_assert(table_id != NULL); la_assert(funcs.get_key); la_assert(funcs.get_tmp_key); la_assert(funcs.hash_key); la_assert(funcs.compare_keys); la_assert(funcs.destroy_key); la_reasm_table *rtable = la_reasm_table_lookup(rctx, table_id); if(rtable != NULL) { goto end; } rtable = LA_XCALLOC(1, sizeof(la_reasm_table)); rtable->key = table_id; rtable->fragment_table = la_hash_new(funcs.hash_key, funcs.compare_keys, funcs.destroy_key, la_reasm_table_entry_destroy); rtable->funcs = funcs; // Replace insane values with reasonable default rtable->cleanup_interval = cleanup_interval > 0 ? cleanup_interval : LA_REASM_DEFAULT_CLEANUP_INTERVAL; rctx->rtables = la_list_append(rctx->rtables, rtable); end: return rtable; } // Checks if time difference between rx_first and rx_last is greater than timeout. 
static bool la_reasm_timed_out(struct timeval rx_last, struct timeval rx_first, struct timeval timeout) { if(timeout.tv_sec == 0 && timeout.tv_usec == 0) { return false; } struct timeval to = { .tv_sec = rx_first.tv_sec + timeout.tv_sec, .tv_usec = rx_first.tv_usec + timeout.tv_usec }; if(to.tv_usec > 1e9) { to.tv_sec++; to.tv_usec -= 1e9; } la_debug_print(D_INFO, "rx_first: %lu.%lu to: %lu.%lu rx_last: %lu.%lu\n", rx_first.tv_sec, rx_first.tv_usec, to.tv_sec, to.tv_usec, rx_last.tv_sec, rx_last.tv_usec); return (rx_last.tv_sec > to.tv_sec || (rx_last.tv_sec == to.tv_sec && rx_last.tv_usec > to.tv_usec)); } // Callback for la_hash_foreach_remove used during reassembly table cleanups. static bool is_rt_entry_expired(void const *keyptr, void const *valptr, void *ctx) { LA_UNUSED(keyptr); la_assert(valptr != NULL); la_assert(ctx != NULL); la_reasm_table_entry const *rt_entry = valptr; struct timeval *now = ctx; return la_reasm_timed_out(*now, rt_entry->first_frag_rx_time, rt_entry->reasm_timeout); } // Removes expired entries from the given reassembly table. static void la_reasm_table_cleanup(la_reasm_table *rtable, struct timeval now) { la_assert(rtable != NULL); la_assert(rtable->fragment_table != NULL); int deleted_count = la_hash_foreach_remove(rtable->fragment_table, is_rt_entry_expired, &now); // Avoid compiler warning when DEBUG is off LA_UNUSED(deleted_count); la_debug_print(D_INFO, "Expired %d entries\n", deleted_count); } #define SEQ_UNINITIALIZED -2 // Checks if the given sequence number follows the previous one seen. static bool is_seq_num_in_sequence(int prev_seq_num, int cur_seq_num) { return (prev_seq_num == SEQ_UNINITIALIZED || prev_seq_num + 1 == cur_seq_num); } // Core reassembly logic. // Validates the given message fragment and appends it to the reassembly table // fragment list. 
// Validates one fragment of a message and folds it into the reassembly state.
// Returns LA_REASM_COMPLETE when the message is fully reassembled (fetch it
// with la_reasm_payload_get), LA_REASM_IN_PROGRESS when more fragments are
// expected, or an error/skip status. Also drives periodic expiry of stale
// table entries, clocked by finfo->rx_time.
la_reasm_status la_reasm_fragment_add(la_reasm_table *rtable, la_reasm_fragment_info const *finfo) {
	la_assert(rtable != NULL);
	la_assert(finfo != NULL);
	if(finfo->msg_info == NULL) {
		return LA_REASM_ARGS_INVALID;
	}
	// Don't allow zero timeout. This would prevent stale rt_entries from being expired,
	// causing a massive memory leak.
	if(finfo->reasm_timeout.tv_sec == 0 && finfo->reasm_timeout.tv_usec == 0) {
		return LA_REASM_ARGS_INVALID;
	}
	la_reasm_status ret = LA_REASM_UNKNOWN;
	// Temporary key used only for lookups; freed before returning.
	void *lookup_key = rtable->funcs.get_tmp_key(finfo->msg_info);
	la_assert(lookup_key != NULL);
	la_reasm_table_entry *rt_entry = NULL;
restart:
	rt_entry = la_hash_lookup(rtable->fragment_table, lookup_key);
	if(rt_entry == NULL) {
		// Don't add if we know that this is not the first fragment of the message.
		if(finfo->seq_num_first != SEQ_FIRST_NONE && finfo->seq_num_first != finfo->seq_num) {
			la_debug_print(D_INFO, "No rt_entry found and seq_num %d != seq_num_first %d,"
					" not creating rt_entry\n", finfo->seq_num, finfo->seq_num_first);
			ret = LA_REASM_FRAG_OUT_OF_SEQUENCE;
			goto end;
		}
		if(finfo->is_final_fragment) {
			// This is the first received fragment of this message and it's the final
			// fragment. Either this message is not fragmented or all fragments except the
			// last one have been lost. In either case there is no point in adding it to
			// the fragment table.
			la_debug_print(D_INFO, "No rt_entry found and is_final_fragment=true, not creating rt_entry\n");
			ret = LA_REASM_SKIPPED;
			goto end;
		}
		rt_entry = LA_XCALLOC(1, sizeof(la_reasm_table_entry));
		rt_entry->prev_seq_num = SEQ_UNINITIALIZED;
		rt_entry->first_frag_rx_time = finfo->rx_time;
		rt_entry->reasm_timeout = finfo->reasm_timeout;
		// Negative total_pdu_len is clamped to 0 ("unknown").
		rt_entry->total_pdu_len = LA_MAX(finfo->total_pdu_len, 0);
		rt_entry->frags_collected_total_len = 0;
		la_debug_print(D_INFO, "Adding new rt_table entry (rx_time: %lu.%lu timeout: %lu.%lu)\n",
				rt_entry->first_frag_rx_time.tv_sec, rt_entry->first_frag_rx_time.tv_usec,
				rt_entry->reasm_timeout.tv_sec, rt_entry->reasm_timeout.tv_usec);
		// The hash takes ownership of msg_key (freed via funcs.destroy_key).
		void *msg_key = rtable->funcs.get_key(finfo->msg_info);
		la_assert(msg_key != NULL);
		la_hash_insert(rtable->fragment_table, msg_key, rt_entry);
	} else {
		la_debug_print(D_INFO, "rt_entry found, prev_seq_num: %d\n", rt_entry->prev_seq_num);
	}
	// Check if the sequence number has wrapped (if we're supposed to handle wraparounds)
	if(finfo->seq_num_wrap != SEQ_WRAP_NONE && finfo->seq_num == 0 &&
			finfo->seq_num_wrap == rt_entry->prev_seq_num + 1) {
		la_debug_print(D_INFO, "seq_num wrap at %d: %d -> %d\n", finfo->seq_num_wrap,
				rt_entry->prev_seq_num, finfo->seq_num);
		// Current seq_num is 0, so set prev_seq_num to -1 to cause the seq_num check to succeed
		rt_entry->prev_seq_num = -1;
	}
	// Check reassembly timeout
	if(la_reasm_timed_out(finfo->rx_time, rt_entry->first_frag_rx_time,
				rt_entry->reasm_timeout) == true) {
		// If reassembly timeout has expired, we treat this fragment as a part of
		// a new message. Remove the old rt_entry and create new one.
		la_debug_print(D_INFO, "reasm timeout expired; creating new rt_entry\n");
		la_hash_remove(rtable->fragment_table, lookup_key);
		goto restart;
	}
	// Skip duplicates / retransmissions.
	// If sequence numbers don't wrap, then treat fragments we've seen before as
	// duplicates too.
	if(rt_entry->prev_seq_num == finfo->seq_num ||
			(finfo->seq_num_wrap == SEQ_WRAP_NONE && finfo->seq_num < rt_entry->prev_seq_num)) {
		la_debug_print(D_INFO, "skipping duplicate fragment (seq_num: %d)\n", finfo->seq_num);
		ret = LA_REASM_DUPLICATE;
		goto end;
	}
	// Check If the sequence number has incremented.
	if(is_seq_num_in_sequence(rt_entry->prev_seq_num, finfo->seq_num) == false) {
		// Probably one or more fragments have been lost. Reassembly is not possible.
		la_debug_print(D_INFO, "seq_num %d out of sequence (prev: %d)\n",
				finfo->seq_num, rt_entry->prev_seq_num);
		la_hash_remove(rtable->fragment_table, lookup_key);
		ret = LA_REASM_FRAG_OUT_OF_SEQUENCE;
		goto end;
	}
	// All checks succeeded. Add the fragment to the list.
	la_debug_print(D_INFO, "Good seq_num %d (prev: %d), adding fragment to the list\n",
			finfo->seq_num, rt_entry->prev_seq_num);
	// Don't append fragments with empty payload (but increment seq_num anyway,
	// because empty fragment is not an error)
	if(finfo->msg_data != NULL && finfo->msg_data_len > 0) {
		// Copy the payload - the caller keeps ownership of finfo->msg_data.
		uint8_t *msg_data = LA_XCALLOC(finfo->msg_data_len, sizeof(uint8_t));
		memcpy(msg_data, finfo->msg_data, finfo->msg_data_len);
		la_octet_string *ostring = la_octet_string_new(msg_data, finfo->msg_data_len);
		rt_entry->fragment_list = la_list_append(rt_entry->fragment_list, ostring);
	}
	rt_entry->frags_collected_total_len += finfo->msg_data_len;
	rt_entry->prev_seq_num = finfo->seq_num;
	// If we've come to this point successfully, then reassembly is complete if:
	//
	// - total_pdu_len for this rt_entry is set and we've already collected
	//   required amount of data, or
	//
	// - total_pdu_len for this rt_entry is not known and the caller indicates
	//   that this is the final fragment of this message.
	//
	// Otherwise we expect more fragments to come.
	if(rt_entry->total_pdu_len > 0) {
		ret = rt_entry->frags_collected_total_len >= rt_entry->total_pdu_len ?
			LA_REASM_COMPLETE : LA_REASM_IN_PROGRESS;
	} else {
		ret = finfo->is_final_fragment ?
			LA_REASM_COMPLETE : LA_REASM_IN_PROGRESS;
	}
end:
	// Update fragment counter and expire old entries if necessary.
	// Expiration is performed in relation to rx_time of the fragment currently
	// being processed. This allows processing historical data with timestamps in
	// the past.
	if(++rtable->frag_cnt > rtable->cleanup_interval) {
		la_reasm_table_cleanup(rtable, finfo->rx_time);
		rtable->frag_cnt = 0;
	}
	la_debug_print(D_INFO, "Result: %d\n", ret);
	LA_XFREE(lookup_key);
	return ret;
}

// Returns the reassembled payload and removes the packet data from reassembly table.
// On success stores a newly allocated, NUL-terminated buffer in *result (owned
// by the caller) and returns its length (excluding the terminator). Returns -1
// if no reassembly state exists for msg_info, 0 if no payload bytes were
// collected; *result is untouched in both of those cases.
int la_reasm_payload_get(la_reasm_table *rtable, void const *msg_info, uint8_t **result) {
	la_assert(rtable != NULL);
	la_assert(msg_info != NULL);
	la_assert(result != NULL);
	void *tmp_key = rtable->funcs.get_tmp_key(msg_info);
	la_assert(tmp_key);
	// Use a signed type here: the function returns int and -1 is a sentinel.
	// The previous `size_t result_len = -1` wrapped to SIZE_MAX and relied on
	// implementation-defined narrowing back to -1 on return.
	int result_len = -1;
	la_reasm_table_entry *rt_entry = la_hash_lookup(rtable->fragment_table, tmp_key);
	if(rt_entry == NULL) {
		result_len = -1;
		goto end;
	}
	if(rt_entry->frags_collected_total_len < 1) {
		result_len = 0;
		goto end;
	}
	// Append a NULL byte at the end of the reassembled buffer, so that it can be
	// cast to char * if this is a text message.
	uint8_t *reasm_buf = LA_XCALLOC(rt_entry->frags_collected_total_len + 1, sizeof(uint8_t));
	uint8_t *ptr = reasm_buf;
	for(la_list *l = rt_entry->fragment_list; l != NULL; l = la_list_next(l)) {
		la_octet_string *ostring = l->data;
		memcpy(ptr, ostring->buf, ostring->len);
		ptr += ostring->len;
	}
	reasm_buf[rt_entry->frags_collected_total_len] = '\0';   // buffer len is frags_collected_total_len + 1
	*result = reasm_buf;
	result_len = rt_entry->frags_collected_total_len;
	// Reassembly is done - drop the state (entry value and key are freed by the hash).
	la_hash_remove(rtable->fragment_table, tmp_key);
end:
	LA_XFREE(tmp_key);
	return result_len;
}

// Maps a la_reasm_status value to a human-readable name.
// Returns NULL for out-of-range values.
char const *la_reasm_status_name_get(la_reasm_status status) {
	static char const *reasm_status_names[] = {
		[LA_REASM_UNKNOWN] = "unknown",
		[LA_REASM_COMPLETE] = "complete",
		[LA_REASM_IN_PROGRESS] = "in progress",
		[LA_REASM_SKIPPED] = "skipped",
		[LA_REASM_DUPLICATE] = "duplicate",
		[LA_REASM_FRAG_OUT_OF_SEQUENCE] = "out of sequence",
		[LA_REASM_ARGS_INVALID] = "invalid args"
	};
	if(status < 0 || status > LA_REASM_STATUS_MAX) {
		return NULL;
	}
	return reasm_status_names[status];
}
789837.c
#include "eif_eiffel.h" #ifdef __cplusplus extern "C" { #endif /* ANY */ static EIF_TYPE_INDEX ptf0[] = {0xFFFF}; static struct eif_par_types par0 = {0, ptf0, (uint16) 0, (uint16) 0, (char) 0}; /* TEST_CASE */ static EIF_TYPE_INDEX ptf1[] = {0,0xFFFF}; static struct eif_par_types par1 = {1, ptf1, (uint16) 1, (uint16) 0, (char) 0}; /* SYSTEM_STRING_FACTORY */ static EIF_TYPE_INDEX ptf2[] = {0,0xFFFF}; static struct eif_par_types par2 = {2, ptf2, (uint16) 1, (uint16) 0, (char) 0}; /* SHARED_EXECUTION_ENVIRONMENT */ static EIF_TYPE_INDEX ptf3[] = {0,0xFFFF}; static struct eif_par_types par3 = {3, ptf3, (uint16) 1, (uint16) 0, (char) 0}; /* ISE_SCOOP_RUNTIME */ static EIF_TYPE_INDEX ptf4[] = {0,0xFFFF}; static struct eif_par_types par4 = {4, ptf4, (uint16) 1, (uint16) 0, (char) 0}; /* SED_META_MODEL */ static EIF_TYPE_INDEX ptf5[] = {0,0xFFFF}; static struct eif_par_types par5 = {5, ptf5, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_EXECUTION_PARAMETERS */ static EIF_TYPE_INDEX ptf6[] = {0,0xFFFF}; static struct eif_par_types par6 = {6, ptf6, (uint16) 1, (uint16) 0, (char) 0}; /* IDENTIFIED_CONTROLLER */ static EIF_TYPE_INDEX ptf7[] = {0,0xFFFF}; static struct eif_par_types par7 = {7, ptf7, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DEBUGGER */ static EIF_TYPE_INDEX ptf8[] = {0,0xFFFF}; static struct eif_par_types par8 = {8, ptf8, (uint16) 1, (uint16) 0, (char) 0}; /* DECLARATOR */ static EIF_TYPE_INDEX ptf9[] = {0,0xFFFF}; static struct eif_par_types par9 = {9, ptf9, (uint16) 1, (uint16) 0, (char) 0}; /* SED_TYPE_MISMATCH */ static EIF_TYPE_INDEX ptf10[] = {0,0xFFFF}; static struct eif_par_types par10 = {10, ptf10, (uint16) 1, (uint16) 0, (char) 0}; /* FILE_UTILITIES */ static EIF_TYPE_INDEX ptf11[] = {12,0xFFFF}; static struct eif_par_types par11 = {11, ptf11, (uint16) 1, (uint16) 0, (char) 1}; /* reference FILE_UTILITIES */ static EIF_TYPE_INDEX ptf12[] = {0,0xFFFF}; static struct eif_par_types par12 = {12, ptf12, (uint16) 1, (uint16) 0, (char) 1}; /* SED_ERROR */ 
static EIF_TYPE_INDEX ptf13[] = {0,0xFFFF}; static struct eif_par_types par13 = {13, ptf13, (uint16) 1, (uint16) 0, (char) 0}; /* STD_FILES */ static EIF_TYPE_INDEX ptf14[] = {0,0xFFFF}; static struct eif_par_types par14 = {14, ptf14, (uint16) 1, (uint16) 0, (char) 0}; /* OPERATING_ENVIRONMENT */ static EIF_TYPE_INDEX ptf15[] = {0,0xFFFF}; static struct eif_par_types par15 = {15, ptf15, (uint16) 1, (uint16) 0, (char) 0}; /* VERSIONABLE */ static EIF_TYPE_INDEX ptf16[] = {0,0xFFFF}; static struct eif_par_types par16 = {16, ptf16, (uint16) 1, (uint16) 0, (char) 0}; /* SED_ERROR_FACTORY */ static EIF_TYPE_INDEX ptf17[] = {0,0xFFFF}; static struct eif_par_types par17 = {17, ptf17, (uint16) 1, (uint16) 0, (char) 0}; /* CHARACTER_PROPERTY */ static EIF_TYPE_INDEX ptf18[] = {0,0xFFFF}; static struct eif_par_types par18 = {18, ptf18, (uint16) 1, (uint16) 0, (char) 0}; /* SED_VERSIONS */ static EIF_TYPE_INDEX ptf19[] = {0,0xFFFF}; static struct eif_par_types par19 = {19, ptf19, (uint16) 1, (uint16) 0, (char) 0}; /* ASCII */ static EIF_TYPE_INDEX ptf20[] = {0,0xFFFF}; static struct eif_par_types par20 = {20, ptf20, (uint16) 1, (uint16) 0, (char) 0}; /* ISE_RUNTIME */ static EIF_TYPE_INDEX ptf21[] = {0,0xFFFF}; static struct eif_par_types par21 = {21, ptf21, (uint16) 1, (uint16) 0, (char) 0}; /* UTF_CONVERTER */ static EIF_TYPE_INDEX ptf22[] = {23,0xFFFF}; static struct eif_par_types par22 = {22, ptf22, (uint16) 1, (uint16) 0, (char) 1}; /* reference UTF_CONVERTER */ static EIF_TYPE_INDEX ptf23[] = {0,0xFFFF}; static struct eif_par_types par23 = {23, ptf23, (uint16) 1, (uint16) 0, (char) 1}; /* PROFILING_SETTING */ static EIF_TYPE_INDEX ptf24[] = {0,0xFFFF}; static struct eif_par_types par24 = {24, ptf24, (uint16) 1, (uint16) 0, (char) 0}; /* TRACING_SETTING */ static EIF_TYPE_INDEX ptf25[] = {0,0xFFFF}; static struct eif_par_types par25 = {25, ptf25, (uint16) 1, (uint16) 0, (char) 0}; /* UNIX_SIGNALS */ static EIF_TYPE_INDEX ptf26[] = {0,0xFFFF}; static struct eif_par_types 
par26 = {26, ptf26, (uint16) 1, (uint16) 0, (char) 0}; /* SYSTEM_STRING */ static EIF_TYPE_INDEX ptf27[] = {0,0xFFFF}; static struct eif_par_types par27 = {27, ptf27, (uint16) 1, (uint16) 0, (char) 0}; /* BASIC_ROUTINES */ static EIF_TYPE_INDEX ptf28[] = {0,0xFFFF}; static struct eif_par_types par28 = {28, ptf28, (uint16) 1, (uint16) 0, (char) 0}; /* FORMAT_INTEGER */ static EIF_TYPE_INDEX ptf29[] = {0,0xFFFF}; static struct eif_par_types par29 = {29, ptf29, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEP_CONST */ static EIF_TYPE_INDEX ptf30[] = {0,0xFFFF}; static struct eif_par_types par30 = {30, ptf30, (uint16) 1, (uint16) 0, (char) 0}; /* TRACING_HANDLER */ static EIF_TYPE_INDEX ptf31[] = {0,0xFFFF}; static struct eif_par_types par31 = {31, ptf31, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_TRACING_HANDLER */ static EIF_TYPE_INDEX ptf32[] = {31,0xFFFF}; static struct eif_par_types par32 = {32, ptf32, (uint16) 1, (uint16) 0, (char) 0}; /* AGENT_TRACING_HANDLER */ static EIF_TYPE_INDEX ptf33[] = {32,0xFFFF}; static struct eif_par_types par33 = {33, ptf33, (uint16) 1, (uint16) 0, (char) 0}; /* IDENTIFIED_ROUTINES */ static EIF_TYPE_INDEX ptf34[] = {0,0xFFFF}; static struct eif_par_types par34 = {34, ptf34, (uint16) 1, (uint16) 0, (char) 0}; /* SED_ABSTRACT_OBJECTS_TABLE */ static EIF_TYPE_INDEX ptf35[] = {0,0xFFFF}; static struct eif_par_types par35 = {35, ptf35, (uint16) 1, (uint16) 0, (char) 0}; /* SED_STORABLE_FACILITIES */ static EIF_TYPE_INDEX ptf36[] = {0,0xFFFF}; static struct eif_par_types par36 = {36, ptf36, (uint16) 1, (uint16) 0, (char) 0}; /* OBJECT_GRAPH_MARKER */ static EIF_TYPE_INDEX ptf37[] = {0,0xFFFF}; static struct eif_par_types par37 = {37, ptf37, (uint16) 1, (uint16) 0, (char) 0}; /* SED_READER_WRITER */ static EIF_TYPE_INDEX ptf38[] = {0,0xFFFF}; static struct eif_par_types par38 = {38, ptf38, (uint16) 1, (uint16) 0, (char) 0}; /* SED_BINARY_READER_WRITER */ static EIF_TYPE_INDEX ptf39[] = {38,0xFFFF}; static struct eif_par_types par39 = {39, 
ptf39, (uint16) 1, (uint16) 0, (char) 0}; /* SED_MEMORY_READER_WRITER */ static EIF_TYPE_INDEX ptf40[] = {39,0xFFFF}; static struct eif_par_types par40 = {40, ptf40, (uint16) 1, (uint16) 0, (char) 0}; /* REFACTORING_HELPER */ static EIF_TYPE_INDEX ptf41[] = {0,0xFFFF}; static struct eif_par_types par41 = {41, ptf41, (uint16) 1, (uint16) 0, (char) 0}; /* THREAD_ENVIRONMENT */ static EIF_TYPE_INDEX ptf42[] = {0,0xFFFF}; static struct eif_par_types par42 = {42, ptf42, (uint16) 1, (uint16) 0, (char) 0}; /* RT_EXTENSION_COMMON */ static EIF_TYPE_INDEX ptf43[] = {0,0xFFFF}; static struct eif_par_types par43 = {43, ptf43, (uint16) 1, (uint16) 0, (char) 0}; /* RT_EXTENSION_GENERAL */ static EIF_TYPE_INDEX ptf44[] = {43,0xFFFF}; static struct eif_par_types par44 = {44, ptf44, (uint16) 1, (uint16) 0, (char) 0}; /* RT_EXTENSION */ static EIF_TYPE_INDEX ptf45[] = {44,0xFFFF}; static struct eif_par_types par45 = {45, ptf45, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_COMMON */ static EIF_TYPE_INDEX ptf46[] = {43,0xFFFF}; static struct eif_par_types par46 = {46, ptf46, (uint16) 1, (uint16) 0, (char) 0}; /* MEMORY_STRUCTURE */ static EIF_TYPE_INDEX ptf47[] = {0,0xFFFF}; static struct eif_par_types par47 = {47, ptf47, (uint16) 1, (uint16) 0, (char) 0}; /* OBJECT_GRAPH_TRAVERSABLE */ static EIF_TYPE_INDEX ptf48[] = {0,0xFFFF}; static struct eif_par_types par48 = {48, ptf48, (uint16) 1, (uint16) 0, (char) 0}; /* OBJECT_GRAPH_DEPTH_FIRST_TRAVERSABLE */ static EIF_TYPE_INDEX ptf49[] = {48,0xFFFF}; static struct eif_par_types par49 = {49, ptf49, (uint16) 1, (uint16) 0, (char) 0}; /* OBJECT_GRAPH_BREADTH_FIRST_TRAVERSABLE */ static EIF_TYPE_INDEX ptf50[] = {48,0xFFFF}; static struct eif_par_types par50 = {50, ptf50, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEPTION_MANAGER */ static EIF_TYPE_INDEX ptf51[] = {0,0xFFFF}; static struct eif_par_types par51 = {51, ptf51, (uint16) 1, (uint16) 0, (char) 0}; /* ISE_EXCEPTION_MANAGER */ static EIF_TYPE_INDEX ptf52[] = {51,0xFFFF}; static struct 
eif_par_types par52 = {52, ptf52, (uint16) 1, (uint16) 0, (char) 0}; /* REFLECTOR_HELPER */ static EIF_TYPE_INDEX ptf53[] = {0,0xFFFF}; static struct eif_par_types par53 = {53, ptf53, (uint16) 1, (uint16) 0, (char) 0}; /* INTERNAL_HELPER */ static EIF_TYPE_INDEX ptf54[] = {53,0xFFFF}; static struct eif_par_types par54 = {54, ptf54, (uint16) 1, (uint16) 0, (char) 0}; /* NUMERIC_INFORMATION */ static EIF_TYPE_INDEX ptf55[] = {0,0xFFFF}; static struct eif_par_types par55 = {55, ptf55, (uint16) 1, (uint16) 0, (char) 0}; /* INTEGER_OVERFLOW_CHECKER */ static EIF_TYPE_INDEX ptf56[] = {55,0xFFFF}; static struct eif_par_types par56 = {56, ptf56, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_TO_NUMERIC_CONVERTOR */ static EIF_TYPE_INDEX ptf57[] = {55,0xFFFF}; static struct eif_par_types par57 = {57, ptf57, (uint16) 1, (uint16) 0, (char) 0}; /* HEXADECIMAL_STRING_TO_INTEGER_CONVERTER */ static EIF_TYPE_INDEX ptf58[] = {57,0xFFFF}; static struct eif_par_types par58 = {58, ptf58, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_TO_REAL_CONVERTOR */ static EIF_TYPE_INDEX ptf59[] = {57,0xFFFF}; static struct eif_par_types par59 = {59, ptf59, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_TO_INTEGER_CONVERTOR */ static EIF_TYPE_INDEX ptf60[] = {57,0xFFFF}; static struct eif_par_types par60 = {60, ptf60, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEPTION_MANAGER_FACTORY */ static EIF_TYPE_INDEX ptf61[] = {0,0xFFFF}; static struct eif_par_types par61 = {61, ptf61, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEPTIONS */ static EIF_TYPE_INDEX ptf62[] = {30,0xFFF7,61,0xFFFF}; static struct eif_par_types par62 = {62, ptf62, (uint16) 2, (uint16) 0, (char) 0}; /* STORABLE */ static EIF_TYPE_INDEX ptf63[] = {62,0xFFFF}; static struct eif_par_types par63 = {63, ptf63, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEPTION */ static EIF_TYPE_INDEX ptf64[] = {61,0xFFFF}; static struct eif_par_types par64 = {64, ptf64, (uint16) 1, (uint16) 0, (char) 0}; /* DEVELOPER_EXCEPTION */ static EIF_TYPE_INDEX ptf65[] = 
{64,0xFFFF}; static struct eif_par_types par65 = {65, ptf65, (uint16) 1, (uint16) 0, (char) 0}; /* MACHINE_EXCEPTION */ static EIF_TYPE_INDEX ptf66[] = {64,0xFFFF}; static struct eif_par_types par66 = {66, ptf66, (uint16) 1, (uint16) 0, (char) 0}; /* HARDWARE_EXCEPTION */ static EIF_TYPE_INDEX ptf67[] = {66,0xFFFF}; static struct eif_par_types par67 = {67, ptf67, (uint16) 1, (uint16) 0, (char) 0}; /* FLOATING_POINT_FAILURE */ static EIF_TYPE_INDEX ptf68[] = {67,0xFFFF}; static struct eif_par_types par68 = {68, ptf68, (uint16) 1, (uint16) 0, (char) 0}; /* OPERATING_SYSTEM_EXCEPTION */ static EIF_TYPE_INDEX ptf69[] = {66,0xFFFF}; static struct eif_par_types par69 = {69, ptf69, (uint16) 1, (uint16) 0, (char) 0}; /* COM_FAILURE */ static EIF_TYPE_INDEX ptf70[] = {69,0xFFFF}; static struct eif_par_types par70 = {70, ptf70, (uint16) 1, (uint16) 0, (char) 0}; /* OPERATING_SYSTEM_FAILURE */ static EIF_TYPE_INDEX ptf71[] = {69,0xFFFF}; static struct eif_par_types par71 = {71, ptf71, (uint16) 1, (uint16) 0, (char) 0}; /* OPERATING_SYSTEM_SIGNAL_FAILURE */ static EIF_TYPE_INDEX ptf72[] = {69,0xFFFF}; static struct eif_par_types par72 = {72, ptf72, (uint16) 1, (uint16) 0, (char) 0}; /* OBSOLETE_EXCEPTION */ static EIF_TYPE_INDEX ptf73[] = {64,0xFFFF}; static struct eif_par_types par73 = {73, ptf73, (uint16) 1, (uint16) 0, (char) 0}; /* RESUMPTION_FAILURE */ static EIF_TYPE_INDEX ptf74[] = {73,0xFFFF}; static struct eif_par_types par74 = {74, ptf74, (uint16) 1, (uint16) 0, (char) 0}; /* RESCUE_FAILURE */ static EIF_TYPE_INDEX ptf75[] = {73,0xFFFF}; static struct eif_par_types par75 = {75, ptf75, (uint16) 1, (uint16) 0, (char) 0}; /* EXCEPTION_IN_SIGNAL_HANDLER_FAILURE */ static EIF_TYPE_INDEX ptf76[] = {73,0xFFFF}; static struct eif_par_types par76 = {76, ptf76, (uint16) 1, (uint16) 0, (char) 0}; /* SYS_EXCEPTION */ static EIF_TYPE_INDEX ptf77[] = {64,0xFFFF}; static struct eif_par_types par77 = {77, ptf77, (uint16) 1, (uint16) 0, (char) 0}; /* EIFFEL_RUNTIME_PANIC */ static 
EIF_TYPE_INDEX ptf78[] = {77,0xFFFF}; static struct eif_par_types par78 = {78, ptf78, (uint16) 1, (uint16) 0, (char) 0}; /* OLD_VIOLATION */ static EIF_TYPE_INDEX ptf79[] = {77,0xFFFF}; static struct eif_par_types par79 = {79, ptf79, (uint16) 1, (uint16) 0, (char) 0}; /* EIF_EXCEPTION */ static EIF_TYPE_INDEX ptf80[] = {77,0xFFFF}; static struct eif_par_types par80 = {80, ptf80, (uint16) 1, (uint16) 0, (char) 0}; /* EIFFEL_RUNTIME_EXCEPTION */ static EIF_TYPE_INDEX ptf81[] = {80,0xFFFF}; static struct eif_par_types par81 = {81, ptf81, (uint16) 1, (uint16) 0, (char) 0}; /* EXTERNAL_FAILURE */ static EIF_TYPE_INDEX ptf82[] = {81,0xFFFF}; static struct eif_par_types par82 = {82, ptf82, (uint16) 1, (uint16) 0, (char) 0}; /* NO_MORE_MEMORY */ static EIF_TYPE_INDEX ptf83[] = {81,0xFFFF}; static struct eif_par_types par83 = {83, ptf83, (uint16) 1, (uint16) 0, (char) 0}; /* DATA_EXCEPTION */ static EIF_TYPE_INDEX ptf84[] = {81,0xFFFF}; static struct eif_par_types par84 = {84, ptf84, (uint16) 1, (uint16) 0, (char) 0}; /* SERIALIZATION_FAILURE */ static EIF_TYPE_INDEX ptf85[] = {84,0xFFFF}; static struct eif_par_types par85 = {85, ptf85, (uint16) 1, (uint16) 0, (char) 0}; /* MISMATCH_FAILURE */ static EIF_TYPE_INDEX ptf86[] = {84,0xFFFF}; static struct eif_par_types par86 = {86, ptf86, (uint16) 1, (uint16) 0, (char) 0}; /* IO_FAILURE */ static EIF_TYPE_INDEX ptf87[] = {84,0xFFFF}; static struct eif_par_types par87 = {87, ptf87, (uint16) 1, (uint16) 0, (char) 0}; /* LANGUAGE_EXCEPTION */ static EIF_TYPE_INDEX ptf88[] = {80,0xFFFF}; static struct eif_par_types par88 = {88, ptf88, (uint16) 1, (uint16) 0, (char) 0}; /* VOID_TARGET */ static EIF_TYPE_INDEX ptf89[] = {88,0xFFFF}; static struct eif_par_types par89 = {89, ptf89, (uint16) 1, (uint16) 0, (char) 0}; /* VOID_ASSIGNED_TO_EXPANDED */ static EIF_TYPE_INDEX ptf90[] = {88,0xFFFF}; static struct eif_par_types par90 = {90, ptf90, (uint16) 1, (uint16) 0, (char) 0}; /* ROUTINE_FAILURE */ static EIF_TYPE_INDEX ptf91[] = 
{88,0xFFFF}; static struct eif_par_types par91 = {91, ptf91, (uint16) 1, (uint16) 0, (char) 0}; /* BAD_INSPECT_VALUE */ static EIF_TYPE_INDEX ptf92[] = {88,0xFFFF}; static struct eif_par_types par92 = {92, ptf92, (uint16) 1, (uint16) 0, (char) 0}; /* EIFFELSTUDIO_SPECIFIC_LANGUAGE_EXCEPTION */ static EIF_TYPE_INDEX ptf93[] = {88,0xFFFF}; static struct eif_par_types par93 = {93, ptf93, (uint16) 1, (uint16) 0, (char) 0}; /* CREATE_ON_DEFERRED */ static EIF_TYPE_INDEX ptf94[] = {93,0xFFFF}; static struct eif_par_types par94 = {94, ptf94, (uint16) 1, (uint16) 0, (char) 0}; /* ADDRESS_APPLIED_TO_MELTED_FEATURE */ static EIF_TYPE_INDEX ptf95[] = {93,0xFFFF}; static struct eif_par_types par95 = {95, ptf95, (uint16) 1, (uint16) 0, (char) 0}; /* ASSERTION_VIOLATION */ static EIF_TYPE_INDEX ptf96[] = {64,0xFFFF}; static struct eif_par_types par96 = {96, ptf96, (uint16) 1, (uint16) 0, (char) 0}; /* LOOP_INVARIANT_VIOLATION */ static EIF_TYPE_INDEX ptf97[] = {96,0xFFFF}; static struct eif_par_types par97 = {97, ptf97, (uint16) 1, (uint16) 0, (char) 0}; /* PRECONDITION_VIOLATION */ static EIF_TYPE_INDEX ptf98[] = {96,0xFFFF}; static struct eif_par_types par98 = {98, ptf98, (uint16) 1, (uint16) 0, (char) 0}; /* POSTCONDITION_VIOLATION */ static EIF_TYPE_INDEX ptf99[] = {96,0xFFFF}; static struct eif_par_types par99 = {99, ptf99, (uint16) 1, (uint16) 0, (char) 0}; /* VARIANT_VIOLATION */ static EIF_TYPE_INDEX ptf100[] = {96,0xFFFF}; static struct eif_par_types par100 = {100, ptf100, (uint16) 1, (uint16) 0, (char) 0}; /* CHECK_VIOLATION */ static EIF_TYPE_INDEX ptf101[] = {96,0xFFFF}; static struct eif_par_types par101 = {101, ptf101, (uint16) 1, (uint16) 0, (char) 0}; /* INVARIANT_VIOLATION */ static EIF_TYPE_INDEX ptf102[] = {96,0xFFFF}; static struct eif_par_types par102 = {102, ptf102, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_SEARCHER */ static EIF_TYPE_INDEX ptf103[] = {0,0xFFFF}; static struct eif_par_types par103 = {103, ptf103, (uint16) 1, (uint16) 0, (char) 0}; /* 
STRING_8_SEARCHER */ static EIF_TYPE_INDEX ptf104[] = {103,0xFFFF}; static struct eif_par_types par104 = {104, ptf104, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_32_SEARCHER */ static EIF_TYPE_INDEX ptf105[] = {103,0xFFFF}; static struct eif_par_types par105 = {105, ptf105, (uint16) 1, (uint16) 0, (char) 0}; /* PART_COMPARABLE */ static EIF_TYPE_INDEX ptf106[] = {0,0xFFFF}; static struct eif_par_types par106 = {106, ptf106, (uint16) 1, (uint16) 0, (char) 0}; /* COMPARABLE */ static EIF_TYPE_INDEX ptf107[] = {106,0xFFFF}; static struct eif_par_types par107 = {107, ptf107, (uint16) 1, (uint16) 0, (char) 0}; /* SED_UTILITIES */ static EIF_TYPE_INDEX ptf108[] = {0,0xFFFF}; static struct eif_par_types par108 = {108, ptf108, (uint16) 1, (uint16) 0, (char) 0}; /* SED_SESSION_SERIALIZER */ static EIF_TYPE_INDEX ptf109[] = {108,0xFFFF}; static struct eif_par_types par109 = {109, ptf109, (uint16) 1, (uint16) 0, (char) 0}; /* SED_BASIC_SERIALIZER */ static EIF_TYPE_INDEX ptf110[] = {109,0xFFFF}; static struct eif_par_types par110 = {110, ptf110, (uint16) 1, (uint16) 0, (char) 0}; /* SED_RECOVERABLE_SERIALIZER */ static EIF_TYPE_INDEX ptf111[] = {110,0xFFFF}; static struct eif_par_types par111 = {111, ptf111, (uint16) 1, (uint16) 0, (char) 0}; /* SED_INDEPENDENT_SERIALIZER */ static EIF_TYPE_INDEX ptf112[] = {110,0xFFFF}; static struct eif_par_types par112 = {112, ptf112, (uint16) 1, (uint16) 0, (char) 0}; /* SED_SESSION_DESERIALIZER */ static EIF_TYPE_INDEX ptf113[] = {108,0xFFF7,51,0xFFFF}; static struct eif_par_types par113 = {113, ptf113, (uint16) 2, (uint16) 0, (char) 0}; /* SED_BASIC_DESERIALIZER */ static EIF_TYPE_INDEX ptf114[] = {113,0xFFFF}; static struct eif_par_types par114 = {114, ptf114, (uint16) 1, (uint16) 0, (char) 0}; /* SED_INDEPENDENT_DESERIALIZER */ static EIF_TYPE_INDEX ptf115[] = {114,0xFFFF}; static struct eif_par_types par115 = {115, ptf115, (uint16) 1, (uint16) 0, (char) 0}; /* MATH_CONST */ static EIF_TYPE_INDEX ptf116[] = {0,0xFFFF}; static struct 
eif_par_types par116 = {116, ptf116, (uint16) 1, (uint16) 0, (char) 0}; /* SINGLE_MATH */ static EIF_TYPE_INDEX ptf117[] = {116,0xFFFF}; static struct eif_par_types par117 = {117, ptf117, (uint16) 1, (uint16) 0, (char) 0}; /* DOUBLE_MATH */ static EIF_TYPE_INDEX ptf118[] = {116,0xFFFF}; static struct eif_par_types par118 = {118, ptf118, (uint16) 1, (uint16) 0, (char) 0}; /* FORMAT_DOUBLE */ static EIF_TYPE_INDEX ptf119[] = {29,0xFFF7,118,0xFFFF}; static struct eif_par_types par119 = {119, ptf119, (uint16) 2, (uint16) 0, (char) 0}; /* MEM_CONST */ static EIF_TYPE_INDEX ptf120[] = {0,0xFFFF}; static struct eif_par_types par120 = {120, ptf120, (uint16) 1, (uint16) 0, (char) 0}; /* MEM_INFO */ static EIF_TYPE_INDEX ptf121[] = {47,0xFFF7,120,0xFFFF}; static struct eif_par_types par121 = {121, ptf121, (uint16) 2, (uint16) 0, (char) 0}; /* GC_INFO */ static EIF_TYPE_INDEX ptf122[] = {47,0xFFF7,120,0xFFFF}; static struct eif_par_types par122 = {122, ptf122, (uint16) 2, (uint16) 0, (char) 0}; /* PLATFORM */ static EIF_TYPE_INDEX ptf123[] = {0,0xFFFF}; static struct eif_par_types par123 = {123, ptf123, (uint16) 1, (uint16) 0, (char) 0}; /* SED_MEDIUM_READER_WRITER_1 */ static EIF_TYPE_INDEX ptf124[] = {39,0xFFF7,123,0xFFFF}; static struct eif_par_types par124 = {124, ptf124, (uint16) 2, (uint16) 0, (char) 0}; /* SED_MEDIUM_READER_WRITER */ static EIF_TYPE_INDEX ptf125[] = {39,0xFFF7,123,0xFFFF}; static struct eif_par_types par125 = {125, ptf125, (uint16) 2, (uint16) 0, (char) 0}; /* STRING_HANDLER */ static EIF_TYPE_INDEX ptf126[] = {0,0xFFFF}; static struct eif_par_types par126 = {126, ptf126, (uint16) 1, (uint16) 0, (char) 0}; /* C_STRING */ static EIF_TYPE_INDEX ptf127[] = {126,0xFFFF}; static struct eif_par_types par127 = {127, ptf127, (uint16) 1, (uint16) 0, (char) 0}; /* REFLECTOR_CONSTANTS */ static EIF_TYPE_INDEX ptf128[] = {0,0xFFFF}; static struct eif_par_types par128 = {128, ptf128, (uint16) 1, (uint16) 0, (char) 0}; /* REFLECTOR */ static EIF_TYPE_INDEX ptf129[] 
= {53,0xFFF7,128,0xFFFF}; static struct eif_par_types par129 = {129, ptf129, (uint16) 2, (uint16) 0, (char) 0}; /* INTERNAL */ static EIF_TYPE_INDEX ptf130[] = {129,0xFFF7,37,0xFFFF}; static struct eif_par_types par130 = {130, ptf130, (uint16) 2, (uint16) 0, (char) 0}; /* ECMA_INTERNAL */ static EIF_TYPE_INDEX ptf131[] = {130,0xFFFF}; static struct eif_par_types par131 = {131, ptf131, (uint16) 1, (uint16) 0, (char) 0}; /* REFLECTED_OBJECT */ static EIF_TYPE_INDEX ptf132[] = {128,0xFFFF}; static struct eif_par_types par132 = {132, ptf132, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_INTERNAL */ static EIF_TYPE_INDEX ptf133[] = {128,0xFFFF}; static struct eif_par_types par133 = {133, ptf133, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_EXECUTION_RECORDER */ static EIF_TYPE_INDEX ptf134[] = {46,0xFFF7,133,0xFFFF}; static struct eif_par_types par134 = {134, ptf134, (uint16) 2, (uint16) 0, (char) 0}; /* REFLECTED_COPY_SEMANTICS_OBJECT */ static EIF_TYPE_INDEX ptf135[] = {132,0xFFF7,128,0xFFFF}; static struct eif_par_types par135 = {135, ptf135, (uint16) 2, (uint16) 0, (char) 0}; /* REFLECTED_REFERENCE_OBJECT */ static EIF_TYPE_INDEX ptf136[] = {132,0xFFF7,128,0xFFFF}; static struct eif_par_types par136 = {136, ptf136, (uint16) 2, (uint16) 0, (char) 0}; /* DEBUG_OUTPUT */ static EIF_TYPE_INDEX ptf137[] = {0,0xFFFF}; static struct eif_par_types par137 = {137, ptf137, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_CALL_RECORD */ static EIF_TYPE_INDEX ptf138[] = {137,0xFFF7,133,0xFFF7,46,0xFFFF}; static struct eif_par_types par138 = {138, ptf138, (uint16) 3, (uint16) 0, (char) 0}; /* ABSTRACT_SPECIAL */ static EIF_TYPE_INDEX ptf139[] = {137,0xFFFF}; static struct eif_par_types par139 = {139, ptf139, (uint16) 1, (uint16) 0, (char) 0}; /* RT_DBG_VALUE_RECORD */ static EIF_TYPE_INDEX ptf140[] = {137,0xFFF7,46,0xFFF7,133,0xFFFF}; static struct eif_par_types par140 = {140, ptf140, (uint16) 3, (uint16) 0, (char) 0}; /* NUMERIC */ static EIF_TYPE_INDEX ptf141[] = {137,0xFFFF}; 
static struct eif_par_types par141 = {141, ptf141, (uint16) 1, (uint16) 0, (char) 0}; /* CURSOR */ static EIF_TYPE_INDEX ptf142[] = {0,0xFFFF}; static struct eif_par_types par142 = {142, ptf142, (uint16) 1, (uint16) 0, (char) 0}; /* CIRCULAR_CURSOR */ static EIF_TYPE_INDEX ptf143[] = {142,0xFFFF}; static struct eif_par_types par143 = {143, ptf143, (uint16) 1, (uint16) 0, (char) 0}; /* HASH_TABLE_CURSOR */ static EIF_TYPE_INDEX ptf144[] = {142,0xFFFF}; static struct eif_par_types par144 = {144, ptf144, (uint16) 1, (uint16) 0, (char) 0}; /* ARRAYED_LIST_CURSOR */ static EIF_TYPE_INDEX ptf145[] = {142,0xFFFF}; static struct eif_par_types par145 = {145, ptf145, (uint16) 1, (uint16) 0, (char) 0}; /* ARGUMENTS */ static EIF_TYPE_INDEX ptf146[] = {257,0xFF01,232,0xFFFF}; static struct eif_par_types par146 = {146, ptf146, (uint16) 1, (uint16) 0, (char) 0}; /* ARGUMENTS_32 */ static EIF_TYPE_INDEX ptf147[] = {257,0xFF01,241,0xFFFF}; static struct eif_par_types par147 = {147, ptf147, (uint16) 1, (uint16) 0, (char) 0}; /* INTEGER_INTERVAL */ static EIF_TYPE_INDEX ptf148[] = {567,218,0xFFF7,563,218,218,0xFFF7,949,218,0xFFFF}; static struct eif_par_types par148 = {148, ptf148, (uint16) 3, (uint16) 0, (char) 0}; /* ACTIVE_INTEGER_INTERVAL */ static EIF_TYPE_INDEX ptf149[] = {148,0xFFFF}; static struct eif_par_types par149 = {149, ptf149, (uint16) 1, (uint16) 0, (char) 0}; /* NATIVE_STRING_HANDLER */ static EIF_TYPE_INDEX ptf150[] = {0,0xFFFF}; static struct eif_par_types par150 = {150, ptf150, (uint16) 1, (uint16) 0, (char) 0}; /* NATIVE_STRING */ static EIF_TYPE_INDEX ptf151[] = {150,0xFFFF}; static struct eif_par_types par151 = {151, ptf151, (uint16) 1, (uint16) 0, (char) 0}; /* FILE_COMPARER */ static EIF_TYPE_INDEX ptf152[] = {150,0xFFFF}; static struct eif_par_types par152 = {152, ptf152, (uint16) 1, (uint16) 0, (char) 0}; /* EXECUTION_ENVIRONMENT */ static EIF_TYPE_INDEX ptf153[] = {150,0xFFFF}; static struct eif_par_types par153 = {153, ptf153, (uint16) 1, (uint16) 0, 
(char) 0}; /* FILE_INFO */ static EIF_TYPE_INDEX ptf154[] = {487,209,0xFFF7,150,0xFFFF}; static struct eif_par_types par154 = {154, ptf154, (uint16) 2, (uint16) 0, (char) 0}; /* UNIX_FILE_INFO */ static EIF_TYPE_INDEX ptf155[] = {154,0xFFFF}; static struct eif_par_types par155 = {155, ptf155, (uint16) 1, (uint16) 0, (char) 0}; /* MISMATCH_CORRECTOR */ static EIF_TYPE_INDEX ptf156[] = {0,0xFFFF}; static struct eif_par_types par156 = {156, ptf156, (uint16) 1, (uint16) 0, (char) 0}; /* SED_RECOVERABLE_DESERIALIZER */ static EIF_TYPE_INDEX ptf157[] = {114,0xFFF7,156,0xFFFF}; static struct eif_par_types par157 = {157, ptf157, (uint16) 2, (uint16) 0, (char) 0}; /* MISMATCH_INFORMATION */ static EIF_TYPE_INDEX ptf158[] = {412,0,0xFF01,232,0xFFFF}; static struct eif_par_types par158 = {158, ptf158, (uint16) 1, (uint16) 0, (char) 0}; /* CLASS_NAME_TRANSLATIONS */ static EIF_TYPE_INDEX ptf159[] = {412,0xFF01,232,0xFF01,232,0xFFFF}; static struct eif_par_types par159 = {159, ptf159, (uint16) 1, (uint16) 0, (char) 0}; /* SED_OBJECTS_TABLE */ static EIF_TYPE_INDEX ptf160[] = {35,0xFFF7,886,212,227,0xFFFF}; static struct eif_par_types par160 = {160, ptf160, (uint16) 2, (uint16) 0, (char) 0}; /* RANDOM */ static EIF_TYPE_INDEX ptf161[] = {822,218,0xFFF7,118,0xFFF7,406,218,0xFFFF}; static struct eif_par_types par161 = {161, ptf161, (uint16) 3, (uint16) 0, (char) 0}; /* STRING_ITERATION_CURSOR */ static EIF_TYPE_INDEX ptf162[] = {290,194,0xFFF7,291,194,0xFFFF}; static struct eif_par_types par162 = {162, ptf162, (uint16) 2, (uint16) 0, (char) 0}; /* PRIMES */ static EIF_TYPE_INDEX ptf163[] = {822,218,0xFFF7,406,218,0xFFFF}; static struct eif_par_types par163 = {163, ptf163, (uint16) 2, (uint16) 0, (char) 0}; /* FIBONACCI */ static EIF_TYPE_INDEX ptf164[] = {822,218,0xFFF7,406,218,0xFFFF}; static struct eif_par_types par164 = {164, ptf164, (uint16) 2, (uint16) 0, (char) 0}; /* REPEATABLE */ static EIF_TYPE_INDEX ptf165[] = {246,0xFF01,165,0xFFFF}; static struct eif_par_types par165 = 
{165, ptf165, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_32_ITERATION_CURSOR */ static EIF_TYPE_INDEX ptf166[] = {916,194,0xFF01,237,0xFFFF}; static struct eif_par_types par166 = {166, ptf166, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_8_ITERATION_CURSOR */ static EIF_TYPE_INDEX ptf167[] = {505,197,0xFF01,230,0xFFFF}; static struct eif_par_types par167 = {167, ptf167, (uint16) 1, (uint16) 0, (char) 0}; /* DISPOSABLE */ static EIF_TYPE_INDEX ptf168[] = {0,0xFFFF}; static struct eif_par_types par168 = {168, ptf168, (uint16) 1, (uint16) 0, (char) 0}; /* FILE_ITERATION_CURSOR */ static EIF_TYPE_INDEX ptf169[] = {168,0xFFF7,407,197,0xFFFF}; static struct eif_par_types par169 = {169, ptf169, (uint16) 2, (uint16) 0, (char) 0}; /* READ_WRITE_LOCK */ static EIF_TYPE_INDEX ptf170[] = {168,0xFFFF}; static struct eif_par_types par170 = {170, ptf170, (uint16) 1, (uint16) 0, (char) 0}; /* MANAGED_POINTER */ static EIF_TYPE_INDEX ptf171[] = {168,0xFFF7,123,0xFFFF}; static struct eif_par_types par171 = {171, ptf171, (uint16) 2, (uint16) 0, (char) 0}; /* MEMORY_STREAM */ static EIF_TYPE_INDEX ptf172[] = {168,0xFFFF}; static struct eif_par_types par172 = {172, ptf172, (uint16) 1, (uint16) 0, (char) 0}; /* MEMORY */ static EIF_TYPE_INDEX ptf173[] = {168,0xFFF7,120,0xFFFF}; static struct eif_par_types par173 = {173, ptf173, (uint16) 2, (uint16) 0, (char) 0}; /* CONDITION_VARIABLE */ static EIF_TYPE_INDEX ptf174[] = {168,0xFFFF}; static struct eif_par_types par174 = {174, ptf174, (uint16) 1, (uint16) 0, (char) 0}; /* DIRECTORY */ static EIF_TYPE_INDEX ptf175[] = {168,0xFFF7,150,0xFFFF}; static struct eif_par_types par175 = {175, ptf175, (uint16) 2, (uint16) 0, (char) 0}; /* SEMAPHORE */ static EIF_TYPE_INDEX ptf176[] = {168,0xFFFF}; static struct eif_par_types par176 = {176, ptf176, (uint16) 1, (uint16) 0, (char) 0}; /* MUTEX */ static EIF_TYPE_INDEX ptf177[] = {168,0xFFF7,42,0xFFFF}; static struct eif_par_types par177 = {177, ptf177, (uint16) 2, (uint16) 0, (char) 0}; /* 
IDENTIFIED */ static EIF_TYPE_INDEX ptf178[] = {168,0xFFF7,34,0xFFFF}; static struct eif_par_types par178 = {178, ptf178, (uint16) 2, (uint16) 0, (char) 0}; /* IO_MEDIUM */ static EIF_TYPE_INDEX ptf179[] = {168,0xFFF7,126,0xFFFF}; static struct eif_par_types par179 = {179, ptf179, (uint16) 2, (uint16) 0, (char) 0}; /* STREAM */ static EIF_TYPE_INDEX ptf180[] = {179,0xFFFF}; static struct eif_par_types par180 = {180, ptf180, (uint16) 1, (uint16) 0, (char) 0}; /* FILE */ static EIF_TYPE_INDEX ptf181[] = {521,197,0xFFF7,513,197,0xFFF7,179,0xFFF7,150,0xFFFF}; static struct eif_par_types par181 = {181, ptf181, (uint16) 4, (uint16) 0, (char) 0}; /* RAW_FILE */ static EIF_TYPE_INDEX ptf182[] = {181,0xFFFF}; static struct eif_par_types par182 = {182, ptf182, (uint16) 1, (uint16) 0, (char) 0}; /* PLAIN_TEXT_FILE */ static EIF_TYPE_INDEX ptf183[] = {181,0xFFFF}; static struct eif_par_types par183 = {183, ptf183, (uint16) 1, (uint16) 0, (char) 0}; /* HASHABLE */ static EIF_TYPE_INDEX ptf184[] = {0,0xFFFF}; static struct eif_par_types par184 = {184, ptf184, (uint16) 1, (uint16) 0, (char) 0}; /* PATH */ static EIF_TYPE_INDEX ptf185[] = {184,0xFFF7,107,0xFFF7,150,0xFFF7,137,0xFFFF}; static struct eif_par_types par185 = {185, ptf185, (uint16) 4, (uint16) 0, (char) 0}; /* TUPLE */ static EIF_TYPE_INDEX ptf186[] = {184,0xFFF7,156,0xFFF7,256,0xFF04,0,0xFFFF}; static struct eif_par_types par186 = {186, ptf186, (uint16) 3, (uint16) 0, (char) 0}; /* INTEGER_8_REF */ static EIF_TYPE_INDEX ptf187[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par187 = {187, ptf187, (uint16) 3, (uint16) 0, (char) 0}; /* INTEGER_8 */ static EIF_TYPE_INDEX ptf188[] = {189,0xFFFF}; static struct eif_par_types par188 = {188, ptf188, (uint16) 1, (uint16) 0, (char) 1}; /* reference INTEGER_8 */ static EIF_TYPE_INDEX ptf189[] = {187,0xFFFF}; static struct eif_par_types par189 = {189, ptf189, (uint16) 1, (uint16) 0, (char) 1}; /* REAL_32_REF */ static EIF_TYPE_INDEX ptf190[] = 
{141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par190 = {190, ptf190, (uint16) 3, (uint16) 0, (char) 0}; /* REAL_32 */ static EIF_TYPE_INDEX ptf191[] = {192,0xFFFF}; static struct eif_par_types par191 = {191, ptf191, (uint16) 1, (uint16) 0, (char) 1}; /* reference REAL_32 */ static EIF_TYPE_INDEX ptf192[] = {190,0xFFFF}; static struct eif_par_types par192 = {192, ptf192, (uint16) 1, (uint16) 0, (char) 1}; /* CHARACTER_32_REF */ static EIF_TYPE_INDEX ptf193[] = {107,0xFFF7,184,0xFFFF}; static struct eif_par_types par193 = {193, ptf193, (uint16) 2, (uint16) 0, (char) 0}; /* CHARACTER_32 */ static EIF_TYPE_INDEX ptf194[] = {195,0xFFFF}; static struct eif_par_types par194 = {194, ptf194, (uint16) 1, (uint16) 0, (char) 1}; /* reference CHARACTER_32 */ static EIF_TYPE_INDEX ptf195[] = {193,0xFFFF}; static struct eif_par_types par195 = {195, ptf195, (uint16) 1, (uint16) 0, (char) 1}; /* CHARACTER_8_REF */ static EIF_TYPE_INDEX ptf196[] = {107,0xFFF7,184,0xFFFF}; static struct eif_par_types par196 = {196, ptf196, (uint16) 2, (uint16) 0, (char) 0}; /* CHARACTER_8 */ static EIF_TYPE_INDEX ptf197[] = {198,0xFFFF}; static struct eif_par_types par197 = {197, ptf197, (uint16) 1, (uint16) 0, (char) 1}; /* reference CHARACTER_8 */ static EIF_TYPE_INDEX ptf198[] = {196,0xFFFF}; static struct eif_par_types par198 = {198, ptf198, (uint16) 1, (uint16) 0, (char) 1}; /* INTEGER_64_REF */ static EIF_TYPE_INDEX ptf199[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par199 = {199, ptf199, (uint16) 3, (uint16) 0, (char) 0}; /* INTEGER_64 */ static EIF_TYPE_INDEX ptf200[] = {201,0xFFFF}; static struct eif_par_types par200 = {200, ptf200, (uint16) 1, (uint16) 0, (char) 1}; /* reference INTEGER_64 */ static EIF_TYPE_INDEX ptf201[] = {199,0xFFFF}; static struct eif_par_types par201 = {201, ptf201, (uint16) 1, (uint16) 0, (char) 1}; /* BOOLEAN_REF */ static EIF_TYPE_INDEX ptf202[] = {184,0xFFFF}; static struct eif_par_types par202 = {202, ptf202, (uint16) 1, 
(uint16) 0, (char) 0}; /* BOOLEAN */ static EIF_TYPE_INDEX ptf203[] = {204,0xFFFF}; static struct eif_par_types par203 = {203, ptf203, (uint16) 1, (uint16) 0, (char) 1}; /* reference BOOLEAN */ static EIF_TYPE_INDEX ptf204[] = {202,0xFFFF}; static struct eif_par_types par204 = {204, ptf204, (uint16) 1, (uint16) 0, (char) 1}; /* REAL_64_REF */ static EIF_TYPE_INDEX ptf205[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par205 = {205, ptf205, (uint16) 3, (uint16) 0, (char) 0}; /* REAL_64 */ static EIF_TYPE_INDEX ptf206[] = {207,0xFFFF}; static struct eif_par_types par206 = {206, ptf206, (uint16) 1, (uint16) 0, (char) 1}; /* reference REAL_64 */ static EIF_TYPE_INDEX ptf207[] = {205,0xFFFF}; static struct eif_par_types par207 = {207, ptf207, (uint16) 1, (uint16) 0, (char) 1}; /* NATURAL_8_REF */ static EIF_TYPE_INDEX ptf208[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par208 = {208, ptf208, (uint16) 3, (uint16) 0, (char) 0}; /* NATURAL_8 */ static EIF_TYPE_INDEX ptf209[] = {210,0xFFFF}; static struct eif_par_types par209 = {209, ptf209, (uint16) 1, (uint16) 0, (char) 1}; /* reference NATURAL_8 */ static EIF_TYPE_INDEX ptf210[] = {208,0xFFFF}; static struct eif_par_types par210 = {210, ptf210, (uint16) 1, (uint16) 0, (char) 1}; /* NATURAL_32_REF */ static EIF_TYPE_INDEX ptf211[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par211 = {211, ptf211, (uint16) 3, (uint16) 0, (char) 0}; /* NATURAL_32 */ static EIF_TYPE_INDEX ptf212[] = {213,0xFFFF}; static struct eif_par_types par212 = {212, ptf212, (uint16) 1, (uint16) 0, (char) 1}; /* reference NATURAL_32 */ static EIF_TYPE_INDEX ptf213[] = {211,0xFFFF}; static struct eif_par_types par213 = {213, ptf213, (uint16) 1, (uint16) 0, (char) 1}; /* NATURAL_16_REF */ static EIF_TYPE_INDEX ptf214[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par214 = {214, ptf214, (uint16) 3, (uint16) 0, (char) 0}; /* NATURAL_16 */ static EIF_TYPE_INDEX ptf215[] = 
{216,0xFFFF}; static struct eif_par_types par215 = {215, ptf215, (uint16) 1, (uint16) 0, (char) 1}; /* reference NATURAL_16 */ static EIF_TYPE_INDEX ptf216[] = {214,0xFFFF}; static struct eif_par_types par216 = {216, ptf216, (uint16) 1, (uint16) 0, (char) 1}; /* INTEGER_32_REF */ static EIF_TYPE_INDEX ptf217[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par217 = {217, ptf217, (uint16) 3, (uint16) 0, (char) 0}; /* INTEGER_32 */ static EIF_TYPE_INDEX ptf218[] = {219,0xFFFF}; static struct eif_par_types par218 = {218, ptf218, (uint16) 1, (uint16) 0, (char) 1}; /* reference INTEGER_32 */ static EIF_TYPE_INDEX ptf219[] = {217,0xFFFF}; static struct eif_par_types par219 = {219, ptf219, (uint16) 1, (uint16) 0, (char) 1}; /* INTEGER_16_REF */ static EIF_TYPE_INDEX ptf220[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par220 = {220, ptf220, (uint16) 3, (uint16) 0, (char) 0}; /* INTEGER_16 */ static EIF_TYPE_INDEX ptf221[] = {222,0xFFFF}; static struct eif_par_types par221 = {221, ptf221, (uint16) 1, (uint16) 0, (char) 1}; /* reference INTEGER_16 */ static EIF_TYPE_INDEX ptf222[] = {220,0xFFFF}; static struct eif_par_types par222 = {222, ptf222, (uint16) 1, (uint16) 0, (char) 1}; /* NATURAL_64_REF */ static EIF_TYPE_INDEX ptf223[] = {141,0xFFF7,107,0xFFF7,184,0xFFFF}; static struct eif_par_types par223 = {223, ptf223, (uint16) 3, (uint16) 0, (char) 0}; /* NATURAL_64 */ static EIF_TYPE_INDEX ptf224[] = {225,0xFFFF}; static struct eif_par_types par224 = {224, ptf224, (uint16) 1, (uint16) 0, (char) 1}; /* reference NATURAL_64 */ static EIF_TYPE_INDEX ptf225[] = {223,0xFFFF}; static struct eif_par_types par225 = {225, ptf225, (uint16) 1, (uint16) 0, (char) 1}; /* POINTER_REF */ static EIF_TYPE_INDEX ptf226[] = {184,0xFFF7,41,0xFFFF}; static struct eif_par_types par226 = {226, ptf226, (uint16) 2, (uint16) 0, (char) 0}; /* POINTER */ static EIF_TYPE_INDEX ptf227[] = {228,0xFFFF}; static struct eif_par_types par227 = {227, ptf227, (uint16) 
1, (uint16) 0, (char) 1}; /* reference POINTER */ static EIF_TYPE_INDEX ptf228[] = {226,0xFFFF}; static struct eif_par_types par228 = {228, ptf228, (uint16) 1, (uint16) 0, (char) 1}; /* READABLE_STRING_GENERAL */ static EIF_TYPE_INDEX ptf229[] = {107,0xFFF7,184,0xFFF7,126,0xFFFF}; static struct eif_par_types par229 = {229, ptf229, (uint16) 3, (uint16) 0, (char) 0}; /* READABLE_STRING_8 */ static EIF_TYPE_INDEX ptf230[] = {229,0xFFF7,492,197,0xFFFF}; static struct eif_par_types par230 = {230, ptf230, (uint16) 2, (uint16) 0, (char) 0}; /* STRING_GENERAL */ static EIF_TYPE_INDEX ptf231[] = {229,0xFFFF}; static struct eif_par_types par231 = {231, ptf231, (uint16) 1, (uint16) 0, (char) 0}; /* STRING_8 */ static EIF_TYPE_INDEX ptf232[] = {230,0xFFF7,231,0xFFF7,518,197,218,0xFFF7,522,197,0xFFF7,524,197,0xFFF7,156,0xFFFF}; static struct eif_par_types par232 = {232, ptf232, (uint16) 6, (uint16) 0, (char) 0}; /* SEQ_STRING */ static EIF_TYPE_INDEX ptf233[] = {232,0xFFF7,513,197,0xFFFF}; static struct eif_par_types par233 = {233, ptf233, (uint16) 2, (uint16) 0, (char) 0}; /* PATH_NAME */ static EIF_TYPE_INDEX ptf234[] = {232,0xFFFF}; static struct eif_par_types par234 = {234, ptf234, (uint16) 1, (uint16) 0, (char) 0}; /* DIRECTORY_NAME */ static EIF_TYPE_INDEX ptf235[] = {234,0xFFFF}; static struct eif_par_types par235 = {235, ptf235, (uint16) 1, (uint16) 0, (char) 0}; /* FILE_NAME */ static EIF_TYPE_INDEX ptf236[] = {234,0xFFFF}; static struct eif_par_types par236 = {236, ptf236, (uint16) 1, (uint16) 0, (char) 0}; /* READABLE_STRING_32 */ static EIF_TYPE_INDEX ptf237[] = {229,0xFFF7,293,194,0xFFFF}; static struct eif_par_types par237 = {237, ptf237, (uint16) 2, (uint16) 0, (char) 0}; /* STRING_32 */ static EIF_TYPE_INDEX ptf238[] = {237,0xFFF7,231,0xFFF7,288,194,218,0xFFF7,910,194,0xFFF7,917,194,0xFFF7,156,0xFFFF}; static struct eif_par_types par238 = {238, ptf238, (uint16) 6, (uint16) 0, (char) 0}; /* IMMUTABLE_STRING_GENERAL */ static EIF_TYPE_INDEX ptf239[] = 
{229,0xFFFF}; static struct eif_par_types par239 = {239, ptf239, (uint16) 1, (uint16) 0, (char) 0}; /* IMMUTABLE_STRING_8 */ static EIF_TYPE_INDEX ptf240[] = {230,0xFFF7,239,0xFFFF}; static struct eif_par_types par240 = {240, ptf240, (uint16) 2, (uint16) 0, (char) 0}; /* IMMUTABLE_STRING_32 */ static EIF_TYPE_INDEX ptf241[] = {237,0xFFF7,239,0xFFF7,156,0xFFFF}; static struct eif_par_types par241 = {241, ptf241, (uint16) 3, (uint16) 0, (char) 0}; /* CONSOLE */ static EIF_TYPE_INDEX ptf242[] = {183,0xFFF7,0,0xFFFF}; static struct eif_par_types par242 = {242, ptf242, (uint16) 2, (uint16) 0, (char) 0}; /* BOOL_STRING */ static EIF_TYPE_INDEX ptf243[] = {815,203,0xFFF7,0,0xFFFF}; static struct eif_par_types par243 = {243, ptf243, (uint16) 2, (uint16) 0, (char) 0}; /* SED_MULTI_OBJECT_SERIALIZATION */ static EIF_TYPE_INDEX ptf244[] = {0,0xFFF7,36,0xFFFF}; static struct eif_par_types par244 = {244, ptf244, (uint16) 2, (uint16) 0, (char) 0}; /* TYPE [G#1] */ static EIF_TYPE_INDEX ptf245[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par245 = {245, ptf245, (uint16) 3, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf246[] = {0,0xFFFF}; static struct eif_par_types par246 = {246, ptf246, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [POINTER] */ static EIF_TYPE_INDEX ptf247[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par247 = {247, ptf247, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [G#1] */ static EIF_TYPE_INDEX ptf248[] = {271,0xFFF8,1,0xFFF7,273,0xFFF8,1,218,0xFFF7,279,0xFFF8,1,0xFFFF}; static struct eif_par_types par248 = {248, ptf248, (uint16) 3, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [G#1] */ static EIF_TYPE_INDEX ptf249[] = {0,0xFFFF}; static struct eif_par_types par249 = {249, ptf249, (uint16) 1, (uint16) 1, (char) 0}; /* PROCEDURE [G#1] */ static EIF_TYPE_INDEX ptf250[] = {254,0xFFF8,1,0xFFFF}; static struct eif_par_types par250 = {250, ptf250, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER 
[G#1] */ static EIF_TYPE_INDEX ptf251[] = {252,0xFFF8,1,0xFFFF}; static struct eif_par_types par251 = {251, ptf251, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [G#1] */ static EIF_TYPE_INDEX ptf252[] = {226,0xFFFF}; static struct eif_par_types par252 = {252, ptf252, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [ANY]] */ static EIF_TYPE_INDEX ptf253[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par253 = {253, ptf253, (uint16) 3, (uint16) 1, (char) 0}; /* ROUTINE [G#1] */ static EIF_TYPE_INDEX ptf254[] = {184,0xFFF7,129,0xFFF7,156,0xFFFF}; static struct eif_par_types par254 = {254, ptf254, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [G#1] */ static EIF_TYPE_INDEX ptf255[] = {257,0xFFF8,1,0xFFFF}; static struct eif_par_types par255 = {255, ptf255, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [G#1] */ static EIF_TYPE_INDEX ptf256[] = {257,0xFFF8,1,0xFFFF}; static struct eif_par_types par256 = {256, ptf256, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [G#1] */ static EIF_TYPE_INDEX ptf257[] = {0,0xFFFF}; static struct eif_par_types par257 = {257, ptf257, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf258[] = {246,0xFFF8,1,0xFFF7,257,0xFFF8,1,0xFFFF}; static struct eif_par_types par258 = {258, ptf258, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf259[] = {260,0xFFF8,1,0xFF01,256,0xFFF8,1,0xFFFF}; static struct eif_par_types par259 = {259, ptf259, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [G#1, G#2] */ static EIF_TYPE_INDEX ptf260[] = {258,0xFFF8,1,0xFFFF}; static struct eif_par_types par260 = {260, ptf260, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [G#1] */ static EIF_TYPE_INDEX ptf261[] = {262,0xFFF8,1,0xFFFF}; static struct eif_par_types par261 = {261, ptf261, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [G#1] */ static EIF_TYPE_INDEX ptf262[] = 
{255,0xFFF8,1,0xFFFF}; static struct eif_par_types par262 = {262, ptf262, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [G#1] */ static EIF_TYPE_INDEX ptf263[] = {269,0xFFF8,1,0xFFFF}; static struct eif_par_types par263 = {263, ptf263, (uint16) 1, (uint16) 1, (char) 0}; /* FUNCTION [G#1, BOOLEAN] */ static EIF_TYPE_INDEX ptf264[] = {254,0xFFF8,1,0xFFFF}; static struct eif_par_types par264 = {264, ptf264, (uint16) 1, (uint16) 2, (char) 0}; /* CURSOR_STRUCTURE [G#1] */ static EIF_TYPE_INDEX ptf265[] = {266,0xFFF8,1,0xFFFF}; static struct eif_par_types par265 = {265, ptf265, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [G#1] */ static EIF_TYPE_INDEX ptf266[] = {267,0xFFF8,1,0xFFFF}; static struct eif_par_types par266 = {266, ptf266, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [G#1] */ static EIF_TYPE_INDEX ptf267[] = {268,0xFFF8,1,0xFFFF}; static struct eif_par_types par267 = {267, ptf267, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [G#1] */ static EIF_TYPE_INDEX ptf268[] = {255,0xFFF8,1,0xFFFF}; static struct eif_par_types par268 = {268, ptf268, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [G#1] */ static EIF_TYPE_INDEX ptf269[] = {255,0xFFF8,1,0xFFFF}; static struct eif_par_types par269 = {269, ptf269, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [G#1] */ static EIF_TYPE_INDEX ptf270[] = {279,0xFFF8,1,0xFFF7,271,0xFFF8,1,0xFFF7,283,0xFFF8,1,0xFFF7,156,0xFFFF}; static struct eif_par_types par270 = {270, ptf270, (uint16) 4, (uint16) 1, (char) 0}; /* RESIZABLE [G#1] */ static EIF_TYPE_INDEX ptf271[] = {272,0xFFF8,1,0xFFFF}; static struct eif_par_types par271 = {271, ptf271, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [G#1] */ static EIF_TYPE_INDEX ptf272[] = {261,0xFFF8,1,0xFFFF}; static struct eif_par_types par272 = {272, ptf272, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf273[] = {274,0xFFF8,1,218,0xFFF7,256,0xFFF8,1,0xFFFF}; static struct eif_par_types par273 = {273, ptf273, (uint16) 2, (uint16) 2, 
(char) 0}; /* TABLE [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf274[] = {267,0xFFF8,1,0xFFFF}; static struct eif_par_types par274 = {274, ptf274, (uint16) 1, (uint16) 2, (char) 0}; /* ARRAY_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf275[] = {278,0xFFF8,1,0xFF01,248,0xFFF8,1,0xFFFF}; static struct eif_par_types par275 = {275, ptf275, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [G#1] */ static EIF_TYPE_INDEX ptf276[] = {139,0xFFF7,256,0xFFF8,1,0xFFFF}; static struct eif_par_types par276 = {276, ptf276, (uint16) 2, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf277[] = {278,0xFFF8,1,0xFF01,276,0xFFF8,1,0xFFFF}; static struct eif_par_types par277 = {277, ptf277, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [G#1, G#2] */ static EIF_TYPE_INDEX ptf278[] = {260,0xFFF8,1,0xFFF8,2,0xFFFF}; static struct eif_par_types par278 = {278, ptf278, (uint16) 1, (uint16) 2, (char) 0}; /* TO_SPECIAL [G#1] */ static EIF_TYPE_INDEX ptf279[] = {0,0xFFFF}; static struct eif_par_types par279 = {279, ptf279, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf280[] = {278,0xFFF8,1,0xFF01,270,0xFFF8,1,0xFFFF}; static struct eif_par_types par280 = {280, ptf280, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [G#1] */ static EIF_TYPE_INDEX ptf281[] = {266,0xFFF8,1,0xFFF7,282,0xFFF8,1,0xFFF7,261,0xFFF8,1,0xFFFF}; static struct eif_par_types par281 = {281, ptf281, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [G#1] */ static EIF_TYPE_INDEX ptf282[] = {263,0xFFF8,1,0xFFFF}; static struct eif_par_types par282 = {282, ptf282, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [G#1] */ static EIF_TYPE_INDEX ptf283[] = {284,0xFFF8,1,0xFFF7,286,0xFFF8,1,0xFFFF}; static struct eif_par_types par283 = {283, ptf283, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [G#1] */ static EIF_TYPE_INDEX ptf284[] = {285,0xFFF8,1,0xFFFF}; static struct eif_par_types par284 = {284, ptf284, (uint16) 1, 
(uint16) 1, (char) 0}; /* CHAIN [G#1] */ static EIF_TYPE_INDEX ptf285[] = {265,0xFFF8,1,0xFFF7,273,0xFFF8,1,218,0xFFF7,281,0xFFF8,1,0xFFFF}; static struct eif_par_types par285 = {285, ptf285, (uint16) 3, (uint16) 1, (char) 0}; /* DYNAMIC_CHAIN [G#1] */ static EIF_TYPE_INDEX ptf286[] = {285,0xFFF8,1,0xFFF7,287,0xFFF8,1,0xFFFF}; static struct eif_par_types par286 = {286, ptf286, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [G#1] */ static EIF_TYPE_INDEX ptf287[] = {261,0xFFF8,1,0xFFFF}; static struct eif_par_types par287 = {287, ptf287, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE [CHARACTER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf288[] = {289,194,218,0xFFF7,293,194,0xFFFF}; static struct eif_par_types par288 = {288, ptf288, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [CHARACTER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf289[] = {302,194,0xFFFF}; static struct eif_par_types par289 = {289, ptf289, (uint16) 1, (uint16) 2, (char) 0}; /* ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf290[] = {0,0xFFFF}; static struct eif_par_types par290 = {290, ptf290, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf291[] = {0,0xFFFF}; static struct eif_par_types par291 = {291, ptf291, (uint16) 1, (uint16) 1, (char) 0}; /* CONTAINER [CHARACTER_32] */ static EIF_TYPE_INDEX ptf292[] = {291,194,0xFFFF}; static struct eif_par_types par292 = {292, ptf292, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf293[] = {291,194,0xFFFF}; static struct eif_par_types par293 = {293, ptf293, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf294[] = {290,194,0xFFF7,291,194,0xFFFF}; static struct eif_par_types par294 = {294, ptf294, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf295[] = {296,194,0xFF01,293,194,0xFFFF}; static struct eif_par_types par295 = {295, 
ptf295, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [CHARACTER_32, G#2] */ static EIF_TYPE_INDEX ptf296[] = {294,194,0xFFFF}; static struct eif_par_types par296 = {296, ptf296, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf297[] = {298,194,0xFFFF}; static struct eif_par_types par297 = {297, ptf297, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [CHARACTER_32] */ static EIF_TYPE_INDEX ptf298[] = {292,194,0xFFFF}; static struct eif_par_types par298 = {298, ptf298, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf299[] = {304,194,0xFFFF}; static struct eif_par_types par299 = {299, ptf299, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf300[] = {301,194,0xFFFF}; static struct eif_par_types par300 = {300, ptf300, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf301[] = {302,194,0xFFFF}; static struct eif_par_types par301 = {301, ptf301, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [CHARACTER_32] */ static EIF_TYPE_INDEX ptf302[] = {303,194,0xFFFF}; static struct eif_par_types par302 = {302, ptf302, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [CHARACTER_32] */ static EIF_TYPE_INDEX ptf303[] = {292,194,0xFFFF}; static struct eif_par_types par303 = {303, ptf303, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf304[] = {292,194,0xFFFF}; static struct eif_par_types par304 = {304, ptf304, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [G#1] */ static EIF_TYPE_INDEX ptf305[] = {140,0xFFFF}; static struct eif_par_types par305 = {305, ptf305, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [REAL_64] */ static EIF_TYPE_INDEX ptf306[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par306 = {306, ptf306, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [REAL_32] */ static EIF_TYPE_INDEX ptf307[] = 
{184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par307 = {307, ptf307, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [NATURAL_8] */ static EIF_TYPE_INDEX ptf308[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par308 = {308, ptf308, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [NATURAL_16] */ static EIF_TYPE_INDEX ptf309[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par309 = {309, ptf309, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [NATURAL_32] */ static EIF_TYPE_INDEX ptf310[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par310 = {310, ptf310, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [NATURAL_64] */ static EIF_TYPE_INDEX ptf311[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par311 = {311, ptf311, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [INTEGER_8] */ static EIF_TYPE_INDEX ptf312[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par312 = {312, ptf312, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [INTEGER_16] */ static EIF_TYPE_INDEX ptf313[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par313 = {313, ptf313, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [INTEGER_32] */ static EIF_TYPE_INDEX ptf314[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par314 = {314, ptf314, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [INTEGER_64] */ static EIF_TYPE_INDEX ptf315[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par315 = {315, ptf315, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf316[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par316 = {316, ptf316, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf317[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par317 = {317, ptf317, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [BOOLEAN] */ static EIF_TYPE_INDEX ptf318[] = 
{184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par318 = {318, ptf318, (uint16) 3, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf319[] = {320,215,0xFF01,327,215,0xFFFF}; static struct eif_par_types par319 = {319, ptf319, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [NATURAL_16, G#2] */ static EIF_TYPE_INDEX ptf320[] = {321,215,0xFFF8,2,0xFFFF}; static struct eif_par_types par320 = {320, ptf320, (uint16) 1, (uint16) 2, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [NATURAL_16, G#2] */ static EIF_TYPE_INDEX ptf321[] = {325,215,0xFFFF}; static struct eif_par_types par321 = {321, ptf321, (uint16) 1, (uint16) 2, (char) 0}; /* READABLE_INDEXABLE [NATURAL_16] */ static EIF_TYPE_INDEX ptf322[] = {323,215,0xFFFF}; static struct eif_par_types par322 = {322, ptf322, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [NATURAL_16] */ static EIF_TYPE_INDEX ptf323[] = {0,0xFFFF}; static struct eif_par_types par323 = {323, ptf323, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf324[] = {0,0xFFFF}; static struct eif_par_types par324 = {324, ptf324, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf325[] = {324,215,0xFFF7,323,215,0xFFFF}; static struct eif_par_types par325 = {325, ptf325, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf326[] = {321,215,0xFF01,322,215,0xFFFF}; static struct eif_par_types par326 = {326, ptf326, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [NATURAL_16] */ static EIF_TYPE_INDEX ptf327[] = {139,0xFFF7,322,215,0xFFFF}; static struct eif_par_types par327 = {327, ptf327, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [NATURAL_16] */ static EIF_TYPE_INDEX ptf328[] = {0,0xFFFF}; static struct eif_par_types par328 = {328, ptf328, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [NATURAL_16] */ static 
EIF_TYPE_INDEX ptf329[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par329 = {329, ptf329, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [NATURAL_16] */ static EIF_TYPE_INDEX ptf330[] = {351,215,0xFFF7,347,215,218,0xFFF7,353,215,0xFFFF}; static struct eif_par_types par330 = {330, ptf330, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [NATURAL_16] */ static EIF_TYPE_INDEX ptf331[] = {323,215,0xFFFF}; static struct eif_par_types par331 = {331, ptf331, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [NATURAL_16] */ static EIF_TYPE_INDEX ptf332[] = {333,215,0xFFFF}; static struct eif_par_types par332 = {332, ptf332, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [NATURAL_16] */ static EIF_TYPE_INDEX ptf333[] = {331,215,0xFFFF}; static struct eif_par_types par333 = {333, ptf333, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [NATURAL_16] */ static EIF_TYPE_INDEX ptf334[] = {339,215,0xFFFF}; static struct eif_par_types par334 = {334, ptf334, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [NATURAL_16] */ static EIF_TYPE_INDEX ptf335[] = {336,215,0xFFFF}; static struct eif_par_types par335 = {335, ptf335, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [NATURAL_16] */ static EIF_TYPE_INDEX ptf336[] = {337,215,0xFFFF}; static struct eif_par_types par336 = {336, ptf336, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [NATURAL_16] */ static EIF_TYPE_INDEX ptf337[] = {338,215,0xFFFF}; static struct eif_par_types par337 = {337, ptf337, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [NATURAL_16] */ static EIF_TYPE_INDEX ptf338[] = {331,215,0xFFFF}; static struct eif_par_types par338 = {338, ptf338, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [NATURAL_16] */ static EIF_TYPE_INDEX ptf339[] = {331,215,0xFFFF}; static struct eif_par_types par339 = {339, ptf339, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [NATURAL_16] */ static EIF_TYPE_INDEX ptf340[] = {353,215,0xFFF7,351,215,0xFFF7,344,215,0xFFF7,156,0xFFFF}; static struct eif_par_types par340 = {340, 
ptf340, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf341[] = {320,215,0xFF01,340,215,0xFFFF}; static struct eif_par_types par341 = {341, ptf341, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [NATURAL_16] */ static EIF_TYPE_INDEX ptf342[] = {336,215,0xFFF7,343,215,0xFFF7,332,215,0xFFFF}; static struct eif_par_types par342 = {342, ptf342, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [NATURAL_16] */ static EIF_TYPE_INDEX ptf343[] = {334,215,0xFFFF}; static struct eif_par_types par343 = {343, ptf343, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [NATURAL_16] */ static EIF_TYPE_INDEX ptf344[] = {345,215,0xFFF7,349,215,0xFFFF}; static struct eif_par_types par344 = {344, ptf344, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [NATURAL_16] */ static EIF_TYPE_INDEX ptf345[] = {346,215,0xFFFF}; static struct eif_par_types par345 = {345, ptf345, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [NATURAL_16] */ static EIF_TYPE_INDEX ptf346[] = {335,215,0xFFF7,347,215,218,0xFFF7,342,215,0xFFFF}; static struct eif_par_types par346 = {346, ptf346, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [NATURAL_16, INTEGER_32] */ static EIF_TYPE_INDEX ptf347[] = {348,215,218,0xFFF7,322,215,0xFFFF}; static struct eif_par_types par347 = {347, ptf347, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [NATURAL_16, INTEGER_32] */ static EIF_TYPE_INDEX ptf348[] = {337,215,0xFFFF}; static struct eif_par_types par348 = {348, ptf348, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [NATURAL_16] */ static EIF_TYPE_INDEX ptf349[] = {346,215,0xFFF7,350,215,0xFFFF}; static struct eif_par_types par349 = {349, ptf349, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [NATURAL_16] */ static EIF_TYPE_INDEX ptf350[] = {332,215,0xFFFF}; static struct eif_par_types par350 = {350, ptf350, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [NATURAL_16] */ static EIF_TYPE_INDEX ptf351[] = {352,215,0xFFFF}; static struct eif_par_types par351 = {351, ptf351, 
(uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [NATURAL_16] */ static EIF_TYPE_INDEX ptf352[] = {332,215,0xFFFF}; static struct eif_par_types par352 = {352, ptf352, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [NATURAL_16] */ static EIF_TYPE_INDEX ptf353[] = {0,0xFFFF}; static struct eif_par_types par353 = {353, ptf353, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [NATURAL_16] */ static EIF_TYPE_INDEX ptf354[] = {320,215,0xFF01,330,215,0xFFFF}; static struct eif_par_types par354 = {354, ptf354, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY [POINTER] */ static EIF_TYPE_INDEX ptf355[] = {387,227,0xFFF7,383,227,218,0xFFF7,389,227,0xFFFF}; static struct eif_par_types par355 = {355, ptf355, (uint16) 3, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [POINTER] */ static EIF_TYPE_INDEX ptf356[] = {0,0xFFFF}; static struct eif_par_types par356 = {356, ptf356, (uint16) 1, (uint16) 1, (char) 0}; /* CONTAINER [POINTER] */ static EIF_TYPE_INDEX ptf357[] = {360,227,0xFFFF}; static struct eif_par_types par357 = {357, ptf357, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf358[] = {0,0xFFFF}; static struct eif_par_types par358 = {358, ptf358, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [POINTER] */ static EIF_TYPE_INDEX ptf359[] = {360,227,0xFFFF}; static struct eif_par_types par359 = {359, ptf359, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [POINTER] */ static EIF_TYPE_INDEX ptf360[] = {0,0xFFFF}; static struct eif_par_types par360 = {360, ptf360, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf361[] = {358,227,0xFFF7,360,227,0xFFFF}; static struct eif_par_types par361 = {361, ptf361, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf362[] = {363,227,0xFF01,359,227,0xFFFF}; static struct eif_par_types par362 = {362, ptf362, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR 
[POINTER, G#2] */ static EIF_TYPE_INDEX ptf363[] = {361,227,0xFFFF}; static struct eif_par_types par363 = {363, ptf363, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [POINTER] */ static EIF_TYPE_INDEX ptf364[] = {365,227,0xFFFF}; static struct eif_par_types par364 = {364, ptf364, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [POINTER] */ static EIF_TYPE_INDEX ptf365[] = {357,227,0xFFFF}; static struct eif_par_types par365 = {365, ptf365, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [POINTER] */ static EIF_TYPE_INDEX ptf366[] = {371,227,0xFFFF}; static struct eif_par_types par366 = {366, ptf366, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [POINTER] */ static EIF_TYPE_INDEX ptf367[] = {368,227,0xFFFF}; static struct eif_par_types par367 = {367, ptf367, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [POINTER] */ static EIF_TYPE_INDEX ptf368[] = {369,227,0xFFFF}; static struct eif_par_types par368 = {368, ptf368, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [POINTER] */ static EIF_TYPE_INDEX ptf369[] = {370,227,0xFFFF}; static struct eif_par_types par369 = {369, ptf369, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [POINTER] */ static EIF_TYPE_INDEX ptf370[] = {357,227,0xFFFF}; static struct eif_par_types par370 = {370, ptf370, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [POINTER] */ static EIF_TYPE_INDEX ptf371[] = {357,227,0xFFFF}; static struct eif_par_types par371 = {371, ptf371, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [POINTER] */ static EIF_TYPE_INDEX ptf372[] = {389,227,0xFFF7,387,227,0xFFF7,380,227,0xFFF7,156,0xFFFF}; static struct eif_par_types par372 = {372, ptf372, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf373[] = {374,227,0xFF01,372,227,0xFFFF}; static struct eif_par_types par373 = {373, ptf373, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [POINTER, G#2] */ static EIF_TYPE_INDEX ptf374[] = {363,227,0xFFF8,2,0xFFFF}; static struct eif_par_types 
par374 = {374, ptf374, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL [POINTER] */ static EIF_TYPE_INDEX ptf375[] = {139,0xFFF7,359,227,0xFFFF}; static struct eif_par_types par375 = {375, ptf375, (uint16) 2, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf376[] = {374,227,0xFF01,375,227,0xFFFF}; static struct eif_par_types par376 = {376, ptf376, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [POINTER] */ static EIF_TYPE_INDEX ptf377[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par377 = {377, ptf377, (uint16) 3, (uint16) 1, (char) 0}; /* SEQUENCE [POINTER] */ static EIF_TYPE_INDEX ptf378[] = {368,227,0xFFF7,379,227,0xFFF7,364,227,0xFFFF}; static struct eif_par_types par378 = {378, ptf378, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [POINTER] */ static EIF_TYPE_INDEX ptf379[] = {366,227,0xFFFF}; static struct eif_par_types par379 = {379, ptf379, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [POINTER] */ static EIF_TYPE_INDEX ptf380[] = {381,227,0xFFF7,385,227,0xFFFF}; static struct eif_par_types par380 = {380, ptf380, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [POINTER] */ static EIF_TYPE_INDEX ptf381[] = {382,227,0xFFFF}; static struct eif_par_types par381 = {381, ptf381, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [POINTER] */ static EIF_TYPE_INDEX ptf382[] = {367,227,0xFFF7,383,227,218,0xFFF7,378,227,0xFFFF}; static struct eif_par_types par382 = {382, ptf382, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [POINTER, INTEGER_32] */ static EIF_TYPE_INDEX ptf383[] = {384,227,218,0xFFF7,359,227,0xFFFF}; static struct eif_par_types par383 = {383, ptf383, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [POINTER, INTEGER_32] */ static EIF_TYPE_INDEX ptf384[] = {369,227,0xFFFF}; static struct eif_par_types par384 = {384, ptf384, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [POINTER] */ static EIF_TYPE_INDEX ptf385[] = {382,227,0xFFF7,386,227,0xFFFF}; static struct eif_par_types par385 = {385, ptf385, 
(uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [POINTER] */ static EIF_TYPE_INDEX ptf386[] = {364,227,0xFFFF}; static struct eif_par_types par386 = {386, ptf386, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [POINTER] */ static EIF_TYPE_INDEX ptf387[] = {388,227,0xFFFF}; static struct eif_par_types par387 = {387, ptf387, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [POINTER] */ static EIF_TYPE_INDEX ptf388[] = {364,227,0xFFFF}; static struct eif_par_types par388 = {388, ptf388, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [POINTER] */ static EIF_TYPE_INDEX ptf389[] = {0,0xFFFF}; static struct eif_par_types par389 = {389, ptf389, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [POINTER] */ static EIF_TYPE_INDEX ptf390[] = {374,227,0xFF01,355,227,0xFFFF}; static struct eif_par_types par390 = {390, ptf390, (uint16) 1, (uint16) 1, (char) 0}; /* CONTAINER [NATURAL_32] */ static EIF_TYPE_INDEX ptf391[] = {394,212,0xFFFF}; static struct eif_par_types par391 = {391, ptf391, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf392[] = {0,0xFFFF}; static struct eif_par_types par392 = {392, ptf392, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [NATURAL_32] */ static EIF_TYPE_INDEX ptf393[] = {394,212,0xFFFF}; static struct eif_par_types par393 = {393, ptf393, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [NATURAL_32] */ static EIF_TYPE_INDEX ptf394[] = {0,0xFFFF}; static struct eif_par_types par394 = {394, ptf394, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf395[] = {392,212,0xFFF7,394,212,0xFFFF}; static struct eif_par_types par395 = {395, ptf395, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf396[] = {397,212,0xFF01,393,212,0xFFFF}; static struct eif_par_types par396 = {396, ptf396, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [NATURAL_32, G#2] */ 
static EIF_TYPE_INDEX ptf397[] = {395,212,0xFFFF}; static struct eif_par_types par397 = {397, ptf397, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [NATURAL_32] */ static EIF_TYPE_INDEX ptf398[] = {399,212,0xFFFF}; static struct eif_par_types par398 = {398, ptf398, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [NATURAL_32] */ static EIF_TYPE_INDEX ptf399[] = {391,212,0xFFFF}; static struct eif_par_types par399 = {399, ptf399, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [NATURAL_32] */ static EIF_TYPE_INDEX ptf400[] = {405,212,0xFFFF}; static struct eif_par_types par400 = {400, ptf400, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [NATURAL_32] */ static EIF_TYPE_INDEX ptf401[] = {402,212,0xFFFF}; static struct eif_par_types par401 = {401, ptf401, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [NATURAL_32] */ static EIF_TYPE_INDEX ptf402[] = {403,212,0xFFFF}; static struct eif_par_types par402 = {402, ptf402, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [NATURAL_32] */ static EIF_TYPE_INDEX ptf403[] = {404,212,0xFFFF}; static struct eif_par_types par403 = {403, ptf403, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [NATURAL_32] */ static EIF_TYPE_INDEX ptf404[] = {391,212,0xFFFF}; static struct eif_par_types par404 = {404, ptf404, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [NATURAL_32] */ static EIF_TYPE_INDEX ptf405[] = {391,212,0xFFFF}; static struct eif_par_types par405 = {405, ptf405, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf406[] = {0,0xFFFF}; static struct eif_par_types par406 = {406, ptf406, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf407[] = {0,0xFFFF}; static struct eif_par_types par407 = {407, ptf407, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_QUEUE [G#1] */ static EIF_TYPE_INDEX ptf408[] = {410,0xFFF8,1,0xFFF7,271,0xFFF8,1,0xFFF7,156,0xFFFF}; static struct eif_par_types par408 = {408, ptf408, (uint16) 3, (uint16) 1, (char) 0}; /* 
ARRAYED_QUEUE_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf409[] = {246,0xFFF8,1,0xFFFF}; static struct eif_par_types par409 = {409, ptf409, (uint16) 1, (uint16) 1, (char) 0}; /* QUEUE [G#1] */ static EIF_TYPE_INDEX ptf410[] = {411,0xFFF8,1,0xFFFF}; static struct eif_par_types par410 = {410, ptf410, (uint16) 1, (uint16) 1, (char) 0}; /* DISPENSER [G#1] */ static EIF_TYPE_INDEX ptf411[] = {266,0xFFF8,1,0xFFF7,261,0xFFF8,1,0xFFFF}; static struct eif_par_types par411 = {411, ptf411, (uint16) 2, (uint16) 1, (char) 0}; /* HASH_TABLE [G#1, G#2] */ static EIF_TYPE_INDEX ptf412[] = {287,0xFFF8,1,0xFFF7,413,0xFFF8,1,0xFFF8,2,0xFFF7,416,0xFFF8,1,0xFFF8,2,0xFFF7,256,0xFFF8,1,0xFFF7,156,0xFFFF}; static struct eif_par_types par412 = {412, ptf412, (uint16) 5, (uint16) 2, (char) 0}; /* TABLE [G#1, G#2] */ static EIF_TYPE_INDEX ptf413[] = {267,0xFFF8,1,0xFFFF}; static struct eif_par_types par413 = {413, ptf413, (uint16) 1, (uint16) 2, (char) 0}; /* HASH_TABLE_ITERATION_CURSOR [G#1, G#2] */ static EIF_TYPE_INDEX ptf414[] = {259,0xFFF8,1,0xFFF7,415,0xFFF8,1,0xFFF8,2,0xFFFF}; static struct eif_par_types par414 = {414, ptf414, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE_ITERATION_CURSOR [G#1, G#2] */ static EIF_TYPE_INDEX ptf415[] = {246,0xFFF8,1,0xFFFF}; static struct eif_par_types par415 = {415, ptf415, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERABLE [G#1, G#2] */ static EIF_TYPE_INDEX ptf416[] = {257,0xFFF8,1,0xFFFF}; static struct eif_par_types par416 = {416, ptf416, (uint16) 1, (uint16) 2, (char) 0}; /* ARRAY [REAL_32] */ static EIF_TYPE_INDEX ptf417[] = {449,191,0xFFF7,445,191,218,0xFFF7,451,191,0xFFFF}; static struct eif_par_types par417 = {417, ptf417, (uint16) 3, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [REAL_32] */ static EIF_TYPE_INDEX ptf418[] = {0,0xFFFF}; static struct eif_par_types par418 = {418, ptf418, (uint16) 1, (uint16) 1, (char) 0}; /* CONTAINER [REAL_32] */ static EIF_TYPE_INDEX ptf419[] = {422,191,0xFFFF}; static struct eif_par_types par419 = {419, 
ptf419, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf420[] = {0,0xFFFF}; static struct eif_par_types par420 = {420, ptf420, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [REAL_32] */ static EIF_TYPE_INDEX ptf421[] = {422,191,0xFFFF}; static struct eif_par_types par421 = {421, ptf421, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [REAL_32] */ static EIF_TYPE_INDEX ptf422[] = {0,0xFFFF}; static struct eif_par_types par422 = {422, ptf422, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf423[] = {420,191,0xFFF7,422,191,0xFFFF}; static struct eif_par_types par423 = {423, ptf423, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf424[] = {425,191,0xFF01,421,191,0xFFFF}; static struct eif_par_types par424 = {424, ptf424, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [REAL_32, G#2] */ static EIF_TYPE_INDEX ptf425[] = {423,191,0xFFFF}; static struct eif_par_types par425 = {425, ptf425, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [REAL_32] */ static EIF_TYPE_INDEX ptf426[] = {427,191,0xFFFF}; static struct eif_par_types par426 = {426, ptf426, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [REAL_32] */ static EIF_TYPE_INDEX ptf427[] = {419,191,0xFFFF}; static struct eif_par_types par427 = {427, ptf427, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [REAL_32] */ static EIF_TYPE_INDEX ptf428[] = {433,191,0xFFFF}; static struct eif_par_types par428 = {428, ptf428, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [REAL_32] */ static EIF_TYPE_INDEX ptf429[] = {430,191,0xFFFF}; static struct eif_par_types par429 = {429, ptf429, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [REAL_32] */ static EIF_TYPE_INDEX ptf430[] = {431,191,0xFFFF}; static struct eif_par_types par430 = {430, ptf430, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [REAL_32] */ static EIF_TYPE_INDEX ptf431[] = 
{432,191,0xFFFF}; static struct eif_par_types par431 = {431, ptf431, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [REAL_32] */ static EIF_TYPE_INDEX ptf432[] = {419,191,0xFFFF}; static struct eif_par_types par432 = {432, ptf432, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [REAL_32] */ static EIF_TYPE_INDEX ptf433[] = {419,191,0xFFFF}; static struct eif_par_types par433 = {433, ptf433, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [REAL_32] */ static EIF_TYPE_INDEX ptf434[] = {451,191,0xFFF7,449,191,0xFFF7,442,191,0xFFF7,156,0xFFFF}; static struct eif_par_types par434 = {434, ptf434, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf435[] = {436,191,0xFF01,434,191,0xFFFF}; static struct eif_par_types par435 = {435, ptf435, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [REAL_32, G#2] */ static EIF_TYPE_INDEX ptf436[] = {425,191,0xFFF8,2,0xFFFF}; static struct eif_par_types par436 = {436, ptf436, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL [REAL_32] */ static EIF_TYPE_INDEX ptf437[] = {139,0xFFF7,421,191,0xFFFF}; static struct eif_par_types par437 = {437, ptf437, (uint16) 2, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf438[] = {436,191,0xFF01,437,191,0xFFFF}; static struct eif_par_types par438 = {438, ptf438, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [REAL_32] */ static EIF_TYPE_INDEX ptf439[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par439 = {439, ptf439, (uint16) 3, (uint16) 1, (char) 0}; /* SEQUENCE [REAL_32] */ static EIF_TYPE_INDEX ptf440[] = {430,191,0xFFF7,441,191,0xFFF7,426,191,0xFFFF}; static struct eif_par_types par440 = {440, ptf440, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [REAL_32] */ static EIF_TYPE_INDEX ptf441[] = {428,191,0xFFFF}; static struct eif_par_types par441 = {441, ptf441, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [REAL_32] */ static EIF_TYPE_INDEX ptf442[] = 
{443,191,0xFFF7,447,191,0xFFFF}; static struct eif_par_types par442 = {442, ptf442, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [REAL_32] */ static EIF_TYPE_INDEX ptf443[] = {444,191,0xFFFF}; static struct eif_par_types par443 = {443, ptf443, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [REAL_32] */ static EIF_TYPE_INDEX ptf444[] = {429,191,0xFFF7,445,191,218,0xFFF7,440,191,0xFFFF}; static struct eif_par_types par444 = {444, ptf444, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [REAL_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf445[] = {446,191,218,0xFFF7,421,191,0xFFFF}; static struct eif_par_types par445 = {445, ptf445, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [REAL_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf446[] = {431,191,0xFFFF}; static struct eif_par_types par446 = {446, ptf446, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [REAL_32] */ static EIF_TYPE_INDEX ptf447[] = {444,191,0xFFF7,448,191,0xFFFF}; static struct eif_par_types par447 = {447, ptf447, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [REAL_32] */ static EIF_TYPE_INDEX ptf448[] = {426,191,0xFFFF}; static struct eif_par_types par448 = {448, ptf448, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [REAL_32] */ static EIF_TYPE_INDEX ptf449[] = {450,191,0xFFFF}; static struct eif_par_types par449 = {449, ptf449, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [REAL_32] */ static EIF_TYPE_INDEX ptf450[] = {426,191,0xFFFF}; static struct eif_par_types par450 = {450, ptf450, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [REAL_32] */ static EIF_TYPE_INDEX ptf451[] = {0,0xFFFF}; static struct eif_par_types par451 = {451, ptf451, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [REAL_32] */ static EIF_TYPE_INDEX ptf452[] = {436,191,0xFF01,417,191,0xFFFF}; static struct eif_par_types par452 = {452, ptf452, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY [NATURAL_8] */ static EIF_TYPE_INDEX ptf453[] = {485,209,0xFFF7,481,209,218,0xFFF7,487,209,0xFFFF}; static struct eif_par_types par453 = 
{453, ptf453, (uint16) 3, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [NATURAL_8] */ static EIF_TYPE_INDEX ptf454[] = {0,0xFFFF}; static struct eif_par_types par454 = {454, ptf454, (uint16) 1, (uint16) 1, (char) 0}; /* CONTAINER [NATURAL_8] */ static EIF_TYPE_INDEX ptf455[] = {458,209,0xFFFF}; static struct eif_par_types par455 = {455, ptf455, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf456[] = {0,0xFFFF}; static struct eif_par_types par456 = {456, ptf456, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [NATURAL_8] */ static EIF_TYPE_INDEX ptf457[] = {458,209,0xFFFF}; static struct eif_par_types par457 = {457, ptf457, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [NATURAL_8] */ static EIF_TYPE_INDEX ptf458[] = {0,0xFFFF}; static struct eif_par_types par458 = {458, ptf458, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf459[] = {456,209,0xFFF7,458,209,0xFFFF}; static struct eif_par_types par459 = {459, ptf459, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf460[] = {461,209,0xFF01,457,209,0xFFFF}; static struct eif_par_types par460 = {460, ptf460, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [NATURAL_8, G#2] */ static EIF_TYPE_INDEX ptf461[] = {459,209,0xFFFF}; static struct eif_par_types par461 = {461, ptf461, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [NATURAL_8] */ static EIF_TYPE_INDEX ptf462[] = {463,209,0xFFFF}; static struct eif_par_types par462 = {462, ptf462, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [NATURAL_8] */ static EIF_TYPE_INDEX ptf463[] = {455,209,0xFFFF}; static struct eif_par_types par463 = {463, ptf463, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [NATURAL_8] */ static EIF_TYPE_INDEX ptf464[] = {469,209,0xFFFF}; static struct eif_par_types par464 = {464, ptf464, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [NATURAL_8] */ static 
EIF_TYPE_INDEX ptf465[] = {466,209,0xFFFF}; static struct eif_par_types par465 = {465, ptf465, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [NATURAL_8] */ static EIF_TYPE_INDEX ptf466[] = {467,209,0xFFFF}; static struct eif_par_types par466 = {466, ptf466, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [NATURAL_8] */ static EIF_TYPE_INDEX ptf467[] = {468,209,0xFFFF}; static struct eif_par_types par467 = {467, ptf467, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [NATURAL_8] */ static EIF_TYPE_INDEX ptf468[] = {455,209,0xFFFF}; static struct eif_par_types par468 = {468, ptf468, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [NATURAL_8] */ static EIF_TYPE_INDEX ptf469[] = {455,209,0xFFFF}; static struct eif_par_types par469 = {469, ptf469, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [NATURAL_8] */ static EIF_TYPE_INDEX ptf470[] = {487,209,0xFFF7,485,209,0xFFF7,478,209,0xFFF7,156,0xFFFF}; static struct eif_par_types par470 = {470, ptf470, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf471[] = {472,209,0xFF01,470,209,0xFFFF}; static struct eif_par_types par471 = {471, ptf471, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [NATURAL_8, G#2] */ static EIF_TYPE_INDEX ptf472[] = {461,209,0xFFF8,2,0xFFFF}; static struct eif_par_types par472 = {472, ptf472, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL [NATURAL_8] */ static EIF_TYPE_INDEX ptf473[] = {139,0xFFF7,457,209,0xFFFF}; static struct eif_par_types par473 = {473, ptf473, (uint16) 2, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf474[] = {472,209,0xFF01,473,209,0xFFFF}; static struct eif_par_types par474 = {474, ptf474, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [NATURAL_8] */ static EIF_TYPE_INDEX ptf475[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par475 = {475, ptf475, (uint16) 3, (uint16) 1, (char) 0}; /* SEQUENCE [NATURAL_8] */ static EIF_TYPE_INDEX ptf476[] 
= {466,209,0xFFF7,477,209,0xFFF7,462,209,0xFFFF}; static struct eif_par_types par476 = {476, ptf476, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [NATURAL_8] */ static EIF_TYPE_INDEX ptf477[] = {464,209,0xFFFF}; static struct eif_par_types par477 = {477, ptf477, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [NATURAL_8] */ static EIF_TYPE_INDEX ptf478[] = {479,209,0xFFF7,483,209,0xFFFF}; static struct eif_par_types par478 = {478, ptf478, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [NATURAL_8] */ static EIF_TYPE_INDEX ptf479[] = {480,209,0xFFFF}; static struct eif_par_types par479 = {479, ptf479, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [NATURAL_8] */ static EIF_TYPE_INDEX ptf480[] = {465,209,0xFFF7,481,209,218,0xFFF7,476,209,0xFFFF}; static struct eif_par_types par480 = {480, ptf480, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [NATURAL_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf481[] = {482,209,218,0xFFF7,457,209,0xFFFF}; static struct eif_par_types par481 = {481, ptf481, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [NATURAL_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf482[] = {467,209,0xFFFF}; static struct eif_par_types par482 = {482, ptf482, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [NATURAL_8] */ static EIF_TYPE_INDEX ptf483[] = {480,209,0xFFF7,484,209,0xFFFF}; static struct eif_par_types par483 = {483, ptf483, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [NATURAL_8] */ static EIF_TYPE_INDEX ptf484[] = {462,209,0xFFFF}; static struct eif_par_types par484 = {484, ptf484, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [NATURAL_8] */ static EIF_TYPE_INDEX ptf485[] = {486,209,0xFFFF}; static struct eif_par_types par485 = {485, ptf485, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [NATURAL_8] */ static EIF_TYPE_INDEX ptf486[] = {462,209,0xFFFF}; static struct eif_par_types par486 = {486, ptf486, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [NATURAL_8] */ static EIF_TYPE_INDEX ptf487[] = {0,0xFFFF}; static struct eif_par_types par487 = 
{487, ptf487, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [NATURAL_8] */ static EIF_TYPE_INDEX ptf488[] = {472,209,0xFF01,453,209,0xFFFF}; static struct eif_par_types par488 = {488, ptf488, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [FILE_UTILITIES] */ static EIF_TYPE_INDEX ptf489[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par489 = {489, ptf489, (uint16) 3, (uint16) 1, (char) 0}; /* TYPE [UTF_CONVERTER] */ static EIF_TYPE_INDEX ptf490[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par490 = {490, ptf490, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [CHARACTER_8] */ static EIF_TYPE_INDEX ptf491[] = {493,197,0xFFFF}; static struct eif_par_types par491 = {491, ptf491, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf492[] = {493,197,0xFFFF}; static struct eif_par_types par492 = {492, ptf492, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf493[] = {0,0xFFFF}; static struct eif_par_types par493 = {493, ptf493, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf494[] = {407,197,0xFFF7,493,197,0xFFFF}; static struct eif_par_types par494 = {494, ptf494, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf495[] = {496,197,0xFF01,492,197,0xFFFF}; static struct eif_par_types par495 = {495, ptf495, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [CHARACTER_8, G#2] */ static EIF_TYPE_INDEX ptf496[] = {494,197,0xFFFF}; static struct eif_par_types par496 = {496, ptf496, (uint16) 1, (uint16) 2, (char) 0}; /* FINITE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf497[] = {498,197,0xFFFF}; static struct eif_par_types par497 = {497, ptf497, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [CHARACTER_8] */ static EIF_TYPE_INDEX ptf498[] = {491,197,0xFFFF}; static struct eif_par_types par498 = {498, 
ptf498, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf499[] = {504,197,0xFFFF}; static struct eif_par_types par499 = {499, ptf499, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf500[] = {501,197,0xFFFF}; static struct eif_par_types par500 = {500, ptf500, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf501[] = {502,197,0xFFFF}; static struct eif_par_types par501 = {501, ptf501, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [CHARACTER_8] */ static EIF_TYPE_INDEX ptf502[] = {503,197,0xFFFF}; static struct eif_par_types par502 = {502, ptf502, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [CHARACTER_8] */ static EIF_TYPE_INDEX ptf503[] = {491,197,0xFFFF}; static struct eif_par_types par503 = {503, ptf503, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf504[] = {491,197,0xFFFF}; static struct eif_par_types par504 = {504, ptf504, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [CHARACTER_8, G#2] */ static EIF_TYPE_INDEX ptf505[] = {496,197,0xFFF8,2,0xFFFF}; static struct eif_par_types par505 = {505, ptf505, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL [CHARACTER_8] */ static EIF_TYPE_INDEX ptf506[] = {139,0xFFF7,492,197,0xFFFF}; static struct eif_par_types par506 = {506, ptf506, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [CHARACTER_8] */ static EIF_TYPE_INDEX ptf507[] = {0,0xFFFF}; static struct eif_par_types par507 = {507, ptf507, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf508[] = {505,197,0xFF01,506,197,0xFFFF}; static struct eif_par_types par508 = {508, ptf508, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf509[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par509 = {509, ptf509, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [CHARACTER_8] */ static 
EIF_TYPE_INDEX ptf510[] = {522,197,0xFFF7,518,197,218,0xFFF7,524,197,0xFFFF}; static struct eif_par_types par510 = {510, ptf510, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAYED_LIST [CHARACTER_8] */ static EIF_TYPE_INDEX ptf511[] = {524,197,0xFFF7,522,197,0xFFF7,515,197,0xFFF7,156,0xFFFF}; static struct eif_par_types par511 = {511, ptf511, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf512[] = {505,197,0xFF01,511,197,0xFFFF}; static struct eif_par_types par512 = {512, ptf512, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf513[] = {501,197,0xFFF7,514,197,0xFFF7,497,197,0xFFFF}; static struct eif_par_types par513 = {513, ptf513, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf514[] = {499,197,0xFFFF}; static struct eif_par_types par514 = {514, ptf514, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [CHARACTER_8] */ static EIF_TYPE_INDEX ptf515[] = {516,197,0xFFF7,520,197,0xFFFF}; static struct eif_par_types par515 = {515, ptf515, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [CHARACTER_8] */ static EIF_TYPE_INDEX ptf516[] = {517,197,0xFFFF}; static struct eif_par_types par516 = {516, ptf516, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [CHARACTER_8] */ static EIF_TYPE_INDEX ptf517[] = {500,197,0xFFF7,518,197,218,0xFFF7,513,197,0xFFFF}; static struct eif_par_types par517 = {517, ptf517, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [CHARACTER_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf518[] = {519,197,218,0xFFF7,492,197,0xFFFF}; static struct eif_par_types par518 = {518, ptf518, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [CHARACTER_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf519[] = {502,197,0xFFFF}; static struct eif_par_types par519 = {519, ptf519, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [CHARACTER_8] */ static EIF_TYPE_INDEX ptf520[] = {517,197,0xFFF7,521,197,0xFFFF}; static struct eif_par_types par520 = {520, 
ptf520, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [CHARACTER_8] */ static EIF_TYPE_INDEX ptf521[] = {497,197,0xFFFF}; static struct eif_par_types par521 = {521, ptf521, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf522[] = {523,197,0xFFFF}; static struct eif_par_types par522 = {522, ptf522, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [CHARACTER_8] */ static EIF_TYPE_INDEX ptf523[] = {497,197,0xFFFF}; static struct eif_par_types par523 = {523, ptf523, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [CHARACTER_8] */ static EIF_TYPE_INDEX ptf524[] = {0,0xFFFF}; static struct eif_par_types par524 = {524, ptf524, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf525[] = {505,197,0xFF01,510,197,0xFFFF}; static struct eif_par_types par525 = {525, ptf525, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [REAL_64] */ static EIF_TYPE_INDEX ptf526[] = {140,0xFFFF}; static struct eif_par_types par526 = {526, ptf526, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [REAL_64] */ static EIF_TYPE_INDEX ptf527[] = {528,206,0xFFFF}; static struct eif_par_types par527 = {527, ptf527, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [REAL_64] */ static EIF_TYPE_INDEX ptf528[] = {226,0xFFFF}; static struct eif_par_types par528 = {528, ptf528, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [REAL_64]] */ static EIF_TYPE_INDEX ptf529[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par529 = {529, ptf529, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [INTEGER_32] */ static EIF_TYPE_INDEX ptf530[] = {140,0xFFFF}; static struct eif_par_types par530 = {530, ptf530, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [INTEGER_32] */ static EIF_TYPE_INDEX ptf531[] = {532,218,0xFFFF}; static struct eif_par_types par531 = {531, ptf531, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [INTEGER_32] */ static 
EIF_TYPE_INDEX ptf532[] = {226,0xFFFF}; static struct eif_par_types par532 = {532, ptf532, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [INTEGER_32]] */ static EIF_TYPE_INDEX ptf533[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par533 = {533, ptf533, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [G#1] */ static EIF_TYPE_INDEX ptf534[] = {140,0xFFFF}; static struct eif_par_types par534 = {534, ptf534, (uint16) 1, (uint16) 1, (char) 0}; /* HASH_TABLE [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf535[] = {287,0xFFF8,1,0xFFF7,274,0xFFF8,1,218,0xFFF7,573,0xFFF8,1,218,0xFFF7,256,0xFFF8,1,0xFFF7,156,0xFFFF}; static struct eif_par_types par535 = {535, ptf535, (uint16) 5, (uint16) 2, (char) 0}; /* TYPE [INTEGER_32] */ static EIF_TYPE_INDEX ptf536[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par536 = {536, ptf536, (uint16) 3, (uint16) 1, (char) 0}; /* SPECIAL [INTEGER_32] */ static EIF_TYPE_INDEX ptf537[] = {139,0xFFF7,539,218,0xFFFF}; static struct eif_par_types par537 = {537, ptf537, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [INTEGER_32] */ static EIF_TYPE_INDEX ptf538[] = {0,0xFFFF}; static struct eif_par_types par538 = {538, ptf538, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf539[] = {540,218,0xFFFF}; static struct eif_par_types par539 = {539, ptf539, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf540[] = {0,0xFFFF}; static struct eif_par_types par540 = {540, ptf540, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf541[] = {406,218,0xFFF7,540,218,0xFFFF}; static struct eif_par_types par541 = {541, ptf541, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf542[] = {543,218,0xFF01,539,218,0xFFFF}; static struct eif_par_types par542 = {542, ptf542, (uint16) 1, (uint16) 1, (char) 0}; 
/* TYPED_INDEXABLE_ITERATION_CURSOR [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf543[] = {541,218,0xFFFF}; static struct eif_par_types par543 = {543, ptf543, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf544[] = {545,218,0xFF01,537,218,0xFFFF}; static struct eif_par_types par544 = {544, ptf544, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf545[] = {543,218,0xFFF8,2,0xFFFF}; static struct eif_par_types par545 = {545, ptf545, (uint16) 1, (uint16) 2, (char) 0}; /* ARRAY [INTEGER_32] */ static EIF_TYPE_INDEX ptf546[] = {567,218,0xFFF7,563,218,218,0xFFF7,569,218,0xFFFF}; static struct eif_par_types par546 = {546, ptf546, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [INTEGER_32] */ static EIF_TYPE_INDEX ptf547[] = {540,218,0xFFFF}; static struct eif_par_types par547 = {547, ptf547, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [INTEGER_32] */ static EIF_TYPE_INDEX ptf548[] = {549,218,0xFFFF}; static struct eif_par_types par548 = {548, ptf548, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [INTEGER_32] */ static EIF_TYPE_INDEX ptf549[] = {547,218,0xFFFF}; static struct eif_par_types par549 = {549, ptf549, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [INTEGER_32] */ static EIF_TYPE_INDEX ptf550[] = {555,218,0xFFFF}; static struct eif_par_types par550 = {550, ptf550, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [INTEGER_32] */ static EIF_TYPE_INDEX ptf551[] = {552,218,0xFFFF}; static struct eif_par_types par551 = {551, ptf551, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [INTEGER_32] */ static EIF_TYPE_INDEX ptf552[] = {553,218,0xFFFF}; static struct eif_par_types par552 = {552, ptf552, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [INTEGER_32] */ static EIF_TYPE_INDEX ptf553[] = {554,218,0xFFFF}; static struct eif_par_types par553 = {553, ptf553, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [INTEGER_32] */ static EIF_TYPE_INDEX ptf554[] = 
{547,218,0xFFFF}; static struct eif_par_types par554 = {554, ptf554, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf555[] = {547,218,0xFFFF}; static struct eif_par_types par555 = {555, ptf555, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [INTEGER_32] */ static EIF_TYPE_INDEX ptf556[] = {569,218,0xFFF7,567,218,0xFFF7,560,218,0xFFF7,156,0xFFFF}; static struct eif_par_types par556 = {556, ptf556, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf557[] = {545,218,0xFF01,556,218,0xFFFF}; static struct eif_par_types par557 = {557, ptf557, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [INTEGER_32] */ static EIF_TYPE_INDEX ptf558[] = {552,218,0xFFF7,559,218,0xFFF7,548,218,0xFFFF}; static struct eif_par_types par558 = {558, ptf558, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [INTEGER_32] */ static EIF_TYPE_INDEX ptf559[] = {550,218,0xFFFF}; static struct eif_par_types par559 = {559, ptf559, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [INTEGER_32] */ static EIF_TYPE_INDEX ptf560[] = {561,218,0xFFF7,565,218,0xFFFF}; static struct eif_par_types par560 = {560, ptf560, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [INTEGER_32] */ static EIF_TYPE_INDEX ptf561[] = {562,218,0xFFFF}; static struct eif_par_types par561 = {561, ptf561, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [INTEGER_32] */ static EIF_TYPE_INDEX ptf562[] = {551,218,0xFFF7,563,218,218,0xFFF7,558,218,0xFFFF}; static struct eif_par_types par562 = {562, ptf562, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf563[] = {564,218,218,0xFFF7,539,218,0xFFFF}; static struct eif_par_types par563 = {563, ptf563, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf564[] = {553,218,0xFFFF}; static struct eif_par_types par564 = {564, ptf564, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [INTEGER_32] */ static 
EIF_TYPE_INDEX ptf565[] = {562,218,0xFFF7,566,218,0xFFFF}; static struct eif_par_types par565 = {565, ptf565, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [INTEGER_32] */ static EIF_TYPE_INDEX ptf566[] = {548,218,0xFFFF}; static struct eif_par_types par566 = {566, ptf566, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf567[] = {568,218,0xFFFF}; static struct eif_par_types par567 = {567, ptf567, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [INTEGER_32] */ static EIF_TYPE_INDEX ptf568[] = {548,218,0xFFFF}; static struct eif_par_types par568 = {568, ptf568, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [INTEGER_32] */ static EIF_TYPE_INDEX ptf569[] = {0,0xFFFF}; static struct eif_par_types par569 = {569, ptf569, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf570[] = {545,218,0xFF01,546,218,0xFFFF}; static struct eif_par_types par570 = {570, ptf570, (uint16) 1, (uint16) 1, (char) 0}; /* HASH_TABLE_ITERATION_CURSOR [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf571[] = {259,0xFFF8,1,0xFFF7,572,0xFFF8,1,218,0xFFFF}; static struct eif_par_types par571 = {571, ptf571, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE_ITERATION_CURSOR [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf572[] = {246,0xFFF8,1,0xFFFF}; static struct eif_par_types par572 = {572, ptf572, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERABLE [G#1, INTEGER_32] */ static EIF_TYPE_INDEX ptf573[] = {257,0xFFF8,1,0xFFFF}; static struct eif_par_types par573 = {573, ptf573, (uint16) 1, (uint16) 2, (char) 0}; /* RT_DBG_LOCAL_RECORD [POINTER] */ static EIF_TYPE_INDEX ptf574[] = {140,0xFFFF}; static struct eif_par_types par574 = {574, ptf574, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [POINTER] */ static EIF_TYPE_INDEX ptf575[] = {576,227,0xFFFF}; static struct eif_par_types par575 = {575, ptf575, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [POINTER] */ static EIF_TYPE_INDEX ptf576[] = 
{226,0xFFFF}; static struct eif_par_types par576 = {576, ptf576, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [POINTER]] */ static EIF_TYPE_INDEX ptf577[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par577 = {577, ptf577, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [REAL_64] */ static EIF_TYPE_INDEX ptf578[] = {140,0xFFFF}; static struct eif_par_types par578 = {578, ptf578, (uint16) 1, (uint16) 1, (char) 0}; /* CELL [INTEGER_32] */ static EIF_TYPE_INDEX ptf579[] = {0,0xFFFF}; static struct eif_par_types par579 = {579, ptf579, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [NATURAL_32] */ static EIF_TYPE_INDEX ptf580[] = {140,0xFFFF}; static struct eif_par_types par580 = {580, ptf580, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [NATURAL_32] */ static EIF_TYPE_INDEX ptf581[] = {582,212,0xFFFF}; static struct eif_par_types par581 = {581, ptf581, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [NATURAL_32] */ static EIF_TYPE_INDEX ptf582[] = {226,0xFFFF}; static struct eif_par_types par582 = {582, ptf582, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [NATURAL_32]] */ static EIF_TYPE_INDEX ptf583[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par583 = {583, ptf583, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [CHARACTER_32] */ static EIF_TYPE_INDEX ptf584[] = {140,0xFFFF}; static struct eif_par_types par584 = {584, ptf584, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [CHARACTER_32] */ static EIF_TYPE_INDEX ptf585[] = {586,194,0xFFFF}; static struct eif_par_types par585 = {585, ptf585, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [CHARACTER_32] */ static EIF_TYPE_INDEX ptf586[] = {226,0xFFFF}; static struct eif_par_types par586 = {586, ptf586, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [CHARACTER_32]] */ static EIF_TYPE_INDEX ptf587[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par587 
= {587, ptf587, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [CHARACTER_8] */ static EIF_TYPE_INDEX ptf588[] = {140,0xFFFF}; static struct eif_par_types par588 = {588, ptf588, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [CHARACTER_8] */ static EIF_TYPE_INDEX ptf589[] = {590,197,0xFFFF}; static struct eif_par_types par589 = {589, ptf589, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [CHARACTER_8] */ static EIF_TYPE_INDEX ptf590[] = {226,0xFFFF}; static struct eif_par_types par590 = {590, ptf590, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [CHARACTER_8]] */ static EIF_TYPE_INDEX ptf591[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par591 = {591, ptf591, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [BOOLEAN] */ static EIF_TYPE_INDEX ptf592[] = {140,0xFFFF}; static struct eif_par_types par592 = {592, ptf592, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [BOOLEAN] */ static EIF_TYPE_INDEX ptf593[] = {594,203,0xFFFF}; static struct eif_par_types par593 = {593, ptf593, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [BOOLEAN] */ static EIF_TYPE_INDEX ptf594[] = {226,0xFFFF}; static struct eif_par_types par594 = {594, ptf594, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [BOOLEAN]] */ static EIF_TYPE_INDEX ptf595[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par595 = {595, ptf595, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [INTEGER_8] */ static EIF_TYPE_INDEX ptf596[] = {140,0xFFFF}; static struct eif_par_types par596 = {596, ptf596, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [INTEGER_8] */ static EIF_TYPE_INDEX ptf597[] = {598,188,0xFFFF}; static struct eif_par_types par597 = {597, ptf597, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [INTEGER_8] */ static EIF_TYPE_INDEX ptf598[] = {226,0xFFFF}; static struct eif_par_types par598 = {598, ptf598, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE 
[TYPED_POINTER [INTEGER_8]] */ static EIF_TYPE_INDEX ptf599[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par599 = {599, ptf599, (uint16) 3, (uint16) 1, (char) 0}; /* HASH_TABLE [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf600[] = {566,218,0xFFF7,564,218,218,0xFFF7,603,218,218,0xFFF7,539,218,0xFFF7,156,0xFFFF}; static struct eif_par_types par600 = {600, ptf600, (uint16) 5, (uint16) 2, (char) 0}; /* HASH_TABLE_ITERATION_CURSOR [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf601[] = {542,218,0xFFF7,602,218,218,0xFFFF}; static struct eif_par_types par601 = {601, ptf601, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE_ITERATION_CURSOR [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf602[] = {406,218,0xFFFF}; static struct eif_par_types par602 = {602, ptf602, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERABLE [INTEGER_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf603[] = {540,218,0xFFFF}; static struct eif_par_types par603 = {603, ptf603, (uint16) 1, (uint16) 2, (char) 0}; /* RT_DBG_LOCAL_RECORD [NATURAL_8] */ static EIF_TYPE_INDEX ptf604[] = {140,0xFFFF}; static struct eif_par_types par604 = {604, ptf604, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [NATURAL_8] */ static EIF_TYPE_INDEX ptf605[] = {606,209,0xFFFF}; static struct eif_par_types par605 = {605, ptf605, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [NATURAL_8] */ static EIF_TYPE_INDEX ptf606[] = {226,0xFFFF}; static struct eif_par_types par606 = {606, ptf606, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [NATURAL_8]] */ static EIF_TYPE_INDEX ptf607[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par607 = {607, ptf607, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [G#1] */ static EIF_TYPE_INDEX ptf608[] = {140,0xFFFF}; static struct eif_par_types par608 = {608, ptf608, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [POINTER] */ static EIF_TYPE_INDEX ptf609[] = {140,0xFFFF}; static struct 
eif_par_types par609 = {609, ptf609, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [NATURAL_64] */ static EIF_TYPE_INDEX ptf610[] = {140,0xFFFF}; static struct eif_par_types par610 = {610, ptf610, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [NATURAL_64] */ static EIF_TYPE_INDEX ptf611[] = {612,224,0xFFFF}; static struct eif_par_types par611 = {611, ptf611, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [NATURAL_64] */ static EIF_TYPE_INDEX ptf612[] = {226,0xFFFF}; static struct eif_par_types par612 = {612, ptf612, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [NATURAL_64]] */ static EIF_TYPE_INDEX ptf613[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par613 = {613, ptf613, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [CHARACTER_8] */ static EIF_TYPE_INDEX ptf614[] = {140,0xFFFF}; static struct eif_par_types par614 = {614, ptf614, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [REAL_64] */ static EIF_TYPE_INDEX ptf615[] = {139,0xFFF7,617,206,0xFFFF}; static struct eif_par_types par615 = {615, ptf615, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [REAL_64] */ static EIF_TYPE_INDEX ptf616[] = {0,0xFFFF}; static struct eif_par_types par616 = {616, ptf616, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [REAL_64] */ static EIF_TYPE_INDEX ptf617[] = {618,206,0xFFFF}; static struct eif_par_types par617 = {617, ptf617, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [REAL_64] */ static EIF_TYPE_INDEX ptf618[] = {0,0xFFFF}; static struct eif_par_types par618 = {618, ptf618, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf619[] = {0,0xFFFF}; static struct eif_par_types par619 = {619, ptf619, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf620[] = {619,206,0xFFF7,618,206,0xFFFF}; static struct eif_par_types par620 = {620, ptf620, (uint16) 2, (uint16) 1, (char) 0}; /* 
READABLE_INDEXABLE_ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf621[] = {622,206,0xFF01,617,206,0xFFFF}; static struct eif_par_types par621 = {621, ptf621, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [REAL_64, G#2] */ static EIF_TYPE_INDEX ptf622[] = {620,206,0xFFFF}; static struct eif_par_types par622 = {622, ptf622, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf623[] = {624,206,0xFF01,615,206,0xFFFF}; static struct eif_par_types par623 = {623, ptf623, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [REAL_64, G#2] */ static EIF_TYPE_INDEX ptf624[] = {622,206,0xFFF8,2,0xFFFF}; static struct eif_par_types par624 = {624, ptf624, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE [REAL_64] */ static EIF_TYPE_INDEX ptf625[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par625 = {625, ptf625, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [REAL_64] */ static EIF_TYPE_INDEX ptf626[] = {647,206,0xFFF7,643,206,218,0xFFF7,649,206,0xFFFF}; static struct eif_par_types par626 = {626, ptf626, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [REAL_64] */ static EIF_TYPE_INDEX ptf627[] = {618,206,0xFFFF}; static struct eif_par_types par627 = {627, ptf627, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [REAL_64] */ static EIF_TYPE_INDEX ptf628[] = {629,206,0xFFFF}; static struct eif_par_types par628 = {628, ptf628, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [REAL_64] */ static EIF_TYPE_INDEX ptf629[] = {627,206,0xFFFF}; static struct eif_par_types par629 = {629, ptf629, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [REAL_64] */ static EIF_TYPE_INDEX ptf630[] = {635,206,0xFFFF}; static struct eif_par_types par630 = {630, ptf630, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [REAL_64] */ static EIF_TYPE_INDEX ptf631[] = {632,206,0xFFFF}; static struct eif_par_types par631 = {631, ptf631, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [REAL_64] */ static 
EIF_TYPE_INDEX ptf632[] = {633,206,0xFFFF}; static struct eif_par_types par632 = {632, ptf632, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [REAL_64] */ static EIF_TYPE_INDEX ptf633[] = {634,206,0xFFFF}; static struct eif_par_types par633 = {633, ptf633, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [REAL_64] */ static EIF_TYPE_INDEX ptf634[] = {627,206,0xFFFF}; static struct eif_par_types par634 = {634, ptf634, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [REAL_64] */ static EIF_TYPE_INDEX ptf635[] = {627,206,0xFFFF}; static struct eif_par_types par635 = {635, ptf635, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [REAL_64] */ static EIF_TYPE_INDEX ptf636[] = {649,206,0xFFF7,647,206,0xFFF7,640,206,0xFFF7,156,0xFFFF}; static struct eif_par_types par636 = {636, ptf636, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf637[] = {624,206,0xFF01,636,206,0xFFFF}; static struct eif_par_types par637 = {637, ptf637, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [REAL_64] */ static EIF_TYPE_INDEX ptf638[] = {632,206,0xFFF7,639,206,0xFFF7,628,206,0xFFFF}; static struct eif_par_types par638 = {638, ptf638, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [REAL_64] */ static EIF_TYPE_INDEX ptf639[] = {630,206,0xFFFF}; static struct eif_par_types par639 = {639, ptf639, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [REAL_64] */ static EIF_TYPE_INDEX ptf640[] = {641,206,0xFFF7,645,206,0xFFFF}; static struct eif_par_types par640 = {640, ptf640, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [REAL_64] */ static EIF_TYPE_INDEX ptf641[] = {642,206,0xFFFF}; static struct eif_par_types par641 = {641, ptf641, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [REAL_64] */ static EIF_TYPE_INDEX ptf642[] = {631,206,0xFFF7,643,206,218,0xFFF7,638,206,0xFFFF}; static struct eif_par_types par642 = {642, ptf642, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [REAL_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf643[] = 
{644,206,218,0xFFF7,617,206,0xFFFF}; static struct eif_par_types par643 = {643, ptf643, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [REAL_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf644[] = {633,206,0xFFFF}; static struct eif_par_types par644 = {644, ptf644, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [REAL_64] */ static EIF_TYPE_INDEX ptf645[] = {642,206,0xFFF7,646,206,0xFFFF}; static struct eif_par_types par645 = {645, ptf645, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [REAL_64] */ static EIF_TYPE_INDEX ptf646[] = {628,206,0xFFFF}; static struct eif_par_types par646 = {646, ptf646, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [REAL_64] */ static EIF_TYPE_INDEX ptf647[] = {648,206,0xFFFF}; static struct eif_par_types par647 = {647, ptf647, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [REAL_64] */ static EIF_TYPE_INDEX ptf648[] = {628,206,0xFFFF}; static struct eif_par_types par648 = {648, ptf648, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [REAL_64] */ static EIF_TYPE_INDEX ptf649[] = {0,0xFFFF}; static struct eif_par_types par649 = {649, ptf649, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [REAL_64] */ static EIF_TYPE_INDEX ptf650[] = {624,206,0xFF01,626,206,0xFFFF}; static struct eif_par_types par650 = {650, ptf650, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [INTEGER_8] */ static EIF_TYPE_INDEX ptf651[] = {139,0xFFF7,653,188,0xFFFF}; static struct eif_par_types par651 = {651, ptf651, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [INTEGER_8] */ static EIF_TYPE_INDEX ptf652[] = {0,0xFFFF}; static struct eif_par_types par652 = {652, ptf652, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [INTEGER_8] */ static EIF_TYPE_INDEX ptf653[] = {654,188,0xFFFF}; static struct eif_par_types par653 = {653, ptf653, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [INTEGER_8] */ static EIF_TYPE_INDEX ptf654[] = {0,0xFFFF}; static struct eif_par_types par654 = {654, ptf654, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR 
[INTEGER_8] */ static EIF_TYPE_INDEX ptf655[] = {0,0xFFFF}; static struct eif_par_types par655 = {655, ptf655, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [INTEGER_8] */ static EIF_TYPE_INDEX ptf656[] = {655,188,0xFFF7,654,188,0xFFFF}; static struct eif_par_types par656 = {656, ptf656, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [INTEGER_8] */ static EIF_TYPE_INDEX ptf657[] = {658,188,0xFF01,653,188,0xFFFF}; static struct eif_par_types par657 = {657, ptf657, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [INTEGER_8, G#2] */ static EIF_TYPE_INDEX ptf658[] = {656,188,0xFFFF}; static struct eif_par_types par658 = {658, ptf658, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [INTEGER_8] */ static EIF_TYPE_INDEX ptf659[] = {660,188,0xFF01,651,188,0xFFFF}; static struct eif_par_types par659 = {659, ptf659, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [INTEGER_8, G#2] */ static EIF_TYPE_INDEX ptf660[] = {658,188,0xFFF8,2,0xFFFF}; static struct eif_par_types par660 = {660, ptf660, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE [INTEGER_8] */ static EIF_TYPE_INDEX ptf661[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par661 = {661, ptf661, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [INTEGER_8] */ static EIF_TYPE_INDEX ptf662[] = {683,188,0xFFF7,679,188,218,0xFFF7,685,188,0xFFFF}; static struct eif_par_types par662 = {662, ptf662, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [INTEGER_8] */ static EIF_TYPE_INDEX ptf663[] = {654,188,0xFFFF}; static struct eif_par_types par663 = {663, ptf663, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [INTEGER_8] */ static EIF_TYPE_INDEX ptf664[] = {665,188,0xFFFF}; static struct eif_par_types par664 = {664, ptf664, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [INTEGER_8] */ static EIF_TYPE_INDEX ptf665[] = {663,188,0xFFFF}; static struct eif_par_types par665 = {665, ptf665, (uint16) 1, (uint16) 1, (char) 0}; 
/* LINEAR [INTEGER_8] */ static EIF_TYPE_INDEX ptf666[] = {671,188,0xFFFF}; static struct eif_par_types par666 = {666, ptf666, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [INTEGER_8] */ static EIF_TYPE_INDEX ptf667[] = {668,188,0xFFFF}; static struct eif_par_types par667 = {667, ptf667, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [INTEGER_8] */ static EIF_TYPE_INDEX ptf668[] = {669,188,0xFFFF}; static struct eif_par_types par668 = {668, ptf668, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [INTEGER_8] */ static EIF_TYPE_INDEX ptf669[] = {670,188,0xFFFF}; static struct eif_par_types par669 = {669, ptf669, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [INTEGER_8] */ static EIF_TYPE_INDEX ptf670[] = {663,188,0xFFFF}; static struct eif_par_types par670 = {670, ptf670, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [INTEGER_8] */ static EIF_TYPE_INDEX ptf671[] = {663,188,0xFFFF}; static struct eif_par_types par671 = {671, ptf671, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [INTEGER_8] */ static EIF_TYPE_INDEX ptf672[] = {685,188,0xFFF7,683,188,0xFFF7,676,188,0xFFF7,156,0xFFFF}; static struct eif_par_types par672 = {672, ptf672, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [INTEGER_8] */ static EIF_TYPE_INDEX ptf673[] = {660,188,0xFF01,672,188,0xFFFF}; static struct eif_par_types par673 = {673, ptf673, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [INTEGER_8] */ static EIF_TYPE_INDEX ptf674[] = {668,188,0xFFF7,675,188,0xFFF7,664,188,0xFFFF}; static struct eif_par_types par674 = {674, ptf674, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [INTEGER_8] */ static EIF_TYPE_INDEX ptf675[] = {666,188,0xFFFF}; static struct eif_par_types par675 = {675, ptf675, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [INTEGER_8] */ static EIF_TYPE_INDEX ptf676[] = {677,188,0xFFF7,681,188,0xFFFF}; static struct eif_par_types par676 = {676, ptf676, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [INTEGER_8] */ static EIF_TYPE_INDEX ptf677[] = 
{678,188,0xFFFF}; static struct eif_par_types par677 = {677, ptf677, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [INTEGER_8] */ static EIF_TYPE_INDEX ptf678[] = {667,188,0xFFF7,679,188,218,0xFFF7,674,188,0xFFFF}; static struct eif_par_types par678 = {678, ptf678, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [INTEGER_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf679[] = {680,188,218,0xFFF7,653,188,0xFFFF}; static struct eif_par_types par679 = {679, ptf679, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [INTEGER_8, INTEGER_32] */ static EIF_TYPE_INDEX ptf680[] = {669,188,0xFFFF}; static struct eif_par_types par680 = {680, ptf680, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [INTEGER_8] */ static EIF_TYPE_INDEX ptf681[] = {678,188,0xFFF7,682,188,0xFFFF}; static struct eif_par_types par681 = {681, ptf681, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [INTEGER_8] */ static EIF_TYPE_INDEX ptf682[] = {664,188,0xFFFF}; static struct eif_par_types par682 = {682, ptf682, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [INTEGER_8] */ static EIF_TYPE_INDEX ptf683[] = {684,188,0xFFFF}; static struct eif_par_types par683 = {683, ptf683, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [INTEGER_8] */ static EIF_TYPE_INDEX ptf684[] = {664,188,0xFFFF}; static struct eif_par_types par684 = {684, ptf684, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [INTEGER_8] */ static EIF_TYPE_INDEX ptf685[] = {0,0xFFFF}; static struct eif_par_types par685 = {685, ptf685, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [INTEGER_8] */ static EIF_TYPE_INDEX ptf686[] = {660,188,0xFF01,662,188,0xFFFF}; static struct eif_par_types par686 = {686, ptf686, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [INTEGER_16] */ static EIF_TYPE_INDEX ptf687[] = {139,0xFFF7,689,221,0xFFFF}; static struct eif_par_types par687 = {687, ptf687, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [INTEGER_16] */ static EIF_TYPE_INDEX ptf688[] = {0,0xFFFF}; static struct eif_par_types par688 = {688, 
ptf688, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [INTEGER_16] */ static EIF_TYPE_INDEX ptf689[] = {690,221,0xFFFF}; static struct eif_par_types par689 = {689, ptf689, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [INTEGER_16] */ static EIF_TYPE_INDEX ptf690[] = {0,0xFFFF}; static struct eif_par_types par690 = {690, ptf690, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX ptf691[] = {0,0xFFFF}; static struct eif_par_types par691 = {691, ptf691, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX ptf692[] = {691,221,0xFFF7,690,221,0xFFFF}; static struct eif_par_types par692 = {692, ptf692, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX ptf693[] = {694,221,0xFF01,689,221,0xFFFF}; static struct eif_par_types par693 = {693, ptf693, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [INTEGER_16, G#2] */ static EIF_TYPE_INDEX ptf694[] = {692,221,0xFFFF}; static struct eif_par_types par694 = {694, ptf694, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX ptf695[] = {696,221,0xFF01,687,221,0xFFFF}; static struct eif_par_types par695 = {695, ptf695, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [INTEGER_16, G#2] */ static EIF_TYPE_INDEX ptf696[] = {694,221,0xFFF8,2,0xFFFF}; static struct eif_par_types par696 = {696, ptf696, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE [INTEGER_16] */ static EIF_TYPE_INDEX ptf697[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par697 = {697, ptf697, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [INTEGER_16] */ static EIF_TYPE_INDEX ptf698[] = {719,221,0xFFF7,715,221,218,0xFFF7,721,221,0xFFFF}; static struct eif_par_types par698 = {698, ptf698, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [INTEGER_16] */ static EIF_TYPE_INDEX ptf699[] = {690,221,0xFFFF}; 
static struct eif_par_types par699 = {699, ptf699, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [INTEGER_16] */ static EIF_TYPE_INDEX ptf700[] = {701,221,0xFFFF}; static struct eif_par_types par700 = {700, ptf700, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [INTEGER_16] */ static EIF_TYPE_INDEX ptf701[] = {699,221,0xFFFF}; static struct eif_par_types par701 = {701, ptf701, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [INTEGER_16] */ static EIF_TYPE_INDEX ptf702[] = {707,221,0xFFFF}; static struct eif_par_types par702 = {702, ptf702, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [INTEGER_16] */ static EIF_TYPE_INDEX ptf703[] = {704,221,0xFFFF}; static struct eif_par_types par703 = {703, ptf703, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [INTEGER_16] */ static EIF_TYPE_INDEX ptf704[] = {705,221,0xFFFF}; static struct eif_par_types par704 = {704, ptf704, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [INTEGER_16] */ static EIF_TYPE_INDEX ptf705[] = {706,221,0xFFFF}; static struct eif_par_types par705 = {705, ptf705, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [INTEGER_16] */ static EIF_TYPE_INDEX ptf706[] = {699,221,0xFFFF}; static struct eif_par_types par706 = {706, ptf706, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [INTEGER_16] */ static EIF_TYPE_INDEX ptf707[] = {699,221,0xFFFF}; static struct eif_par_types par707 = {707, ptf707, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [INTEGER_16] */ static EIF_TYPE_INDEX ptf708[] = {721,221,0xFFF7,719,221,0xFFF7,712,221,0xFFF7,156,0xFFFF}; static struct eif_par_types par708 = {708, ptf708, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX ptf709[] = {696,221,0xFF01,708,221,0xFFFF}; static struct eif_par_types par709 = {709, ptf709, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [INTEGER_16] */ static EIF_TYPE_INDEX ptf710[] = {704,221,0xFFF7,711,221,0xFFF7,700,221,0xFFFF}; static struct eif_par_types par710 = {710, ptf710, (uint16) 3, (uint16) 1, 
(char) 0}; /* BILINEAR [INTEGER_16] */ static EIF_TYPE_INDEX ptf711[] = {702,221,0xFFFF}; static struct eif_par_types par711 = {711, ptf711, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [INTEGER_16] */ static EIF_TYPE_INDEX ptf712[] = {713,221,0xFFF7,717,221,0xFFFF}; static struct eif_par_types par712 = {712, ptf712, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [INTEGER_16] */ static EIF_TYPE_INDEX ptf713[] = {714,221,0xFFFF}; static struct eif_par_types par713 = {713, ptf713, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [INTEGER_16] */ static EIF_TYPE_INDEX ptf714[] = {703,221,0xFFF7,715,221,218,0xFFF7,710,221,0xFFFF}; static struct eif_par_types par714 = {714, ptf714, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [INTEGER_16, INTEGER_32] */ static EIF_TYPE_INDEX ptf715[] = {716,221,218,0xFFF7,689,221,0xFFFF}; static struct eif_par_types par715 = {715, ptf715, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [INTEGER_16, INTEGER_32] */ static EIF_TYPE_INDEX ptf716[] = {705,221,0xFFFF}; static struct eif_par_types par716 = {716, ptf716, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [INTEGER_16] */ static EIF_TYPE_INDEX ptf717[] = {714,221,0xFFF7,718,221,0xFFFF}; static struct eif_par_types par717 = {717, ptf717, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [INTEGER_16] */ static EIF_TYPE_INDEX ptf718[] = {700,221,0xFFFF}; static struct eif_par_types par718 = {718, ptf718, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [INTEGER_16] */ static EIF_TYPE_INDEX ptf719[] = {720,221,0xFFFF}; static struct eif_par_types par719 = {719, ptf719, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [INTEGER_16] */ static EIF_TYPE_INDEX ptf720[] = {700,221,0xFFFF}; static struct eif_par_types par720 = {720, ptf720, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [INTEGER_16] */ static EIF_TYPE_INDEX ptf721[] = {0,0xFFFF}; static struct eif_par_types par721 = {721, ptf721, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [INTEGER_16] */ static EIF_TYPE_INDEX 
ptf722[] = {696,221,0xFF01,698,221,0xFFFF}; static struct eif_par_types par722 = {722, ptf722, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [NATURAL_32] */ static EIF_TYPE_INDEX ptf723[] = {139,0xFFF7,393,212,0xFFFF}; static struct eif_par_types par723 = {723, ptf723, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [NATURAL_32] */ static EIF_TYPE_INDEX ptf724[] = {0,0xFFFF}; static struct eif_par_types par724 = {724, ptf724, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf725[] = {726,212,0xFF01,723,212,0xFFFF}; static struct eif_par_types par725 = {725, ptf725, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [NATURAL_32, G#2] */ static EIF_TYPE_INDEX ptf726[] = {397,212,0xFFF8,2,0xFFFF}; static struct eif_par_types par726 = {726, ptf726, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE [NATURAL_32] */ static EIF_TYPE_INDEX ptf727[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par727 = {727, ptf727, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [NATURAL_32] */ static EIF_TYPE_INDEX ptf728[] = {740,212,0xFFF7,736,212,218,0xFFF7,742,212,0xFFFF}; static struct eif_par_types par728 = {728, ptf728, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAYED_LIST [NATURAL_32] */ static EIF_TYPE_INDEX ptf729[] = {742,212,0xFFF7,740,212,0xFFF7,733,212,0xFFF7,156,0xFFFF}; static struct eif_par_types par729 = {729, ptf729, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf730[] = {726,212,0xFF01,729,212,0xFFFF}; static struct eif_par_types par730 = {730, ptf730, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [NATURAL_32] */ static EIF_TYPE_INDEX ptf731[] = {402,212,0xFFF7,732,212,0xFFF7,398,212,0xFFFF}; static struct eif_par_types par731 = {731, ptf731, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [NATURAL_32] */ static EIF_TYPE_INDEX ptf732[] = {400,212,0xFFFF}; static struct eif_par_types par732 = {732, ptf732, (uint16) 1, (uint16) 
1, (char) 0}; /* DYNAMIC_LIST [NATURAL_32] */ static EIF_TYPE_INDEX ptf733[] = {734,212,0xFFF7,738,212,0xFFFF}; static struct eif_par_types par733 = {733, ptf733, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [NATURAL_32] */ static EIF_TYPE_INDEX ptf734[] = {735,212,0xFFFF}; static struct eif_par_types par734 = {734, ptf734, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [NATURAL_32] */ static EIF_TYPE_INDEX ptf735[] = {401,212,0xFFF7,736,212,218,0xFFF7,731,212,0xFFFF}; static struct eif_par_types par735 = {735, ptf735, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [NATURAL_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf736[] = {737,212,218,0xFFF7,393,212,0xFFFF}; static struct eif_par_types par736 = {736, ptf736, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [NATURAL_32, INTEGER_32] */ static EIF_TYPE_INDEX ptf737[] = {403,212,0xFFFF}; static struct eif_par_types par737 = {737, ptf737, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [NATURAL_32] */ static EIF_TYPE_INDEX ptf738[] = {735,212,0xFFF7,739,212,0xFFFF}; static struct eif_par_types par738 = {738, ptf738, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [NATURAL_32] */ static EIF_TYPE_INDEX ptf739[] = {398,212,0xFFFF}; static struct eif_par_types par739 = {739, ptf739, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [NATURAL_32] */ static EIF_TYPE_INDEX ptf740[] = {741,212,0xFFFF}; static struct eif_par_types par740 = {740, ptf740, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [NATURAL_32] */ static EIF_TYPE_INDEX ptf741[] = {398,212,0xFFFF}; static struct eif_par_types par741 = {741, ptf741, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [NATURAL_32] */ static EIF_TYPE_INDEX ptf742[] = {0,0xFFFF}; static struct eif_par_types par742 = {742, ptf742, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [NATURAL_32] */ static EIF_TYPE_INDEX ptf743[] = {726,212,0xFF01,728,212,0xFFFF}; static struct eif_par_types par743 = {743, ptf743, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [BOOLEAN] */ static 
EIF_TYPE_INDEX ptf744[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par744 = {744, ptf744, (uint16) 3, (uint16) 1, (char) 0}; /* SPECIAL [INTEGER_64] */ static EIF_TYPE_INDEX ptf745[] = {139,0xFFF7,747,200,0xFFFF}; static struct eif_par_types par745 = {745, ptf745, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [INTEGER_64] */ static EIF_TYPE_INDEX ptf746[] = {0,0xFFFF}; static struct eif_par_types par746 = {746, ptf746, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [INTEGER_64] */ static EIF_TYPE_INDEX ptf747[] = {748,200,0xFFFF}; static struct eif_par_types par747 = {747, ptf747, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [INTEGER_64] */ static EIF_TYPE_INDEX ptf748[] = {0,0xFFFF}; static struct eif_par_types par748 = {748, ptf748, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf749[] = {0,0xFFFF}; static struct eif_par_types par749 = {749, ptf749, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf750[] = {749,200,0xFFF7,748,200,0xFFFF}; static struct eif_par_types par750 = {750, ptf750, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf751[] = {752,200,0xFF01,747,200,0xFFFF}; static struct eif_par_types par751 = {751, ptf751, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [INTEGER_64, G#2] */ static EIF_TYPE_INDEX ptf752[] = {750,200,0xFFFF}; static struct eif_par_types par752 = {752, ptf752, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf753[] = {754,200,0xFF01,745,200,0xFFFF}; static struct eif_par_types par753 = {753, ptf753, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [INTEGER_64, G#2] */ static EIF_TYPE_INDEX ptf754[] = {752,200,0xFFF8,2,0xFFFF}; static struct eif_par_types par754 = {754, ptf754, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE 
[INTEGER_64] */ static EIF_TYPE_INDEX ptf755[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par755 = {755, ptf755, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [INTEGER_64] */ static EIF_TYPE_INDEX ptf756[] = {777,200,0xFFF7,773,200,218,0xFFF7,779,200,0xFFFF}; static struct eif_par_types par756 = {756, ptf756, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [INTEGER_64] */ static EIF_TYPE_INDEX ptf757[] = {748,200,0xFFFF}; static struct eif_par_types par757 = {757, ptf757, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [INTEGER_64] */ static EIF_TYPE_INDEX ptf758[] = {759,200,0xFFFF}; static struct eif_par_types par758 = {758, ptf758, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [INTEGER_64] */ static EIF_TYPE_INDEX ptf759[] = {757,200,0xFFFF}; static struct eif_par_types par759 = {759, ptf759, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [INTEGER_64] */ static EIF_TYPE_INDEX ptf760[] = {765,200,0xFFFF}; static struct eif_par_types par760 = {760, ptf760, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [INTEGER_64] */ static EIF_TYPE_INDEX ptf761[] = {762,200,0xFFFF}; static struct eif_par_types par761 = {761, ptf761, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [INTEGER_64] */ static EIF_TYPE_INDEX ptf762[] = {763,200,0xFFFF}; static struct eif_par_types par762 = {762, ptf762, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [INTEGER_64] */ static EIF_TYPE_INDEX ptf763[] = {764,200,0xFFFF}; static struct eif_par_types par763 = {763, ptf763, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [INTEGER_64] */ static EIF_TYPE_INDEX ptf764[] = {757,200,0xFFFF}; static struct eif_par_types par764 = {764, ptf764, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [INTEGER_64] */ static EIF_TYPE_INDEX ptf765[] = {757,200,0xFFFF}; static struct eif_par_types par765 = {765, ptf765, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [INTEGER_64] */ static EIF_TYPE_INDEX ptf766[] = {779,200,0xFFF7,777,200,0xFFF7,770,200,0xFFF7,156,0xFFFF}; static struct 
eif_par_types par766 = {766, ptf766, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf767[] = {754,200,0xFF01,766,200,0xFFFF}; static struct eif_par_types par767 = {767, ptf767, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [INTEGER_64] */ static EIF_TYPE_INDEX ptf768[] = {762,200,0xFFF7,769,200,0xFFF7,758,200,0xFFFF}; static struct eif_par_types par768 = {768, ptf768, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [INTEGER_64] */ static EIF_TYPE_INDEX ptf769[] = {760,200,0xFFFF}; static struct eif_par_types par769 = {769, ptf769, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [INTEGER_64] */ static EIF_TYPE_INDEX ptf770[] = {771,200,0xFFF7,775,200,0xFFFF}; static struct eif_par_types par770 = {770, ptf770, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [INTEGER_64] */ static EIF_TYPE_INDEX ptf771[] = {772,200,0xFFFF}; static struct eif_par_types par771 = {771, ptf771, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [INTEGER_64] */ static EIF_TYPE_INDEX ptf772[] = {761,200,0xFFF7,773,200,218,0xFFF7,768,200,0xFFFF}; static struct eif_par_types par772 = {772, ptf772, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [INTEGER_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf773[] = {774,200,218,0xFFF7,747,200,0xFFFF}; static struct eif_par_types par773 = {773, ptf773, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [INTEGER_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf774[] = {763,200,0xFFFF}; static struct eif_par_types par774 = {774, ptf774, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [INTEGER_64] */ static EIF_TYPE_INDEX ptf775[] = {772,200,0xFFF7,776,200,0xFFFF}; static struct eif_par_types par775 = {775, ptf775, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [INTEGER_64] */ static EIF_TYPE_INDEX ptf776[] = {758,200,0xFFFF}; static struct eif_par_types par776 = {776, ptf776, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [INTEGER_64] */ static EIF_TYPE_INDEX ptf777[] = {778,200,0xFFFF}; static struct 
eif_par_types par777 = {777, ptf777, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [INTEGER_64] */ static EIF_TYPE_INDEX ptf778[] = {758,200,0xFFFF}; static struct eif_par_types par778 = {778, ptf778, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [INTEGER_64] */ static EIF_TYPE_INDEX ptf779[] = {0,0xFFFF}; static struct eif_par_types par779 = {779, ptf779, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [INTEGER_64] */ static EIF_TYPE_INDEX ptf780[] = {754,200,0xFF01,756,200,0xFFFF}; static struct eif_par_types par780 = {780, ptf780, (uint16) 1, (uint16) 1, (char) 0}; /* CELL [CHARACTER_32] */ static EIF_TYPE_INDEX ptf781[] = {0,0xFFFF}; static struct eif_par_types par781 = {781, ptf781, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [BOOLEAN] */ static EIF_TYPE_INDEX ptf782[] = {139,0xFFF7,784,203,0xFFFF}; static struct eif_par_types par782 = {782, ptf782, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [BOOLEAN] */ static EIF_TYPE_INDEX ptf783[] = {0,0xFFFF}; static struct eif_par_types par783 = {783, ptf783, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [BOOLEAN] */ static EIF_TYPE_INDEX ptf784[] = {785,203,0xFFFF}; static struct eif_par_types par784 = {784, ptf784, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [BOOLEAN] */ static EIF_TYPE_INDEX ptf785[] = {0,0xFFFF}; static struct eif_par_types par785 = {785, ptf785, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf786[] = {0,0xFFFF}; static struct eif_par_types par786 = {786, ptf786, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf787[] = {786,203,0xFFF7,785,203,0xFFFF}; static struct eif_par_types par787 = {787, ptf787, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf788[] = {789,203,0xFF01,784,203,0xFFFF}; static struct eif_par_types par788 = {788, ptf788, (uint16) 1, (uint16) 1, (char) 0}; /* 
TYPED_INDEXABLE_ITERATION_CURSOR [BOOLEAN, G#2] */ static EIF_TYPE_INDEX ptf789[] = {787,203,0xFFFF}; static struct eif_par_types par789 = {789, ptf789, (uint16) 1, (uint16) 2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf790[] = {791,203,0xFF01,782,203,0xFFFF}; static struct eif_par_types par790 = {790, ptf790, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [BOOLEAN, G#2] */ static EIF_TYPE_INDEX ptf791[] = {789,203,0xFFF8,2,0xFFFF}; static struct eif_par_types par791 = {791, ptf791, (uint16) 1, (uint16) 2, (char) 0}; /* ARRAY [BOOLEAN] */ static EIF_TYPE_INDEX ptf792[] = {813,203,0xFFF7,809,203,218,0xFFF7,815,203,0xFFFF}; static struct eif_par_types par792 = {792, ptf792, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [BOOLEAN] */ static EIF_TYPE_INDEX ptf793[] = {785,203,0xFFFF}; static struct eif_par_types par793 = {793, ptf793, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [BOOLEAN] */ static EIF_TYPE_INDEX ptf794[] = {795,203,0xFFFF}; static struct eif_par_types par794 = {794, ptf794, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [BOOLEAN] */ static EIF_TYPE_INDEX ptf795[] = {793,203,0xFFFF}; static struct eif_par_types par795 = {795, ptf795, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [BOOLEAN] */ static EIF_TYPE_INDEX ptf796[] = {801,203,0xFFFF}; static struct eif_par_types par796 = {796, ptf796, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [BOOLEAN] */ static EIF_TYPE_INDEX ptf797[] = {798,203,0xFFFF}; static struct eif_par_types par797 = {797, ptf797, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [BOOLEAN] */ static EIF_TYPE_INDEX ptf798[] = {799,203,0xFFFF}; static struct eif_par_types par798 = {798, ptf798, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [BOOLEAN] */ static EIF_TYPE_INDEX ptf799[] = {800,203,0xFFFF}; static struct eif_par_types par799 = {799, ptf799, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [BOOLEAN] */ static EIF_TYPE_INDEX ptf800[] = {793,203,0xFFFF}; static struct 
eif_par_types par800 = {800, ptf800, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [BOOLEAN] */ static EIF_TYPE_INDEX ptf801[] = {793,203,0xFFFF}; static struct eif_par_types par801 = {801, ptf801, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [BOOLEAN] */ static EIF_TYPE_INDEX ptf802[] = {815,203,0xFFF7,813,203,0xFFF7,806,203,0xFFF7,156,0xFFFF}; static struct eif_par_types par802 = {802, ptf802, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf803[] = {791,203,0xFF01,802,203,0xFFFF}; static struct eif_par_types par803 = {803, ptf803, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [BOOLEAN] */ static EIF_TYPE_INDEX ptf804[] = {798,203,0xFFF7,805,203,0xFFF7,794,203,0xFFFF}; static struct eif_par_types par804 = {804, ptf804, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [BOOLEAN] */ static EIF_TYPE_INDEX ptf805[] = {796,203,0xFFFF}; static struct eif_par_types par805 = {805, ptf805, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [BOOLEAN] */ static EIF_TYPE_INDEX ptf806[] = {807,203,0xFFF7,811,203,0xFFFF}; static struct eif_par_types par806 = {806, ptf806, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [BOOLEAN] */ static EIF_TYPE_INDEX ptf807[] = {808,203,0xFFFF}; static struct eif_par_types par807 = {807, ptf807, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [BOOLEAN] */ static EIF_TYPE_INDEX ptf808[] = {797,203,0xFFF7,809,203,218,0xFFF7,804,203,0xFFFF}; static struct eif_par_types par808 = {808, ptf808, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [BOOLEAN, INTEGER_32] */ static EIF_TYPE_INDEX ptf809[] = {810,203,218,0xFFF7,784,203,0xFFFF}; static struct eif_par_types par809 = {809, ptf809, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [BOOLEAN, INTEGER_32] */ static EIF_TYPE_INDEX ptf810[] = {799,203,0xFFFF}; static struct eif_par_types par810 = {810, ptf810, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [BOOLEAN] */ static EIF_TYPE_INDEX ptf811[] = {808,203,0xFFF7,812,203,0xFFFF}; static 
struct eif_par_types par811 = {811, ptf811, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [BOOLEAN] */ static EIF_TYPE_INDEX ptf812[] = {794,203,0xFFFF}; static struct eif_par_types par812 = {812, ptf812, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [BOOLEAN] */ static EIF_TYPE_INDEX ptf813[] = {814,203,0xFFFF}; static struct eif_par_types par813 = {813, ptf813, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [BOOLEAN] */ static EIF_TYPE_INDEX ptf814[] = {794,203,0xFFFF}; static struct eif_par_types par814 = {814, ptf814, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [BOOLEAN] */ static EIF_TYPE_INDEX ptf815[] = {0,0xFFFF}; static struct eif_par_types par815 = {815, ptf815, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf816[] = {791,203,0xFF01,792,203,0xFFFF}; static struct eif_par_types par816 = {816, ptf816, (uint16) 1, (uint16) 1, (char) 0}; /* FUNCTION [G#1, G#2] */ static EIF_TYPE_INDEX ptf817[] = {254,0xFFF8,1,0xFFFF}; static struct eif_par_types par817 = {817, ptf817, (uint16) 1, (uint16) 2, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [NATURAL_16] */ static EIF_TYPE_INDEX ptf818[] = {140,0xFFFF}; static struct eif_par_types par818 = {818, ptf818, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [NATURAL_16] */ static EIF_TYPE_INDEX ptf819[] = {820,215,0xFFFF}; static struct eif_par_types par819 = {819, ptf819, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [NATURAL_16] */ static EIF_TYPE_INDEX ptf820[] = {226,0xFFFF}; static struct eif_par_types par820 = {820, ptf820, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [NATURAL_16]] */ static EIF_TYPE_INDEX ptf821[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par821 = {821, ptf821, (uint16) 3, (uint16) 1, (char) 0}; /* COUNTABLE_SEQUENCE [INTEGER_32] */ static EIF_TYPE_INDEX ptf822[] = {827,218,0xFFF7,552,218,0xFFF7,550,218,0xFFFF}; static struct eif_par_types par822 = {822, ptf822, (uint16) 3, (uint16) 1, 
(char) 0}; /* LINKED_LIST [INTEGER_32] */ static EIF_TYPE_INDEX ptf823[] = {560,218,0xFFFF}; static struct eif_par_types par823 = {823, ptf823, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf824[] = {542,218,0xFFFF}; static struct eif_par_types par824 = {824, ptf824, (uint16) 1, (uint16) 1, (char) 0}; /* LINKABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf825[] = {579,218,0xFFFF}; static struct eif_par_types par825 = {825, ptf825, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf826[] = {142,0xFFFF}; static struct eif_par_types par826 = {826, ptf826, (uint16) 1, (uint16) 1, (char) 0}; /* COUNTABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf827[] = {828,218,0xFFFF}; static struct eif_par_types par827 = {827, ptf827, (uint16) 1, (uint16) 1, (char) 0}; /* INFINITE [INTEGER_32] */ static EIF_TYPE_INDEX ptf828[] = {549,218,0xFFFF}; static struct eif_par_types par828 = {828, ptf828, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_STACK [G#1] */ static EIF_TYPE_INDEX ptf829[] = {830,0xFFF8,1,0xFFF7,270,0xFFF8,1,0xFFFF}; static struct eif_par_types par829 = {829, ptf829, (uint16) 2, (uint16) 1, (char) 0}; /* STACK [G#1] */ static EIF_TYPE_INDEX ptf830[] = {411,0xFFF8,1,0xFFFF}; static struct eif_par_types par830 = {830, ptf830, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [NATURAL_32] */ static EIF_TYPE_INDEX ptf831[] = {140,0xFFFF}; static struct eif_par_types par831 = {831, ptf831, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [INTEGER_32] */ static EIF_TYPE_INDEX ptf832[] = {140,0xFFFF}; static struct eif_par_types par832 = {832, ptf832, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [INTEGER_64] */ static EIF_TYPE_INDEX ptf833[] = {140,0xFFFF}; static struct eif_par_types par833 = {833, ptf833, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [INTEGER_64] */ static EIF_TYPE_INDEX ptf834[] = {835,200,0xFFFF}; static 
struct eif_par_types par834 = {834, ptf834, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [INTEGER_64] */ static EIF_TYPE_INDEX ptf835[] = {226,0xFFFF}; static struct eif_par_types par835 = {835, ptf835, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [INTEGER_64]] */ static EIF_TYPE_INDEX ptf836[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par836 = {836, ptf836, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [INTEGER_16] */ static EIF_TYPE_INDEX ptf837[] = {140,0xFFFF}; static struct eif_par_types par837 = {837, ptf837, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [INTEGER_16] */ static EIF_TYPE_INDEX ptf838[] = {839,221,0xFFFF}; static struct eif_par_types par838 = {838, ptf838, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [INTEGER_16] */ static EIF_TYPE_INDEX ptf839[] = {226,0xFFFF}; static struct eif_par_types par839 = {839, ptf839, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [INTEGER_16]] */ static EIF_TYPE_INDEX ptf840[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par840 = {840, ptf840, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [INTEGER_8] */ static EIF_TYPE_INDEX ptf841[] = {140,0xFFFF}; static struct eif_par_types par841 = {841, ptf841, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [REAL_32] */ static EIF_TYPE_INDEX ptf842[] = {140,0xFFFF}; static struct eif_par_types par842 = {842, ptf842, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_POINTER [REAL_32] */ static EIF_TYPE_INDEX ptf843[] = {844,191,0xFFFF}; static struct eif_par_types par843 = {843, ptf843, (uint16) 1, (uint16) 1, (char) 1}; /* reference TYPED_POINTER [REAL_32] */ static EIF_TYPE_INDEX ptf844[] = {226,0xFFFF}; static struct eif_par_types par844 = {844, ptf844, (uint16) 1, (uint16) 1, (char) 1}; /* TYPE [TYPED_POINTER [REAL_32]] */ static EIF_TYPE_INDEX ptf845[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par845 = 
{845, ptf845, (uint16) 3, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [BOOLEAN] */ static EIF_TYPE_INDEX ptf846[] = {140,0xFFFF}; static struct eif_par_types par846 = {846, ptf846, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [CHARACTER_32] */ static EIF_TYPE_INDEX ptf847[] = {140,0xFFFF}; static struct eif_par_types par847 = {847, ptf847, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_ATTRIBUTE_RECORD [NATURAL_8] */ static EIF_TYPE_INDEX ptf848[] = {140,0xFFFF}; static struct eif_par_types par848 = {848, ptf848, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [NATURAL_64] */ static EIF_TYPE_INDEX ptf849[] = {139,0xFFF7,851,224,0xFFFF}; static struct eif_par_types par849 = {849, ptf849, (uint16) 2, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [NATURAL_64] */ static EIF_TYPE_INDEX ptf850[] = {0,0xFFFF}; static struct eif_par_types par850 = {850, ptf850, (uint16) 1, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE [NATURAL_64] */ static EIF_TYPE_INDEX ptf851[] = {852,224,0xFFFF}; static struct eif_par_types par851 = {851, ptf851, (uint16) 1, (uint16) 1, (char) 0}; /* ITERABLE [NATURAL_64] */ static EIF_TYPE_INDEX ptf852[] = {0,0xFFFF}; static struct eif_par_types par852 = {852, ptf852, (uint16) 1, (uint16) 1, (char) 0}; /* ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf853[] = {0,0xFFFF}; static struct eif_par_types par853 = {853, ptf853, (uint16) 1, (uint16) 1, (char) 0}; /* INDEXABLE_ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf854[] = {853,224,0xFFF7,852,224,0xFFFF}; static struct eif_par_types par854 = {854, ptf854, (uint16) 2, (uint16) 1, (char) 0}; /* READABLE_INDEXABLE_ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf855[] = {856,224,0xFF01,851,224,0xFFFF}; static struct eif_par_types par855 = {855, ptf855, (uint16) 1, (uint16) 1, (char) 0}; /* TYPED_INDEXABLE_ITERATION_CURSOR [NATURAL_64, G#2] */ static EIF_TYPE_INDEX ptf856[] = {854,224,0xFFFF}; static struct eif_par_types par856 = {856, ptf856, (uint16) 1, (uint16) 
2, (char) 0}; /* SPECIAL_ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf857[] = {858,224,0xFF01,849,224,0xFFFF}; static struct eif_par_types par857 = {857, ptf857, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [NATURAL_64, G#2] */ static EIF_TYPE_INDEX ptf858[] = {856,224,0xFFF8,2,0xFFFF}; static struct eif_par_types par858 = {858, ptf858, (uint16) 1, (uint16) 2, (char) 0}; /* TYPE [NATURAL_64] */ static EIF_TYPE_INDEX ptf859[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par859 = {859, ptf859, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAY [NATURAL_64] */ static EIF_TYPE_INDEX ptf860[] = {881,224,0xFFF7,877,224,218,0xFFF7,883,224,0xFFFF}; static struct eif_par_types par860 = {860, ptf860, (uint16) 3, (uint16) 1, (char) 0}; /* CONTAINER [NATURAL_64] */ static EIF_TYPE_INDEX ptf861[] = {852,224,0xFFFF}; static struct eif_par_types par861 = {861, ptf861, (uint16) 1, (uint16) 1, (char) 0}; /* FINITE [NATURAL_64] */ static EIF_TYPE_INDEX ptf862[] = {863,224,0xFFFF}; static struct eif_par_types par862 = {862, ptf862, (uint16) 1, (uint16) 1, (char) 0}; /* BOX [NATURAL_64] */ static EIF_TYPE_INDEX ptf863[] = {861,224,0xFFFF}; static struct eif_par_types par863 = {863, ptf863, (uint16) 1, (uint16) 1, (char) 0}; /* LINEAR [NATURAL_64] */ static EIF_TYPE_INDEX ptf864[] = {869,224,0xFFFF}; static struct eif_par_types par864 = {864, ptf864, (uint16) 1, (uint16) 1, (char) 0}; /* CURSOR_STRUCTURE [NATURAL_64] */ static EIF_TYPE_INDEX ptf865[] = {866,224,0xFFFF}; static struct eif_par_types par865 = {865, ptf865, (uint16) 1, (uint16) 1, (char) 0}; /* ACTIVE [NATURAL_64] */ static EIF_TYPE_INDEX ptf866[] = {867,224,0xFFFF}; static struct eif_par_types par866 = {866, ptf866, (uint16) 1, (uint16) 1, (char) 0}; /* BAG [NATURAL_64] */ static EIF_TYPE_INDEX ptf867[] = {868,224,0xFFFF}; static struct eif_par_types par867 = {867, ptf867, (uint16) 1, (uint16) 1, (char) 0}; /* COLLECTION [NATURAL_64] */ static EIF_TYPE_INDEX ptf868[] = 
{861,224,0xFFFF}; static struct eif_par_types par868 = {868, ptf868, (uint16) 1, (uint16) 1, (char) 0}; /* TRAVERSABLE [NATURAL_64] */ static EIF_TYPE_INDEX ptf869[] = {861,224,0xFFFF}; static struct eif_par_types par869 = {869, ptf869, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [NATURAL_64] */ static EIF_TYPE_INDEX ptf870[] = {883,224,0xFFF7,881,224,0xFFF7,874,224,0xFFF7,156,0xFFFF}; static struct eif_par_types par870 = {870, ptf870, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf871[] = {858,224,0xFF01,870,224,0xFFFF}; static struct eif_par_types par871 = {871, ptf871, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [NATURAL_64] */ static EIF_TYPE_INDEX ptf872[] = {866,224,0xFFF7,873,224,0xFFF7,862,224,0xFFFF}; static struct eif_par_types par872 = {872, ptf872, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [NATURAL_64] */ static EIF_TYPE_INDEX ptf873[] = {864,224,0xFFFF}; static struct eif_par_types par873 = {873, ptf873, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [NATURAL_64] */ static EIF_TYPE_INDEX ptf874[] = {875,224,0xFFF7,879,224,0xFFFF}; static struct eif_par_types par874 = {874, ptf874, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [NATURAL_64] */ static EIF_TYPE_INDEX ptf875[] = {876,224,0xFFFF}; static struct eif_par_types par875 = {875, ptf875, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [NATURAL_64] */ static EIF_TYPE_INDEX ptf876[] = {865,224,0xFFF7,877,224,218,0xFFF7,872,224,0xFFFF}; static struct eif_par_types par876 = {876, ptf876, (uint16) 3, (uint16) 1, (char) 0}; /* INDEXABLE [NATURAL_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf877[] = {878,224,218,0xFFF7,851,224,0xFFFF}; static struct eif_par_types par877 = {877, ptf877, (uint16) 2, (uint16) 2, (char) 0}; /* TABLE [NATURAL_64, INTEGER_32] */ static EIF_TYPE_INDEX ptf878[] = {867,224,0xFFFF}; static struct eif_par_types par878 = {878, ptf878, (uint16) 1, (uint16) 2, (char) 0}; /* DYNAMIC_CHAIN [NATURAL_64] */ static 
EIF_TYPE_INDEX ptf879[] = {876,224,0xFFF7,880,224,0xFFFF}; static struct eif_par_types par879 = {879, ptf879, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [NATURAL_64] */ static EIF_TYPE_INDEX ptf880[] = {862,224,0xFFFF}; static struct eif_par_types par880 = {880, ptf880, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [NATURAL_64] */ static EIF_TYPE_INDEX ptf881[] = {882,224,0xFFFF}; static struct eif_par_types par881 = {881, ptf881, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [NATURAL_64] */ static EIF_TYPE_INDEX ptf882[] = {862,224,0xFFFF}; static struct eif_par_types par882 = {882, ptf882, (uint16) 1, (uint16) 1, (char) 0}; /* TO_SPECIAL [NATURAL_64] */ static EIF_TYPE_INDEX ptf883[] = {0,0xFFFF}; static struct eif_par_types par883 = {883, ptf883, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAY_ITERATION_CURSOR [NATURAL_64] */ static EIF_TYPE_INDEX ptf884[] = {858,224,0xFF01,860,224,0xFFFF}; static struct eif_par_types par884 = {884, ptf884, (uint16) 1, (uint16) 1, (char) 0}; /* HASH_TABLE_ITERATION_CURSOR [NATURAL_32, POINTER] */ static EIF_TYPE_INDEX ptf885[] = {396,212,0xFFF7,889,212,227,0xFFFF}; static struct eif_par_types par885 = {885, ptf885, (uint16) 2, (uint16) 2, (char) 0}; /* HASH_TABLE [NATURAL_32, POINTER] */ static EIF_TYPE_INDEX ptf886[] = {739,212,0xFFF7,887,212,227,0xFFF7,888,212,227,0xFFF7,393,212,0xFFF7,156,0xFFFF}; static struct eif_par_types par886 = {886, ptf886, (uint16) 5, (uint16) 2, (char) 0}; /* TABLE [NATURAL_32, POINTER] */ static EIF_TYPE_INDEX ptf887[] = {403,212,0xFFFF}; static struct eif_par_types par887 = {887, ptf887, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERABLE [NATURAL_32, POINTER] */ static EIF_TYPE_INDEX ptf888[] = {394,212,0xFFFF}; static struct eif_par_types par888 = {888, ptf888, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERATION_CURSOR [NATURAL_32, POINTER] */ static EIF_TYPE_INDEX ptf889[] = {392,212,0xFFFF}; static struct eif_par_types par889 = {889, ptf889, (uint16) 1, (uint16) 2, (char) 0}; /* 
LINKED_STACK [BOOLEAN] */ static EIF_TYPE_INDEX ptf890[] = {894,203,0xFFF7,897,203,0xFFFF}; static struct eif_par_types par890 = {890, ptf890, (uint16) 2, (uint16) 1, (char) 0}; /* LINKED_LIST_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf891[] = {142,0xFFFF}; static struct eif_par_types par891 = {891, ptf891, (uint16) 1, (uint16) 1, (char) 0}; /* LINKABLE [BOOLEAN] */ static EIF_TYPE_INDEX ptf892[] = {893,203,0xFFFF}; static struct eif_par_types par892 = {892, ptf892, (uint16) 1, (uint16) 1, (char) 0}; /* CELL [BOOLEAN] */ static EIF_TYPE_INDEX ptf893[] = {0,0xFFFF}; static struct eif_par_types par893 = {893, ptf893, (uint16) 1, (uint16) 1, (char) 0}; /* STACK [BOOLEAN] */ static EIF_TYPE_INDEX ptf894[] = {895,203,0xFFFF}; static struct eif_par_types par894 = {894, ptf894, (uint16) 1, (uint16) 1, (char) 0}; /* DISPENSER [BOOLEAN] */ static EIF_TYPE_INDEX ptf895[] = {798,203,0xFFF7,794,203,0xFFFF}; static struct eif_par_types par895 = {895, ptf895, (uint16) 2, (uint16) 1, (char) 0}; /* ARRAYED_STACK [BOOLEAN] */ static EIF_TYPE_INDEX ptf896[] = {894,203,0xFFF7,802,203,0xFFFF}; static struct eif_par_types par896 = {896, ptf896, (uint16) 2, (uint16) 1, (char) 0}; /* LINKED_LIST [BOOLEAN] */ static EIF_TYPE_INDEX ptf897[] = {806,203,0xFFFF}; static struct eif_par_types par897 = {897, ptf897, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST_ITERATION_CURSOR [BOOLEAN] */ static EIF_TYPE_INDEX ptf898[] = {788,203,0xFFFF}; static struct eif_par_types par898 = {898, ptf898, (uint16) 1, (uint16) 1, (char) 0}; /* PREDICATE [G#1] */ static EIF_TYPE_INDEX ptf899[] = {264,0xFFF8,1,203,0xFFFF}; static struct eif_par_types par899 = {899, ptf899, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [INTEGER_64] */ static EIF_TYPE_INDEX ptf900[] = {140,0xFFFF}; static struct eif_par_types par900 = {900, ptf900, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_STACK [INTEGER_32] */ static EIF_TYPE_INDEX ptf901[] = {902,218,0xFFF7,823,218,0xFFFF}; static struct eif_par_types par901 
= {901, ptf901, (uint16) 2, (uint16) 1, (char) 0}; /* STACK [INTEGER_32] */ static EIF_TYPE_INDEX ptf902[] = {903,218,0xFFFF}; static struct eif_par_types par902 = {902, ptf902, (uint16) 1, (uint16) 1, (char) 0}; /* DISPENSER [INTEGER_32] */ static EIF_TYPE_INDEX ptf903[] = {552,218,0xFFF7,548,218,0xFFFF}; static struct eif_par_types par903 = {903, ptf903, (uint16) 2, (uint16) 1, (char) 0}; /* ARRAYED_STACK [INTEGER_32] */ static EIF_TYPE_INDEX ptf904[] = {902,218,0xFFF7,556,218,0xFFFF}; static struct eif_par_types par904 = {904, ptf904, (uint16) 2, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [INTEGER_16] */ static EIF_TYPE_INDEX ptf905[] = {140,0xFFFF}; static struct eif_par_types par905 = {905, ptf905, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [NATURAL_16] */ static EIF_TYPE_INDEX ptf906[] = {140,0xFFFF}; static struct eif_par_types par906 = {906, ptf906, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST [CHARACTER_32] */ static EIF_TYPE_INDEX ptf907[] = {917,194,0xFFF7,910,194,0xFFF7,921,194,0xFFF7,156,0xFFFF}; static struct eif_par_types par907 = {907, ptf907, (uint16) 4, (uint16) 1, (char) 0}; /* ARRAY [CHARACTER_32] */ static EIF_TYPE_INDEX ptf908[] = {910,194,0xFFF7,288,194,218,0xFFF7,917,194,0xFFFF}; static struct eif_par_types par908 = {908, ptf908, (uint16) 3, (uint16) 1, (char) 0}; /* NATIVE_ARRAY [CHARACTER_32] */ static EIF_TYPE_INDEX ptf909[] = {0,0xFFFF}; static struct eif_par_types par909 = {909, ptf909, (uint16) 1, (uint16) 1, (char) 0}; /* RESIZABLE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf910[] = {911,194,0xFFFF}; static struct eif_par_types par910 = {910, ptf910, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED [CHARACTER_32] */ static EIF_TYPE_INDEX ptf911[] = {297,194,0xFFFF}; static struct eif_par_types par911 = {911, ptf911, (uint16) 1, (uint16) 1, (char) 0}; /* TYPE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf912[] = {184,0xFFF7,106,0xFFF7,137,0xFFFF}; static struct eif_par_types par912 = {912, ptf912, (uint16) 3, (uint16) 
1, (char) 0}; /* ARRAY_ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf913[] = {916,194,0xFF01,908,194,0xFFFF}; static struct eif_par_types par913 = {913, ptf913, (uint16) 1, (uint16) 1, (char) 0}; /* SPECIAL [CHARACTER_32] */ static EIF_TYPE_INDEX ptf914[] = {139,0xFFF7,293,194,0xFFFF}; static struct eif_par_types par914 = {914, ptf914, (uint16) 2, (uint16) 1, (char) 0}; /* SPECIAL_ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf915[] = {916,194,0xFF01,914,194,0xFFFF}; static struct eif_par_types par915 = {915, ptf915, (uint16) 1, (uint16) 1, (char) 0}; /* GENERAL_SPECIAL_ITERATION_CURSOR [CHARACTER_32, G#2] */ static EIF_TYPE_INDEX ptf916[] = {296,194,0xFFF8,2,0xFFFF}; static struct eif_par_types par916 = {916, ptf916, (uint16) 1, (uint16) 2, (char) 0}; /* TO_SPECIAL [CHARACTER_32] */ static EIF_TYPE_INDEX ptf917[] = {0,0xFFFF}; static struct eif_par_types par917 = {917, ptf917, (uint16) 1, (uint16) 1, (char) 0}; /* ARRAYED_LIST_ITERATION_CURSOR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf918[] = {916,194,0xFF01,907,194,0xFFFF}; static struct eif_par_types par918 = {918, ptf918, (uint16) 1, (uint16) 1, (char) 0}; /* SEQUENCE [CHARACTER_32] */ static EIF_TYPE_INDEX ptf919[] = {301,194,0xFFF7,920,194,0xFFF7,297,194,0xFFFF}; static struct eif_par_types par919 = {919, ptf919, (uint16) 3, (uint16) 1, (char) 0}; /* BILINEAR [CHARACTER_32] */ static EIF_TYPE_INDEX ptf920[] = {299,194,0xFFFF}; static struct eif_par_types par920 = {920, ptf920, (uint16) 1, (uint16) 1, (char) 0}; /* DYNAMIC_LIST [CHARACTER_32] */ static EIF_TYPE_INDEX ptf921[] = {922,194,0xFFF7,924,194,0xFFFF}; static struct eif_par_types par921 = {921, ptf921, (uint16) 2, (uint16) 1, (char) 0}; /* LIST [CHARACTER_32] */ static EIF_TYPE_INDEX ptf922[] = {923,194,0xFFFF}; static struct eif_par_types par922 = {922, ptf922, (uint16) 1, (uint16) 1, (char) 0}; /* CHAIN [CHARACTER_32] */ static EIF_TYPE_INDEX ptf923[] = {300,194,0xFFF7,288,194,218,0xFFF7,919,194,0xFFFF}; static struct 
eif_par_types par923 = {923, ptf923, (uint16) 3, (uint16) 1, (char) 0}; /* DYNAMIC_CHAIN [CHARACTER_32] */ static EIF_TYPE_INDEX ptf924[] = {923,194,0xFFF7,925,194,0xFFFF}; static struct eif_par_types par924 = {924, ptf924, (uint16) 2, (uint16) 1, (char) 0}; /* UNBOUNDED [CHARACTER_32] */ static EIF_TYPE_INDEX ptf925[] = {297,194,0xFFFF}; static struct eif_par_types par925 = {925, ptf925, (uint16) 1, (uint16) 1, (char) 0}; /* CELL [G#1] */ static EIF_TYPE_INDEX ptf926[] = {0,0xFFFF}; static struct eif_par_types par926 = {926, ptf926, (uint16) 1, (uint16) 1, (char) 0}; /* STRING_TABLE [G#1] */ static EIF_TYPE_INDEX ptf927[] = {412,0xFFF8,1,0xFF01,229,0xFFFF}; static struct eif_par_types par927 = {927, ptf927, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [INTEGER_8] */ static EIF_TYPE_INDEX ptf928[] = {140,0xFFFF}; static struct eif_par_types par928 = {928, ptf928, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST [G#1] */ static EIF_TYPE_INDEX ptf929[] = {283,0xFFF8,1,0xFFFF}; static struct eif_par_types par929 = {929, ptf929, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST_ITERATION_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf930[] = {259,0xFFF8,1,0xFFFF}; static struct eif_par_types par930 = {930, ptf930, (uint16) 1, (uint16) 1, (char) 0}; /* LINKABLE [G#1] */ static EIF_TYPE_INDEX ptf931[] = {926,0xFFF8,1,0xFFFF}; static struct eif_par_types par931 = {931, ptf931, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_LIST_CURSOR [G#1] */ static EIF_TYPE_INDEX ptf932[] = {142,0xFFFF}; static struct eif_par_types par932 = {932, ptf932, (uint16) 1, (uint16) 1, (char) 0}; /* STRING_TABLE [INTEGER_32] */ static EIF_TYPE_INDEX ptf933[] = {935,218,0xFF01,229,0xFFFF}; static struct eif_par_types par933 = {933, ptf933, (uint16) 1, (uint16) 1, (char) 0}; /* HASH_TABLE_ITERATION_CURSOR [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf934[] = {542,218,0xFFF7,937,218,0xFFF8,2,0xFFFF}; static struct eif_par_types par934 = {934, ptf934, (uint16) 2, (uint16) 2, (char) 0}; /* 
HASH_TABLE [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf935[] = {566,218,0xFFF7,936,218,0xFFF8,2,0xFFF7,938,218,0xFFF8,2,0xFFF7,539,218,0xFFF7,156,0xFFFF}; static struct eif_par_types par935 = {935, ptf935, (uint16) 5, (uint16) 2, (char) 0}; /* TABLE [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf936[] = {553,218,0xFFFF}; static struct eif_par_types par936 = {936, ptf936, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERATION_CURSOR [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf937[] = {406,218,0xFFFF}; static struct eif_par_types par937 = {937, ptf937, (uint16) 1, (uint16) 2, (char) 0}; /* TABLE_ITERABLE [INTEGER_32, G#2] */ static EIF_TYPE_INDEX ptf938[] = {540,218,0xFFFF}; static struct eif_par_types par938 = {938, ptf938, (uint16) 1, (uint16) 2, (char) 0}; /* ACTION_SEQUENCE [G#1] */ static EIF_TYPE_INDEX ptf939[] = {941,0xFF01,250,0xFFF8,1,0xFFFF}; static struct eif_par_types par939 = {939, ptf939, (uint16) 1, (uint16) 1, (char) 0}; /* LINKED_QUEUE [G#1] */ static EIF_TYPE_INDEX ptf940[] = {410,0xFFF8,1,0xFFF7,929,0xFFF8,1,0xFFFF}; static struct eif_par_types par940 = {940, ptf940, (uint16) 2, (uint16) 1, (char) 0}; /* INTERACTIVE_LIST [G#1] */ static EIF_TYPE_INDEX ptf941[] = {270,0xFFF8,1,0xFFFF}; static struct eif_par_types par941 = {941, ptf941, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [CHARACTER_8] */ static EIF_TYPE_INDEX ptf942[] = {140,0xFFFF}; static struct eif_par_types par942 = {942, ptf942, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [CHARACTER_32] */ static EIF_TYPE_INDEX ptf943[] = {140,0xFFFF}; static struct eif_par_types par943 = {943, ptf943, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [NATURAL_8] */ static EIF_TYPE_INDEX ptf944[] = {140,0xFFFF}; static struct eif_par_types par944 = {944, ptf944, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [REAL_32] */ static EIF_TYPE_INDEX ptf945[] = {140,0xFFFF}; static struct eif_par_types par945 = {945, ptf945, (uint16) 1, (uint16) 1, (char) 0}; /* 
RT_DBG_FIELD_RECORD [NATURAL_32] */ static EIF_TYPE_INDEX ptf946[] = {140,0xFFFF}; static struct eif_par_types par946 = {946, ptf946, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [NATURAL_16] */ static EIF_TYPE_INDEX ptf947[] = {140,0xFFFF}; static struct eif_par_types par947 = {947, ptf947, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [REAL_64] */ static EIF_TYPE_INDEX ptf948[] = {140,0xFFFF}; static struct eif_par_types par948 = {948, ptf948, (uint16) 1, (uint16) 1, (char) 0}; /* SET [INTEGER_32] */ static EIF_TYPE_INDEX ptf949[] = {554,218,0xFFFF}; static struct eif_par_types par949 = {949, ptf949, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [POINTER] */ static EIF_TYPE_INDEX ptf950[] = {140,0xFFFF}; static struct eif_par_types par950 = {950, ptf950, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [INTEGER_16] */ static EIF_TYPE_INDEX ptf951[] = {140,0xFFFF}; static struct eif_par_types par951 = {951, ptf951, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [BOOLEAN] */ static EIF_TYPE_INDEX ptf952[] = {140,0xFFFF}; static struct eif_par_types par952 = {952, ptf952, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [NATURAL_64] */ static EIF_TYPE_INDEX ptf953[] = {140,0xFFFF}; static struct eif_par_types par953 = {953, ptf953, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_FIELD_RECORD [INTEGER_32] */ static EIF_TYPE_INDEX ptf954[] = {140,0xFFFF}; static struct eif_par_types par954 = {954, ptf954, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [REAL_32] */ static EIF_TYPE_INDEX ptf955[] = {140,0xFFFF}; static struct eif_par_types par955 = {955, ptf955, (uint16) 1, (uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [NATURAL_64] */ static EIF_TYPE_INDEX ptf956[] = {140,0xFFFF}; static struct eif_par_types par956 = {956, ptf956, (uint16) 1, (uint16) 1, (char) 0}; /* CELL [NATURAL_64] */ static EIF_TYPE_INDEX ptf957[] = {0,0xFFFF}; static struct eif_par_types par957 = {957, ptf957, (uint16) 1, 
(uint16) 1, (char) 0}; /* RT_DBG_LOCAL_RECORD [INTEGER_64] */ static EIF_TYPE_INDEX ptf958[] = {140,0xFFFF}; static struct eif_par_types par958 = {958, ptf958, (uint16) 1, (uint16) 1, (char) 0}; /* PACKET */ static EIF_TYPE_INDEX ptf959[] = {0,0xFFFF}; static struct eif_par_types par959 = {959, ptf959, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_EXPRESSION_PROCESSOR */ static EIF_TYPE_INDEX ptf960[] = {0,0xFFFF}; static struct eif_par_types par960 = {960, ptf960, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_SYSTEM_PATH */ static EIF_TYPE_INDEX ptf961[] = {0,0xFFFF}; static struct eif_par_types par961 = {961, ptf961, (uint16) 1, (uint16) 0, (char) 0}; /* C_DATE */ static EIF_TYPE_INDEX ptf962[] = {0,0xFFFF}; static struct eif_par_types par962 = {962, ptf962, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_FILE_SYSTEM */ static EIF_TYPE_INDEX ptf963[] = {0,0xFFFF}; static struct eif_par_types par963 = {963, ptf963, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_ASSERTIONS */ static EIF_TYPE_INDEX ptf964[] = {0,0xFFFF}; static struct eif_par_types par964 = {964, ptf964, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_TEST_INVOCATION_RESPONSE */ static EIF_TYPE_INDEX ptf965[] = {0,0xFFFF}; static struct eif_par_types par965 = {965, ptf965, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_ENVIRONMENT */ static EIF_TYPE_INDEX ptf966[] = {0,0xFFFF}; static struct eif_par_types par966 = {966, ptf966, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_TIME_LANGUAGE_CONSTANTS */ static EIF_TYPE_INDEX ptf967[] = {0,0xFFFF}; static struct eif_par_types par967 = {967, ptf967, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_TIME_TOOLS */ static EIF_TYPE_INDEX ptf968[] = {967,0xFFFF}; static struct eif_par_types par968 = {968, ptf968, (uint16) 1, (uint16) 0, (char) 0}; /* GROUP_ELEMENT */ static EIF_TYPE_INDEX ptf969[] = {0,0xFFFF}; static struct eif_par_types par969 = {969, ptf969, (uint16) 1, (uint16) 0, (char) 0}; /* SOCKET_ADDRESS */ static EIF_TYPE_INDEX ptf970[] = {0,0xFFFF}; static struct eif_par_types par970 = 
{970, ptf970, (uint16) 1, (uint16) 0, (char) 0}; /* SOCKET_TIMEOUT_UTILITIES */ static EIF_TYPE_INDEX ptf971[] = {0,0xFFFF}; static struct eif_par_types par971 = {971, ptf971, (uint16) 1, (uint16) 0, (char) 0}; /* ADDRINFO */ static EIF_TYPE_INDEX ptf972[] = {0,0xFFFF}; static struct eif_par_types par972 = {972, ptf972, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_SHARED_CONSTANTS */ static EIF_TYPE_INDEX ptf973[] = {0,0xFFFF}; static struct eif_par_types par973 = {973, ptf973, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_TEST_SET */ static EIF_TYPE_INDEX ptf974[] = {0,0xFFFF}; static struct eif_par_types par974 = {974, ptf974, (uint16) 1, (uint16) 0, (char) 0}; /* NEW_TEST_SET */ static EIF_TYPE_INDEX ptf975[] = {974,0xFFFF}; static struct eif_par_types par975 = {975, ptf975, (uint16) 1, (uint16) 0, (char) 0}; /* CODE_VALIDITY_CHECKER */ static EIF_TYPE_INDEX ptf976[] = {0,0xFFFF}; static struct eif_par_types par976 = {976, ptf976, (uint16) 1, (uint16) 0, (char) 0}; /* TIME_UTILITY */ static EIF_TYPE_INDEX ptf977[] = {0,0xFFFF}; static struct eif_par_types par977 = {977, ptf977, (uint16) 1, (uint16) 0, (char) 0}; /* TIME_CONSTANTS */ static EIF_TYPE_INDEX ptf978[] = {977,0xFFFF}; static struct eif_par_types par978 = {978, ptf978, (uint16) 1, (uint16) 0, (char) 0}; /* TIME_MEASUREMENT */ static EIF_TYPE_INDEX ptf979[] = {978,0xFFFF}; static struct eif_par_types par979 = {979, ptf979, (uint16) 1, (uint16) 0, (char) 0}; /* TIME_VALUE */ static EIF_TYPE_INDEX ptf980[] = {979,0xFFFF}; static struct eif_par_types par980 = {980, ptf980, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_CONSTANTS */ static EIF_TYPE_INDEX ptf981[] = {977,0xFFFF}; static struct eif_par_types par981 = {981, ptf981, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_MEASUREMENT */ static EIF_TYPE_INDEX ptf982[] = {981,0xFFFF}; static struct eif_par_types par982 = {982, ptf982, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_TIME_MEASUREMENT */ static EIF_TYPE_INDEX ptf983[] = {981,0xFFF7,978,0xFFFF}; static struct 
eif_par_types par983 = {983, ptf983, (uint16) 2, (uint16) 0, (char) 0}; /* DATE_TIME_VALUE */ static EIF_TYPE_INDEX ptf984[] = {983,0xFFFF}; static struct eif_par_types par984 = {984, ptf984, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_EXPRESSION */ static EIF_TYPE_INDEX ptf985[] = {0,0xFFFF}; static struct eif_par_types par985 = {985, ptf985, (uint16) 1, (uint16) 0, (char) 0}; /* INET_PROPERTIES */ static EIF_TYPE_INDEX ptf986[] = {0,0xFFFF}; static struct eif_par_types par986 = {986, ptf986, (uint16) 1, (uint16) 0, (char) 0}; /* INET_ADDRESS_IMPL */ static EIF_TYPE_INDEX ptf987[] = {0,0xFFFF}; static struct eif_par_types par987 = {987, ptf987, (uint16) 1, (uint16) 0, (char) 0}; /* INET_ADDRESS_IMPL_V6 */ static EIF_TYPE_INDEX ptf988[] = {987,0xFFFF}; static struct eif_par_types par988 = {988, ptf988, (uint16) 1, (uint16) 0, (char) 0}; /* INET_ADDRESS_IMPL_V4 */ static EIF_TYPE_INDEX ptf989[] = {987,0xFFFF}; static struct eif_par_types par989 = {989, ptf989, (uint16) 1, (uint16) 0, (char) 0}; /* ERL_CONSTANTS */ static EIF_TYPE_INDEX ptf990[] = {0,0xFFFF}; static struct eif_par_types par990 = {990, ptf990, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_CONSTANT */ static EIF_TYPE_INDEX ptf991[] = {985,0xFFF7,990,0xFFFF}; static struct eif_par_types par991 = {991, ptf991, (uint16) 2, (uint16) 0, (char) 0}; /* INET_ADDRESS */ static EIF_TYPE_INDEX ptf992[] = {0,0xFFFF}; static struct eif_par_types par992 = {992, ptf992, (uint16) 1, (uint16) 0, (char) 0}; /* INET6_ADDRESS */ static EIF_TYPE_INDEX ptf993[] = {992,0xFFF7,986,0xFFFF}; static struct eif_par_types par993 = {993, ptf993, (uint16) 2, (uint16) 0, (char) 0}; /* INET4_ADDRESS */ static EIF_TYPE_INDEX ptf994[] = {992,0xFFFF}; static struct eif_par_types par994 = {994, ptf994, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_EXTERNALS */ static EIF_TYPE_INDEX ptf995[] = {0,0xFFFF}; static struct eif_par_types par995 = {995, ptf995, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_STORE */ static EIF_TYPE_INDEX ptf996[] = 
{41,0xFFFF}; static struct eif_par_types par996 = {996, ptf996, (uint16) 1, (uint16) 0, (char) 0}; /* SOCKET_RESOURCES */ static EIF_TYPE_INDEX ptf997[] = {0,0xFFFF}; static struct eif_par_types par997 = {997, ptf997, (uint16) 1, (uint16) 0, (char) 0}; /* INET_ADDRESS_FACTORY */ static EIF_TYPE_INDEX ptf998[] = {986,0xFFF7,997,0xFFFF}; static struct eif_par_types par998 = {998, ptf998, (uint16) 2, (uint16) 0, (char) 0}; /* NETWORK_SOCKET_ADDRESS */ static EIF_TYPE_INDEX ptf999[] = {998,0xFFF7,997,0xFFF7,970,0xFFFF}; static struct eif_par_types par999 = {999, ptf999, (uint16) 3, (uint16) 0, (char) 0}; /* DURATION */ static EIF_TYPE_INDEX ptf1000[] = {106,0xFFF7,969,0xFFFF}; static struct eif_par_types par1000 = {1000, ptf1000, (uint16) 2, (uint16) 0, (char) 0}; /* TIME_DURATION */ static EIF_TYPE_INDEX ptf1001[] = {1000,0xFFF7,979,0xFFF7,118,0xFFFF}; static struct eif_par_types par1001 = {1001, ptf1001, (uint16) 3, (uint16) 0, (char) 0}; /* DATE_DURATION */ static EIF_TYPE_INDEX ptf1002[] = {1000,0xFFF7,981,0xFFF7,982,0xFFFF}; static struct eif_par_types par1002 = {1002, ptf1002, (uint16) 3, (uint16) 0, (char) 0}; /* DATE_TIME_DURATION */ static EIF_TYPE_INDEX ptf1003[] = {1000,0xFFF7,983,0xFFFF}; static struct eif_par_types par1003 = {1003, ptf1003, (uint16) 2, (uint16) 0, (char) 0}; /* ABSOLUTE */ static EIF_TYPE_INDEX ptf1004[] = {107,0xFFFF}; static struct eif_par_types par1004 = {1004, ptf1004, (uint16) 1, (uint16) 0, (char) 0}; /* APPLICATION */ static EIF_TYPE_INDEX ptf1005[] = {147,0xFFFF}; static struct eif_par_types par1005 = {1005, ptf1005, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_EVALUATOR */ static EIF_TYPE_INDEX ptf1006[] = {153,0xFFF7,130,0xFFF7,995,0xFFF7,36,0xFFFF}; static struct eif_par_types par1006 = {1006, ptf1006, (uint16) 4, (uint16) 0, (char) 0}; /* DATE_TIME_CODE */ static EIF_TYPE_INDEX ptf1007[] = {976,0xFFF7,137,0xFFFF}; static struct eif_par_types par1007 = {1007, ptf1007, (uint16) 2, (uint16) 0, (char) 0}; /* EQA_PARTIAL_RESULT */ 
static EIF_TYPE_INDEX ptf1008[] = {156,0xFFFF}; static struct eif_par_types par1008 = {1008, ptf1008, (uint16) 1, (uint16) 0, (char) 0}; /* EQA_RESULT */ static EIF_TYPE_INDEX ptf1009[] = {1008,0xFFFF}; static struct eif_par_types par1009 = {1009, ptf1009, (uint16) 1, (uint16) 0, (char) 0}; /* DATE_VALUE */ static EIF_TYPE_INDEX ptf1010[] = {982,0xFFF7,156,0xFFFF}; static struct eif_par_types par1010 = {1010, ptf1010, (uint16) 2, (uint16) 0, (char) 0}; /* EXTERNAL_OBJECT */ static EIF_TYPE_INDEX ptf1011[] = {168,0xFFFF}; static struct eif_par_types par1011 = {1011, ptf1011, (uint16) 1, (uint16) 0, (char) 0}; /* ADDRINFO_1 */ static EIF_TYPE_INDEX ptf1012[] = {972,0xFFF7,1011,0xFFFF}; static struct eif_par_types par1012 = {1012, ptf1012, (uint16) 2, (uint16) 0, (char) 0}; /* ADDRINFO_2 */ static EIF_TYPE_INDEX ptf1013[] = {1012,0xFFFF}; static struct eif_par_types par1013 = {1013, ptf1013, (uint16) 1, (uint16) 0, (char) 0}; /* SOCKET */ static EIF_TYPE_INDEX ptf1014[] = {997,0xFFF7,179,0xFFF7,62,0xFFF7,123,0xFFFF}; static struct eif_par_types par1014 = {1014, ptf1014, (uint16) 4, (uint16) 0, (char) 0}; /* STREAM_SOCKET */ static EIF_TYPE_INDEX ptf1015[] = {1014,0xFFFF}; static struct eif_par_types par1015 = {1015, ptf1015, (uint16) 1, (uint16) 0, (char) 0}; /* NETWORK_SOCKET */ static EIF_TYPE_INDEX ptf1016[] = {1014,0xFFF7,971,0xFFFF}; static struct eif_par_types par1016 = {1016, ptf1016, (uint16) 2, (uint16) 0, (char) 0}; /* NETWORK_STREAM_SOCKET */ static EIF_TYPE_INDEX ptf1017[] = {998,0xFFF7,1016,0xFFF7,1015,0xFFFF}; static struct eif_par_types par1017 = {1017, ptf1017, (uint16) 3, (uint16) 0, (char) 0}; /* EQA_TEST_OUTPUT_BUFFER */ static EIF_TYPE_INDEX ptf1018[] = {183,0xFFFF}; static struct eif_par_types par1018 = {1018, ptf1018, (uint16) 1, (uint16) 0, (char) 0}; /* ITP_VARIABLE */ static EIF_TYPE_INDEX ptf1019[] = {985,0xFFF7,184,0xFFFF}; static struct eif_par_types par1019 = {1019, ptf1019, (uint16) 2, (uint16) 0, (char) 0}; /* FIND_SEPARATOR_FACILITY */ 
static EIF_TYPE_INDEX ptf1020[] = {976,0xFFF7,0,0xFFFF}; static struct eif_par_types par1020 = {1020, ptf1020, (uint16) 2, (uint16) 0, (char) 0}; /* DATE_TIME_CODE_STRING */ static EIF_TYPE_INDEX ptf1021[] = {1020,0xFFFF}; static struct eif_par_types par1021 = {1021, ptf1021, (uint16) 1, (uint16) 0, (char) 0}; /* TIME_VALIDITY_CHECKER */ static EIF_TYPE_INDEX ptf1022[] = {978,0xFFF7,980,0xFFF7,0,0xFFFF}; static struct eif_par_types par1022 = {1022, ptf1022, (uint16) 3, (uint16) 0, (char) 0}; /* TIME */ static EIF_TYPE_INDEX ptf1023[] = {1004,0xFFF7,980,0xFFF7,1022,0xFFF7,137,0xFFFF}; static struct eif_par_types par1023 = {1023, ptf1023, (uint16) 4, (uint16) 0, (char) 0}; /* DATE_VALIDITY_CHECKER */ static EIF_TYPE_INDEX ptf1024[] = {981,0xFFF7,1010,0xFFF7,0,0xFFFF}; static struct eif_par_types par1024 = {1024, ptf1024, (uint16) 3, (uint16) 0, (char) 0}; /* DATE */ static EIF_TYPE_INDEX ptf1025[] = {1004,0xFFF7,1010,0xFFF7,1024,0xFFF7,137,0xFFFF}; static struct eif_par_types par1025 = {1025, ptf1025, (uint16) 4, (uint16) 0, (char) 0}; /* DATE_TIME_VALIDITY_CHECKER */ static EIF_TYPE_INDEX ptf1026[] = {1024,0xFFF7,1022,0xFFF7,0,0xFFFF}; static struct eif_par_types par1026 = {1026, ptf1026, (uint16) 3, (uint16) 0, (char) 0}; /* DATE_TIME_PARSER */ static EIF_TYPE_INDEX ptf1027[] = {1026,0xFFF7,1020,0xFFFF}; static struct eif_par_types par1027 = {1027, ptf1027, (uint16) 2, (uint16) 0, (char) 0}; /* DATE_TIME */ static EIF_TYPE_INDEX ptf1028[] = {1004,0xFFF7,984,0xFFF7,1026,0xFFF7,137,0xFFFF}; static struct eif_par_types par1028 = {1028, ptf1028, (uint16) 4, (uint16) 0, (char) 0}; /* EQA_TEST_INVOCATION_EXCEPTION */ static EIF_TYPE_INDEX ptf1029[] = {0,0xFFF7,30,0xFFF7,130,0xFFF7,156,0xFFFF}; static struct eif_par_types par1029 = {1029, ptf1029, (uint16) 4, (uint16) 0, (char) 0}; /* ITP_INTERPRETER */ static EIF_TYPE_INDEX ptf1030[] = {0,0xFFF7,62,0xFFF7,147,0xFFF7,973,0xFFF7,990,0xFFF7,153,0xFFF7,36,0xFFF7,995,0xFFFF}; static struct eif_par_types par1030 = {1030, 
ptf1030, (uint16) 8, (uint16) 0, (char) 0}; /* RING_BUFFER [INTEGER_32] */ static EIF_TYPE_INDEX ptf1031[] = {0,0xFFFF}; static struct eif_par_types par1031 = {1031, ptf1031, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED_QUEUE [INTEGER_32] */ static EIF_TYPE_INDEX ptf1032[] = {1033,218,0xFFF7,568,218,0xFFFF}; static struct eif_par_types par1032 = {1032, ptf1032, (uint16) 2, (uint16) 1, (char) 0}; /* ARRAYED_QUEUE [INTEGER_32] */ static EIF_TYPE_INDEX ptf1033[] = {1035,218,0xFFF7,567,218,0xFFF7,156,0xFFFF}; static struct eif_par_types par1033 = {1033, ptf1033, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAYED_QUEUE_ITERATION_CURSOR [INTEGER_32] */ static EIF_TYPE_INDEX ptf1034[] = {406,218,0xFFFF}; static struct eif_par_types par1034 = {1034, ptf1034, (uint16) 1, (uint16) 1, (char) 0}; /* QUEUE [INTEGER_32] */ static EIF_TYPE_INDEX ptf1035[] = {903,218,0xFFFF}; static struct eif_par_types par1035 = {1035, ptf1035, (uint16) 1, (uint16) 1, (char) 0}; /* RING_BUFFER [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1036[] = {0,0xFFFF}; static struct eif_par_types par1036 = {1036, ptf1036, (uint16) 1, (uint16) 1, (char) 0}; /* BOUNDED_QUEUE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1037[] = {1038,197,0xFFF7,523,197,0xFFFF}; static struct eif_par_types par1037 = {1037, ptf1037, (uint16) 2, (uint16) 1, (char) 0}; /* ARRAYED_QUEUE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1038[] = {1040,197,0xFFF7,522,197,0xFFF7,156,0xFFFF}; static struct eif_par_types par1038 = {1038, ptf1038, (uint16) 3, (uint16) 1, (char) 0}; /* ARRAYED_QUEUE_ITERATION_CURSOR [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1039[] = {407,197,0xFFFF}; static struct eif_par_types par1039 = {1039, ptf1039, (uint16) 1, (uint16) 1, (char) 0}; /* QUEUE [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1040[] = {1041,197,0xFFFF}; static struct eif_par_types par1040 = {1040, ptf1040, (uint16) 1, (uint16) 1, (char) 0}; /* DISPENSER [CHARACTER_8] */ static EIF_TYPE_INDEX ptf1041[] = {501,197,0xFFF7,497,197,0xFFFF}; static struct 
eif_par_types par1041 = {1041, ptf1041, (uint16) 2, (uint16) 1, (char) 0}; /* INTERVAL [G#1] */ static EIF_TYPE_INDEX ptf1042[] = {106,0xFFFF}; static struct eif_par_types par1042 = {1042, ptf1042, (uint16) 1, (uint16) 1, (char) 0}; /* EQA_TEST_EVALUATOR [G#1] */ static EIF_TYPE_INDEX ptf1043[] = {153,0xFFF7,62,0xFFF7,130,0xFFFF}; static struct eif_par_types par1043 = {1043, ptf1043, (uint16) 3, (uint16) 1, (char) 0}; int egc_partab_size_init = 1043 ; struct eif_par_types *egc_partab_init[] = { &par0, &par1, &par2, &par3, &par4, &par5, &par6, &par7, &par8, &par9, &par10, &par11, &par12, &par13, &par14, &par15, &par16, &par17, &par18, &par19, &par20, &par21, &par22, &par23, &par24, &par25, &par26, &par27, &par28, &par29, &par30, &par31, &par32, &par33, &par34, &par35, &par36, &par37, &par38, &par39, &par40, &par41, &par42, &par43, &par44, &par45, &par46, &par47, &par48, &par49, &par50, &par51, &par52, &par53, &par54, &par55, &par56, &par57, &par58, &par59, &par60, &par61, &par62, &par63, &par64, &par65, &par66, &par67, &par68, &par69, &par70, &par71, &par72, &par73, &par74, &par75, &par76, &par77, &par78, &par79, &par80, &par81, &par82, &par83, &par84, &par85, &par86, &par87, &par88, &par89, &par90, &par91, &par92, &par93, &par94, &par95, &par96, &par97, &par98, &par99, &par100, &par101, &par102, &par103, &par104, &par105, &par106, &par107, &par108, &par109, &par110, &par111, &par112, &par113, &par114, &par115, &par116, &par117, &par118, &par119, &par120, &par121, &par122, &par123, &par124, &par125, &par126, &par127, &par128, &par129, &par130, &par131, &par132, &par133, &par134, &par135, &par136, &par137, &par138, &par139, &par140, &par141, &par142, &par143, &par144, &par145, &par146, &par147, &par148, &par149, &par150, &par151, &par152, &par153, &par154, &par155, &par156, &par157, &par158, &par159, &par160, &par161, &par162, &par163, &par164, &par165, &par166, &par167, &par168, &par169, &par170, &par171, &par172, &par173, &par174, &par175, &par176, &par177, 
&par178, &par179, &par180, &par181, &par182, &par183, &par184, &par185, &par186, &par187, &par188, &par189, &par190, &par191, &par192, &par193, &par194, &par195, &par196, &par197, &par198, &par199, &par200, &par201, &par202, &par203, &par204, &par205, &par206, &par207, &par208, &par209, &par210, &par211, &par212, &par213, &par214, &par215, &par216, &par217, &par218, &par219, &par220, &par221, &par222, &par223, &par224, &par225, &par226, &par227, &par228, &par229, &par230, &par231, &par232, &par233, &par234, &par235, &par236, &par237, &par238, &par239, &par240, &par241, &par242, &par243, &par244, &par245, &par246, &par247, &par248, &par249, &par250, &par251, &par252, &par253, &par254, &par255, &par256, &par257, &par258, &par259, &par260, &par261, &par262, &par263, &par264, &par265, &par266, &par267, &par268, &par269, &par270, &par271, &par272, &par273, &par274, &par275, &par276, &par277, &par278, &par279, &par280, &par281, &par282, &par283, &par284, &par285, &par286, &par287, &par288, &par289, &par290, &par291, &par292, &par293, &par294, &par295, &par296, &par297, &par298, &par299, &par300, &par301, &par302, &par303, &par304, &par305, &par306, &par307, &par308, &par309, &par310, &par311, &par312, &par313, &par314, &par315, &par316, &par317, &par318, &par319, &par320, &par321, &par322, &par323, &par324, &par325, &par326, &par327, &par328, &par329, &par330, &par331, &par332, &par333, &par334, &par335, &par336, &par337, &par338, &par339, &par340, &par341, &par342, &par343, &par344, &par345, &par346, &par347, &par348, &par349, &par350, &par351, &par352, &par353, &par354, &par355, &par356, &par357, &par358, &par359, &par360, &par361, &par362, &par363, &par364, &par365, &par366, &par367, &par368, &par369, &par370, &par371, &par372, &par373, &par374, &par375, &par376, &par377, &par378, &par379, &par380, &par381, &par382, &par383, &par384, &par385, &par386, &par387, &par388, &par389, &par390, &par391, &par392, &par393, &par394, &par395, &par396, &par397, &par398, &par399, 
&par400, &par401, &par402, &par403, &par404, &par405, &par406, &par407, &par408, &par409, &par410, &par411, &par412, &par413, &par414, &par415, &par416, &par417, &par418, &par419, &par420, &par421, &par422, &par423, &par424, &par425, &par426, &par427, &par428, &par429, &par430, &par431, &par432, &par433, &par434, &par435, &par436, &par437, &par438, &par439, &par440, &par441, &par442, &par443, &par444, &par445, &par446, &par447, &par448, &par449, &par450, &par451, &par452, &par453, &par454, &par455, &par456, &par457, &par458, &par459, &par460, &par461, &par462, &par463, &par464, &par465, &par466, &par467, &par468, &par469, &par470, &par471, &par472, &par473, &par474, &par475, &par476, &par477, &par478, &par479, &par480, &par481, &par482, &par483, &par484, &par485, &par486, &par487, &par488, &par489, &par490, &par491, &par492, &par493, &par494, &par495, &par496, &par497, &par498, &par499, &par500, &par501, &par502, &par503, &par504, &par505, &par506, &par507, &par508, &par509, &par510, &par511, &par512, &par513, &par514, &par515, &par516, &par517, &par518, &par519, &par520, &par521, &par522, &par523, &par524, &par525, &par526, &par527, &par528, &par529, &par530, &par531, &par532, &par533, &par534, &par535, &par536, &par537, &par538, &par539, &par540, &par541, &par542, &par543, &par544, &par545, &par546, &par547, &par548, &par549, &par550, &par551, &par552, &par553, &par554, &par555, &par556, &par557, &par558, &par559, &par560, &par561, &par562, &par563, &par564, &par565, &par566, &par567, &par568, &par569, &par570, &par571, &par572, &par573, &par574, &par575, &par576, &par577, &par578, &par579, &par580, &par581, &par582, &par583, &par584, &par585, &par586, &par587, &par588, &par589, &par590, &par591, &par592, &par593, &par594, &par595, &par596, &par597, &par598, &par599, &par600, &par601, &par602, &par603, &par604, &par605, &par606, &par607, &par608, &par609, &par610, &par611, &par612, &par613, &par614, &par615, &par616, &par617, &par618, &par619, &par620, &par621, 
&par622, &par623, &par624, &par625, &par626, &par627, &par628, &par629, &par630, &par631, &par632, &par633, &par634, &par635, &par636, &par637, &par638, &par639, &par640, &par641, &par642, &par643, &par644, &par645, &par646, &par647, &par648, &par649, &par650, &par651, &par652, &par653, &par654, &par655, &par656, &par657, &par658, &par659, &par660, &par661, &par662, &par663, &par664, &par665, &par666, &par667, &par668, &par669, &par670, &par671, &par672, &par673, &par674, &par675, &par676, &par677, &par678, &par679, &par680, &par681, &par682, &par683, &par684, &par685, &par686, &par687, &par688, &par689, &par690, &par691, &par692, &par693, &par694, &par695, &par696, &par697, &par698, &par699, &par700, &par701, &par702, &par703, &par704, &par705, &par706, &par707, &par708, &par709, &par710, &par711, &par712, &par713, &par714, &par715, &par716, &par717, &par718, &par719, &par720, &par721, &par722, &par723, &par724, &par725, &par726, &par727, &par728, &par729, &par730, &par731, &par732, &par733, &par734, &par735, &par736, &par737, &par738, &par739, &par740, &par741, &par742, &par743, &par744, &par745, &par746, &par747, &par748, &par749, &par750, &par751, &par752, &par753, &par754, &par755, &par756, &par757, &par758, &par759, &par760, &par761, &par762, &par763, &par764, &par765, &par766, &par767, &par768, &par769, &par770, &par771, &par772, &par773, &par774, &par775, &par776, &par777, &par778, &par779, &par780, &par781, &par782, &par783, &par784, &par785, &par786, &par787, &par788, &par789, &par790, &par791, &par792, &par793, &par794, &par795, &par796, &par797, &par798, &par799, &par800, &par801, &par802, &par803, &par804, &par805, &par806, &par807, &par808, &par809, &par810, &par811, &par812, &par813, &par814, &par815, &par816, &par817, &par818, &par819, &par820, &par821, &par822, &par823, &par824, &par825, &par826, &par827, &par828, &par829, &par830, &par831, &par832, &par833, &par834, &par835, &par836, &par837, &par838, &par839, &par840, &par841, &par842, &par843, 
&par844, &par845, &par846, &par847, &par848, &par849, &par850, &par851, &par852, &par853, &par854, &par855, &par856, &par857, &par858, &par859, &par860, &par861, &par862, &par863, &par864, &par865, &par866, &par867, &par868, &par869, &par870, &par871, &par872, &par873, &par874, &par875, &par876, &par877, &par878, &par879, &par880, &par881, &par882, &par883, &par884, &par885, &par886, &par887, &par888, &par889, &par890, &par891, &par892, &par893, &par894, &par895, &par896, &par897, &par898, &par899, &par900, &par901, &par902, &par903, &par904, &par905, &par906, &par907, &par908, &par909, &par910, &par911, &par912, &par913, &par914, &par915, &par916, &par917, &par918, &par919, &par920, &par921, &par922, &par923, &par924, &par925, &par926, &par927, &par928, &par929, &par930, &par931, &par932, &par933, &par934, &par935, &par936, &par937, &par938, &par939, &par940, &par941, &par942, &par943, &par944, &par945, &par946, &par947, &par948, &par949, &par950, &par951, &par952, &par953, &par954, &par955, &par956, &par957, &par958, &par959, &par960, &par961, &par962, &par963, &par964, &par965, &par966, &par967, &par968, &par969, &par970, &par971, &par972, &par973, &par974, &par975, &par976, &par977, &par978, &par979, &par980, &par981, &par982, &par983, &par984, &par985, &par986, &par987, &par988, &par989, &par990, &par991, &par992, &par993, &par994, &par995, &par996, &par997, &par998, &par999, &par1000, &par1001, &par1002, &par1003, &par1004, &par1005, &par1006, &par1007, &par1008, &par1009, &par1010, &par1011, &par1012, &par1013, &par1014, &par1015, &par1016, &par1017, &par1018, &par1019, &par1020, &par1021, &par1022, &par1023, &par1024, &par1025, &par1026, &par1027, &par1028, &par1029, &par1030, &par1031, &par1032, &par1033, &par1034, &par1035, &par1036, &par1037, &par1038, &par1039, &par1040, &par1041, &par1042, &par1043, NULL}; #ifdef __cplusplus } #endif
812091.c
/* +------------------------------------------------------------------------+ | Phalcon Framework | +------------------------------------------------------------------------+ | Copyright (c) 2011-2013 Phalcon Team (http://www.phalconphp.com) | +------------------------------------------------------------------------+ | This source file is subject to the New BSD License that is bundled | | with this package in the file docs/LICENSE.txt. | | | | If you did not receive a copy of the license and are unable to | | obtain it through the world-wide-web, please send an email | | to [email protected] so we can send you a copy immediately. | +------------------------------------------------------------------------+ | Authors: Andres Gutierrez <[email protected]> | | Eduar Carvajal <[email protected]> | | ZhuZongXin <[email protected]> | +------------------------------------------------------------------------+ */ #include "py/matplot.h" #include "py/exception.h" #include "kernel/main.h" #include "kernel/memory.h" #include "kernel/exception.h" #include "kernel/object.h" #include "kernel/fcall.h" #include "kernel/array.h" #include "kernel/hash.h" #include "kernel/concat.h" #include "kernel/operators.h" #include "kernel/string.h" #include "kernel/output.h" /** * Phalcon\Py\Matplot * */ zend_class_entry *phalcon_py_matplot_ce; PHP_METHOD(Phalcon_Py_Matplot, factory); PHP_METHOD(Phalcon_Py_Matplot, __construct); PHP_METHOD(Phalcon_Py_Matplot, annotate); PHP_METHOD(Phalcon_Py_Matplot, plot); PHP_METHOD(Phalcon_Py_Matplot, fillBetween); PHP_METHOD(Phalcon_Py_Matplot, hist); PHP_METHOD(Phalcon_Py_Matplot, errorbar); PHP_METHOD(Phalcon_Py_Matplot, figure); PHP_METHOD(Phalcon_Py_Matplot, legend); PHP_METHOD(Phalcon_Py_Matplot, ylim); PHP_METHOD(Phalcon_Py_Matplot, getYlim); PHP_METHOD(Phalcon_Py_Matplot, xlim); PHP_METHOD(Phalcon_Py_Matplot, getXlim); PHP_METHOD(Phalcon_Py_Matplot, subplot); PHP_METHOD(Phalcon_Py_Matplot, title); PHP_METHOD(Phalcon_Py_Matplot, axis); 
PHP_METHOD(Phalcon_Py_Matplot, xlabel); PHP_METHOD(Phalcon_Py_Matplot, ylabel); PHP_METHOD(Phalcon_Py_Matplot, grid); PHP_METHOD(Phalcon_Py_Matplot, show); PHP_METHOD(Phalcon_Py_Matplot, close); PHP_METHOD(Phalcon_Py_Matplot, draw); PHP_METHOD(Phalcon_Py_Matplot, pause); PHP_METHOD(Phalcon_Py_Matplot, save); PHP_METHOD(Phalcon_Py_Matplot, clf); PHP_METHOD(Phalcon_Py_Matplot, tightLayout); PHP_METHOD(Phalcon_Py_Matplot, __call); ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_factory, 0, 0, 0) ZEND_ARG_TYPE_INFO(0, backend, IS_STRING, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot___construct, 0, 0, 0) ZEND_ARG_TYPE_INFO(0, backend, IS_STRING, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_annotate, 0, 0, 3) ZEND_ARG_TYPE_INFO(0, annotation, IS_STRING, 0) ZEND_ARG_TYPE_INFO(0, x, IS_DOUBLE, 0) ZEND_ARG_TYPE_INFO(0, y, IS_DOUBLE, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_plot, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, x, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, y, IS_ARRAY, 1) ZEND_ARG_TYPE_INFO(0, format, IS_STRING, 1) ZEND_ARG_TYPE_INFO(0, style, IS_STRING, 1) ZEND_ARG_TYPE_INFO(0, label, IS_STRING, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_fillbetween, 0, 0, 4) ZEND_ARG_TYPE_INFO(0, x, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, y1, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, y2, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, keywords, IS_ARRAY, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_hist, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, y, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, bins, IS_LONG, 1) ZEND_ARG_TYPE_INFO(0, color, IS_STRING, 1) ZEND_ARG_TYPE_INFO(0, alpha, IS_STRING, 1) ZEND_ARG_TYPE_INFO(0, label, IS_STRING, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_errorbar, 0, 0, 3) ZEND_ARG_TYPE_INFO(0, x, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, y, IS_ARRAY, 0) ZEND_ARG_TYPE_INFO(0, yerr, IS_ARRAY, 0) ZEND_END_ARG_INFO() 
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_ylim, 0, 0, 2) ZEND_ARG_TYPE_INFO(0, left, IS_DOUBLE, 0) ZEND_ARG_TYPE_INFO(0, right, IS_DOUBLE, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_xlim, 0, 0, 2) ZEND_ARG_TYPE_INFO(0, left, IS_DOUBLE, 0) ZEND_ARG_TYPE_INFO(0, right, IS_DOUBLE, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_title, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, title, IS_STRING, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_axis, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, axis, IS_STRING, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_xlabel, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, xlabel, IS_STRING, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_ylabel, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, ylabel, IS_STRING, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_grid, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, flag, _IS_BOOL, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_pause, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, interval, IS_DOUBLE, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot_save, 0, 0, 1) ZEND_ARG_TYPE_INFO(0, filename, IS_STRING, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_py_matplot___call, 0, 0, 1) ZEND_ARG_INFO(0, method) ZEND_ARG_INFO(0, arguments) ZEND_END_ARG_INFO() static const zend_function_entry phalcon_py_matplot_method_entry[] = { PHP_ME(Phalcon_Py_Matplot, factory, arginfo_phalcon_py_matplot_factory, ZEND_ACC_PUBLIC|ZEND_ACC_STATIC) PHP_ME(Phalcon_Py_Matplot, __construct, arginfo_phalcon_py_matplot___construct, ZEND_ACC_PRIVATE|ZEND_ACC_CTOR) PHP_ME(Phalcon_Py_Matplot, annotate, arginfo_phalcon_py_matplot_annotate, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, plot, arginfo_phalcon_py_matplot_plot, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, fillBetween, arginfo_phalcon_py_matplot_fillbetween, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, hist, 
arginfo_phalcon_py_matplot_hist, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, errorbar, arginfo_phalcon_py_matplot_errorbar, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, figure, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, legend, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, ylim, arginfo_phalcon_py_matplot_ylim, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, getYlim, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, xlim, arginfo_phalcon_py_matplot_xlim, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, getXlim, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, subplot, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, title, arginfo_phalcon_py_matplot_title, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, axis, arginfo_phalcon_py_matplot_axis, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, xlabel, arginfo_phalcon_py_matplot_xlabel, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, ylabel, arginfo_phalcon_py_matplot_ylabel, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, grid, arginfo_phalcon_py_matplot_grid, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, show, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, close, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, draw, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, pause, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, save, arginfo_phalcon_py_matplot_save, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, clf, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, tightLayout, arginfo_empty, ZEND_ACC_PUBLIC) PHP_ME(Phalcon_Py_Matplot, __call, arginfo_phalcon_py_matplot___call, ZEND_ACC_PUBLIC) PHP_FE_END }; zend_object_handlers phalcon_py_matplot_object_handlers; zend_object* phalcon_py_matplot_object_create_handler(zend_class_entry *ce) { phalcon_py_matplot_object *intern = ecalloc(1, sizeof(phalcon_py_matplot_object) + zend_object_properties_size(ce)); intern->std.ce = ce; zend_object_std_init(&intern->std, ce); 
object_properties_init(&intern->std, ce); intern->std.handlers = &phalcon_py_matplot_object_handlers; return &intern->std; } void phalcon_py_matplot_object_free_handler(zend_object *object) { zend_object_std_dtor(object); } /** * Phalcon\Py\Matplot initializer */ PHALCON_INIT_CLASS(Phalcon_Py_Matplot){ PHALCON_REGISTER_CLASS_CREATE_OBJECT(Phalcon\\Py, Matplot, py_matplot, phalcon_py_matplot_method_entry, 0); zend_declare_property_null(phalcon_py_matplot_ce, SL("_instance"), ZEND_ACC_PROTECTED|ZEND_ACC_STATIC); return SUCCESS; } /** * * @return Phalcon\Py\Matplot **/ PHP_METHOD(Phalcon_Py_Matplot, factory) { zval *backend = NULL, instance = {}; phalcon_fetch_params(0, 0, 1, &backend); if (!backend) { backend = &PHALCON_GLOBAL(z_null); } phalcon_read_static_property_ce(&instance, phalcon_py_matplot_ce, SL("_instance"), PH_READONLY); if (Z_TYPE(instance) == IS_NULL) { object_init_ex(return_value, phalcon_py_matplot_ce); PHALCON_CALL_METHOD(NULL, return_value, "__construct", backend); phalcon_update_static_property_ce(phalcon_py_matplot_ce, SL("_instance"), return_value); } else { RETURN_CTOR(&instance); } } /** * Phalcon\Py\Matplot constructor * */ PHP_METHOD(Phalcon_Py_Matplot, __construct) { zval *backend = NULL; phalcon_py_matplot_object *intern; phalcon_fetch_params(0, 0, 1, &backend); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); PHP_PYTHON_THREAD_ACQUIRE(); intern->matplotlib = PyImport_ImportModule("matplotlib"); if (!intern->matplotlib) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Error loading module matplotlib!"); PHP_PYTHON_THREAD_RELEASE(); return; } Py_DECREF(intern->matplotlib); // matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time if (backend && Z_TYPE_P(backend) == IS_STRING) { PyObject_CallMethod(intern->matplotlib, "use", Z_STRVAL_P(backend)); } intern->pyplot = PyImport_ImportModule("matplotlib.pyplot"); if (!intern->pyplot) { 
PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Error loading module matplotlib.pyplot!"); PHP_PYTHON_THREAD_RELEASE(); return; } Py_DECREF(intern->pyplot); intern->pylab = PyImport_ImportModule("pylab"); if (!intern->pylab) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Error loading module pylab!"); PHP_PYTHON_THREAD_RELEASE(); return; } Py_DECREF(intern->pylab); intern->s_python_function_show = PyObject_GetAttrString(intern->pyplot, "show"); intern->s_python_function_close = PyObject_GetAttrString(intern->pyplot, "close"); intern->s_python_function_draw = PyObject_GetAttrString(intern->pyplot, "draw"); intern->s_python_function_pause = PyObject_GetAttrString(intern->pyplot, "pause"); intern->s_python_function_figure = PyObject_GetAttrString(intern->pyplot, "figure"); intern->s_python_function_plot = PyObject_GetAttrString(intern->pyplot, "plot"); intern->s_python_function_fill_between = PyObject_GetAttrString(intern->pyplot, "fill_between"); intern->s_python_function_hist = PyObject_GetAttrString(intern->pyplot,"hist"); intern->s_python_function_subplot = PyObject_GetAttrString(intern->pyplot, "subplot"); intern->s_python_function_legend = PyObject_GetAttrString(intern->pyplot, "legend"); intern->s_python_function_ylim = PyObject_GetAttrString(intern->pyplot, "ylim"); intern->s_python_function_title = PyObject_GetAttrString(intern->pyplot, "title"); intern->s_python_function_axis = PyObject_GetAttrString(intern->pyplot, "axis"); intern->s_python_function_xlabel = PyObject_GetAttrString(intern->pyplot, "xlabel"); intern->s_python_function_ylabel = PyObject_GetAttrString(intern->pyplot, "ylabel"); intern->s_python_function_grid = PyObject_GetAttrString(intern->pyplot, "grid"); intern->s_python_function_xlim = PyObject_GetAttrString(intern->pyplot, "xlim"); intern->s_python_function_save = PyObject_GetAttrString(intern->pylab, "savefig"); intern->s_python_function_annotate = PyObject_GetAttrString(intern->pyplot,"annotate"); 
intern->s_python_function_clf = PyObject_GetAttrString(intern->pyplot, "clf"); intern->s_python_function_errorbar = PyObject_GetAttrString(intern->pyplot, "errorbar"); intern->s_python_function_tight_layout = PyObject_GetAttrString(intern->pyplot, "tight_layout"); if(!intern->s_python_function_show || !intern->s_python_function_close || !intern->s_python_function_draw || !intern->s_python_function_pause || !intern->s_python_function_figure || !intern->s_python_function_plot || !intern->s_python_function_fill_between || !intern->s_python_function_subplot || !intern->s_python_function_legend || !intern->s_python_function_ylim || !intern->s_python_function_title || !intern->s_python_function_axis || !intern->s_python_function_xlabel || !intern->s_python_function_ylabel || !intern->s_python_function_grid || !intern->s_python_function_xlim || !intern->s_python_function_save || !intern->s_python_function_clf || !intern->s_python_function_annotate || !intern->s_python_function_errorbar || !intern->s_python_function_errorbar || !intern->s_python_function_tight_layout ) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Couldn't find required function!"); PHP_PYTHON_THREAD_RELEASE(); return; } if (!PyFunction_Check(intern->s_python_function_show) || !PyFunction_Check(intern->s_python_function_close) || !PyFunction_Check(intern->s_python_function_draw) || !PyFunction_Check(intern->s_python_function_pause) || !PyFunction_Check(intern->s_python_function_figure) || !PyFunction_Check(intern->s_python_function_plot) || !PyFunction_Check(intern->s_python_function_fill_between) || !PyFunction_Check(intern->s_python_function_subplot) || !PyFunction_Check(intern->s_python_function_legend) || !PyFunction_Check(intern->s_python_function_annotate) || !PyFunction_Check(intern->s_python_function_ylim) || !PyFunction_Check(intern->s_python_function_title) || !PyFunction_Check(intern->s_python_function_axis) || !PyFunction_Check(intern->s_python_function_xlabel) || 
!PyFunction_Check(intern->s_python_function_ylabel) || !PyFunction_Check(intern->s_python_function_grid) || !PyFunction_Check(intern->s_python_function_xlim) || !PyFunction_Check(intern->s_python_function_save) || !PyFunction_Check(intern->s_python_function_clf) || !PyFunction_Check(intern->s_python_function_tight_layout) || !PyFunction_Check(intern->s_python_function_errorbar) ) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Python object is unexpectedly not a PyFunction!"); PHP_PYTHON_THREAD_RELEASE(); return; } intern->s_python_empty_tuple = PyTuple_New(0); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, annotate){ zval *annotation, *x, *y; phalcon_py_matplot_object *intern; PyObject *pyxy, *pystr, *kwargs, *args, *res; phalcon_fetch_params(0, 3, 0, &annotation, &x, &y); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pyxy = PyTuple_New(2); PyTuple_SetItem(pyxy, 0, PyFloat_FromDouble(Z_DVAL_P(x))); PyTuple_SetItem(pyxy, 1, PyFloat_FromDouble(Z_DVAL_P(y))); kwargs = PyDict_New(); PyDict_SetItemString(kwargs, "xy", pyxy); pystr = PyString_FromString(Z_STRVAL_P(annotation)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pystr); res = PyObject_Call(intern->s_python_function_annotate, args, kwargs); Py_DECREF(args); Py_DECREF(kwargs); if(res) { Py_DECREF(res); RETVAL_TRUE; } else { RETVAL_FALSE; } PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, plot){ zval *_x, *_y = NULL, *format = NULL, *style = NULL, *label = NULL; phalcon_py_matplot_object *intern; PyObject *xarray, *yarray, *pystring, *plot_args, *kwargs, *res; int size = 0; phalcon_fetch_params(0, 1, 4, &_x, &_y, &format, &style, &label); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); size = zend_hash_num_elements(Z_ARRVAL_P(_x)); if (!_y || Z_TYPE_P(_y) == IS_NULL) { int i; xarray = PyList_New(size); for (i = 0; i < size; i++) { PyList_SetItem(xarray, (Py_ssize_t)i, 
PyInt_FromLong(i)); } yarray = pip_hash_to_list(_x); } else { xarray = pip_hash_to_list(_x); yarray = pip_hash_to_list(_y); } if (format && Z_TYPE_P(format) == IS_STRING) { pystring = PyString_FromString(Z_STRVAL_P(format)); } else { pystring = PyString_FromString(""); } plot_args = PyTuple_New(3); PyTuple_SetItem(plot_args, 0, xarray); PyTuple_SetItem(plot_args, 1, yarray); PyTuple_SetItem(plot_args, 2, pystring); kwargs = PyDict_New(); if (style && Z_TYPE_P(style) == IS_STRING) { PyDict_SetItemString(kwargs, "style", PyString_FromString(Z_STRVAL_P(style))); } if (label && Z_TYPE_P(label) == IS_STRING) { PyDict_SetItemString(kwargs, "label", PyString_FromString(Z_STRVAL_P(label))); } res = PyObject_Call(intern->s_python_function_plot, plot_args, kwargs); Py_DECREF(plot_args); Py_DECREF(kwargs); if(res) { pip_pyobject_to_zval(res, return_value); Py_DECREF(res); } PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, fillBetween){ zval *x, *y1, *y2, *keywords; phalcon_py_matplot_object *intern; PyObject *xarray, *y1array, *y2array, *args, *kwargs, *res; phalcon_fetch_params(0, 4, 0, &x, &y1, &y2, &keywords); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); // using numpy arrays xarray = pip_hash_to_list(x); y1array = pip_hash_to_list(y1); y2array = pip_hash_to_list(y2); // construct positional args args = PyTuple_New(3); PyTuple_SetItem(args, 0, xarray); PyTuple_SetItem(args, 1, y1array); PyTuple_SetItem(args, 2, y2array); // construct keyword args kwargs = pip_hash_to_dict(keywords); res = PyObject_Call(intern->s_python_function_fill_between, args, kwargs); Py_DECREF(args); Py_DECREF(kwargs); if(res) { Py_DECREF(res); } PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, hist){ zval *y, *bins = NULL, *color = NULL, *alpha = NULL, *label = NULL; phalcon_py_matplot_object *intern; PyObject *yarray, *plot_args, *kwargs, *res; phalcon_fetch_params(0, 1, 4, &y, &bins, &color, &alpha, &label); 
PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); yarray = pip_hash_to_list(y); plot_args = PyTuple_New(1); PyTuple_SetItem(plot_args, 0, yarray); kwargs = PyDict_New(); if (bins && Z_TYPE_P(bins) == IS_LONG) { PyDict_SetItemString(kwargs, "bins", PyLong_FromLong(Z_LVAL_P(bins))); } else { PyDict_SetItemString(kwargs, "bins", PyLong_FromLong(10)); } if (color && Z_TYPE_P(color) == IS_STRING) { PyDict_SetItemString(kwargs, "color", PyString_FromString(Z_STRVAL_P(color))); } else { PyDict_SetItemString(kwargs, "color", PyString_FromString("b")); } if (alpha && Z_TYPE_P(alpha) == IS_DOUBLE) { PyDict_SetItemString(kwargs, "alpha", PyFloat_FromDouble(Z_DVAL_P(alpha))); } else { PyDict_SetItemString(kwargs, "alpha", PyFloat_FromDouble(1.0)); } if (label && Z_TYPE_P(label) == IS_STRING) { PyDict_SetItemString(kwargs, "label", PyString_FromString(Z_STRVAL_P(label))); } res = PyObject_Call(intern->s_python_function_hist, plot_args, kwargs); Py_DECREF(plot_args); Py_DECREF(kwargs); if(res) { Py_DECREF(res); } PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, errorbar){ zval *x, *y, *yerr; phalcon_py_matplot_object *intern; PyObject *xarray, *yarray, *yerrarray, *plot_args, *kwargs, *res; phalcon_fetch_params(0, 3, 0, &x, &y, &yerr); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); xarray = pip_hash_to_list(x); yarray = pip_hash_to_list(y); yerrarray = pip_hash_to_list(yerr); plot_args = PyTuple_New(2); PyTuple_SetItem(plot_args, 0, xarray); PyTuple_SetItem(plot_args, 1, yarray); kwargs = PyDict_New(); PyDict_SetItemString(kwargs, "yerr", yerrarray); res = PyObject_Call(intern->s_python_function_errorbar, plot_args, kwargs); Py_DECREF(kwargs); Py_DECREF(plot_args); if (res) { Py_DECREF(res); } else { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to errorbar() failed."); PHP_PYTHON_THREAD_RELEASE(); } PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, 
figure){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_figure, intern->s_python_empty_tuple); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to figure() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, legend){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_legend, intern->s_python_empty_tuple); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to legend() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, ylim){ zval *left, *right; phalcon_py_matplot_object *intern; PyObject *list, *args, *res; phalcon_fetch_params(0, 2, 0, &left, &right); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); list = PyList_New(2); PyList_SetItem(list, 0, PyFloat_FromDouble(Z_DVAL_P(left))); PyList_SetItem(list, 1, PyFloat_FromDouble(Z_DVAL_P(right))); args = PyTuple_New(1); PyTuple_SetItem(args, 0, list); res = PyObject_CallObject(intern->s_python_function_ylim, args); Py_DECREF(args); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to ylim() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, getYlim){ phalcon_py_matplot_object *intern; PyObject *args, *res, *left, *right; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); args = PyTuple_New(0); res = PyObject_CallObject(intern->s_python_function_ylim, args); Py_DECREF(args); if(!res) { PHP_PYTHON_THREAD_RELEASE(); 
PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to getylim() failed."); return; } left = PyTuple_GetItem(res,0); right = PyTuple_GetItem(res,1); array_init(return_value); phalcon_array_append_double(return_value, PyFloat_AsDouble(left)); phalcon_array_append_double(return_value, PyFloat_AsDouble(right)); Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, xlim){ zval *left, *right; phalcon_py_matplot_object *intern; PyObject *list, *args, *res; phalcon_fetch_params(0, 2, 0, &left, &right); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); list = PyList_New(2); PyList_SetItem(list, 0, PyFloat_FromDouble(Z_DVAL_P(left))); PyList_SetItem(list, 1, PyFloat_FromDouble(Z_DVAL_P(right))); args = PyTuple_New(1); PyTuple_SetItem(args, 0, list); res = PyObject_CallObject(intern->s_python_function_xlim, args); Py_DECREF(args); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to xlim() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, getXlim){ phalcon_py_matplot_object *intern; PyObject *args, *res, *left, *right; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); args = PyTuple_New(0); res = PyObject_CallObject(intern->s_python_function_xlim, args); Py_DECREF(args); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to getxlim() failed."); return; } left = PyTuple_GetItem(res,0); right = PyTuple_GetItem(res,1); array_init(return_value); phalcon_array_append_double(return_value, PyFloat_AsDouble(left)); phalcon_array_append_double(return_value, PyFloat_AsDouble(right)); Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, subplot){ zval *nrows, *ncols, *plot_number; phalcon_py_matplot_object *intern; PyObject *args, *res; phalcon_fetch_params(0, 3, 0, &nrows, &ncols, &plot_number); 
PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); args = PyTuple_New(3); PyTuple_SetItem(args, 0, PyFloat_FromDouble(Z_DVAL_P(nrows))); PyTuple_SetItem(args, 1, PyFloat_FromDouble(Z_DVAL_P(ncols))); PyTuple_SetItem(args, 2, PyFloat_FromDouble(Z_DVAL_P(plot_number))); res = PyObject_CallObject(intern->s_python_function_subplot, args); Py_DECREF(args); if(!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to subplot() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, title){ zval *title; phalcon_py_matplot_object *intern; PyObject *pytitle, *args, *res; phalcon_fetch_params(0, 1, 0, &title); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pytitle = PyString_FromString(Z_STRVAL_P(title)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pytitle); res = PyObject_CallObject(intern->s_python_function_title, args); Py_DECREF(args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to title() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, axis){ zval *axis; phalcon_py_matplot_object *intern; PyObject *pyaxis, *args, *res; phalcon_fetch_params(0, 1, 0, &axis); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pyaxis = PyString_FromString(Z_STRVAL_P(axis)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pyaxis); res = PyObject_CallObject(intern->s_python_function_axis, args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to axis() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, xlabel){ zval *xlabel; phalcon_py_matplot_object *intern; PyObject *pyxlabel, *args, *res; phalcon_fetch_params(0, 1, 0, &xlabel); PHP_PYTHON_THREAD_ACQUIRE(); intern = 
phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pyxlabel = PyString_FromString(Z_STRVAL_P(xlabel)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pyxlabel); res = PyObject_CallObject(intern->s_python_function_xlabel, args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to xlabel() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, ylabel){ zval *ylabel; phalcon_py_matplot_object *intern; PyObject *pyylabel, *args, *res; phalcon_fetch_params(0, 1, 0, &ylabel); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pyylabel = PyString_FromString(Z_STRVAL_P(ylabel)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pyylabel); res = PyObject_CallObject(intern->s_python_function_ylabel, args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to ylabel() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, grid){ zval *flag; phalcon_py_matplot_object *intern; PyObject *args, *res; phalcon_fetch_params(0, 1, 0, &flag); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); args = PyTuple_New(1); PyTuple_SetItem(args, 0, zend_is_true(flag) ? 
Py_True : Py_False); res = PyObject_CallObject(intern->s_python_function_grid, args); Py_DECREF(args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to grid() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, show){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_show, intern->s_python_empty_tuple); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to show() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, close){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_close, intern->s_python_empty_tuple); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to close() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, draw){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_draw, intern->s_python_empty_tuple); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to draw() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, pause){ zval *interval; phalcon_py_matplot_object *intern; PyObject *args, *res; phalcon_fetch_params(0, 1, 0, &interval); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); args = PyTuple_New(1); PyTuple_SetItem(args, 0, PyFloat_FromDouble(Z_DVAL_P(interval))); res = 
PyObject_CallObject(intern->s_python_function_pause, args); Py_DECREF(args); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to pause() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, save){ zval *filename; phalcon_py_matplot_object *intern; PyObject *pyfilename, *args, *res; phalcon_fetch_params(0, 1, 0, &filename); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); pyfilename = PyString_FromString(Z_STRVAL_P(filename)); args = PyTuple_New(1); PyTuple_SetItem(args, 0, pyfilename); res = PyObject_CallObject(intern->s_python_function_save, args); Py_DECREF(args); if (!res) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to save() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); RETURN_TRUE; } PHP_METHOD(Phalcon_Py_Matplot, clf){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_clf, intern->s_python_empty_tuple); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to clf() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } PHP_METHOD(Phalcon_Py_Matplot, tightLayout){ phalcon_py_matplot_object *intern; PyObject *res; PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); res = PyObject_CallObject(intern->s_python_function_tight_layout, intern->s_python_empty_tuple); if (!res) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Call to tightLayout() failed."); return; } Py_DECREF(res); PHP_PYTHON_THREAD_RELEASE(); } /** * Handles method calls when a method is not implemented * * @param string $method * @param array $arguments * @return mixed */ PHP_METHOD(Phalcon_Py_Matplot, __call){ zval *method, *arguments = NULL; 
phalcon_py_matplot_object *intern; PyObject *function, *args, *kwargs = NULL, *retval; phalcon_fetch_params(0, 1, 1, &method, &arguments); PHP_PYTHON_THREAD_ACQUIRE(); intern = phalcon_py_matplot_object_from_obj(Z_OBJ_P(getThis())); function = PyObject_GetAttrString(intern->pyplot, Z_STRVAL_P(method)); if (!function) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Couldn't find required function!"); PHP_PYTHON_THREAD_RELEASE(); return; } if (!PyFunction_Check(function)) { PHALCON_THROW_EXCEPTION_STR(phalcon_py_exception_ce, "Python object is unexpectedly not a PyFunction!"); PHP_PYTHON_THREAD_RELEASE(); return; } if (!arguments) { args = PyTuple_New(0); } else if (Z_TYPE_P(arguments) == IS_ARRAY) { zval zero = {}; if (phalcon_array_isset_fetch_long(&zero, arguments, 0, PH_READONLY) && Z_TYPE(zero) == IS_ARRAY) { zval _args = {}, _kwargs = {}; if (phalcon_array_isset_fetch_str(&_args, &zero, SL("args"), PH_READONLY)) { args = pip_hash_to_tuple(&_args); } else { args = PyTuple_New(0); } if (phalcon_array_isset_fetch_str(&_kwargs, &zero, SL("kwargs"), PH_READONLY)) { kwargs = pip_hash_to_dict(&_kwargs); } } else { args = PyTuple_New(0); } } else { args = PyTuple_New(1); PyTuple_SetItem(args, 0, pip_zval_to_pyobject(arguments)); } if (kwargs) { retval = PyObject_Call(function, args, kwargs); Py_DECREF(kwargs); } else { retval = PyObject_CallObject(function, args); } Py_DECREF(function); Py_DECREF(args); if (!retval) { PHP_PYTHON_THREAD_RELEASE(); PHALCON_THROW_EXCEPTION_FORMAT(phalcon_py_exception_ce, "Call to %s() failed.", Z_STRVAL_P(method)); return; } pip_pyobject_to_zval(retval, return_value); Py_DECREF(retval); PHP_PYTHON_THREAD_RELEASE(); }
701733.c
#include "worker.h"
#include "stdio.h"
#include "stdlib.h"
#include "adios.h"
#include "sos.h"

/*
 * Worker A is the first application in the workflow.
 * It does some "computation" and communication for N
 * iterations, and it produces "output" that is "input" to worker_b.
 */

int iterations = 1;     /* main-loop iteration count, set from argv[1] */
bool send_to_b = true;  /* stream output to worker B via ADIOS; argv[2] == "0" disables */

extern SOS_pub *example_pub;

/*
 * Parse and validate the command line:
 *   argv[1] = number of iterations (required)
 *   argv[2] = send-to-next-worker flag, "0" to disable (optional)
 * Exits the process when the required argument is missing.
 */
void validate_input(int argc, char* argv[]) {
    if (argc < 2) {
        my_printf("Usage: %s <num iterations> <send to next worker>\n", argv[0]);
        exit(1);
    }
    /*
    if (commsize < 2) {
        my_printf("%s requires at least 2 processes.\n", argv[0]);
        exit(1);
    }
    */
    iterations = atoi(argv[1]);
    if (argc > 2) {
        int tmp = atoi(argv[2]);
        if (tmp == 0) {
            send_to_b = false;
        }
    }
}

/*
 * Main body of worker A: per iteration it exchanges with neighbors,
 * "computes", then (when send_to_b) appends the t/p arrays to the shared
 * ADIOS file consumed by worker B.  Returns 0 on completion.
 */
int worker(int argc, char* argv[]) {
    TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER);
    TAU_PROFILE_START(timer);
    static bool announced = false;
    my_printf("%d of %d In worker A\n", myrank, commsize);

    /* validate input */
    validate_input(argc, argv);

    my_printf("Worker A will execute %d iterations.\n", iterations);

    /* ADIOS: These declarations are required to match the generated
     * gread_/gwrite_ functions.  (And those functions are
     * generated by calling 'gpp.py adios_config.xml') ...
     */
    uint64_t adios_groupsize;
    uint64_t adios_totalsize;
    uint64_t adios_handle;
    char adios_filename[256];
    MPI_Comm adios_comm;

    /* ADIOS: Can duplicate, split the world, whatever.
     * This allows you to have P writers to N files.
     * With no splits, everyone shares 1 file, but
     * can write lock-free by using different areas.
     */
    //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm);
    adios_comm = MPI_COMM_WORLD;

    int NX = 10;
    int NY = 1;
    double t[NX];
    double p[NX]; /* NOTE(review): only NY (=1) elements are ever written although
                   * the array is sized NX — confirm against gwrite_a_to_b.ch */

    /* ADIOS: Set up the adios communications and buffers, open the file.
     */
    if (send_to_b) {
        sprintf(adios_filename, "adios_a_to_b.bp");
        adios_init("adios_config.xml", adios_comm);
    }

    int index, i;
    for (index = 0 ; index < iterations ; index++ ) {
        /* Do some exchanges with neighbors */
        do_neighbor_exchange();
        /* "Compute" */
        compute(index);
        /* Write output */
        //my_printf("a");

        for (i = 0; i < NX; i++) {
            t[i] = index*100.0 + myrank*NX + i;
        }

        for (i = 0; i < NY; i++) {
            p[i] = index*1000.0 + myrank*NY + i;
        }

        if (myrank == 0) {
            printf ("--------- A Step: %d --------------------------------\n", index);
        }

        if (send_to_b) {
            TAU_PROFILE_TIMER(adiostimer, "ADIOS send", __FILE__, TAU_USER);
            TAU_PROFILE_START(adiostimer);
            /* First iteration truncates the file ("w"); later ones append ("a"). */
            if (index == 0) {
                adios_open(&adios_handle, "a_to_b", adios_filename, "w", adios_comm);
            } else {
                adios_open(&adios_handle, "a_to_b", adios_filename, "a", adios_comm);
            }

            /* ADIOS: Actually write the data out.
             *        Yes, this is the recommended method, and this way, changes in
             *        configuration with the .XML file will, even in the worst-case
             *        scenario, merely require running 'gpp.py adios_config.xml'
             *        and typing 'make'.
             */
            #include "gwrite_a_to_b.ch"

            /* ADIOS: Close out the file completely and finalize.
             *        If MPI is being used, this must happen before MPI_Finalize().
             */
            adios_close(adios_handle);
            TAU_PROFILE_STOP(adiostimer);
#if 1
            /* Announce/publish the NX metadata to SOS exactly once. */
            if (!announced) {
                int foo = NX;
                SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, &foo);
                SOS_announce(example_pub);
                SOS_publish(example_pub);
                announced = true;
            }
#endif
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (send_to_b) {
        adios_finalize(myrank);
    }

    my_printf("Worker A exiting.\n");  /* fixed typo: was "exting" */
    //MPI_Comm_free(&adios_comm);
    TAU_PROFILE_STOP(timer);
    /* exit */
    return 0;
}
541561.c
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
 * Log library implementation notes.
 *
 * Log library stores all tags provided to esp_log_level_set as a linked
 * list. See uncached_tag_entry_t structure.
 *
 * To avoid looking up log level for given tag each time message is
 * printed, this library caches pointers to tags. Because the suggested
 * way of creating tags uses one 'TAG' constant per file, this caching
 * should be effective. Cache is a binary min-heap of cached_tag_entry_t
 * items, ordering is done on 'generation' member. In this context,
 * generation is an integer which is incremented each time an operation
 * with cache is performed. When cache is full, new item is inserted in
 * place of an oldest item (that is, with smallest 'generation' value).
 * After that, bubble-down operation is performed to fix ordering in the
 * min-heap.
 *
 * The potential problem with wrap-around of cache generation counter is
 * ignored for now. This will happen if someone happens to output more
 * than 4 billion log entries, at which point wrap-around will not be
 * the biggest problem.
 *
 */

// FreeRTOS headers are only needed for the mutex/tick APIs used by the
// non-bootloader build; the bootloader build has no RTOS.
#ifndef BOOTLOADER_BUILD
#include <freertos/FreeRTOS.h>
#include <freertos/FreeRTOSConfig.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#endif

#include "esp_attr.h"
#include "xtensa/hal.h"
#include "soc/soc.h"
#include <stdbool.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <ctype.h>
#include "esp_log.h"
#include "rom/queue.h"
#include "soc/soc_memory_layout.h"

//print number of bytes per line for esp_log_buffer_char and esp_log_buffer_hex
#define BYTES_PER_LINE 16

#ifndef BOOTLOADER_BUILD

// Number of tags to be cached. Must be 2**n - 1, n >= 2.
#define TAG_CACHE_SIZE 31

// Maximum time to wait for the mutex in a logging statement.
#define MAX_MUTEX_WAIT_MS 10
#define MAX_MUTEX_WAIT_TICKS ((MAX_MUTEX_WAIT_MS + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS)

// Uncomment this to enable consistency checks and cache statistics in this file.
// #define LOG_BUILTIN_CHECKS

// One slot of the tag cache.  'tag' is compared by POINTER identity (the
// per-file TAG constant), which is why the cache lookup is cheap.
typedef struct {
    const char* tag;
    uint32_t level : 3;        // esp_log_level_t packed into 3 bits
    uint32_t generation : 29;  // min-heap ordering key; see notes above
} cached_tag_entry_t;

// One heap-allocated node of the uncached tag list, keyed by tag STRING.
typedef struct uncached_tag_entry_{
    SLIST_ENTRY(uncached_tag_entry_) entries;
    uint8_t level;  // esp_log_level_t as uint8_t
    char tag[0];    // beginning of a zero-terminated string
} uncached_tag_entry_t;

// Level used when a tag appears in neither the cache nor the list.
static esp_log_level_t s_log_default_level = ESP_LOG_VERBOSE;
static SLIST_HEAD(log_tags_head , uncached_tag_entry_) s_log_tags = SLIST_HEAD_INITIALIZER(s_log_tags);
static cached_tag_entry_t s_log_cache[TAG_CACHE_SIZE];
static uint32_t s_log_cache_max_generation = 0;
static uint32_t s_log_cache_entry_count = 0;
static vprintf_like_t s_log_print_func = &vprintf;
// Created lazily on first use by the public entry points.
static SemaphoreHandle_t s_log_mutex = NULL;

#ifdef LOG_BUILTIN_CHECKS
static uint32_t s_log_cache_misses = 0;
#endif

static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level);
static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level);
static inline void add_to_cache(const char* tag, esp_log_level_t level);
static void
heap_bubble_down(int index); static inline void heap_swap(int i, int j); static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag); static inline void clear_log_level_list(); vprintf_like_t esp_log_set_vprintf(vprintf_like_t func) { if (!s_log_mutex) { s_log_mutex = xSemaphoreCreateMutex(); } xSemaphoreTake(s_log_mutex, portMAX_DELAY); vprintf_like_t orig_func = s_log_print_func; s_log_print_func = func; xSemaphoreGive(s_log_mutex); return orig_func; } void esp_log_level_set(const char* tag, esp_log_level_t level) { if (!s_log_mutex) { s_log_mutex = xSemaphoreCreateMutex(); } xSemaphoreTake(s_log_mutex, portMAX_DELAY); // for wildcard tag, remove all linked list items and clear the cache if (strcmp(tag, "*") == 0) { s_log_default_level = level; clear_log_level_list(); xSemaphoreGive(s_log_mutex); return; } //searching exist tag uncached_tag_entry_t *it = NULL; SLIST_FOREACH( it, &s_log_tags, entries ) { if ( strcmp(it->tag, tag)==0 ) { //one tag in the linked list match, update the level it->level = level; //quit with it != NULL break; } } //no exist tag, append new one if ( it == NULL ) { // allocate new linked list entry and append it to the head of the list size_t entry_size = offsetof(uncached_tag_entry_t, tag) + strlen(tag) + 1; uncached_tag_entry_t* new_entry = (uncached_tag_entry_t*) malloc(entry_size); if (!new_entry) { xSemaphoreGive(s_log_mutex); return; } new_entry->level = (uint8_t) level; strcpy(new_entry->tag, tag); SLIST_INSERT_HEAD( &s_log_tags, new_entry, entries ); } //search in the cache and update it if exist for (int i = 0; i < s_log_cache_entry_count; ++i) { #ifdef LOG_BUILTIN_CHECKS assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation); #endif if (strcmp(s_log_cache[i].tag,tag) == 0) { s_log_cache[i].level = level; break; } } xSemaphoreGive(s_log_mutex); } void clear_log_level_list() { while( !SLIST_EMPTY(&s_log_tags)) { SLIST_REMOVE_HEAD(&s_log_tags, entries ); } 
s_log_cache_entry_count = 0; s_log_cache_max_generation = 0; #ifdef LOG_BUILTIN_CHECKS s_log_cache_misses = 0; #endif } void IRAM_ATTR esp_log_vwrite(esp_log_level_t level, const char* tag, const char* format, va_list args) { if (!s_log_mutex) { s_log_mutex = xSemaphoreCreateMutex(); } if (xSemaphoreTake(s_log_mutex, MAX_MUTEX_WAIT_TICKS) == pdFALSE) { return; } esp_log_level_t level_for_tag; // Look for the tag in cache first, then in the linked list of all tags if (!get_cached_log_level(tag, &level_for_tag)) { if (!get_uncached_log_level(tag, &level_for_tag)) { level_for_tag = s_log_default_level; } add_to_cache(tag, level_for_tag); #ifdef LOG_BUILTIN_CHECKS ++s_log_cache_misses; #endif } xSemaphoreGive(s_log_mutex); if (!should_output(level, level_for_tag)) { return; } (*s_log_print_func)(format, args); } void IRAM_ATTR esp_log_write(esp_log_level_t level, const char* tag, const char* format, ...) { va_list list; va_start(list, format); esp_log_vwrite(level, tag, format, list); va_end(list); } static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level) { // Look for `tag` in cache int i; for (i = 0; i < s_log_cache_entry_count; ++i) { #ifdef LOG_BUILTIN_CHECKS assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation); #endif if (s_log_cache[i].tag == tag) { break; } } if (i == s_log_cache_entry_count) { // Not found in cache return false; } // Return level from cache *level = (esp_log_level_t) s_log_cache[i].level; // If cache has been filled, start taking ordering into account // (other options are: dynamically resize cache, add "dummy" entries // to the cache; this option was chosen because code is much simpler, // and the unfair behavior of cache will show it self at most once, when // it has just been filled) if (s_log_cache_entry_count == TAG_CACHE_SIZE) { // Update item generation s_log_cache[i].generation = s_log_cache_max_generation++; // Restore heap ordering heap_bubble_down(i); } return true; } static 
inline void add_to_cache(const char* tag, esp_log_level_t level)
{
    uint32_t generation = s_log_cache_max_generation++;
    // First consider the case when cache is not filled yet.
    // In this case, just add new entry at the end.
    // This happens to satisfy binary min-heap ordering.
    if (s_log_cache_entry_count < TAG_CACHE_SIZE) {
        s_log_cache[s_log_cache_entry_count] = (cached_tag_entry_t) {
            .generation = generation,
            .level = level,
            .tag = tag
        };
        ++s_log_cache_entry_count;
        return;
    }

    // Cache is full, so we replace the oldest entry (which is at index 0
    // because this is a min-heap) with the new one, and do bubble-down
    // operation to restore min-heap ordering.
    s_log_cache[0] = (cached_tag_entry_t) {
        .tag = tag,
        .level = level,
        .generation = generation
    };
    heap_bubble_down(0);
}

// Linear search of the uncached tag list; returns true and writes *level
// on a match.
static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level)
{
    // Walk the linked list of all tags and see if given tag is present in the list.
    // This is slow because tags are compared as strings.
    uncached_tag_entry_t *it;
    SLIST_FOREACH( it, &s_log_tags, entries ) {
        if (strcmp(tag, it->tag) == 0) {
            *level = it->level;
            return true;
        }
    }
    return false;
}

// A message is emitted when its level is at or below the tag's level
// (lower enum value == higher severity).
static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag)
{
    return level_for_message <= level_for_tag;
}

// Sift the entry at 'index' down until min-heap ordering on 'generation'
// is restored.  With TAG_CACHE_SIZE == 2**n - 1 every non-leaf node has
// both children, so right_index never goes out of bounds.
static void heap_bubble_down(int index)
{
    while (index < TAG_CACHE_SIZE / 2) {
        int left_index = index * 2 + 1;
        int right_index = left_index + 1;
        int next = (s_log_cache[left_index].generation < s_log_cache[right_index].generation) ? left_index : right_index;
        heap_swap(index, next);
        index = next;
    }
}

// Exchange two cache slots.
static inline void heap_swap(int i, int j)
{
    cached_tag_entry_t tmp = s_log_cache[i];
    s_log_cache[i] = s_log_cache[j];
    s_log_cache[j] = tmp;
}

#endif //BOOTLOADER_BUILD

#ifndef BOOTLOADER_BUILD
#define ATTR IRAM_ATTR
#else
#define ATTR
#endif // BOOTLOADER_BUILD

//the variable defined in ROM is the cpu frequency in MHz.
//as a workaround before the interface for this variable
extern uint32_t g_ticks_per_us_pro;

// Millisecond timestamp derived from the CPU cycle counter; usable before
// the RTOS scheduler starts (and in the bootloader build).
uint32_t ATTR esp_log_early_timestamp()
{
    return xthal_get_ccount() / (g_ticks_per_us_pro * 1000);
}

#ifndef BOOTLOADER_BUILD

// Millisecond timestamp: cycle-counter based before the scheduler starts,
// tick-count based afterwards.  'base' bridges the two time sources and is
// captured once on core 0.
uint32_t IRAM_ATTR esp_log_timestamp()
{
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return esp_log_early_timestamp();
    }
    static uint32_t base = 0;
    if (base == 0 && xPortGetCoreID() == 0) {
        base = esp_log_early_timestamp();
    }
    return base + xTaskGetTickCount() * (1000 / configTICK_RATE_HZ);
}

#else

uint32_t esp_log_timestamp() __attribute__((alias("esp_log_early_timestamp")));

#endif //BOOTLOADER_BUILD

// Log 'buff_len' bytes as lines of hex pairs, BYTES_PER_LINE per line.
void esp_log_buffer_hex_internal(const char *tag, const void *buffer, uint16_t buff_len,
                                 esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    char hex_buffer[3*BYTES_PER_LINE+1];
    const char *ptr_line;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            // NOTE(review): the length is rounded up to a multiple of 4, so up
            // to 3 bytes past the caller's buffer may be read -- temp_buffer
            // has the +3 slack for the destination side; presumably this is
            // intentional for word-only-accessible memory regions.
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }

        for( int i = 0; i < bytes_cur_line; i ++ ) {
            sprintf( hex_buffer + 3*i, "%02x ", ptr_line[i] );
        }
        ESP_LOG_LEVEL( log_level, tag, "%s", hex_buffer );
        // arithmetic on 'const void *' is a GNU extension (byte-sized steps)
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}

// Log 'buff_len' bytes as raw characters, BYTES_PER_LINE per line.
void esp_log_buffer_char_internal(const char *tag, const void *buffer, uint16_t buff_len,
                                  esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    char char_buffer[BYTES_PER_LINE+1];
    const char *ptr_line;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }

        for( int i = 0; i < bytes_cur_line; i ++ ) {
            sprintf( char_buffer + i, "%c", ptr_line[i] );
        }
        ESP_LOG_LEVEL( log_level, tag, "%s", char_buffer );
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}

// Log a classic hexdump: address, hex bytes in two groups of 8, then the
// printable-character column.
void esp_log_buffer_hexdump_internal( const char *tag, const void *buffer, uint16_t buff_len, esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    const char *ptr_line;
    //format: field[length]
    // ADDR[10]+" "+DATA_HEX[8*3]+" "+DATA_HEX[8*3]+" |"+DATA_CHAR[8]+"|"
    char hd_buffer[10+3+BYTES_PER_LINE*3+3+BYTES_PER_LINE+1+1];
    char *ptr_hd;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }
        ptr_hd = hd_buffer;

        ptr_hd += sprintf( ptr_hd, "%p ", buffer );
        for( int i = 0; i < BYTES_PER_LINE; i ++ ) {
            if ( (i&7)==0 ) {
                ptr_hd += sprintf( ptr_hd, " " );
            }
            if ( i < bytes_cur_line ) {
                ptr_hd += sprintf( ptr_hd, " %02x", ptr_line[i] );
            } else {
                // pad short final lines so the char column stays aligned
                ptr_hd += sprintf( ptr_hd, " " );
            }
        }
        ptr_hd += sprintf( ptr_hd, " |" );
        for( int i = 0; i < bytes_cur_line; i ++ ) {
            if ( isprint((int)ptr_line[i]) ) {
                ptr_hd += sprintf( ptr_hd, "%c", ptr_line[i] );
            } else {
                ptr_hd += sprintf( ptr_hd, "." );
            }
        }
        ptr_hd += sprintf( ptr_hd, "|" );

        ESP_LOG_LEVEL( log_level, tag, "%s", hd_buffer );
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}
36511.c
/* TomsFastMath, a fast ISO C bignum library.
 *
 * This project is meant to fill in where LibTomMath
 * falls short.  That is speed ;-)
 *
 * This project is public domain and free for all purposes.
 *
 * Tom St Denis, [email protected]
 */
#include <tfm.h>

/* c = [a, b] */
void fp_lcm(fp_int *a, fp_int *b, fp_int *c)
{
   fp_int  gcd, quot;
   fp_int *hi, *lo;

   fp_init(&gcd);
   fp_init(&quot);

   fp_gcd(a, b, &gcd);

   /* lcm(a, b) = (hi / gcd) * lo, where hi is the operand with the larger
    * magnitude.  Dividing the larger one first keeps the quotient -- and
    * therefore the final multiplication -- as small as possible. */
   if (fp_cmp_mag(a, b) == FP_GT) {
      hi = a;
      lo = b;
   } else {
      hi = b;
      lo = a;
   }
   fp_div(hi, &gcd, &quot, NULL);
   fp_mul(lo, &quot, c);
}

/* $Source$ */
/* $Revision$ */
/* $Date$ */
11257.c
/* Copyright (C) 2002-2020 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <[email protected]>, 2002. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <https://www.gnu.org/licenses/>. */ #include <errno.h> #include "pthreadP.h" int __pthread_attr_getstackaddr (const pthread_attr_t *attr, void **stackaddr) { struct pthread_attr *iattr; iattr = (struct pthread_attr *) attr; /* Some code assumes this function to work even if no stack address has been set. Let them figure it out for themselves what the value means. Simply store the result. */ *stackaddr = iattr->stackaddr; return 0; } strong_alias (__pthread_attr_getstackaddr, pthread_attr_getstackaddr) link_warning (pthread_attr_getstackaddr, "the use of `pthread_attr_getstackaddr' is deprecated, use `pthread_attr_getstack'")