filename
stringlengths
3
9
code
stringlengths
4
1.87M
445507.c
/*
 * Copyright (c) 2019 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ztest.h>
#include <arch/cpu.h>
#include <arch/arm/cortex_m/cmsis.h>

static volatile int test_flag;

/* ISR attached to the zero-latency IRQ under test; records that it ran. */
void arm_zero_latency_isr_handler(void *args)
{
	ARG_UNUSED(args);

	test_flag = 1;
}

/**
 * Verify that an IRQ configured as zero-latency fires even while regular
 * interrupts are locked via irq_lock().
 */
void test_arm_zero_latency_irqs(void)
{
	/* Determine an NVIC IRQ line that is not currently in use. */
	int i, key;
	int init_flag, post_flag;

	init_flag = test_flag;
	zassert_false(init_flag, "Test flag not initialized to zero\n");

	for (i = CONFIG_NUM_IRQS - 1; i >= 0; i--) {
		if (NVIC_GetEnableIRQ(i) == 0) {
			/*
			 * Interrupts configured statically with IRQ_CONNECT(.)
			 * are automatically enabled. NVIC_GetEnableIRQ()
			 * returning false, here, implies that the IRQ line is
			 * not enabled, thus, currently not in use by Zephyr.
			 */
			break;
		}
	}

	zassert_true(i >= 0,
		"No available IRQ line to configure as zero-latency\n");

	/* Fix: 'i' is a signed int, so the matching conversion is %d, not %u
	 * (mismatched printf specifier/argument is undefined behavior). */
	TC_PRINT("Available IRQ line: %d\n", i);

	/* Configure the available IRQ line as zero-latency. */
	z_arch_irq_connect_dynamic(i, 0 /* Unused */,
		arm_zero_latency_isr_handler,
		NULL,
		IRQ_ZERO_LATENCY);

	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);

	/* Lock (regular) interrupts. */
	key = irq_lock();

	/* Set the zero-latency IRQ to pending state. */
	NVIC_SetPendingIRQ(i);

	/*
	 * Instruction barriers to make sure the NVIC IRQ is
	 * set to pending state before 'test_flag' is checked.
	 */
	__DSB();
	__ISB();

	/* Confirm test flag is set by the zero-latency ISR handler. */
	post_flag = test_flag;
	zassert_true(post_flag == 1, "Test flag not set by ISR\n");

	irq_unlock(key);
}
/**
 * @}
 */
243279.c
/*
 * Copyright (C) 2020 Yaron Gvili and Gvili Tech Ltd.
 *
 * See the accompanying LICENSE.txt file for licensing information.
 */
/*! \file src/swifft.c
 * \brief LibSWIFFT public C implementation
 *
 * Implementation using the best instruction set available at build time.
 */
#include "libswifft/swifft.h"
#include "libswifft/swifft_avx.h"
#include "libswifft/swifft_avx2.h"
#include "libswifft/swifft_avx512.h"

#undef SWIFFT_ISET
#define SWIFFT_ISET() SWIFFT_INSTRUCTION_SET
#include "swifft_ops.inl"

LIBSWIFFT_BEGIN_EXTERN_C

//! All-zeros sign block, used by callers that supply no sign bits.
SWIFFT_ALIGN const BitSequence SWIFFT_sign0[SWIFFT_INPUT_BLOCK_SIZE] = {0};

//! \brief Runs the FFT phase of SWIFFT via the selected instruction set.
void SWIFFT_fft(const BitSequence * LIBSWIFFT_RESTRICT input,
	const BitSequence * LIBSWIFFT_RESTRICT sign,
	int m, int16_t * LIBSWIFFT_RESTRICT fftout)
{
	SWIFFT_ISET_NAME(SWIFFT_fft_)(input, sign, m, fftout);
}

//! \brief Runs the FFT-sum phase of SWIFFT via the selected instruction set.
void SWIFFT_fftsum(const int16_t * LIBSWIFFT_RESTRICT ikey,
	const int16_t * LIBSWIFFT_RESTRICT ifftout,
	int m, int16_t * LIBSWIFFT_RESTRICT iout)
{
	SWIFFT_ISET_NAME(SWIFFT_fftsum_)(ikey, ifftout, m, iout);
}

//! \brief Converts from base-257 to base-256.
//!
//! vals is assumed to hold (SWIFFT_W=8) x (SWIFFT_N=8) digits in base 257,
//! with the most significant digit last. On return the same numbers are
//! encoded in base 256, with each pass propagating carries upward.
//!
//! \param[in,out] vals the digits array.
static void ToBase256(Z1vec vals[8])
{
	const Z1vec mask255 = Z1CONST(255);
	const Z1vec shift8 = Z1CONST(8);
	int hi;

	for (hi = 8 - 1; hi > 0; hi--) {
		int k;
		for (k = hi - 1; k < 8 - 1; k++) {
			Z1vec sum = vals[k] + vals[k + 1];
			vals[k] = sum & mask255;
			vals[k + 1] += (sum >> shift8);
		}
	}
}

//! \brief Compacts a hash value of SWIFFT.
//! The result is not composable with other compacted hash values.
//!
//! \param[in] output the hash value of SWIFFT, of size 128 bytes (1024 bit).
//! \param[out] compact the compacted hash value of SWIFFT, of size 64 bytes (512 bit).
void SWIFFT_Compact(const BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], BitSequence compact[SWIFFT_COMPACT_BLOCK_SIZE]) { // // The 8*8 output int16_ts needs to be transposed before and after // SIMD base change. // This could be avoided by defining the base change differently // but then a transpose like operation would have to be performed // by the normal (Non-SIMD) version. // Z1vec transposed[SWIFFT_N/SWIFFT_W]; int16_t *tin = (int16_t *) output; int16_t *tout = (int16_t *) transposed; int i; for (i=0; i<SWIFFT_N/SWIFFT_W; i++,tin+=8,tout++) { tout[0] = tin[0]; tout[8] = tin[1]; tout[16] = tin[2]; tout[24] = tin[3]; tout[32] = tin[4]; tout[40] = tin[5]; tout[48] = tin[6]; tout[56] = tin[7]; } ToBase256(transposed); tin = (int16_t *) transposed; BitSequence *cout = compact; int carry = 0; for (i=0; i<SWIFFT_N/SWIFFT_W; i++,tin++,cout+=8) { cout[0] = tin[0]; cout[1] = tin[8]; cout[2] = tin[16]; cout[3] = tin[24]; cout[4] = tin[32]; cout[5] = tin[40]; cout[6] = tin[48]; cout[7] = tin[56]&255; carry |= ((tin[56]>>8)<<i); } // ignore carry } //! \brief Sets a constant value at each SWIFFT hash value element. //! //! \param[out] output the hash value of SWIFFT to modify. //! \param[in] operand the constant value to set. void SWIFFT_ConstSet(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const int16_t operand) { SWIFFT_ISET_NAME(SWIFFT_ConstSet_)(output, operand); } //! \brief Adds a constant value to each SWIFFT hash value element. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the constant value to add. void SWIFFT_ConstAdd(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const int16_t operand) { SWIFFT_ISET_NAME(SWIFFT_ConstAdd_)(output, operand); } //! \brief Subtracts a constant value from each SWIFFT hash value element. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the constant value to subtract. 
void SWIFFT_ConstSub(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const int16_t operand) { SWIFFT_ISET_NAME(SWIFFT_ConstSub_)(output, operand); } //! \brief Multiply a constant value into each SWIFFT hash value element. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the constant value to multiply by. void SWIFFT_ConstMul(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const int16_t operand) { SWIFFT_ISET_NAME(SWIFFT_ConstMul_)(output, operand); } //! \brief Adds a SWIFFT hash value to another, element-wise. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the hash value to add. void SWIFFT_Add(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const BitSequence operand[SWIFFT_OUTPUT_BLOCK_SIZE]) { SWIFFT_ISET_NAME(SWIFFT_Add_)(output, operand); } //! \brief Subtracts a SWIFFT hash value from another, element-wise. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the hash value to subtract. void SWIFFT_Sub(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const BitSequence operand[SWIFFT_OUTPUT_BLOCK_SIZE]) { SWIFFT_ISET_NAME(SWIFFT_Sub_)(output, operand); } //! \brief Multiplies a SWIFFT hash value from another, element-wise. //! //! \param[in,out] output the hash value of SWIFFT to modify. //! \param[in] operand the hash value to multiply by. void SWIFFT_Mul(BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE], const BitSequence operand[SWIFFT_OUTPUT_BLOCK_SIZE]) { SWIFFT_ISET_NAME(SWIFFT_Mul_)(output, operand); } //! \brief Computes the result of a SWIFFT operation. //! The result is composable with other hash values. //! //! \param[in] input the input of 256 bytes (2048 bit). //! \param[out] output the resulting hash value of SWIFFT, of size 128 bytes (1024 bit). void SWIFFT_Compute(const BitSequence input[SWIFFT_INPUT_BLOCK_SIZE], BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE]) { SWIFFT_ISET_NAME(SWIFFT_Compute_)(input, output); } //! 
\brief Computes the result of a SWIFFT operation. //! The result is composable with other hash values. //! //! \param[in] input the input of 256 bytes (2048 bit). //! \param[in] sign the sign bits corresponding to the input of 256 bytes (2048 bit). //! \param[out] output the resulting hash value of SWIFFT, of size 128 bytes (1024 bit). void SWIFFT_ComputeSigned(const BitSequence input[SWIFFT_INPUT_BLOCK_SIZE], const BitSequence sign[SWIFFT_INPUT_BLOCK_SIZE], BitSequence output[SWIFFT_OUTPUT_BLOCK_SIZE]) { SWIFFT_ISET_NAME(SWIFFT_ComputeSigned_)(input, sign, output); } //! \brief Computes the result of multiple SWIFFT operations. //! The result is composable with other hash values. //! //! \param[in] nblocks the number of blocks to operate on. //! \param[in] input the blocks of input, each of 256 bytes (2048 bit). //! \param[out] output the resulting blocks of hash values of SWIFFT, each of size 128 bytes (1024 bit). void SWIFFT_ComputeMultiple(int nblocks, const BitSequence * input, BitSequence * output) { SWIFFT_ISET_NAME(SWIFFT_ComputeMultiple_)(nblocks, input, output); } //! \brief Computes the result of multiple SWIFFT operations. //! The result is composable with other hash values. //! //! \param[in] nblocks the number of blocks to operate on. //! \param[in] input the blocks of input, each of 256 bytes (2048 bit). //! \param[in] sign the blocks of sign bits corresponding to blocks of input of 256 bytes (2048 bit). //! \param[out] output the resulting blocks of hash values of SWIFFT, each of size 128 bytes (1024 bit). void SWIFFT_ComputeMultipleSigned(int nblocks, const BitSequence * input, const BitSequence * sign, BitSequence * output) { SWIFFT_ISET_NAME(SWIFFT_ComputeMultipleSigned_)(nblocks, input, sign, output); } LIBSWIFFT_END_EXTERN_C
876948.c
/** * Compile command : gcc -o gfalt_copyfile gfalt_copyfile.c `pkg-config --libs --cflags gfal_transfer` */ #include <stdio.h> #include <stdlib.h> #include <gfal_api.h> #include <signal.h> #include <transfer/gfal_transfer.h> gfal2_context_t handle=NULL; void event_callback(const gfalt_event_t e, gpointer user_data) { static const char* side_str[] = {"SRC", "DST", "BTH"}; printf("[%ld:%ld] %s %s\t%s\t%s\n", e->timestamp / 1000, e->timestamp % 1000, side_str[e->side], g_quark_to_string(e->domain), g_quark_to_string(e->stage), e->description); } // setup interrupt void sigint_cancel(int param) { printf("User pressed Ctrl+C\n"); if(handle) gfal2_cancel(handle); } int main(int argc, char** argv){ signal(SIGINT, &sigint_cancel); if( argc <3 ){ printf(" Usage %s [src_url] [dst_url] \n",argv[0]); return 1; } GError * tmp_err = NULL; // classical GError/glib error management // initialize gfal gfal_set_verbose(GFAL_VERBOSE_TRACE | GFAL_VERBOSE_VERBOSE | GFAL_VERBOSE_TRACE_PLUGIN); if( (handle = gfal2_context_new(&tmp_err)) == NULL ) { printf(" bad initialization %d : %s.\n", tmp_err->code,tmp_err->message); return -1; } // Params gfalt_params_t params = gfalt_params_handle_new(NULL); gfalt_set_replace_existing_file(params, TRUE, NULL); gfalt_set_create_parent_dir(params, TRUE, NULL); // Register callback gfalt_set_event_callback(params, event_callback, &tmp_err); // begin copy if( gfalt_copy_file(handle, params, argv[1], argv[2], &tmp_err) != 0){ printf(" error while the file transfer %d : %s.\n", tmp_err->code,tmp_err->message); gfal2_context_free(handle); return -1; }else printf(" transfer sucessfull ! \n"); gfal2_context_free(handle); return 0; }
784824.c
/* * Interplay MVE Video Decoder * Copyright (C) 2003 The FFmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Interplay MVE Video Decoder by Mike Melanson (melanson@pcisys.net) * For more information about the Interplay MVE format, visit: * http://www.pcisys.net/~melanson/codecs/interplay-mve.txt * This code is written in such a way that the identifiers match up * with the encoding descriptions in the document. * * This decoder presently only supports a PAL8 output colorspace. * * An Interplay video frame consists of 2 parts: The decoding map and * the video data. A demuxer must load these 2 parts together in a single * buffer before sending it through the stream to this decoder. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #define BITSTREAM_READER_LE #include "avcodec.h" #include "bytestream.h" #include "get_bits.h" #include "hpeldsp.h" #include "internal.h" #define PALETTE_COUNT 256 typedef struct IpvideoContext { AVCodecContext *avctx; HpelDSPContext hdsp; AVFrame *second_last_frame; AVFrame *last_frame; const unsigned char *decoding_map; int decoding_map_size; int is_16bpp; GetByteContext stream_ptr, mv_ptr; unsigned char *pixel_ptr; int line_inc; int stride; int upper_motion_limit_offset; uint32_t pal[256]; } IpvideoContext; static int copy_from(IpvideoContext *s, AVFrame *src, AVFrame *dst, int delta_x, int delta_y) { int current_offset = s->pixel_ptr - dst->data[0]; int motion_offset = current_offset + delta_y * dst->linesize[0] + delta_x * (1 + s->is_16bpp); if (motion_offset < 0) { av_log(s->avctx, AV_LOG_ERROR, "motion offset < 0 (%d)\n", motion_offset); return AVERROR_INVALIDDATA; } else if (motion_offset > s->upper_motion_limit_offset) { av_log(s->avctx, AV_LOG_ERROR, "motion offset above limit (%d >= %d)\n", motion_offset, s->upper_motion_limit_offset); return AVERROR_INVALIDDATA; } if (!src->data[0]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid decode type, corrupted header?\n"); return AVERROR(EINVAL); } s->hdsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset, dst->linesize[0], 8); return 0; } static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s, AVFrame *frame) { return copy_from(s, s->last_frame, frame, 0, 0); } static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s, AVFrame *frame) { return copy_from(s, s->second_last_frame, frame, 0, 0); } static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s, AVFrame *frame) { unsigned char B; int x, y; /* copy block from 2 frames ago using a motion vector; need 1 more byte */ if (!s->is_16bpp) { B = bytestream2_get_byte(&s->stream_ptr); } else { B = bytestream2_get_byte(&s->mv_ptr); } if (B 
< 56) { x = 8 + (B % 7); y = B / 7; } else { x = -14 + ((B - 56) % 29); y = 8 + ((B - 56) / 29); } ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); return copy_from(s, s->second_last_frame, frame, x, y); } static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s, AVFrame *frame) { unsigned char B; int x, y; /* copy 8x8 block from current frame from an up/left block */ /* need 1 more byte for motion */ if (!s->is_16bpp) { B = bytestream2_get_byte(&s->stream_ptr); } else { B = bytestream2_get_byte(&s->mv_ptr); } if (B < 56) { x = -(8 + (B % 7)); y = -(B / 7); } else { x = -(-14 + ((B - 56) % 29)); y = -( 8 + ((B - 56) / 29)); } ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); return copy_from(s, frame, frame, x, y); } static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char B, BL, BH; /* copy a block from the previous frame; need 1 more byte */ if (!s->is_16bpp) { B = bytestream2_get_byte(&s->stream_ptr); } else { B = bytestream2_get_byte(&s->mv_ptr); } BL = B & 0x0F; BH = (B >> 4) & 0x0F; x = -8 + BL; y = -8 + BH; ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); return copy_from(s, s->last_frame, frame, x, y); } static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s, AVFrame *frame) { signed char x, y; /* copy a block from the previous frame using an expanded range; * need 2 more bytes */ x = bytestream2_get_byte(&s->stream_ptr); y = bytestream2_get_byte(&s->stream_ptr); ff_tlog(s->avctx, "motion bytes = %d, %d\n", x, y); return copy_from(s, s->last_frame, frame, x, y); } static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s, AVFrame *frame) { /* mystery opcode? skip multiple blocks? */ av_log(s->avctx, AV_LOG_ERROR, "Help! 
Mystery opcode 0x6 seen\n"); /* report success */ return 0; } static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char P[2]; unsigned int flags; if (bytestream2_get_bytes_left(&s->stream_ptr) < 4) { av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x7\n"); return AVERROR_INVALIDDATA; } /* 2-color encoding */ P[0] = bytestream2_get_byte(&s->stream_ptr); P[1] = bytestream2_get_byte(&s->stream_ptr); if (P[0] <= P[1]) { /* need 8 more bytes from the stream */ for (y = 0; y < 8; y++) { flags = bytestream2_get_byte(&s->stream_ptr) | 0x100; for (; flags != 1; flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->line_inc; } } else { /* need 2 more bytes from the stream */ flags = bytestream2_get_le16(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 1) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = P[flags & 1]; } s->pixel_ptr += s->stride * 2; } } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char P[4]; unsigned int flags = 0; if (bytestream2_get_bytes_left(&s->stream_ptr) < 12) { av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x8\n"); return AVERROR_INVALIDDATA; } /* 2-color encoding for each 4x4 quadrant, or 2-color encoding on * either top and bottom or left and right halves */ P[0] = bytestream2_get_byte(&s->stream_ptr); P[1] = bytestream2_get_byte(&s->stream_ptr); if (P[0] <= P[1]) { for (y = 0; y < 16; y++) { // new values for each 4x4 block if (!(y & 3)) { if (y) { P[0] = bytestream2_get_byte(&s->stream_ptr); P[1] = bytestream2_get_byte(&s->stream_ptr); } flags = bytestream2_get_le16(&s->stream_ptr); } for (x = 0; x < 4; x++, flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } } else { flags = 
bytestream2_get_le32(&s->stream_ptr); P[2] = bytestream2_get_byte(&s->stream_ptr); P[3] = bytestream2_get_byte(&s->stream_ptr); if (P[2] <= P[3]) { /* vertical split; left & right halves are 2-color encoded */ for (y = 0; y < 16; y++) { for (x = 0; x < 4; x++, flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) { s->pixel_ptr -= 8 * s->stride - 4; P[0] = P[2]; P[1] = P[3]; flags = bytestream2_get_le32(&s->stream_ptr); } } } else { /* horizontal split; top & bottom halves are 2-color encoded */ for (y = 0; y < 8; y++) { if (y == 4) { P[0] = P[2]; P[1] = P[3]; flags = bytestream2_get_le32(&s->stream_ptr); } for (x = 0; x < 8; x++, flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->line_inc; } } } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char P[4]; if (bytestream2_get_bytes_left(&s->stream_ptr) < 8) { av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x9\n"); return AVERROR_INVALIDDATA; } /* 4-color encoding */ bytestream2_get_buffer(&s->stream_ptr, P, 4); if (P[0] <= P[1]) { if (P[2] <= P[3]) { /* 1 of 4 colors for each pixel, need 16 more bytes */ for (y = 0; y < 8; y++) { /* get the next set of 8 2-bit flags */ int flags = bytestream2_get_le16(&s->stream_ptr); for (x = 0; x < 8; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; s->pixel_ptr += s->line_inc; } } else { uint32_t flags; /* 1 of 4 colors for each 2x2 block, need 4 more bytes */ flags = bytestream2_get_le32(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = P[flags & 0x03]; } s->pixel_ptr += s->stride * 2; } } } else { uint64_t flags; /* 1 of 4 colors for each 2x1 or 1x2 block, need 8 more bytes */ flags = bytestream2_get_le64(&s->stream_ptr); if (P[2] <= P[3]) { for (y = 0; y < 8; y++) { 
for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1] = P[flags & 0x03]; } s->pixel_ptr += s->stride; } } else { for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x++, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + s->stride] = P[flags & 0x03]; } s->pixel_ptr += s->stride * 2; } } } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char P[8]; int flags = 0; if (bytestream2_get_bytes_left(&s->stream_ptr) < 16) { av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0xA\n"); return AVERROR_INVALIDDATA; } bytestream2_get_buffer(&s->stream_ptr, P, 4); /* 4-color encoding for each 4x4 quadrant, or 4-color encoding on * either top and bottom or left and right halves */ if (P[0] <= P[1]) { /* 4-color encoding for each quadrant; need 32 bytes */ for (y = 0; y < 16; y++) { // new values for each 4x4 block if (!(y & 3)) { if (y) bytestream2_get_buffer(&s->stream_ptr, P, 4); flags = bytestream2_get_le32(&s->stream_ptr); } for (x = 0; x < 4; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } } else { // vertical split? 
int vert; uint64_t flags = bytestream2_get_le64(&s->stream_ptr); bytestream2_get_buffer(&s->stream_ptr, P + 4, 4); vert = P[4] <= P[5]; /* 4-color encoding for either left and right or top and bottom * halves */ for (y = 0; y < 16; y++) { for (x = 0; x < 4; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; if (vert) { s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } else if (y & 1) s->pixel_ptr += s->line_inc; // load values for second half if (y == 7) { memcpy(P, P + 4, 4); flags = bytestream2_get_le64(&s->stream_ptr); } } } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s, AVFrame *frame) { int y; /* 64-color encoding (each pixel in block is a different color) */ for (y = 0; y < 8; y++) { bytestream2_get_buffer(&s->stream_ptr, s->pixel_ptr, 8); s->pixel_ptr += s->stride; } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s, AVFrame *frame) { int x, y; /* 16-color block encoding: each 2x2 block is a different color */ for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = bytestream2_get_byte(&s->stream_ptr); } s->pixel_ptr += s->stride * 2; } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s, AVFrame *frame) { int y; unsigned char P[2]; if (bytestream2_get_bytes_left(&s->stream_ptr) < 4) { av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0xD\n"); return AVERROR_INVALIDDATA; } /* 4-color block encoding: each 4x4 block is a different color */ for (y = 0; y < 8; y++) { if (!(y & 3)) { P[0] = bytestream2_get_byte(&s->stream_ptr); P[1] = bytestream2_get_byte(&s->stream_ptr); } memset(s->pixel_ptr, P[0], 4); memset(s->pixel_ptr + 4, P[1], 4); s->pixel_ptr += s->stride; } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s, 
AVFrame *frame) { int y; unsigned char pix; /* 1-color encoding: the whole block is 1 solid color */ pix = bytestream2_get_byte(&s->stream_ptr); for (y = 0; y < 8; y++) { memset(s->pixel_ptr, pix, 8); s->pixel_ptr += s->stride; } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char sample[2]; /* dithered encoding */ sample[0] = bytestream2_get_byte(&s->stream_ptr); sample[1] = bytestream2_get_byte(&s->stream_ptr); for (y = 0; y < 8; y++) { for (x = 0; x < 8; x += 2) { *s->pixel_ptr++ = sample[ y & 1 ]; *s->pixel_ptr++ = sample[!(y & 1)]; } s->pixel_ptr += s->line_inc; } /* report success */ return 0; } static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s, AVFrame *frame) { signed char x, y; /* copy a block from the second last frame using an expanded range */ x = bytestream2_get_byte(&s->stream_ptr); y = bytestream2_get_byte(&s->stream_ptr); ff_tlog(s->avctx, "motion bytes = %d, %d\n", x, y); return copy_from(s, s->second_last_frame, frame, x, y); } static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s, AVFrame *frame) { int x, y; uint16_t P[2]; unsigned int flags; uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr; /* 2-color encoding */ P[0] = bytestream2_get_le16(&s->stream_ptr); P[1] = bytestream2_get_le16(&s->stream_ptr); if (!(P[0] & 0x8000)) { for (y = 0; y < 8; y++) { flags = bytestream2_get_byte(&s->stream_ptr) | 0x100; for (; flags != 1; flags >>= 1) *pixel_ptr++ = P[flags & 1]; pixel_ptr += s->line_inc; } } else { flags = bytestream2_get_le16(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 1) { pixel_ptr[x ] = pixel_ptr[x + 1 ] = pixel_ptr[x + s->stride] = pixel_ptr[x + 1 + s->stride] = P[flags & 1]; } pixel_ptr += s->stride * 2; } } return 0; } static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s, AVFrame *frame) { int x, y; uint16_t P[4]; unsigned int flags = 0; uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr; 
/* 2-color encoding for each 4x4 quadrant, or 2-color encoding on
 * either top and bottom or left and right halves */
P[0] = bytestream2_get_le16(&s->stream_ptr);
P[1] = bytestream2_get_le16(&s->stream_ptr);

if (!(P[0] & 0x8000)) {
    /* P[0] high bit clear: each 4x4 quadrant carries its own 2-color pair.
     * Iteration order is column-major over the two 4-pixel-wide halves:
     * 16 rows of 4 pixels, jumping back up to the right half after row 7. */
    for (y = 0; y < 16; y++) {
        // new values for each 4x4 block
        if (!(y & 3)) {
            if (y) {
                P[0] = bytestream2_get_le16(&s->stream_ptr);
                P[1] = bytestream2_get_le16(&s->stream_ptr);
            }
            flags = bytestream2_get_le16(&s->stream_ptr);
        }

        for (x = 0; x < 4; x++, flags >>= 1)
            *pixel_ptr++ = P[flags & 1];
        pixel_ptr += s->stride - 4;
        // switch to right half
        if (y == 7) pixel_ptr -= 8 * s->stride - 4;
    }
} else {
    /* P[0] high bit set: block is split into two halves, each with its
     * own 2-color pair; P[2]'s high bit selects the split direction. */
    flags = bytestream2_get_le32(&s->stream_ptr);
    P[2] = bytestream2_get_le16(&s->stream_ptr);
    P[3] = bytestream2_get_le16(&s->stream_ptr);

    if (!(P[2] & 0x8000)) {
        /* vertical split; left & right halves are 2-color encoded */
        for (y = 0; y < 16; y++) {
            for (x = 0; x < 4; x++, flags >>= 1)
                *pixel_ptr++ = P[flags & 1];
            pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) {
                pixel_ptr -= 8 * s->stride - 4;
                /* right half uses the second color pair and a fresh flag word */
                P[0] = P[2];
                P[1] = P[3];
                flags = bytestream2_get_le32(&s->stream_ptr);
            }
        }
    } else {
        /* horizontal split; top & bottom halves are 2-color encoded */
        for (y = 0; y < 8; y++) {
            if (y == 4) {
                /* bottom half uses the second color pair and a fresh flag word */
                P[0] = P[2];
                P[1] = P[3];
                flags = bytestream2_get_le32(&s->stream_ptr);
            }

            for (x = 0; x < 8; x++, flags >>= 1)
                *pixel_ptr++ = P[flags & 1];
            pixel_ptr += s->line_inc;
        }
    }
}

/* report success */
return 0;
}

/* Opcode 0x9, 16bpp: 4-color encoding. Depending on the high bits of P[0]
 * and P[2], each pixel, each 2x2 block, each 2x1 block, or each 1x2 block
 * selects one of four colors via 2-bit flags. */
static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[4];
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 4-color encoding */
    for (x = 0; x < 4; x++)
        P[x] = bytestream2_get_le16(&s->stream_ptr);

    if (!(P[0] & 0x8000)) {
        if (!(P[2] & 0x8000)) {
            /* 1 of 4 colors for each pixel */
            for (y = 0; y < 8; y++) {
                /* get the next set of 8 2-bit flags */
                int flags = bytestream2_get_le16(&s->stream_ptr);
                for (x = 0; x < 8; x++, flags >>= 2)
                    *pixel_ptr++ = P[flags & 0x03];
                pixel_ptr += s->line_inc;
            }
        } else {
            uint32_t flags;

            /* 1 of 4 colors for each 2x2 block */
            flags = bytestream2_get_le32(&s->stream_ptr);

            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    pixel_ptr[x                ] =
                    pixel_ptr[x + 1            ] =
                    pixel_ptr[x +     s->stride] =
                    pixel_ptr[x + 1 + s->stride] = P[flags & 0x03];
                }
                pixel_ptr += s->stride * 2;
            }
        }
    } else {
        uint64_t flags;

        /* 1 of 4 colors for each 2x1 or 1x2 block */
        flags = bytestream2_get_le64(&s->stream_ptr);

        if (!(P[2] & 0x8000)) {
            /* 2x1 blocks: each flag pair colors two horizontal neighbors */
            for (y = 0; y < 8; y++) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    pixel_ptr[x    ] =
                    pixel_ptr[x + 1] = P[flags & 0x03];
                }
                pixel_ptr += s->stride;
            }
        } else {
            /* 1x2 blocks: each flag pair colors two vertical neighbors */
            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x++, flags >>= 2) {
                    pixel_ptr[x            ] =
                    pixel_ptr[x + s->stride] = P[flags & 0x03];
                }
                pixel_ptr += s->stride * 2;
            }
        }
    }

    /* report success */
    return 0;
}

/* Opcode 0xA, 16bpp: 4-color encoding per 4x4 quadrant, or per half
 * (vertical or horizontal split), selected by the high bits of P[0]/P[4]. */
static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[8];
    int flags = 0;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    for (x = 0; x < 4; x++)
        P[x] = bytestream2_get_le16(&s->stream_ptr);

    /* 4-color encoding for each 4x4 quadrant, or 4-color encoding on
     * either top and bottom or left and right halves */
    if (!(P[0] & 0x8000)) {
        /* 4-color encoding for each quadrant */
        for (y = 0; y < 16; y++) {
            // new values for each 4x4 block
            if (!(y & 3)) {
                if (y)
                    for (x = 0; x < 4; x++)
                        P[x] = bytestream2_get_le16(&s->stream_ptr);
                flags = bytestream2_get_le32(&s->stream_ptr);
            }

            for (x = 0; x < 4; x++, flags >>= 2)
                *pixel_ptr++ = P[flags & 0x03];

            pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) pixel_ptr -= 8 * s->stride - 4;
        }
    } else {
        // vertical split?
        int vert;
        uint64_t flags = bytestream2_get_le64(&s->stream_ptr);

        for (x = 4; x < 8; x++)
            P[x] = bytestream2_get_le16(&s->stream_ptr);
        vert = !(P[4] & 0x8000);

        /* 4-color encoding for either left and right or top and bottom
         * halves */
        for (y = 0; y < 16; y++) {
            for (x = 0; x < 4; x++, flags >>= 2)
                *pixel_ptr++ = P[flags & 0x03];

            if (vert) {
                pixel_ptr += s->stride - 4;
                // switch to right half
                if (y == 7) pixel_ptr -= 8 * s->stride - 4;
            } else if (y & 1) pixel_ptr += s->line_inc;

            // load values for second half
            if (y == 7) {
                /* copy the second 4-color set (4 x uint16_t = 8 bytes) over the first */
                memcpy(P, P + 4, 8);
                flags = bytestream2_get_le64(&s->stream_ptr);
            }
        }
    }

    /* report success */
    return 0;
}

/* Opcode 0xB, 16bpp: 64-color encoding (each pixel in block is a different color) */
static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 64-color encoding (each pixel in block is a different color) */
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++)
            pixel_ptr[x] = bytestream2_get_le16(&s->stream_ptr);
        pixel_ptr += s->stride;
    }

    /* report success */
    return 0;
}

/* Opcode 0xC, 16bpp: 16-color encoding, one color per 2x2 sub-block */
static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 16-color block encoding: each 2x2 block is a different color */
    for (y = 0; y < 8; y += 2) {
        for (x = 0; x < 8; x += 2) {
            pixel_ptr[x                ] =
            pixel_ptr[x + 1            ] =
            pixel_ptr[x +     s->stride] =
            pixel_ptr[x + 1 + s->stride] = bytestream2_get_le16(&s->stream_ptr);
        }
        pixel_ptr += s->stride * 2;
    }

    /* report success */
    return 0;
}

/* Opcode 0xD, 16bpp: 4-color encoding, one color per 4x4 quadrant */
static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[2];
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 4-color block encoding: each 4x4 block is a different color */
    for (y = 0; y < 8; y++) {
        /* fetch a fresh color pair every 4 rows (top/bottom quadrant rows) */
        if (!(y & 3)) {
            P[0] = bytestream2_get_le16(&s->stream_ptr);
            P[1] = bytestream2_get_le16(&s->stream_ptr);
        }

        for (x = 0; x < 8; x++)
            pixel_ptr[x] = P[x >> 2];  /* left quadrant P[0], right quadrant P[1] */
        pixel_ptr += s->stride;
    }

    /* report success */
    return 0;
}

/* Opcode 0xE, 16bpp: 1-color encoding (whole block is one solid color) */
static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s,
AVFrame *frame)
{
    int x, y;
    uint16_t pix;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 1-color encoding: the whole block is 1 solid color */
    pix = bytestream2_get_le16(&s->stream_ptr);

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++)
            pixel_ptr[x] = pix;
        pixel_ptr += s->stride;
    }

    /* report success */
    return 0;
}

/* Dispatch table for 8bpp (PAL8) blocks, indexed by the 4-bit opcode. */
static int (* const ipvideo_decode_block[])(IpvideoContext *s, AVFrame *frame) = {
    ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
    ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
    ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
    ipvideo_decode_block_opcode_0x6, ipvideo_decode_block_opcode_0x7,
    ipvideo_decode_block_opcode_0x8, ipvideo_decode_block_opcode_0x9,
    ipvideo_decode_block_opcode_0xA, ipvideo_decode_block_opcode_0xB,
    ipvideo_decode_block_opcode_0xC, ipvideo_decode_block_opcode_0xD,
    ipvideo_decode_block_opcode_0xE, ipvideo_decode_block_opcode_0xF,
};

/* Dispatch table for 16bpp (RGB555) blocks. Opcodes 0x0-0x5 share the 8bpp
 * handlers (motion compensation); note the last slot reuses opcode_0x1. */
static int (* const ipvideo_decode_block16[])(IpvideoContext *s, AVFrame *frame) = {
    ipvideo_decode_block_opcode_0x0,    ipvideo_decode_block_opcode_0x1,
    ipvideo_decode_block_opcode_0x2,    ipvideo_decode_block_opcode_0x3,
    ipvideo_decode_block_opcode_0x4,    ipvideo_decode_block_opcode_0x5,
    ipvideo_decode_block_opcode_0x6_16, ipvideo_decode_block_opcode_0x7_16,
    ipvideo_decode_block_opcode_0x8_16, ipvideo_decode_block_opcode_0x9_16,
    ipvideo_decode_block_opcode_0xA_16, ipvideo_decode_block_opcode_0xB_16,
    ipvideo_decode_block_opcode_0xC_16, ipvideo_decode_block_opcode_0xD_16,
    ipvideo_decode_block_opcode_0xE_16, ipvideo_decode_block_opcode_0x1,
};

/* Walk the decoding map (4 bits per 8x8 block) and dispatch every block of
 * the frame to the opcode handler selected for its bit depth. */
static void ipvideo_decode_opcodes(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    unsigned char opcode;
    int ret;
    GetBitContext gb;

    bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */
    if (!s->is_16bpp) {
        /* this is PAL8, so make the palette available */
        memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);

        s->stride = frame->linesize[0];
    } else {
        /* stride is in 16-bit pixels for the RGB555 path */
        s->stride = frame->linesize[0] >> 1;
        /* the motion vector stream starts here; its byte length is the
         * first le16 of the opcode data */
        s->mv_ptr = s->stream_ptr;
        bytestream2_skip(&s->mv_ptr, bytestream2_get_le16(&s->stream_ptr));
    }
    s->line_inc = s->stride - 8;
    /* last pixel offset a motion-compensated 8x8 source block may start at */
    s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
                                   + (s->avctx->width - 8) * (1 + s->is_16bpp);

    init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8);
    for (y = 0; y < s->avctx->height; y += 8) {
        for (x = 0; x < s->avctx->width; x += 8) {
            if (get_bits_left(&gb) < 4)
                return;
            opcode = get_bits(&gb, 4);

            ff_tlog(s->avctx, " block @ (%3d, %3d): encoding 0x%X, data ptr offset %d\n", x, y, opcode, bytestream2_tell(&s->stream_ptr));

            if (!s->is_16bpp) {
                s->pixel_ptr = frame->data[0] + x + y*frame->linesize[0];
                ret = ipvideo_decode_block[opcode](s, frame);
            } else {
                s->pixel_ptr = frame->data[0] + x*2 + y*frame->linesize[0];
                ret = ipvideo_decode_block16[opcode](s, frame);
            }
            if (ret != 0) {
                av_log(s->avctx, AV_LOG_ERROR, "decode problem on frame %d, @ block (%d, %d)\n", s->avctx->frame_number, x, y);
                return;
            }
        }
    }
    if (bytestream2_get_bytes_left(&s->stream_ptr) > 1) {
        av_log(s->avctx, AV_LOG_DEBUG, "decode finished with %d bytes left over\n", bytestream2_get_bytes_left(&s->stream_ptr));
    }
}

/* Decoder init: pick the pixel format from bits_per_coded_sample and
 * allocate the two reference frames used for motion compensation. */
static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
{
    IpvideoContext *s = avctx->priv_data;

    s->avctx = avctx;

    s->is_16bpp = avctx->bits_per_coded_sample == 16;
    avctx->pix_fmt = s->is_16bpp ?
AV_PIX_FMT_RGB555 : AV_PIX_FMT_PAL8; ff_hpeldsp_init(&s->hdsp, avctx->flags); s->last_frame = av_frame_alloc(); s->second_last_frame = av_frame_alloc(); if (!s->last_frame || !s->second_last_frame) { av_frame_free(&s->last_frame); av_frame_free(&s->second_last_frame); return AVERROR(ENOMEM); } return 0; } static int ipvideo_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; IpvideoContext *s = avctx->priv_data; AVFrame *frame = data; int ret; if (av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, NULL)) { av_frame_unref(s->last_frame); av_frame_unref(s->second_last_frame); } if (buf_size < 2) return AVERROR_INVALIDDATA; /* decoding map contains 4 bits of information per 8x8 block */ s->decoding_map_size = AV_RL16(avpkt->data); /* compressed buffer needs to be large enough to at least hold an entire * decoding map */ if (buf_size < s->decoding_map_size + 2) return buf_size; s->decoding_map = buf + 2; bytestream2_init(&s->stream_ptr, buf + 2 + s->decoding_map_size, buf_size - s->decoding_map_size); if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; if (!s->is_16bpp) { int size; const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &size); if (pal && size == AVPALETTE_SIZE) { frame->palette_has_changed = 1; memcpy(s->pal, pal, AVPALETTE_SIZE); } else if (pal) { av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size); } } ipvideo_decode_opcodes(s, frame); *got_frame = 1; /* shuffle frames */ av_frame_unref(s->second_last_frame); FFSWAP(AVFrame*, s->second_last_frame, s->last_frame); if ((ret = av_frame_ref(s->last_frame, frame)) < 0) return ret; /* report that the buffer was completely consumed */ return buf_size; } static av_cold int ipvideo_decode_end(AVCodecContext *avctx) { IpvideoContext *s = avctx->priv_data; av_frame_free(&s->last_frame); av_frame_free(&s->second_last_frame); return 0; } AVCodec 
ff_interplay_video_decoder = { .name = "interplayvideo", .long_name = NULL_IF_CONFIG_SMALL("Interplay MVE video"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_INTERPLAY_VIDEO, .priv_data_size = sizeof(IpvideoContext), .init = ipvideo_decode_init, .close = ipvideo_decode_end, .decode = ipvideo_decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE, };
482707.c
/*- * BSD LICENSE * * Copyright (c) Intel Corporation. All rights reserved. * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "spdk_cunit.h"

#include "spdk/env.h"

#include "nvme/nvme.c"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"

DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
	    (const char *transport_name), true);
/* return anything non-NULL, this won't be dereferenced anywhere in this test */
DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
DEFINE_STUB(nvme_ctrlr_process_init, int,
	    (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
	    (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(dummy_probe_cb, bool,
	    (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	     struct spdk_nvme_ctrlr_opts *opts), false);
DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid,
	     const struct spdk_nvme_ctrlr_opts *opts,
	     void *devhandle), NULL);
DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvme_transport_available, bool,
	    (enum spdk_nvme_transport_type trtype), true);
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

/* Records that one of the destruct paths below was invoked. */
static bool ut_destruct_called = false;

void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	ut_destruct_called = true;
}

void
nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx)
{
	ut_destruct_called = true;
}

/* Completes a fake async destruct immediately by firing the callback. */
int
nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx)
{
	if (ctx->cb_fn) {
		ctx->cb_fn(ctrlr);
	}

	return 0;
}

/* Stub: always report an all-zero controller status register. */
union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts = {};

	return csts;
}

/* Stub: zero the opts struct and record the caller-provided size. */
void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;
}

/* Helper: zero both transport IDs before a comparison test. */
static void
memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
{
	memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
	memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
}

static bool ut_check_trtype = false;
static bool ut_test_probe_internal = false;

/* Fake PCIe scan used by the probe-internal test: exercises nvme_ctrlr_probe()
 * once with a constructible controller and once with construction failing. */
static int
ut_nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			bool direct_connect)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_qpair qpair = {};
	int rc;

	if (probe_ctx->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -1;
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	CU_ASSERT(ctrlr != NULL);
	ctrlr->adminq = &qpair;

	/* happy path with first controller */
	MOCK_SET(nvme_transport_ctrlr_construct, ctrlr);
	rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
	CU_ASSERT(rc == 0);

	/* failed with the second controller */
	MOCK_SET(nvme_transport_ctrlr_construct, NULL);
	rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR_P(nvme_transport_ctrlr_construct);

	return -1;
}

/* Stub transport destruct: frees the controller allocated by the scan above. */
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	free(ctrlr);

	return 0;
}

/* Stub transport scan with three test modes: trtype checking, the internal
 * probe test above, or the direct-connect probe callback path. */
int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	struct spdk_nvme_ctrlr *ctrlr = NULL;

	if (ut_check_trtype == true) {
		CU_ASSERT(probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE);
	}

	if (ut_test_probe_internal) {
		return ut_nvme_pcie_ctrlr_scan(probe_ctx, direct_connect);
	}

	if (direct_connect == true && probe_ctx->probe_cb) {
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid);
		nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid,
&ctrlr->opts);
	}

	return 0;
}

/* Records that the attach callback fired during a probe/connect test. */
static bool ut_attach_cb_called = false;

static void
dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
		struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	ut_attach_cb_called = true;
}

/* Exercises spdk_nvme_probe() across driver-init failure, unavailable
 * transport, secondary-process attach, and primary-process paths. */
static void
test_spdk_nvme_probe(void)
{
	int rc = 0;
	const struct spdk_nvme_transport_id *trid = NULL;
	void *cb_ctx = NULL;
	spdk_nvme_probe_cb probe_cb = NULL;
	spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
	spdk_nvme_remove_cb remove_cb = NULL;
	struct spdk_nvme_ctrlr ctrlr;
	pthread_mutexattr_t attr;
	struct nvme_driver dummy;
	g_spdk_nvme_driver = &dummy;

	/* driver init fails */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
	CU_ASSERT(rc == -1);

	/*
	 * For secondary processes, the attach_cb should automatically get
	 * called for any controllers already initialized by the primary
	 * process.
	 */
	MOCK_SET(spdk_nvme_transport_available_by_name, false);
	MOCK_SET(spdk_process_is_primary, true);
	dummy.initialized = true;
	g_spdk_nvme_driver = &dummy;
	rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
	CU_ASSERT(rc == -1);

	/* driver init passes, transport available, secondary call attach_cb */
	MOCK_SET(spdk_nvme_transport_available_by_name, true);
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
	dummy.initialized = true;
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
	TAILQ_INIT(&dummy.shared_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
	ut_attach_cb_called = false;
	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
	ut_check_trtype = true;
	rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);

	/* driver init passes, transport available, we are primary */
	MOCK_SET(spdk_process_is_primary, true);
	rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	/* reset to pre-test values */
	MOCK_CLEAR(spdk_memzone_lookup);
	ut_check_trtype = false;

	pthread_mutex_destroy(&dummy.lock);
	pthread_mutexattr_destroy(&attr);
}

/* Exercises spdk_nvme_connect(): NULL trid, secondary/primary lookup of an
 * already-attached controller, and the opts/opts_size combinations. */
static void
test_spdk_nvme_connect(void)
{
	struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr ctrlr;
	pthread_mutexattr_t attr;
	struct nvme_driver dummy;

	/* initialize the variable to prepare the test */
	dummy.initialized = true;
	TAILQ_INIT(&dummy.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &dummy;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);

	/* set NULL trid pointer to test immediate return */
	ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);

	/* driver init passes, transport available, secondary process connects ctrlr */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
	MOCK_SET(spdk_nvme_transport_available_by_name, true);
	memset(&trid, 0, sizeof(trid));
	trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);

	/* driver init passes, setup one ctrlr on the attached_list */
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
	/* get the ctrlr from the attached list */
	snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	/* get the ctrlr from the attached list with default ctrlr opts */
	ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
	ret_ctrlr = spdk_nvme_connect(&trid,
NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	/* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
	opts.num_io_queues = 1;
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, sizeof(opts));

	/* opts_size is 0 */
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 0);

	/* opts_size is less than sizeof(*opts) if opts != NULL */
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, 4);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 4);
	/* remove the attached ctrlr on the attached_list */
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));

	/* driver init passes, transport available, primary process connects ctrlr */
	MOCK_SET(spdk_process_is_primary, true);
	/* setup one ctrlr on the attached_list */
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
	/* get the ctrlr from the attached list */
	snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	/* get the ctrlr from the attached list with default ctrlr opts */
	ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	/* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
	opts.num_io_queues = 2;
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);

	/* remove the attached ctrlr on the attached_list */
	CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));

	/* test driver init failure return */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);
}

/* Allocates a fresh probe context with an empty init_ctrlrs list.
 * Caller owns the returned memory. */
static struct spdk_nvme_probe_ctx *
test_nvme_init_get_probe_ctx(void)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	SPDK_CU_ASSERT_FATAL(probe_ctx != NULL);
	TAILQ_INIT(&probe_ctx->init_ctrlrs);

	return probe_ctx;
}

/* Exercises nvme_init_controllers(): process-init failure, successful PCIe
 * attach onto the shared list, and non-PCIe attach onto the per-process list. */
static void
test_nvme_init_controllers(void)
{
	int rc = 0;
	struct nvme_driver test_driver;
	void *cb_ctx = NULL;
	spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
	struct spdk_nvme_probe_ctx *probe_ctx;
	struct spdk_nvme_ctrlr *ctrlr;
	pthread_mutexattr_t attr;

	g_spdk_nvme_driver = &test_driver;
	ctrlr = calloc(1, sizeof(*ctrlr));
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
	TAILQ_INIT(&test_driver.shared_attached_ctrlrs);

	/*
	 * Try to initialize, but nvme_ctrlr_process_init will fail.
	 * Verify correct behavior when it does.
	 */
	MOCK_SET(nvme_ctrlr_process_init, 1);
	MOCK_SET(spdk_process_is_primary, 1);
	g_spdk_nvme_driver->initialized = false;
	ut_destruct_called = false;
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	probe_ctx->cb_ctx = cb_ctx;
	probe_ctx->attach_cb = attach_cb;
	probe_ctx->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_spdk_nvme_driver->initialized == true);
	CU_ASSERT(ut_destruct_called == true);

	/*
	 * Controller init OK, need to move the controller state machine
	 * forward by setting the ctrl state so that it can be moved to
	 * the shared_attached_ctrlrs list.
	 */
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	MOCK_SET(nvme_ctrlr_process_init, 0);
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
	CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == ctrlr);
	TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);

	/*
	 * Reset to initial state
	 */
	CU_ASSERT(pthread_mutex_destroy(&ctrlr->ctrlr_lock) == 0);
	memset(ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, &attr) == 0);

	/*
	 * Non-PCIe controllers should be added to the per-process list,
	 * not the shared list.
	 */
	ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	MOCK_SET(nvme_ctrlr_process_init, 0);
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
	CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == ctrlr);
	TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
	CU_ASSERT(pthread_mutex_destroy(&ctrlr->ctrlr_lock) == 0);
	free(ctrlr);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));

	g_spdk_nvme_driver = NULL;
	pthread_mutexattr_destroy(&attr);
	pthread_mutex_destroy(&test_driver.lock);
}

/* Exercises nvme_driver_init() for every primary/secondary and
 * memzone-reserve/lookup success and failure combination. */
static void
test_nvme_driver_init(void)
{
	int rc;
	struct nvme_driver dummy;
	g_spdk_nvme_driver = &dummy;

	/* adjust this so testing doesn't take so long */
	g_nvme_driver_timeout_ms = 100;

	/* process is primary and mem already reserved */
	MOCK_SET(spdk_process_is_primary, true);
	dummy.initialized = true;
	rc = nvme_driver_init();
	CU_ASSERT(rc == 0);

	/*
	 * Process is primary and mem not yet reserved but the call
	 * to spdk_memzone_reserve() returns NULL.
	 */
	g_spdk_nvme_driver = NULL;
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_SET(spdk_memzone_reserve, NULL);
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is not primary, no mem already reserved */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	g_spdk_nvme_driver = NULL;
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is not primary, mem is already reserved & init'd */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
	dummy.initialized = true;
	rc = nvme_driver_init();
	CU_ASSERT(rc == 0);

	/* process is not primary, mem is reserved but not initialized */
	/* and times out */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
	dummy.initialized = false;
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is primary, got mem but mutex won't init */
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
	MOCK_SET(pthread_mutexattr_init, -1);
	g_spdk_nvme_driver = NULL;
	dummy.initialized = true;
	rc = nvme_driver_init();
	/* for FreeBSD we can't effectively mock this path */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif

	/* process is primary, got mem, mutex OK */
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_CLEAR(pthread_mutexattr_init);
	g_spdk_nvme_driver = NULL;
	rc = nvme_driver_init();
	CU_ASSERT(g_spdk_nvme_driver->initialized == false);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	MOCK_CLEAR(spdk_memzone_reserve);
	MOCK_CLEAR(spdk_memzone_lookup);
}

/* Exercises spdk_nvme_detach(): destruct on last reference, no destruct
 * while references remain, and removal from the correct attached list. */
static void
test_spdk_nvme_detach(void)
{
	int rc = 1;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_ctrlr *ret_ctrlr;
	struct nvme_driver test_driver;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	g_spdk_nvme_driver = &test_driver;
	TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
	CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);

	/*
	 * Controllers are ref counted so mock the function that returns
	 * the ref count so that detach will actually call the destruct
	 * function which we've mocked simply to verify that it gets
	 * called (we aren't testing what the real destruct function does
	 * here.)
	 */
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	rc = spdk_nvme_detach(&ctrlr);
	ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
	CU_ASSERT(ret_ctrlr == NULL);
	CU_ASSERT(ut_destruct_called == true);
	CU_ASSERT(rc == 0);

	/*
	 * Mock the ref count to 2 so we confirm that the destruct
	 * function is not called and that attached ctrl list is
	 * not empty.
	 */
	MOCK_SET(nvme_ctrlr_get_ref_count, 2);
	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
	ut_destruct_called = false;
	rc = spdk_nvme_detach(&ctrlr);
	ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
	CU_ASSERT(ret_ctrlr != NULL);
	CU_ASSERT(ut_destruct_called == false);
	CU_ASSERT(rc == 0);

	/*
	 * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
	 * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
	 * from the correct list.
	 */
	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	TAILQ_INIT(&g_nvme_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	rc = spdk_nvme_detach(&ctrlr);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
	CU_ASSERT(ut_destruct_called == true);
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	pthread_mutex_destroy(&test_driver.lock);
}

/* Verifies nvme_completion_poll_cb() marks the status done and stores a
 * copy of the completion. */
static void
test_nvme_completion_poll_cb(void)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_cpl cpl;

	status = calloc(1, sizeof(*status));
	SPDK_CU_ASSERT_FATAL(status != NULL);

	memset(&cpl, 0xff, sizeof(cpl));

	nvme_completion_poll_cb(status, &cpl);

	CU_ASSERT(status->done == true);
	CU_ASSERT(memcmp(&cpl, &status->cpl,
			 sizeof(struct spdk_nvme_cpl)) == 0);

	free(status);
}

/* stub callback used by test_nvme_user_copy_cmd_complete() */
static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
static void
dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
{
	ut_spdk_nvme_cpl = *cpl;
}

/* Verifies nvme_user_copy_cmd_complete(): user callback is always invoked,
 * and data is copied back to the user buffer only for controller-to-host
 * transfer opcodes (GET_LOG_PAGE yes, SET_FEATURES no). */
static void
test_nvme_user_copy_cmd_complete(void)
{
	struct nvme_request req;
	int test_data = 0xdeadbeef;
	int buff_size = sizeof(int);
	void *buff;
	static struct spdk_nvme_cpl cpl;

	memset(&req, 0, sizeof(req));
	memset(&cpl, 0x5a, sizeof(cpl));

	/* test without a user buffer provided */
	req.user_cb_fn = (void *)dummy_cb;
	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);

	/* test with a user buffer provided */
	req.user_buffer = malloc(buff_size);
	SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
	memset(req.user_buffer, 0, buff_size);
	req.payload_size = buff_size;
	buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	SPDK_CU_ASSERT_FATAL(buff != NULL);
	req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
	memcpy(buff, &test_data, buff_size);
	req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	req.pid = getpid();

	/* zero out the test value set in the callback */
	memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));

	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);

	/*
	 * Now test the same path as above but this time choose an opc
	 * that results in a different data transfer type.
	 */
	memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
	memset(req.user_buffer, 0, buff_size);
	buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	SPDK_CU_ASSERT_FATAL(buff != NULL);
	req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
	memcpy(buff, &test_data, buff_size);
	req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);

	/* clean up */
	free(req.user_buffer);
}

/* Verifies nvme_allocate_request_null() fills in callback fields and leaves
 * the payload empty. */
static void
test_nvme_allocate_request_null(void)
{
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
	void *cb_arg = (void *)0x5678;
	struct nvme_request *req = NULL;
	struct nvme_request dummy_req;

	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/*
	 * Put a dummy on the queue so we can make a request
	 * and confirm that what comes back is what we expect.
	 */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);

	/*
	 * Compare the req with the parameters that we passed in
	 * as well as what the function is supposed to update.
	 */
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->cb_fn == cb_fn);
	CU_ASSERT(req->cb_arg == cb_arg);
	CU_ASSERT(req->pid == getpid());
	CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
	CU_ASSERT(req->payload.md == NULL);
	CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
}

/* Verifies nvme_allocate_request(): returns NULL when the free list is
 * empty, otherwise copies the payload and callback parameters into the req. */
static void
test_nvme_allocate_request(void)
{
	struct spdk_nvme_qpair qpair;
	struct nvme_payload payload;
	uint32_t payload_struct_size = sizeof(payload);
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
	void *cb_arg = (void *)0x6789;
	struct nvme_request *req = NULL;
	struct nvme_request dummy_req;

	/* Fill the whole payload struct with a known pattern */
	memset(&payload, 0x5a, payload_struct_size);
	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/* Test trying to allocate a request when no requests are available */
	req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
				    cb_fn, cb_arg);
	CU_ASSERT(req == NULL);

	/* put a dummy on the queue, and then allocate one */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
	req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
				    cb_fn, cb_arg);

	/* all the req elements should now match the passed in parameters */
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->cb_fn == cb_fn);
	CU_ASSERT(req->cb_arg == cb_arg);
	CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
	CU_ASSERT(req->payload_size == payload_struct_size);
	CU_ASSERT(req->pid == getpid());
}

/* Verifies nvme_free_request() returns the request to its qpair's free list. */
static void
test_nvme_free_request(void)
{
	struct nvme_request match_req;
	struct spdk_nvme_qpair qpair;
	struct nvme_request *req;

	/* put a req on the Q, take it off and compare */
	memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
	match_req.qpair = &qpair;
	/* the code under test asserts this condition */
	match_req.num_children = 0;
	STAILQ_INIT(&qpair.free_req);

	nvme_free_request(&match_req);
	req = STAILQ_FIRST(&match_req.qpair->free_req);
	CU_ASSERT(req == &match_req);
}

/* Verifies nvme_allocate_request_user_copy(): NULL on bad input, the
 * host-to-controller copy path, the no-copy path, and allocation failure. */
static void
test_nvme_allocate_request_user_copy(void)
{
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
	void *cb_arg = (void *)0x12345;
	bool host_to_controller = true;
	struct nvme_request *req;
	struct nvme_request dummy_req;
	int test_data = 0xdeadbeef;
	void *buffer = NULL;
	uint32_t payload_size = sizeof(int);

	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/* no buffer or valid payload size, early NULL return */
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);

	/* good buffer and valid payload size */
	buffer = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	memcpy(buffer, &test_data, payload_size);

	/* put a dummy on the queue */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
	spdk_free(req->payload.contig_or_cb_arg);

	/* same thing but additional path coverage, no copy */
	host_to_controller = false;
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
	spdk_free(req->payload.contig_or_cb_arg);

	/* good buffer and valid payload size but make spdk_zmalloc fail */
	/* set the mock pointer to NULL for spdk_zmalloc */
	MOCK_SET(spdk_zmalloc, NULL);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);
	free(buffer);
	MOCK_CLEAR(spdk_zmalloc);
}

/* Exercises nvme_ctrlr_probe(): probe_cb rejecting, construct failure, and
 * the happy path placing the new controller on the init list. */
static void
test_nvme_ctrlr_probe(void)
{
	int rc = 0;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair qpair = {};
	const struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_probe_ctx probe_ctx = {};
	void *devhandle = NULL;
	void *cb_ctx = NULL;
	struct spdk_nvme_ctrlr *dummy = NULL;

	ctrlr.adminq = &qpair;

	TAILQ_INIT(&probe_ctx.init_ctrlrs);
	nvme_driver_init();

	/* test when probe_cb returns false */
	MOCK_SET(dummy_probe_cb, false);
	nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == 1);

	/* probe_cb returns true but we can't construct a ctrl */
	MOCK_SET(dummy_probe_cb, true);
	MOCK_SET(nvme_transport_ctrlr_construct, NULL);
	nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == -1);

	/* happy path */
	MOCK_SET(dummy_probe_cb, true);
	MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
	nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == 0);
	dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
	SPDK_CU_ASSERT_FATAL(dummy != NULL);
	CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
	TAILQ_REMOVE(&probe_ctx.init_ctrlrs, dummy, tailq);
	MOCK_CLEAR_P(nvme_transport_ctrlr_construct);

	free(g_spdk_nvme_driver);
}

/* Exercises nvme_robust_mutex_init_shared() with mocked pthread results. */
static void
test_nvme_robust_mutex_init_shared(void)
{
	pthread_mutex_t mtx;
	int rc = 0;

	/* test where both pthread calls succeed */
	MOCK_SET(pthread_mutexattr_init, 0);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	CU_ASSERT(rc == 0);

	/* test where we can't init attr's but init mutex works */
	MOCK_SET(pthread_mutexattr_init, -1);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	/*
for FreeBSD the only possible return value is 0 */ #ifndef __FreeBSD__ CU_ASSERT(rc != 0); #else CU_ASSERT(rc == 0); #endif /* test where we can init attr's but the mutex init fails */ MOCK_SET(pthread_mutexattr_init, 0); MOCK_SET(pthread_mutex_init, -1); rc = nvme_robust_mutex_init_shared(&mtx); /* for FreeBSD the only possible return value is 0 */ #ifndef __FreeBSD__ CU_ASSERT(rc != 0); #else CU_ASSERT(rc == 0); #endif } static void test_opc_data_transfer(void) { enum spdk_nvme_data_transfer xfer; xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH); CU_ASSERT(xfer == SPDK_NVME_DATA_NONE); xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE); CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER); xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ); CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST); xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE); CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST); } static void test_trid_parse_and_compare(void) { struct spdk_nvme_transport_id trid1, trid2; int ret; /* set trid1 trid2 value to id parse */ ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0"); CU_ASSERT(ret == -EINVAL); memset(&trid1, 0, sizeof(trid1)); ret = spdk_nvme_transport_id_parse(&trid1, NULL); CU_ASSERT(ret == -EINVAL); ret = spdk_nvme_transport_id_parse(NULL, NULL); CU_ASSERT(ret == -EINVAL); memset(&trid1, 0, sizeof(trid1)); ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0"); CU_ASSERT(ret == -EINVAL); memset(&trid1, 0, sizeof(trid1)); ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:"); CU_ASSERT(ret == -EINVAL); memset(&trid1, 0, sizeof(trid1)); ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:"); CU_ASSERT(ret == -EINVAL); memset(&trid1, 0, sizeof(trid1)); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:rdma\n" "adrfam:ipv4\n" "traddr:192.168.100.8\n" "trsvcid:4420\n" "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0); 
CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA); CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4); CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0); CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0); CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0); memset(&trid2, 0, sizeof(trid2)); CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0); CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE); CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0); /* set trid1 trid2 and test id_compare */ memset_trid(&trid1, &trid2); trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6; trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4; ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret > 0); memset_trid(&trid1, &trid2); snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8"); snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9"); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret < 0); memset_trid(&trid1, &trid2); snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420"); snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421"); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret < 0); memset_trid(&trid1, &trid2); snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery"); snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery"); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret < 0); memset_trid(&trid1, &trid2); snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery"); snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery"); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret == 0); memset_trid(&trid1, &trid2); snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery"); snprintf(trid2.subnqn, sizeof(trid2.subnqn), 
"subnqn:nqn.2016-08.org.Nvmexpress.discovery"); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret > 0); memset_trid(&trid1, &trid2); ret = spdk_nvme_transport_id_compare(&trid1, &trid2); CU_ASSERT(ret == 0); /* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */ memset_trid(&trid1, &trid2); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0); memset_trid(&trid1, &trid2); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0); memset_trid(&trid1, &trid2); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0); memset_trid(&trid1, &trid2); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0); CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0); CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:tcp\n" "adrfam:ipv4\n" "traddr:192.168.100.8\n" "trsvcid:4420\n" "priority:2\n" "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0); CU_ASSERT(trid1.priority == 2); } static void test_spdk_nvme_transport_id_parse_trtype(void) { enum spdk_nvme_transport_type *trtype; enum spdk_nvme_transport_type sct; char *str; trtype = NULL; str = "unit_test"; /* test function returned value when trtype is NULL but str not NULL */ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL)); /* test function returned value when str is NULL but trtype not NULL */ trtype = 
&sct; str = NULL; CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL)); /* test function returned value when str and strtype not NULL, but str value * not "PCIe" or "RDMA" */ str = "unit_test"; CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == 0); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_CUSTOM); /* test trtype value when use function "strcasecmp" to compare str and "PCIe",not case-sensitive */ str = "PCIe"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE); str = "pciE"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE); /* test trtype value when use function "strcasecmp" to compare str and "RDMA",not case-sensitive */ str = "RDMA"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA); str = "rdma"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA); /* test trtype value when use function "strcasecmp" to compare str and "FC",not case-sensitive */ str = "FC"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC); str = "fc"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC); /* test trtype value when use function "strcasecmp" to compare str and "TCP",not case-sensitive */ str = "TCP"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP); str = "tcp"; spdk_nvme_transport_id_parse_trtype(trtype, str); CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP); } static void test_spdk_nvme_transport_id_parse_adrfam(void) { enum spdk_nvmf_adrfam *adrfam; enum spdk_nvmf_adrfam sct; char *str; adrfam = NULL; str = "unit_test"; /* test function returned value when adrfam is NULL but str not NULL */ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL)); /* test function returned value when str is NULL but 
adrfam not NULL */ adrfam = &sct; str = NULL; CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL)); /* test function returned value when str and adrfam not NULL, but str value * not "IPv4" or "IPv6" or "IB" or "FC" */ str = "unit_test"; CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT)); /* test adrfam value when use function "strcasecmp" to compare str and "IPv4",not case-sensitive */ str = "IPv4"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4); str = "ipV4"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4); /* test adrfam value when use function "strcasecmp" to compare str and "IPv6",not case-sensitive */ str = "IPv6"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6); str = "ipV6"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6); /* test adrfam value when use function "strcasecmp" to compare str and "IB",not case-sensitive */ str = "IB"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB); str = "ib"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB); /* test adrfam value when use function "strcasecmp" to compare str and "FC",not case-sensitive */ str = "FC"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC); str = "fc"; spdk_nvme_transport_id_parse_adrfam(adrfam, str); CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC); } static void test_trid_trtype_str(void) { const char *s; s = spdk_nvme_transport_id_trtype_str(-5); CU_ASSERT(s == NULL); s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "PCIe") == 0); s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "RDMA") == 0); s = 
spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_FC); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "FC") == 0); s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_TCP); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "TCP") == 0); } static void test_trid_adrfam_str(void) { const char *s; s = spdk_nvme_transport_id_adrfam_str(-5); CU_ASSERT(s == NULL); s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "IPv4") == 0); s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "IPv6") == 0); s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "IB") == 0); s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC); SPDK_CU_ASSERT_FATAL(s != NULL); CU_ASSERT(strcmp(s, "FC") == 0); } /* stub callback used by the test_nvme_request_check_timeout */ static bool ut_timeout_cb_call = false; static void dummy_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint16_t cid) { ut_timeout_cb_call = true; } static void test_nvme_request_check_timeout(void) { int rc; struct spdk_nvme_qpair qpair; struct nvme_request req; struct spdk_nvme_ctrlr_process active_proc; uint16_t cid = 0; uint64_t now_tick = 0; memset(&qpair, 0x0, sizeof(qpair)); memset(&req, 0x0, sizeof(req)); memset(&active_proc, 0x0, sizeof(active_proc)); req.qpair = &qpair; active_proc.timeout_cb_fn = dummy_timeout_cb; /* if have called timeout_cb_fn then return directly */ req.timed_out = true; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(rc == 0); CU_ASSERT(ut_timeout_cb_call == false); /* if timeout isn't enabled then return directly */ req.timed_out = false; req.submit_tick = 0; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(rc == 0); CU_ASSERT(ut_timeout_cb_call == false); /* req->pid isn't right then return 
directly */ req.submit_tick = 1; req.pid = g_spdk_nvme_pid + 1; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(rc == 0); CU_ASSERT(ut_timeout_cb_call == false); /* AER command has no timeout */ req.pid = g_spdk_nvme_pid; req.cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(rc == 0); CU_ASSERT(ut_timeout_cb_call == false); /* time isn't out */ qpair.id = 1; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(rc == 1); CU_ASSERT(ut_timeout_cb_call == false); now_tick = 2; rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick); CU_ASSERT(req.timed_out == true); CU_ASSERT(ut_timeout_cb_call == true); CU_ASSERT(rc == 0); } struct nvme_completion_poll_status g_status; uint64_t completion_delay_us, timeout_in_usecs; int g_process_comp_result; pthread_mutex_t g_robust_lock = PTHREAD_MUTEX_INITIALIZER; int spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions) { spdk_delay_us(completion_delay_us); g_status.done = completion_delay_us < timeout_in_usecs && g_process_comp_result == 0 ? 
true : false; return g_process_comp_result; } static void test_nvme_wait_for_completion(void) { struct spdk_nvme_qpair qpair; struct spdk_nvme_ctrlr ctrlr; int rc = 0; memset(&ctrlr, 0, sizeof(ctrlr)); ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE; memset(&qpair, 0, sizeof(qpair)); qpair.ctrlr = &ctrlr; /* completion timeout */ memset(&g_status, 0, sizeof(g_status)); completion_delay_us = 2000000; timeout_in_usecs = 1000000; rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(rc == -ECANCELED); /* spdk_nvme_qpair_process_completions returns error */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = -1; completion_delay_us = 1000000; timeout_in_usecs = 2000000; rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs); CU_ASSERT(rc == -ECANCELED); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC); CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION); g_process_comp_result = 0; /* complete in time */ memset(&g_status, 0, sizeof(g_status)); completion_delay_us = 1000000; timeout_in_usecs = 2000000; rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs); CU_ASSERT(g_status.timed_out == false); CU_ASSERT(g_status.done == true); CU_ASSERT(rc == 0); /* nvme_wait_for_completion */ /* spdk_nvme_qpair_process_completions returns error */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = -1; rc = nvme_wait_for_completion(&qpair, &g_status); CU_ASSERT(rc == -ECANCELED); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC); CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION); /* successful completion */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = 0; rc = nvme_wait_for_completion(&qpair, 
&g_status); CU_ASSERT(rc == 0); CU_ASSERT(g_status.timed_out == false); CU_ASSERT(g_status.done == true); /* completion timeout */ memset(&g_status, 0, sizeof(g_status)); completion_delay_us = 2000000; timeout_in_usecs = 1000000; rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock, timeout_in_usecs); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(rc == -ECANCELED); /* spdk_nvme_qpair_process_completions returns error */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = -1; completion_delay_us = 1000000; timeout_in_usecs = 2000000; rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock, timeout_in_usecs); CU_ASSERT(rc == -ECANCELED); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC); CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION); g_process_comp_result = 0; /* complete in time */ memset(&g_status, 0, sizeof(g_status)); completion_delay_us = 1000000; timeout_in_usecs = 2000000; rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock, timeout_in_usecs); CU_ASSERT(g_status.timed_out == false); CU_ASSERT(g_status.done == true); CU_ASSERT(rc == 0); /* nvme_wait_for_completion */ /* spdk_nvme_qpair_process_completions returns error */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = -1; rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock); CU_ASSERT(rc == -ECANCELED); CU_ASSERT(g_status.timed_out == true); CU_ASSERT(g_status.done == false); CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC); CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION); /* successful completion */ memset(&g_status, 0, sizeof(g_status)); g_process_comp_result = 0; rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock); CU_ASSERT(rc == 0); CU_ASSERT(g_status.timed_out == 
false); CU_ASSERT(g_status.done == true); } static void test_nvme_ctrlr_probe_internal(void) { struct spdk_nvme_probe_ctx *probe_ctx; struct spdk_nvme_transport_id trid = {}; struct nvme_driver dummy; int rc; probe_ctx = calloc(1, sizeof(*probe_ctx)); CU_ASSERT(probe_ctx != NULL); MOCK_SET(spdk_process_is_primary, true); MOCK_SET(spdk_memzone_reserve, (void *)&dummy); g_spdk_nvme_driver = NULL; rc = nvme_driver_init(); CU_ASSERT(rc == 0); ut_test_probe_internal = true; MOCK_SET(dummy_probe_cb, true); trid.trtype = SPDK_NVME_TRANSPORT_PCIE; nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL); rc = nvme_probe_internal(probe_ctx, false); CU_ASSERT(rc < 0); CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs)); free(probe_ctx); ut_test_probe_internal = false; } int main(int argc, char **argv) { CU_pSuite suite = NULL; unsigned int num_failures; CU_set_error_action(CUEA_ABORT); CU_initialize_registry(); suite = CU_add_suite("nvme", NULL, NULL); CU_ADD_TEST(suite, test_opc_data_transfer); CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_trtype); CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_adrfam); CU_ADD_TEST(suite, test_trid_parse_and_compare); CU_ADD_TEST(suite, test_trid_trtype_str); CU_ADD_TEST(suite, test_trid_adrfam_str); CU_ADD_TEST(suite, test_nvme_ctrlr_probe); CU_ADD_TEST(suite, test_spdk_nvme_probe); CU_ADD_TEST(suite, test_spdk_nvme_connect); CU_ADD_TEST(suite, test_nvme_ctrlr_probe_internal); CU_ADD_TEST(suite, test_nvme_init_controllers); CU_ADD_TEST(suite, test_nvme_driver_init); CU_ADD_TEST(suite, test_spdk_nvme_detach); CU_ADD_TEST(suite, test_nvme_completion_poll_cb); CU_ADD_TEST(suite, test_nvme_user_copy_cmd_complete); CU_ADD_TEST(suite, test_nvme_allocate_request_null); CU_ADD_TEST(suite, test_nvme_allocate_request); CU_ADD_TEST(suite, test_nvme_free_request); CU_ADD_TEST(suite, test_nvme_allocate_request_user_copy); CU_ADD_TEST(suite, test_nvme_robust_mutex_init_shared); CU_ADD_TEST(suite, test_nvme_request_check_timeout); 
CU_ADD_TEST(suite, test_nvme_wait_for_completion); CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); num_failures = CU_get_number_of_failures(); CU_cleanup_registry(); return num_failures; }
930439.c
/*************************************************************** * * PROGRAM: dc_ftmm_1D (diane_ftm) * 1D version of dc_ftmm * * PURPOSE: normalize Transfer Function with L1 normalization * and bound it * * INPUT: argv[1] = modsq_name * argv[2] = FTB_name * argv[3] = Lower thresold for Transfer Function in Fourier domain * * NB: *.FTB bounded of Transfer Function (output) * * From Karim's version: December 1992 * * AUTHOR: JLP, SR, JV * translated to C by Karim BOUYOUCEF * adapted to 1-D by JLP * * JLP * Version 01/02/00 ***************************************************************/ #include <stdio.h> #include <string.h> #include <math.h> #include <jlp_ftoc.h> void main(argc,argv) int argc; char *argv[]; { /* DECLARATIONS */ INT4 nx, ny; INT_PNTR pntr_ima; int status; char comments[81], ftb_name[60], modsq_name[61]; register int i, j, ix, iy; float *modsq, min, max; double *ftr, *fti; double alpha, cumul, ww; printf(" dc_ftmm_1D, version 01/02/00 \n"); printf("NB: To determine the correct threshold, first use this program with 0.\n"); /* TEST OF COMMAND LINE */ if (argc == 7) argc = 4; if (argc != 4) { fprintf(stderr,"\nUnexpected number of arguments \n"); fprintf(stderr,"\nUSAGE:\n"); fprintf(stderr,"\ndc_ftm modsq_file output_FTB Lower_thresold\n\n"); fprintf(stderr,"\nLower_thresold = lower thresold of Transfer Function\n\n"); exit(-1); } /* READ COMMAND LINE PARAMETERS */ strcpy(modsq_name,argv[1]); strcpy(ftb_name,argv[2]); status = sscanf(argv[3],"%lf",&alpha); if ((status != 1) || (alpha > 1.)) { fprintf(stderr,"\nFATAL ERROR: Lower thresold [%s] incorrect\n\n",argv[2]); exit(-1); } fprintf(stderr,"\n***************************"); fprintf(stderr,"\nPROGRAM : dc_ftmm_1D Version 01/02/00"); fprintf(stderr,"\n***************************"); fprintf(stderr,"\nlower thresold for Transfer Function = %f \n",alpha); /* INPUT MODSQ */ JLP_BEGIN(); JLP_INQUIFMT(); JLP_VM_READIMAG1(&pntr_ima, &nx, &ny, modsq_name, comments); modsq = (float *)pntr_ima; fti = (double 
*)malloc(nx * ny * sizeof(double)); ftr = (double *)malloc(nx * ny * sizeof(double)); /* Square root of modsq: */ for (i = 0; i < nx * ny; i++) { ww = modsq[i]; if(ww > 0) #ifdef TOTO ftr[i] = sqrt(ww); #endif ftr[i] = ww; else ftr[i] = 0.; } /* First compute the PSF: */ RECENT_FFT_1D_Y(ftr, ftr, &nx, &ny, &nx); for (i = 0; i < nx * ny; i++) fti[i] = 0.; fftw_1D_Y(ftr, fti, (int)nx, (int)ny, -1); /* Make sure that the PSF is zero on the edges: */ for(ix = 0; ix < nx; ix++) { min = ftr[ix]; for (iy = 1; iy < ny; iy++) if(min > ftr[ix + iy*nx]) min = ftr[ix + iy*nx]; for (iy = 0; iy < ny; iy++) ftr[ix + iy*nx] -= min; } /* Then normalize L1 the PSF for each column */ for(ix = 0; ix < nx; ix++) { cumul = 0.0; for (iy = 0; iy < ny; iy++) cumul += ftr[ix + iy*nx]; #ifdef DEBUG if(ix < 1) printf(" cumul = %f (line#%d)\n", cumul,ix); #endif for (iy = 0; iy < ny; iy++) ftr[ix + iy*nx] /= cumul; } /* Computes back the Transfer Function: */ for (i = 0; i < nx * ny; i++) fti[i] = 0.; fftw_1D_Y(ftr, fti, (int)nx, (int)ny, 1); /* Threshold according to ALPHAT: */ max = ftr[0]; for (i = 0; i < nx * ny; i++) if(max < ftr[i]) max = ftr[i]; /* Just for an estimation (for a first go) */ printf("Maximum of Transfer Function is = %f\n", max); for (i = 0; i < nx * ny; i++) if (ftr[i] < alpha) ftr[i] = 0.0; if(alpha >= max) { printf("Fatal error/Threshold is too high: max < alpha\n"); exit(-1); } /* STORAGE OF THE RESULT *.FTB */ RECENT_FFT_1D_Y(ftr, ftr, &nx, &ny, &nx); strcpy(comments,"dc_ftmm_1D"); JLP_D_WRITEIMAG(ftr, &nx, &ny, &nx, ftb_name, comments); fprintf(stderr,"\n\n"); JLP_END(); }
284079.c
/*****************************************************************/
/* Priority queue / min-heap | PROG2 | MIEEC | 2017/18           */
/*****************************************************************/

#include "heap.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* 1-based indexing: the root lives at slot 1 so the parent/child
 * index arithmetic stays simple; slot 0 is never used. */
#define RAIZ (1)
#define PAI(x) (x / 2)
#define FILHO_ESQ(x) (x * 2)
#define FILHO_DIR(x) (x * 2 + 1)

int menor_que(elemento *e1, elemento *e2);

/* Allocates a heap able to hold 'capacidade' elements.
 * Returns NULL on allocation failure. */
heap *heap_nova(int capacidade)
{
    heap *h = malloc(sizeof *h);
    if (!h)
        return NULL;

    h->tamanho = 0;
    h->capacidade = capacidade;
    /* calloc zero-initializes every slot (0..capacidade), so the old
     * explicit NULL-ing loop was redundant -- and it also stopped one
     * slot short (it never touched index 'capacidade'). */
    h->elementos = calloc(capacidade + 1, sizeof(elemento *));
    if (!h->elementos) {
        free(h);
        return NULL;
    }
    return h;
}

/* Frees the heap and every element wrapper still stored in it.
 * NOTE: the wrapped no_grafo nodes are NOT freed here -- ownership
 * of the graph nodes stays with the caller (hence the historical
 * commented-out free of elementos[i]->no). */
void heap_apaga(heap *h)
{
    int i;

    if (!h)
        return;

    for (i = RAIZ; i <= h->tamanho; i++) {
        free(h->elementos[i]);
        h->elementos[i] = NULL;
    }
    free(h->elementos);
    free(h);
}

/* Wraps a graph node and its priority in a new heap element.
 * Returns NULL if 'no' is NULL or allocation fails. */
elemento *elemento_novo(double prioridade, no_grafo *no)
{
    elemento *elem;

    if (!no)
        return NULL;

    elem = malloc(sizeof *elem);
    if (!elem)
        return NULL;

    elem->no = no;
    elem->prioridade = prioridade;
    return elem;
}

/* Inserts 'no' with the given priority.
 * Returns 1 on success, 0 on failure (NULL/full heap, bad node,
 * or allocation failure). */
int heap_insere(heap *h, no_grafo *no, double prioridade)
{
    elemento *aux, *elem;
    int i;

    /* guard h as heap_remove does -- the old code dereferenced a
     * possibly-NULL heap here */
    if (!h || h->tamanho >= h->capacidade)
        return 0;

    elem = elemento_novo(prioridade, no);
    if (!elem)
        return 0;

    /* place the element at the end of the heap */
    h->tamanho++;
    i = h->tamanho;
    h->elementos[i] = elem;

    /* sift up: while the element has higher priority (smaller key)
     * than its parent, swap them */
    while (i != RAIZ && menor_que(h->elementos[i], h->elementos[PAI(i)])) {
        aux = h->elementos[PAI(i)];
        h->elementos[PAI(i)] = h->elementos[i];
        h->elementos[i] = aux;
        i = PAI(i);
    }
    return 1;
}

/* Removes and returns the highest-priority (minimum) node,
 * or NULL if the heap is NULL/empty. */
no_grafo *heap_remove(heap *h)
{
    int i, filho_prio;
    elemento *aux;
    no_grafo *ret;

    if (!h || h->tamanho <= 0)
        return NULL;

    ret = h->elementos[RAIZ]->no;
    free(h->elementos[RAIZ]);

    /* move the last element of the heap to the root */
    h->elementos[RAIZ] = h->elementos[h->tamanho];
    h->elementos[h->tamanho] = NULL;
    h->tamanho--;

    /* sift down: while not at the base of the heap */
    i = RAIZ;
    while (FILHO_ESQ(i) <= h->tamanho) {
        /* pick the higher-priority (smaller) of the two children */
        filho_prio = FILHO_ESQ(i);
        if (FILHO_DIR(i) <= h->tamanho &&
            menor_que(h->elementos[FILHO_DIR(i)], h->elementos[FILHO_ESQ(i)]))
            filho_prio = FILHO_DIR(i);

        /* swap while the child outranks the parent */
        if (menor_que(h->elementos[filho_prio], h->elementos[i])) {
            aux = h->elementos[filho_prio];
            h->elementos[filho_prio] = h->elementos[i];
            h->elementos[i] = aux;
            i = filho_prio;
        } else {
            break;
        }
    }
    return ret;
}

/* Prints the subtree rooted at 'indice' sideways (right subtree on
 * top), indented 3 spaces per tree level. */
void heap_imprime(heap *h, int indice)
{
    int i, nivel = 0;

    /* guard against NULL heap (the old code dereferenced h blindly) */
    if (!h || indice > h->tamanho)
        return;

    /* depth of 'indice' = number of halvings down to the root */
    i = indice;
    while (i > 1) {
        i = i / 2;
        nivel++;
    }
    heap_imprime(h, indice * 2);
    for (i = 0; i < 3 * nivel; i++)
        printf("   ");
    printf("%s (%.2f)\n", h->elementos[indice]->no->cidade,
           h->elementos[indice]->prioridade);
    heap_imprime(h, indice * 2 + 1);
}

/* Strict priority comparison: returns 1 when e1 outranks e2
 * (smaller value = higher priority); 0 otherwise or if either
 * element is NULL. */
int menor_que(elemento *e1, elemento *e2)
{
    if (e1 == NULL || e2 == NULL) {
        return 0;
    }
    return e1->prioridade < e2->prioridade;
}
74806.c
/* * Copyright (c) 2016-2017 Hisilicon Limited. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 */
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

/* Fill one hardware WQE data segment from an ib_sge (fields are
 * little-endian on the wire).
 */
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The hns wr opcode real value is start from 0, In order to distinguish
 * between initialized and uninitialized map values, we plus 1 to the actual
 * value when defining the mapping, so that the validity can be identified by
 * checking the mapped value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

/* IB verbs opcode -> (hardware opcode + 1); 0 entries mean "unmapped". */
static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE,			RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM,		RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND,			SEND),
	HR_OPC_MAP(SEND_WITH_IMM,		SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ,			RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP,		ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,	ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV,		SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV,			LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,	ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD,	ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR,			FAST_REG_PMR),
};

/* Translate an IB verbs opcode to the hardware WQE opcode; returns
 * HNS_ROCE_V2_WQE_OP_MASK for out-of-range or unmapped opcodes.
 */
static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	/* Undo the +1 bias from HR_OPC_MAP; a stored 0 means "invalid". */
	return hns_roce_op_code[ib_opcode] ?
cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		/* fetch-and-add: only one operand, no compare value */
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}

/* Copy inline payload into the extended SGE area of the SQ buffer.
 * On success *sge_idx is advanced by the number of extended-sge slots
 * consumed. Returns 0 on success, -EINVAL if the payload does not fit.
 */
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "no enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to extended sge space, the left length in page may
	 * not long enough for current user's sge. So the data should be
	 * splited into several parts, one in the first page, and the others in
	 * the subsequent pages.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			idx += len / dseg_len;

			i++;
			if (i >= wr->num_sge)
				break;

			left_len_in_pg -= len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
			dseg += len;
		} else {
			/* Split across the hardware page boundary. */
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / dseg_len;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}

/* Write SGEs that do not fit in the base WQE into the extended SGE area.
 * For RC/UC the first HNS_ROCE_SGE_IN_WQE entries already live in the WQE
 * itself, so they are skipped here.
 */
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind, unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int cnt = valid_num_sge;
	struct ib_sge *sge = wr->sg_list;
	unsigned int idx = *sge_ind;

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		cnt -= HNS_ROCE_SGE_IN_WQE;
		sge += HNS_ROCE_SGE_IN_WQE;
	}

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		set_data_seg_v2(dseg, sge);
		idx++;
		sge++;
		cnt--;
	}

	*sge_ind = idx;
}

/* Validate an inline payload length against both the QP's inline limit and
 * the path MTU. Returns true if the length is acceptable.
 */
static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}

/* Build the inline-data portion of an RC send WQE. Small payloads are
 * copied directly after the WQE header; larger ones go to the extended
 * SGE area via fill_ext_sge_inl_data(). Inline RDMA_READ is rejected.
 */
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg
+= sizeof(struct hns_roce_v2_rc_send_wqe);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		/* small payload: copied in-place right after the WQE header */
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		/* large payload: spilled to the extended SGE area */
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
			       curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}

/* Fill the data segments of an RC send WQE: inline, in-WQE SGEs, or
 * in-WQE plus extended SGEs, depending on flags and SGE count.
 */
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	roce_set_field(rc_sq_wqe->byte_20,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       (*sge_ind) & (qp->sge.sge_cnt - 1));

	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		/* all SGEs fit in the WQE itself; zero-length ones skipped */
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		/* first HNS_ROCE_SGE_IN_WQE in the WQE, rest extended */
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr, sge_ind, valid_num_sge);
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	return 0;
}

/* Check that the QP type, QP state and device state permit posting a
 * send WQE. Returns 0 or a negative errno.
 */
static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
		   hr_qp->state == IB_QPS_INIT ||
		   hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

/* Count non-zero-length SGEs in a WR and return their total byte length
 * through *sge_len.
 */
static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}

/* Return the WR's immediate data converted to little-endian, or 0 for
 * opcodes that carry no immediate.
 */
static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}

/* Set opcode and immediate data in a UD send WQE; only SEND and
 * SEND_WITH_IMM are legal on UD QPs.
 */
static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}

/* Build a complete UD send WQE (address vector, flags, SGEs) from an
 * ib_send_wr. *sge_idx is advanced past the extended SGEs consumed.
 */
static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	int valid_num_sge;
	u32 msg_len = 0;
	bool loopback;
	u8 *smac;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	/* destination MAC is split across dmac and byte_48 fields */
	roce_set_field(ud_sq_wqe->dmac,
V2_UD_SEND_WQE_DMAC_0_M,
		       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
		       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
		       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
		       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
		       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
		       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);

	/* MAC loopback: destination MAC equals this port's own MAC */
	smac = (u8 *)hr_dev->dev_addr[qp->port];
	loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0;

	roce_set_bit(ud_sq_wqe->byte_40,
		     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	/* Set sig attr */
	roce_set_bit(ud_sq_wqe->byte_4,
		     V2_UD_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	/* Set se attr */
	roce_set_bit(ud_sq_wqe->byte_4,
		     V2_UD_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
		       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	roce_set_field(ud_sq_wqe->byte_20,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       curr_idx & (qp->sge.sge_cnt - 1));

	roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
		       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);

	/* high bit set in remote_qkey selects the QP's own qkey (IB spec) */
	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);

	roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
		       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
		       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
		       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
		       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
		       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
		       V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);

	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en ? 1 : 0);

	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
		       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);

	memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);

	set_extend_sge(qp, wr, &curr_idx, valid_num_sge);

	*sge_idx = curr_idx;

	return 0;
}

/* Set the opcode and per-opcode fields (rkey/va/inv_key, FRMR segment)
 * in an RC send WQE. Returns -EINVAL for unsupported opcodes.
 */
static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		break;
	case IB_WR_LOCAL_INV:
		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
		fallthrough;
	case
IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}

/* Build a complete RC send WQE: opcode fields, send flags, owner bit and
 * data/atomic segments. *sge_idx is advanced past extended SGEs used.
 */
static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr, void *wqe,
			     unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	/* REG_MR's segment was already written by set_frmr_seg() above */
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);

	*sge_idx = curr_idx;

	return ret;
}

/* Ring the SQ doorbell, or defer to the flush workqueue if the QP has
 * entered the error state (hardware flush workaround, see comment).
 */
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ if the QP state
	 * gets into errored mode. Hence, as a workaround to this
	 * hardware limitation, driver needs to assist in flushing. But
	 * the flushing operation uses mailbox to convey the QP state to
	 * the hardware and which can sleep due to the mutex protection
	 * around the mailbox calls. Hence, use the deferred flush for
	 * now.
	 */
	if (qp->state == IB_QPS_ERR) {
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S, qp->sq.head);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
	}
}

/* ib_post_send() implementation for hip08: validates state, builds one
 * WQE per WR under the SQ lock, then rings the doorbell once.
 */
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	int nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		/* owner bit alternates each time the ring wraps */
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI)
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;
		/*
Memory barrier */ wmb(); update_sq_db(hr_dev, qp); } spin_unlock_irqrestore(&qp->sq.lock, flags); return ret; } static int check_recv_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) return -EIO; else if (hr_qp->state == IB_QPS_RESET) return -EINVAL; return 0; } static int hns_roce_v2_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_rinl_sge *sge_list; unsigned long flags; void *wqe = NULL; u32 wqe_idx; int nreq; int ret; int i; spin_lock_irqsave(&hr_qp->rq.lock, flags); ret = check_recv_valid(hr_dev, hr_qp); if (unlikely(ret)) { *bad_wr = wr; nreq = 0; goto out; } for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq, hr_qp->ibqp.recv_cq))) { ret = -ENOMEM; *bad_wr = wr; goto out; } wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); ret = -EINVAL; *bad_wr = wr; goto out; } wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx); dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; for (i = 0; i < wr->num_sge; i++) { if (!wr->sg_list[i].length) continue; set_data_seg_v2(dseg, wr->sg_list + i); dseg++; } if (wr->num_sge < hr_qp->rq.max_gs) { dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); dseg->addr = 0; } /* rq support inline data */ if (hr_qp->rq_inl_buf.wqe_cnt) { sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list; hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge; for (i = 0; i < wr->num_sge; i++) { sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr; sge_list[i].len = wr->sg_list[i].length; } } hr_qp->rq.wrid[wqe_idx] = wr->wr_id; } out: if (likely(nreq)) { 
hr_qp->rq.head += nreq; /* Memory barrier */ wmb(); /* * Hip08 hardware cannot flush the WQEs in RQ if the QP state * gets into errored mode. Hence, as a workaround to this * hardware limitation, driver needs to assist in flushing. But * the flushing operation uses mailbox to convey the QP state to * the hardware and which can sleep due to the mutex protection * around the mailbox calls. Hence, use the deferred flush for * now. */ if (hr_qp->state == IB_QPS_ERR) { if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) init_flush_work(hr_dev, hr_qp); } else { *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; } } spin_unlock_irqrestore(&hr_qp->rq.lock, flags); return ret; } static void *get_srq_wqe(struct hns_roce_srq *srq, int n) { return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift); } static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n) { return hns_roce_buf_offset(idx_que->mtr.kmem, n << idx_que->entry_shift); } static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) { /* always called with interrupts disabled. 
 */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->tail++;

	spin_unlock(&srq->lock);
}

/* Allocate a free slot in the SRQ index queue bitmap. Returns the slot
 * index, or -ENOSPC if the bitmap is full.
 */
static int find_empty_entry(struct hns_roce_idx_que *idx_que,
			    unsigned long size)
{
	int wqe_idx;

	if (unlikely(bitmap_full(idx_que->bitmap, size)))
		return -ENOSPC;

	wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

	bitmap_set(idx_que->bitmap, wqe_idx, 1);

	return wqe_idx;
}

/* ib_post_srq_recv() implementation for hip08: allocates an index-queue
 * slot and WQE per WR, then rings the SRQ doorbell once.
 */
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	__le32 *srq_idx;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	ind = srq->head & (srq->wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* '>=': the last SGE slot is reserved for the terminator */
		if (unlikely(wr->num_sge >= srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
		if (unlikely(wqe_idx < 0)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		/* terminate an under-full WQE with a sentinel segment */
		if (wr->num_sge < srq->max_gs) {
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

		srq_idx = get_idx_buf(&srq->idx_que, ind);
		*srq_idx = cpu_to_le32(wqe_idx);

		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->wqe_cnt - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 =
			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
		srq_db.parameter =
			cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

/* Handle "hardware reset already completed" for the command path; see
 * comment below for the rollback rationale. Returns a CMD_RST_PRC_* code.
 */
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifing hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

/* Handle "hardware reset in progress" for the command path; returns a
 * CMD_RST_PRC_* code.
 */
static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can rollback the operation like notifing hardware to
	 * free resources, HNAE3_INIT_CLIENT related process will exit with
	 * error to notify NIC driver to reschedule soft reset process once
	 * again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

/* Handle "NIC software reset in progress" for the command path; always
 * returns CMD_RST_PRC_EBUSY.
 */
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

/* Query the NIC driver's reset state and dispatch to the matching
 * handler. Returns 0 when no reset is pending, else a CMD_RST_PRC_* code.
 */
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are described
	 * as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_cmdq_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}

/* Number of free descriptors in a CMQ ring (one slot kept unused to
 * distinguish full from empty).
 */
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

/* Allocate and DMA-map the descriptor array of a CMQ ring. */
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;

		return -ENOMEM;
	}

	return 0;
}

/* Unmap and free a CMQ ring's descriptor array. */
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}

/* Initialise one CMQ ring (CSQ or CRQ) and allocate its descriptors. */
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

/* Program the base address, depth and head/tail registers of one CMQ
 * ring into the hardware.
 */
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}

/* Initialise both command queues (CSQ and CRQ): rings, locks, timeout
 * and hardware registers. On CRQ failure the CSQ is torn down again.
 */
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}

/* Release both command-queue descriptor rings. */
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

/* Zero a CMQ descriptor and set its opcode and direction flags. */
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

/* True when the hardware has consumed all submitted CSQ descriptors
 * (head register caught up with next_to_use).
 */
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return head == priv->cmq.csq.next_to_use;
}

/* Reclaim completed CSQ descriptors up to the hardware head; returns
 * the number of descriptors cleaned.
 */
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

/* Submit 'num' descriptors on the CSQ, poll for completion and copy the
 * hardware write-back results into 'desc'. Returns 0, -EBUSY (ring
 * full), -EIO (firmware error) or -EAGAIN (timeout).
 */
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this time
	 * which will be use for hardware to write back
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use =
desc[handle]; dev_dbg(hr_dev->dev, "set cmq desc:\n"); csq->next_to_use++; if (csq->next_to_use == csq->desc_num) csq->next_to_use = 0; handle++; } /* Write to hardware */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use); /* * If the command is sync, wait for the firmware to write back, * if multi descriptors to be sent, use the first one to check */ if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { do { if (hns_roce_cmq_csq_done(hr_dev)) break; udelay(1); timeout++; } while (timeout < priv->cmq.tx_timeout); } if (hns_roce_cmq_csq_done(hr_dev)) { complete = true; handle = 0; while (handle < num) { /* get the result of hardware write back */ desc_to_use = &csq->desc[ntc]; desc[handle] = *desc_to_use; dev_dbg(hr_dev->dev, "Get cmq desc:\n"); desc_ret = le16_to_cpu(desc[handle].retval); if (desc_ret == CMD_EXEC_SUCCESS) ret = 0; else ret = -EIO; priv->cmq.last_status = desc_ret; ntc++; handle++; if (ntc == csq->desc_num) ntc = 0; } } if (!complete) ret = -EAGAIN; /* clean the command send queue */ handle = hns_roce_cmq_csq_clean(hr_dev); if (handle != num) dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n", handle, num); spin_unlock_bh(&csq->lock); return ret; } static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { int retval; int ret; ret = hns_roce_v2_rst_process_cmd(hr_dev); if (ret == CMD_RST_PRC_SUCCESS) return 0; if (ret == CMD_RST_PRC_EBUSY) return -EBUSY; ret = __hns_roce_cmq_send(hr_dev, desc, num); if (ret) { retval = hns_roce_v2_rst_process_cmd(hr_dev); if (retval == CMD_RST_PRC_SUCCESS) return 0; else if (retval == CMD_RST_PRC_EBUSY) return -EBUSY; } return ret; } static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) { struct hns_roce_query_version *resp; struct hns_roce_cmq_desc desc; int ret; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; resp = (struct hns_roce_query_version 
/* continuation of hns_roce_cmq_query_hw_info() from the previous chunk */
		*)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

/* True if any reset activity (count change, HW reset, SW reset) is seen
 * via the hnae3 handle — used to skip/abort the function-clear handshake.
 */
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
		return true;

	return false;
}

/*
 * Handle the outcome of a failed/aborted function clear: if a reset
 * completed or is completing, wait for it and treat the clear as done
 * (the reset wipes the function anyway); otherwise log the failure.
 * @retval is the last cmq status, @flag indicates the initial write failed.
 */
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
				      int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_cnt;
	unsigned long end;
	bool sw_resetting;
	bool hw_resetting;

	instance_stage = handle->rinfo.instance_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt) {
		/* a full reset already happened — function state is gone */
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
	} else if (hw_resetting) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (!ops->get_hw_reset_stat(handle)) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after reset.\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (ops->ae_dev_reset_cnt(handle) !=
			    hr_dev->reset_cnt) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after sw reset\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev,
			 "Func clear failed because of unfinished sw reset\n");
	} else {
		if (retval && !flag)
			dev_warn(hr_dev->dev,
				 "Func clear read failed, ret = %d.\n",
				 retval);

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	}
}

/*
 * Ask firmware to clear all state owned by this function, then poll the
 * "done" flag until it is set or the timeout expires. Any failure path
 * funnels into hns_roce_func_clr_rst_prc() for reset-aware handling.
 */
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (hns_roce_func_clr_chk_rst(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (hns_roce_func_clr_chk_rst(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}

/* Query the firmware version; result stored in hr_dev->caps.fw_ver. */
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

/* Configure global RoCE parameters (continues in the next chunk). */
static int hns_roce_config_global_param(struct
/* continuation of hns_roce_config_global_param() from the previous chunk */
					       hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	/* 0x3e8 = 1000: 1us tick config; 0x12b7 = 4791, the RoCEv2 UDP port */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/*
 * Query the PF's base-table resources (QPC/SRQC/CQC/MPT/SCCC BT counts
 * and SL count) from firmware into hr_dev->caps. Uses a 2-descriptor
 * command linked with the NEXT flag.
 */
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
					 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
					 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
					 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
					 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
					 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
					 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
					 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
					 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
					 PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
					 PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}

/* Query QPC/CQC timer base-table counts from firmware into hr_dev->caps. */
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc.data;

	hr_dev->caps.qpc_timer_bt_num =
		roce_get_field(req_a->qpc_timer_bt_idx_num,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
		roce_get_field(req_a->cqc_timer_bt_idx_num,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}

/*
 * Configure per-VF switch behavior: read current settings, then write
 * back with loopback allowed, local loopback disabled and destination
 * override allowed. The second send reuses @desc, re-flagged as a write.
 */
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/* Allocate per-VF resources (BT index/number for each context type);
 * field programming continues in the next chunk.
 */
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES,
					      false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	roce_set_field(req_a->vf_qpc_bt_idx_num,
/* continuation of hns_roce_alloc_vf_resource(): program each context
 * type's base-table start index (0) and count for the VF
 */
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_qpc_bt_idx_num,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
		       HNS_ROCE_VF_QPC_BT_NUM);

	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
		       HNS_ROCE_VF_SRQC_BT_NUM);

	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
		       HNS_ROCE_VF_CQC_BT_NUM);

	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
		       HNS_ROCE_VF_MPT_BT_NUM);

	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
		       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
		       VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);

	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
		       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
		       VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);

	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
		       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
		       VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);

	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
		       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
		       VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);

	roce_set_field(req_b->vf_sccc_idx_num,
		       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
	roce_set_field(req_b->vf_sccc_idx_num,
		       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
		       HNS_ROCE_VF_SCCC_BT_NUM);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

/*
 * Program base-table attributes (BA page size, buffer page size, hop
 * number) for QPC/SRQC/CQC/MPT/SCCC. HNS_ROCE_HOP_NUM_0 is encoded as 0
 * on the wire. Continues into the next chunk.
 */
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg, CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg, CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg, CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num == HNS_ROCE_HOP_NUM_0 ?
/* continuation of hns_roce_v2_set_bt() from the previous chunk */
		       0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/*
 * Fill hr_dev->caps with compile-time defaults; used as a fallback when
 * hns_roce_query_pf_caps() fails. HIP09 (revision >= PCI_REVISION_ID_HIP09)
 * overrides the EQE/CQE/QPC sizes at the end.
 */
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_sz		= HNS_ROCE_V2_QPC_SZ;
	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz	= HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
	caps->cqe_sz		= HNS_ROCE_V2_CQE_SIZE;
	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey	= 0;
	caps->reserved_pds	= 0;
	caps->reserved_mrws	= 1;
	caps->reserved_uars	= 0;
	caps->reserved_cqs	= 0;
	caps->reserved_srqs	= 0;
	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;

	caps->qpc_ba_pg_sz	= 0;
	caps->qpc_buf_pg_sz	= 0;
	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz	= 0;
	caps->srqc_buf_pg_sz	= 0;
	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz	= 0;
	caps->cqc_buf_pg_sz	= 0;
	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz	= 0;
	caps->mpt_buf_pg_sz	= 0;
	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mtt_ba_pg_sz	= 0;
	caps->mtt_buf_pg_sz	= 0;
	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num	= HNS_ROCE_SQWQE_HOP_NUM;
	caps->wqe_sge_hop_num	= HNS_ROCE_EXT_SGE_HOP_NUM;
	caps->wqe_rq_hop_num	= HNS_ROCE_RQWQE_HOP_NUM;
	caps->cqe_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
	caps->cqe_buf_pg_sz	= 0;
	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz	= 0;
	caps->srqwqe_buf_pg_sz	= 0;
	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz	= 0;
	caps->idx_buf_pg_sz	= 0;
	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;
	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
				  HNS_ROCE_CAP_FLAG_RECORD_DB |
				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0]	= HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->aeqe_size		= HNS_ROCE_AEQE_SIZE;
	caps->ceqe_size		= HNS_ROCE_CEQE_SIZE;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu		= IB_MTU_4096;

	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;

	caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
		       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
		       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

	caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
	caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
	caps->qpc_timer_ba_pg_sz  = 0;
	caps->qpc_timer_buf_pg_sz = 0;
	caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
	caps->num_cqc_timer	  =
/* continuation of set_default_caps() from the previous chunk */
				    HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
	caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
	caps->cqc_timer_ba_pg_sz  = 0;
	caps->cqc_timer_buf_pg_sz = 0;
	caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;

	caps->sccc_sz		= HNS_ROCE_V2_SCCC_SZ;
	caps->sccc_ba_pg_sz	= 0;
	caps->sccc_buf_pg_sz	= 0;
	caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;

	/* HIP09 uses larger EQE/CQE/QPC entries than HIP08 */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
	}
}

/*
 * Derive the buffer/base-address page-size shifts for a HEM table from
 * the object count, object size, hop depth and base-table count. Only
 * one of *buf_page_size / *bt_page_size is set, selected by @hem_type.
 */
static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
		       int *buf_page_size, int *bt_page_size, u32 hem_type)
{
	u64 obj_per_chunk;
	u64 bt_chunk_size = PAGE_SIZE;
	u64 buf_chunk_size = PAGE_SIZE;
	u64 obj_per_chunk_default = buf_chunk_size / obj_size;

	*buf_page_size = 0;
	*bt_page_size = 0;

	switch (hop_num) {
	case 3:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 2:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 1:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case HNS_ROCE_HOP_NUM_0:
		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
		break;
	default:
		pr_err("Table %d not support hop_num = %d!\n", hem_type,
		       hop_num);
		return;
	}

	if (hem_type >= HEM_TYPE_MTT)
		*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
	else
		*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}

/*
 * Query the PF capability set from firmware (5 linked descriptors) and
 * populate hr_dev->caps, then derive page sizes for each HEM table.
 * Continues in the next chunk.
 */
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct hns_roce_query_pf_caps_a *resp_a;
	struct hns_roce_query_pf_caps_b *resp_b;
	struct hns_roce_query_pf_caps_c *resp_c;
	struct hns_roce_query_pf_caps_d *resp_d;
	struct hns_roce_query_pf_caps_e *resp_e;
	int ctx_hop_num;
	int pbl_hop_num;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
					      true);
		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
	if (ret)
		return ret;

	resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
	resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;

	caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
	caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
	caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
	caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
	caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
	caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
	caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
	caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
	caps->num_aeq_vectors = resp_a->num_aeq_vectors;
	caps->num_other_vectors = resp_a->num_other_vectors;
	caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
	caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
	caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
	caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;

	caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
	caps->irrl_entry_sz = resp_b->irrl_entry_sz;
	caps->trrl_entry_sz = resp_b->trrl_entry_sz;
	caps->cqc_entry_sz = resp_b->cqc_entry_sz;
	caps->srqc_entry_sz = resp_b->srqc_entry_sz;
	caps->idx_entry_sz = resp_b->idx_entry_sz;
	caps->sccc_sz = resp_b->sccc_sz;
	caps->max_mtu = resp_b->max_mtu;
	caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
	caps->min_cqes = resp_b->min_cqes;
	caps->min_wqes = resp_b->min_wqes;
	caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
	caps->pkey_table_len[0] = resp_b->pkey_table_len;
	caps->phy_num_uars = resp_b->phy_num_uars;
	ctx_hop_num = resp_b->ctx_hop_num;
	pbl_hop_num = resp_b->pbl_hop_num;

	caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_M,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_S);
	caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
	caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
		       HNS_ROCE_CAP_FLAGS_EX_SHIFT;

	caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_M,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_S);
	caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
						V2_QUERY_PF_CAPS_C_MAX_GID_M,
						V2_QUERY_PF_CAPS_C_MAX_GID_S);
	caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
	caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
	caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_M,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_S);
	caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
						V2_QUERY_PF_CAPS_C_MAX_ORD_M,
						V2_QUERY_PF_CAPS_C_MAX_ORD_S);
	caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
	caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
	caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
	caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
	caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
	caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
	caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
	caps->default_aeq_arm_st =
			roce_get_field(resp_d->arm_st_aeq_depth,
				       V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
				       V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
	caps->default_ceq_arm_st =
			roce_get_field(resp_d->arm_st_aeq_depth,
				       V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
				       V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
	caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_M,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_S);
	caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_M,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_S);
	caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
					    V2_QUERY_PF_CAPS_D_RSV_QPS_M,
					    V2_QUERY_PF_CAPS_D_RSV_QPS_S);
	caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
					     V2_QUERY_PF_CAPS_D_RSV_UARS_M,
					     V2_QUERY_PF_CAPS_D_RSV_UARS_S);
	caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
					     V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
					     V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
	caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
	caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
					    V2_QUERY_PF_CAPS_E_RSV_CQS_M,
					    V2_QUERY_PF_CAPS_E_RSV_CQS_S);
	caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
					     V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
					     V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
	caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
	caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
	caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
	caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
	caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);

	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
	caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
	caps->mtt_ba_pg_sz = 0;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;

	caps->qpc_hop_num = ctx_hop_num;
	caps->srqc_hop_num = ctx_hop_num;
	caps->cqc_hop_num = ctx_hop_num;
	caps->mpt_hop_num = ctx_hop_num;
	caps->mtt_hop_num = pbl_hop_num;
	caps->cqe_hop_num = pbl_hop_num;
	caps->srqwqe_hop_num = pbl_hop_num;
	caps->idx_hop_num = pbl_hop_num;
	caps->wqe_sq_hop_num =
			roce_get_field(resp_d->wq_hop_num_max_srqs,
				       V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
				       V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
	caps->wqe_sge_hop_num =
			roce_get_field(resp_d->wq_hop_num_max_srqs,
				       V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
				       V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
	caps->wqe_rq_hop_num =
			roce_get_field(resp_d->wq_hop_num_max_srqs,
				       V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
				       V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);

	/* HIP09 overrides: larger EQE/CQE/QPC/SCCC entries */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
		caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
	}

	calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz,
		   &caps->qpc_ba_pg_sz, HEM_TYPE_QPC);
	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
		   caps->mpt_bt_num, &caps->mpt_buf_pg_sz,
		   &caps->mpt_ba_pg_sz, HEM_TYPE_MTPT);
	calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
		   caps->cqc_bt_num, &caps->cqc_buf_pg_sz,
		   &caps->cqc_ba_pg_sz, HEM_TYPE_CQC);
	calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
		   caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
		   &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);

	caps->sccc_hop_num = ctx_hop_num;
	caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

	calc_pg_sz(caps->num_qps, caps->sccc_sz,
		   caps->sccc_hop_num, caps->sccc_bt_num,
		   &caps->sccc_buf_pg_sz,
		   &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
	calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
		   caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
		   &caps->cqc_timer_buf_pg_sz,
		   &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);

	calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
		   1,
/* continuation of hns_roce_query_pf_caps() from the previous chunk */
		   &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
	calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
		   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
		   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
	calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
		   1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz,
		   HEM_TYPE_IDX);

	return 0;
}

/* Tell firmware the QPC entry size in use (needed on HIP09). */
static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_entry_size *cfg_size =
				  (struct hns_roce_cfg_entry_size *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
				      false);

	cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
	cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/* Tell firmware the SCC context entry size in use (needed on HIP09). */
static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_entry_size *cfg_size =
				  (struct hns_roce_cfg_entry_size *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
				      false);

	cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
	cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/* Configure QPC/SCCC entry sizes; a no-op on hardware before HIP09. */
static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
{
	int ret;

	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
		return 0;

	ret = hns_roce_config_qpc_size(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_config_sccc_size(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n",
			ret);

	return ret;
}

/*
 * Device bring-up profile step: query hw/fw versions, configure global
 * parameters, query PF resources, then allocate VF resources and program
 * base-table attributes. Falls back to compiled-in defaults if the PF
 * caps query fails. Continues in the next chunk.
 */
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev,
			"Query hardware version fail, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev,
			"Query firmware version fail, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev,
			"Configure global param fail, ret = %d.\n", ret);
		return ret;
	}

	/* Get pf resource owned by every pf */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_query_pf_timer_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to query pf timer resource, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_set_vf_switch_param(hr_dev, 0);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to set function switch param, ret = %d.\n",
			ret);
		return ret;
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	caps->pbl_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
	caps->pbl_buf_pg_sz	= 0;
	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
	caps->eqe_ba_pg_sz	= 0;
	caps->eqe_buf_pg_sz	= 0;
	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz	= 0;

	ret = hns_roce_query_pf_caps(hr_dev);
	if (ret)
		set_default_caps(hr_dev);

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev,
			"Configure bt attribute fail, ret = %d.\n", ret);
		return ret;
	}

	/* Configure the size of QPC, SCCC, etc.
 */
	ret = hns_roce_config_entry_size(hr_dev);

	return ret;
}

/*
 * Program one link table (TSQ or TPQ) into hardware via a two-descriptor
 * CFG_LLM mailbox-less command: descriptor A carries the table base address,
 * depth/page-size and head entry; descriptor B carries the tail entry.
 * Returns 0 on success or a negative errno from the command queue.
 */
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;

	/* Chain the two descriptors: NEXT flag set on the first one only. */
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
	req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
		       CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
		       CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
		       CFG_LLM_INIT_EN_S, 1);
	req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
	req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
	roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
		       0);
	req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
	roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
		       CFG_LLM_TAIL_BA_H_S,
		       entry[page_num - 1].blk_ba1_nxt_ptr &
		       HNS_ROCE_LINK_TABLE_BA1_M);
	/*
	 * NOTE(review): reads entry[page_num - 2]; assumes page_num >= 2,
	 * which holds because both pg_num_b formulas below add at least 2.
	 */
	roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
		       CFG_LLM_TAIL_PTR_S,
		       (entry[page_num - 2].blk_ba1_nxt_ptr &
		       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
		       HNS_ROCE_LINK_TABLE_NXT_PTR_S);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

/*
 * Allocate the DMA-coherent page list and entry table for a TSQ/TPQ link
 * table, fill each entry with its page's bus address (split across blk_ba0
 * and blk_ba1_nxt_ptr) and a next-page index, then push the whole table to
 * hardware via hns_roce_config_link_table().
 * Returns 0 on success, -ENOMEM on any allocation failure (all partial
 * allocations are unwound), or -EINVAL for an unknown table type.
 */
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
				    enum hns_roce_link_table_type type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	struct device *dev = hr_dev->dev;
	u32 buf_chk_sz;
	dma_addr_t t;
	int func_num = 1;
	int pg_num_a;
	int pg_num_b;
	int pg_num;
	int size;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz +	PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
		pg_num_b = 2 * 4 * func_num + 2;
		break;
	default:
		return -EINVAL;
	}

	pg_num = max(pg_num_a, pg_num_b);
	size = pg_num * sizeof(struct hns_roce_link_table_entry);

	link_tbl->table.buf = dma_alloc_coherent(dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf)
		goto out;

	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
				    GFP_KERNEL);
	if (!link_tbl->pg_list)
		goto err_kcalloc_failed;

	entry = link_tbl->table.buf;
	for (i = 0; i < pg_num; ++i) {
		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
							      &t, GFP_KERNEL);
		if (!link_tbl->pg_list[i].buf)
			goto err_alloc_buf_failed;

		link_tbl->pg_list[i].map = t;

		/* Bus address is split: low bits >>12 and high bits >>44. */
		entry[i].blk_ba0 = (u32)(t >> 12);
		entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);

		/* Every entry but the last links to its successor. */
		if (i < (pg_num - 1))
			entry[i].blk_ba1_nxt_ptr |=
				(i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
	}
	link_tbl->npages = pg_num;
	link_tbl->pg_sz = buf_chk_sz;

	return hns_roce_config_link_table(hr_dev, type);

err_alloc_buf_failed:
	/* Free only the pages allocated so far, newest first. */
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, buf_chk_sz,
				  link_tbl->pg_list[i].buf,
				  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

err_kcalloc_failed:
	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);

out:
	return -ENOMEM;
}

/*
 * Release all DMA pages of a link table, its page list and the entry table
 * itself. Mirrors hns_roce_init_link_table().
 */
static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
				     struct hns_roce_link_table *link_tbl)
{
	struct device *dev = hr_dev->dev;
	int size;
	int i;

	size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);

	for (i = 0; i < link_tbl->npages; ++i)
		if (link_tbl->pg_list[i].buf)
			dma_free_coherent(dev, link_tbl->pg_sz,
					  link_tbl->pg_list[i].buf,
					  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);
}

/*
 * HW-v2 init hook: set up the TSQ and TPQ link tables and pin the QPC/CQC
 * timer HEM chunks. On failure every step taken so far is rolled back in
 * reverse order via the goto ladder.
 */
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int qpc_count, cqc_count;
	int ret, i;

	/* TSQ includes SQ doorbell and ack doorbell */
	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
		goto err_tpq_init_failed;
	}

	/* Alloc memory for QPC Timer buffer space chunk */
	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
	     qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
		if (ret) {
			dev_err(hr_dev->dev, "QPC Timer get failed\n");
			goto err_qpc_timer_failed;
		}
	}

	/* Alloc memory for CQC Timer buffer space chunk */
	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
	     cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
		if (ret) {
			dev_err(hr_dev->dev, "CQC Timer get failed\n");
			goto err_cqc_timer_failed;
		}
	}

	return 0;

err_cqc_timer_failed:
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

	hns_roce_free_link_table(hr_dev, &priv->tpq);

err_tpq_init_failed:
	hns_roce_free_link_table(hr_dev, &priv->tsq);

	return ret;
}

static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
	struct
hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_function_clear(hr_dev);

	hns_roce_free_link_table(hr_dev, &priv->tpq);
	hns_roce_free_link_table(hr_dev, &priv->tsq);
}

/*
 * Read the mailbox status register through the command queue.
 * Returns the raw mb_status_hw_run word on success, or a negative/命令
 * command-queue status code if the query itself failed.
 */
static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_mbox_status *mb_st =
				       (struct hns_roce_mbox_status *)desc.data;
	enum hns_roce_cmd_return_status status;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);

	status = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (status)
		return status;

	return le32_to_cpu(mb_st->mb_status_hw_run);
}

/* Non-zero while hardware is still executing the previous mailbox command. */
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = hns_roce_query_mbox_status(hr_dev);

	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}

/* Completion/result code of the last mailbox command. */
static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
	u32 status = hns_roce_query_mbox_status(hr_dev);

	return status & HNS_ROCE_HW_MB_STATUS_MASK;
}

/*
 * Build and send a POST_MB command-queue descriptor carrying the mailbox
 * parameters (in/out addresses, modifier+opcode tag, token and event flag).
 */
static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
			      u64 out_param, u32 in_modifier, u8 op_modifier,
			      u16 op, u16 token, int event)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);

	mb->in_param_l = cpu_to_le32(in_param);
	mb->in_param_h = cpu_to_le32(in_param >> 32);
	mb->out_param_l = cpu_to_le32(out_param);
	mb->out_param_h = cpu_to_le32(out_param >> 32);
	mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
	mb->token_event_en = cpu_to_le32(event << 16 | token);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/*
 * hw->post_mbox hook: wait (bounded by HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS)
 * until the previous mailbox command has drained, then post the new one.
 * Returns -EAGAIN if hardware stayed busy past the timeout.
 */
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	struct device *dev = hr_dev->dev;
	unsigned long end;
	int ret;

	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
				 op_modifier, op, token, event);
	if (ret)
		dev_err(dev, "Post mailbox fail(%d)\n", ret);

	return ret;
}

/*
 * hw->chk_mbox hook: poll until the posted mailbox command completes or
 * 'timeout' ms elapse. A completion status of 0x1 means success; a
 * CMD_RST_PRC_EBUSY status is propagated as-is so the caller can retry.
 */
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	unsigned long end;
	u32 status;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v2_cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = hns_roce_v2_cmd_complete(hr_dev);
	if (status != 0x1) {
		if (status == CMD_RST_PRC_EBUSY)
			return status;

		dev_err(dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}

/*
 * Write one entry of the source-GID table: index, GID type (RoCE v1 /
 * v2-IPv4 / v2-IPv6) and the 128-bit GID split into four 32-bit words.
 */
static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
				      int gid_index, const union ib_gid *gid,
				      enum hns_roce_sgid_type sgid_type)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_sgid_tb *sgid_tb =
				    (struct hns_roce_cfg_sgid_tb *)desc.data;
	u32 *p;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);

	roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
	roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);

	p = (u32 *)&gid->raw[0];
	sgid_tb->vf_sgid_l = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[4];
	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[8];
	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[0xc];
	sgid_tb->vf_sgid_h = cpu_to_le32(*p);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/*
 * hw->set_gid hook: map the IB GID type to the hardware SGID type and
 * program the SGID table entry.
 */
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
	int ret;

	if (!gid || !attr)
		return -EINVAL;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		sgid_type = GID_TYPE_FLAG_ROCE_V1;

	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
		if
(ipv6_addr_v4mapped((void *)gid))
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
		else
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
	}

	ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to configure sgid table, ret = %d!\n",
			  ret);

	return ret;
}

/*
 * hw->set_mac hook: program a 6-byte MAC into the SMAC table slot of the
 * given physical port (low 4 bytes + high 2 bytes).
 */
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_smac_tb *smac_tb =
				    (struct hns_roce_cfg_smac_tb *)desc.data;
	u16 reg_smac_h;
	u32 reg_smac_l;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);

	reg_smac_l = *(u32 *)(&addr[0]);
	reg_smac_h = *(u16 *)(&addr[4]);

	roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
		       CFG_SMAC_TB_IDX_S, phy_port);
	roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
		       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
	smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

/*
 * Fill the PBL-related fields of an MPT entry: PBL base address, size and
 * the first two page addresses. Page addresses are shifted right by 6
 * before being written, matching the hardware's address access unit.
 * Returns -ENOBUFS if no pages could be found for the MR.
 */
static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
			struct hns_roce_v2_mpt_entry *mpt_entry,
			struct hns_roce_mr *mr)
{
	u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t pbl_ba;
	int i, count;

	count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
				  ARRAY_SIZE(pages), &pbl_ba);
	if (count < 1) {
		ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
			  count);
		return -ENOBUFS;
	}

	/* Aligned to the hardware address access unit */
	for (i = 0; i < count; i++)
		pages[i] >>= 6;

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);
	mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S, upper_32_bits(pbl_ba >> 3));

	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));

	return 0;
}

/*
 * hw->write_mtpt hook: build a valid MPT entry for a regular or DMA MR:
 * state/hop/page-size/PD fields, access-right bits derived from mr->access,
 * length, key and IOVA. DMA MRs skip PBL programming entirely.
 */
static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
				  struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ?
0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	/* DMA MRs have no page list to program. */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);

	return ret;
}

/*
 * hw->rereg_write_mtpt hook: update an existing MPT in place. Only the
 * aspects selected in 'flags' are rewritten: PD, access rights, and/or
 * translation (IOVA, length and PBL).
 */
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mr->iova = iova;
		mr->size = size;

		ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
	}

	return ret;
}

/*
 * hw->frmr_write_mtpt hook: build an MPT for a fast-register MR. The entry
 * starts in FREE state (the FRMR WQE later validates it) with FRE/BPD set
 * and a single-hop PBL whose base is looked up from the MR's mtr.
 */
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
				       void *mb_buf, struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_v2_mpt_entry *mpt_entry;
	dma_addr_t pbl_ba = 0;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
		ibdev_err(ibdev, "failed to find frmr mtr.\n");
		return -ENOBUFS;
	}

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);

	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(pbl_ba >> 3));

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));

	return 0;
}

/*
 * hw->mw_write_mtpt hook: build an MPT for a memory window. BQP is set for
 * type-2 windows (bound to a QP); the entry stays FREE until bound.
 */
static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mw->pdn);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       mw->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	mpt_entry->lkey = cpu_to_le32(mw->rkey);

	return 0;
}

/* Address of CQE number n within the CQ buffer. */
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
}

/*
 * Return CQE n if it is owned by software (ready to poll), else NULL.
 * Ownership alternates each pass through the ring, tracked by cons_idx MSB.
 */
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
		!!(n & hr_cq->cq_depth)) ?
cqe : NULL;
}

/* Ring the CQ consumer-index doorbell (record-db variant). */
static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
{
	*hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}

/*
 * Remove all CQEs belonging to QP 'qpn' from the CQ (caller holds the CQ
 * lock). Surviving CQEs are compacted downwards over the freed slots with
 * their owner bits preserved; matching SRQ WQEs are returned to the SRQ.
 */
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	/* Find the index one past the last software-owned CQE. */
	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			if (srq &&
			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
				wqe_index = roce_get_field(cqe->byte_4,
						     V2_CQE_BYTE_4_WQE_INDX_M,
						     V2_CQE_BYTE_4_WQE_INDX_S);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			/* Destination slot keeps its own owner bit. */
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}

/* Locked wrapper around __hns_roce_v2_cq_clean(). */
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}

/*
 * Fill a CQ context for the CREATE_CQC mailbox: state, depth, CEQ vector,
 * CQE size/addresses (current and next block from mtts[0..1]), base address
 * and record-doorbell settings, plus default moderation parameters.
 */
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
		       V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
		       HNS_ROCE_V3_CQE_SIZE ? 1 : 0);

	cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(mtts[0])));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(mtts[1])));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));

	cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	roce_set_bit(cq_context->byte_44_db_record,
		     V2_CQC_BYTE_44_DB_RECORD_EN_S,
		     (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);

	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}

/*
 * ib_device.req_notify_cq hook: arm the CQ for the next (or next solicited)
 * completion by ringing the notify doorbell with the current consumer index
 * and command sequence number.
 */
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	__le32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0; Notification Flag = 1, next
	 * flags = 1; Notification Flag = 0, solocited
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);

	return 0;
}

/*
 * Copy inline receive data out of the RQ WQE buffer into the SGEs posted
 * with the receive. Returns -EAGAIN (and marks the WC with LOC_LEN_ERR)
 * if the SGEs are too small for the received byte count.
 */
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp **cur_qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);

	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
	data_len = wc->byte_len;

	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	if (unlikely(data_len)) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}

/*
 * Software-flush one work queue: emit up to num_entries WR_FLUSH_ERR
 * completions for its outstanding WQEs. Returns the number of WCs written.
 */
static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
		   int num_entries, struct ib_wc *wc)
{
	unsigned int left;
	int npolled = 0;

	left = wq->head - wq->tail;
	if (left == 0)
		return 0;

	left = min_t(unsigned int, (unsigned int)num_entries, left);
	while (npolled < left) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->qp = &hr_qp->ibqp;

		wq->tail++;
		wc++;
		npolled++;
	}

	return npolled;
}

/*
 * Software poll path used once the device is torn down: flush every SQ and
 * RQ attached to this CQ until num_entries completions are produced.
 */
static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
				  struct ib_wc *wc)
{
	struct hns_roce_qp *hr_qp;
	int npolled = 0;

	list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
		npolled += sw_comp(hr_qp, &hr_qp->sq,
				   num_entries - npolled, wc + npolled);
		if (npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
		npolled += sw_comp(hr_qp, &hr_qp->rq,
				   num_entries - npolled, wc + npolled);
		if (npolled >= num_entries)
			goto out;
	}

out:
	return npolled;
}

/*
 * Translate the hardware CQE status field into an ib_wc status and, for
 * real errors, dump the CQE and kick off the deferred flush workaround.
 */
static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
			   struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
			   struct ib_wc *wc)
{
	static const struct {
		u32 cqe_status;
		enum ib_wc_status wc_status;
	} map[] = {
		{ HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
		{ HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
		{ HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
		{ HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
		{ HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
		{ HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
		{ HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
		  IB_WC_RETRY_EXC_ERR },
		{ HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
		{ HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
		{ HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
	};

	u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
					V2_CQE_BYTE_4_STATUS_S);
	int i;

	/* Unknown hardware codes fall back to IB_WC_GENERAL_ERR. */
	wc->status = IB_WC_GENERAL_ERR;
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (cqe_status == map[i].cqe_status) {
			wc->status = map[i].wc_status;
			break;
		}

	if (likely(wc->status == IB_WC_SUCCESS ||
		   wc->status == IB_WC_WR_FLUSH_ERR))
		return;

	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
		       cq->cqe_size, false);

	/*
	 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
	 * the standard protocol, the driver must ignore it and needn't to set
	 * the QP to an error state.
	 */
	if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
		return;

	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
	 * into errored mode. Hence, as a workaround to this hardware
	 * limitation, driver needs to assist in flushing. But the flushing
	 * operation uses mailbox to convey the QP state to the hardware and
	 * which can sleep due to the mutex protection around the mailbox calls.
	 * Hence, use the deferred flush for now. Once wc error detected, the
	 * flushing operation is needed.
	 */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(hr_dev, qp);
}

/*
 * Poll a single CQE from the CQ into *wc. Resolves the owning QP (cached
 * across calls via *cur_qp), advances the matching SQ/RQ/SRQ tail, maps the
 * opcode, and handles inline receives. Returns 0 on success, -EAGAIN when
 * no software-owned CQE is available, -EINVAL for an unknown QPN.
 */
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_srq *srq = NULL;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier */
	rmb();

	/* 0->SQ, 1->RQ */
	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			ibdev_err(&hr_dev->ib_dev,
				  "CQ %06lx with entry for unknown QPN %06x\n",
				  hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_hr_srq((*cur_qp)->ibqp.srq);
		wqe_ctr = (u16)roce_get_field(cqe->byte_4,
					      V2_CQE_BYTE_4_WQE_INDX_M,
					      V2_CQE_BYTE_4_WQE_INDX_S);
		wc->wr_id = srq->wrid[wqe_ctr];
		hns_roce_free_srq_wqe(srq, wqe_ctr);
	} else {
		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
	if (unlikely(wc->status != IB_WC_SUCCESS))
		return 0;

	if (is_send) {
		wc->wc_flags = 0;
		/* SQ corresponding to CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_V2_WQE_OP_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_V2_WQE_OP_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_V2_WQE_OP_BIND_MW:
			/*
			 * NOTE(review): BIND_MW is reported as IB_WC_REG_MR
			 * here (no dedicated WC opcode used) — intentional?
			 */
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
	} else {
		/* RQ correspond to CQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);

		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
					V2_CQE_BYTE_4_OPCODE_S);
		switch (opcode & 0x1f) {
		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		if ((wc->qp->qp_type == IB_QPT_RC ||
		     wc->qp->qp_type == IB_QPT_UC) &&
		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
			if (unlikely(ret))
				return -EAGAIN;
		}

		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
					    V2_CQE_BYTE_32_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
						V2_CQE_BYTE_32_RMT_QPN_M,
						V2_CQE_BYTE_32_RMT_QPN_S);
		wc->slid = 0;
		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
					      V2_CQE_BYTE_32_GRH_S) ?
				 IB_WC_GRH : 0);
		wc->port_num = roce_get_field(cqe->byte_32,
					      V2_CQE_BYTE_32_PORTN_M,
					      V2_CQE_BYTE_32_PORTN_S);
		wc->pkey_index = 0;

		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
							  V2_CQE_BYTE_28_VID_M,
							  V2_CQE_BYTE_28_VID_S);
			wc->wc_flags |= IB_WC_WITH_VLAN;
		} else {
			wc->vlan_id = 0xffff;
		}

		wc->network_hdr_type = roce_get_field(cqe->byte_28,
						      V2_CQE_BYTE_28_PORT_TYPE_M,
						      V2_CQE_BYTE_28_PORT_TYPE_S);
	}

	return 0;
}

/*
 * ib_device.poll_cq hook: poll up to num_entries completions under the CQ
 * lock, falling back to software flush once the device reaches UNINIT.
 */
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
			       struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&hr_cq->lock, flags);

	/*
	 * When the device starts to reset, the state is RST_DOWN. At this
	 * time, there may still be some valid CQEs in the hardware that are
	 * not polled. Therefore, it is not allowed to switch to the software
	 * mode immediately. When the state changes to UNINIT, CQE no longer
	 * exists in the hardware, and then switch to software mode.
 */
	if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
		/* HW CQEs no longer exist; drain the software backup queue. */
		npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
			break;
	}

	if (npolled) {
		/* Memory barrier */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

out:
	spin_unlock_irqrestore(&hr_cq->lock, flags);

	return npolled;
}

/*
 * Map a HEM table type to the mailbox opcode that writes its base-address
 * table entry, offset by the BT level (step_idx).
 *
 * Returns the opcode for the requested level, or -EINVAL for types that
 * cannot be written via mailbox (SCCC only supports level 0).
 */
static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
			      int step_idx)
{
	int op;

	/* SCCC has a single-level BT; any other level is invalid. */
	if (type == HEM_TYPE_SCCC && step_idx)
		return -EINVAL;

	switch (type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
		break;
	case HEM_TYPE_SCCC:
		op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
		break;
	case HEM_TYPE_QPC_TIMER:
		op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
		break;
	case HEM_TYPE_CQC_TIMER:
		op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
		break;
	default:
		dev_warn(hr_dev->dev, "Table %d not to be written by mailbox!\n",
			 type);
		return -EINVAL;
	}

	/* The per-level opcodes are consecutive, starting at the BT0 one. */
	return op + step_idx;
}

/*
 * Program one base-address-table entry (bt_ba) for table entry 'obj' into
 * hardware through a mailbox command.
 *
 * Returns 0 on success (including types that need no mailbox write) or a
 * negative errno from mailbox allocation/execution.
 */
static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
			 u32 hem_type, int step_idx)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;
	int op;

	op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
	/* Unsupported type/level: silently treated as nothing to do. */
	if (op < 0)
		return 0;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
				0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

/*
 * Write the HEM (hardware entry memory) addresses for 'obj' at BT level
 * 'step_idx' to hardware, walking the multi-hop page tables as needed.
 */
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       int step_idx)
{
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;

	/* Tables without multi-hop addressing need no BT programming here. */
	if (!hns_roce_check_whether_mhop(hr_dev,
					 table->type))
		return 0;

	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	/* Each BT chunk holds bt_chunk_size / 8 64-bit base addresses. */
	chunk_ba_num = mhop.bt_chunk_size / 8;

	/* Flatten the (l0, l1, l2) indices for the configured hop depth. */
	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	/* SCCC is indexed by its level-0 slot, not the raw object number. */
	if (table->type == HEM_TYPE_SCCC)
		obj = mhop.l0_idx;

	if (check_whether_last_step(hop_num, step_idx)) {
		/* Last level: write every chunk of the HEM itself. */
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);
			ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
					    step_idx);
		}
	} else {
		/* Intermediate level: write the L0/L1 BT base address. */
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
	}

	return ret;
}

/*
 * Tear down the hardware base-address-table entry for 'obj' at BT level
 * 'step_idx' via a DESTROY_*_BT mailbox command.  Types without a destroy
 * opcode (SCCC and the timer tables) are a no-op.
 */
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;
	u16 op = 0xff;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
		break;
	case HEM_TYPE_SCCC:
	case HEM_TYPE_QPC_TIMER:
	case HEM_TYPE_CQC_TIMER:
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
			 table->type);
		return 0;
	}

	/* These types have no destroy command; nothing to send. */
	if (table->type == HEM_TYPE_SCCC ||
	    table->type == HEM_TYPE_QPC_TIMER ||
	    table->type == HEM_TYPE_CQC_TIMER)
		return 0;

	/* Per-level destroy opcodes are consecutive, like the write ones. */
	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* configure the tag and op */
	ret = hns_roce_cmd_mbox(hr_dev, 0,
				mailbox->dma, obj,
				0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

/*
 * Push a QP context update to hardware: the mailbox buffer carries the new
 * context followed by the mask that selects which fields hardware applies.
 *
 * Returns 0 on success or a negative errno from mailbox handling.
 */
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int qpc_size;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* The qpc size of HIP08 is only 256B, which is half of HIP09 */
	qpc_size = hr_dev->caps.qpc_sz;
	memcpy(mailbox->buf, context, qpc_size);
	memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				HNS_ROCE_CMD_MODIFY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

/*
 * Fill the remote-access enable bits (RRE/RWE/ATE/EXT_ATE) of the QP
 * context from the attribute mask, falling back to the values cached in
 * hr_qp when an attribute is not being modified.
 */
static void set_access_flags(struct hns_roce_qp *hr_qp,
			     struct hns_roce_v2_qp_context *context,
			     struct hns_roce_v2_qp_context *qpc_mask,
			     const struct ib_qp_attr *attr, int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;

	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
			 attr->max_dest_rd_atomic : hr_qp->resp_depth;

	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
attr->qp_access_flags : hr_qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(access_flags & IB_ACCESS_REMOTE_READ)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0); } static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S, to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, hr_qp->sge.sge_shift)); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, ilog2(hr_qp->sq.wqe_cnt)); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, ilog2(hr_qp->rq.wqe_cnt)); } static void modify_qp_reset_to_init(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. If software need modify some fields in context, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
*/ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type)); roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs)); set_qpc_wqe_cnt(hr_qp, context, qpc_mask); /* No VLAN need to set 0xFFF */ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, 0xfff); if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) roce_set_bit(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1); roce_set_field(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, ((u32)hr_qp->rdb.dma) >> 1); context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0); roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn); if (ibqp->srq) { roce_set_field(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, to_hr_srq(ibqp->srq)->srqn); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQ_EN_S, 1); } roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1); hr_qp->access_flags = attr->qp_access_flags; roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); } static void modify_qp_init_to_init(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. 
If software need modify some fields in context, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. */ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type)); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, 0); if (attr_mask & IB_QP_ACCESS_FLAGS) { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0); } else { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0); } roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, 
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, 0); roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn); roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, 0); roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, 0); if (ibqp->srq) { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQ_EN_S, 1); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQ_EN_S, 0); roce_set_field(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, to_hr_srq(ibqp->srq)->srqn); roce_set_field(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0); } roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, 0); if (attr_mask & IB_QP_DEST_QPN) { roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); } } static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { u64 mtts[MTT_MIN_COUNT] = { 0 }; u64 wqe_sge_ba; int count; /* Search qp buf's mtts */ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, MTT_MIN_COUNT, &wqe_sge_ba); if (hr_qp->rq.wqe_cnt && count < 1) { ibdev_err(&hr_dev->ib_dev, "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn); return -EINVAL; } context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; /* * In v2 engine, software pass context 
and context mask to hardware * when modifying qp. If software need modify some fields in context, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. */ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3)); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, V2_QPC_BYTE_12_WQE_SGE_BA_S, 0); roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num, hr_qp->sq.wqe_cnt)); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num, hr_qp->sge.sge_cnt)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num, hr_qp->rq.wqe_cnt)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0); roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0); roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); qpc_mask->rq_cur_blk_addr = 0; 
roce_set_field(context->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, upper_32_bits(to_hr_hw_page_addr(mtts[0]))); roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); qpc_mask->rq_nxt_blk_addr = 0; roce_set_field(context->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, upper_32_bits(to_hr_hw_page_addr(mtts[1]))); roce_set_field(qpc_mask->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0); roce_set_field(context->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head); roce_set_field(qpc_mask->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); roce_set_field(qpc_mask->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M, V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0); return 0; } static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { struct ib_device *ibdev = &hr_dev->ib_dev; u64 sge_cur_blk = 0; u64 sq_cur_blk = 0; int count; /* search qp buf's mtts */ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); if (count < 1) { ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n", hr_qp->qpn); return -EINVAL; } if (hr_qp->sge.sge_cnt > 0) { count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sge.offset, &sge_cur_blk, 1, NULL); if (count < 1) { ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n", hr_qp->qpn); return -EINVAL; } } /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. If software need modify some fields in context, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
	 */
	/* Lower/upper halves of the current SQ WQE block address. */
	context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	qpc_mask->sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	/* Current extended-SGE block address (zero when no SGE region). */
	context->sq_cur_sge_blk_addr =
		cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
	qpc_mask->sq_cur_sge_blk_addr = 0;
	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);

	/* RX-side copy of the current SQ block address. */
	context->rx_sq_cur_blk_addr =
		cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
	roce_set_field(context->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
	qpc_mask->rx_sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);

	return 0;
}

/*
 * Return the effective path MTU for the transition: GSI/UD QPs always use
 * the maximum (4096); other types take it from the attributes.
 */
static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
				  const struct ib_qp_attr *attr)
{
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		return IB_MTU_4096;

	return attr->path_mtu;
}

/*
 * Fill context/qpc_mask for the INIT -> RTR transition: RQ buffer layout,
 * IRRL/TRRL base addresses, destination address info, MTU and related
 * packet-count fields.  Returns 0 or a negative errno.
 */
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t trrl_ba;
	dma_addr_t irrl_ba;
	enum ib_mtu mtu;
	u8 lp_pktn_ini;
	u8 port_num;
	u64 *mtts;
	u8 *dmac;
	u8 *smac;
	int port;
	int ret;

	ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
	if (ret) {
ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret); return ret; } /* Search IRRL's mtts */ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, hr_qp->qpn, &irrl_ba); if (!mtts) { ibdev_err(ibdev, "failed to find qp irrl_table.\n"); return -EINVAL; } /* Search TRRL's mtts */ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, hr_qp->qpn, &trrl_ba); if (!mtts) { ibdev_err(ibdev, "failed to find qp trrl_table.\n"); return -EINVAL; } if (attr_mask & IB_QP_ALT_PATH) { ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n", attr_mask); return -EINVAL; } roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, V2_QPC_BYTE_132_TRRL_BA_S, 0); context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4)); qpc_mask->trrl_ba = 0; roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, (u32)(trrl_ba >> (32 + 16 + 4))); roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, 0); context->irrl_ba = cpu_to_le32(irrl_ba >> 6); qpc_mask->irrl_ba = 0; roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, V2_QPC_BYTE_208_IRRL_BA_S, irrl_ba >> (32 + 6)); roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, V2_QPC_BYTE_208_IRRL_BA_S, 0); roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1); roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0); roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S, hr_qp->sq_signal_bits); roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S, 0); port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : hr_qp->port; smac = (u8 *)hr_dev->dev_addr[port]; dmac = (u8 *)attr->ah_attr.roce.dmac; /* when dmac equals smac or loop_idc is 1, it should loopback */ if (ether_addr_equal_unaligned(dmac, smac) || hr_dev->loop_idc == 0x1) { roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0); } if (attr_mask & IB_QP_DEST_QPN) { roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); } /* Configure GID index */ port_num = rdma_ah_get_port_num(&attr->ah_attr); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, hns_get_gid_index(hr_dev, port_num - 1, grh->sgid_index)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); memcpy(&(context->dmac), dmac, sizeof(u32)); roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4]))); qpc_mask->dmac = 0; roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, 0); mtu = get_mtu(ibqp, attr); hr_qp->path_mtu = mtu; if (attr_mask & IB_QP_PATH_MTU) { roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, mtu); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, 0); } #define MAX_LP_MSG_LEN 65536 /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)); roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */ roce_set_field(context->byte_172_sq_psn, 
V2_QPC_BYTE_172_ACK_REQ_FREQ_M, V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini); roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M, V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0); roce_set_bit(qpc_mask->byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0); roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M, V2_QPC_BYTE_96_RX_REQ_MSN_S, 0); roce_set_field(qpc_mask->byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M, V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0); context->rq_rnr_timer = 0; qpc_mask->rq_rnr_timer = 0; roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M, V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); /* rocee send 2^lp_sgen_ini segs every time */ roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_LP_SGEN_INI_M, V2_QPC_BYTE_168_LP_SGEN_INI_S, 3); roce_set_field(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_LP_SGEN_INI_M, V2_QPC_BYTE_168_LP_SGEN_INI_S, 0); return 0; } static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct ib_device *ibdev = &hr_dev->ib_dev; int ret; /* Not support alternate path and path migration */ if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) { ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); return -EINVAL; } ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask); if (ret) { ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret); return ret; } /* * Set some fields in context to zero, Because the default values * of all fields in context are zero, we need not set them to 0 again. * but we should set the relevant fields of context mask to 0. 
	 */
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);

	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	/* LSN starts at 0x100; only this field gets a non-zero value here. */
	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0x100);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	return 0;
}

/*
 * Derive the RoCEv2 UDP source port from the flow label, computing a flow
 * label from the QP numbers when the caller supplied none.
 */
static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);

	return rdma_flow_label_to_udp_sport(fl);
}

/*
 * Program the address-vector portion of the QP context: VLAN, SGID index,
 * UDP source port, hop limit, traffic class, flow label, DGID and SL.
 * Returns 0 or a negative errno on invalid AH attributes.
 */
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr,
				int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	const struct ib_gid_attr *gid_attr = NULL;
	int is_roce_protocol;
	u16 vlan_id = 0xffff;
	bool is_udp = false;
	u8 ib_port;
	u8 hr_port;
	int ret;

	ib_port = (attr_mask & IB_QP_PORT) ?
attr->port_num : hr_qp->port + 1; hr_port = ib_port - 1; is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; if (is_roce_protocol) { gid_attr = attr->ah_attr.grh.sgid_attr; ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); if (ret) return ret; if (gid_attr) is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); } if (vlan_id < VLAN_N_VID) { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); roce_set_bit(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); roce_set_bit(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); } roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, vlan_id); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, 0); if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n", grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); return -EINVAL; } if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { ibdev_err(ibdev, "ah attr is not RDMA roce type\n"); return -EINVAL; } roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, is_udp ? 
get_udp_sport(grh->flow_label, ibqp->qp_num, attr->dest_qp_num) : 0); roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S, 0); if (is_udp) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); else roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, grh->traffic_class); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, 0); roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, V2_QPC_BYTE_28_FL_S, grh->flow_label); roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, V2_QPC_BYTE_28_FL_S, 0); memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, V2_QPC_BYTE_28_SL_S, hr_qp->sl); roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, V2_QPC_BYTE_28_SL_S, 0); return 0; } static bool check_qp_state(enum ib_qp_state cur_state, enum ib_qp_state new_state) { static const bool sm[][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = true, [IB_QPS_INIT] = true }, [IB_QPS_INIT] = { [IB_QPS_RESET] = true, [IB_QPS_INIT] = true, [IB_QPS_RTR] = true, [IB_QPS_ERR] = true }, [IB_QPS_RTR] = { [IB_QPS_RESET] = true, [IB_QPS_RTS] = true, 
	/* (Continuation of the QP state-machine table belonging to the
	 * function that begins above this chunk: remaining rows plus the
	 * lookup of whether cur_state -> new_state is a legal transition.)
	 */
		      [IB_QPS_ERR] = true },
	[IB_QPS_RTS] = { [IB_QPS_RESET] = true,
			 [IB_QPS_RTS] = true,
			 [IB_QPS_ERR] = true },
	[IB_QPS_SQD] = {},
	[IB_QPS_SQE] = {},
	[IB_QPS_ERR] = { [IB_QPS_RESET] = true,
			 [IB_QPS_ERR] = true }
	};

	return sm[cur_state][new_state];
}

/* Fill the transition-specific ("absolute") fields of the QPC.
 *
 * Validates the requested state transition, then dispatches to the
 * per-transition helper that programs @context and clears the matching
 * bits in @qpc_mask (a cleared mask bit tells HW to take the new value).
 * Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
				      enum ib_qp_state cur_state,
				      enum ib_qp_state new_state,
				      struct hns_roce_v2_qp_context *context,
				      struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	int ret = 0;

	if (!check_qp_state(cur_state, new_state)) {
		ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
		return -EINVAL;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/* RESET->INIT rewrites the whole QPC, so take every field
		 * from @context (mask fully cleared).
		 */
		memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
					qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
				       qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
					    qpc_mask);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
					   qpc_mask);
	}

	return ret;
}

/* Program the optional QPC fields selected by @attr_mask (path, timers,
 * PSNs, RDMA-atomic depths, access flags, qkey).  For each attribute the
 * value is written to @context and the corresponding @qpc_mask bits are
 * cleared so the hardware applies it.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
				      struct hns_roce_v2_qp_context *context,
				      struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret = 0;

	if (attr_mask & IB_QP_AV) {
		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		if (attr->timeout < 31) {
			roce_set_field(context->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M,
				       V2_QPC_BYTE_28_AT_S, attr->timeout);
			roce_set_field(qpc_mask->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M,
				       V2_QPC_BYTE_28_AT_S, 0);
		} else {
			/* NOTE(review): values > 30 are only warned about and
			 * silently NOT programmed - the old timeout stays in
			 * effect.  Confirm this is the intended policy.
			 */
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 30.\n");
		}
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		/* Same count seeds both the initial and the live retry
		 * counter fields.
		 */
		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
			       attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);

		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, 0);
	}

	/* RC&UC&UD required attr */
	if (attr_mask & IB_QP_SQ_PSN) {
		/* The send PSN seeds several related QPC fields at once. */
		roce_set_field(context->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);

		roce_set_field(context->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);

		roce_set_field(context->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);

		/* High part of the retry PSN: the PSN is split across the
		 * byte_220 and byte_224 fields, hence the shift by the low
		 * field's width.
		 */
		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);

		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
			       attr->sq_psn);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
	}

	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
	    attr->max_dest_rd_atomic) {
		/* HW field stores log2 of the depth, rounded up via fls(). */
		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S,
			       fls(attr->max_dest_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S, 0);
	}

	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S,
			       fls(attr->max_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S, 0);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		roce_set_field(context->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
			       attr->min_rnr_timer);
		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
	}

	/* RC&UC required attr */
	if (attr_mask & IB_QP_RQ_PSN) {
		roce_set_field(context->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
		roce_set_field(qpc_mask->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);

		/* RAQ tracks the last received PSN, i.e. expected - 1. */
		roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
		roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = cpu_to_le32(attr->qkey);
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	return ret;
}

/* Mirror the optional attributes just programmed into HW back into the
 * software QP object (access flags, responder depth, port mapping).
 */
static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
					  const struct ib_qp_attr *attr,
					  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}
}

/* Core v2 modify-QP path: build a context + mask pair, hand it to the
 * hardware through the mailbox, then update software state.  On a move
 * to ERR the SQ/RQ producer indices are frozen so pending WQEs get
 * flushed; on a move to RESET (kernel QPs only) the CQs are cleaned and
 * the queue pointers rewound.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	/* ctx[0] = new values, ctx[1] = mask, sent to HW as one buffer. */
	struct hns_roce_v2_qp_context ctx[2];
	struct hns_roce_v2_qp_context *context = ctx;
	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long sq_flag = 0;
	unsigned long rq_flag = 0;
	int ret;

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	memset(context, 0, hr_dev->caps.qpc_sz);
	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);

	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
					 new_state, context, qpc_mask);
	if (ret)
		goto out;

	/* When QP state is err, SQ and RQ WQE should be flushed */
	if (new_state == IB_QPS_ERR) {
		spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
		hr_qp->state = IB_QPS_ERR;
		roce_set_field(context->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
			       hr_qp->sq.head);
		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
		spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);

		/* SRQ-attached QPs have no RQ of their own to flush. */
		if (!ibqp->srq) {
			spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
			roce_set_field(context->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
				       hr_qp->rq.head);
			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
			spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
		}
	}

	/* Configure the optional fields */
	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
					 qpc_mask);
	if (ret)
		goto out;

	roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
		     ibqp->srq ? 1 : 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_INV_CREDIT_S, 0);

	/* Every status migrate must change state */
	roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, new_state);
	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, 0);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);

	/* Kernel QPs returning to RESET: scrub their CQEs and rewind all
	 * software queue indices and the record doorbell.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->next_sge = 0;
		if (hr_qp->rq.wqe_cnt)
			*hr_qp->rdb.db_record = 0;
	}

out:
	return ret;
}

/* Map a hardware QP state to the IB-core enum; -1 for unknown values. */
static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
	static const enum ib_qp_state map[] = {
		[HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
		[HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
		[HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
		[HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
		[HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
		[HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
		[HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
		[HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
	};

	return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}

/* Read the raw QP context from hardware via a QUERY_QPC mailbox command
 * into @hr_context.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_v2_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
				HNS_ROCE_CMD_QUERY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto out;

	memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

/* ib_query_qp() backend: fetch the QPC from HW and translate it into
 * ib_qp_attr / ib_qp_init_attr.  Serialized against modify via
 * hr_qp->mutex; a QP known to be in RESET is answered from software
 * without touching hardware.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context context = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int tmp_qp_state;
	int state;
	int ret;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
	if (ret) {
		ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context.byte_60_qpst_tempid,
			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		ibdev_err(ibdev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
							V2_QPC_BYTE_24_MTU_M,
							V2_QPC_BYTE_24_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);

	qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
	/* NOTE(review): the (u8) cast truncates the 24-bit destination QPN
	 * to its low byte - looks like a bug; confirm against current
	 * upstream which reads the full field.
	 */
	qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);

	qp_attr->qp_access_flags =
		((roce_get_bit(context.byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
		((roce_get_bit(context.byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
		((roce_get_bit(context.byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context.byte_28_at_fl,
					      V2_QPC_BYTE_28_SL_M,
					      V2_QPC_BYTE_28_SL_S));
		grh->flow_label = roce_get_field(context.byte_28_at_fl,
						 V2_QPC_BYTE_28_FL_M,
						 V2_QPC_BYTE_28_FL_S);
		grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
						 V2_QPC_BYTE_20_SGID_IDX_M,
						 V2_QPC_BYTE_20_SGID_IDX_S);
		grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
						V2_QPC_BYTE_24_HOP_LIMIT_M,
						V2_QPC_BYTE_24_HOP_LIMIT_S);
		grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
						    V2_QPC_BYTE_24_TC_M,
						    V2_QPC_BYTE_24_TC_S);

		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	/* HW stores log2 depths; convert back to counts. */
	qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
						     V2_QPC_BYTE_208_SR_MAX_M,
						     V2_QPC_BYTE_208_SR_MAX_S);
	qp_attr->max_dest_rd_atomic =
		1 << roce_get_field(context.byte_140_raq,
				    V2_QPC_BYTE_140_RR_MAX_M,
				    V2_QPC_BYTE_140_RR_MAX_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_M,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_S);
	qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
					      V2_QPC_BYTE_28_AT_M,
					      V2_QPC_BYTE_28_AT_S);
	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
					    V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
					    V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
					    V2_QPC_BYTE_244_RNR_NUM_INIT_M,
					    V2_QPC_BYTE_244_RNR_NUM_INIT_S);

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	/* Userspace QPs manage their own SQ, so report zero SQ caps. */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;

out:
	mutex_unlock(&hr_qp->mutex);
	return ret;
}

/* Common QP teardown: force the QP back to RESET if needed, then remove
 * it from the device while holding both CQ locks; for kernel QPs the
 * CQEs it owns are cleaned first.  Returns the modify-to-RESET result
 * (an error there is logged but teardown continues).
 */
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
					 struct hns_roce_qp *hr_qp,
					 struct ib_udata *udata)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cq *send_cq, *recv_cq;
	unsigned long flags;
	int ret = 0;

	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET);
		if (ret)
			ibdev_err(ibdev,
				  "failed to modify QP to RST, ret = %d\n",
				  ret);
	}

	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(send_cq, recv_cq);

	if (!udata) {
		if (recv_cq)
			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
					       (hr_qp->ibqp.srq ?
						to_hr_srq(hr_qp->ibqp.srq) :
						NULL));

		if (send_cq && send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
	}

	hns_roce_qp_remove(hr_dev, hr_qp);

	hns_roce_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);

	return ret;
}

/* ib_destroy_qp() backend.  Note: a failure from the common teardown is
 * only logged; the function always completes the destroy and returns 0.
 */
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret;

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to destroy QP 0x%06lx, ret = %d\n",
			  hr_qp->qpn, ret);

	hns_roce_qp_destroy(hr_dev, hr_qp, udata);

	return 0;
}

/* Reset and clear the SCC (congestion-control) context of a QP, then
 * poll (up to HNS_ROCE_CMQ_SCC_CLR_DONE_CNT tries, 20 ms apart) until
 * hardware reports the clear is done.  Serialized by scc_mutex.
 * Returns 0, a command error, or -ETIMEDOUT.
 */
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* set scc ctx clear done flag */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
		goto out;
	}

	/* clear scc context */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
		goto out;
	}

	/* query scc context clear is done or not */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to query clr cmq, ret = %d\n", ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		msleep(20);
	}

	ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}

/* Serialize an SRQ context into @mb_buf for the CREATE_SRQ mailbox:
 * state/hop-num/shift, PD and XRCD, the WQE and index-queue base
 * addresses (split low/high across fields, hence the >>3 / >>35 shifts)
 * and their page-size configuration.
 */
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
				   dma_addr_t dma_handle_idx)
{
	struct hns_roce_srq_context *srq_context;

	srq_context = mb_buf;
	memset(srq_context, 0, sizeof(*srq_context));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
		       SRQC_BYTE_4_SRQ_ST_S, 1);

	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
		       to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
					srq->wqe_cnt));
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
		       ilog2(srq->wqe_cnt));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srq->srqn);

	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

	roce_set_field(srq_context->byte_24_wqe_bt_ba,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
		       dma_handle_wqe >> 35);

	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
		       SRQC_BYTE_28_PD_S, pdn);
	/* RQWS is log2 of the max SGE count; guard against max_gs == 0. */
	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
		       SRQC_BYTE_28_RQWS_S,
		       srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));

	srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);

	roce_set_field(srq_context->rsv_idx_bt_ba,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
		       dma_handle_idx >> 35);

	srq_context->idx_cur_blk_addr =
		cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
		       to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
					srq->wqe_cnt));

	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
		       to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));

	srq_context->idx_nxt_blk_addr =
		cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
	roce_set_field(srq_context->rsv_idxnxtblkaddr,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
		       upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
		       cqn);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
		       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));

	roce_set_bit(srq_context->db_record_addr_record_en,
		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}

/* ib_modify_srq() backend.  Only the limit watermark (IB_SRQ_LIMIT) can
 * be changed - resizing (IB_SRQ_MAX_WR) is rejected with -EINVAL.  The
 * new limit is pushed via a MODIFY_SRQC mailbox with a context + mask
 * buffer pair.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
				  struct ib_srq_attr *srq_attr,
				  enum ib_srq_attr_mask srq_attr_mask,
				  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_srq_context *srqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Resizing SRQs is not supported yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (srq_attr_mask & IB_SRQ_LIMIT) {
		if (srq_attr->srq_limit >= srq->wqe_cnt)
			return -EINVAL;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		/* Buffer layout: context followed by its mask. */
		srq_context = mailbox->buf;
		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

		memset(srqc_mask, 0xff, sizeof(*srqc_mask));

		roce_set_field(srq_context->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
		roce_set_field(srqc_mask->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
					HNS_ROCE_CMD_MODIFY_SRQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to handle cmd of modifying SRQ, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	return 0;
}

/* ib_query_srq() backend: read the SRQ context from hardware and report
 * the limit watermark plus the software-known WR/SGE capacities.
 */
static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int limit_wl;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
				HNS_ROCE_CMD_QUERY_SRQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd of querying SRQ, ret = %d.\n",
			  ret);
		goto out;
	}

	limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
				  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
				  SRQC_BYTE_8_SRQ_LIMIT_WL_S);

	attr->srq_limit = limit_wl;
	attr->max_wr = srq->wqe_cnt - 1;
	attr->max_sge = srq->max_gs;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

/* ib_modify_cq() backend: update the CQ moderation parameters (max
 * coalesced CQE count and period) via a MODIFY_CQC mailbox command.
 */
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       cq_count);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       0);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       cq_period);
	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
		       0);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_MODIFY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when modifying CQ, ret = %d\n",
			  ret);

	return ret;
}

/* Deferred-work handler for async events: log the event at the
 * appropriate severity and free the work item.
 */
static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
				container_of(work, struct hns_roce_work, work);
	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
	u32 qpn = irq_work->qpn;
	u32 cqn = irq_work->cqn;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "Path migrated succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		ibdev_warn(ibdev, "Path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		ibdev_warn(ibdev, "Send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
			  qpn, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
			  qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
			  qpn, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_warn(ibdev, "SRQ limit reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_warn(ibdev, "SRQ last wqe reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catas error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		ibdev_warn(ibdev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		ibdev_warn(ibdev, "Function level reset.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}

/* Queue a work item describing the AEQ event currently recorded in @eq.
 * Called from interrupt context, hence GFP_ATOMIC; allocation failure is
 * tolerated silently (the event is then simply not logged).
 */
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
				      struct hns_roce_eq *eq,
				      u32 qpn, u32 cqn)
{
	struct hns_roce_work *irq_work;

	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
	if (!irq_work)
		return;

	INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
	irq_work->hr_dev = hr_dev;
	irq_work->qpn = qpn;
	irq_work->cqn = cqn;
	irq_work->event_type = eq->event_type;
	irq_work->sub_type = eq->sub_type;
	queue_work(hr_dev->irq_workq, &(irq_work->work));
}

/* Ring the EQ doorbell: report the consumer index to hardware and
 * re-arm (or not) depending on eq->arm_st; the command differs for
 * AEQs vs CEQs.
 */
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	__le32 doorbell[2] = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_AEQ :
			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
			       HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);

		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_CEQ :
			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
		       HNS_ROCE_V2_EQ_DB_PARA_S,
		       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));

	hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}

/* Return the next software-owned AEQE, or NULL if the queue is empty.
 * Ownership is decided by XOR-ing the entry's owner bit with the wrap
 * parity of the consumer index.
 */
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

/* Drain the asynchronous event queue: decode each AEQE, dispatch to the
 * QP/SRQ/CQ/command event handlers, queue logging work, then update the
 * consumer doorbell.  Returns non-zero if any entry was consumed.
 */
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	int aeqe_found = 0;
	int event_type;
	int sub_type;
	u32 srqn;
	u32 qpn;
	u32 cqn;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		qpn = roce_get_field(aeqe->event.qp_event.qp,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		cqn = roce_get_field(aeqe->event.cq_event.cq,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		srqn = roce_get_field(aeqe->event.srq_event.srq,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_event(hr_dev, qpn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, srqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, cqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		/* cons_index wraps at twice the queue depth so its high bit
		 * can serve as the ownership parity.
		 */
		if (eq->cons_index > (2 * eq->entries - 1))
			eq->cons_index = 0;

		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);

		aeqe = next_aeqe_sw_v2(eq);
	}

	set_eq_cons_index_v2(eq);
	return aeqe_found;
}

/* Return the next software-owned CEQE, or NULL if the queue is empty
 * (same owner-bit/parity scheme as next_aeqe_sw_v2()).
 */
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

/* Drain the completion event queue, notifying each referenced CQ, then
 * update the consumer doorbell.  Returns non-zero if any entry was
 * consumed.
 */
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	int ceqe_found = 0;
	u32 cqn;

	while (ceqe) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
				     HNS_ROCE_V2_CEQE_COMP_CQN_S);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = 1;

		if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
			eq->cons_index = 0;

		ceqe = next_ceqe_sw_v2(eq);
	}

	set_eq_cons_index_v2(eq);

	return ceqe_found;
}

/* Per-EQ MSI-X interrupt handler: dispatch to the CEQ or AEQ drainer
 * depending on the queue type.
 */
static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asychronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

/* Abnormal-interrupt handler: on AEQ overflow, bus error or other error
 * the status bit is acknowledged (write-1-to-clear into the status
 * register) and the interrupt re-enabled; AEQ overflow additionally
 * requests a function-level reset through the hnae3 framework.
 */
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		struct pci_dev *pdev = hr_dev->pci_dev;
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
		const struct hnae3_ae_ops *ops = ae_dev->ops;

		dev_err(dev, "AEQ overflow!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
		dev_err(dev, "BUS ERR!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
		dev_err(dev, "OTHER ERR!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else
		dev_err(dev, "There is no abnormal irq found!\n");

	return IRQ_RETVAL(int_work);
}

/* Enable or disable (per @enable_flag) the per-EQ event interrupts plus
 * the abnormal interrupt and its configuration register.
 */
static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, int enable_flag)
{
	int i;

	if (enable_flag == EQ_ENABLE) {
		for (i = 0; i < eq_num; i++)
			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
				   i * EQ_REG_OFFSET,
				   HNS_ROCE_V2_VF_EVENT_INT_EN_M);

		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
			   HNS_ROCE_V2_VF_ABN_INT_EN_M);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
			   HNS_ROCE_V2_VF_ABN_INT_CFG_M);
	} else {
		for (i = 0; i < eq_num; i++)
			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
				   i * EQ_REG_OFFSET,
				   HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);

		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
			   HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
			   HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
	}
}

/* Destroy an EQ context in hardware; EQNs below num_comp_vectors are
 * CEQs, the rest AEQs, which selects the mailbox opcode.
 */
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (eqn < hr_dev->caps.num_comp_vectors)
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_CEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	else
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_AEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
}

/* Release the EQ entry buffer (MTR-backed). */
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

/* Initialize the software EQ fields and serialize the EQ context into
 * @mb_buf for the create-EQ mailbox: state, hop number, doorbell,
 * moderation parameters, and the base/current/next EQE buffer addresses
 * split across the low/high register fields.  Returns 0 or -ENOBUFS if
 * the EQE buffer addresses cannot be resolved.
 */
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* if not multi-hop, eqe buffer only use one trunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	/* set eqc state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
		       eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set bt_ba [34:3] */
	roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);

	/* set bt_ba [64:35] */
	roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
		       eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);

	roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);

	roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);

	roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
		       HNS_ROCE_EQC_EQE_SIZE_S,
		       eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);

	return 0;
}

/* Allocate the MTR-backed buffer that holds the EQ entries, using the
 * device's configured EQE hop number and page sizes.
 */
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, NULL, 0);
	if (err)
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);

	return err;
}

/* Create one EQ: allocate its buffer, build the context, and issue the
 * create mailbox command (@eq_cmd selects CEQC vs AEQC).  All resources
 * are released on any failure.  Returns 0 or a negative errno.
 */
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq,
				 unsigned int eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox))
		return -ENOMEM;

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, eq_cmd,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

/* (Head of the IRQ-request helper; the remainder of this function lies
 * beyond this chunk.)
 */
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j <
(other_num + aeq_num); j++) snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", j - other_num); for (j = (other_num + aeq_num); j < irq_num; j++) snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", j - other_num - aeq_num); for (j = 0; j < irq_num; j++) { if (j < other_num) ret = request_irq(hr_dev->irq[j], hns_roce_v2_msix_interrupt_abn, 0, hr_dev->irq_names[j], hr_dev); else if (j < (other_num + comp_num)) ret = request_irq(eq_table->eq[j - other_num].irq, hns_roce_v2_msix_interrupt_eq, 0, hr_dev->irq_names[j + aeq_num], &eq_table->eq[j - other_num]); else ret = request_irq(eq_table->eq[j - other_num].irq, hns_roce_v2_msix_interrupt_eq, 0, hr_dev->irq_names[j - comp_num], &eq_table->eq[j - other_num]); if (ret) { dev_err(hr_dev->dev, "Request irq error!\n"); goto err_request_failed; } } return 0; err_request_failed: for (j -= 1; j >= 0; j--) if (j < other_num) free_irq(hr_dev->irq[j], hr_dev); else free_irq(eq_table->eq[j - other_num].irq, &eq_table->eq[j - other_num]); err_kzalloc_failed: for (i -= 1; i >= 0; i--) kfree(hr_dev->irq_names[i]); return ret; } static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) { int irq_num; int eq_num; int i; eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; irq_num = eq_num + hr_dev->caps.num_other_vectors; for (i = 0; i < hr_dev->caps.num_other_vectors; i++) free_irq(hr_dev->irq[i], hr_dev); for (i = 0; i < eq_num; i++) free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); for (i = 0; i < irq_num; i++) kfree(hr_dev->irq_names[i]); } static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct device *dev = hr_dev->dev; struct hns_roce_eq *eq; unsigned int eq_cmd; int irq_num; int eq_num; int other_num; int comp_num; int aeq_num; int i; int ret; other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; aeq_num = hr_dev->caps.num_aeq_vectors; eq_num 
= comp_num + aeq_num; irq_num = eq_num + other_num; eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); if (!eq_table->eq) return -ENOMEM; /* create eq */ for (i = 0; i < eq_num; i++) { eq = &eq_table->eq[i]; eq->hr_dev = hr_dev; eq->eqn = i; if (i < comp_num) { /* CEQ */ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; eq->type_flag = HNS_ROCE_CEQ; eq->entries = hr_dev->caps.ceqe_depth; eq->eqe_size = hr_dev->caps.ceqe_size; eq->irq = hr_dev->irq[i + other_num + aeq_num]; eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; } else { /* AEQ */ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC; eq->type_flag = HNS_ROCE_AEQ; eq->entries = hr_dev->caps.aeqe_depth; eq->eqe_size = hr_dev->caps.aeqe_size; eq->irq = hr_dev->irq[i - comp_num + other_num]; eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; } ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd); if (ret) { dev_err(dev, "eq create failed.\n"); goto err_create_eq_fail; } } /* enable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num, other_num); if (ret) { dev_err(dev, "Request irq failed.\n"); goto err_request_irq_fail; } hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0); if (!hr_dev->irq_workq) { dev_err(dev, "Create irq workqueue failed!\n"); ret = -ENOMEM; goto err_create_wq_fail; } return 0; err_create_wq_fail: __hns_roce_free_irq(hr_dev); err_request_irq_fail: hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); err_create_eq_fail: for (i -= 1; i >= 0; i--) free_eq_buf(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); return ret; } static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; int eq_num; int i; eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; /* Disable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); 
__hns_roce_free_irq(hr_dev); for (i = 0; i < eq_num; i++) { hns_roce_v2_destroy_eqc(hr_dev, i); free_eq_buf(hr_dev, &eq_table->eq[i]); } kfree(eq_table->eq); flush_workqueue(hr_dev->irq_workq); destroy_workqueue(hr_dev->irq_workq); } static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = { .query_cqc_info = hns_roce_v2_query_cqc_info, }; static const struct ib_device_ops hns_roce_v2_dev_ops = { .destroy_qp = hns_roce_v2_destroy_qp, .modify_cq = hns_roce_v2_modify_cq, .poll_cq = hns_roce_v2_poll_cq, .post_recv = hns_roce_v2_post_recv, .post_send = hns_roce_v2_post_send, .query_qp = hns_roce_v2_query_qp, .req_notify_cq = hns_roce_v2_req_notify_cq, }; static const struct ib_device_ops hns_roce_v2_dev_srq_ops = { .modify_srq = hns_roce_v2_modify_srq, .post_srq_recv = hns_roce_v2_post_srq_recv, .query_srq = hns_roce_v2_query_srq, }; static const struct hns_roce_hw hns_roce_hw_v2 = { .cmq_init = hns_roce_v2_cmq_init, .cmq_exit = hns_roce_v2_cmq_exit, .hw_profile = hns_roce_v2_profile, .hw_init = hns_roce_v2_init, .hw_exit = hns_roce_v2_exit, .post_mbox = hns_roce_v2_post_mbox, .chk_mbox = hns_roce_v2_chk_mbox, .rst_prc_mbox = hns_roce_v2_rst_process_cmd, .set_gid = hns_roce_v2_set_gid, .set_mac = hns_roce_v2_set_mac, .write_mtpt = hns_roce_v2_write_mtpt, .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt, .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt, .mw_write_mtpt = hns_roce_v2_mw_write_mtpt, .write_cqc = hns_roce_v2_write_cqc, .set_hem = hns_roce_v2_set_hem, .clear_hem = hns_roce_v2_clear_hem, .modify_qp = hns_roce_v2_modify_qp, .query_qp = hns_roce_v2_query_qp, .destroy_qp = hns_roce_v2_destroy_qp, .qp_flow_control_init = hns_roce_v2_qp_flow_control_init, .modify_cq = hns_roce_v2_modify_cq, .post_send = hns_roce_v2_post_send, .post_recv = hns_roce_v2_post_recv, .req_notify_cq = hns_roce_v2_req_notify_cq, .poll_cq = hns_roce_v2_poll_cq, .init_eq = hns_roce_v2_init_eq_table, .cleanup_eq = hns_roce_v2_cleanup_eq_table, .write_srqc = hns_roce_v2_write_srqc, .modify_srq 
= hns_roce_v2_modify_srq, .query_srq = hns_roce_v2_query_srq, .post_srq_recv = hns_roce_v2_post_srq_recv, .hns_roce_dev_ops = &hns_roce_v2_dev_ops, .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, }; static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, struct hnae3_handle *handle) { struct hns_roce_v2_priv *priv = hr_dev->priv; int i; hr_dev->pci_dev = handle->pdev; hr_dev->dev = &handle->pdev->dev; hr_dev->hw = &hns_roce_hw_v2; hr_dev->dfx = &hns_roce_dfx_hw_v2; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->odb_offset = hr_dev->sdb_offset; /* Get info from NIC driver. 
*/ hr_dev->reg_base = handle->rinfo.roce_io_base; hr_dev->caps.num_ports = 1; hr_dev->iboe.netdevs[0] = handle->rinfo.netdev; hr_dev->iboe.phy_port[0] = 0; addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid, hr_dev->iboe.netdevs[0]->dev_addr); for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++) hr_dev->irq[i] = pci_irq_vector(handle->pdev, i + handle->rinfo.base_vector); /* cmd issue mode: 0 is poll, 1 is event */ hr_dev->cmd_mod = 1; hr_dev->loop_idc = 0; hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle); priv->handle = handle; } static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev; int ret; hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); if (!hr_dev) return -ENOMEM; hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL); if (!hr_dev->priv) { ret = -ENOMEM; goto error_failed_kzalloc; } hns_roce_hw_v2_get_cfg(hr_dev, handle); ret = hns_roce_init(hr_dev); if (ret) { dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); goto error_failed_get_cfg; } handle->priv = hr_dev; return 0; error_failed_get_cfg: kfree(hr_dev->priv); error_failed_kzalloc: ib_dealloc_device(&hr_dev->ib_dev); return ret; } static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, bool reset) { struct hns_roce_dev *hr_dev = handle->priv; if (!hr_dev) return; handle->priv = NULL; hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; hns_roce_handle_device_err(hr_dev); hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); } static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) { const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct pci_device_id *id; struct device *dev = &handle->pdev->dev; int ret; handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; goto reset_chk_err; } id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev); if (!id) return 
0; ret = __hns_roce_hw_v2_init_instance(handle); if (ret) { handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; dev_err(dev, "RoCE instance init failed! ret = %d\n", ret); if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) goto reset_chk_err; else return ret; } handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; return 0; reset_chk_err: dev_err(dev, "Device is busy in resetting state.\n" "please retry later.\n"); return -EBUSY; } static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, bool reset) { if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) return; handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; __hns_roce_hw_v2_uninit_instance(handle, reset); handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; } static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev; if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); return 0; } handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); hr_dev = handle->priv; if (!hr_dev) return 0; hr_dev->is_reset = true; hr_dev->active = false; hr_dev->dis_db = true; hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; return 0; } static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) { struct device *dev = &handle->pdev->dev; int ret; if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) { handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; return 0; } handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); ret = __hns_roce_hw_v2_init_instance(handle); if (ret) { /* when reset notify type is HNAE3_INIT_CLIENT In reset notify * callback function, RoCE Engine reinitialize. If RoCE reinit * failed, we should inform NIC driver. 
*/ handle->priv = NULL; dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret); } else { handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; dev_info(dev, "Reset done, RoCE client reinit finished.\n"); } return ret; } static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) { if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) return 0; handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY); __hns_roce_hw_v2_uninit_instance(handle, false); return 0; } static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle, enum hnae3_reset_notify_type type) { int ret = 0; switch (type) { case HNAE3_DOWN_CLIENT: ret = hns_roce_hw_v2_reset_notify_down(handle); break; case HNAE3_INIT_CLIENT: ret = hns_roce_hw_v2_reset_notify_init(handle); break; case HNAE3_UNINIT_CLIENT: ret = hns_roce_hw_v2_reset_notify_uninit(handle); break; default: break; } return ret; } static const struct hnae3_client_ops hns_roce_hw_v2_ops = { .init_instance = hns_roce_hw_v2_init_instance, .uninit_instance = hns_roce_hw_v2_uninit_instance, .reset_notify = hns_roce_hw_v2_reset_notify, }; static struct hnae3_client hns_roce_hw_v2_client = { .name = "hns_roce_hw_v2", .type = HNAE3_CLIENT_ROCE, .ops = &hns_roce_hw_v2_ops, }; static int __init hns_roce_hw_v2_init(void) { return hnae3_register_client(&hns_roce_hw_v2_client); } static void __exit hns_roce_hw_v2_exit(void) { hnae3_unregister_client(&hns_roce_hw_v2_client); } module_init(hns_roce_hw_v2_init); module_exit(hns_roce_hw_v2_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>"); MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
503876.c
/* * Copyright (c) 2014, Texas Instruments Incorporated - http://www.ti.com/ * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*---------------------------------------------------------------------------*/ /** * \addtogroup ext-flash * @{ * * \file * Implementation of a generic external SPI flash driver */ /*---------------------------------------------------------------------------*/ #include "contiki.h" #include "ext-flash.h" #include "dev/spi.h" #include "gpio-hal.h" #include "sys/log.h" #include <stdint.h> #include <stdbool.h> /*---------------------------------------------------------------------------*/ #ifndef EXT_FLASH_SPI_CONTROLLER #define EXT_FLASH_SPI_CONTROLLER 0xFF /* No controller */ #define EXT_FLASH_SPI_PIN_SCK GPIO_HAL_PIN_UNKNOWN #define EXT_FLASH_SPI_PIN_MOSI GPIO_HAL_PIN_UNKNOWN #define EXT_FLASH_SPI_PIN_MISO GPIO_HAL_PIN_UNKNOWN #define EXT_FLASH_SPI_PIN_CS GPIO_HAL_PIN_UNKNOWN #define EXT_FLASH_DEVICE_ID 0xFF #define EXT_FLASH_MID 0xFF #define EXT_FLASH_PROGRAM_PAGE_SIZE 256 #define EXT_FLASH_ERASE_SECTOR_SIZE 4096 #endif /* EXT_FLASH_SPI_CONTROLLER */ /*---------------------------------------------------------------------------*/ /* Log configuration */ #define LOG_MODULE "ext-flash" #define LOG_LEVEL LOG_LEVEL_NONE /*---------------------------------------------------------------------------*/ /* Instruction codes */ #define BLS_CODE_PROGRAM 0x02 /**< Page Program */ #define BLS_CODE_READ 0x03 /**< Read Data */ #define BLS_CODE_READ_STATUS 0x05 /**< Read Status Register */ #define BLS_CODE_WRITE_ENABLE 0x06 /**< Write Enable */ #define BLS_CODE_SECTOR_ERASE 0x20 /**< Sector Erase */ #define BLS_CODE_MDID 0x90 /**< Manufacturer Device ID */ #define BLS_CODE_PD 0xB9 /**< Power down */ #define BLS_CODE_RPD 0xAB /**< Release Power-Down */ /*---------------------------------------------------------------------------*/ /* Erase instructions */ #define BLS_CODE_ERASE_4K 0x20 /**< Sector Erase */ #define BLS_CODE_ERASE_32K 0x52 #define BLS_CODE_ERASE_64K 0xD8 #define BLS_CODE_ERASE_ALL 0xC7 /**< Mass Erase */ 
/*---------------------------------------------------------------------------*/ /* Bitmasks of the status register */ #define BLS_STATUS_SRWD_BM 0x80 #define BLS_STATUS_BP_BM 0x0C #define BLS_STATUS_WEL_BM 0x02 #define BLS_STATUS_WIP_BM 0x01 #define BLS_STATUS_BIT_BUSY 0x01 /**< Busy bit of the status register */ /*---------------------------------------------------------------------------*/ #define VERIFY_PART_LOCKED -2 #define VERIFY_PART_ERROR -1 #define VERIFY_PART_POWERED_DOWN 0 #define VERIFY_PART_OK 1 /*---------------------------------------------------------------------------*/ static const spi_device_t flash_spi_configuration_default = { #if GPIO_HAL_PORT_PIN_NUMBERING .port_spi_sck = EXT_FLASH_SPI_PORT_SCK, .port_spi_miso = EXT_FLASH_SPI_PORT_MISO, .port_spi_mosi = EXT_FLASH_SPI_PORT_MOSI, .port_spi_cs = EXT_FLASH_SPI_PORT_CS, #endif .spi_controller = EXT_FLASH_SPI_CONTROLLER, .pin_spi_sck = EXT_FLASH_SPI_PIN_SCK, .pin_spi_miso = EXT_FLASH_SPI_PIN_MISO, .pin_spi_mosi = EXT_FLASH_SPI_PIN_MOSI, .pin_spi_cs = EXT_FLASH_SPI_PIN_CS, .spi_bit_rate = 4000000, .spi_pha = 0, .spi_pol = 0 }; /*---------------------------------------------------------------------------*/ /** * Get spi configuration, return default configuration if NULL */ static const spi_device_t * get_spi_conf(const spi_device_t *conf) { if(conf == NULL) { return &flash_spi_configuration_default; } return conf; } /*---------------------------------------------------------------------------*/ /** * Clear external flash CSN line */ static bool select_on_bus(const spi_device_t *flash_spi_configuration) { if(spi_select(flash_spi_configuration) == SPI_DEV_STATUS_OK) { return true; } return false; } /*---------------------------------------------------------------------------*/ /** * Set external flash CSN line */ static void deselect(const spi_device_t *flash_spi_configuration) { spi_deselect(flash_spi_configuration); } /*---------------------------------------------------------------------------*/ 
/** * \brief Wait till previous erase/program operation completes. * \return True when successful. */ static bool wait_ready(const spi_device_t *flash_spi_configuration) { bool ret; const uint8_t wbuf[1] = { BLS_CODE_READ_STATUS }; if(select_on_bus(flash_spi_configuration) == false) { return false; } ret = spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)); if(ret != SPI_DEV_STATUS_OK) { deselect(flash_spi_configuration); return false; } for(;;) { uint8_t buf; /* Note that this temporary implementation is not * energy efficient. * Thread could have yielded while waiting for flash * erase/program to complete. */ ret = spi_read(flash_spi_configuration, &buf, sizeof(buf)); if(ret != SPI_DEV_STATUS_OK) { /* Error */ deselect(flash_spi_configuration); return false; } if(!(buf & BLS_STATUS_BIT_BUSY)) { /* Now ready */ break; } } deselect(flash_spi_configuration); return true; } /*---------------------------------------------------------------------------*/ /** * \brief Verify the flash part. 
* \retval VERIFY_PART_OK The part was identified successfully * \retval VERIFY_PART_ERROR There was an error communicating with the part * \retval VERIFY_PART_POWERED_DOWN Communication was successful, but the part * was powered down */ static uint8_t verify_part(const spi_device_t *flash_spi_configuration) { const uint8_t wbuf[] = { BLS_CODE_MDID, 0xFF, 0xFF, 0x00 }; uint8_t rbuf[2] = { 0, 0 }; bool ret; if(select_on_bus(flash_spi_configuration) == false) { return VERIFY_PART_LOCKED; } if(spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)) != SPI_DEV_STATUS_OK) { deselect(flash_spi_configuration); return VERIFY_PART_ERROR; } ret = spi_read(flash_spi_configuration, rbuf, sizeof(rbuf)); deselect(flash_spi_configuration); if(ret != SPI_DEV_STATUS_OK) { return VERIFY_PART_ERROR; } LOG_DBG("Verify: %02x %02x\n", rbuf[0], rbuf[1]); if(rbuf[0] != EXT_FLASH_MID || rbuf[1] != EXT_FLASH_DEVICE_ID) { return VERIFY_PART_POWERED_DOWN; } return VERIFY_PART_OK; } /*---------------------------------------------------------------------------*/ /** * \brief Put the device in power save mode. No access to data; only * the status register is accessible. 
*/ static bool power_down(const spi_device_t *flash_spi_configuration) { uint8_t cmd; uint8_t i; /* First, wait for the device to be ready */ if(wait_ready(flash_spi_configuration) == false) { /* Entering here will leave the device in standby instead of powerdown */ return false; } cmd = BLS_CODE_PD; if(select_on_bus(flash_spi_configuration) == false) { return false; } if(spi_write_byte(flash_spi_configuration, cmd) != SPI_DEV_STATUS_OK) { deselect(flash_spi_configuration); return false; } deselect(flash_spi_configuration); i = 0; while(i < 10) { if(verify_part(flash_spi_configuration) == VERIFY_PART_POWERED_DOWN) { /* Device is powered down */ return true; } i++; } /* Should not be required */ deselect(flash_spi_configuration); return false; } /*---------------------------------------------------------------------------*/ /** * \brief Take device out of power save mode and prepare it for normal operation * \return True if the command was written successfully */ static bool power_standby(const spi_device_t *flash_spi_configuration) { uint8_t cmd; bool success; cmd = BLS_CODE_RPD; if(select_on_bus(flash_spi_configuration) == false) { return false; } success = (spi_write(flash_spi_configuration, &cmd, sizeof(cmd)) == SPI_DEV_STATUS_OK); if(success) { success = wait_ready(flash_spi_configuration) == true ? true : false; } deselect(flash_spi_configuration); return success; } /*---------------------------------------------------------------------------*/ /** * \brief Enable write. * \return True when successful. 
*/ static bool write_enable(const spi_device_t *flash_spi_configuration) { bool ret; const uint8_t wbuf[] = { BLS_CODE_WRITE_ENABLE }; if(select_on_bus(flash_spi_configuration) == false) { return false; } ret = (spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)) == SPI_DEV_STATUS_OK); deselect(flash_spi_configuration); if(ret == false) { return false; } return true; } /*---------------------------------------------------------------------------*/ bool ext_flash_open(const spi_device_t *conf) { const spi_device_t *flash_spi_configuration; flash_spi_configuration = get_spi_conf(conf); /* Check if platform has ext-flash */ if(flash_spi_configuration->pin_spi_sck == GPIO_HAL_PIN_UNKNOWN) { return false; } if(spi_acquire(flash_spi_configuration) != SPI_DEV_STATUS_OK) { return false; } /* Default output to clear chip select */ deselect(flash_spi_configuration); /* Put the part is standby mode */ power_standby(flash_spi_configuration); if(verify_part(flash_spi_configuration) == VERIFY_PART_OK) { return true; } /* Failed to verify */ spi_release(flash_spi_configuration); return false; } /*---------------------------------------------------------------------------*/ bool ext_flash_close(const spi_device_t *conf) { bool ret; const spi_device_t *flash_spi_configuration; flash_spi_configuration = get_spi_conf(conf); /* Put the part in low power mode */ ret = power_down(flash_spi_configuration); /* SPI is released no matter if power_down() succeeds or fails */ if(spi_release(flash_spi_configuration) != SPI_DEV_STATUS_OK) { return false; } return ret; } /*---------------------------------------------------------------------------*/ bool ext_flash_read(const spi_device_t *conf, uint32_t offset, uint32_t length, uint8_t *buf) { uint8_t wbuf[4]; bool ret; const spi_device_t *flash_spi_configuration; flash_spi_configuration = get_spi_conf(conf); /* Wait till previous erase/program operation completes */ if(wait_ready(flash_spi_configuration) == false) { return false; } /* * SPI 
is driven with very low frequency (1MHz < 33MHz fR spec) * in this implementation, hence it is not necessary to use fast read. */ wbuf[0] = BLS_CODE_READ; wbuf[1] = (offset >> 16) & 0xff; wbuf[2] = (offset >> 8) & 0xff; wbuf[3] = offset & 0xff; if(select_on_bus(flash_spi_configuration) == false) { return false; } if(spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)) != SPI_DEV_STATUS_OK) { /* failure */ deselect(flash_spi_configuration); return false; } ret = (spi_read(flash_spi_configuration, buf, length) == SPI_DEV_STATUS_OK); deselect(flash_spi_configuration); return ret; } /*---------------------------------------------------------------------------*/ bool ext_flash_write(const spi_device_t *conf, uint32_t offset, uint32_t length, const uint8_t *buf) { uint8_t wbuf[4]; uint32_t ilen; /* interim length per instruction */ const spi_device_t *flash_spi_configuration; flash_spi_configuration = get_spi_conf(conf); while(length > 0) { /* Wait till previous erase/program operation completes */ if(wait_ready(flash_spi_configuration) == false) { return false; } if(write_enable(flash_spi_configuration) == false) { return false; } ilen = EXT_FLASH_PROGRAM_PAGE_SIZE - (offset % EXT_FLASH_PROGRAM_PAGE_SIZE); if(length < ilen) { ilen = length; } wbuf[0] = BLS_CODE_PROGRAM; wbuf[1] = (offset >> 16) & 0xff; wbuf[2] = (offset >> 8) & 0xff; wbuf[3] = offset & 0xff; offset += ilen; length -= ilen; /* Upto 100ns CS hold time (which is not clear * whether it's application only inbetween reads) * is not imposed here since above instructions * should be enough to delay * as much. 
*/ if(select_on_bus(flash_spi_configuration) == false) { return false; } if(spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)) != SPI_DEV_STATUS_OK) { /* failure */ deselect(flash_spi_configuration); return false; } if(spi_write(flash_spi_configuration, buf, ilen) != SPI_DEV_STATUS_OK) { /* failure */ deselect(flash_spi_configuration); return false; } buf += ilen; deselect(flash_spi_configuration); } return true; } /*---------------------------------------------------------------------------*/ bool ext_flash_erase(const spi_device_t *conf, uint32_t offset, uint32_t length) { /* * Note that Block erase might be more efficient when the floor map * is well planned for OTA, but to simplify this implementation, * sector erase is used blindly. */ uint8_t wbuf[4]; uint32_t i, numsectors; uint32_t endoffset = offset + length - 1; const spi_device_t *flash_spi_configuration; flash_spi_configuration = get_spi_conf(conf); offset = (offset / EXT_FLASH_ERASE_SECTOR_SIZE) * EXT_FLASH_ERASE_SECTOR_SIZE; numsectors = (endoffset - offset + EXT_FLASH_ERASE_SECTOR_SIZE - 1) / EXT_FLASH_ERASE_SECTOR_SIZE; wbuf[0] = BLS_CODE_SECTOR_ERASE; for(i = 0; i < numsectors; i++) { /* Wait till previous erase/program operation completes */ if(wait_ready(flash_spi_configuration) == false) { return false; } if(write_enable(flash_spi_configuration) == false) { return false; } wbuf[1] = (offset >> 16) & 0xff; wbuf[2] = (offset >> 8) & 0xff; wbuf[3] = offset & 0xff; if(select_on_bus(flash_spi_configuration) == false) { return false; } if(spi_write(flash_spi_configuration, wbuf, sizeof(wbuf)) != SPI_DEV_STATUS_OK) { /* failure */ deselect(flash_spi_configuration); return false; } deselect(flash_spi_configuration); offset += EXT_FLASH_ERASE_SECTOR_SIZE; } return true; } /*---------------------------------------------------------------------------*/ bool ext_flash_init(const spi_device_t *conf) { if(ext_flash_open(conf) == false) { return false; } if(ext_flash_close(conf) == false) { return false; } 
LOG_INFO("Flash init successful\n"); return true; } /*---------------------------------------------------------------------------*/ /** @} */
115112.c
#include <stdio.h>
#include <stdlib.h>

/*
 * Simple text menu: keeps prompting until the user picks a valid option
 * (1..3) and echoes which option was chosen.
 *
 * Fixes over the original:
 *  - `opcao` was read in the loop condition before ever being assigned
 *    (undefined behavior); it is now initialized to 0.
 *  - `void main()` combined with `return 0;` is invalid C; main now has
 *    the standard `int main(void)` signature.
 *  - the return value of scanf() is checked, so non-numeric input no
 *    longer leaves `opcao` untouched and spins the loop forever.
 *  - typo in the error message ("invailida" -> "invalida").
 */
int main(void)
{
    int opcao = 0; /* 0 is outside 1..3, so the menu is shown at least once */

    /* Re-prompt until a valid option has been chosen. */
    while (opcao < 1 || opcao > 3) {
        /* Menu interface. */
        printf("****Escolha uma opcao***");
        printf("\n1-Opcao 1");
        printf("\n2-Opcao 2");
        printf("\n3-opcao 3");

        /* Read the option; on bad input, discard the rest of the line so
         * the next iteration does not re-read the same garbage forever. */
        if (scanf("%d", &opcao) != 1) {
            int ch;
            while ((ch = getchar()) != '\n' && ch != EOF) {
            }
            if (ch == EOF) {
                return EXIT_FAILURE; /* input exhausted; give up */
            }
            opcao = 0;
            continue;
        }

        /* Report the chosen option. */
        switch (opcao) {
        case 1:
            printf("\nOpcao 1 foi escolhida");
            break;
        case 2:
            printf("\nOpcao 2 foi escolhida");
            break;
        case 3:
            printf("\nOpcao 3 foi escolhida");
            break;
        default:
            printf("Opcao invalida");
            break;
        }
    }
    return 0;
}
132654.c
#include "kernel/types.h"
#include "kernel/stat.h"
#include "user/user.h"

/*
 * kill: send kill(pid, 9) to every PID listed on the command line.
 * (The two-argument kill() is presumably the Ass2 - Task2.2.2 variant of
 * the xv6 syscall — confirm against user/user.h.)
 */
int
main(int argc, char **argv)
{
  if (argc < 2) {
    fprintf(2, "usage: kill pid...\n");
    exit(1);
  }

  for (int arg = 1; arg < argc; arg++) {
    kill(atoi(argv[arg]), 9);
  }

  exit(0);
}
372406.c
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <poll.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/stat.h> #include <time.h> #include <unistd.h> #include <wayland-client.h> #include <wordexp.h> #include "background-image.h" #include "cairo.h" #include "comm.h" #include "log.h" #include "loop.h" #include "pool-buffer.h" #include "seat.h" #include "swaylock.h" #include "wlr-input-inhibitor-unstable-v1-client-protocol.h" #include "wlr-layer-shell-unstable-v1-client-protocol.h" #include "xdg-output-unstable-v1-client-protocol.h" static uint32_t parse_color(const char *color) { if (color[0] == '#') { ++color; } int len = strlen(color); if (len != 6 && len != 8) { swaylock_log(LOG_DEBUG, "Invalid color %s, defaulting to 0xFFFFFFFF", color); return 0xFFFFFFFF; } uint32_t res = (uint32_t)strtoul(color, NULL, 16); if (strlen(color) == 6) { res = (res << 8) | 0xFF; } return res; } int lenient_strcmp(char *a, char *b) { if (a == b) { return 0; } else if (!a) { return -1; } else if (!b) { return 1; } else { return strcmp(a, b); } } static void daemonize(void) { int fds[2]; if (pipe(fds) != 0) { swaylock_log(LOG_ERROR, "Failed to pipe"); exit(1); } if (fork() == 0) { setsid(); close(fds[0]); int devnull = open("/dev/null", O_RDWR); dup2(STDOUT_FILENO, devnull); dup2(STDERR_FILENO, devnull); close(devnull); uint8_t success = 0; if (chdir("/") != 0) { write(fds[1], &success, 1); exit(1); } success = 1; if (write(fds[1], &success, 1) != 1) { exit(1); } close(fds[1]); } else { close(fds[1]); uint8_t success; if (read(fds[0], &success, 1) != 1 || !success) { swaylock_log(LOG_ERROR, "Failed to daemonize"); exit(1); } close(fds[0]); exit(0); } } static void destroy_surface(struct swaylock_surface *surface) { wl_list_remove(&surface->link); if (surface->layer_surface != NULL) { 
zwlr_layer_surface_v1_destroy(surface->layer_surface); } if (surface->surface != NULL) { wl_surface_destroy(surface->surface); } destroy_buffer(&surface->buffers[0]); destroy_buffer(&surface->buffers[1]); wl_output_destroy(surface->output); free(surface); } static const struct zwlr_layer_surface_v1_listener layer_surface_listener; static cairo_surface_t *select_image(struct swaylock_state *state, struct swaylock_surface *surface); static bool surface_is_opaque(struct swaylock_surface *surface) { if (surface->image) { return cairo_surface_get_content(surface->image) == CAIRO_CONTENT_COLOR; } return (surface->state->args.colors.background & 0xff) == 0xff; } static void create_layer_surface(struct swaylock_surface *surface) { struct swaylock_state *state = surface->state; surface->image = select_image(state, surface); surface->surface = wl_compositor_create_surface(state->compositor); assert(surface->surface); surface->child = wl_compositor_create_surface(state->compositor); assert(surface->child); surface->subsurface = wl_subcompositor_get_subsurface(state->subcompositor, surface->child, surface->surface); assert(surface->subsurface); wl_subsurface_set_sync(surface->subsurface); surface->layer_surface = zwlr_layer_shell_v1_get_layer_surface( state->layer_shell, surface->surface, surface->output, ZWLR_LAYER_SHELL_V1_LAYER_OVERLAY, "lockscreen"); assert(surface->layer_surface); zwlr_layer_surface_v1_set_size(surface->layer_surface, 0, 0); zwlr_layer_surface_v1_set_anchor(surface->layer_surface, ZWLR_LAYER_SURFACE_V1_ANCHOR_TOP | ZWLR_LAYER_SURFACE_V1_ANCHOR_RIGHT | ZWLR_LAYER_SURFACE_V1_ANCHOR_BOTTOM | ZWLR_LAYER_SURFACE_V1_ANCHOR_LEFT); zwlr_layer_surface_v1_set_exclusive_zone(surface->layer_surface, -1); zwlr_layer_surface_v1_set_keyboard_interactivity( surface->layer_surface, true); zwlr_layer_surface_v1_add_listener(surface->layer_surface, &layer_surface_listener, surface); if (surface_is_opaque(surface) && surface->state->args.mode != BACKGROUND_MODE_CENTER && 
surface->state->args.mode != BACKGROUND_MODE_FIT) { struct wl_region *region = wl_compositor_create_region(surface->state->compositor); wl_region_add(region, 0, 0, INT32_MAX, INT32_MAX); wl_surface_set_opaque_region(surface->surface, region); wl_region_destroy(region); } wl_surface_commit(surface->surface); } static void layer_surface_configure(void *data, struct zwlr_layer_surface_v1 *layer_surface, uint32_t serial, uint32_t width, uint32_t height) { struct swaylock_surface *surface = data; surface->width = width; surface->height = height; zwlr_layer_surface_v1_ack_configure(layer_surface, serial); render_frame_background(surface); render_frame(surface); } static void layer_surface_closed(void *data, struct zwlr_layer_surface_v1 *layer_surface) { struct swaylock_surface *surface = data; destroy_surface(surface); } static const struct zwlr_layer_surface_v1_listener layer_surface_listener = { .configure = layer_surface_configure, .closed = layer_surface_closed, }; static const struct wl_callback_listener surface_frame_listener; static void surface_frame_handle_done(void *data, struct wl_callback *callback, uint32_t time) { struct swaylock_surface *surface = data; wl_callback_destroy(callback); surface->frame_pending = false; if (surface->dirty) { // Schedule a frame in case the surface is damaged again struct wl_callback *callback = wl_surface_frame(surface->surface); wl_callback_add_listener(callback, &surface_frame_listener, surface); surface->frame_pending = true; render_frame(surface); surface->dirty = false; } } static const struct wl_callback_listener surface_frame_listener = { .done = surface_frame_handle_done, }; void damage_surface(struct swaylock_surface *surface) { surface->dirty = true; if (surface->frame_pending) { return; } struct wl_callback *callback = wl_surface_frame(surface->surface); wl_callback_add_listener(callback, &surface_frame_listener, surface); surface->frame_pending = true; wl_surface_commit(surface->surface); } void damage_state(struct 
swaylock_state *state) { struct swaylock_surface *surface; wl_list_for_each(surface, &state->surfaces, link) { damage_surface(surface); } } static void handle_wl_output_geometry(void *data, struct wl_output *wl_output, int32_t x, int32_t y, int32_t width_mm, int32_t height_mm, int32_t subpixel, const char *make, const char *model, int32_t transform) { struct swaylock_surface *surface = data; surface->subpixel = subpixel; if (surface->state->run_display) { damage_surface(surface); } } static void handle_wl_output_mode(void *data, struct wl_output *output, uint32_t flags, int32_t width, int32_t height, int32_t refresh) { // Who cares } static void handle_wl_output_done(void *data, struct wl_output *output) { // Who cares } static void handle_wl_output_scale(void *data, struct wl_output *output, int32_t factor) { struct swaylock_surface *surface = data; surface->scale = factor; if (surface->state->run_display) { damage_surface(surface); } } struct wl_output_listener _wl_output_listener = { .geometry = handle_wl_output_geometry, .mode = handle_wl_output_mode, .done = handle_wl_output_done, .scale = handle_wl_output_scale, }; static void handle_xdg_output_logical_size(void *data, struct zxdg_output_v1 *output, int width, int height) { // Who cares } static void handle_xdg_output_logical_position(void *data, struct zxdg_output_v1 *output, int x, int y) { // Who cares } static void handle_xdg_output_name(void *data, struct zxdg_output_v1 *output, const char *name) { swaylock_log(LOG_DEBUG, "output name is %s", name); struct swaylock_surface *surface = data; surface->xdg_output = output; surface->output_name = strdup(name); } static void handle_xdg_output_description(void *data, struct zxdg_output_v1 *output, const char *description) { // Who cares } static void handle_xdg_output_done(void *data, struct zxdg_output_v1 *output) { // Who cares } struct zxdg_output_v1_listener _xdg_output_listener = { .logical_position = handle_xdg_output_logical_position, .logical_size = 
handle_xdg_output_logical_size, .done = handle_xdg_output_done, .name = handle_xdg_output_name, .description = handle_xdg_output_description, }; static void handle_global(void *data, struct wl_registry *registry, uint32_t name, const char *interface, uint32_t version) { struct swaylock_state *state = data; if (strcmp(interface, wl_compositor_interface.name) == 0) { state->compositor = wl_registry_bind(registry, name, &wl_compositor_interface, 3); } else if (strcmp(interface, wl_subcompositor_interface.name) == 0) { state->subcompositor = wl_registry_bind(registry, name, &wl_subcompositor_interface, 1); } else if (strcmp(interface, wl_shm_interface.name) == 0) { state->shm = wl_registry_bind(registry, name, &wl_shm_interface, 1); } else if (strcmp(interface, wl_seat_interface.name) == 0) { struct wl_seat *seat = wl_registry_bind( registry, name, &wl_seat_interface, 3); struct swaylock_seat *swaylock_seat = calloc(1, sizeof(struct swaylock_seat)); swaylock_seat->state = state; wl_seat_add_listener(seat, &seat_listener, swaylock_seat); } else if (strcmp(interface, zwlr_layer_shell_v1_interface.name) == 0) { state->layer_shell = wl_registry_bind( registry, name, &zwlr_layer_shell_v1_interface, 1); } else if (strcmp(interface, zwlr_input_inhibit_manager_v1_interface.name) == 0) { state->input_inhibit_manager = wl_registry_bind( registry, name, &zwlr_input_inhibit_manager_v1_interface, 1); } else if (strcmp(interface, zxdg_output_manager_v1_interface.name) == 0) { state->zxdg_output_manager = wl_registry_bind( registry, name, &zxdg_output_manager_v1_interface, 2); } else if (strcmp(interface, wl_output_interface.name) == 0) { struct swaylock_surface *surface = calloc(1, sizeof(struct swaylock_surface)); surface->state = state; surface->output = wl_registry_bind(registry, name, &wl_output_interface, 3); surface->output_global_name = name; wl_output_add_listener(surface->output, &_wl_output_listener, surface); wl_list_insert(&state->surfaces, &surface->link); if 
(state->run_display) { create_layer_surface(surface); wl_display_roundtrip(state->display); } } } static void handle_global_remove(void *data, struct wl_registry *registry, uint32_t name) { struct swaylock_state *state = data; struct swaylock_surface *surface; wl_list_for_each(surface, &state->surfaces, link) { if (surface->output_global_name == name) { destroy_surface(surface); break; } } } static const struct wl_registry_listener registry_listener = { .global = handle_global, .global_remove = handle_global_remove, }; static cairo_surface_t *select_image(struct swaylock_state *state, struct swaylock_surface *surface) { struct swaylock_image *image; cairo_surface_t *default_image = NULL; wl_list_for_each(image, &state->images, link) { if (lenient_strcmp(image->output_name, surface->output_name) == 0) { return image->cairo_surface; } else if (!image->output_name) { default_image = image->cairo_surface; } } return default_image; } static char *join_args(char **argv, int argc) { assert(argc > 0); int len = 0, i; for (i = 0; i < argc; ++i) { len += strlen(argv[i]) + 1; } char *res = malloc(len); len = 0; for (i = 0; i < argc; ++i) { strcpy(res + len, argv[i]); len += strlen(argv[i]); res[len++] = ' '; } res[len - 1] = '\0'; return res; } static void load_image(char *arg, struct swaylock_state *state) { // [[<output>]:]<path> struct swaylock_image *image = calloc(1, sizeof(struct swaylock_image)); char *separator = strchr(arg, ':'); if (separator) { *separator = '\0'; image->output_name = separator == arg ? 
NULL : strdup(arg); image->path = strdup(separator + 1); } else { image->output_name = NULL; image->path = strdup(arg); } struct swaylock_image *iter_image, *temp; wl_list_for_each_safe(iter_image, temp, &state->images, link) { if (lenient_strcmp(iter_image->output_name, image->output_name) == 0) { if (image->output_name) { swaylock_log(LOG_DEBUG, "Replacing image defined for output %s with %s", image->output_name, image->path); } else { swaylock_log(LOG_DEBUG, "Replacing default image with %s", image->path); } wl_list_remove(&iter_image->link); free(iter_image->cairo_surface); free(iter_image->output_name); free(iter_image->path); free(iter_image); break; } } // The shell will not expand ~ to the value of $HOME when an output name is // given. Also, any image paths given in the config file need to have shell // expansions performed wordexp_t p; while (strstr(image->path, " ")) { image->path = realloc(image->path, strlen(image->path) + 2); char *ptr = strstr(image->path, " ") + 1; memmove(ptr + 1, ptr, strlen(ptr) + 1); *ptr = '\\'; } if (wordexp(image->path, &p, 0) == 0) { free(image->path); image->path = join_args(p.we_wordv, p.we_wordc); wordfree(&p); } // Load the actual image image->cairo_surface = load_background_image(image->path); if (!image->cairo_surface) { free(image); return; } wl_list_insert(&state->images, &image->link); swaylock_log(LOG_DEBUG, "Loaded image %s for output %s", image->path, image->output_name ? 
image->output_name : "*"); } static void set_default_colors(struct swaylock_colors *colors) { colors->background = 0xFFFFFFFF; colors->bs_highlight = 0xDB3300FF; colors->key_highlight = 0x33DB00FF; colors->caps_lock_bs_highlight = 0xDB3300FF; colors->caps_lock_key_highlight = 0x33DB00FF; colors->separator = 0x000000FF; colors->layout_background = 0x000000C0; colors->layout_border = 0x00000000; colors->layout_text = 0xFFFFFFFF; colors->inside = (struct swaylock_colorset){ .input = 0x000000C0, .cleared = 0xE5A445C0, .caps_lock = 0x000000C0, .verifying = 0x0072FFC0, .wrong = 0xFA0000C0, }; colors->line = (struct swaylock_colorset){ .input = 0x000000FF, .cleared = 0x000000FF, .caps_lock = 0x000000FF, .verifying = 0x000000FF, .wrong = 0x000000FF, }; colors->ring = (struct swaylock_colorset){ .input = 0x337D00FF, .cleared = 0xE5A445FF, .caps_lock = 0xE5A445FF, .verifying = 0x3300FFFF, .wrong = 0x7D3300FF, }; colors->text = (struct swaylock_colorset){ .input = 0xE5A445FF, .cleared = 0x000000FF, .caps_lock = 0xE5A445FF, .verifying = 0x000000FF, .wrong = 0x000000FF, }; } enum line_mode { LM_LINE, LM_INSIDE, LM_RING, }; static int parse_options(int argc, char **argv, struct swaylock_state *state, enum line_mode *line_mode, char **config_path) { enum long_option_codes { LO_BS_HL_COLOR = 256, LO_CAPS_LOCK_BS_HL_COLOR, LO_CAPS_LOCK_KEY_HL_COLOR, LO_FONT, LO_IND_RADIUS, LO_IND_THICKNESS, LO_INSIDE_COLOR, LO_INSIDE_CLEAR_COLOR, LO_INSIDE_CAPS_LOCK_COLOR, LO_INSIDE_VER_COLOR, LO_INSIDE_WRONG_COLOR, LO_KEY_HL_COLOR, LO_LAYOUT_TXT_COLOR, LO_LAYOUT_BG_COLOR, LO_LAYOUT_BORDER_COLOR, LO_LINE_COLOR, LO_LINE_CLEAR_COLOR, LO_LINE_CAPS_LOCK_COLOR, LO_LINE_VER_COLOR, LO_LINE_WRONG_COLOR, LO_RING_COLOR, LO_RING_CLEAR_COLOR, LO_RING_CAPS_LOCK_COLOR, LO_RING_VER_COLOR, LO_RING_WRONG_COLOR, LO_SEP_COLOR, LO_TEXT_COLOR, LO_TEXT_CLEAR_COLOR, LO_TEXT_CAPS_LOCK_COLOR, LO_TEXT_VER_COLOR, LO_TEXT_WRONG_COLOR, }; static struct option long_options[] = { {"config", required_argument, NULL, 'C'}, 
{"color", required_argument, NULL, 'c'}, {"debug", no_argument, NULL, 'd'}, {"ignore-empty-password", no_argument, NULL, 'e'}, {"daemonize", no_argument, NULL, 'f'}, {"help", no_argument, NULL, 'h'}, {"image", required_argument, NULL, 'i'}, {"disable-caps-lock-text", no_argument, NULL, 'L'}, {"indicator-caps-lock", no_argument, NULL, 'l'}, {"line-uses-inside", no_argument, NULL, 'n'}, {"socket", required_argument, NULL, 'p'}, {"line-uses-ring", no_argument, NULL, 'r'}, {"scaling", required_argument, NULL, 's'}, {"tiling", no_argument, NULL, 't'}, {"no-unlock-indicator", no_argument, NULL, 'u'}, {"show-keyboard-layout", no_argument, NULL, 'k'}, {"show-failed-attempts", no_argument, NULL, 'F'}, {"version", no_argument, NULL, 'v'}, {"bs-hl-color", required_argument, NULL, LO_BS_HL_COLOR}, {"caps-lock-bs-hl-color", required_argument, NULL, LO_CAPS_LOCK_BS_HL_COLOR}, {"caps-lock-key-hl-color", required_argument, NULL, LO_CAPS_LOCK_KEY_HL_COLOR}, {"font", required_argument, NULL, LO_FONT}, {"indicator-radius", required_argument, NULL, LO_IND_RADIUS}, {"indicator-thickness", required_argument, NULL, LO_IND_THICKNESS}, {"inside-color", required_argument, NULL, LO_INSIDE_COLOR}, {"inside-clear-color", required_argument, NULL, LO_INSIDE_CLEAR_COLOR}, {"inside-caps-lock-color", required_argument, NULL, LO_INSIDE_CAPS_LOCK_COLOR}, {"inside-ver-color", required_argument, NULL, LO_INSIDE_VER_COLOR}, {"inside-wrong-color", required_argument, NULL, LO_INSIDE_WRONG_COLOR}, {"key-hl-color", required_argument, NULL, LO_KEY_HL_COLOR}, {"layout-bg-color", required_argument, NULL, LO_LAYOUT_BG_COLOR}, {"layout-border-color", required_argument, NULL, LO_LAYOUT_BORDER_COLOR}, {"layout-text-color", required_argument, NULL, LO_LAYOUT_TXT_COLOR}, {"line-color", required_argument, NULL, LO_LINE_COLOR}, {"line-clear-color", required_argument, NULL, LO_LINE_CLEAR_COLOR}, {"line-caps-lock-color", required_argument, NULL, LO_LINE_CAPS_LOCK_COLOR}, {"line-ver-color", required_argument, NULL, 
LO_LINE_VER_COLOR}, {"line-wrong-color", required_argument, NULL, LO_LINE_WRONG_COLOR}, {"ring-color", required_argument, NULL, LO_RING_COLOR}, {"ring-clear-color", required_argument, NULL, LO_RING_CLEAR_COLOR}, {"ring-caps-lock-color", required_argument, NULL, LO_RING_CAPS_LOCK_COLOR}, {"ring-ver-color", required_argument, NULL, LO_RING_VER_COLOR}, {"ring-wrong-color", required_argument, NULL, LO_RING_WRONG_COLOR}, {"separator-color", required_argument, NULL, LO_SEP_COLOR}, {"text-color", required_argument, NULL, LO_TEXT_COLOR}, {"text-clear-color", required_argument, NULL, LO_TEXT_CLEAR_COLOR}, {"text-caps-lock-color", required_argument, NULL, LO_TEXT_CAPS_LOCK_COLOR}, {"text-ver-color", required_argument, NULL, LO_TEXT_VER_COLOR}, {"text-wrong-color", required_argument, NULL, LO_TEXT_WRONG_COLOR}, {0, 0, 0, 0} }; const char usage[] = "Usage: swaylock [options...]\n" "\n" " -C, --config <config_file> " "Path to the config file.\n" " -c, --color <color> " "Turn the screen into the given color instead of white.\n" " -d, --debug " "Enable debugging output.\n" " -e, --ignore-empty-password " "When an empty password is provided, do not validate it.\n" " -F, --show-failed-attempts " "Show current count of failed authentication attempts.\n" " -f, --daemonize " "Detach from the controlling terminal after locking.\n" " -h, --help " "Show help message and quit.\n" " -i, --image [[<output>]:]<path> " "Display the given image.\n" " -k, --show-keyboard-layout " "Display the current xkb layout while typing.\n" " -L, --disable-caps-lock-text " "Disable the Caps Lock text.\n" " -l, --indicator-caps-lock " "Show the current Caps Lock state also on the indicator.\n" " -s, --scaling <mode> " "Scaling mode: stretch, fill, fit, center, tile.\n" " -t, --tiling " "Same as --scaling=tile.\n" " -u, --no-unlock-indicator " "Disable the unlock indicator.\n" " -v, --version " "Show the version number and quit.\n" " --bs-hl-color <color> " "Sets the color of backspace highlight segments.\n" 
" --caps-lock-bs-hl-color <color> " "Sets the color of backspace highlight segments when Caps Lock " "is active.\n" " --caps-lock-key-hl-color <color> " "Sets the color of the key press highlight segments when " "Caps Lock is active.\n" " --font <font> " "Sets the font of the text.\n" " --indicator-radius <radius> " "Sets the indicator radius.\n" " --indicator-thickness <thick> " "Sets the indicator thickness.\n" " --inside-color <color> " "Sets the color of the inside of the indicator.\n" " --inside-clear-color <color> " "Sets the color of the inside of the indicator when cleared.\n" " --inside-caps-lock-color <color> " "Sets the color of the inside of the indicator when Caps Lock " "is active.\n" " --inside-ver-color <color> " "Sets the color of the inside of the indicator when verifying.\n" " --inside-wrong-color <color> " "Sets the color of the inside of the indicator when invalid.\n" " --key-hl-color <color> " "Sets the color of the key press highlight segments.\n" " --layout-bg-color <color> " "Sets the background color of the box containing the layout text.\n" " --layout-border-color <color> " "Sets the color of the border of the box containing the layout text.\n" " --layout-text-color <color> " "Sets the color of the layout text.\n" " --line-color <color> " "Sets the color of the line between the inside and ring.\n" " --line-clear-color <color> " "Sets the color of the line between the inside and ring when " "cleared.\n" " --line-caps-lock-color <color> " "Sets the color of the line between the inside and ring when " "Caps Lock is active.\n" " --line-ver-color <color> " "Sets the color of the line between the inside and ring when " "verifying.\n" " --line-wrong-color <color> " "Sets the color of the line between the inside and ring when " "invalid.\n" " -n, --line-uses-inside " "Use the inside color for the line between the inside and ring.\n" " -r, --line-uses-ring " "Use the ring color for the line between the inside and ring.\n" " --ring-color <color> " 
"Sets the color of the ring of the indicator.\n" " --ring-clear-color <color> " "Sets the color of the ring of the indicator when cleared.\n" " --ring-caps-lock-color <color> " "Sets the color of the ring of the indicator when Caps Lock " "is active.\n" " --ring-ver-color <color> " "Sets the color of the ring of the indicator when verifying.\n" " --ring-wrong-color <color> " "Sets the color of the ring of the indicator when invalid.\n" " --separator-color <color> " "Sets the color of the lines that separate highlight segments.\n" " --text-color <color> " "Sets the color of the text.\n" " --text-clear-color <color> " "Sets the color of the text when cleared.\n" " --text-caps-lock-color <color> " "Sets the color of the text when Caps Lock is active.\n" " --text-ver-color <color> " "Sets the color of the text when verifying.\n" " --text-wrong-color <color> " "Sets the color of the text when invalid.\n" "\n" "All <color> options are of the form <rrggbb[aa]>.\n"; int c; optind = 1; while (1) { int opt_idx = 0; c = getopt_long(argc, argv, "c:deFfhi:kLlnrs:tuvC:", long_options, &opt_idx); if (c == -1) { break; } switch (c) { case 'C': if (config_path) { *config_path = strdup(optarg); } break; case 'c': if (state) { state->args.colors.background = parse_color(optarg); } break; case 'd': swaylock_log_init(LOG_DEBUG); break; case 'e': if (state) { state->args.ignore_empty = true; } break; case 'F': if (state) { state->args.show_failed_attempts = true; } break; case 'f': if (state) { state->args.daemonize = true; } break; case 'i': if (state) { load_image(optarg, state); } break; case 'k': if (state) { state->args.show_keyboard_layout = true; } break; case 'L': if (state) { state->args.show_caps_lock_text = false; } break; case 'l': if (state) { state->args.show_caps_lock_indicator = true; } break; case 'n': if (line_mode) { *line_mode = LM_INSIDE; } break; case 'r': if (line_mode) { *line_mode = LM_RING; } break; case 's': if (state) { state->args.mode = 
parse_background_mode(optarg); if (state->args.mode == BACKGROUND_MODE_INVALID) { return 1; } } break; case 't': if (state) { state->args.mode = BACKGROUND_MODE_TILE; } break; case 'u': if (state) { state->args.show_indicator = false; } break; case 'v': fprintf(stdout, "swaylock version " SWAYLOCK_VERSION "\n"); exit(EXIT_SUCCESS); break; case LO_BS_HL_COLOR: if (state) { state->args.colors.bs_highlight = parse_color(optarg); } break; case LO_CAPS_LOCK_BS_HL_COLOR: if (state) { state->args.colors.caps_lock_bs_highlight = parse_color(optarg); } break; case LO_CAPS_LOCK_KEY_HL_COLOR: if (state) { state->args.colors.caps_lock_key_highlight = parse_color(optarg); } break; case LO_FONT: if (state) { free(state->args.font); state->args.font = strdup(optarg); } break; case LO_IND_RADIUS: if (state) { state->args.radius = strtol(optarg, NULL, 0); } break; case LO_IND_THICKNESS: if (state) { state->args.thickness = strtol(optarg, NULL, 0); } break; case LO_INSIDE_COLOR: if (state) { state->args.colors.inside.input = parse_color(optarg); } break; case LO_INSIDE_CLEAR_COLOR: if (state) { state->args.colors.inside.cleared = parse_color(optarg); } break; case LO_INSIDE_CAPS_LOCK_COLOR: if (state) { state->args.colors.inside.caps_lock = parse_color(optarg); } break; case LO_INSIDE_VER_COLOR: if (state) { state->args.colors.inside.verifying = parse_color(optarg); } break; case LO_INSIDE_WRONG_COLOR: if (state) { state->args.colors.inside.wrong = parse_color(optarg); } break; case LO_KEY_HL_COLOR: if (state) { state->args.colors.key_highlight = parse_color(optarg); } break; case LO_LAYOUT_BG_COLOR: if (state) { state->args.colors.layout_background = parse_color(optarg); } break; case LO_LAYOUT_BORDER_COLOR: if (state) { state->args.colors.layout_border = parse_color(optarg); } break; case LO_LAYOUT_TXT_COLOR: if (state) { state->args.colors.layout_text = parse_color(optarg); } break; case LO_LINE_COLOR: if (state) { state->args.colors.line.input = parse_color(optarg); } break; 
case LO_LINE_CLEAR_COLOR: if (state) { state->args.colors.line.cleared = parse_color(optarg); } break; case LO_LINE_CAPS_LOCK_COLOR: if (state) { state->args.colors.line.caps_lock = parse_color(optarg); } break; case LO_LINE_VER_COLOR: if (state) { state->args.colors.line.verifying = parse_color(optarg); } break; case LO_LINE_WRONG_COLOR: if (state) { state->args.colors.line.wrong = parse_color(optarg); } break; case LO_RING_COLOR: if (state) { state->args.colors.ring.input = parse_color(optarg); } break; case LO_RING_CLEAR_COLOR: if (state) { state->args.colors.ring.cleared = parse_color(optarg); } break; case LO_RING_CAPS_LOCK_COLOR: if (state) { state->args.colors.ring.caps_lock = parse_color(optarg); } break; case LO_RING_VER_COLOR: if (state) { state->args.colors.ring.verifying = parse_color(optarg); } break; case LO_RING_WRONG_COLOR: if (state) { state->args.colors.ring.wrong = parse_color(optarg); } break; case LO_SEP_COLOR: if (state) { state->args.colors.separator = parse_color(optarg); } break; case LO_TEXT_COLOR: if (state) { state->args.colors.text.input = parse_color(optarg); } break; case LO_TEXT_CLEAR_COLOR: if (state) { state->args.colors.text.cleared = parse_color(optarg); } break; case LO_TEXT_CAPS_LOCK_COLOR: if (state) { state->args.colors.text.caps_lock = parse_color(optarg); } break; case LO_TEXT_VER_COLOR: if (state) { state->args.colors.text.verifying = parse_color(optarg); } break; case LO_TEXT_WRONG_COLOR: if (state) { state->args.colors.text.wrong = parse_color(optarg); } break; default: fprintf(stderr, "%s", usage); return 1; } } return 0; } static bool file_exists(const char *path) { return path && access(path, R_OK) != -1; } static char *get_config_path(void) { static const char *config_paths[] = { "$HOME/.swaylock/config", "$XDG_CONFIG_HOME/swaylock/config", SYSCONFDIR "/swaylock/config", }; char *config_home = getenv("XDG_CONFIG_HOME"); if (!config_home || config_home[0] == '\0') { config_paths[1] = "$HOME/.config/swaylock/config"; } 
wordexp_t p; char *path; for (size_t i = 0; i < sizeof(config_paths) / sizeof(char *); ++i) { if (wordexp(config_paths[i], &p, 0) == 0) { path = strdup(p.we_wordv[0]); wordfree(&p); if (file_exists(path)) { return path; } free(path); } } return NULL; } static int load_config(char *path, struct swaylock_state *state, enum line_mode *line_mode) { FILE *config = fopen(path, "r"); if (!config) { swaylock_log(LOG_ERROR, "Failed to read config. Running without it."); return 0; } char *line = NULL; size_t line_size = 0; ssize_t nread; int line_number = 0; int result = 0; while ((nread = getline(&line, &line_size, config)) != -1) { line_number++; if (line[nread - 1] == '\n') { line[--nread] = '\0'; } if (!*line || line[0] == '#') { continue; } swaylock_log(LOG_DEBUG, "Config Line #%d: %s", line_number, line); char *flag = malloc(nread + 3); if (flag == NULL) { free(line); free(config); swaylock_log(LOG_ERROR, "Failed to allocate memory"); return 0; } sprintf(flag, "--%s", line); char *argv[] = {"swaylock", flag}; result = parse_options(2, argv, state, line_mode, NULL); free(flag); if (result != 0) { break; } } free(line); fclose(config); return 0; } static struct swaylock_state state; static void display_in(int fd, short mask, void *data) { if (wl_display_dispatch(state.display) == -1) { state.run_display = false; } } static void comm_in(int fd, short mask, void *data) { if (read_comm_reply()) { // Authentication succeeded state.run_display = false; } else { state.auth_state = AUTH_STATE_INVALID; schedule_indicator_clear(&state); ++state.failed_attempts; damage_state(&state); } } int main(int argc, char **argv) { swaylock_log_init(LOG_ERROR); initialize_pw_backend(argc, argv); enum line_mode line_mode = LM_LINE; state.failed_attempts = 0; state.args = (struct swaylock_args){ .mode = BACKGROUND_MODE_FILL, .font = strdup("sans-serif"), .radius = 50, .thickness = 10, .ignore_empty = false, .show_indicator = true, .show_caps_lock_indicator = false, .show_caps_lock_text = true, 
.show_keyboard_layout = false, .show_failed_attempts = false }; wl_list_init(&state.images); set_default_colors(&state.args.colors); char *config_path = NULL; int result = parse_options(argc, argv, NULL, NULL, &config_path); if (result != 0) { free(config_path); return result; } if (!config_path) { config_path = get_config_path(); } if (config_path) { swaylock_log(LOG_DEBUG, "Found config at %s", config_path); int config_status = load_config(config_path, &state, &line_mode); free(config_path); if (config_status != 0) { free(state.args.font); return config_status; } } if (argc > 1) { swaylock_log(LOG_DEBUG, "Parsing CLI Args"); int result = parse_options(argc, argv, &state, &line_mode, NULL); if (result != 0) { free(state.args.font); return result; } } if (line_mode == LM_INSIDE) { state.args.colors.line = state.args.colors.inside; } else if (line_mode == LM_RING) { state.args.colors.line = state.args.colors.ring; } #ifdef __linux__ // Most non-linux platforms require root to mlock() if (mlock(state.password.buffer, sizeof(state.password.buffer)) != 0) { swaylock_log(LOG_ERROR, "Unable to mlock() password memory."); return EXIT_FAILURE; } #endif wl_list_init(&state.surfaces); state.xkb.context = xkb_context_new(XKB_CONTEXT_NO_FLAGS); state.display = wl_display_connect(NULL); if (!state.display) { free(state.args.font); swaylock_log(LOG_ERROR, "Unable to connect to the compositor. 
" "If your compositor is running, check or set the " "WAYLAND_DISPLAY environment variable."); return EXIT_FAILURE; } struct wl_registry *registry = wl_display_get_registry(state.display); wl_registry_add_listener(registry, &registry_listener, &state); wl_display_roundtrip(state.display); assert(state.compositor && state.layer_shell && state.shm); if (!state.input_inhibit_manager) { free(state.args.font); swaylock_log(LOG_ERROR, "Compositor does not support the input " "inhibitor protocol, refusing to run insecurely"); return 1; } zwlr_input_inhibit_manager_v1_get_inhibitor(state.input_inhibit_manager); if (wl_display_roundtrip(state.display) == -1) { free(state.args.font); swaylock_log(LOG_ERROR, "Exiting - failed to inhibit input:" " is another lockscreen already running?"); return 2; } if (state.zxdg_output_manager) { struct swaylock_surface *surface; wl_list_for_each(surface, &state.surfaces, link) { surface->xdg_output = zxdg_output_manager_v1_get_xdg_output( state.zxdg_output_manager, surface->output); zxdg_output_v1_add_listener( surface->xdg_output, &_xdg_output_listener, surface); } wl_display_roundtrip(state.display); } else { swaylock_log(LOG_INFO, "Compositor does not support zxdg output " "manager, images assigned to named outputs will not work"); } struct swaylock_surface *surface; wl_list_for_each(surface, &state.surfaces, link) { create_layer_surface(surface); } if (state.args.daemonize) { wl_display_roundtrip(state.display); daemonize(); } state.eventloop = loop_create(); loop_add_fd(state.eventloop, wl_display_get_fd(state.display), POLLIN, display_in, NULL); loop_add_fd(state.eventloop, get_comm_reply_fd(), POLLIN, comm_in, NULL); state.run_display = true; while (state.run_display) { errno = 0; if (wl_display_flush(state.display) == -1 && errno != EAGAIN) { break; } loop_poll(state.eventloop); } free(state.args.font); return 0; }
960871.c
/**
******************************************************************************
* @file    audio_application.c
* @author  SRA - Central Labs
* @version v5.0.0
* @date    6-May-19
* @brief   Audio application.
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed under Software License Agreement
* SLA0077, (the "License"). You may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
*                        www.st.com/content/st_com/en/search.html#q=SLA0077-t=keywords-page=1
*
******************************************************************************
*/

/* Includes ------------------------------------------------------------------*/
#include "audio_application.h"

/** @addtogroup X_CUBE_MEMSMIC1_Applications
* @{
*/

/** @addtogroup Microphones_Acquisition
* @{
*/

/** @defgroup AUDIO_APPLICATION
* @{
*/

/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/

/** @defgroup AUDIO_APPLICATION_Exported_Variables
* @{
*/
/* Raw PDM samples delivered by the microphone driver (N_MS milliseconds worth). */
uint16_t PDM_Buffer[((((AUDIO_IN_CHANNELS * AUDIO_IN_SAMPLING_FREQUENCY) / 1000) * MAX_DECIMATION_FACTOR) / 16)* N_MS ];
/* PCM samples produced by the PDM-to-PCM conversion (N_MS milliseconds worth). */
uint16_t PCM_Buffer[((AUDIO_IN_CHANNELS*AUDIO_IN_SAMPLING_FREQUENCY)/1000) * N_MS ];
IKS02A1_AUDIO_Init_t MicParams;

/* FFT Manager */
AUDIO_FFT_instance_t audio_fft_M1;
/* Magnitude output of a single FFT window (audio_fft_M1.FFT_len/2 floats). */
float * FFT_Out;
/* Running sum of FFT magnitudes, averaged every nAccTotal windows. */
float * FFT_Average;

/**
* @}
*/

/** @defgroup AUDIO_APPLICATION_Private_Variables
* @{
*/
/* Private variables ---------------------------------------------------------*/

/**
* @}
*/

/** @defgroup AUDIO_APPLICATION_Exported_Function
* @{
*/

/**
* @brief  Half Transfer user callback, called by BSP functions.
* @param  Instance audio-in instance (unused here)
* @retval None
*/
void IKS02A1_AUDIO_IN_HalfTransfer_CallBack(uint32_t Instance)
{
  AudioProcess();
}

/**
* @brief  Transfer Complete user callback, called by BSP functions.
* @param  Instance audio-in instance (unused here)
* @retval None
*/
void IKS02A1_AUDIO_IN_TransferComplete_CallBack(uint32_t Instance)
{
  AudioProcess();
}

/**
* @brief  User function that is called when 1 ms of PDM data is available.
*         Converts PDM to PCM and feeds the FFT module; when the FFT module
*         has accumulated a full window it triggers the SW task interrupt.
*         User can add his own code here to perform some DSP or audio analysis.
* @param  none
* @retval None
*/
void AudioProcess(void)
{
  /*for L4 PDM to PCM conversion is performed in hardware by DFSDM peripheral*/
  IKS02A1_AUDIO_IN_PDMToPCM(IKS02A1_AUDIO_INSTANCE, (uint16_t * )PDM_Buffer, PCM_Buffer);

  /* 1 ms of PCM samples = sampling_frequency/1000 samples per channel. */
  if(AUDIO_FFT_Data_Input((int16_t *)PCM_Buffer, (audio_fft_M1.sampling_frequency/1000) , &audio_fft_M1))
  {
    SW_Task1_Start();
  }
}

/**
* @brief  Initializes the microphone acquisition peripherals.
* @param  AudioFreq   input sampling frequency in Hz
* @param  ChnlNbrIn   number of input channels
* @param  ChnlNbrOut  number of output channels (unused by this BSP init)
* @retval None
*/
void Init_Acquisition_Peripherals(uint32_t AudioFreq, uint32_t ChnlNbrIn, uint32_t ChnlNbrOut)
{
  MicParams.BitsPerSample = 16;
  MicParams.ChannelsNbr = ChnlNbrIn;
  MicParams.Device = AUDIO_IN_DIGITAL_MIC;
  MicParams.SampleRate = AudioFreq;
  MicParams.Volume = AUDIO_VOLUME_INPUT;

  IKS02A1_AUDIO_IN_Init(IKS02A1_AUDIO_INSTANCE, &MicParams);
}

/**
* @brief  Initializes the FFT module and allocates its working buffers.
* @param  AudioFreq  input sampling frequency in Hz
* @param  FFTLen     FFT length in samples
* @param  Overlap    window overlap fraction (0.0f .. 1.0f)
* @retval None
*/
void Init_FFT_Module(uint32_t AudioFreq, uint32_t FFTLen, float Overlap)
{
  audio_fft_M1.sampling_frequency = AudioFreq;
  audio_fft_M1.FFT_len = FFTLen;
  audio_fft_M1.overlap = Overlap;
  audio_fft_M1.win_type = AUDIO_FTT_HAMMING_WIN;
  audio_fft_M1.output = MAGNITUDE;
  AUDIO_FFT_Init(&audio_fft_M1);

  /* Allocate output buffers (FFT_len/2 magnitude bins each).
   * NOTE(review): calloc results are not checked; on an OOM failure the
   * first FFT window would dereference NULL. Consider an Error_Handler
   * path if heap size is not guaranteed by the linker script. */
  FFT_Out = calloc(audio_fft_M1.FFT_len / 2, sizeof(float));
  FFT_Average = calloc(audio_fft_M1.FFT_len / 2, sizeof(float));
  /* FFTAverage is the externally visible averaged-spectrum buffer
   * (declared in a shared header); presumably consumed by the streaming
   * code that reads NewData -- TODO confirm against caller. */
  FFTAverage = calloc(audio_fft_M1.FFT_len / 2, sizeof(float));
}

/**
* @brief  Starts the microphone acquisition (DMA recording into PDM_Buffer).
* @param  none
* @retval None
*/
void Start_Acquisition(void)
{
  IKS02A1_AUDIO_IN_Record(IKS02A1_AUDIO_INSTANCE, (uint8_t *) PDM_Buffer, AUDIO_IN_BUFFER_SIZE);
}

/**
* @brief  Initializes two SW interrupt with different priorities
* @param  None
* @retval None
*/
void SW_IRQ_Tasks_Init(void)
{
  HAL_NVIC_SetPriority((IRQn_Type)EXTI1_IRQn, 0x0D, 0);
  HAL_NVIC_EnableIRQ((IRQn_Type)EXTI1_IRQn);
}

/**
* @brief  Highest priority interrupt handler routine: accumulates FFT
*         magnitudes and publishes the average every nAccTotal windows.
* @param  None
* @retval None
*/
void SW_Task1_Callback(void)
{
  /* Number of (overlapping) FFT windows that span ~4 seconds of audio. */
  uint16_t nAccTotal = (uint16_t)(((float)(4 * audio_fft_M1.sampling_frequency)/((float)(audio_fft_M1.FFT_len) * (1.0f - (float)(audio_fft_M1.overlap)))));
  static uint16_t nAcc = 0;

  AUDIO_FFT_Process(&audio_fft_M1, FFT_Out);

  arm_add_f32(FFT_Out, FFT_Average, FFT_Average, audio_fft_M1.FFT_len/2);
  nAcc++;

  if (nAcc == nAccTotal)
  {
    arm_scale_f32(FFT_Average, 1.0f/(float)nAcc, FFT_Average, audio_fft_M1.FFT_len/2);

    /* Store data to be sent */
    FFTLen = audio_fft_M1.FFT_len;
    memcpy((void *)FFTAverage, (void *)FFT_Average, sizeof(float) * FFTLen / 2);
    SamplingFreq = (float)audio_fft_M1.sampling_frequency;
    NewData = 1U;

    /* BUGFIX: the accumulator holds FFT_len/2 floats; the original code
     * cleared only FFT_len/2 BYTES, leaving 3/4 of the buffer carrying
     * stale sums into the next averaging period. Clear the full buffer. */
    memset((uint8_t *)FFT_Average, 0, (audio_fft_M1.FFT_len / 2) * sizeof(float));
    nAcc = 0;
  }
}

/**
* @brief  Throws Highest priority interrupt
* @param  None
* @retval None
*/
void SW_Task1_Start(void)
{
  HAL_NVIC_SetPendingIRQ(EXTI1_IRQn);
}

/**
* @}
*/

/**
* @}
*/

/**
* @}
*/

/**
* @}
*/

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
355340.c
/* $Id$ $Revision$ */ /* vim:set shiftwidth=4 ts=8: */ /************************************************************************* * Copyright (c) 2011 AT&T Intellectual Property * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: See CVS logs. Details at http://www.graphviz.org/ *************************************************************************/ #include <assert.h> #include <signal.h> #include <stdio.h> #include "cgraph.h" #define NILgraph NIL(Agraph_t*) #define NILnode NIL(Agnode_t*) #define NILedge NIL(Agedge_t*) #define NILsym NIL(Agsym_t*) #define NILstr NIL(char*) main() { Agraph_t *g; Agnode_t *n; Agedge_t *e; Agsym_t *sym; char *val; while (g = agread(stdin, NIL(Agdisc_t *))) { #ifdef NOTDEF for (n = agfstnode(g); n; n = agnxtnode(g, n)) { fprintf(stderr, "%s\n", agnameof(n)); for (sym = agnxtattr(g, AGNODE, 0); sym; sym = agnxtattr(g, AGNODE, sym)) { val = agxget(n, sym); fprintf(stderr, "\t%s=%s\n", sym->name, val); } } #endif sym = agattr(g, AGRAPH, "nonsense", "junk"); fprintf(stderr,"sym = %x, %s\n", sym, sym? sym->defval : "(none)"); agwrite(g, stdout); } }
654895.c
/* Generated by Nim Compiler v1.6.2 */ #define NIM_INTBITS 64 #include "nimbase.h" #include <stdio.h> #include <fcntl.h> #include <string.h> #include <errno.h> #include <sys/stat.h> #include <sys/types.h> #include <setjmp.h> #undef LANGUAGE_C #undef MIPSEB #undef MIPSEL #undef PPC #undef R3000 #undef R4000 #undef i386 #undef linux #undef mips #undef near #undef far #undef powerpc #undef unix #define nimfr_(x, y) #define nimln_(x, y) typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct tySequence__sM4lkSb7zS6F7OVMvW9cffQ tySequence__sM4lkSb7zS6F7OVMvW9cffQ; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw; typedef struct tyObject_CatchableError__qrLSDoe2oBoAqNtJ9badtnA tyObject_CatchableError__qrLSDoe2oBoAqNtJ9badtnA; typedef struct Exception Exception; typedef struct RootObj RootObj; typedef struct tySequence__uB9b75OUPRENsBAu4AnoePA tySequence__uB9b75OUPRENsBAu4AnoePA; typedef struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g; typedef struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w; typedef struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ; typedef struct tyObject_GcStack__7fytPA5bBsob6See21YMRA tyObject_GcStack__7fytPA5bBsob6See21YMRA; typedef struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg; typedef struct tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ; typedef struct tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg; typedef struct tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw; typedef struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA; typedef struct 
tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw; typedef struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw; typedef struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg; typedef struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyTuple__ujsjpB2O9cjj3uDHsXbnSzg; typedef struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg; typedef struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ; typedef struct tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg; typedef struct TSafePoint TSafePoint; typedef struct tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg; typedef struct tyObject_StackTraceEntry__oLyohQ7O2XOvGnflOss8EA tyObject_StackTraceEntry__oLyohQ7O2XOvGnflOss8EA; struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; typedef NU8 tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A; typedef NU8 tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ; typedef N_NIMCALL_PTR(void, tyProc__ojoeKfW4VYIm36I9cpDTQIg) (void* p, NI op); typedef N_NIMCALL_PTR(void*, tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ) (void* p); struct TNimType { NI size; NI align; tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A kind; tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ flags; TNimType* base; TNimNode* node; void* finalizer; tyProc__ojoeKfW4VYIm36I9cpDTQIg marker; tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ deepcopy; }; typedef NU8 tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ; struct TNimNode { tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; struct RootObj { TNimType* m_type; }; struct Exception { RootObj Sup; Exception* parent; NCSTRING name; NimStringDesc* message; tySequence__uB9b75OUPRENsBAu4AnoePA* trace; Exception* up; }; struct 
tyObject_CatchableError__qrLSDoe2oBoAqNtJ9badtnA { Exception Sup; }; struct tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw { tyObject_CatchableError__qrLSDoe2oBoAqNtJ9badtnA Sup; }; struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g { NI refcount; TNimType* typ; }; struct tyObject_GcStack__7fytPA5bBsob6See21YMRA { void* bottom; }; struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w { NI len; NI cap; tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g** d; }; typedef tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ* tyArray__SPr7N6UKfuF549bNPiUvSRw[256]; typedef NU32 tyArray__BHbOSqU1t9b3Gt7K2c6fQig[24]; typedef tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* tyArray__N1u1nqOgmuJN9cSZrnMHgOQ[32]; typedef tyArray__N1u1nqOgmuJN9cSZrnMHgOQ tyArray__B6durA4ZCi1xjJvRtyYxMg[24]; typedef tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw* tyArray__lh2A89ahMmYg9bCmpVaplLbA[256]; struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA { tyArray__lh2A89ahMmYg9bCmpVaplLbA data; }; typedef tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* tyArray__0aOLqZchNi8nWtMTi8ND8w[2]; struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw { tyArray__0aOLqZchNi8nWtMTi8ND8w link; NI key; NI upperBound; NI level; }; struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg { tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* Field0; NI Field1; }; typedef tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyArray__LzOv2eCDGiceMKQstCLmhw[30]; struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg { NI len; tyArray__LzOv2eCDGiceMKQstCLmhw chunks; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg* next; }; struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg { NI minLargeObj; NI maxLargeObj; tyArray__SPr7N6UKfuF549bNPiUvSRw freeSmallChunks; NU32 flBitmap; tyArray__BHbOSqU1t9b3Gt7K2c6fQig slBitmap; tyArray__B6durA4ZCi1xjJvRtyYxMg matrix; tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw* llmem; NI currMem; NI maxMem; NI freeMem; NI occ; NI lastSize; tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA chunkStarts; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* root; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* deleted; 
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* last; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* freeAvlNodes; NIM_BOOL locked; NIM_BOOL blockChunkSizeIncrease; NI nextChunkSize; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw bottomData; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg heapLinks; }; struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg { NI stackScans; NI cycleCollections; NI maxThreshold; NI maxStackSize; NI maxStackCells; NI cycleTableSize; NI64 maxPause; }; struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ { NI counter; NI max; tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg* head; tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg** data; }; struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ { tyObject_GcStack__7fytPA5bBsob6See21YMRA stack; NI cycleThreshold; NI zctThreshold; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w zct; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w decStack; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tempStack; NI recGcLock; tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg region; tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg stat; tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ marked; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w additionalRoots; NI gcThreadId; }; typedef NU8 tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg; typedef NCSTRING tyArray__Djap3EijveoDksCG9bY8s5w[5]; struct TSafePoint { TSafePoint* prev; NI status; jmp_buf context; }; struct tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg { tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw Sup; }; typedef NU8 tyEnum_FileSeekPos__I9aQjuvWxs8BspGbxwsngWw; struct tyObject_StackTraceEntry__oLyohQ7O2XOvGnflOss8EA { NCSTRING procname; NI line; NCSTRING filename; }; struct tySequence__sM4lkSb7zS6F7OVMvW9cffQ { TGenericSeq Sup; NimStringDesc* data[SEQ_DECL_SIZE]; }; struct tySequence__uB9b75OUPRENsBAu4AnoePA { TGenericSeq Sup; tyObject_StackTraceEntry__oLyohQ7O2XOvGnflOss8EA data[SEQ_DECL_SIZE]; }; static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(void*, newSeq)(TNimType* typ, NI len); N_LIB_PRIVATE N_NIMCALL(NI, 
writeBuffer__systemZio_176)(FILE* f, void* buffer, NI len); N_LIB_PRIVATE N_NIMCALL(void, checkErr__systemZio_141)(FILE* f); static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src); static N_INLINE(void, copyMem__system_1727)(void* dest, void* source, NI size); static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, dollar___systemZdollars_3)(NI x); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, cstrToNimstr)(NCSTRING str); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, rawNewString)(NI space); N_LIB_PRIVATE N_NOINLINE(void, raiseEIO__systemZio_96)(NimStringDesc* msg) __attribute__((noreturn)); N_LIB_PRIVATE N_NOINLINE(void*, newObj)(TNimType* typ, NI size); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src); static N_INLINE(void, nimGCunrefNoCycle)(void* p); static N_INLINE(void, decRef__system_5316)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(NI, minuspercent___system_716)(NI x, NI y); static N_INLINE(NIM_BOOL, ltpercent___system_1005)(NI x, NI y); static N_INLINE(void, rtlAddZCT__system_5314)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); N_LIB_PRIVATE N_NOINLINE(void, addZCT__system_5265)(tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w* s, tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__system_5271)(void* usr); static N_INLINE(void, asgnRef)(void** dest, void* src); static N_INLINE(void, incRef__system_5309)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(NI, pluspercent___system_696)(NI x, NI y); N_LIB_PRIVATE N_NIMCALL(void, raiseExceptionEx)(Exception* e, NCSTRING ename, NCSTRING procname, NCSTRING filename, NI line); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__systemZio_391)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize); N_LIB_PRIVATE N_NIMCALL(int, getFileHandle__systemZio_229)(FILE* f); N_LIB_PRIVATE N_NIMCALL(void, close__systemZio_218)(FILE* 
f); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, setInheritable__systemZio_235)(int f, NIM_BOOL inheritable); N_LIB_PRIVATE N_NIMCALL(int, getOsFileHandle__systemZio_232)(FILE* f); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, setLengthStr)(NimStringDesc* s, NI newLen); N_LIB_PRIVATE N_NIMCALL(void, unsureAsgnRef)(void** dest, void* src); static N_INLINE(NIM_BOOL, eqeq___system_7724)(NCSTRING x, NCSTRING y); static N_INLINE(void, pushSafePoint)(TSafePoint* s); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAll__systemZio_352)(FILE* file); N_LIB_PRIVATE N_NIMCALL(NI64, rawFileSize__systemZio_335)(FILE* file); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAllFile__systemZio_343)(FILE* file, NI64 len); N_NIMCALL(NimStringDesc*, mnewString)(NI len); N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__systemZio_144)(FILE* f, void* buffer, NI len); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, endOfFile__systemZio_339)(FILE* f); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAllBuffer__systemZio_330)(FILE* file); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest, NI addlen); static N_INLINE(void, popSafePoint)(void); N_LIB_PRIVATE N_NIMCALL(void, nimLeaveFinally)(void); N_LIB_PRIVATE N_NIMCALL(void, reraiseException)(void); N_NIMCALL(NimStringDesc*, rawNewString)(NI cap); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, readLine__systemZio_240)(FILE* f, NimStringDesc** line); N_LIB_PRIVATE N_NOINLINE(void, raiseEOF__systemZio_117)(void) __attribute__((noreturn)); N_LIB_PRIVATE N_NIMCALL(void, write__systemZio_205)(FILE* f, NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(tySequence__sM4lkSb7zS6F7OVMvW9cffQ*, newSeq__systemZio_503)(NI len); extern TNimType NTIseqLstringT__sM4lkSb7zS6F7OVMvW9cffQ_; STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_2, "errno: ", 7); STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_3, " `", 2); STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_4, "`", 1); extern TNimType NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_; extern TNimType NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_; 
STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_5, "cannot write string to file", 27); N_LIB_PRIVATE NIM_CONST tyArray__Djap3EijveoDksCG9bY8s5w FormatOpen__systemZio_373 = {"rb", "wb", "w+b", "r+b", "ab"} ; STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_6, "cannot open: ", 13); extern TNimType NTIrefeoferror__LEclZrWX2FQAodlapxGITw_; extern TNimType NTIeoferror__KGSY1JdrNB7Xi8KDhXFhSg_; STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_8, "EOF reached", 11); N_LIB_PRIVATE TNimType NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_; N_LIB_PRIVATE TNimType NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_; N_LIB_PRIVATE TNimType NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_; STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_11, "cannot set file position", 24); STRING_LITERAL(TM__MnCJ0VAmeZ9aTATUB39cx60Q_12, "cannot retrieve file position", 29); extern tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ gch__system_5218; extern TSafePoint* excHandler__system_2565; extern TSafePoint* excHandler__system_2565; extern TSafePoint* excHandler__system_2565; extern TSafePoint* excHandler__system_2565; static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s) { NCSTRING result; result = (NCSTRING)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = (s == ((NimStringDesc*) NIM_NIL)); if (T3_) goto LA4_; T3_ = ((*s).Sup.len == ((NI) 0)); LA4_: ; if (!T3_) goto LA5_; result = ""; } goto LA1_; LA5_: ; { result = ((NCSTRING) ((*s).data)); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(void, echoBinSafe)(NimStringDesc** args, NI argsLen_0) { int T5_; int T6_; flockfile(__stdoutp); { NimStringDesc** s; NI i; s = (NimStringDesc**)0; i = ((NI) 0); { while (1) { int T4_; if (!(i < argsLen_0)) goto LA3; s = (&args[i]); T4_ = (int)0; T4_ = fwrite(((void*) (nimToCStringConv((*s)))), ((size_t) (((*s) ? 
(*s)->Sup.len : 0))), ((size_t) 1), __stdoutp); (void)(T4_); i += ((NI) 1); } LA3: ; } } T5_ = (int)0; T5_ = fwrite(((void*) ("\012")), ((size_t) 1), ((size_t) 1), __stdoutp); (void)(T5_); T6_ = (int)0; T6_ = fflush(__stdoutp); (void)(T6_); funlockfile(__stdoutp); } N_LIB_PRIVATE N_NIMCALL(int, getFileHandle__systemZio_229)(FILE* f) { int result; result = (int)0; result = fileno(f); return result; } N_LIB_PRIVATE N_NIMCALL(tySequence__sM4lkSb7zS6F7OVMvW9cffQ*, newSeq__systemZio_503)(NI len) { tySequence__sM4lkSb7zS6F7OVMvW9cffQ* result; result = NIM_NIL; result = (tySequence__sM4lkSb7zS6F7OVMvW9cffQ*) newSeq((&NTIseqLstringT__sM4lkSb7zS6F7OVMvW9cffQ_), len); return result; } static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size) { void* T1_; T1_ = (void*)0; T1_ = memcpy(dest, source, ((size_t) (size))); } static N_INLINE(void, copyMem__system_1727)(void* dest, void* source, NI size) { nimCopyMem(dest, source, size); } static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src) { { if (!!((src == ((NimStringDesc*) NIM_NIL)))) goto LA3_; copyMem__system_1727(((void*) ((&(*dest).data[(*dest).Sup.len]))), ((void*) ((*src).data)), ((NI) ((NI)((*src).Sup.len + ((NI) 1))))); (*dest).Sup.len += (*src).Sup.len; } LA3_: ; } static N_INLINE(NI, minuspercent___system_716)(NI x, NI y) { NI result; result = (NI)0; result = ((NI) ((NU)((NU64)(((NU) (x))) - (NU64)(((NU) (y)))))); return result; } static N_INLINE(NIM_BOOL, ltpercent___system_1005)(NI x, NI y) { NIM_BOOL result; result = (NIM_BOOL)0; result = ((NU64)(((NU) (x))) < (NU64)(((NU) (y)))); return result; } static N_INLINE(void, rtlAddZCT__system_5314)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { addZCT__system_5265((&gch__system_5218.zct), c); } static N_INLINE(void, decRef__system_5316)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = minuspercent___system_716((*c).refcount, ((NI) 8)); { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = ltpercent___system_1005((*c).refcount, ((NI) 8)); if 
(!T3_) goto LA4_; rtlAddZCT__system_5314(c); } LA4_: ; } static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__system_5271)(void* usr) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* result; NI T1_; result = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T1_ = (NI)0; T1_ = minuspercent___system_716(((NI) (ptrdiff_t) (usr)), ((NI) 16)); result = ((tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*) (T1_)); return result; } static N_INLINE(void, nimGCunrefNoCycle)(void* p) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T1_; T1_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T1_ = usrToCell__system_5271(p); decRef__system_5316(T1_); } static N_INLINE(NI, pluspercent___system_696)(NI x, NI y) { NI result; result = (NI)0; result = ((NI) ((NU)((NU64)(((NU) (x))) + (NU64)(((NU) (y)))))); return result; } static N_INLINE(void, incRef__system_5309)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = pluspercent___system_696((*c).refcount, ((NI) 8)); } static N_INLINE(void, asgnRef)(void** dest, void* src) { { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T5_; if (!!((src == NIM_NIL))) goto LA3_; T5_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T5_ = usrToCell__system_5271(src); incRef__system_5309(T5_); } LA3_: ; { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T10_; if (!!(((*dest) == NIM_NIL))) goto LA8_; T10_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T10_ = usrToCell__system_5271((*dest)); decRef__system_5316(T10_); } LA8_: ; (*dest) = src; } N_LIB_PRIVATE N_NOINLINE(void, raiseEIO__systemZio_96)(NimStringDesc* msg) { tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw* T1_; NimStringDesc* T2_; T1_ = NIM_NIL; T1_ = (tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw*) newObj((&NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_), sizeof(tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw)); (*T1_).Sup.Sup.Sup.m_type = (&NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_); (*T1_).Sup.Sup.name = "IOError"; T2_ = NIM_NIL; T2_ = (*T1_).Sup.Sup.message; (*T1_).Sup.Sup.message = copyStringRC1(msg); if (T2_) nimGCunrefNoCycle(T2_); 
asgnRef((void**) (&(*T1_).Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T1_, "IOError", "raiseEIO", "io.nim", 139); } N_LIB_PRIVATE N_NIMCALL(void, checkErr__systemZio_141)(FILE* f) { { int T3_; NimStringDesc* msg; NimStringDesc* T6_; NimStringDesc* T7_; NCSTRING T8_; NimStringDesc* T9_; T3_ = (int)0; T3_ = ferror(f); if (!!((T3_ == ((NI32) 0)))) goto LA4_; T6_ = NIM_NIL; T7_ = NIM_NIL; T7_ = dollar___systemZdollars_3(((NI) (errno))); T8_ = (NCSTRING)0; T8_ = strerror(errno); T9_ = NIM_NIL; T9_ = cstrToNimstr(T8_); T6_ = rawNewString((T7_ ? T7_->Sup.len : 0) + (T9_ ? T9_->Sup.len : 0) + 10); appendString(T6_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_2)); appendString(T6_, T7_); appendString(T6_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_3)); appendString(T6_, T9_); appendString(T6_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_4)); msg = T6_; clearerr(f); raiseEIO__systemZio_96(msg); } LA4_: ; } N_LIB_PRIVATE N_NIMCALL(NI, writeBuffer__systemZio_176)(FILE* f, void* buffer, NI len) { NI result; int T1_; result = (NI)0; T1_ = (int)0; T1_ = fwrite(buffer, ((size_t) 1), ((size_t) (len)), f); result = ((NI) (T1_)); checkErr__systemZio_141(f); return result; } N_LIB_PRIVATE N_NIMCALL(void, write__systemZio_205)(FILE* f, NimStringDesc* s) { { NI T3_; T3_ = (NI)0; T3_ = writeBuffer__systemZio_176(f, ((void*) (nimToCStringConv(s))), ((NI) ((s ? s->Sup.len : 0)))); if (!!((T3_ == (s ? 
s->Sup.len : 0)))) goto LA4_; raiseEIO__systemZio_96(((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_5)); } LA4_: ; } N_LIB_PRIVATE N_NIMCALL(void, flushFile__systemZio_227)(FILE* f) { int T1_; T1_ = (int)0; T1_ = fflush(f); (void)(T1_); } N_LIB_PRIVATE N_NIMCALL(void, close__systemZio_218)(FILE* f) { { int T5_; if (!!((f == 0))) goto LA3_; T5_ = (int)0; T5_ = fclose(f); (void)(T5_); } LA3_: ; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, setInheritable__systemZio_235)(int f, NIM_BOOL inheritable) { NIM_BOOL result; int flags; int T10_; { result = (NIM_BOOL)0; flags = fcntl(f, F_GETFD); { if (!(flags == ((NI32) -1))) goto LA3_; result = NIM_FALSE; goto BeforeRet_; } LA3_: ; { if (!inheritable) goto LA7_; flags = (NI32)(flags & (NI32)((NU32) ~(FD_CLOEXEC))); } goto LA5_; LA7_: ; { flags = (NI32)(flags | FD_CLOEXEC); } LA5_: ; T10_ = (int)0; T10_ = fcntl(f, F_SETFD, flags); result = !((T10_ == ((NI32) -1))); }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(int, getOsFileHandle__systemZio_232)(FILE* f) { int result; result = (int)0; result = fileno(f); return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__systemZio_391)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize) { NIM_BOOL result; void* p; { result = (NIM_BOOL)0; p = fopen(nimToCStringConv(filename), FormatOpen__systemZio_373[(mode)- 0]); { FILE* f2; struct stat res; if (!!((p == NIM_NIL))) goto LA3_; f2 = ((FILE*) (p)); { NIM_BOOL T7_; int T8_; int T9_; T7_ = (NIM_BOOL)0; T8_ = (int)0; T8_ = getFileHandle__systemZio_229(f2); T9_ = (int)0; T9_ = fstat(T8_, (&res)); T7_ = (((NI32) 0) <= T9_); if (!(T7_)) goto LA10_; T7_ = S_ISDIR(res.st_mode); LA10_: ; if (!T7_) goto LA11_; close__systemZio_218(f2); result = NIM_FALSE; goto BeforeRet_; } LA11_: ; { int T15_; NIM_BOOL T16_; T15_ = (int)0; T15_ = getOsFileHandle__systemZio_232(f2); T16_ = (NIM_BOOL)0; T16_ = setInheritable__systemZio_235(T15_, NIM_FALSE); if (!!(T16_)) goto LA17_; close__systemZio_218(f2); result = 
NIM_FALSE; goto BeforeRet_; } LA17_: ; result = NIM_TRUE; (*f) = ((FILE*) (p)); { NIM_BOOL T21_; int T25_; T21_ = (NIM_BOOL)0; T21_ = (((NI) 0) < bufSize); if (!(T21_)) goto LA22_; T21_ = (bufSize <= ((NI) 2147483647)); LA22_: ; if (!T21_) goto LA23_; T25_ = (int)0; T25_ = setvbuf((*f), NIM_NIL, _IOFBF, ((size_t) (bufSize))); (void)(T25_); } goto LA19_; LA23_: ; { int T29_; if (!(bufSize == ((NI) 0))) goto LA27_; T29_ = (int)0; T29_ = setvbuf((*f), NIM_NIL, _IONBF, ((size_t) 0)); (void)(T29_); } goto LA19_; LA27_: ; LA19_: ; } LA3_: ; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(FILE*, open__systemZio_432)(NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize) { FILE* result; result = (FILE*)0; { NIM_BOOL T3_; tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw* T6_; NimStringDesc* T7_; T3_ = (NIM_BOOL)0; T3_ = open__systemZio_391(&result, filename, mode, bufSize); if (!!(T3_)) goto LA4_; T6_ = NIM_NIL; T6_ = (tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw*) newObj((&NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_), sizeof(tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw)); (*T6_).Sup.Sup.Sup.m_type = (&NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_); (*T6_).Sup.Sup.name = "IOError"; T7_ = NIM_NIL; T7_ = rawNewString((filename ? 
filename->Sup.len : 0) + 13); appendString(T7_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_6)); appendString(T7_, filename); asgnRef((void**) (&(*T6_).Sup.Sup.message), T7_); asgnRef((void**) (&(*T6_).Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T6_, "IOError", "open", "io.nim", 745); } LA4_: ; return result; } static N_INLINE(NIM_BOOL, eqeq___system_7724)(NCSTRING x, NCSTRING y) { NIM_BOOL result; result = (NIM_BOOL)0; { if (!(((void*) (x)) == ((void*) (y)))) goto LA3_; result = NIM_TRUE; } goto LA1_; LA3_: ; { NIM_BOOL T6_; T6_ = (NIM_BOOL)0; T6_ = (x == 0); if (T6_) goto LA7_; T6_ = (y == 0); LA7_: ; if (!T6_) goto LA8_; result = NIM_FALSE; } goto LA1_; LA8_: ; { int T11_; T11_ = (int)0; T11_ = strcmp(x, y); result = (T11_ == ((NI32) 0)); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, readLine__systemZio_240)(FILE* f, NimStringDesc** line) { NIM_BOOL result; NI pos; NI sp; { result = (NIM_BOOL)0; pos = ((NI) 0); sp = ((((*line) ? (*line)->Sup.len : 0) >= ((NI) 80)) ? ((*line) ? 
(*line)->Sup.len : 0) : ((NI) 80)); unsureAsgnRef((void**) (&(*line)), setLengthStr((*line), ((NI) (sp)))); { while (1) { NIM_BOOL fgetsSuccess; void* m; { NI i; NI i_2; i = (NI)0; i_2 = ((NI) 0); { while (1) { if (!(i_2 < sp)) goto LA5; i = i_2; (*line)->data[(NI)(pos + i)] = 10; i_2 += ((NI) 1); } LA5: ; } } fgetsSuccess = (NIM_BOOL)0; { while (1) { { NCSTRING T9_; NIM_BOOL T10_; T9_ = (NCSTRING)0; T9_ = fgets(((NCSTRING) ((&(*line)->data[pos]))), ((int) (sp)), f); T10_ = (NIM_BOOL)0; T10_ = eqeq___system_7724(T9_, ((NCSTRING) NIM_NIL)); fgetsSuccess = !(T10_); { if (!fgetsSuccess) goto LA13_; goto LA6; } LA13_: ; { if (!(errno == EINTR)) goto LA17_; errno = ((int) 0); clearerr(f); goto LA8; } LA17_: ; checkErr__systemZio_141(f); goto LA6; } LA8: ; } } LA6: ; m = memchr(((void*) ((&(*line)->data[pos]))), ((int) 10), ((size_t) (sp))); { NI last; NIM_BOOL T42_; if (!!((m == NIM_NIL))) goto LA21_; last = (NI)(((NI) (ptrdiff_t) (m)) - ((NI) (ptrdiff_t) ((&(*line)->data[((NI) 0)])))); { NIM_BOOL T25_; NIM_BOOL T29_; T25_ = (NIM_BOOL)0; T25_ = (((NI) 0) < last); if (!(T25_)) goto LA26_; T25_ = ((NU8)((*line)->data[(NI)(last - ((NI) 1))]) == (NU8)(13)); LA26_: ; if (!T25_) goto LA27_; unsureAsgnRef((void**) (&(*line)), setLengthStr((*line), ((NI) ((NI)(last - ((NI) 1)))))); T29_ = (NIM_BOOL)0; T29_ = (((NI) 1) < last); if (T29_) goto LA30_; T29_ = fgetsSuccess; LA30_: ; result = T29_; goto BeforeRet_; } goto LA23_; LA27_: ; { NIM_BOOL T32_; T32_ = (NIM_BOOL)0; T32_ = (((NI) 0) < last); if (!(T32_)) goto LA33_; T32_ = ((NU8)((*line)->data[(NI)(last - ((NI) 1))]) == (NU8)(0)); LA33_: ; if (!T32_) goto LA34_; { NIM_BOOL T38_; T38_ = (NIM_BOOL)0; T38_ = (last < (NI)((NI)(pos + sp) - ((NI) 1))); if (!(T38_)) goto LA39_; T38_ = !(((NU8)((*line)->data[(NI)(last + ((NI) 1))]) == (NU8)(0))); LA39_: ; if (!T38_) goto LA40_; last -= ((NI) 1); } LA40_: ; } goto LA23_; LA34_: ; LA23_: ; unsureAsgnRef((void**) (&(*line)), setLengthStr((*line), ((NI) (last)))); T42_ = (NIM_BOOL)0; 
T42_ = (((NI) 0) < last); if (T42_) goto LA43_; T42_ = fgetsSuccess; LA43_: ; result = T42_; goto BeforeRet_; } goto LA19_; LA21_: ; { sp -= ((NI) 1); } LA19_: ; pos += sp; sp = ((NI) 128); unsureAsgnRef((void**) (&(*line)), setLengthStr((*line), ((NI) ((NI)(pos + sp))))); } } }BeforeRet_: ; return result; } static N_INLINE(void, pushSafePoint)(TSafePoint* s) { (*s).prev = excHandler__system_2565; excHandler__system_2565 = s; } N_LIB_PRIVATE N_NIMCALL(NI64, rawFileSize__systemZio_335)(FILE* file) { NI64 result; NI64 oldPos; int T1_; int T2_; result = (NI64)0; oldPos = ftello(file); T1_ = (int)0; T1_ = fseeko(file, IL64(0), ((int) 2)); (void)(T1_); result = ftello(file); T2_ = (int)0; T2_ = fseeko(file, oldPos, ((int) 0)); (void)(T2_); return result; } N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__systemZio_144)(FILE* f, void* buffer, NI len) { NI result; size_t T1_; result = (NI)0; T1_ = (size_t)0; T1_ = fread(buffer, ((size_t) 1), ((size_t) (len)), f); result = ((NI) (T1_)); { if (!!((result == ((NI) (len))))) goto LA4_; checkErr__systemZio_141(f); } LA4_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, endOfFile__systemZio_339)(FILE* f) { NIM_BOOL result; int c; int T1_; { result = (NIM_BOOL)0; c = fgetc(f); T1_ = (int)0; T1_ = ungetc(c, f); (void)(T1_); result = (c < ((NI32) 0)); goto BeforeRet_; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAllBuffer__systemZio_330)(FILE* file) { NimStringDesc* result; NimStringDesc* buffer; result = NIM_NIL; result = ((NimStringDesc*) NIM_NIL); buffer = mnewString(((NI) 4000)); { while (1) { NI bytesRead; bytesRead = readBuffer__systemZio_144(file, ((void*) ((&buffer->data[((NI) 0)]))), ((NI) 4000)); { if (!(bytesRead == ((NI) 4000))) goto LA5_; result = resizeString(result, (buffer ? buffer->Sup.len : 0) + 0); appendString(result, buffer); } goto LA3_; LA5_: ; { buffer = setLengthStr(buffer, ((NI) (bytesRead))); result = resizeString(result, (buffer ? 
buffer->Sup.len : 0) + 0); appendString(result, buffer); goto LA1; } LA3_: ; } } LA1: ; return result; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAllFile__systemZio_343)(FILE* file, NI64 len) { NimStringDesc* result; NI bytes; result = NIM_NIL; result = mnewString(((NI) (len))); bytes = readBuffer__systemZio_144(file, ((void*) ((&result->data[((NI) 0)]))), ((NI) (len))); { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = endOfFile__systemZio_339(file); if (!T3_) goto LA4_; { if (!(((NI64) (bytes)) < len)) goto LA8_; result = setLengthStr(result, ((NI) (bytes))); } LA8_: ; } goto LA1_; LA4_: ; { NimStringDesc* T11_; T11_ = NIM_NIL; T11_ = readAllBuffer__systemZio_330(file); result = resizeString(result, (T11_ ? T11_->Sup.len : 0) + 0); appendString(result, T11_); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readAll__systemZio_352)(FILE* file) { NimStringDesc* result; NI64 len; result = NIM_NIL; { if (!!((file == __stdinp))) goto LA3_; len = rawFileSize__systemZio_335(file); } goto LA1_; LA3_: ; { len = IL64(-1); } LA1_: ; { if (!(IL64(0) < len)) goto LA8_; result = readAllFile__systemZio_343(file, len); } goto LA6_; LA8_: ; { result = readAllBuffer__systemZio_330(file); } LA6_: ; return result; } static N_INLINE(void, popSafePoint)(void) { excHandler__system_2565 = (*excHandler__system_2565).prev; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readFile__systemZio_471)(NimStringDesc* filename) { NimStringDesc* volatile result; FILE* f; result = NIM_NIL; f = ((FILE*) NIM_NIL); { NIM_BOOL T3_; TSafePoint TM__MnCJ0VAmeZ9aTATUB39cx60Q_7; T3_ = (NIM_BOOL)0; T3_ = open__systemZio_391(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1)); if (!T3_) goto LA4_; pushSafePoint(&TM__MnCJ0VAmeZ9aTATUB39cx60Q_7); TM__MnCJ0VAmeZ9aTATUB39cx60Q_7.status = _setjmp(TM__MnCJ0VAmeZ9aTATUB39cx60Q_7.context); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_7.status == 0) { result = readAll__systemZio_352(f); popSafePoint(); } else { popSafePoint(); } { 
close__systemZio_218(f); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_7.status != 0) nimLeaveFinally(); } if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_7.status != 0) reraiseException(); } goto LA1_; LA4_: ; { tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw* T9_; NimStringDesc* T10_; T9_ = NIM_NIL; T9_ = (tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw*) newObj((&NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_), sizeof(tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw)); (*T9_).Sup.Sup.Sup.m_type = (&NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_); (*T9_).Sup.Sup.name = "IOError"; T10_ = NIM_NIL; T10_ = rawNewString((filename ? filename->Sup.len : 0) + 13); appendString(T10_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_6)); appendString(T10_, filename); asgnRef((void**) (&(*T9_).Sup.Sup.message), T10_); asgnRef((void**) (&(*T9_).Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T9_, "IOError", "readFile", "io.nim", 853); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NI, readChars__systemZio_156)(FILE* f, NIM_CHAR* a, NI aLen_0) { NI result; result = (NI)0; result = readBuffer__systemZio_144(f, ((void*) ((&a[((NI) 0)]))), ((NI) (aLen_0))); return result; } N_LIB_PRIVATE N_NOINLINE(void, raiseEOF__systemZio_117)(void) { tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg* T1_; NimStringDesc* T2_; T1_ = NIM_NIL; T1_ = (tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg*) newObj((&NTIrefeoferror__LEclZrWX2FQAodlapxGITw_), sizeof(tyObject_EOFError__KGSY1JdrNB7Xi8KDhXFhSg)); (*T1_).Sup.Sup.Sup.Sup.m_type = (&NTIeoferror__KGSY1JdrNB7Xi8KDhXFhSg_); (*T1_).Sup.Sup.Sup.name = "EOFError"; T2_ = NIM_NIL; T2_ = (*T1_).Sup.Sup.Sup.message; (*T1_).Sup.Sup.Sup.message = copyStringRC1(((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_8)); if (T2_) nimGCunrefNoCycle(T2_); asgnRef((void**) (&(*T1_).Sup.Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T1_, "EOFError", "raiseEOF", "io.nim", 142); } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, readLine__systemZio_273)(FILE* f) { NimStringDesc* result; result = NIM_NIL; 
result = rawNewString(((NI) 80)); { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = readLine__systemZio_240(f, (&result)); if (!!(T3_)) goto LA4_; raiseEOF__systemZio_117(); } LA4_: ; return result; } N_LIB_PRIVATE N_NIMCALL(void, writeFile__systemZio_476)(NimStringDesc* filename, NimStringDesc* content) { FILE* f; f = ((FILE*) NIM_NIL); { NIM_BOOL T3_; TSafePoint TM__MnCJ0VAmeZ9aTATUB39cx60Q_9; T3_ = (NIM_BOOL)0; T3_ = open__systemZio_391(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 1), ((NI) -1)); if (!T3_) goto LA4_; pushSafePoint(&TM__MnCJ0VAmeZ9aTATUB39cx60Q_9); TM__MnCJ0VAmeZ9aTATUB39cx60Q_9.status = _setjmp(TM__MnCJ0VAmeZ9aTATUB39cx60Q_9.context); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_9.status == 0) { write__systemZio_205(f, content); popSafePoint(); } else { popSafePoint(); } { close__systemZio_218(f); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_9.status != 0) nimLeaveFinally(); } if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_9.status != 0) reraiseException(); } goto LA1_; LA4_: ; { tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw* T9_; NimStringDesc* T10_; T9_ = NIM_NIL; T9_ = (tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw*) newObj((&NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_), sizeof(tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw)); (*T9_).Sup.Sup.Sup.m_type = (&NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_); (*T9_).Sup.Sup.name = "IOError"; T10_ = NIM_NIL; T10_ = rawNewString((filename ? 
filename->Sup.len : 0) + 13); appendString(T10_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_6)); appendString(T10_, filename); asgnRef((void**) (&(*T9_).Sup.Sup.message), T10_); asgnRef((void**) (&(*T9_).Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T9_, "IOError", "writeFile", "io.nim", 866); } LA1_: ; } N_LIB_PRIVATE N_NIMCALL(tySequence__sM4lkSb7zS6F7OVMvW9cffQ*, readLines__systemZio_494)(NimStringDesc* filename, NI n) { tySequence__sM4lkSb7zS6F7OVMvW9cffQ* volatile result; FILE* f; result = NIM_NIL; f = ((FILE*) NIM_NIL); { NIM_BOOL T3_; TSafePoint TM__MnCJ0VAmeZ9aTATUB39cx60Q_10; T3_ = (NIM_BOOL)0; T3_ = open__systemZio_391(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1)); if (!T3_) goto LA4_; pushSafePoint(&TM__MnCJ0VAmeZ9aTATUB39cx60Q_10); TM__MnCJ0VAmeZ9aTATUB39cx60Q_10.status = _setjmp(TM__MnCJ0VAmeZ9aTATUB39cx60Q_10.context); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_10.status == 0) { result = newSeq__systemZio_503(n); { NI i; NI colontmp_; NI res; i = (NI)0; colontmp_ = (NI)0; colontmp_ = (NI)(((NI) (n)) - ((NI) 1)); res = ((NI) 0); { while (1) { if (!(res <= colontmp_)) goto LA8; i = res; { NIM_BOOL T11_; T11_ = (NIM_BOOL)0; T11_ = readLine__systemZio_240(f, (&result->data[i])); if (!!(T11_)) goto LA12_; raiseEOF__systemZio_117(); } LA12_: ; res += ((NI) 1); } LA8: ; } } popSafePoint(); } else { popSafePoint(); } { close__systemZio_218(f); if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_10.status != 0) nimLeaveFinally(); } if (TM__MnCJ0VAmeZ9aTATUB39cx60Q_10.status != 0) reraiseException(); } goto LA1_; LA4_: ; { tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw* T17_; NimStringDesc* T18_; T17_ = NIM_NIL; T17_ = (tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw*) newObj((&NTIrefioerror__HMIVdYjdZYWskTmTQVo5BQ_), sizeof(tyObject_IOError__iLZrPn9anoh9ad1MmO0RczFw)); (*T17_).Sup.Sup.Sup.m_type = (&NTIioerror__iLZrPn9anoh9ad1MmO0RczFw_); (*T17_).Sup.Sup.name = "IOError"; T18_ = NIM_NIL; T18_ = rawNewString((filename ? 
filename->Sup.len : 0) + 13); appendString(T18_, ((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_6)); appendString(T18_, filename); asgnRef((void**) (&(*T17_).Sup.Sup.message), T18_); asgnRef((void**) (&(*T17_).Sup.Sup.parent), ((Exception*) NIM_NIL)); raiseExceptionEx((Exception*)T17_, "IOError", "readLines", "io.nim", 896); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__systemZio_422)(FILE** f, int filehandle, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode) { NIM_BOOL result; int oshandle; { result = (NIM_BOOL)0; oshandle = filehandle; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = setInheritable__systemZio_235(oshandle, NIM_FALSE); if (!!(T3_)) goto LA4_; result = NIM_FALSE; goto BeforeRet_; } LA4_: ; (*f) = fdopen(filehandle, FormatOpen__systemZio_373[(mode)- 0]); result = !(((*f) == ((FILE*) NIM_NIL))); }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(void, setFilePos__systemZio_438)(FILE* f, NI64 pos, tyEnum_FileSeekPos__I9aQjuvWxs8BspGbxwsngWw relativeTo) { { int T3_; T3_ = (int)0; T3_ = fseeko(f, pos, ((int) (relativeTo))); if (!!((T3_ == ((NI32) 0)))) goto LA4_; raiseEIO__systemZio_96(((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_11)); } LA4_: ; } N_LIB_PRIVATE N_NIMCALL(NI64, getFilePos__systemZio_451)(FILE* f) { NI64 result; result = (NI64)0; result = ftello(f); { if (!(result < IL64(0))) goto LA3_; raiseEIO__systemZio_96(((NimStringDesc*) &TM__MnCJ0VAmeZ9aTATUB39cx60Q_12)); } LA3_: ; return result; } N_LIB_PRIVATE N_NIMCALL(void, write__systemZio_306)(FILE* f, NimStringDesc** a, NI aLen_0) { { NimStringDesc** x; NI i; x = (NimStringDesc**)0; i = ((NI) 0); { while (1) { if (!(i < aLen_0)) goto LA3; x = (&a[i]); write__systemZio_205(f, (*x)); i += ((NI) 1); } LA3: ; } } } N_LIB_PRIVATE N_NIMCALL(void, stdlib_ioDatInit000)(void) { static TNimNode TM__MnCJ0VAmeZ9aTATUB39cx60Q_0[1]; NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.size = sizeof(void*); NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.align = NIM_ALIGNOF(void*); 
NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.kind = 18; NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.base = 0; NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.flags = 1; NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_.node = &TM__MnCJ0VAmeZ9aTATUB39cx60Q_0[0]; NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_.size = sizeof(FILE*); NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_.align = NIM_ALIGNOF(FILE*); NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_.kind = 21; NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_.base = (&NTIcfile__MAWzaQJYFu3mlxj0Ppxhmw_); NTIfile__XBeRj4rw9bUuE7CB3DS1rgg_.flags = 3; NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_.size = sizeof(int); NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_.align = NIM_ALIGNOF(int); NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_.kind = 34; NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_.base = 0; NTIfilehandle__2gIj3gQlK3HZJjQaYCP6ZQ_.flags = 3; }
574378.c
/** @file Implementation for PlatformBootManagerLib library class interfaces. Copyright (c) 2021 Loongson Technology Corporation Limited. All rights reserved.<BR> SPDX-License-Identifier: BSD-2-Clause-Patent **/ #include <IndustryStandard/Pci22.h> #include <Library/BootLogoLib.h> #include <Library/PcdLib.h> #include <Library/QemuBootOrderLib.h> #include <Library/UefiBootManagerLib.h> #include <Protocol/FirmwareVolume2.h> #include <Protocol/LoadedImage.h> #include <Protocol/PciIo.h> #include <Library/UefiBootServicesTableLib.h> #include <Library/DebugLib.h> #include <Library/MemoryAllocationLib.h> #include <Library/UefiLib.h> #include <Library/BaseMemoryLib.h> #include "PlatformBm.h" STATIC PLATFORM_SERIAL_CONSOLE mSerialConsole = { // // VENDOR_DEVICE_PATH SerialDxe // { { HARDWARE_DEVICE_PATH, HW_VENDOR_DP, DP_NODE_LEN (VENDOR_DEVICE_PATH) }, SERIAL_DXE_FILE_GUID }, // // UART_DEVICE_PATH Uart // { { MESSAGING_DEVICE_PATH, MSG_UART_DP, DP_NODE_LEN (UART_DEVICE_PATH) }, 0, // Reserved FixedPcdGet64 (PcdUartDefaultBaudRate), // BaudRate FixedPcdGet8 (PcdUartDefaultDataBits), // DataBits FixedPcdGet8 (PcdUartDefaultParity), // Parity FixedPcdGet8 (PcdUartDefaultStopBits) // StopBits }, // // VENDOR_DEFINED_DEVICE_PATH TermType // { { MESSAGING_DEVICE_PATH, MSG_VENDOR_DP, DP_NODE_LEN (VENDOR_DEFINED_DEVICE_PATH) } // // Guid to be filled in dynamically // }, // // EFI_DEVICE_PATH_PROTOCOL End // { END_DEVICE_PATH_TYPE, END_ENTIRE_DEVICE_PATH_SUBTYPE, DP_NODE_LEN (EFI_DEVICE_PATH_PROTOCOL) } }; STATIC PLATFORM_USB_KEYBOARD mUsbKeyboard = { // // USB_CLASS_DEVICE_PATH Keyboard // { { MESSAGING_DEVICE_PATH, MSG_USB_CLASS_DP, DP_NODE_LEN (USB_CLASS_DEVICE_PATH) }, 0xFFFF, // VendorId: any 0xFFFF, // ProductId: any 3, // DeviceClass: HID 1, // DeviceSubClass: boot 1 // DeviceProtocol: keyboard }, // // EFI_DEVICE_PATH_PROTOCOL End // { END_DEVICE_PATH_TYPE, END_ENTIRE_DEVICE_PATH_SUBTYPE, DP_NODE_LEN (EFI_DEVICE_PATH_PROTOCOL) } }; /** Locate all handles that carry the 
specified protocol, filter them with a callback function, and pass each handle that passes the filter to another callback. @param[in] ProtocolGuid The protocol to look for. @param[in] Filter The filter function to pass each handle to. If this parameter is NULL, then all handles are processed. @param[in] Process The callback function to pass each handle to that clears the filter. **/ VOID FilterAndProcess ( IN EFI_GUID *ProtocolGuid, IN FILTER_FUNCTION Filter OPTIONAL, IN CALLBACK_FUNCTION Process ) { EFI_STATUS Status; EFI_HANDLE *Handles; UINTN NoHandles; UINTN Idx; Status = gBS->LocateHandleBuffer (ByProtocol, ProtocolGuid, NULL /* SearchKey */, &NoHandles, &Handles); if (EFI_ERROR (Status)) { // // This is not an error, just an informative condition. // DEBUG ((DEBUG_VERBOSE, "%a: %g: %r\n", __FUNCTION__, ProtocolGuid, Status)); return; } ASSERT (NoHandles > 0); for (Idx = 0; Idx < NoHandles; ++Idx) { CHAR16 *DevicePathText; STATIC CHAR16 Fallback[] = L"<device path unavailable>"; // // The ConvertDevicePathToText () function handles NULL input transparently. // DevicePathText = ConvertDevicePathToText ( DevicePathFromHandle (Handles[Idx]), FALSE, // DisplayOnly FALSE // AllowShortcuts ); if (DevicePathText == NULL) { DevicePathText = Fallback; } if ((Filter == NULL) || (Filter (Handles[Idx], DevicePathText))) { Process (Handles[Idx], DevicePathText); } if (DevicePathText != Fallback) { FreePool (DevicePathText); } } gBS->FreePool (Handles); } /** This FILTER_FUNCTION checks if a handle corresponds to a PCI display device. @param Handle The handle to check @param ReportText A pointer to a string at the time of the error. @retval TURE THe handle corresponds to a PCI display device. @retval FALSE THe handle does not corresponds to a PCI display device. 
**/ BOOLEAN EFIAPI IsPciDisplay ( IN EFI_HANDLE Handle, IN CONST CHAR16 *ReportText ) { EFI_STATUS Status; EFI_PCI_IO_PROTOCOL *PciIo; PCI_TYPE00 Pci; Status = gBS->HandleProtocol (Handle, &gEfiPciIoProtocolGuid, (VOID**)&PciIo); if (EFI_ERROR (Status)) { // // This is not an error worth reporting. // return FALSE; } Status = PciIo->Pci.Read (PciIo, EfiPciIoWidthUint32, 0 /* Offset */, sizeof Pci / sizeof (UINT32), &Pci); if (EFI_ERROR (Status)) { DEBUG ((DEBUG_ERROR, "%a: %s: %r\n", __FUNCTION__, ReportText, Status)); return FALSE; } return IS_PCI_DISPLAY (&Pci); } /** This CALLBACK_FUNCTION attempts to connect a handle non-recursively, asking the matching driver to produce all first-level child handles. @param Handle The handle to connect. @param ReportText A pointer to a string at the time of the error. @retval VOID **/ VOID EFIAPI Connect ( IN EFI_HANDLE Handle, IN CONST CHAR16 *ReportText ) { EFI_STATUS Status; Status = gBS->ConnectController ( Handle, // ControllerHandle NULL, // DriverImageHandle NULL, // RemainingDevicePath -- produce all children FALSE // Recursive ); DEBUG ((EFI_ERROR (Status) ? DEBUG_ERROR : DEBUG_VERBOSE, "%a: %s: %r\n", __FUNCTION__, ReportText, Status)); } /** This CALLBACK_FUNCTION retrieves the EFI_DEVICE_PATH_PROTOCOL from the handle, and adds it to ConOut and ErrOut. @param Handle The handle to retrieves. @param ReportText A pointer to a string at the time of the error. 
@retval VOID **/ VOID EFIAPI AddOutput ( IN EFI_HANDLE Handle, IN CONST CHAR16 *ReportText ) { EFI_STATUS Status; EFI_DEVICE_PATH_PROTOCOL *DevicePath; DevicePath = DevicePathFromHandle (Handle); if (DevicePath == NULL) { DEBUG ((DEBUG_ERROR, "%a: %s: handle %p: device path not found\n", __FUNCTION__, ReportText, Handle)); return; } Status = EfiBootManagerUpdateConsoleVariable (ConOut, DevicePath, NULL); if (EFI_ERROR (Status)) { DEBUG ((DEBUG_ERROR, "%a: %s: adding to ConOut: %r\n", __FUNCTION__, ReportText, Status)); return; } Status = EfiBootManagerUpdateConsoleVariable (ErrOut, DevicePath, NULL); if (EFI_ERROR (Status)) { DEBUG ((DEBUG_ERROR, "%a: %s: adding to ErrOut: %r\n", __FUNCTION__, ReportText, Status)); return; } DEBUG ((DEBUG_VERBOSE, "%a: %s: added to ConOut and ErrOut\n", __FUNCTION__, ReportText)); } /** Register the boot option. @param FileGuid File Guid. @param Description Option descriptor. @param Attributes Option Attributes. @retval VOID **/ VOID PlatformRegisterFvBootOption ( IN EFI_GUID *FileGuid, IN CHAR16 *Description, IN UINT32 Attributes ) { EFI_STATUS Status; INTN OptionIndex; EFI_BOOT_MANAGER_LOAD_OPTION NewOption; EFI_BOOT_MANAGER_LOAD_OPTION *BootOptions; UINTN BootOptionCount; MEDIA_FW_VOL_FILEPATH_DEVICE_PATH FileNode; EFI_LOADED_IMAGE_PROTOCOL *LoadedImage; EFI_DEVICE_PATH_PROTOCOL *DevicePath; Status = gBS->HandleProtocol ( gImageHandle, &gEfiLoadedImageProtocolGuid, (VOID **) &LoadedImage ); ASSERT_EFI_ERROR (Status); EfiInitializeFwVolDevicepathNode (&FileNode, FileGuid); DevicePath = DevicePathFromHandle (LoadedImage->DeviceHandle); ASSERT (DevicePath != NULL); DevicePath = AppendDevicePathNode ( DevicePath, (EFI_DEVICE_PATH_PROTOCOL *) &FileNode ); ASSERT (DevicePath != NULL); Status = EfiBootManagerInitializeLoadOption ( &NewOption, LoadOptionNumberUnassigned, LoadOptionTypeBoot, Attributes, Description, DevicePath, NULL, 0 ); ASSERT_EFI_ERROR (Status); FreePool (DevicePath); BootOptions = EfiBootManagerGetLoadOptions ( 
&BootOptionCount, LoadOptionTypeBoot ); OptionIndex = EfiBootManagerFindLoadOption ( &NewOption, BootOptions, BootOptionCount ); if (OptionIndex == -1) { Status = EfiBootManagerAddLoadOptionVariable (&NewOption, MAX_UINTN); ASSERT_EFI_ERROR (Status); } EfiBootManagerFreeLoadOption (&NewOption); EfiBootManagerFreeLoadOptions (BootOptions, BootOptionCount); } /** Remove all MemoryMapped (...)/FvFile (...) and Fv (...)/FvFile (...) boot options whose device paths do not resolve exactly to an FvFile in the system. This removes any boot options that point to binaries built into the firmware and have become stale due to any of the following: - FvMain's base address or size changed (historical), - FvMain's FvNameGuid changed, - the FILE_GUID of the pointed-to binary changed, - the referenced binary is no longer built into the firmware. EfiBootManagerFindLoadOption () used in PlatformRegisterFvBootOption () only avoids exact duplicates. **/ VOID RemoveStaleFvFileOptions ( VOID ) { EFI_BOOT_MANAGER_LOAD_OPTION *BootOptions; UINTN BootOptionCount; UINTN Index; BootOptions = EfiBootManagerGetLoadOptions (&BootOptionCount, LoadOptionTypeBoot); for (Index = 0; Index < BootOptionCount; ++Index) { EFI_DEVICE_PATH_PROTOCOL *Node1, *Node2, *SearchNode; EFI_STATUS Status; EFI_HANDLE FvHandle; // // If the device path starts with neither MemoryMapped (...) nor Fv (...), // then keep the boot option. // Node1 = BootOptions[Index].FilePath; if (!(DevicePathType (Node1) == HARDWARE_DEVICE_PATH && DevicePathSubType (Node1) == HW_MEMMAP_DP) && !(DevicePathType (Node1) == MEDIA_DEVICE_PATH && DevicePathSubType (Node1) == MEDIA_PIWG_FW_VOL_DP)) { continue; } // // If the second device path node is not FvFile (...), then keep the boot // option. // Node2 = NextDevicePathNode (Node1); if ((DevicePathType (Node2) != MEDIA_DEVICE_PATH) || (DevicePathSubType (Node2) != MEDIA_PIWG_FW_FILE_DP)) { continue; } // // Locate the Firmware Volume2 protocol instance that is denoted by the // boot option. 
If this lookup fails (i.e., the boot option references a // firmware volume that doesn't exist), then we'll proceed to delete the // boot option. // SearchNode = Node1; Status = gBS->LocateDevicePath (&gEfiFirmwareVolume2ProtocolGuid, &SearchNode, &FvHandle); if (!EFI_ERROR (Status)) { // // The firmware volume was found; now let's see if it contains the FvFile // identified by GUID. // EFI_FIRMWARE_VOLUME2_PROTOCOL *FvProtocol; MEDIA_FW_VOL_FILEPATH_DEVICE_PATH *FvFileNode; UINTN BufferSize; EFI_FV_FILETYPE FoundType; EFI_FV_FILE_ATTRIBUTES FileAttributes; UINT32 AuthenticationStatus; Status = gBS->HandleProtocol (FvHandle, &gEfiFirmwareVolume2ProtocolGuid, (VOID **)&FvProtocol); ASSERT_EFI_ERROR (Status); FvFileNode = (MEDIA_FW_VOL_FILEPATH_DEVICE_PATH *)Node2; // // Buffer==NULL means we request metadata only: BufferSize, FoundType, // FileAttributes. // Status = FvProtocol->ReadFile ( FvProtocol, &FvFileNode->FvFileName, // NameGuid NULL, // Buffer &BufferSize, &FoundType, &FileAttributes, &AuthenticationStatus ); if (!EFI_ERROR (Status)) { // // The FvFile was found. Keep the boot option. // continue; } } // // Delete the boot option. // Status = EfiBootManagerDeleteLoadOptionVariable ( BootOptions[Index].OptionNumber, LoadOptionTypeBoot); DEBUG_CODE ( CHAR16 *DevicePathString; DevicePathString = ConvertDevicePathToText (BootOptions[Index].FilePath, FALSE, FALSE); DEBUG (( EFI_ERROR (Status) ? EFI_D_WARN : DEBUG_VERBOSE, "%a: removing stale Boot#%04x %s: %r\n", __FUNCTION__, (UINT32)BootOptions[Index].OptionNumber, DevicePathString == NULL ? L"<unavailable>" : DevicePathString, Status )); if (DevicePathString != NULL) { FreePool (DevicePathString); } ); } EfiBootManagerFreeLoadOptions (BootOptions, BootOptionCount); } /** Register the boot option And Keys. 
@param VOID @retval VOID **/ VOID PlatformRegisterOptionsAndKeys ( VOID ) { EFI_STATUS Status; EFI_INPUT_KEY Enter; EFI_INPUT_KEY F2; EFI_INPUT_KEY Esc; EFI_BOOT_MANAGER_LOAD_OPTION BootOption; // // Register ENTER as CONTINUE key // Enter.ScanCode = SCAN_NULL; Enter.UnicodeChar = CHAR_CARRIAGE_RETURN; Status = EfiBootManagerRegisterContinueKeyOption (0, &Enter, NULL); ASSERT_EFI_ERROR (Status); // // Map F2 and ESC to Boot Manager Menu // F2.ScanCode = SCAN_F2; F2.UnicodeChar = CHAR_NULL; Esc.ScanCode = SCAN_ESC; Esc.UnicodeChar = CHAR_NULL; Status = EfiBootManagerGetBootManagerMenu (&BootOption); ASSERT_EFI_ERROR (Status); Status = EfiBootManagerAddKeyOptionVariable ( NULL, (UINT16) BootOption.OptionNumber, 0, &F2, NULL ); ASSERT (Status == EFI_SUCCESS || Status == EFI_ALREADY_STARTED); Status = EfiBootManagerAddKeyOptionVariable ( NULL, (UINT16) BootOption.OptionNumber, 0, &Esc, NULL ); ASSERT (Status == EFI_SUCCESS || Status == EFI_ALREADY_STARTED); } // // BDS Platform Functions // /** Do the platform init, can be customized by OEM/IBV Possible things that can be done in PlatformBootManagerBeforeConsole: > Update console variable: 1. include hot-plug devices; > 2. Clear ConIn and add SOL for AMT > Register new Driver#### or Boot#### > Register new Key####: e.g.: F12 > Signal ReadyToLock event > Authentication action: 1. connect Auth devices; > 2. Identify auto logon user. **/ VOID EFIAPI PlatformBootManagerBeforeConsole ( VOID ) { RETURN_STATUS PcdStatus; // // Signal EndOfDxe PI Event // EfiEventGroupSignal (&gEfiEndOfDxeEventGroupGuid); // // Dispatch deferred images after EndOfDxe event. // EfiBootManagerDispatchDeferredImages (); // // Locate the PCI root bridges and make the PCI bus driver connect each, // non-recursively. This will produce a number of child handles with PciIo on // them. // FilterAndProcess (&gEfiPciRootBridgeIoProtocolGuid, NULL, Connect); // // Signal the ACPI platform driver that it can download QEMU ACPI tables. 
// EfiEventGroupSignal (&gRootBridgesConnectedEventGroupGuid); // // Find all display class PCI devices (using the handles from the previous // step), and connect them non-recursively. This should produce a number of // child handles with GOPs on them. // FilterAndProcess (&gEfiPciIoProtocolGuid, IsPciDisplay, Connect); // // Now add the device path of all handles with GOP on them to ConOut and // ErrOut. // FilterAndProcess (&gEfiGraphicsOutputProtocolGuid, NULL, AddOutput); // // Add the hardcoded short-form USB keyboard device path to ConIn. // EfiBootManagerUpdateConsoleVariable (ConIn, (EFI_DEVICE_PATH_PROTOCOL *)&mUsbKeyboard, NULL); // // Add the hardcoded serial console device path to ConIn, ConOut, ErrOut. // CopyGuid (&mSerialConsole.TermType.Guid, &gEfiTtyTermGuid); EfiBootManagerUpdateConsoleVariable (ConIn, (EFI_DEVICE_PATH_PROTOCOL *)&mSerialConsole, NULL); EfiBootManagerUpdateConsoleVariable (ConOut, (EFI_DEVICE_PATH_PROTOCOL *)&mSerialConsole, NULL); EfiBootManagerUpdateConsoleVariable (ErrOut, (EFI_DEVICE_PATH_PROTOCOL *)&mSerialConsole, NULL); // // Set the front page timeout from the QEMU configuration. // PcdStatus = PcdSet16S (PcdPlatformBootTimeOut, GetFrontPageTimeoutFromQemu ()); ASSERT_RETURN_ERROR (PcdStatus); // // Register platform-specific boot options and keyboard shortcuts. // PlatformRegisterOptionsAndKeys (); } /** Do the platform specific action after the console is ready Possible things that can be done in PlatformBootManagerAfterConsole: > Console post action: > Dynamically switch output mode from 100x31 to 80x25 for certain senarino > Signal console ready platform customized event > Run diagnostics like memory testing > Connect certain devices > Dispatch aditional option roms > Special boot: e.g.: USB boot, enter UI **/ VOID EFIAPI PlatformBootManagerAfterConsole ( VOID ) { // // Show the splash screen. 
// DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); BootLogoEnableLogo (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); // // Connect the rest of the devices. // DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); EfiBootManagerConnectAll (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); // // Process QEMU's -kernel command line option. Note that the kernel booted // this way should receive ACPI tables, which is why we connect all devices // first (see above) -- PCI enumeration blocks ACPI table installation, if // there is a PCI host. // DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); TryRunningQemuKernel (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); // // Enumerate all possible boot options, then filter and reorder them based on // the QEMU configuration. // DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); EfiBootManagerRefreshAllBootOption (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); // // Register UEFI Shell // DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); PlatformRegisterFvBootOption ( &gUefiShellFileGuid, L"EFI Internal Shell", LOAD_OPTION_ACTIVE ); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); RemoveStaleFvFileOptions (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); SetBootOrderFromQemu (); DEBUG ((DEBUG_INFO, "PlatformBootManagerAfterConsole, func: %a, line: %d\n", __func__, __LINE__)); } /** This function is called each second during the boot manager waits the timeout. 
  @param TimeoutRemain  The remaining timeout.
**/
VOID
EFIAPI
PlatformBootManagerWaitCallback (
  IN UINT16  TimeoutRemain
  )
{
  EFI_GRAPHICS_OUTPUT_BLT_PIXEL_UNION  Black;
  EFI_GRAPHICS_OUTPUT_BLT_PIXEL_UNION  White;
  UINT16                               Timeout;

  Timeout = PcdGet16 (PcdPlatformBootTimeOut);

  Black.Raw = 0x00000000;
  White.Raw = 0x00FFFFFF;

  //
  // Advance the boot-logo progress bar as the countdown elapses.
  // NOTE(review): the percentage expression divides by Timeout; this assumes
  // the callback is never invoked when PcdPlatformBootTimeOut is 0 -- confirm
  // against the BdsDxe caller.
  //
  BootLogoUpdateProgress (
    White.Pixel,
    Black.Pixel,
    L"Start boot option",
    White.Pixel,
    (Timeout - TimeoutRemain) * 100 / Timeout,
    0
    );
}

/**
  The function is called when no boot option could be launched,
  including platform recovery options and options pointing to applications
  built into firmware volumes.

  If this function returns, BDS attempts to enter an infinite loop.
**/
VOID
EFIAPI
PlatformBootManagerUnableToBoot (
  VOID
  )
{
  EFI_STATUS                    Status;
  EFI_INPUT_KEY                 Key;
  EFI_BOOT_MANAGER_LOAD_OPTION  BootManagerMenu;
  UINTN                         Index;

  //
  // BootManagerMenu doesn't contain the correct information when return status
  // is EFI_NOT_FOUND.
  //
  Status = EfiBootManagerGetBootManagerMenu (&BootManagerMenu);
  if (EFI_ERROR (Status)) {
    return;
  }

  //
  // Normally BdsDxe does not print anything to the system console, but this is
  // a last resort -- the end-user will likely not see any DEBUG messages
  // logged in this situation.
  //
  // AsciiPrint () will NULL-check gST->ConOut internally. We check gST->ConIn
  // here to see if it makes sense to request and wait for a keypress.
  //
  if (gST->ConIn != NULL) {
    AsciiPrint (
      "%a: No bootable option or device was found.\n"
      "%a: Press any key to enter the Boot Manager Menu.\n",
      gEfiCallerBaseName,
      gEfiCallerBaseName
      );
    Status = gBS->WaitForEvent (1, &gST->ConIn->WaitForKey, &Index);
    ASSERT_EFI_ERROR (Status);
    ASSERT (Index == 0);

    //
    // Drain any queued keys.
    //
    while (!EFI_ERROR (gST->ConIn->ReadKeyStroke (gST->ConIn, &Key))) {
      //
      // just throw away Key
      //
    }
  }

  //
  // Re-enter the Boot Manager Menu indefinitely if it ever returns.
  //
  for (;;) {
    EfiBootManagerBoot (&BootManagerMenu);
  }
}
869804.c
/* USER CODE BEGIN Header */ /** ****************************************************************************** * @file stm32f0xx_it.c * @brief Interrupt Service Routines. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2021 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under Ultimate Liberty license * SLA0044, the "License"; You may not use this file except in compliance with * the License. You may obtain a copy of the License at: * www.st.com/SLA0044 * ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "main.h" #include "stm32f0xx_it.h" /* Private includes ----------------------------------------------------------*/ /* USER CODE BEGIN Includes */ /* USER CODE END Includes */ /* Private typedef -----------------------------------------------------------*/ /* USER CODE BEGIN TD */ /* USER CODE END TD */ /* Private define ------------------------------------------------------------*/ /* USER CODE BEGIN PD */ /* USER CODE END PD */ /* Private macro -------------------------------------------------------------*/ /* USER CODE BEGIN PM */ /* USER CODE END PM */ /* Private variables ---------------------------------------------------------*/ /* USER CODE BEGIN PV */ /* USER CODE END PV */ /* Private function prototypes -----------------------------------------------*/ /* USER CODE BEGIN PFP */ /* USER CODE END PFP */ /* Private user code ---------------------------------------------------------*/ /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /* External variables --------------------------------------------------------*/ extern CAN_HandleTypeDef hcan; extern TIM_HandleTypeDef htim1; /* USER CODE BEGIN EV */ /* USER CODE END EV */ 
/******************************************************************************/
/*           Cortex-M0 Processor Interruption and Exception Handlers          */
/******************************************************************************/
/**
  * @brief This function handles Non maskable interrupt.
  */
void NMI_Handler(void)
{
  /* USER CODE BEGIN NonMaskableInt_IRQn 0 */

  /* USER CODE END NonMaskableInt_IRQn 0 */
  /* USER CODE BEGIN NonMaskableInt_IRQn 1 */
  /* Halt here: an NMI is unrecoverable for this application. */
  while (1)
  {
  }
  /* USER CODE END NonMaskableInt_IRQn 1 */
}

/**
  * @brief This function handles Hard fault interrupt.
  */
void HardFault_Handler(void)
{
  /* USER CODE BEGIN HardFault_IRQn 0 */
  /* Latch the error LED on so a hard fault is visible on the board. */
  HAL_GPIO_WritePin(ERROR_LED_GPIO_Port, ERROR_LED_Pin, GPIO_PIN_SET);
  /* USER CODE END HardFault_IRQn 0 */
  while (1)
  {
    /* USER CODE BEGIN W1_HardFault_IRQn 0 */
    /* USER CODE END W1_HardFault_IRQn 0 */
  }
}

/******************************************************************************/
/* STM32F0xx Peripheral Interrupt Handlers                                    */
/* Add here the Interrupt Handlers for the used peripherals.                  */
/* For the available peripheral interrupt handler names,                      */
/* please refer to the startup file (startup_stm32f0xx.s).                    */
/******************************************************************************/

/**
  * @brief This function handles EXTI line 0 and 1 interrupts.
  */
void EXTI0_1_IRQHandler(void)
{
  /* USER CODE BEGIN EXTI0_1_IRQn 0 */

  /* USER CODE END EXTI0_1_IRQn 0 */
  /* Dispatch both pins sharing this vector; HAL checks/clears each flag. */
  HAL_GPIO_EXTI_IRQHandler(Horn_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(Cruise_Button_Pin);
  /* USER CODE BEGIN EXTI0_1_IRQn 1 */

  /* USER CODE END EXTI0_1_IRQn 1 */
}

/**
  * @brief This function handles EXTI line 2 and 3 interrupts.
  */
void EXTI2_3_IRQHandler(void)
{
  /* USER CODE BEGIN EXTI2_3_IRQn 0 */

  /* USER CODE END EXTI2_3_IRQn 0 */
  HAL_GPIO_EXTI_IRQHandler(Reverse_Button_Pin);
  /* USER CODE BEGIN EXTI2_3_IRQn 1 */

  /* USER CODE END EXTI2_3_IRQn 1 */
}

/**
  * @brief This function handles EXTI line 4 to 15 interrupts.
  */
void EXTI4_15_IRQHandler(void)
{
  /* USER CODE BEGIN EXTI4_15_IRQn 0 */

  /* USER CODE END EXTI4_15_IRQn 0 */
  /* Each call checks-and-clears one EXTI line's pending flag.             */
  /* NOTE(review): Eco_Led_Pin appears in this EXTI dispatch list although */
  /* its name suggests an LED (output) -- confirm the pin mapping.         */
  HAL_GPIO_EXTI_IRQHandler(TP_IRQ_Pin);
  HAL_GPIO_EXTI_IRQHandler(HL_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(Eco_Led_Pin);
  HAL_GPIO_EXTI_IRQHandler(Eco_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(LT_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(Haz_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(RT_Button_Pin);
  HAL_GPIO_EXTI_IRQHandler(Cruise_P_Pin);
  /* USER CODE BEGIN EXTI4_15_IRQn 1 */

  /* USER CODE END EXTI4_15_IRQn 1 */
}

/**
  * @brief This function handles TIM1 break, update, trigger and commutation interrupts.
  */
void TIM1_BRK_UP_TRG_COM_IRQHandler(void)
{
  /* USER CODE BEGIN TIM1_BRK_UP_TRG_COM_IRQn 0 */

  /* USER CODE END TIM1_BRK_UP_TRG_COM_IRQn 0 */
  HAL_TIM_IRQHandler(&htim1);
  /* USER CODE BEGIN TIM1_BRK_UP_TRG_COM_IRQn 1 */

  /* USER CODE END TIM1_BRK_UP_TRG_COM_IRQn 1 */
}

/**
  * @brief This function handles HDMI-CEC and CAN interrupts / HDMI-CEC wake-up interrupt through EXTI line 27.
  */
void CEC_CAN_IRQHandler(void)
{
  /* USER CODE BEGIN CEC_CAN_IRQn 0 */

  /* USER CODE END CEC_CAN_IRQn 0 */
  HAL_CAN_IRQHandler(&hcan);
  /* USER CODE BEGIN CEC_CAN_IRQn 1 */

  /* USER CODE END CEC_CAN_IRQn 1 */
}

/* USER CODE BEGIN 1 */

/* USER CODE END 1 */
827688.c
/*! * @file * @brief */ #include <stddef.h> #include "stm32g0xx.h" #include "clock.h" #include "systick.h" #include "tiny_timer.h" #include "watchdog.h" #include "heartbeat.h" static tiny_timer_group_t timer_group; int main(void) { __disable_irq(); { clock_init(); tiny_timer_group_init(&timer_group, systick_init()); watchdog_init(&timer_group); heartbeat_init(&timer_group); } __enable_irq(); while(1) { tiny_timer_group_run(&timer_group); __WFI(); } }
908233.c
/* Encode and decode algorithms for
   OKI ADPCM

   2019 by superctr.

   Each 4-bit code carries a sign bit (bit 3) and three magnitude bits
   that approximate the prediction error in units of an adaptive step
   size.  The step-size index (0..48) walks oki_step_table, adapted by
   adjust_table after every code.
*/

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>

#define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))

/* Adaptive quantizer step sizes, indexed by the step history (0..48). */
static const uint16_t oki_step_table[49] = {
	16, 17, 19, 21, 23, 25, 28, 31,
	34, 37, 41, 45, 50, 55, 60, 66,
	73, 80, 88, 97, 107,118,130,143,
	157,173,190,209,230,253,279,307,
	337,371,408,449,494,544,598,658,
	724,796,876,963,1060,1166,1282,1411,1552
};

/* Decode one 4-bit ADPCM code.
 * Updates *history (the predictor, saturated to 12 bits [-2048, 2047])
 * and *step_hist (step-table index, clamped to [0, 48]).
 * Returns the new predictor value. */
static inline int16_t oki_step(uint8_t step, int16_t* history, uint8_t* step_hist)
{
	/* Signed deltas in eighths of the current step size. */
	static const int8_t delta_table[16] = {
		1,3,5,7,9,11,13,15,
		-1,-3,-5,-7,-9,-11,-13,-15
	};
	/* Small magnitudes shrink the step index, large ones grow it. */
	static const int8_t adjust_table[8] = {
		-1,-1,-1,-1,2,4,6,8
	};

	uint16_t step_size = oki_step_table[*step_hist];
	int16_t delta = delta_table[step & 15] * step_size / 8;
	int16_t out = *history + delta;
	*history = out = CLAMP(out, -2048, 2047); // Saturate output

	int8_t adjusted_step = *step_hist + adjust_table[step & 7];
	*step_hist = CLAMP(adjusted_step, 0, 48);

	return out;
}

/* Encode one (12-bit) sample against the running predictor.
 * Returns the 4-bit ADPCM code and advances *history / *step_hist by
 * running the decoder on the code, so encoder and decoder stay in sync. */
static inline uint8_t oki_encode_step(int16_t input, int16_t* history, uint8_t *step_hist)
{
	int bit;
	uint16_t step_size = oki_step_table[*step_hist];
	int16_t delta = input - *history;

	/* Bit 3 is the sign.  (The original code also had a redundant
	 * "if(delta < 0) adpcm_sample = 8;" duplicating this ternary.) */
	uint8_t adpcm_sample = (delta < 0) ? 8 : 0;
	delta = abs(delta);

	/* Greedy quantization of |delta| with step, step/2, step/4. */
	for (bit = 3; bit--; ) {
		if (delta >= step_size) {
			adpcm_sample |= (1 << bit);
			delta -= step_size;
		}
		step_size >>= 1;
	}

	oki_step(adpcm_sample, history, step_hist);
	return adpcm_sample;
}

/* Encode len 16-bit PCM samples into len/2 bytes, two 4-bit codes per
 * byte, high nibble first.  If len is odd, the final buffered nibble is
 * never flushed (matches the original behavior). */
void oki_encode(int16_t *buffer,uint8_t *outbuffer,long len)
{
	long i;
	int16_t history = 0;
	uint8_t step_hist = 0;
	uint8_t buf_sample = 0, nibble = 0;

	for (i = 0; i < len; i++) {
		int16_t sample = *buffer++;
		if (sample < 0x7ff8) // round up without overflowing int16_t
			sample += 8;
		sample >>= 4;        // 16-bit PCM -> 12-bit
		int step = oki_encode_step(sample, &history, &step_hist);
		if (nibble)
			*outbuffer++ = buf_sample | (step & 15);
		else
			buf_sample = (step & 15) << 4;
		nibble ^= 1;
	}
}

/* Decode len 4-bit codes (two per input byte, high nibble first) into
 * len 16-bit PCM samples (12-bit decoder output scaled up by 16). */
void oki_decode(uint8_t *buffer,int16_t *outbuffer,long len)
{
	long i;
	int16_t history = 0;
	uint8_t step_hist = 0;
	uint8_t nibble = 0; /* 0: take high nibble next, 1: take low nibble */

	for (i = 0; i < len; i++) {
		/* Explicit masking replaces the original implementation-defined
		 * int8_t shift trick; oki_step only uses the low 4 bits anyway. */
		uint8_t code;
		if (nibble)
			code = *buffer++ & 15;
		else
			code = *buffer >> 4;
		nibble ^= 1;
		*outbuffer++ = oki_step(code, &history, &step_hist) << 4;
	}
}
392140.c
/**************************************************************************** * drivers/syslog/syslog_chardev.c * * Copyright (C) 2016 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <gnutt@nuttx.org> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <stdbool.h>
#include <string.h>
#include <poll.h>
#include <errno.h>

#include <nuttx/fs/fs.h>
#include <syslog.h>

#include "syslog.h"

#ifdef CONFIG_SYSLOG_CHARDEV

/****************************************************************************
 * Private Function Prototypes
 ****************************************************************************/

static ssize_t syslog_chardev_write(FAR struct file *filep,
                                    FAR const char *buffer, size_t buflen);

/****************************************************************************
 * Private Data
 ****************************************************************************/

/* Write-only driver: every method except write() is NULL, so open/close/
 * read and friends fall through to the VFS defaults. */

static const struct file_operations syslog_fops =
{
  NULL,                 /* open */
  NULL,                 /* close */
  NULL,                 /* read */
  syslog_chardev_write, /* write */
  NULL,                 /* seek */
  NULL                  /* ioctl */
#ifndef CONFIG_DISABLE_POLL
  , NULL                /* poll */
#endif
#ifndef CONFIG_DISABLE_PSEUDOFS_OPERATIONS
  , NULL                /* unlink */
#endif
};

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: syslog_chardev_write
 *
 * Description:
 *   write() method for /dev/syslog.  Forwards the user buffer to the
 *   SYSLOG via syslog() with a bounded "%.*s" conversion, so the buffer
 *   need not be NUL-terminated.  Always reports the full length as
 *   written.
 *
 *   NOTE(review): len is narrowed to int for the "%.*s" precision; writes
 *   larger than INT_MAX would misbehave -- unlikely in practice, confirm
 *   if very large writes are possible.
 *
 ****************************************************************************/

static ssize_t syslog_chardev_write(FAR struct file *filep,
                                    FAR const char *buffer, size_t len)
{
  syslog(LOG_INFO, "%.*s", (int)len, buffer);
  return len;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: syslog_register
 *
 * Description:
 *   Register a simple character driver at /dev/syslog whose write() method
 *   will transfer data to the SYSLOG device.  This can be useful if, for
 *   example, you want to redirect the output of a program to the SYSLOG.
 *
 *   NOTE that unlike other syslog output, this data is unformatted raw
 *   byte output with no time-stamping or any other SYSLOG features
 *   supported.
 *
 ****************************************************************************/

void syslog_register(void)
{
  /* Mode 0222: write-only for all users; return value deliberately
   * ignored -- there is no caller that could act on a failure here. */

  (void)register_driver("/dev/syslog", &syslog_fops, 0222, NULL);
}

#endif /* CONFIG_SYSLOG_CHARDEV */
234977.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014 MundoReader S.L. * Author: Heiko Stuebner <heiko@sntech.de> * * Copyright (c) 2015 Rockchip Electronics Co. Ltd. * Author: Xing Zheng <zhengxing@rock-chips.com> */ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/syscore_ops.h> #include <dt-bindings/clock/rk3036-cru.h> #include "clk.h" #define RK3036_GRF_SOC_STATUS0 0x14c enum rk3036_plls { apll, dpll, gpll, }; static struct rockchip_pll_rate_table rk3036_pll_rates[] = { /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0), RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0), RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0), RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0), RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0), RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0), RK3036_PLL_RATE( 
864000000, 1, 72, 2, 1, 1, 0), RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0), RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0), RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0), RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0), RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0), RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0), RK3036_PLL_RATE( 594000000, 2, 99, 2, 1, 1, 0), RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0), RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0), RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0), RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0), RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0), RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0), { /* sentinel */ }, }; #define RK3036_DIV_CPU_MASK 0x1f #define RK3036_DIV_CPU_SHIFT 8 #define RK3036_DIV_PERI_MASK 0xf #define RK3036_DIV_PERI_SHIFT 0 #define RK3036_DIV_ACLK_MASK 0x7 #define RK3036_DIV_ACLK_SHIFT 4 #define RK3036_DIV_HCLK_MASK 0x3 #define RK3036_DIV_HCLK_SHIFT 8 #define RK3036_DIV_PCLK_MASK 0x7 #define RK3036_DIV_PCLK_SHIFT 12 #define RK3036_CLKSEL1(_core_periph_div) \ { \ .reg = RK2928_CLKSEL_CON(1), \ .val = HIWORD_UPDATE(_core_periph_div, RK3036_DIV_PERI_MASK, \ RK3036_DIV_PERI_SHIFT) \ } #define RK3036_CPUCLK_RATE(_prate, _core_periph_div) \ { \ .prate = _prate, \ .divs = { \ RK3036_CLKSEL1(_core_periph_div), \ }, \ } static struct rockchip_cpuclk_rate_table rk3036_cpuclk_rates[] __initdata = { RK3036_CPUCLK_RATE(816000000, 4), RK3036_CPUCLK_RATE(600000000, 4), RK3036_CPUCLK_RATE(312000000, 4), }; static const struct rockchip_cpuclk_reg_data rk3036_cpuclk_data = { .core_reg[0] = RK2928_CLKSEL_CON(0), .div_core_shift[0] = 0, .div_core_mask[0] = 0x1f, .num_cores = 1, .mux_core_alt = 1, .mux_core_main = 0, .mux_core_shift = 7, .mux_core_mask = 0x1, }; PNAME(mux_pll_p) = { "xin24m", "xin24m" }; PNAME(mux_armclk_p) = { "apll", "gpll_armclk" }; PNAME(mux_busclk_p) = { "apll", "dpll_cpu", "gpll_cpu" }; PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" }; PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" 
}; PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" }; PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" }; PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" }; PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" }; PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { [apll] = PLL(pll_rk3036, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), RK2928_MODE_CON, 0, 5, 0, rk3036_pll_rates), [dpll] = PLL(pll_rk3036, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4), RK2928_MODE_CON, 4, 4, 0, NULL), [gpll] = PLL(pll_rk3036, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12), RK2928_MODE_CON, 12, 6, ROCKCHIP_PLL_SYNC_RATE, rk3036_pll_rates), }; #define MFLAGS CLK_MUX_HIWORD_MASK #define DFLAGS CLK_DIVIDER_HIWORD_MASK #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) static struct rockchip_clk_branch rk3036_uart0_fracmux __initdata = MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(13), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3036_uart1_fracmux __initdata = MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(14), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3036_uart2_fracmux __initdata = MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(15), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3036_i2s_fracmux __initdata = MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(3), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3036_spdif_fracmux __initdata = 
MUX(SCLK_SPDIF, "sclk_spdif", mux_spdif_p, 0, RK2928_CLKSEL_CON(5), 8, 2, MFLAGS); static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { /* * Clock-Architecture Diagram 1 */ GATE(0, "gpll_armclk", "gpll", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(0), 6, GFLAGS), FACTOR(0, "xin12m", "xin24m", 0, 1, 2), /* * Clock-Architecture Diagram 2 */ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(0), 2, GFLAGS), GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(0), 8, GFLAGS), COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO), FACTOR(0, "ddrphy", "ddrphy2x", 0, 1, 2), COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, RK2928_CLKGATE_CON(0), 7, GFLAGS), COMPOSITE_NOMUX(0, "aclk_core_pre", "armclk", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(1), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, RK2928_CLKGATE_CON(0), 7, GFLAGS), GATE(0, "dpll_cpu", "dpll", 0, RK2928_CLKGATE_CON(10), 8, GFLAGS), GATE(0, "gpll_cpu", "gpll", 0, RK2928_CLKGATE_CON(0), 1, GFLAGS), COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_busclk_p, 0, RK2928_CLKSEL_CON(0), 14, 2, MFLAGS, 8, 5, DFLAGS), GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_src", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(0), 3, GFLAGS), COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_src", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(1), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, RK2928_CLKGATE_CON(0), 5, GFLAGS), COMPOSITE_NOMUX(HCLK_CPU, "hclk_cpu", "aclk_cpu_src", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(1), 8, 2, DFLAGS | CLK_DIVIDER_READ_ONLY, RK2928_CLKGATE_CON(0), 4, GFLAGS), COMPOSITE(0, "aclk_peri_src", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(2), 0, GFLAGS), GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0, RK2928_CLKGATE_CON(2), 1, GFLAGS), DIV(0, "pclk_peri_src", "aclk_peri_src", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(10), 
12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO), GATE(PCLK_PERI, "pclk_peri", "pclk_peri_src", 0, RK2928_CLKGATE_CON(2), 3, GFLAGS), DIV(0, "hclk_peri_src", "aclk_peri_src", CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO), GATE(HCLK_PERI, "hclk_peri", "hclk_peri_src", 0, RK2928_CLKGATE_CON(2), 2, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(2), 4, 1, MFLAGS, RK2928_CLKGATE_CON(1), 0, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(2), 5, 1, MFLAGS, RK2928_CLKGATE_CON(1), 1, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(2), 6, 1, MFLAGS, RK2928_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, RK2928_CLKSEL_CON(2), 7, 1, MFLAGS, RK2928_CLKGATE_CON(2), 5, GFLAGS), MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, RK2928_CLKSEL_CON(13), 10, 2, MFLAGS), COMPOSITE_NOMUX(0, "uart0_src", "uart_pll_clk", 0, RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 8, GFLAGS), COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, RK2928_CLKSEL_CON(14), 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 10, GFLAGS), COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, RK2928_CLKSEL_CON(15), 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 12, GFLAGS), COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(17), 0, RK2928_CLKGATE_CON(1), 9, GFLAGS, &rk3036_uart0_fracmux), COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(18), 0, RK2928_CLKGATE_CON(1), 11, GFLAGS, &rk3036_uart1_fracmux), COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(19), 0, RK2928_CLKGATE_CON(1), 13, GFLAGS, &rk3036_uart2_fracmux), COMPOSITE(0, "aclk_vcodec", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS, RK2928_CLKGATE_CON(3), 11, GFLAGS), 
FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4, RK2928_CLKGATE_CON(3), 12, GFLAGS), COMPOSITE(0, "aclk_hvec", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(20), 0, 2, MFLAGS, 2, 5, DFLAGS, RK2928_CLKGATE_CON(10), 6, GFLAGS), COMPOSITE(0, "aclk_disp1_pre", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS, RK2928_CLKGATE_CON(1), 4, GFLAGS), COMPOSITE(0, "hclk_disp_pre", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS, RK2928_CLKGATE_CON(0), 11, GFLAGS), COMPOSITE(SCLK_LCDC, "dclk_lcdc", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(28), 0, 2, MFLAGS, 8, 8, DFLAGS, RK2928_CLKGATE_CON(3), 2, GFLAGS), COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(12), 8, 2, MFLAGS, RK2928_CLKGATE_CON(2), 11, GFLAGS), DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(12), 10, 2, MFLAGS, RK2928_CLKGATE_CON(2), 13, GFLAGS), DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(12), 12, 2, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(2), 14, GFLAGS), MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3036_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3036_SDMMC_CON1, 0), MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3036_SDIO_CON0, 1), MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3036_SDIO_CON1, 0), MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3036_EMMC_CON0, 1), MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3036_EMMC_CON1, 0), COMPOSITE(0, "i2s_src", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(3), 14, 2, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 9, GFLAGS), COMPOSITE_FRACMUX(0, "i2s_frac", "i2s_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(7), 0, RK2928_CLKGATE_CON(0), 10, GFLAGS, &rk3036_i2s_fracmux), COMPOSITE_NODIV(SCLK_I2S_OUT, "i2s_clkout", 
mux_i2s_clkout_p, 0, RK2928_CLKSEL_CON(3), 12, 1, MFLAGS, RK2928_CLKGATE_CON(0), 13, GFLAGS), GATE(SCLK_I2S, "sclk_i2s", "i2s_pre", CLK_SET_RATE_PARENT, RK2928_CLKGATE_CON(0), 14, GFLAGS), COMPOSITE(0, "spdif_src", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(5), 10, 2, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(2), 10, GFLAGS), COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_src", 0, RK2928_CLKSEL_CON(9), 0, RK2928_CLKGATE_CON(2), 12, GFLAGS, &rk3036_spdif_fracmux), GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin12m", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(1), 5, GFLAGS), COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(34), 8, 2, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(3), 13, GFLAGS), COMPOSITE(SCLK_SPI, "sclk_spi", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(2), 9, GFLAGS), COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_3plls_p, 0, RK2928_CLKSEL_CON(16), 8, 2, MFLAGS, 10, 5, DFLAGS, RK2928_CLKGATE_CON(10), 4, GFLAGS), COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, RK2928_CLKSEL_CON(16), 0, 2, MFLAGS, 2, 5, DFLAGS, RK2928_CLKGATE_CON(10), 5, GFLAGS), COMPOSITE_NOGATE(SCLK_MACPLL, "mac_pll_src", mux_pll_src_3plls_p, CLK_SET_RATE_NO_REPARENT, RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS), MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, RK2928_CLKSEL_CON(21), 4, 5, DFLAGS, RK2928_CLKGATE_CON(2), 6, GFLAGS), FACTOR(0, "sclk_macref_out", "hclk_peri_src", 0, 1, 2), MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, RK2928_CLKSEL_CON(31), 0, 1, MFLAGS), /* * Clock-Architecture Diagram 3 */ /* aclk_cpu gates */ GATE(0, "sclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS), GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS), /* hclk_cpu gates */ GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, 
RK2928_CLKGATE_CON(5), 6, GFLAGS), /* pclk_cpu gates */ GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), GATE(PCLK_DDRUPCTL, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS), GATE(PCLK_ACODEC, "pclk_acodec", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS), GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS), /* aclk_vio gates */ GATE(ACLK_VIO, "aclk_vio", "aclk_disp1_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS), GATE(ACLK_LCDC, "aclk_lcdc", "aclk_disp1_pre", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS), GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS), GATE(HCLK_LCDC, "hclk_lcdc", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS), /* xin24m gates */ GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK2928_CLKGATE_CON(10), 0, GFLAGS), GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK2928_CLKGATE_CON(10), 1, GFLAGS), /* aclk_peri gates */ GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS), GATE(0, "aclk_cpu_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS), GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS), GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 15, GFLAGS), /* hclk_peri gates */ GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS), GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS), GATE(0, "hclk_peri_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS), GATE(HCLK_NANDC, "hclk_nandc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS), GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 11, GFLAGS), GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, 
GFLAGS), GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS), GATE(HCLK_OTG1, "hclk_otg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 3, GFLAGS), GATE(HCLK_I2S, "hclk_i2s", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS), GATE(0, "hclk_sfc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS), GATE(HCLK_MAC, "hclk_mac", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS), /* pclk_peri gates */ GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS), GATE(0, "pclk_efuse", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 2, GFLAGS), GATE(PCLK_TIMER, "pclk_timer", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 7, GFLAGS), GATE(PCLK_PWM, "pclk_pwm", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 10, GFLAGS), GATE(PCLK_SPI, "pclk_spi", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS), GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS), GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS), GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 2, GFLAGS), GATE(PCLK_I2C0, "pclk_i2c0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 4, GFLAGS), GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 5, GFLAGS), GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 6, GFLAGS), GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS), GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS), }; static const char *const rk3036_critical_clocks[] __initconst = { "aclk_cpu", "aclk_peri", "hclk_peri", "pclk_peri", "pclk_ddrupctl", }; static void __init rk3036_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; struct clk *clk; reg_base = 
of_iomap(np, 0);
	if (!reg_base) {
		pr_err("%s: could not map cru region\n", __func__);
		return;
	}

	/*
	 * Make uart_pll_clk a child of the gpll, as all other sources are
	 * not that usable / stable.
	 */
	writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
		       reg_base + RK2928_CLKSEL_CON(13));

	ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
	if (IS_ERR(ctx)) {
		pr_err("%s: rockchip clk init failed\n", __func__);
		iounmap(reg_base);
		return;
	}

	/* usb480m is derived from the 24 MHz crystal (24 MHz * 20 / 1). */
	clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
	if (IS_ERR(clk))
		pr_warn("%s: could not register clock usb480m: %ld\n",
			__func__, PTR_ERR(clk));

	rockchip_clk_register_plls(ctx, rk3036_pll_clks,
				   ARRAY_SIZE(rk3036_pll_clks),
				   RK3036_GRF_SOC_STATUS0);
	rockchip_clk_register_branches(ctx, rk3036_clk_branches,
				       ARRAY_SIZE(rk3036_clk_branches));
	/* Keep always-needed bus clocks from being gated by the framework. */
	rockchip_clk_protect_critical(rk3036_critical_clocks,
				      ARRAY_SIZE(rk3036_critical_clocks));

	rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
			mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
			&rk3036_cpuclk_data, rk3036_cpuclk_rates,
			ARRAY_SIZE(rk3036_cpuclk_rates));

	rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
				  ROCKCHIP_SOFTRST_HIWORD_MASK);

	rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL);

	rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3036_cru, "rockchip,rk3036-cru", rk3036_clk_init);
801246.c
/* keyboard.c: Keyboard event for vimcaps * Copyright (C) 2010-2014 LiTuX, all wrongs reserved. * * Last Change: 2014-02-16 10:33:24 * * This file is part of vimcaps, a layer for `calling` APIs with libcall. * The library provides some low-level functions similar to system APIs * which can be called in vim using libcallnr(), to read and modify the * state of the keyboard. * * I'm trying to find some APIs similar to Win32API SendInput/keybd_event * under linux, but failed so far (HELP!): * /dev/input/eventX needs permission, * ioctl on /dev/console needs permission, * letleds needs TTY, or root permission under X (/dev/tty7, for example). * xset needs X, and failed to turn on/off my capslock and numlock. * (Wait! Why my capslock led is on but capslock status is off?? * Who designed this strange "feature"?? Why the man page and header file * are different? Why are the documents so useless? \cdots) * Finally I find the following methods works for me (I don't know if it * works everywhere or not) under X, thus in this version, * I'll use this ugly way, to make it at least work first. * * See Makefile for how to compile it. * */ /* FIXME: Too many #if-else */ #ifdef _WIN32 /************************** For windows ****************************/ #define WINVER 0x0502 #define _WIN32_WINNT 0x0502 #define NOCOMM #define WIN32_LEAN_AND_MEAN #include <windows.h> /* TinyCC and old MinGW do not define this */ #ifndef MAPVK_VK_TO_VSC # define MAPVK_VK_TO_VSC 0 #endif #elif __APPLE__ /************************** For Mac OS ****************************/ # error "Mac is not supported yet." #elif __linux__ /*************************** For linux ****************************/ #include <X11/XKBlib.h> Display *display = NULL; #else /* TODO: BSD support? */ # error "Platform not supported." 
#endif #ifdef _WIN32 # define DLL_EXPORT __declspec(dllexport) #else # define DLL_EXPORT #endif #ifdef _WIN32 DLL_EXPORT int GetState( int vKey ) { /* GetKeyState return SHORT, force to signed by two steps of cast. * * The return value specifies the status of the specified virtual key * If the high-order bit is 1, the key is down; otherwise, it is up. * If the low-order bit is 1, the key is toggled. * A key, such as the CAPS LOCK key, is toggled if it is turned on. * The key is off and untoggled if the low-order bit is 0. * A toggle key's indicator light (if any) on the keyboard * will be on when the key is toggled, * and off when the key is untoggled. --- MSDN * */ signed short ret; ret = GetKeyState(vKey); return (int) ret; } static int KiEvent( int vKey, DWORD dwFlags ) { INPUT in = {INPUT_KEYBOARD}; in.ki.wVk = vKey; in.ki.wScan = MapVirtualKey(vKey, MAPVK_VK_TO_VSC); in.ki.dwFlags = dwFlags; in.ki.time = 0; // let the system manage in.ki.dwExtraInfo = 0; // No extra info. return SendInput(1, &in, sizeof(INPUT)); // 1 if success } DLL_EXPORT int Press( int vKey ) { return KiEvent(vKey, 0); } DLL_EXPORT int PressExt( int vKey ) { return KiEvent(vKey, KEYEVENTF_EXTENDEDKEY); } DLL_EXPORT int Release( int vKey ) { return KiEvent(vKey, KEYEVENTF_KEYUP); } DLL_EXPORT int ReleaseExt( int vKey ) { return KiEvent(vKey, KEYEVENTF_EXTENDEDKEY | KEYEVENTF_KEYUP); } DLL_EXPORT int SendKey( int vKey ) { int ret; ret = Press(vKey); Sleep(10); ret += Release(vKey); return ret; // 2 if all success } DLL_EXPORT int SendKeyExt( int vKey ) { int ret; ret = PressExt(vKey); Sleep(10); ret += ReleaseExt(vKey); return ret; // 2 if all success } DLL_EXPORT int ToggleLock( int lock ) { /* lock can be: * 1 for capslock; * 2 for numlock; * 4 for scrollock; * */ int vKey = 0; switch (lock) { case 1: // caps vKey = VK_CAPITAL; break; case 2: // num vKey = VK_NUMLOCK; break; case 4: // scroll vKey = VK_SCROLL; break; default: // do nothing break; } return SendKey( vKey ); } DLL_EXPORT BOOL 
WINAPI DllMain(HINSTANCE hModule, DWORD dwReason, LPVOID lpvReserved) { /* Do nothing */ switch (dwReason) { case DLL_PROCESS_ATTACH: case DLL_PROCESS_DETACH: case DLL_THREAD_ATTACH: case DLL_THREAD_DETACH: break; default: break; } return TRUE; } #elif __APPLE__ /* TODO */ #elif __linux__ void xkbd_init(void) __attribute__((constructor)); void xkbd_fini(void) __attribute__((destructor)); void xkbd_init(void) { /* open connection with the server */ display = XOpenDisplay(NULL); if (display == NULL) { /* TODO: what should I do on error? */ } } void xkbd_fini(void) { /* close connection to server */ if (NULL != display) { XCloseDisplay(display); } } int xMaskedState( unsigned mask ) { unsigned int state = 0; XkbGetIndicatorState(display, XkbUseCoreKbd, &state); return state & mask; } int xAtom(const char * const name) { return XInternAtom(display, name, 0); } int xGetState(unsigned atom) { Bool state = 0; XkbGetNamedIndicator(display, atom, NULL, &state, NULL, NULL); return state; } int xGetNamedState(const char * const name) { Bool state = 0; Atom atom = XInternAtom(display, name, 0); XkbGetNamedIndicator(display, atom, NULL, &state, NULL, NULL); return state; } static int xSetIndicator(unsigned atom, Bool state) { /* FIXME: This function don't work under my system */ return XkbSetNamedIndicator(display, atom, True, state, False, NULL); } int xIndicatorOn(unsigned atom) { return xSetIndicator(atom, 1); } int xIndicatorOff(unsigned atom) { return xSetIndicator(atom, 0); } int xNamedIndicatorOn(const char * const name) { return xSetIndicator(XInternAtom(display, name, 0), 1); } int xNamedIndicatorOff(const char * const name) { return xSetIndicator(XInternAtom(display, name, 0), 0); } static int xModifierMask(int lock) { int mask; switch (lock) { case 1: // caps lock mask = 0x02; break; case 2: // num lock mask = 0x10; break; default: mask = 0; } return mask; } int xLockModifier(unsigned mask) { /* This one works on my system, but can only modify caps/num lock. 
* where caps lock is 0x02(2) and num lock is 0x10(16): * 8 Real Modifier Masks from man page: (1, 2, 4, 8, 16, 32, 64, 128) * ShiftMask LockMask ControlMask Mod1Mask * Mod2Mask Mod3Mask Mod4Mask Mod5Mask * It seems that none of them is scroll lock. * BTW the shiftmask seems to be a lock too. * */ return XkbLockModifiers(display, XkbUseCoreKbd, mask, mask); } int xUnlockModifier(unsigned mask) { return XkbLockModifiers(display, XkbUseCoreKbd, mask, 0); } #else #endif /* "High level" interfaces */ DLL_EXPORT int LibReady( void ) { #ifdef __linux__ return NULL == display? 0: 1; #else return 1; #endif } DLL_EXPORT int LockToggled( int lock ) { /* lock can be: * 1 for capslock; * 2 for numlock; * 4 for scrollock; * */ int result; #ifdef _WIN32 int vKey = 0; switch (lock) { case 1: // caps vKey = VK_CAPITAL; break; case 2: // num vKey = VK_NUMLOCK; break; case 4: // scroll vKey = VK_SCROLL; break; default: // do nothing break; } result = 1 & (int) GetState( vKey ); #elif __APPLE__ #elif __linux__ Atom which; switch (lock) { case 1: which = XInternAtom(display, "Caps Lock", 0); break; case 2: which = XInternAtom(display, "Num Lock", 0); break; case 4: which = XInternAtom(display, "Scroll Lock", 0); break; default: /* illegal */ which = 0; } result = xGetState(which); #endif return result; } DLL_EXPORT int ToggleOn( int lock ) { int result; if ( !LockToggled(lock) ) { #ifdef _WIN32 result = ToggleLock(lock); #elif __APPLE__ #elif __linux__ result = xLockModifier( xModifierMask(lock) ); #else #endif } return result; } DLL_EXPORT int ToggleOff( int lock ) { int result; if ( LockToggled(lock) ) { #ifdef _WIN32 result = ToggleLock(lock); #elif __APPLE__ #elif __linux__ result = xUnlockModifier( xModifierMask(lock) ); #else #endif } return result; }
826500.c
/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Implementation of the Python `_thread` module: lock objects plus
 * thread creation/identification, on top of the port's mp_thread_* API. */

#include <stdio.h>
#include <string.h>

#include "py/runtime.h"
#include "py/stackctrl.h"

#if MICROPY_PY_THREAD

#include "py/mpthread.h"

#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_PRINT (1)
#define DEBUG_printf DEBUG_printf
#else // don't print debugging info
#define DEBUG_PRINT (0)
#define DEBUG_printf(...) (void)0
#endif

/****************************************************************/
// Lock object

STATIC const mp_obj_type_t mp_type_thread_lock;

typedef struct _mp_obj_thread_lock_t {
    mp_obj_base_t base;
    mp_thread_mutex_t mutex;    // underlying port mutex
    volatile bool locked;       // mirror of the lock state for locked()
} mp_obj_thread_lock_t;

// Allocate and initialize a new, unlocked lock object.
STATIC mp_obj_thread_lock_t *mp_obj_new_thread_lock(void) {
    mp_obj_thread_lock_t *self = m_new_obj(mp_obj_thread_lock_t);
    self->base.type = &mp_type_thread_lock;
    mp_thread_mutex_init(&self->mutex);
    self->locked = false;
    return self;
}

// lock.acquire([waitflag]) -> bool
// Blocks (releasing the GIL) unless waitflag is given and falsy.
// Returns True on acquisition, False when non-blocking and contended;
// raises OSError for a real mutex error (negative return).
STATIC mp_obj_t thread_lock_acquire(size_t n_args, const mp_obj_t *args) {
    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(args[0]);
    bool wait = true;
    if (n_args > 1) {
        wait = mp_obj_get_int(args[1]);
        // TODO support timeout arg
    }
    MP_THREAD_GIL_EXIT();
    int ret = mp_thread_mutex_lock(&self->mutex, wait);
    MP_THREAD_GIL_ENTER();
    if (ret == 0) {
        return mp_const_false;
    } else if (ret == 1) {
        self->locked = true;
        return mp_const_true;
    } else {
        mp_raise_OSError(-ret);
    }
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock_acquire_obj, 1, 3, thread_lock_acquire);

// lock.release() -- raises RuntimeError if the lock is not held.
STATIC mp_obj_t thread_lock_release(mp_obj_t self_in) {
    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
    if (!self->locked) {
        mp_raise_msg(&mp_type_RuntimeError, NULL);
    }
    self->locked = false;
    MP_THREAD_GIL_EXIT();
    mp_thread_mutex_unlock(&self->mutex);
    MP_THREAD_GIL_ENTER();
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_release_obj, thread_lock_release);

// lock.locked() -> bool
STATIC mp_obj_t thread_lock_locked(mp_obj_t self_in) {
    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
    return mp_obj_new_bool(self->locked);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_locked_obj, thread_lock_locked);

// Context-manager exit: ignore the exception triple, just release.
STATIC mp_obj_t thread_lock___exit__(size_t n_args, const mp_obj_t *args) {
    (void)n_args; // unused
    return thread_lock_release(args[0]);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock___exit___obj, 4, 4, thread_lock___exit__);

STATIC const mp_rom_map_elem_t thread_lock_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_acquire), MP_ROM_PTR(&thread_lock_acquire_obj) },
    { MP_ROM_QSTR(MP_QSTR_release), MP_ROM_PTR(&thread_lock_release_obj) },
    { MP_ROM_QSTR(MP_QSTR_locked), MP_ROM_PTR(&thread_lock_locked_obj) },
    // __enter__ is just acquire() with no extra args
    { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&thread_lock_acquire_obj) },
    { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&thread_lock___exit___obj) },
};
STATIC MP_DEFINE_CONST_DICT(thread_lock_locals_dict, thread_lock_locals_dict_table);

STATIC const mp_obj_type_t mp_type_thread_lock = {
    { &mp_type_type },
    .name = MP_QSTR_lock,
    .locals_dict = (mp_obj_dict_t*)&thread_lock_locals_dict,
};

/****************************************************************/
// _thread module

// Requested stack size for new threads; 0 means "port default".
STATIC size_t thread_stack_size = 0;

// _thread.get_ident(): the per-thread state pointer doubles as the id.
STATIC mp_obj_t mod_thread_get_ident(void) {
    return mp_obj_new_int_from_uint((uintptr_t)mp_thread_get_state());
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_get_ident_obj, mod_thread_get_ident);

// _thread.stack_size([size]): returns the previous value; with no
// argument also resets the size to the default (0).
STATIC mp_obj_t mod_thread_stack_size(size_t n_args, const mp_obj_t *args) {
    mp_obj_t ret = mp_obj_new_int_from_uint(thread_stack_size);
    if (n_args == 0) {
        thread_stack_size = 0;
    } else {
        thread_stack_size = mp_obj_get_int(args[0]);
    }
    return ret;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_stack_size_obj, 0, 1, mod_thread_stack_size);

// Everything the new thread needs, heap-allocated so it survives the
// hand-off; args[] holds n_args positionals then n_kw key/value pairs.
typedef struct _thread_entry_args_t {
    mp_obj_dict_t *dict_locals;
    mp_obj_dict_t *dict_globals;
    size_t stack_size;
    mp_obj_t fun;
    size_t n_args;
    size_t n_kw;
    mp_obj_t args[];
} thread_entry_args_t;

STATIC void *thread_entry(void *args_in) {
    // Execution begins here for a new thread.  We do not have the GIL.

    thread_entry_args_t *args = (thread_entry_args_t*)args_in;

    mp_state_thread_t ts;
    mp_thread_set_state(&ts);

    mp_stack_set_top(&ts + 1); // need to include ts in root-pointer scan
    mp_stack_set_limit(args->stack_size);

    #if MICROPY_ENABLE_PYSTACK
    // TODO threading and pystack is not fully supported, for now just make a small stack
    mp_obj_t mini_pystack[128];
    mp_pystack_init(mini_pystack, &mini_pystack[128]);
    #endif

    // set locals and globals from the calling context
    mp_locals_set(args->dict_locals);
    mp_globals_set(args->dict_globals);

    MP_THREAD_GIL_ENTER();

    // signal that we are set up and running
    mp_thread_start();

    // TODO set more thread-specific state here:
    //  mp_pending_exception? (root pointer)
    //  cur_exception (root pointer)

    DEBUG_printf("[thread] start ts=%p args=%p stack=%p\n", &ts, &args, MP_STATE_THREAD(stack_top));

    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        mp_call_function_n_kw(args->fun, args->n_args, args->n_kw, args->args);
        nlr_pop();
    } else {
        // uncaught exception
        // check for SystemExit
        mp_obj_base_t *exc = (mp_obj_base_t*)nlr.ret_val;
        if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(exc->type), MP_OBJ_FROM_PTR(&mp_type_SystemExit))) {
            // swallow exception silently
        } else {
            // print exception out
            mp_printf(MICROPY_ERROR_PRINTER, "Unhandled exception in thread started by ");
            mp_obj_print_helper(MICROPY_ERROR_PRINTER, args->fun, PRINT_REPR);
            mp_printf(MICROPY_ERROR_PRINTER, "\n");
            mp_obj_print_exception(MICROPY_ERROR_PRINTER, MP_OBJ_FROM_PTR(exc));
        }
    }

    DEBUG_printf("[thread] finish ts=%p\n", &ts);

    // signal that we are finished
    mp_thread_finish();

    MP_THREAD_GIL_EXIT();
    // NOTE(review): spins forever instead of returning -- upstream
    // modthread.c returns here; presumably this port's thread layer
    // cannot handle the entry function returning.  Confirm before
    // removing, since returning would run the (unreachable) line below.
    for(;;);
    return NULL;
}

// _thread.start_new_thread(function, args[, kwargs])
STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) {
    // This structure holds the Python function and arguments for thread entry.
    // We copy all arguments into this structure to keep ownership of them.
    // We must be very careful about root pointers because this pointer may
    // disappear from our address space before the thread is created.
    thread_entry_args_t *th_args;

    // get positional arguments
    size_t pos_args_len;
    mp_obj_t *pos_args_items;
    mp_obj_get_array(args[1], &pos_args_len, &pos_args_items);

    // check for keyword arguments
    if (n_args == 2) {
        // just position arguments
        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len);
        th_args->n_kw = 0;
    } else {
        // positional and keyword arguments
        if (mp_obj_get_type(args[2]) != &mp_type_dict) {
            mp_raise_TypeError("expecting a dict for keyword args");
        }
        mp_map_t *map = &((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[2]))->map;
        // allocate 2 extra slots per kwarg for its key and value
        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len + 2 * map->used);
        th_args->n_kw = map->used;
        // copy across the keyword arguments
        for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) {
            if (MP_MAP_SLOT_IS_FILLED(map, i)) {
                th_args->args[n++] = map->table[i].key;
                th_args->args[n++] = map->table[i].value;
            }
        }
    }

    // copy across the positional arguments
    th_args->n_args = pos_args_len;
    memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t));

    // pass our locals and globals into the new thread
    th_args->dict_locals = mp_locals_get();
    th_args->dict_globals = mp_globals_get();

    // set the stack size to use
    th_args->stack_size = thread_stack_size;

    // set the function for thread entry
    th_args->fun = args[0];

    // spawn the thread!
    mp_thread_create(thread_entry, th_args, &th_args->stack_size);

    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_start_new_thread_obj, 2, 3, mod_thread_start_new_thread);

// _thread.exit(): raise SystemExit, which thread_entry swallows.
STATIC mp_obj_t mod_thread_exit(void) {
    nlr_raise(mp_obj_new_exception(&mp_type_SystemExit));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_exit_obj, mod_thread_exit);

// _thread.allocate_lock() -> new unlocked lock object.
STATIC mp_obj_t mod_thread_allocate_lock(void) {
    return MP_OBJ_FROM_PTR(mp_obj_new_thread_lock());
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_allocate_lock_obj, mod_thread_allocate_lock);

STATIC const mp_rom_map_elem_t mp_module_thread_globals_table[] = {
    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__thread) },
    { MP_ROM_QSTR(MP_QSTR_LockType), MP_ROM_PTR(&mp_type_thread_lock) },
    { MP_ROM_QSTR(MP_QSTR_get_ident), MP_ROM_PTR(&mod_thread_get_ident_obj) },
    { MP_ROM_QSTR(MP_QSTR_stack_size), MP_ROM_PTR(&mod_thread_stack_size_obj) },
    { MP_ROM_QSTR(MP_QSTR_start_new_thread), MP_ROM_PTR(&mod_thread_start_new_thread_obj) },
    { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mod_thread_exit_obj) },
    { MP_ROM_QSTR(MP_QSTR_allocate_lock), MP_ROM_PTR(&mod_thread_allocate_lock_obj) },
};
STATIC MP_DEFINE_CONST_DICT(mp_module_thread_globals, mp_module_thread_globals_table);

const mp_obj_module_t mp_module_thread = {
    .base = { &mp_type_module },
    .globals = (mp_obj_dict_t*)&mp_module_thread_globals,
};

#endif // MICROPY_PY_THREAD
68791.c
#include <assert.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>

#include "base/wbuf.h"
#include "geometry.h"
#include "quadedge.h"

/* One quad-edge record: four directed edge references stored
 * consecutively (the edge, its rotated dual, its reverse, and the
 * inverse rotation).  An eref packs a slot index and a 2-bit rotation;
 * NOTE(review): exact encoding assumed from mkref()/rot() usage --
 * confirm in quadedge.h. */
typedef eref quadedge[4];

/* Initialize an empty edge set: empty edge/data buffers and an empty
 * free list (-1 is the list terminator). */
void init_eset(struct eset *set)
{
	wbuf_init(&set->edges);
	wbuf_init(&set->data);
	set->free = -1;
}

/* Number of quad-edge records ever allocated (including freed ones),
 * i.e. an exclusive upper bound on valid record indices. */
eref eset_max_edge(struct eset *set)
{
	return wbuf_nmemb(&set->edges, sizeof (quadedge));
}

/* Release all storage owned by the set. */
void term_eset(struct eset *set)
{
	wbuf_term(&set->edges);
	wbuf_term(&set->data);
}

/* Set up a fresh quad-edge at base reference e0 (a multiple of 4):
 * the canonical "isolated edge" ring structure, with the two data
 * slots (origin/destination, at e0/2 and e0/2 + 1) cleared. */
void init_quadedge(eref *qe, float2 **data, eref e0)
{
	qe[e0 + 0] = mkref(e0, 0);
	qe[e0 + 1] = mkref(e0, 3);
	qe[e0 + 2] = mkref(e0, 2);
	qe[e0 + 3] = mkref(e0, 1);
	data[e0 >> 1] = 0;
	data[(e0 >> 1) + 1] = 0;
}

/* allocate *n* empty subdivisions (edges) and store references (eref)
   in the *n* following arguments */
int eset_alloc(struct eset *set, size_t n, ...)
{
	eref *alloc, *p, *edges, e0;
	float2 **data;
	size_t i, j, nfree;
	va_list ap;

	/* look in free list first; a freed record's slot 0 holds the
	 * next free record's base reference */
	e0 = set->free;
	edges = set->edges.begin;
	nfree = 0;
	while (e0 >= 0 && nfree < n) {
		e0 = edges[e0];
		nfree++;
	}
	/* allocate the rest
	 * NOTE(review): when nfree == n this still calls wbuf_alloc with
	 * size 0 -- relies on wbuf_alloc(buf, 0) returning non-NULL;
	 * confirm against base/wbuf.h.  Also, if the second wbuf_alloc
	 * fails the first allocation is not rolled back. */
	alloc = wbuf_alloc(&set->edges, (n - nfree) * sizeof (quadedge));
	if (!alloc) { return -1; }
	if (!wbuf_alloc(&set->data, (n - nfree) * sizeof (float2 *[2]))) {
		return -1;
	}
	/* buffers may have been reallocated above; refresh base pointers */
	edges = set->edges.begin;
	data = set->data.begin;
	va_start(ap, n);
	for (i = 0, j = 0; i < n; i++) {
		if (j < nfree) {
			/* pop a record off the free list */
			e0 = set->free;
			set->free = edges[e0];
			p = edges + e0;
			j++;
		} else {
			/* take the next record from the new allocation */
			p = alloc + 4*(i - j);
			e0 = p - edges;
		}
		*va_arg(ap, eref *) = e0;
		init_quadedge(edges, data, e0);
	}
	va_end(ap);
	return 0;
}

/* Splice two edge rings at a and b (Guibas-Stolfi splice): swaps the
 * onext pointers of a and b and of their duals, merging or splitting
 * the origin rings. */
void eset_splice(struct eset *set, eref a, eref b)
{
	eref alpha, beta, tmp, *edges;

	edges = set->edges.begin;
	alpha = rot(onext(set, a));
	beta = rot(onext(set, b));

	tmp = edges[a];
	edges[a] = edges[b];
	edges[b] = tmp;

	tmp = edges[alpha];
	edges[alpha] = edges[beta];
	edges[beta] = tmp;
}

/* connect the destination of `a` to the origin of `b` with the new
   edge `c` so that left(a) = left(b) = left(c) */
void eset_connect(struct eset *set, eref a, eref b, eref c)
{
	*org(set, c) = *dest(set, a);
	*dest(set, c) = *org(set, b);
	eset_splice(set, c, lnext(set, a));
	eset_splice(set, sym(c), b);
}

/* Detach edge e from both of its rings and return its quad-edge record
 * to the free list (record base e & ~0x3; slot 0 becomes the free-list
 * link). */
void eset_delete(struct eset *set, eref e)
{
	eset_splice(set, e, oprev(set, e));
	eset_splice(set, sym(e), oprev(set, sym(e)));

	/* add to free list */
	((eref *)set->edges.begin)[e & ~0x3] = set->free;
	set->free = e & ~0x3;
}
279920.c
#include <stdio.h>

/* Map a numeric score to a letter grade.
 * 80-100 -> 'A', 60-79 -> 'B', 40-59 -> 'C', 0-39 -> 'D';
 * anything out of the 0-100 range -> 'F' (matches the original
 * switch's default case).  The original used the GCC-only
 * `case 80...100` range extension; this version is portable C. */
static char grade(int score)
{
    if (score < 0 || score > 100) {
        return 'F';
    }
    if (score >= 80) {
        return 'A';
    }
    if (score >= 60) {
        return 'B';
    }
    if (score >= 40) {
        return 'C';
    }
    return 'D';
}

/* Read one integer score and print its letter grade. */
int main(void)
{
    int num1;

    printf("Num 1 : ");
    /* The original ignored scanf's result and used num1 uninitialized
     * on bad input; reject non-numeric input instead. */
    if (scanf("%d", &num1) != 1) {
        fprintf(stderr, "invalid input\n");
        return 1;
    }
    printf("%c\n", grade(num1));
    return 0;
}
671280.c
/* $OpenBSD$ */ /* * Copyright (c) 2009 Nicholas Marriott <nicholas.marriott@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/param.h> #include <sys/stat.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "tmux.h" char * osdep_get_name (int fd, __unused char *tty) { FILE *f; char *path, *buf; size_t len; int ch; pid_t pgrp; if ((pgrp = tcgetpgrp (fd)) == -1) return (NULL); xasprintf (&path, "/proc/%lld/cmdline", (long long) pgrp); if ((f = fopen (path, "r")) == NULL) { free (path); return (NULL); } free (path); len = 0; buf = NULL; while ((ch = fgetc (f)) != EOF) { if (ch == '\0') break; buf = xrealloc (buf, len + 2); buf[len++] = ch; } if (buf != NULL) buf[len] = '\0'; fclose (f); return (buf); } char * osdep_get_cwd (int fd) { static char target[MAXPATHLEN + 1]; char *path; pid_t pgrp; ssize_t n; if ((pgrp = tcgetpgrp (fd)) == -1) return (NULL); xasprintf (&path, "/proc/%lld/cwd", (long long) pgrp); n = readlink (path, target, MAXPATHLEN); free (path); if (n > 0) { target[n] = '\0'; return (target); } return (NULL); } struct event_base * osdep_event_init (void) { return (event_init ()); }
370955.c
/* * Automatically Generated from Mathematica. * Thu 4 Nov 2021 16:59:07 GMT-04:00 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "H_RightToeRollB_src.h" #ifdef _MSC_VER #define INLINE __forceinline /* use __forceinline (VC++ specific) */ #else #define INLINE static __inline__ /* use standard inline */ #endif /** * Copied from Wolfram Mathematica C Definitions file mdefs.hpp * Changed marcos to inline functions (Eric Cousineau) */ INLINE double Power(double x, double y) { return pow(x, y); } INLINE double Sqrt(double x) { return sqrt(x); } INLINE double Abs(double x) { return fabs(x); } INLINE double Exp(double x) { return exp(x); } INLINE double Log(double x) { return log(x); } INLINE double Sin(double x) { return sin(x); } INLINE double Cos(double x) { return cos(x); } INLINE double Tan(double x) { return tan(x); } INLINE double Csc(double x) { return 1.0/sin(x); } INLINE double Sec(double x) { return 1.0/cos(x); } INLINE double ArcSin(double x) { return asin(x); } INLINE double ArcCos(double x) { return acos(x); } /* update ArcTan function to use atan2 instead. 
*/ INLINE double ArcTan(double x, double y) { return atan2(y,x); } INLINE double Sinh(double x) { return sinh(x); } INLINE double Cosh(double x) { return cosh(x); } INLINE double Tanh(double x) { return tanh(x); } #define E 2.71828182845904523536029 #define Pi 3.14159265358979323846264 #define Degree 0.01745329251994329576924 /* * Sub functions */ static void output1(double *p_output1,const double *var1) { double t670; double t780; double t677; double t750; double t805; double t849; double t856; double t865; double t911; double t966; double t1010; double t1133; double t763; double t1095; double t1120; double t662; double t1152; double t1178; double t1218; double t1406; double t1129; double t1251; double t1252; double t646; double t1407; double t1451; double t1488; double t1533; double t1340; double t1498; double t1509; double t645; double t1575; double t1595; double t1605; double t1709; double t1515; double t1617; double t1675; double t632; double t1714; double t1743; double t1768; double t1832; double t1683; double t1780; double t1802; double t631; double t1834; double t1859; double t1877; double t1926; double t1823; double t1897; double t1901; double t621; double t1952; double t2033; double t2046; double t585; double t2201; double t2222; double t2231; double t2190; double t2234; double t2250; double t2298; double t2316; double t2329; double t2295; double t2331; double t2358; double t2405; double t2417; double t2421; double t2400; double t2426; double t2481; double t2515; double t2542; double t2573; double t2514; double t2586; double t2591; double t2671; double t2709; double t2715; double t2635; double t2727; double t2733; double t2784; double t2818; double t2846; double t2090; double t2745; double t2873; double t2876; double t2918; double t2937; double t2964; double t3037; double t3050; double t3052; double t3076; double t3081; double t3119; double t3070; double t3123; double t3138; double t3157; double t3163; double t3166; double t3152; double t3175; double 
t3186; double t3256; double t3276; double t3279; double t3226; double t3295; double t3306; double t3329; double t3336; double t3338; double t3327; double t3345; double t3346; double t3355; double t3359; double t3368; double t3352; double t3379; double t3398; double t3447; double t3461; double t3477; double t1910; double t2050; double t2068; double t2092; double t2115; double t2156; double t2908; double t2965; double t2973; double t2987; double t3003; double t3005; double t3446; double t3486; double t3491; double t3495; double t3497; double t3510; double t3675; double t3682; double t3683; double t2069; double t2168; double t2176; double t3568; double t3570; double t3571; double t3694; double t3695; double t3703; double t2976; double t3027; double t3036; double t3579; double t3618; double t3630; double t3494; double t3532; double t3563; double t3642; double t3652; double t3672; t670 = Cos(var1[3]); t780 = Cos(var1[21]); t677 = Cos(var1[4]); t750 = Sin(var1[21]); t805 = Cos(var1[5]); t849 = Sin(var1[3]); t856 = -1.*t805*t849; t865 = Sin(var1[4]); t911 = Sin(var1[5]); t966 = t670*t865*t911; t1010 = t856 + t966; t1133 = Cos(var1[22]); t763 = -1.*t670*t677*t750; t1095 = t780*t1010; t1120 = t763 + t1095; t662 = Sin(var1[22]); t1152 = t780*t670*t677; t1178 = t750*t1010; t1218 = t1152 + t1178; t1406 = Cos(var1[23]); t1129 = t662*t1120; t1251 = t1133*t1218; t1252 = t1129 + t1251; t646 = Sin(var1[23]); t1407 = t1133*t1120; t1451 = -1.*t662*t1218; t1488 = t1407 + t1451; t1533 = Cos(var1[24]); t1340 = t646*t1252; t1498 = t1406*t1488; t1509 = t1340 + t1498; t645 = Sin(var1[24]); t1575 = t1406*t1252; t1595 = -1.*t646*t1488; t1605 = t1575 + t1595; t1709 = Cos(var1[25]); t1515 = t645*t1509; t1617 = t1533*t1605; t1675 = t1515 + t1617; t632 = Sin(var1[25]); t1714 = t1533*t1509; t1743 = -1.*t645*t1605; t1768 = t1714 + t1743; t1832 = Cos(var1[26]); t1683 = -1.*t632*t1675; t1780 = t1709*t1768; t1802 = t1683 + t1780; t631 = Sin(var1[26]); t1834 = t1709*t1675; t1859 = t632*t1768; t1877 = 
t1834 + t1859; t1926 = Cos(var1[30]); t1823 = t631*t1802; t1897 = t1832*t1877; t1901 = t1823 + t1897; t621 = Sin(var1[30]); t1952 = t1832*t1802; t2033 = -1.*t631*t1877; t2046 = t1952 + t2033; t585 = Sin(var1[31]); t2201 = t670*t805; t2222 = t849*t865*t911; t2231 = t2201 + t2222; t2190 = -1.*t677*t750*t849; t2234 = t780*t2231; t2250 = t2190 + t2234; t2298 = t780*t677*t849; t2316 = t750*t2231; t2329 = t2298 + t2316; t2295 = t662*t2250; t2331 = t1133*t2329; t2358 = t2295 + t2331; t2405 = t1133*t2250; t2417 = -1.*t662*t2329; t2421 = t2405 + t2417; t2400 = t646*t2358; t2426 = t1406*t2421; t2481 = t2400 + t2426; t2515 = t1406*t2358; t2542 = -1.*t646*t2421; t2573 = t2515 + t2542; t2514 = t645*t2481; t2586 = t1533*t2573; t2591 = t2514 + t2586; t2671 = t1533*t2481; t2709 = -1.*t645*t2573; t2715 = t2671 + t2709; t2635 = -1.*t632*t2591; t2727 = t1709*t2715; t2733 = t2635 + t2727; t2784 = t1709*t2591; t2818 = t632*t2715; t2846 = t2784 + t2818; t2090 = Cos(var1[31]); t2745 = t631*t2733; t2873 = t1832*t2846; t2876 = t2745 + t2873; t2918 = t1832*t2733; t2937 = -1.*t631*t2846; t2964 = t2918 + t2937; t3037 = t750*t865; t3050 = t780*t677*t911; t3052 = t3037 + t3050; t3076 = -1.*t780*t865; t3081 = t677*t750*t911; t3119 = t3076 + t3081; t3070 = t662*t3052; t3123 = t1133*t3119; t3138 = t3070 + t3123; t3157 = t1133*t3052; t3163 = -1.*t662*t3119; t3166 = t3157 + t3163; t3152 = t646*t3138; t3175 = t1406*t3166; t3186 = t3152 + t3175; t3256 = t1406*t3138; t3276 = -1.*t646*t3166; t3279 = t3256 + t3276; t3226 = t645*t3186; t3295 = t1533*t3279; t3306 = t3226 + t3295; t3329 = t1533*t3186; t3336 = -1.*t645*t3279; t3338 = t3329 + t3336; t3327 = -1.*t632*t3306; t3345 = t1709*t3338; t3346 = t3327 + t3345; t3355 = t1709*t3306; t3359 = t632*t3338; t3368 = t3355 + t3359; t3352 = t631*t3346; t3379 = t1832*t3368; t3398 = t3352 + t3379; t3447 = t1832*t3346; t3461 = -1.*t631*t3368; t3477 = t3447 + t3461; t1910 = -1.*t621*t1901; t2050 = t1926*t2046; t2068 = t1910 + t2050; t2092 = t1926*t1901; t2115 = 
t621*t2046; t2156 = t2092 + t2115; t2908 = -1.*t621*t2876; t2965 = t1926*t2964; t2973 = t2908 + t2965; t2987 = t1926*t2876; t3003 = t621*t2964; t3005 = t2987 + t3003; t3446 = -1.*t621*t3398; t3486 = t1926*t3477; t3491 = t3446 + t3486; t3495 = t1926*t3398; t3497 = t621*t3477; t3510 = t3495 + t3497; t3675 = t670*t805*t865; t3682 = t849*t911; t3683 = t3675 + t3682; t2069 = t585*t2068; t2168 = t2090*t2156; t2176 = t2069 + t2168; t3568 = t2090*t2068; t3570 = -1.*t585*t2156; t3571 = t3568 + t3570; t3694 = t805*t849*t865; t3695 = -1.*t670*t911; t3703 = t3694 + t3695; t2976 = t585*t2973; t3027 = t2090*t3005; t3036 = t2976 + t3027; t3579 = t2090*t2973; t3618 = -1.*t585*t3005; t3630 = t3579 + t3618; t3494 = t585*t3491; t3532 = t2090*t3510; t3563 = t3494 + t3532; t3642 = t2090*t3491; t3652 = -1.*t585*t3510; t3672 = t3642 + t3652; p_output1[0]=t2176; p_output1[1]=t3036; p_output1[2]=t3563; p_output1[3]=0; p_output1[4]=t3571; p_output1[5]=t3630; p_output1[6]=t3672; p_output1[7]=0; p_output1[8]=t3683; p_output1[9]=t3703; p_output1[10]=t677*t805; p_output1[11]=0; p_output1[12]=-0.0181*t2176 + 0.009551*t3571 - 0.054164*t3683 + var1[0]; p_output1[13]=-0.0181*t3036 + 0.009551*t3630 - 0.054164*t3703 + var1[1]; p_output1[14]=-0.0181*t3563 + 0.009551*t3672 - 0.054164*t677*t805 + var1[2]; p_output1[15]=1.; } void H_RightToeRollB_src(double *p_output1, const double *var1) { /* Call Subroutines */ output1(p_output1, var1); }
945264.c
/**************************************************************************** * @file main.c * @version V2.0 * $Revision: 4 $ * $Date: 14/11/27 2:33p $ * @brief Perform A/D Conversion with ADC single cycle scan mode. * @note * Copyright (C) 2014 Nuvoton Technology Corp. All rights reserved. * ******************************************************************************/ #include <stdio.h> #include "NUC200Series.h" #define PLLCON_SETTING CLK_PLLCON_50MHz_HXT #define PLL_CLOCK 50000000 /*---------------------------------------------------------------------------------------------------------*/ /* Define Function Prototypes */ /*---------------------------------------------------------------------------------------------------------*/ void SYS_Init(void); void UART0_Init(void); void AdcSingleCycleScanModeTest(void); void SYS_Init(void) { /*---------------------------------------------------------------------------------------------------------*/ /* Init System Clock */ /*---------------------------------------------------------------------------------------------------------*/ /* Enable Internal RC 22.1184MHz clock */ CLK->PWRCON |= CLK_PWRCON_OSC22M_EN_Msk; /* Waiting for Internal RC clock ready */ while(!(CLK->CLKSTATUS & CLK_CLKSTATUS_OSC22M_STB_Msk)); /* Switch HCLK clock source to Internal RC */ CLK->CLKSEL0 &= ~CLK_CLKSEL0_HCLK_S_Msk; CLK->CLKSEL0 |= CLK_CLKSEL0_HCLK_S_HIRC; /* Enable external XTAL 12MHz clock */ CLK->PWRCON |= CLK_PWRCON_XTL12M_EN_Msk; /* Waiting for external XTAL clock ready */ while(!(CLK->CLKSTATUS & CLK_CLKSTATUS_XTL12M_STB_Msk)); /* Set core clock as PLL_CLOCK from PLL */ CLK->PLLCON = PLLCON_SETTING; while(!(CLK->CLKSTATUS & CLK_CLKSTATUS_PLL_STB_Msk)); CLK->CLKSEL0 &= (~CLK_CLKSEL0_HCLK_S_Msk); CLK->CLKSEL0 |= CLK_CLKSEL0_HCLK_S_PLL; /* Update System Core Clock */ /* User can use SystemCoreClockUpdate() to calculate PllClock, SystemCoreClock and CycylesPerUs automatically. 
*/ //SystemCoreClockUpdate(); PllClock = PLL_CLOCK; // PLL SystemCoreClock = PLL_CLOCK / 1; // HCLK CyclesPerUs = PLL_CLOCK / 1000000; // For SYS_SysTickDelay() /* Enable UART module clock */ CLK->APBCLK |= CLK_APBCLK_UART0_EN_Msk; /* Enable ADC module clock */ CLK->APBCLK |= CLK_APBCLK_ADC_EN_Msk ; /* Select UART module clock source */ CLK->CLKSEL1 &= ~CLK_CLKSEL1_UART_S_Msk; CLK->CLKSEL1 |= CLK_CLKSEL1_UART_S_HXT; /* Select ADC module clock source */ CLK->CLKSEL1 &= CLK_CLKSEL1_ADC_S_Msk ; CLK->CLKSEL1 |= CLK_CLKSEL1_ADC_S_HIRC ; /* ADC clock source is 22.1184MHz, set divider to 7, ADC clock is 22.1184/7 MHz */ CLK->CLKDIV = (CLK->CLKDIV & ~CLK_CLKDIV_ADC_N_Msk) | (((7) - 1) << CLK_CLKDIV_ADC_N_Pos); /*---------------------------------------------------------------------------------------------------------*/ /* Init I/O Multi-function */ /*---------------------------------------------------------------------------------------------------------*/ /* Set GPB multi-function pins for UART0 RXD and TXD */ SYS->GPB_MFP &= ~(SYS_GPB_MFP_PB0_Msk | SYS_GPB_MFP_PB1_Msk); SYS->GPB_MFP |= SYS_GPB_MFP_PB0_UART0_RXD | SYS_GPB_MFP_PB1_UART0_TXD; /* Disable the GPA0 - GPA3 digital input path to avoid the leakage current. 
*/ PA->OFFD |= 0xF << GPIO_OFFD_OFFD_Pos; /* Configure the GPA0 - GPA3 ADC analog input pins */ SYS->GPA_MFP &= ~(SYS_GPA_MFP_PA0_Msk | SYS_GPA_MFP_PA1_Msk | SYS_GPA_MFP_PA2_Msk | SYS_GPA_MFP_PA3_Msk) ; SYS->GPA_MFP |= SYS_GPA_MFP_PA0_ADC0 | SYS_GPA_MFP_PA1_ADC1 | SYS_GPA_MFP_PA2_ADC2 | SYS_GPA_MFP_PA3_ADC3 ; SYS->ALT_MFP1 = 0; } /*---------------------------------------------------------------------------------------------------------*/ /* Init UART */ /*---------------------------------------------------------------------------------------------------------*/ void UART0_Init() { /* Reset UART IP */ SYS->IPRSTC2 |= SYS_IPRSTC2_UART0_RST_Msk; SYS->IPRSTC2 &= ~SYS_IPRSTC2_UART0_RST_Msk; /* Configure UART0 and set UART0 Baudrate */ UART0->BAUD = UART_BAUD_MODE2 | UART_BAUD_MODE2_DIVIDER(__HXT, 115200); UART0->LCR = UART_WORD_LEN_8 | UART_PARITY_NONE | UART_STOP_BIT_1; } /*---------------------------------------------------------------------------------------------------------*/ /* Function: AdcSingleCycleScanModeTest */ /* */ /* Parameters: */ /* None. */ /* */ /* Returns: */ /* None. */ /* */ /* Description: */ /* ADC single cycle scan mode test. 
*/
/*---------------------------------------------------------------------------------------------------------*/
void AdcSingleCycleScanModeTest()
{
    uint8_t u8Option;               /* menu key read from the console */
    uint32_t u32ChannelCount;       /* loop index over ADC result registers */
    int32_t i32ConversionData;      /* 12-bit conversion result extracted from ADDR */

    printf("\n");
    printf("+----------------------------------------------------------------------+\n");
    printf("| ADC single cycle scan mode sample code |\n");
    printf("+----------------------------------------------------------------------+\n");

    /* Interactive loop: repeat single-cycle scans until a key other than '1'/'2' exits. */
    while(1)
    {
        printf("\n\nSelect input mode:\n");
        printf("  [1] Single end input (channel 0, 1, 2 and 3)\n");
        printf("  [2] Differential input (input channel pair 0 and 1)\n");
        printf("  Other keys: exit single cycle scan mode test\n");
        u8Option = getchar();
        if(u8Option == '1')
        {
            /* Set the ADC operation mode as single-cycle, input mode as single-end and enable the ADC converter */
            ADC->ADCR = (ADC_ADCR_ADMD_SINGLE_CYCLE | ADC_ADCR_DIFFEN_SINGLE_END | ADC_ADCR_ADEN_CONVERTER_ENABLE);

            /* Enable analog input channel 0, 1, 2 and 3 */
            /* NOTE(review): '|=' OR-ed with an expression that already clears CHEN keeps any
             * previously enabled channels set; a plain '=' would match the clear-then-set intent.
             * Confirm against the original vendor sample before changing. */
            ADC->ADCHER |= ((ADC->ADCHER & ~ADC_ADCHER_CHEN_Msk) | (0xF));

            /* Clear the A/D interrupt flag for safe */
            ADC->ADSR = ADC_ADSR_ADF_Msk;

            /* Start A/D conversion */
            ADC->ADCR |= ADC_ADCR_ADST_Msk;

            /* Wait conversion done */
            while(!((ADC->ADSR & ADC_ADSR_ADF_Msk) >> ADC_ADSR_ADF_Pos));

            /* Read back one result register per scanned channel (0..3). */
            for(u32ChannelCount = 0; u32ChannelCount < 4; u32ChannelCount++)
            {
                i32ConversionData = (ADC->ADDR[(u32ChannelCount)] & ADC_ADDR_RSLT_Msk) >> ADC_ADDR_RSLT_Pos;
                printf("Conversion result of channel %d: 0x%X (%d)\n", u32ChannelCount, i32ConversionData, i32ConversionData);
            }
        }
        else if(u8Option == '2')
        {
            /* Set the ADC operation mode as single-cycle, input mode as differential and enable the ADC converter */
            ADC->ADCR = (ADC_ADCR_ADMD_SINGLE_CYCLE | ADC_ADCR_DIFFEN_DIFFERENTIAL | ADC_ADCR_ADEN_CONVERTER_ENABLE);

            /* Enable analog input channel 0 and 2 */
            /* NOTE(review): the comment says channels 0 and 2, but 0xF enables channels 0-3;
             * 0x5 would match the comment for differential pairs 0 and 1 — TODO confirm. */
            ADC->ADCHER |= ((ADC->ADCHER & ~ADC_ADCHER_CHEN_Msk) | (0xF));

            /* Clear the A/D interrupt flag for safe */
            ADC->ADSR = ADC_ADSR_ADF_Msk;

            /* Start A/D conversion */
            ADC->ADCR |= ADC_ADCR_ADST_Msk;

            /* Wait conversion done */
            while(!((ADC->ADSR & ADC_ADSR_ADF_Msk) >> ADC_ADSR_ADF_Pos));

            /* Differential results land in the even-numbered result registers (0 and 2). */
            for(u32ChannelCount = 0; u32ChannelCount < 2; u32ChannelCount++)
            {
                i32ConversionData = (ADC->ADDR[(u32ChannelCount * 2)] & ADC_ADDR_RSLT_Msk) >> ADC_ADDR_RSLT_Pos;
                printf("Conversion result of differential input pair %d: 0x%X (%d)\n", u32ChannelCount, i32ConversionData, i32ConversionData);
            }
        }
        else
            /* Any other key exits the test. */
            return ;
    }
}

/*---------------------------------------------------------------------------------------------------------*/
/* MAIN function                                                                                           */
/*---------------------------------------------------------------------------------------------------------*/
int32_t main(void)
{
    /* Unlock protected registers */
    SYS_UnlockReg();

    /* Init System, IP clock and multi-function I/O */
    SYS_Init();

    /* Lock protected registers */
    SYS_LockReg();

    /* Init UART0 for printf */
    UART0_Init();

    /*---------------------------------------------------------------------------------------------------------*/
    /* SAMPLE CODE                                                                                             */
    /*---------------------------------------------------------------------------------------------------------*/

    printf("\nSystem clock rate: %d Hz", SystemCoreClock);

    /* Single cycle scan mode test */
    AdcSingleCycleScanModeTest();

    /* Reset ADC module */
    SYS->IPRSTC2 |= (1 << SYS_IPRSTC2_ADC_RST_Pos) ;
    SYS->IPRSTC2 &= ~(1 << (SYS_IPRSTC2_ADC_RST_Pos)) ;

    /* Disable ADC IP clock */
    CLK->APBCLK &= ~CLK_APBCLK_ADC_EN_Msk;

    /* Disable External Interrupt */
    NVIC_DisableIRQ(ADC_IRQn);

    printf("Exit ADC sample code\n");

    /* Park the CPU; embedded samples never return from main(). */
    while(1);
}
962522.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
#include <subdev/therm.h>
#include <subdev/devinit.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/disp.h>

/*
 * nv20_identify: populate the subdev/engine object-class table for an
 * NV20-family ("Kelvin") GPU, keyed on device->chipset.
 *
 * The four supported chipsets (0x20, 0x25, 0x28, 0x2a) share the same
 * subdev/engine set; they differ only in the FB class (nv20 vs nv25) and
 * the GR (graphics) class (nv20 / nv25 / nv2a).
 *
 * Returns 0 on success, or -EINVAL for an unrecognized Kelvin chipset.
 */
int
nv20_identify(struct nouveau_device *device)
{
	switch (device->chipset) {
	case 0x20:
		device->cname = "NV20";
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
		break;
	case 0x25:
		device->cname = "NV25";
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
		break;
	case 0x28:
		/* NV28 uses the NV25 FB and GR classes. */
		device->cname = "NV28";
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
		break;
	case 0x2a:
		/* NV2A (Xbox GPU variant) has its own GR class. */
		device->cname = "NV2A";
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
		break;
	default:
		nv_fatal(device, "unknown Kelvin chipset\n");
		return -EINVAL;
	}

	return 0;
}
458918.c
// Meridian 59, Copyright 1994-2012 Andrew Kirmse and Chris Kirmse. // All rights reserved. // // This software is distributed under a license that is described in // the LICENSE file that accompanies it. // // Meridian is a registered trademark. /* * util.c: Utility functions for club. */ #include "club.h" #define MAXSTRINGLEN 255 void CenterWindow(HWND hwnd, HWND hwndParent) { RECT rcDlg, rcParent; int screen_width, screen_height, x, y; /* If dialog has no parent, then its parent is really the desktop */ if (hwndParent == NULL) hwndParent = GetDesktopWindow(); GetWindowRect(hwndParent, &rcParent); GetWindowRect(hwnd, &rcDlg); /* Move dialog rectangle to upper left (0, 0) for ease of calculation */ OffsetRect(&rcDlg, -rcDlg.left, -rcDlg.top); x = rcParent.left + (rcParent.right - rcParent.left)/2 - rcDlg.right/2; y = rcParent.top + (rcParent.bottom - rcParent.top)/2 - rcDlg.bottom/2; /* Make sure that child window is completely on the screen */ screen_width = GetSystemMetrics(SM_CXSCREEN); screen_height = GetSystemMetrics(SM_CYSCREEN); x = max(0, min(x, screen_width - rcDlg.right)); y = max(0, min(y, screen_height - rcDlg.bottom)); SetWindowPos(hwnd, NULL, x, y, 0, 0, SWP_NOSIZE | SWP_NOACTIVATE); } /***********************************************************************/ /* * ClearMessageQueue: Process outstanding Windows messages. Use when * a time-consuming operation may be blocking messages. * This procedure should be called as infrequently as possible; it is * preferable to have all messages go through the main window loop. */ void ClearMessageQueue(void) { MSG msg; while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) { TranslateMessage(&msg); DispatchMessage(&msg); } } /******************************************************************************/ /* * GetString: Load and return resource string with given resource identifier. * String is loaded from given module's resources. 
* NOTE: Only use 4 strings from this function at a time; it uses a circular * buffer of static strings!! */ char *GetString(HMODULE hModule, int idnum) { static int index = 0; static char szLoadedString[4][MAXSTRINGLEN]; index = (index + 1) % 4; szLoadedString[index][0] = 0; LoadString (hModule, idnum, szLoadedString[index], MAXSTRINGLEN); return (LPSTR)szLoadedString[index]; }
416794.c
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
* * libcfs/libcfs/tracefile.c * * Author: Zach Brown <zab@clusterfs.com> * Author: Phil Schwan <phil@clusterfs.com> */ #define DEBUG_SUBSYSTEM S_LNET #define LUSTRE_TRACEFILE_PRIVATE #include "tracefile.h" #include "../../include/linux/libcfs/libcfs.h" /* XXX move things up to the top, comment */ union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned; char cfs_tracefile[TRACEFILE_NAME_SIZE]; long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; static struct tracefiled_ctl trace_tctl; static DEFINE_MUTEX(cfs_trace_thread_mutex); static int thread_running; static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_cpu_data *tcd); static inline struct cfs_trace_page * cfs_tage_from_list(struct list_head *list) { return list_entry(list, struct cfs_trace_page, linkage); } static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) { struct page *page; struct cfs_trace_page *tage; /* My caller is trying to free memory */ if (!in_interrupt() && memory_pressure_get()) return NULL; /* * Don't spam console with allocation failures: they will be reported * by upper layer anyway. 
*/ gfp |= __GFP_NOWARN; page = alloc_page(gfp); if (page == NULL) return NULL; tage = kmalloc(sizeof(*tage), gfp); if (tage == NULL) { __free_page(page); return NULL; } tage->page = page; atomic_inc(&cfs_tage_allocated); return tage; } static void cfs_tage_free(struct cfs_trace_page *tage) { __LASSERT(tage != NULL); __LASSERT(tage->page != NULL); __free_page(tage->page); kfree(tage); atomic_dec(&cfs_tage_allocated); } static void cfs_tage_to_tail(struct cfs_trace_page *tage, struct list_head *queue) { __LASSERT(tage != NULL); __LASSERT(queue != NULL); list_move_tail(&tage->linkage, queue); } int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, struct list_head *stock) { int i; /* * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) * from here: this will lead to infinite recursion. */ for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) { struct cfs_trace_page *tage; tage = cfs_tage_alloc(gfp); if (tage == NULL) break; list_add_tail(&tage->linkage, stock); } return i; } /* return a page that has 'len' bytes left at the end */ static struct cfs_trace_page * cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) { struct cfs_trace_page *tage; if (tcd->tcd_cur_pages > 0) { __LASSERT(!list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); if (tage->used + len <= PAGE_CACHE_SIZE) return tage; } if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { if (tcd->tcd_cur_stock_pages > 0) { tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); --tcd->tcd_cur_stock_pages; list_del_init(&tage->linkage); } else { tage = cfs_tage_alloc(GFP_ATOMIC); if (unlikely(tage == NULL)) { if ((!memory_pressure_get() || in_interrupt()) && printk_ratelimit()) printk(KERN_WARNING "cannot allocate a tage (%ld)\n", tcd->tcd_cur_pages); return NULL; } } tage->used = 0; tage->cpu = smp_processor_id(); tage->type = tcd->tcd_type; list_add_tail(&tage->linkage, &tcd->tcd_pages); tcd->tcd_cur_pages++; if (tcd->tcd_cur_pages > 
8 && thread_running) { struct tracefiled_ctl *tctl = &trace_tctl; /* * wake up tracefiled to process some pages. */ wake_up(&tctl->tctl_waitq); } return tage; } return NULL; } static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) { int pgcount = tcd->tcd_cur_pages / 10; struct page_collection pc; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; /* * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) * from here: this will lead to infinite recursion. */ if (printk_ratelimit()) printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", pgcount + 1, tcd->tcd_cur_pages); INIT_LIST_HEAD(&pc.pc_pages); spin_lock_init(&pc.pc_lock); list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { if (pgcount-- == 0) break; list_move_tail(&tage->linkage, &pc.pc_pages); tcd->tcd_cur_pages--; } put_pages_on_tcd_daemon_list(&pc, tcd); } /* return a page that has 'len' bytes left at the end */ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, unsigned long len) { struct cfs_trace_page *tage; /* * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) * from here: this will lead to infinite recursion. */ if (len > PAGE_CACHE_SIZE) { pr_err("cowardly refusing to write %lu bytes in a page\n", len); return NULL; } tage = cfs_trace_get_tage_try(tcd, len); if (tage != NULL) return tage; if (thread_running) cfs_tcd_shrink(tcd); if (tcd->tcd_cur_pages > 0) { tage = cfs_tage_from_list(tcd->tcd_pages.next); tage->used = 0; cfs_tage_to_tail(tage, &tcd->tcd_pages); } return tage; } int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, const char *format, ...) { va_list args; int rc; va_start(args, format); rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); va_end(args); return rc; } EXPORT_SYMBOL(libcfs_debug_msg); int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, const char *format1, va_list args, const char *format2, ...) 
{ struct cfs_trace_cpu_data *tcd = NULL; struct ptldebug_header header = {0}; struct cfs_trace_page *tage; /* string_buf is used only if tcd != NULL, and is always set then */ char *string_buf = NULL; char *debug_buf; int known_size; int needed = 85; /* average message length */ int max_nob; va_list ap; int depth; int i; int remain; int mask = msgdata->msg_mask; const char *file = kbasename(msgdata->msg_file); struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; tcd = cfs_trace_get_tcd(); /* cfs_trace_get_tcd() grabs a lock, which disables preemption and * pins us to a particular CPU. This avoids an smp_processor_id() * warning on Linux when debugging is enabled. */ cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); if (tcd == NULL) /* arch may not log in IRQ context */ goto console; if (tcd->tcd_cur_pages == 0) header.ph_flags |= PH_FLAG_FIRST_RECORD; if (tcd->tcd_shutting_down) { cfs_trace_put_tcd(tcd); tcd = NULL; goto console; } depth = __current_nesting_level(); known_size = strlen(file) + 1 + depth; if (msgdata->msg_fn) known_size += strlen(msgdata->msg_fn) + 1; if (libcfs_debug_binary) known_size += sizeof(header); /*/ * '2' used because vsnprintf return real size required for output * _without_ terminating NULL. * if needed is to small for this format. 
*/ for (i = 0; i < 2; i++) { tage = cfs_trace_get_tage(tcd, needed + known_size + 1); if (tage == NULL) { if (needed + known_size > PAGE_CACHE_SIZE) mask |= D_ERROR; cfs_trace_put_tcd(tcd); tcd = NULL; goto console; } string_buf = (char *)page_address(tage->page) + tage->used + known_size; max_nob = PAGE_CACHE_SIZE - tage->used - known_size; if (max_nob <= 0) { printk(KERN_EMERG "negative max_nob: %d\n", max_nob); mask |= D_ERROR; cfs_trace_put_tcd(tcd); tcd = NULL; goto console; } needed = 0; if (format1) { va_copy(ap, args); needed = vsnprintf(string_buf, max_nob, format1, ap); va_end(ap); } if (format2) { remain = max_nob - needed; if (remain < 0) remain = 0; va_start(ap, format2); needed += vsnprintf(string_buf + needed, remain, format2, ap); va_end(ap); } if (needed < max_nob) /* well. printing ok.. */ break; } if (*(string_buf+needed-1) != '\n') printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", file, msgdata->msg_line, msgdata->msg_fn); header.ph_len = known_size + needed; debug_buf = (char *)page_address(tage->page) + tage->used; if (libcfs_debug_binary) { memcpy(debug_buf, &header, sizeof(header)); tage->used += sizeof(header); debug_buf += sizeof(header); } /* indent message according to the nesting level */ while (depth-- > 0) { *(debug_buf++) = '.'; ++tage->used; } strcpy(debug_buf, file); tage->used += strlen(file) + 1; debug_buf += strlen(file) + 1; if (msgdata->msg_fn) { strcpy(debug_buf, msgdata->msg_fn); tage->used += strlen(msgdata->msg_fn) + 1; debug_buf += strlen(msgdata->msg_fn) + 1; } __LASSERT(debug_buf == string_buf); tage->used += needed; __LASSERT (tage->used <= PAGE_CACHE_SIZE); console: if ((mask & libcfs_printk) == 0) { /* no console output requested */ if (tcd != NULL) cfs_trace_put_tcd(tcd); return 1; } if (cdls != NULL) { if (libcfs_console_ratelimit && cdls->cdls_next != 0 && /* not first time ever */ !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { /* skipping a console message */ cdls->cdls_count++; if (tcd != 
NULL) cfs_trace_put_tcd(tcd); return 1; } if (cfs_time_after(cfs_time_current(), cdls->cdls_next + libcfs_console_max_delay + cfs_time_seconds(10))) { /* last timeout was a long time ago */ cdls->cdls_delay /= libcfs_console_backoff * 4; } else { cdls->cdls_delay *= libcfs_console_backoff; } if (cdls->cdls_delay < libcfs_console_min_delay) cdls->cdls_delay = libcfs_console_min_delay; else if (cdls->cdls_delay > libcfs_console_max_delay) cdls->cdls_delay = libcfs_console_max_delay; /* ensure cdls_next is never zero after it's been seen */ cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; } if (tcd != NULL) { cfs_print_to_console(&header, mask, string_buf, needed, file, msgdata->msg_fn); cfs_trace_put_tcd(tcd); } else { string_buf = cfs_trace_get_console_buffer(); needed = 0; if (format1 != NULL) { va_copy(ap, args); needed = vsnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, format1, ap); va_end(ap); } if (format2 != NULL) { remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; if (remain > 0) { va_start(ap, format2); needed += vsnprintf(string_buf+needed, remain, format2, ap); va_end(ap); } } cfs_print_to_console(&header, mask, string_buf, needed, file, msgdata->msg_fn); cfs_trace_put_console_buffer(string_buf); } if (cdls != NULL && cdls->cdls_count != 0) { string_buf = cfs_trace_get_console_buffer(); needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, "Skipped %d previous similar message%s\n", cdls->cdls_count, (cdls->cdls_count > 1) ? 
"s" : ""); cfs_print_to_console(&header, mask, string_buf, needed, file, msgdata->msg_fn); cfs_trace_put_console_buffer(string_buf); cdls->cdls_count = 0; } return 0; } EXPORT_SYMBOL(libcfs_debug_vmsg2); void cfs_trace_assertion_failed(const char *str, struct libcfs_debug_msg_data *msgdata) { struct ptldebug_header hdr; libcfs_panic_in_progress = 1; libcfs_catastrophe = 1; mb(); cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), msgdata->msg_file, msgdata->msg_fn); panic("Lustre debug assertion failure\n"); /* not reached */ } static void panic_collect_pages(struct page_collection *pc) { /* Do the collect_pages job on a single CPU: assumes that all other * CPUs have been stopped during a panic. If this isn't true for some * arch, this will have to be implemented separately in each arch. */ int i; int j; struct cfs_trace_cpu_data *tcd; INIT_LIST_HEAD(&pc->pc_pages); cfs_tcd_for_each(tcd, i, j) { list_splice_init(&tcd->tcd_pages, &pc->pc_pages); tcd->tcd_cur_pages = 0; if (pc->pc_want_daemon_pages) { list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages); tcd->tcd_cur_daemon_pages = 0; } } } static void collect_pages_on_all_cpus(struct page_collection *pc) { struct cfs_trace_cpu_data *tcd; int i, cpu; spin_lock(&pc->pc_lock); for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) { list_splice_init(&tcd->tcd_pages, &pc->pc_pages); tcd->tcd_cur_pages = 0; if (pc->pc_want_daemon_pages) { list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages); tcd->tcd_cur_daemon_pages = 0; } } } spin_unlock(&pc->pc_lock); } static void collect_pages(struct page_collection *pc) { INIT_LIST_HEAD(&pc->pc_pages); if (libcfs_panic_in_progress) panic_collect_pages(pc); else collect_pages_on_all_cpus(pc); } static void put_pages_back_on_all_cpus(struct page_collection *pc) { struct cfs_trace_cpu_data *tcd; struct list_head *cur_head; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; int i, cpu; 
spin_lock(&pc->pc_lock); for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) { cur_head = tcd->tcd_pages.next; list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); if (tage->cpu != cpu || tage->type != i) continue; cfs_tage_to_tail(tage, cur_head); tcd->tcd_cur_pages++; } } } spin_unlock(&pc->pc_lock); } static void put_pages_back(struct page_collection *pc) { if (!libcfs_panic_in_progress) put_pages_back_on_all_cpus(pc); } /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that * we have a good amount of data at all times for dumping during an LBUG, even * if we have been steadily writing (and otherwise discarding) pages via the * debug daemon. */ static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_cpu_data *tcd) { struct cfs_trace_page *tage; struct cfs_trace_page *tmp; spin_lock(&pc->pc_lock); list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) continue; cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); tcd->tcd_cur_daemon_pages++; if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { struct cfs_trace_page *victim; __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); __LASSERT_TAGE_INVARIANT(victim); list_del(&victim->linkage); cfs_tage_free(victim); tcd->tcd_cur_daemon_pages--; } } spin_unlock(&pc->pc_lock); } static void put_pages_on_daemon_list(struct page_collection *pc) { struct cfs_trace_cpu_data *tcd; int i, cpu; for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) put_pages_on_tcd_daemon_list(pc, tcd); } } void cfs_trace_debug_print(void) { struct page_collection pc; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; spin_lock_init(&pc.pc_lock); pc.pc_want_daemon_pages = 1; collect_pages(&pc); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { char *p, *file, 
*fn; struct page *page; __LASSERT_TAGE_INVARIANT(tage); page = tage->page; p = page_address(page); while (p < ((char *)page_address(page) + tage->used)) { struct ptldebug_header *hdr; int len; hdr = (void *)p; p += sizeof(*hdr); file = p; p += strlen(file) + 1; fn = p; p += strlen(fn) + 1; len = hdr->ph_len - (int)(p - (char *)hdr); cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); p += len; } list_del(&tage->linkage); cfs_tage_free(tage); } } int cfs_tracefile_dump_all_pages(char *filename) { struct page_collection pc; struct file *filp; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; char *buf; int rc; DECL_MMSPACE; cfs_tracefile_write_lock(); filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); if (IS_ERR(filp)) { rc = PTR_ERR(filp); filp = NULL; pr_err("LustreError: can't open %s for dump: rc %d\n", filename, rc); goto out; } spin_lock_init(&pc.pc_lock); pc.pc_want_daemon_pages = 1; collect_pages(&pc); if (list_empty(&pc.pc_pages)) { rc = 0; goto close; } /* ok, for now, just write the pages. 
in the future we'll be building * iobufs with the pages and calling generic_direct_IO */ MMSPACE_OPEN; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); buf = kmap(tage->page); rc = vfs_write(filp, (__force const char __user *)buf, tage->used, &filp->f_pos); kunmap(tage->page); if (rc != (int)tage->used) { printk(KERN_WARNING "wanted to write %u but wrote %d\n", tage->used, rc); put_pages_back(&pc); __LASSERT(list_empty(&pc.pc_pages)); break; } list_del(&tage->linkage); cfs_tage_free(tage); } MMSPACE_CLOSE; rc = vfs_fsync(filp, 1); if (rc) pr_err("sync returns %d\n", rc); close: filp_close(filp, NULL); out: cfs_tracefile_write_unlock(); return rc; } void cfs_trace_flush_pages(void) { struct page_collection pc; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; spin_lock_init(&pc.pc_lock); pc.pc_want_daemon_pages = 1; collect_pages(&pc); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); list_del(&tage->linkage); cfs_tage_free(tage); } } int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, const char __user *usr_buffer, int usr_buffer_nob) { int nob; if (usr_buffer_nob > knl_buffer_nob) return -EOVERFLOW; if (copy_from_user((void *)knl_buffer, usr_buffer, usr_buffer_nob)) return -EFAULT; nob = strnlen(knl_buffer, usr_buffer_nob); while (nob-- >= 0) /* strip trailing whitespace */ if (!isspace(knl_buffer[nob])) break; if (nob < 0) /* empty string */ return -EINVAL; if (nob == knl_buffer_nob) /* no space to terminate */ return -EOVERFLOW; knl_buffer[nob + 1] = 0; /* terminate */ return 0; } EXPORT_SYMBOL(cfs_trace_copyin_string); int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, const char *knl_buffer, char *append) { /* NB if 'append' != NULL, it's a single character to append to the * copied out string - usually "\n", for /proc entries and "" (i.e. 
a * terminating zero byte) for sysctl entries */ int nob = strlen(knl_buffer); if (nob > usr_buffer_nob) nob = usr_buffer_nob; if (copy_to_user(usr_buffer, knl_buffer, nob)) return -EFAULT; if (append != NULL && nob < usr_buffer_nob) { if (copy_to_user(usr_buffer + nob, append, 1)) return -EFAULT; nob++; } return nob; } EXPORT_SYMBOL(cfs_trace_copyout_string); int cfs_trace_allocate_string_buffer(char **str, int nob) { if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ return -EINVAL; *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); if (*str == NULL) return -ENOMEM; return 0; } void cfs_trace_free_string_buffer(char *str, int nob) { kfree(str); } int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) { char *str; int rc; rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); if (rc != 0) return rc; rc = cfs_trace_copyin_string(str, usr_str_nob + 1, usr_str, usr_str_nob); if (rc != 0) goto out; if (str[0] != '/') { rc = -EINVAL; goto out; } rc = cfs_tracefile_dump_all_pages(str); out: cfs_trace_free_string_buffer(str, usr_str_nob + 1); return rc; } int cfs_trace_daemon_command(char *str) { int rc = 0; cfs_tracefile_write_lock(); if (strcmp(str, "stop") == 0) { cfs_tracefile_write_unlock(); cfs_trace_stop_thread(); cfs_tracefile_write_lock(); memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); } else if (strncmp(str, "size=", 5) == 0) { cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0); if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480) cfs_tracefile_size = CFS_TRACEFILE_SIZE; else cfs_tracefile_size <<= 20; } else if (strlen(str) >= sizeof(cfs_tracefile)) { rc = -ENAMETOOLONG; } else if (str[0] != '/') { rc = -EINVAL; } else { strcpy(cfs_tracefile, str); printk(KERN_INFO "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", cfs_tracefile, (long)(cfs_tracefile_size >> 10)); cfs_trace_start_thread(); } cfs_tracefile_write_unlock(); return rc; } int cfs_trace_daemon_command_usrstr(void __user *usr_str, 
int usr_str_nob) { char *str; int rc; rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); if (rc != 0) return rc; rc = cfs_trace_copyin_string(str, usr_str_nob + 1, usr_str, usr_str_nob); if (rc == 0) rc = cfs_trace_daemon_command(str); cfs_trace_free_string_buffer(str, usr_str_nob + 1); return rc; } int cfs_trace_set_debug_mb(int mb) { int i; int j; int pages; int limit = cfs_trace_max_debug_mb(); struct cfs_trace_cpu_data *tcd; if (mb < num_possible_cpus()) { printk(KERN_WARNING "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", mb, num_possible_cpus()); mb = num_possible_cpus(); } if (mb > limit) { printk(KERN_WARNING "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n", mb, limit); mb = limit; } mb /= num_possible_cpus(); pages = mb << (20 - PAGE_CACHE_SHIFT); cfs_tracefile_write_lock(); cfs_tcd_for_each(tcd, i, j) tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; cfs_tracefile_write_unlock(); return 0; } int cfs_trace_get_debug_mb(void) { int i; int j; struct cfs_trace_cpu_data *tcd; int total_pages = 0; cfs_tracefile_read_lock(); cfs_tcd_for_each(tcd, i, j) total_pages += tcd->tcd_max_pages; cfs_tracefile_read_unlock(); return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; } static int tracefiled(void *arg) { struct page_collection pc; struct tracefiled_ctl *tctl = arg; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; struct file *filp; char *buf; int last_loop = 0; int rc; DECL_MMSPACE; /* we're started late enough that we pick up init's fs context */ /* this is so broken in uml? what on earth is going on? 
*/ spin_lock_init(&pc.pc_lock); complete(&tctl->tctl_start); while (1) { wait_queue_t __wait; pc.pc_want_daemon_pages = 0; collect_pages(&pc); if (list_empty(&pc.pc_pages)) goto end_loop; filp = NULL; cfs_tracefile_read_lock(); if (cfs_tracefile[0] != 0) { filp = filp_open(cfs_tracefile, O_CREAT | O_RDWR | O_LARGEFILE, 0600); if (IS_ERR(filp)) { rc = PTR_ERR(filp); filp = NULL; printk(KERN_WARNING "couldn't open %s: %d\n", cfs_tracefile, rc); } } cfs_tracefile_read_unlock(); if (filp == NULL) { put_pages_on_daemon_list(&pc); __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } MMSPACE_OPEN; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; __LASSERT_TAGE_INVARIANT(tage); if (f_pos >= (off_t)cfs_tracefile_size) f_pos = 0; else if (f_pos > i_size_read(file_inode(filp))) f_pos = i_size_read(file_inode(filp)); buf = kmap(tage->page); rc = vfs_write(filp, (__force const char __user *)buf, tage->used, &f_pos); kunmap(tage->page); if (rc != (int)tage->used) { printk(KERN_WARNING "wanted to write %u but wrote %d\n", tage->used, rc); put_pages_back(&pc); __LASSERT(list_empty(&pc.pc_pages)); break; } } MMSPACE_CLOSE; filp_close(filp, NULL); put_pages_on_daemon_list(&pc); if (!list_empty(&pc.pc_pages)) { int i; printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); pr_err("total cpus(%d): ", num_possible_cpus()); for (i = 0; i < num_possible_cpus(); i++) if (cpu_online(i)) pr_cont("%d(on) ", i); else pr_cont("%d(off) ", i); pr_cont("\n"); i = 0; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) pr_err("page %d belongs to cpu %d\n", ++i, tage->cpu); pr_err("There are %d pages unwritten\n", i); } __LASSERT(list_empty(&pc.pc_pages)); end_loop: if (atomic_read(&tctl->tctl_shutdown)) { if (last_loop == 0) { last_loop = 1; continue; } else { break; } } init_waitqueue_entry(&__wait, current); add_wait_queue(&tctl->tctl_waitq, &__wait); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1)); 
remove_wait_queue(&tctl->tctl_waitq, &__wait); } complete(&tctl->tctl_stop); return 0; } int cfs_trace_start_thread(void) { struct tracefiled_ctl *tctl = &trace_tctl; int rc = 0; mutex_lock(&cfs_trace_thread_mutex); if (thread_running) goto out; init_completion(&tctl->tctl_start); init_completion(&tctl->tctl_stop); init_waitqueue_head(&tctl->tctl_waitq); atomic_set(&tctl->tctl_shutdown, 0); if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) { rc = -ECHILD; goto out; } wait_for_completion(&tctl->tctl_start); thread_running = 1; out: mutex_unlock(&cfs_trace_thread_mutex); return rc; } void cfs_trace_stop_thread(void) { struct tracefiled_ctl *tctl = &trace_tctl; mutex_lock(&cfs_trace_thread_mutex); if (thread_running) { printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n"); atomic_set(&tctl->tctl_shutdown, 1); wait_for_completion(&tctl->tctl_stop); thread_running = 0; } mutex_unlock(&cfs_trace_thread_mutex); } int cfs_tracefile_init(int max_pages) { struct cfs_trace_cpu_data *tcd; int i; int j; int rc; int factor; rc = cfs_tracefile_init_arch(); if (rc != 0) return rc; cfs_tcd_for_each(tcd, i, j) { /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ factor = tcd->tcd_pages_factor; INIT_LIST_HEAD(&tcd->tcd_pages); INIT_LIST_HEAD(&tcd->tcd_stock_pages); INIT_LIST_HEAD(&tcd->tcd_daemon_pages); tcd->tcd_cur_pages = 0; tcd->tcd_cur_stock_pages = 0; tcd->tcd_cur_daemon_pages = 0; tcd->tcd_max_pages = (max_pages * factor) / 100; LASSERT(tcd->tcd_max_pages > 0); tcd->tcd_shutting_down = 0; } return 0; } static void trace_cleanup_on_all_cpus(void) { struct cfs_trace_cpu_data *tcd; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; int i, cpu; for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) { tcd->tcd_shutting_down = 1; list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); list_del(&tage->linkage); cfs_tage_free(tage); } tcd->tcd_cur_pages = 0; } } } static void cfs_trace_cleanup(void) { struct page_collection pc; INIT_LIST_HEAD(&pc.pc_pages); spin_lock_init(&pc.pc_lock); trace_cleanup_on_all_cpus(); cfs_tracefile_fini_arch(); } void cfs_tracefile_exit(void) { cfs_trace_stop_thread(); cfs_trace_cleanup(); }
236424.c
#include "glmc.h"

/*
 * 2x2 float matrix routines for the glmc library.
 *
 * Matrices are stored column-major: mat2f m => m[column][row], as the
 * multiplication code below demonstrates (dest[c][r] sums over
 * src_a[k][r] * src_b[c][k]).
 */

/* Returns det(src) = m00*m11 - m10*m01. */
inline float glmc_mat2f_determinant(mat2f src)
{
	return src[0][0] * src[1][1] - src[1][0] * src[0][1];
}

/* dest = src_a + src_b (component-wise). */
inline void glmc_mat2f_add(mat2f dest, mat2f src_a, mat2f src_b)
{
	dest[0][0] = src_a[0][0] + src_b[0][0];
	dest[0][1] = src_a[0][1] + src_b[0][1];
	dest[1][0] = src_a[1][0] + src_b[1][0];
	dest[1][1] = src_a[1][1] + src_b[1][1];
}

/* dest += src_b (component-wise, in place). */
inline void glmc_mat2f_add_dest(mat2f dest, mat2f src_b)
{
	dest[0][0] += src_b[0][0];
	dest[0][1] += src_b[0][1];
	dest[1][0] += src_b[1][0];
	dest[1][1] += src_b[1][1];
}

/* dest = src_a - src_b (component-wise). */
inline void glmc_mat2f_sub(mat2f dest, mat2f src_a, mat2f src_b)
{
	dest[0][0] = src_a[0][0] - src_b[0][0];
	dest[0][1] = src_a[0][1] - src_b[0][1];
	dest[1][0] = src_a[1][0] - src_b[1][0];
	dest[1][1] = src_a[1][1] - src_b[1][1];
}

/* dest -= src_b (component-wise, in place). */
inline void glmc_mat2f_sub_dest(mat2f dest, mat2f src_b)
{
	dest[0][0] -= src_b[0][0];
	dest[0][1] -= src_b[0][1];
	dest[1][0] -= src_b[1][0];
	dest[1][1] -= src_b[1][1];
}

/* dest = src_a * src_b (matrix product).  dest must not alias the sources. */
inline void glmc_mat2f_mul(mat2f dest, mat2f src_a, mat2f src_b)
{
	dest[0][0] = src_a[0][0]*src_b[0][0] + src_a[1][0]*src_b[0][1];
	dest[0][1] = src_a[0][1]*src_b[0][0] + src_a[1][1]*src_b[0][1];
	dest[1][0] = src_a[0][0]*src_b[1][0] + src_a[1][0]*src_b[1][1];
	dest[1][1] = src_a[0][1]*src_b[1][0] + src_a[1][1]*src_b[1][1];
}

/* dest = dest * src_b (matrix product, in place via a temporary). */
inline void glmc_mat2f_mul_dest(mat2f dest, mat2f src_b)
{
	mat2f temp;

	temp[0][0] = dest[0][0];
	temp[0][1] = dest[0][1];
	temp[1][0] = dest[1][0];
	temp[1][1] = dest[1][1];

	dest[0][0] = temp[0][0]*src_b[0][0] + temp[1][0]*src_b[0][1];
	dest[0][1] = temp[0][1]*src_b[0][0] + temp[1][1]*src_b[0][1];
	dest[1][0] = temp[0][0]*src_b[1][0] + temp[1][0]*src_b[1][1];
	dest[1][1] = temp[0][1]*src_b[1][0] + temp[1][1]*src_b[1][1];
}

/* dest = src_a * src_b (scalar multiply). */
inline void glmc_mat2f_mul_s(mat2f dest, mat2f src_a, float src_b)
{
	dest[0][0] = src_a[0][0] * src_b;
	dest[0][1] = src_a[0][1] * src_b;
	dest[1][0] = src_a[1][0] * src_b;
	dest[1][1] = src_a[1][1] * src_b;
}

/* dest = src_a * inverse(src_b).  src_b must be invertible (det != 0). */
inline void glmc_mat2f_div(mat2f dest, mat2f src_a, mat2f src_b)
{
	mat2f src_b_inv;

	glmc_mat2f_inverse(src_b_inv, src_b);
	glmc_mat2f_mul(dest, src_a, src_b_inv);
}

/* dest = dest * inverse(src_b), in place.  src_b must be invertible. */
inline void glmc_mat2f_div_dest(mat2f dest, mat2f src_b)
{
	mat2f temp;
	mat2f inv;

	temp[0][0] = dest[0][0];
	temp[0][1] = dest[0][1];
	temp[1][0] = dest[1][0];
	temp[1][1] = dest[1][1];

	glmc_mat2f_inverse(inv, src_b);
	glmc_mat2f_mul(dest, temp, inv);
}

/* dest = src_a / src_b (scalar divide).  src_b must be non-zero. */
inline void glmc_mat2f_div_s(mat2f dest, mat2f src_a, float src_b)
{
	dest[0][0] = src_a[0][0] / src_b;
	dest[0][1] = src_a[0][1] / src_b;
	dest[1][0] = src_a[1][0] / src_b;
	dest[1][1] = src_a[1][1] / src_b;
}

/* dest += src_a * src_b (multiply-accumulate). */
inline void glmc_mat2f_madd(mat2f dest, mat2f src_a, mat2f src_b)
{
	dest[0][0] += src_a[0][0]*src_b[0][0] + src_a[1][0]*src_b[0][1];
	dest[0][1] += src_a[0][1]*src_b[0][0] + src_a[1][1]*src_b[0][1];
	dest[1][0] += src_a[0][0]*src_b[1][0] + src_a[1][0]*src_b[1][1];
	dest[1][1] += src_a[0][1]*src_b[1][0] + src_a[1][1]*src_b[1][1];
}

/* dest -= src_a * src_b (multiply-subtract). */
inline void glmc_mat2f_msub(mat2f dest, mat2f src_a, mat2f src_b)
{
	dest[0][0] -= src_a[0][0]*src_b[0][0] + src_a[1][0]*src_b[0][1];
	dest[0][1] -= src_a[0][1]*src_b[0][0] + src_a[1][1]*src_b[0][1];
	dest[1][0] -= src_a[0][0]*src_b[1][0] + src_a[1][0]*src_b[1][1];
	dest[1][1] -= src_a[0][1]*src_b[1][0] + src_a[1][1]*src_b[1][1];
}

/* dest = transpose(src).  dest must not alias src. */
inline void glmc_mat2f_transpose(mat2f dest, mat2f src)
{
	dest[0][0] = src[0][0];
	dest[0][1] = src[1][0];
	dest[1][0] = src[0][1];
	dest[1][1] = src[1][1];
}

/* Transposes dest in place by swapping the two off-diagonal elements. */
inline void glmc_mat2f_transpose_dest(mat2f dest)
{
	float temp;

	temp = dest[0][1];
	dest[0][1] = dest[1][0];
	dest[1][0] = temp;
}

/* dest = inverse(src) = adj(src) / det(src).  src must be invertible. */
inline void glmc_mat2f_inverse(mat2f dest, mat2f src)
{
	float inv;

	inv = 1.0f / glmc_mat2f_determinant(src);
	dest[0][0] =  src[1][1] * inv;
	dest[0][1] = -src[0][1] * inv;
	dest[1][0] = -src[1][0] * inv;
	dest[1][1] =  src[0][0] * inv;
}

/* dest = src / det(src).  det(src) must be non-zero. */
inline void glmc_mat2f_normlize(mat2f dest, mat2f src)
{
	float det;

	det = glmc_mat2f_determinant(src);
	dest[0][0] = src[0][0] / det;
	dest[0][1] = src[0][1] / det;
	dest[1][0] = src[1][0] / det;
	dest[1][1] = src[1][1] / det;
}

/*
 * In-place variant: dest /= det(dest).
 *
 * Fixed: this previously MULTIPLIED each element by the determinant,
 * the opposite of glmc_mat2f_normlize() above; the two variants now
 * agree.
 */
inline void glmc_mat2f_normlize_dest(mat2f dest)
{
	float det;

	det = glmc_mat2f_determinant(dest);
	dest[0][0] /= det;
	dest[0][1] /= det;
	dest[1][0] /= det;
	dest[1][1] /= det;
}

/*
 * Builds a scale matrix diag(x, 1): scales the x axis by x, leaves y
 * unchanged.
 *
 * NOTE(review): if a uniform scale was intended, dest[1][1] should be x
 * rather than 1 -- confirm against the library's documented contract.
 */
inline void glmc_mat2f_scale(mat2f dest, float x)
{
	dest[0][0] = x;
	dest[0][1] = 0;
	dest[1][0] = 0;
	dest[1][1] = 1;
}
69419.c
/* Android JNI Client Layer Copyright 2010-2012 Marc-Andre Moreau <marcandre.moreau@gmail.com> Copyright 2013 Thincast Technologies GmbH, Author: Martin Fleisz Copyright 2013 Thincast Technologies GmbH, Author: Armin Novak Copyright 2015 Bernhard Miklautz <bernhard.miklautz@thincast.com> This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <assert.h> #include <jni.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <sys/select.h> #include <freerdp/graphics.h> #include <freerdp/codec/rfx.h> #include <freerdp/gdi/gdi.h> #include <freerdp/gdi/gfx.h> #include <freerdp/client/rdpei.h> #include <freerdp/client/rdpgfx.h> #include <freerdp/client/cliprdr.h> #include <freerdp/channels/channels.h> #include <freerdp/client/channels.h> #include <freerdp/client/cmdline.h> #include <freerdp/constants.h> #include <freerdp/locale/keyboard.h> #include <freerdp/primitives.h> #include <freerdp/version.h> #include <freerdp/settings.h> #include <android/bitmap.h> #include "android_freerdp.h" #include "android_jni_callback.h" #include "android_jni_utils.h" #include "android_debug.h" #include "android_cliprdr.h" #if defined(WITH_GPROF) #include "jni/prof.h" #endif static BOOL android_context_new(freerdp* instance, rdpContext* context) { if (!(context->channels = freerdp_channels_new())) return FALSE; if (!android_event_queue_init(instance)) { freerdp_channels_free(context->channels); return FALSE; } return TRUE; } static void android_context_free(freerdp* instance, rdpContext* context) { if (context && context->channels) { freerdp_channels_close(context->channels, instance); freerdp_channels_free(context->channels); context->channels = NULL; } android_event_queue_uninit(instance); } static void android_OnChannelConnectedEventHandler(rdpContext* context, 
ChannelConnectedEventArgs* e) { rdpSettings* settings = context->settings; androidContext* afc = (androidContext*) context; if (strcmp(e->name, RDPEI_DVC_CHANNEL_NAME) == 0) { DEBUG_ANDROID("Unhandled case.. RDPEI_DVC_CHANNEL_NAME"); } else if (strcmp(e->name, RDPGFX_DVC_CHANNEL_NAME) == 0) { if (settings->SoftwareGdi) gdi_graphics_pipeline_init(context->gdi, (RdpgfxClientContext*) e->pInterface); } else if (strcmp(e->name, CLIPRDR_SVC_CHANNEL_NAME) == 0) { android_cliprdr_init(afc, (CliprdrClientContext*) e->pInterface); } } static void android_OnChannelDisconnectedEventHandler(rdpContext* context, ChannelDisconnectedEventArgs* e) { rdpSettings* settings = context->settings; androidContext* afc = (androidContext*) context; if (strcmp(e->name, RDPEI_DVC_CHANNEL_NAME) == 0) { DEBUG_ANDROID("Unhandled case.. RDPEI_DVC_CHANNEL_NAME"); } else if (strcmp(e->name, RDPGFX_DVC_CHANNEL_NAME) == 0) { if (settings->SoftwareGdi) gdi_graphics_pipeline_uninit(context->gdi, (RdpgfxClientContext*) e->pInterface); } else if (strcmp(e->name, CLIPRDR_SVC_CHANNEL_NAME) == 0) { android_cliprdr_uninit(afc, (CliprdrClientContext*) e->pInterface); } } static BOOL android_begin_paint(rdpContext* context) { rdpGdi* gdi = context->gdi; gdi->primary->hdc->hwnd->invalid->null = 1; gdi->primary->hdc->hwnd->ninvalid = 0; return TRUE; } static BOOL android_end_paint(rdpContext* context) { int i; int ninvalid; HGDI_RGN cinvalid; int x1, y1, x2, y2; androidContext *ctx = (androidContext*)context; rdpSettings* settings = context->instance->settings; assert(ctx); assert(settings); assert(context->instance); ninvalid = ctx->rdpCtx.gdi->primary->hdc->hwnd->ninvalid; if (ninvalid == 0) { DEBUG_ANDROID("ui_update: ninvalid=%d", ninvalid); return TRUE; } cinvalid = ctx->rdpCtx.gdi->primary->hdc->hwnd->cinvalid; x1 = cinvalid[0].x; y1 = cinvalid[0].y; x2 = cinvalid[0].x + cinvalid[0].w; y2 = cinvalid[0].y + cinvalid[0].h; for (i = 0; i < ninvalid; i++) { x1 = MIN(x1, cinvalid[i].x); y1 = MIN(y1, 
cinvalid[i].y); x2 = MAX(x2, cinvalid[i].x + cinvalid[i].w); y2 = MAX(y2, cinvalid[i].y + cinvalid[i].h); } DEBUG_ANDROID("ui_update: ninvalid=%d x=%d, y=%d, width=%d, height=%d, bpp=%d", ninvalid, x1, y1, x2 - x1, y2 - y1, settings->ColorDepth); freerdp_callback("OnGraphicsUpdate", "(IIIII)V", context->instance, x1, y1, x2 - x1, y2 - y1); return TRUE; } static BOOL android_desktop_resize(rdpContext* context) { DEBUG_ANDROID("ui_desktop_resize"); assert(context); assert(context->settings); assert(context->instance); freerdp_callback("OnGraphicsResize", "(IIII)V", context->instance, context->settings->DesktopWidth, context->settings->DesktopHeight, context->settings->ColorDepth); return TRUE; } static BOOL android_pre_connect(freerdp* instance) { DEBUG_ANDROID("android_pre_connect"); rdpSettings* settings = instance->settings; BOOL bitmap_cache = settings->BitmapCacheEnabled; settings->OrderSupport[NEG_DSTBLT_INDEX] = TRUE; settings->OrderSupport[NEG_PATBLT_INDEX] = TRUE; settings->OrderSupport[NEG_SCRBLT_INDEX] = TRUE; settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] = TRUE; settings->OrderSupport[NEG_DRAWNINEGRID_INDEX] = FALSE; settings->OrderSupport[NEG_MULTIDSTBLT_INDEX] = FALSE; settings->OrderSupport[NEG_MULTIPATBLT_INDEX] = FALSE; settings->OrderSupport[NEG_MULTISCRBLT_INDEX] = FALSE; settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX] = TRUE; settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX] = FALSE; settings->OrderSupport[NEG_LINETO_INDEX] = TRUE; settings->OrderSupport[NEG_POLYLINE_INDEX] = TRUE; settings->OrderSupport[NEG_MEMBLT_INDEX] = bitmap_cache; settings->OrderSupport[NEG_MEM3BLT_INDEX] = TRUE; settings->OrderSupport[NEG_MEMBLT_V2_INDEX] = bitmap_cache; settings->OrderSupport[NEG_MEM3BLT_V2_INDEX] = FALSE; settings->OrderSupport[NEG_SAVEBITMAP_INDEX] = FALSE; settings->OrderSupport[NEG_GLYPH_INDEX_INDEX] = TRUE; settings->OrderSupport[NEG_FAST_INDEX_INDEX] = TRUE; settings->OrderSupport[NEG_FAST_GLYPH_INDEX] = TRUE; 
settings->OrderSupport[NEG_POLYGON_SC_INDEX] = FALSE; settings->OrderSupport[NEG_POLYGON_CB_INDEX] = FALSE; settings->OrderSupport[NEG_ELLIPSE_SC_INDEX] = FALSE; settings->OrderSupport[NEG_ELLIPSE_CB_INDEX] = FALSE; settings->FrameAcknowledge = 10; PubSub_SubscribeChannelConnected(instance->context->pubSub, (pChannelConnectedEventHandler) android_OnChannelConnectedEventHandler); PubSub_SubscribeChannelDisconnected(instance->context->pubSub, (pChannelDisconnectedEventHandler) android_OnChannelDisconnectedEventHandler); freerdp_register_addin_provider(freerdp_channels_load_static_addin_entry, 0); freerdp_client_load_addins(instance->context->channels, instance->settings); freerdp_channels_pre_connect(instance->context->channels, instance); return TRUE; } static BOOL android_post_connect(freerdp* instance) { UINT32 gdi_flags; rdpSettings *settings = instance->settings; DEBUG_ANDROID("android_post_connect"); assert(instance); assert(settings); freerdp_callback("OnSettingsChanged", "(IIII)V", instance, settings->DesktopWidth, settings->DesktopHeight, settings->ColorDepth); if (!(instance->context->cache = cache_new(settings))) return FALSE; if (instance->settings->ColorDepth > 16) gdi_flags = CLRBUF_32BPP | CLRCONV_ALPHA | CLRCONV_INVERT; else gdi_flags = CLRBUF_16BPP; if (!gdi_init(instance, gdi_flags, NULL)) return FALSE; instance->update->BeginPaint = android_begin_paint; instance->update->EndPaint = android_end_paint; instance->update->DesktopResize = android_desktop_resize; if (freerdp_channels_post_connect(instance->context->channels, instance) < 0) return FALSE; freerdp_callback("OnConnectionSuccess", "(I)V", instance); return TRUE; } static void android_post_disconnect(freerdp* instance) { DEBUG_ANDROID("android_post_disconnect"); gdi_free(instance); cache_free(instance->context->cache); } static BOOL android_authenticate(freerdp* instance, char** username, char** password, char** domain) { DEBUG_ANDROID("Authenticate user:"); DEBUG_ANDROID(" Username: %s", 
*username); DEBUG_ANDROID(" Domain: %s", *domain); JNIEnv* env; jboolean attached = jni_attach_thread(&env); jobject jstr1 = create_string_builder(env, *username); jobject jstr2 = create_string_builder(env, *domain); jobject jstr3 = create_string_builder(env, *password); jboolean res = freerdp_callback_bool_result("OnAuthenticate", "(ILjava/lang/StringBuilder;Ljava/lang/StringBuilder;Ljava/lang/StringBuilder;)Z", instance, jstr1, jstr2, jstr3); if (res == JNI_TRUE) { // read back string values free(*username); *username = get_string_from_string_builder(env, jstr1); free(*domain); *domain = get_string_from_string_builder(env, jstr2); free(*password); *password = get_string_from_string_builder(env, jstr3); } if (attached == JNI_TRUE) jni_detach_thread(); return ((res == JNI_TRUE) ? TRUE : FALSE); } static BOOL android_verify_certificate(freerdp* instance, char* subject, char* issuer, char* fingerprint) { DEBUG_ANDROID("Certificate details:"); DEBUG_ANDROID("\tSubject: %s", subject); DEBUG_ANDROID("\tIssuer: %s", issuer); DEBUG_ANDROID("\tThumbprint: %s", fingerprint); DEBUG_ANDROID("The above X.509 certificate could not be verified, possibly because you do not have " "the CA certificate in your certificate store, or the certificate has expired." "Please look at the documentation on how to create local certificate store for a private CA.\n"); JNIEnv* env; jboolean attached = jni_attach_thread(&env); jstring jstr1 = (*env)->NewStringUTF(env, subject); jstring jstr2 = (*env)->NewStringUTF(env, issuer); jstring jstr3 = (*env)->NewStringUTF(env, fingerprint); jboolean res = freerdp_callback_bool_result("OnVerifyCertificate", "(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)Z", instance, jstr1, jstr2, jstr3); if (attached == JNI_TRUE) jni_detach_thread(); return ((res == JNI_TRUE) ? 
TRUE : FALSE); } static BOOL android_verify_changed_certificate(freerdp* instance, char* subject, char* issuer, char* new_fingerprint, char* old_fingerprint) { return android_verify_certificate(instance, subject, issuer, new_fingerprint); } static void* jni_input_thread(void* arg) { HANDLE event[3]; wMessageQueue* queue; freerdp* instance = (freerdp*) arg; androidContext *aCtx = (androidContext*)instance->context; assert(NULL != instance); assert(NULL != aCtx); DEBUG_ANDROID("input_thread Start."); if (!(queue = freerdp_get_message_queue(instance, FREERDP_INPUT_MESSAGE_QUEUE))) goto fail_get_message_queue; if (!(event[0] = CreateFileDescriptorEvent(NULL, FALSE, FALSE, aCtx->event_queue->pipe_fd[0], FD_READ))) goto fail_create_event_0; if (!(event[1] = CreateFileDescriptorEvent(NULL, FALSE, FALSE, aCtx->event_queue->pipe_fd[1], FD_READ))) goto fail_create_event_1; if (!(event[2] = freerdp_get_message_queue_event_handle(instance, FREERDP_INPUT_MESSAGE_QUEUE))) goto fail_get_message_queue_event; do { DWORD rc = WaitForMultipleObjects(3, event, FALSE, INFINITE); if ((rc < WAIT_OBJECT_0) || (rc > WAIT_OBJECT_0 + 2)) continue; if (rc == WAIT_OBJECT_0 + 2) { wMessage msg; MessageQueue_Peek(queue, &msg, FALSE); if (msg.id == WMQ_QUIT) break; } if (android_check_fds(instance) != TRUE) break; } while(1); DEBUG_ANDROID("input_thread Quit."); fail_get_message_queue_event: CloseHandle(event[1]); fail_create_event_1: CloseHandle(event[0]); fail_create_event_0: MessageQueue_PostQuit(queue, 0); fail_get_message_queue: ExitThread(0); return NULL; } static void* jni_channels_thread(void* arg) { int status; HANDLE event; rdpChannels* channels; freerdp* instance = (freerdp*) arg; assert(NULL != instance); DEBUG_ANDROID("Channels_thread Start."); channels = instance->context->channels; event = freerdp_channels_get_event_handle(instance); while (WaitForSingleObject(event, INFINITE) == WAIT_OBJECT_0) { status = freerdp_channels_process_pending_messages(instance); if (!status) break; } 
DEBUG_ANDROID("channels_thread Quit."); ExitThread(0); return NULL; } static int android_freerdp_run(freerdp* instance) { int i; int fds; int max_fds; int rcount; int wcount; int fd_input_event; HANDLE input_event = NULL; void* rfds[32]; void* wfds[32]; fd_set rfds_set; fd_set wfds_set; int select_status; struct timeval timeout; const rdpSettings* settings = instance->context->settings; HANDLE input_thread = NULL; HANDLE channels_thread = NULL; BOOL async_input = settings->AsyncInput; BOOL async_channels = settings->AsyncChannels; BOOL async_transport = settings->AsyncTransport; DEBUG_ANDROID("AsyncUpdate=%d", settings->AsyncUpdate); DEBUG_ANDROID("AsyncInput=%d", settings->AsyncInput); DEBUG_ANDROID("AsyncChannels=%d", settings->AsyncChannels); DEBUG_ANDROID("AsyncTransport=%d", settings->AsyncTransport); memset(rfds, 0, sizeof(rfds)); memset(wfds, 0, sizeof(wfds)); if (!freerdp_connect(instance)) { freerdp_callback("OnConnectionFailure", "(I)V", instance); return 0; } if (async_input) { if (!(input_thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) jni_input_thread, instance, 0, NULL))) { DEBUG_ANDROID("Failed to create async input thread\n"); goto disconnect; } } if (async_channels) { if (!(channels_thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) jni_channels_thread, instance, 0, NULL))) { DEBUG_ANDROID("Failed to create async channels thread\n"); goto disconnect; } } ((androidContext*)instance->context)->is_connected = TRUE; while (!freerdp_shall_disconnect(instance)) { rcount = 0; wcount = 0; if (!async_transport) { if (freerdp_get_fds(instance, rfds, &rcount, wfds, &wcount) != TRUE) { DEBUG_ANDROID("Failed to get FreeRDP file descriptor\n"); break; } } if (!async_channels) { if (freerdp_channels_get_fds(instance->context->channels, instance, rfds, &rcount, wfds, &wcount) != TRUE) { DEBUG_ANDROID("Failed to get channel manager file descriptor\n"); break; } } if (!async_input) { if (android_get_fds(instance, rfds, &rcount, wfds, &wcount) != TRUE) { 
DEBUG_ANDROID("Failed to get android file descriptor\n"); break; } } else { input_event = freerdp_get_message_queue_event_handle(instance, FREERDP_INPUT_MESSAGE_QUEUE); fd_input_event = GetEventFileDescriptor(input_event); rfds[rcount++] = (void*) (long) fd_input_event; } max_fds = 0; FD_ZERO(&rfds_set); FD_ZERO(&wfds_set); for (i = 0; i < rcount; i++) { fds = (int)(long)(rfds[i]); if (fds > max_fds) max_fds = fds; FD_SET(fds, &rfds_set); } if (max_fds == 0) break; timeout.tv_sec = 1; timeout.tv_usec = 0; select_status = select(max_fds + 1, &rfds_set, NULL, NULL, &timeout); if (select_status == 0) continue; else if (select_status == -1) { /* these are not really errors */ if (!((errno == EAGAIN) || (errno == EWOULDBLOCK) || (errno == EINPROGRESS) || (errno == EINTR))) /* signal occurred */ { DEBUG_ANDROID("android_run: select failed\n"); break; } } if (freerdp_shall_disconnect(instance)) break; if (!async_transport) { if (freerdp_check_fds(instance) != TRUE) { DEBUG_ANDROID("Failed to check FreeRDP file descriptor\n"); break; } } if (!async_input) { if (android_check_fds(instance) != TRUE) { DEBUG_ANDROID("Failed to check android file descriptor\n"); break; } } else if (input_event) { if (WaitForSingleObject(input_event, 0) == WAIT_OBJECT_0) { if (!freerdp_message_queue_process_pending_messages(instance, FREERDP_INPUT_MESSAGE_QUEUE)) { DEBUG_ANDROID("User Disconnect"); break; } } } if (!async_channels) { if (freerdp_channels_check_fds(instance->context->channels, instance) != TRUE) { DEBUG_ANDROID("Failed to check channel manager file descriptor\n"); break; } } } disconnect: DEBUG_ANDROID("Prepare shutdown..."); // issue another OnDisconnecting here in case the disconnect was initiated by the server and not our client freerdp_callback("OnDisconnecting", "(I)V", instance); DEBUG_ANDROID("Close channels..."); freerdp_channels_disconnect(instance->context->channels, instance); DEBUG_ANDROID("Cleanup threads..."); if (async_channels && channels_thread) { 
WaitForSingleObject(channels_thread, INFINITE); CloseHandle(channels_thread); } if (async_input && input_thread) { wMessageQueue* input_queue = freerdp_get_message_queue(instance, FREERDP_INPUT_MESSAGE_QUEUE); if (input_queue) { if (MessageQueue_PostQuit(input_queue, 0)) WaitForSingleObject(input_thread, INFINITE); } CloseHandle(input_thread); } DEBUG_ANDROID("run Disconnecting..."); freerdp_disconnect(instance); freerdp_callback("OnDisconnected", "(I)V", instance); DEBUG_ANDROID("run Quit."); return 0; } static void* android_thread_func(void* param) { freerdp* instance = param; DEBUG_ANDROID("android_thread_func Start."); assert(instance); android_freerdp_run(instance); DEBUG_ANDROID("android_thread_func Quit."); ExitThread(0); return NULL; } JNIEXPORT jint JNICALL jni_freerdp_new(JNIEnv *env, jclass cls) { freerdp* instance; #if defined(WITH_GPROF) setenv("CPUPROFILE_FREQUENCY", "200", 1); monstartup("libfreerdp-android.so"); #endif // create instance if (!(instance = freerdp_new())) return (jint)NULL; instance->PreConnect = android_pre_connect; instance->PostConnect = android_post_connect; instance->PostDisconnect = android_post_disconnect; instance->Authenticate = android_authenticate; instance->VerifyCertificate = android_verify_certificate; instance->VerifyChangedCertificate = android_verify_changed_certificate; // create context instance->ContextSize = sizeof(androidContext); instance->ContextNew = android_context_new; instance->ContextFree = android_context_free; if (!freerdp_context_new(instance)) { freerdp_free(instance); instance = NULL; } return (jint) instance; } JNIEXPORT void JNICALL jni_freerdp_free(JNIEnv *env, jclass cls, jint instance) { freerdp* inst = (freerdp*)instance; freerdp_context_free(inst); freerdp_free(inst); #if defined(WITH_GPROF) moncleanup(); #endif } JNIEXPORT jboolean JNICALL jni_freerdp_connect(JNIEnv *env, jclass cls, jint instance) { freerdp* inst = (freerdp*)instance; androidContext* ctx = (androidContext*)inst->context; 
assert(inst); assert(ctx); if (!(ctx->thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)android_thread_func, inst, 0, NULL))) { return JNI_FALSE; } return JNI_TRUE; } JNIEXPORT jboolean JNICALL jni_freerdp_disconnect(JNIEnv *env, jclass cls, jint instance) { freerdp* inst = (freerdp*)instance; androidContext* ctx = (androidContext*)inst->context; ANDROID_EVENT* event = (ANDROID_EVENT*)android_event_disconnect_new(); if (!event) return JNI_FALSE; DEBUG_ANDROID("DISCONNECT!"); assert(inst); assert(ctx); assert(event); if (!android_push_event(inst, event)) { android_event_disconnect_free(event); return JNI_FALSE; } WaitForSingleObject(ctx->thread, INFINITE); CloseHandle(ctx->thread); ctx->thread = NULL; freerdp_callback("OnDisconnecting", "(I)V", instance); return (jboolean) JNI_TRUE; } JNIEXPORT jboolean JNICALL jni_freerdp_cancel_connection(JNIEnv *env, jclass cls, jint instance) { return jni_freerdp_disconnect(env, cls, instance); } JNIEXPORT jboolean JNICALL jni_freerdp_set_data_directory(JNIEnv *env, jclass cls, jint instance, jstring jdirectory) { freerdp* inst = (freerdp*)instance; rdpSettings * settings = inst->settings; const jbyte* directory = (*env)->GetStringUTFChars(env, jdirectory, NULL); if (!directory) return JNI_FALSE; free(settings->HomePath); free(settings->ConfigPath); settings->HomePath = settings->ConfigPath = NULL; int config_dir_len = strlen(directory) + 10; /* +9 chars for /.freerdp and +1 for \0 */ char* config_dir_buf = (char*)malloc(config_dir_len); if (!config_dir_buf) goto out_malloc_fail; strcpy(config_dir_buf, directory); strcat(config_dir_buf, "/.freerdp"); settings->HomePath = strdup(directory); if (!settings->HomePath) goto out_strdup_fail; settings->ConfigPath = config_dir_buf; /* will be freed by freerdp library */ (*env)->ReleaseStringUTFChars(env, jdirectory, directory); return JNI_TRUE; out_strdup_fail: free(config_dir_buf); out_malloc_fail: (*env)->ReleaseStringUTFChars(env, jdirectory, directory); return JNI_FALSE; } 
JNIEXPORT jboolean JNICALL jni_freerdp_set_connection_info(JNIEnv *env, jclass cls, jint instance, jstring jhostname, jstring jusername, jstring jpassword, jstring jdomain, jint width, jint height, jint color_depth, jint port, jboolean console, jint security, jstring jcertname) { freerdp* inst = (freerdp*)instance; rdpSettings * settings = inst->settings; const jbyte *hostname; const jbyte *username; const jbyte *password; const jbyte *domain; const jbyte *certname; if(!(hostname = (*env)->GetStringUTFChars(env, jhostname, NULL))) return JNI_FALSE; if (!(username = (*env)->GetStringUTFChars(env, jusername, NULL))) goto out_fail_username; if (!(password = (*env)->GetStringUTFChars(env, jpassword, NULL))) goto out_fail_password; if (!(domain = (*env)->GetStringUTFChars(env, jdomain, NULL))) goto out_fail_domain; if (!(certname = (*env)->GetStringUTFChars(env, jcertname, NULL))) goto out_fail_certname; DEBUG_ANDROID("hostname: %s", (char*) hostname); DEBUG_ANDROID("username: %s", (char*) username); DEBUG_ANDROID("password: %s", (char*) password); DEBUG_ANDROID("domain: %s", (char*) domain); DEBUG_ANDROID("width: %d", width); DEBUG_ANDROID("height: %d", height); DEBUG_ANDROID("color depth: %d", color_depth); DEBUG_ANDROID("port: %d", port); DEBUG_ANDROID("security: %d", security); settings->DesktopWidth = width; settings->DesktopHeight = height; settings->ColorDepth = color_depth; settings->ServerPort = port; // Hack for 16 bit RDVH connections: // In this case we get screen corruptions when we have an odd screen resolution width ... need to investigate what is causing this... 
	/* NOTE(review): tail of the connection-setup JNI function -- its head
	 * (parameter list, GetStringUTFChars acquisitions) is outside this chunk,
	 * so parameter semantics below are inferred from use. */

	/* Low color depths require an even desktop width; clear the low bit. */
	if (color_depth <= 16)
		settings->DesktopWidth &= (~1);

	if (!(settings->ServerHostname = strdup(hostname)))
		goto out_fail_strdup;

	if (username && strlen(username) > 0)
	{
		if (!(settings->Username = strdup(username)))
			goto out_fail_strdup;
	}

	if (password && strlen(password) > 0)
	{
		if (!(settings->Password = strdup(password)))
			goto out_fail_strdup;

		/* A non-empty password implies the user wants automatic logon. */
		settings->AutoLogonEnabled = TRUE;
	}

	/* NOTE(review): domain is duplicated unconditionally, unlike
	 * username/password above which are skipped when empty -- confirm
	 * an empty domain string is acceptable downstream. */
	if (!(settings->Domain = strdup(domain)))
		goto out_fail_strdup;

	if (certname && strlen(certname) > 0)
	{
		if (!(settings->CertificateName = strdup(certname)))
			goto out_fail_strdup;
	}

	settings->ConsoleSession = (console == JNI_TRUE) ? TRUE : FALSE;
	settings->SoftwareGdi = TRUE;
	settings->BitmapCacheV3Enabled = TRUE;

	/* Map the Java-side security selector onto the RDP security layers.
	 * Values outside 1..3 leave the library defaults untouched. */
	switch ((int) security)
	{
		case 1:
			/* Standard RDP */
			settings->RdpSecurity = TRUE;
			settings->TlsSecurity = FALSE;
			settings->NlaSecurity = FALSE;
			settings->ExtSecurity = FALSE;
			settings->UseRdpSecurityLayer = TRUE;
			break;

		case 2:
			/* TLS */
			settings->NlaSecurity = FALSE;
			settings->TlsSecurity = TRUE;
			settings->RdpSecurity = FALSE;
			settings->ExtSecurity = FALSE;
			break;

		case 3:
			/* NLA */
			settings->NlaSecurity = TRUE;
			settings->TlsSecurity = FALSE;
			settings->RdpSecurity = FALSE;
			settings->ExtSecurity = FALSE;
			break;

		default:
			break;
	}

	// set US keyboard layout
	settings->KeyboardLayout = 0x0409;

	(*env)->ReleaseStringUTFChars(env, jhostname, hostname);
	(*env)->ReleaseStringUTFChars(env, jusername, username);
	(*env)->ReleaseStringUTFChars(env, jpassword, password);
	(*env)->ReleaseStringUTFChars(env, jdomain, domain);
	(*env)->ReleaseStringUTFChars(env, jcertname, certname);

	return JNI_TRUE;

	/* Unwind in reverse acquisition order; each label releases the JNI
	 * string obtained just before the failure point. */
out_fail_strdup:
	(*env)->ReleaseStringUTFChars(env, jcertname, certname);
out_fail_certname:
	(*env)->ReleaseStringUTFChars(env, jdomain, domain);
out_fail_domain:
	(*env)->ReleaseStringUTFChars(env, jpassword, password);
out_fail_password:
	(*env)->ReleaseStringUTFChars(env, jusername, username);
out_fail_username:
	(*env)->ReleaseStringUTFChars(env, jhostname, hostname);
	return JNI_FALSE;
}

/* Applies the user's performance choices to the session settings and then
 * derives the wire-level PERFORMANCE_FLAGS from them.
 * NOTE(review): the jint 'instance' is cast straight to a pointer; this
 * truncates on 64-bit platforms -- should be jlong. Same pattern is used
 * throughout this file; verify against the Java side before changing. */
JNIEXPORT void JNICALL jni_freerdp_set_performance_flags(
	JNIEnv *env, jclass cls, jint instance, jboolean remotefx,
	jboolean disableWallpaper, jboolean disableFullWindowDrag,
	jboolean disableMenuAnimations, jboolean disableTheming,
	jboolean enableFontSmoothing, jboolean enableDesktopComposition)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;

	DEBUG_ANDROID("remotefx: %d", (remotefx == JNI_TRUE) ? 1 : 0);

	if (remotefx == JNI_TRUE)
	{
		/* RemoteFX needs fast-path output and a 32bpp session. */
		settings->RemoteFxCodec = TRUE;
		settings->FastPathOutput = TRUE;
		settings->ColorDepth = 32;
		settings->LargePointerFlag = TRUE;
		settings->FrameMarkerCommandEnabled = TRUE;
	}
	else
	{
		/* enable NSCodec if we don't use remotefx */
		settings->NSCodec = TRUE;
	}

	/* store performance settings */
	settings->DisableWallpaper = (disableWallpaper == JNI_TRUE) ? TRUE : FALSE;
	settings->DisableFullWindowDrag = (disableFullWindowDrag == JNI_TRUE) ? TRUE : FALSE;
	settings->DisableMenuAnims = (disableMenuAnimations == JNI_TRUE) ? TRUE : FALSE;
	settings->DisableThemes = (disableTheming == JNI_TRUE) ? TRUE : FALSE;
	settings->AllowFontSmoothing = (enableFontSmoothing == JNI_TRUE) ? TRUE : FALSE;
	settings->AllowDesktopComposition = (enableDesktopComposition == JNI_TRUE) ? TRUE : FALSE;

	/* Create performance flags from settings */
	freerdp_performance_flags_make(settings);

	DEBUG_ANDROID("performance_flags: %04X", settings->PerformanceFlags);
}

/* Copies the remote-program / working-directory strings and the four
 * async-mode toggles into the settings. Returns JNI_TRUE on success,
 * JNI_FALSE on JNI-string or strdup failure. */
JNIEXPORT jboolean JNICALL jni_freerdp_set_advanced_settings(JNIEnv *env,
	jclass cls, jint instance, jstring jRemoteProgram, jstring jWorkDir,
	jboolean async_channel, jboolean async_transport, jboolean async_input,
	jboolean async_update)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;
	jboolean ret = JNI_FALSE;

	const jbyte *remote_program;
	const jbyte *work_dir;

	if (!(remote_program = (*env)->GetStringUTFChars(env, jRemoteProgram, NULL)))
		return JNI_FALSE;

	if (!(work_dir = (*env)->GetStringUTFChars(env, jWorkDir, NULL)))
		goto out_fail_work_dir;

	DEBUG_ANDROID("Remote Program: %s", (char*) remote_program);
	DEBUG_ANDROID("Work Dir: %s", (char*) work_dir);

	/* Enable async mode. */
	settings->AsyncUpdate = async_update;
	settings->AsyncChannels = async_channel;
	settings->AsyncTransport = async_transport;
	settings->AsyncInput = async_input;

	if (remote_program && strlen(remote_program) > 0)
	{
		if (!(settings->AlternateShell = strdup(remote_program)))
			goto out_fail_strdup;
	}

	if (work_dir && strlen(work_dir) > 0)
	{
		if (!(settings->ShellWorkingDirectory = strdup(work_dir)))
			goto out_fail_strdup;
	}

	ret = JNI_TRUE;
	/* Shared exit path: release both JNI strings whether or not we succeeded. */
out_fail_strdup:
	(*env)->ReleaseStringUTFChars(env, jWorkDir, work_dir);
out_fail_work_dir:
	(*env)->ReleaseStringUTFChars(env, jRemoteProgram, remote_program);
	return ret;
}

/* Registers an Android "drive" device channel that exposes jpath to the
 * server (args = {"drive", <name>, <local path>}). */
JNIEXPORT jboolean JNICALL jni_freerdp_set_drive_redirection(JNIEnv *env,
	jclass cls, jint instance, jstring jpath)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;
	char* args[] = {"drive", "Android", ""};
	jboolean ret = JNI_FALSE;

	const jbyte *path = (*env)->GetStringUTFChars(env, jpath, NULL);
	if (!path)
		return JNI_FALSE;

	DEBUG_ANDROID("drive redirect: %s", (char*)path);

	args[2] = (char*)path;
	if (freerdp_client_add_device_channel(settings, 3, args) == -1)
	{
		settings->DeviceRedirection = FALSE;
		goto out_fail;
	}

	settings->DeviceRedirection = TRUE;
	ret = JNI_TRUE;

out_fail:
	(*env)->ReleaseStringUTFChars(env, jpath, path);
	return ret;
}

/* Configures audio output: redirect == 2 plays audio locally via the
 * "rdpsnd" static channel, redirect == 1 leaves audio on the server
 * console, any other value disables audio. */
JNIEXPORT jboolean JNICALL jni_freerdp_set_sound_redirection(JNIEnv *env,
	jclass cls, jint instance, jint redirect)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;

	DEBUG_ANDROID("sound: %s",
			redirect ? ((redirect == 1) ? "Server" : "Redirect") : "None");

	settings->AudioPlayback = (redirect == 2) ? TRUE : FALSE;
	if (settings->AudioPlayback)
	{
		int ret;
		char* p[1] = {"rdpsnd"};
		int count = 1;

		ret = freerdp_client_add_static_channel(settings, count, p);
		if(ret == -1)
			return JNI_FALSE;
	}
	settings->RemoteConsoleAudio = (redirect == 1) ? TRUE : FALSE;
	return JNI_TRUE;
}

/* Enables/disables microphone capture via the "audin" dynamic channel. */
JNIEXPORT jboolean JNICALL jni_freerdp_set_microphone_redirection(JNIEnv *env,
	jclass cls, jint instance, jboolean enable)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;

	DEBUG_ANDROID("microphone redirect: %s", enable ? "TRUE" : "FALSE");

	settings->AudioCapture = enable;
	if (enable)
	{
		int ret;
		char* p[1] = {"audin"};
		int count = 1;

		ret = freerdp_client_add_dynamic_channel(settings, count, p);
		if (ret == -1)
			return JNI_FALSE;
	}
	return JNI_TRUE;
}

/* Toggles clipboard redirection between client and server. */
JNIEXPORT void JNICALL jni_freerdp_set_clipboard_redirection(JNIEnv *env,
	jclass cls, jint instance, jboolean enable)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;

	DEBUG_ANDROID("clipboard redirect: %s", enable ? "TRUE" : "FALSE");

	settings->RedirectClipboard = enable ? TRUE : FALSE;
}

/* Stores RD Gateway connection parameters.
 * NOTE(review): unlike the other setters, the gateway strings are logged
 * including the password -- DEBUG_ANDROID is presumably compiled out in
 * release builds; verify before shipping. */
JNIEXPORT jboolean JNICALL jni_freerdp_set_gateway_info(JNIEnv *env, jclass cls,
	jint instance, jstring jgatewayhostname, jint port,
	jstring jgatewayusername, jstring jgatewaypassword, jstring jgatewaydomain)
{
	freerdp* inst = (freerdp*)instance;
	rdpSettings * settings = inst->settings;
	jboolean ret = JNI_FALSE;

	const jbyte *gatewayhostname;
	const jbyte *gatewayusername;
	const jbyte *gatewaypassword;
	const jbyte *gatewaydomain;

	if (!(gatewayhostname = (*env)->GetStringUTFChars(env, jgatewayhostname, NULL)))
		return JNI_FALSE;
	if (!(gatewayusername = (*env)->GetStringUTFChars(env, jgatewayusername, NULL)))
		goto out_fail_username;
	if (!(gatewaypassword = (*env)->GetStringUTFChars(env, jgatewaypassword, NULL)))
		goto out_fail_password;
	if (!(gatewaydomain = (*env)->GetStringUTFChars(env, jgatewaydomain, NULL)))
		goto out_fail_domain;

	DEBUG_ANDROID("gatewayhostname: %s", (char*) gatewayhostname);
	DEBUG_ANDROID("gatewayport: %d", port);
	DEBUG_ANDROID("gatewayusername: %s", (char*) gatewayusername);
	DEBUG_ANDROID("gatewaypassword: %s", (char*) gatewaypassword);
	DEBUG_ANDROID("gatewaydomain: %s", (char*) gatewaydomain);

	settings->GatewayPort = port;
	settings->GatewayUsageMethod = TSC_PROXY_MODE_DIRECT;
	settings->GatewayEnabled = TRUE;
	settings->GatewayUseSameCredentials = FALSE;
	settings->GatewayHostname = strdup(gatewayhostname);
	settings->GatewayUsername = strdup(gatewayusername);
	settings->GatewayPassword = strdup(gatewaypassword);
	settings->GatewayDomain = strdup(gatewaydomain);
	/* Any strdup failure falls through the shared cleanup path; strings that
	 * did get duplicated stay owned by 'settings' and are freed with it. */
	if (!settings->GatewayHostname || !settings->GatewayUsername ||
		!settings->GatewayPassword || !settings->GatewayDomain)
	{
		goto out_fail_strdup;
	}

	ret = JNI_TRUE;

out_fail_strdup:
	(*env)->ReleaseStringUTFChars(env, jgatewaydomain, gatewaydomain);
out_fail_domain:
	(*env)->ReleaseStringUTFChars(env, jgatewaypassword, gatewaypassword);
out_fail_password:
	(*env)->ReleaseStringUTFChars(env, jgatewayusername, gatewayusername);
out_fail_username:
	(*env)->ReleaseStringUTFChars(env, jgatewayhostname, gatewayhostname);
	return ret;
}

/* Copies the rectangle (x, y, width, height) from srcBuf into dstBuf.
 * Both buffers are assumed to share the same geometry: wBuf pixels wide,
 * bpp bytes per pixel.
 * NOTE(review): hBuf is accepted but never used -- there is no clamping
 * of (y + height) against the buffer height, and the destination is
 * assumed to have the same stride as the source (wBuf * bpp). If the
 * Android bitmap stride can differ from the gdi stride, this corrupts
 * output; verify against AndroidBitmapInfo at the call site. */
static void copy_pixel_buffer(UINT8* dstBuf, UINT8* srcBuf, int x, int y,
	int width, int height, int wBuf, int hBuf, int bpp)
{
	int i;
	int length;
	int scanline;
	UINT8 *dstp, *srcp;

	length = width * bpp;
	scanline = wBuf * bpp;

	srcp = (UINT8*) &srcBuf[(scanline * y) + (x * bpp)];
	dstp = (UINT8*) &dstBuf[(scanline * y) + (x * bpp)];

	for (i = 0; i < height; i++)
	{
		memcpy(dstp, srcp, length);
		srcp += scanline;
		dstp += scanline;
	}
}

/* Copies the dirty rectangle from the gdi primary buffer into the Java
 * bitmap. Locks/unlocks the bitmap pixels around the copy.
 * NOTE(review): 'info' is fetched but its width/height/stride are never
 * checked against the gdi geometry -- see copy_pixel_buffer above. */
JNIEXPORT jboolean JNICALL jni_freerdp_update_graphics(
	JNIEnv *env, jclass cls, jint instance, jobject bitmap,
	jint x, jint y, jint width, jint height)
{
	int ret;
	void* pixels;
	AndroidBitmapInfo info;
	freerdp* inst = (freerdp*)instance;
	rdpGdi *gdi = inst->context->gdi;

	if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0)
	{
		DEBUG_ANDROID("AndroidBitmap_getInfo() failed ! error=%d", ret);
		return JNI_FALSE;
	}

	if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0)
	{
		DEBUG_ANDROID("AndroidBitmap_lockPixels() failed ! error=%d", ret);
		return JNI_FALSE;
	}

	copy_pixel_buffer(pixels, gdi->primary_buffer, x, y, width, height,
			gdi->width, gdi->height, gdi->bytesPerPixel);

	AndroidBitmap_unlockPixels(env, bitmap);

	return JNI_TRUE;
}

/* Translates a virtual key code into a scancode and queues a key
 * down/up event toward the session thread. */
JNIEXPORT jboolean JNICALL jni_freerdp_send_key_event(
	JNIEnv *env, jclass cls, jint instance, jint keycode, jboolean down)
{
	DWORD scancode;
	ANDROID_EVENT* event;

	freerdp* inst = (freerdp*)instance;

	scancode = GetVirtualScanCodeFromVirtualKeyCode(keycode, 4);
	int flags = (down == JNI_TRUE) ? KBD_FLAGS_DOWN : KBD_FLAGS_RELEASE;
	flags |= (scancode & KBDEXT) ?
KBD_FLAGS_EXTENDED : 0; event = (ANDROID_EVENT*) android_event_key_new(flags, scancode & 0xFF); if (!event) return JNI_FALSE; if (!android_push_event(inst, event)) { android_event_key_free((ANDROID_EVENT_KEY *)event); return JNI_FALSE; } DEBUG_ANDROID("send_key_event: %d, %d", (int)scancode, flags); return JNI_TRUE; } JNIEXPORT jboolean JNICALL jni_freerdp_send_unicodekey_event( JNIEnv *env, jclass cls, jint instance, jint keycode) { ANDROID_EVENT* event; freerdp* inst = (freerdp*)instance; event = (ANDROID_EVENT*) android_event_unicodekey_new(keycode); if (!event) return JNI_FALSE; if (!android_push_event(inst, event)) { android_event_unicodekey_free((ANDROID_EVENT_KEY *)event); return JNI_FALSE; } DEBUG_ANDROID("send_unicodekey_event: %d", keycode); return JNI_TRUE; } JNIEXPORT jboolean JNICALL jni_freerdp_send_cursor_event( JNIEnv *env, jclass cls, jint instance, jint x, jint y, jint flags) { ANDROID_EVENT* event; freerdp* inst = (freerdp*)instance; event = (ANDROID_EVENT*) android_event_cursor_new(flags, x, y); if (!event) return JNI_FALSE; if (!android_push_event(inst, event)) { android_event_cursor_free((ANDROID_EVENT_CURSOR *)event); return JNI_FALSE; } DEBUG_ANDROID("send_cursor_event: (%d, %d), %d", x, y, flags); return JNI_TRUE; } JNIEXPORT jboolean JNICALL jni_freerdp_send_clipboard_data(JNIEnv *env, jclass cls, jint instance, jstring jdata) { ANDROID_EVENT* event; freerdp* inst = (freerdp*)instance; const jbyte *data = jdata != NULL ? (*env)->GetStringUTFChars(env, jdata, NULL) : NULL; int data_length = data ? 
strlen(data) : 0; jboolean ret = JNI_FALSE;; event = (ANDROID_EVENT*) android_event_clipboard_new((void*)data, data_length); if (!event) goto out_fail; if (!android_push_event(inst, event)) { android_event_clipboard_free((ANDROID_EVENT_CLIPBOARD *)event); goto out_fail; } DEBUG_ANDROID("send_clipboard_data: (%s)", data); ret = JNI_TRUE; out_fail: if (data) (*env)->ReleaseStringUTFChars(env, jdata, data); return ret; } JNIEXPORT jstring JNICALL jni_freerdp_get_version(JNIEnv *env, jclass cls) { return (*env)->NewStringUTF(env, GIT_REVISION); }
824853.c
#include<stdio.h> #include<stdlib.h> #include<math.h> void myFuntion(int a)//passing value to function (int a). of type int."how to pass" { //printf("this is my function\n a = %i\n\n", a); printf("this is my function\n"); }//end myfunction main()//main doesnt know what my function is.(5) { int a; printf("this is main function.\n a = %i\n\n",a); myFunction(5);//CALLED OR INVOKED MY FUNCTION //int a is replaced by (5) system("pause"); int shape1, shape2; int square(int a); { printf("this is square\n"); shape1 = askSquare(); shape2 = askCube(); } int cube(int a); printf("this is cube\n"); shape1 = askSquare(); shape2 = askCube(); }//end main int askNum() { int x; printf("enter a number\n"); scanf_s("%i", &x); return x; } int addNum(int a, int b)//parameters or arguments { int sum; sum = a + b; return sum; } void display(int a) { printf("the result is %i\n", a); int num1, num2, num3, prod; //printf("this is main funtion\n"); num1 = askNum ();//invoking num2 = askNum();//invoking addNum(num1 , num2); num3 = addNum(num1, num2);//2 argumnents //prod = multi(num1, num2); display(num3); //display(1000); //printf("the additions is %i\n", num3); //myfunctions(1); //myfunction(5); system("pause"); }//end main //google all function, you can create your own functions.
211362.c
#include "gfx/legato/generated/le_gen_init.h"

/* ID of the screen currently shown; -1 (wraps to UINT32_MAX on this
 * unsigned type) is used as the "no screen yet" sentinel. */
static uint32_t currentScreen;

/* One-time UI initialization: installs the string table, initializes all
 * generated screens, then shows the initial screen. */
void legato_initialize(void)
{
    leSetStringTable(&stringTable);

    screenInit_Screen0();

    currentScreen = -1;
    legato_showScreen(screenID_Screen0);
}

/* Returns the ID of the currently shown screen. */
uint32_t legato_getCurrentScreen(void)
{
    return currentScreen;
}

/* Hides the active screen, if any.
 * NOTE(review): this resets currentScreen to 0, which is presumably also
 * the value of screenID_Screen0 -- after hiding, the tracker would claim
 * Screen0 is still current. Confirm against the generated screen IDs. */
void legato_hideCurrentScreen()
{
    switch(currentScreen)
    {
        case screenID_Screen0:
        {
            screenHide_Screen0();
            currentScreen = 0;
            break;
        }
    }
}

/* Hides the current screen and shows the one identified by 'id'.
 * NOTE(review): legato_hideCurrentScreen is declared with an empty
 * parameter list '()' (unspecified args in C), so passing currentScreen
 * here compiles but the argument is ignored. */
void legato_showScreen(uint32_t id)
{
    legato_hideCurrentScreen(currentScreen);

    switch(id)
    {
        case screenID_Screen0:
        {
            screenShow_Screen0();
            currentScreen = id;
            break;
        }
    }
}

/* Per-frame update hook for the active screen. */
void legato_updateCurrentScreen(void)
{
    switch(currentScreen)
    {
        case screenID_Screen0:
        {
            screenUpdate_Screen0();
            break;
        }
    }
}
982064.c
// _pp.c
// Rogue ability
// created 04 November 1992 by Descartes of Borg
#include <std.h>

inherit DAEMON;

#define INVIS_PENALTY 75
#define INVIS_CHECK_DIE 20

/* FIX(review): check_caught was declared/defined as void but ends with
 * "return test;" and its result is assigned in cmd_pp -- return type
 * corrected to int here and at the definition below. */
int check_caught(int roll,object target, int sLevel);
void do_caught(object victim);

// Attempt to pick the purse of a living target in the current room.
// Returns 0 (with a notify_fail message) when the attempt is disallowed,
// 1 once the attempt -- successful or not -- has been resolved.
int cmd_pp(string str)
{
    object ob;
    string amt_string;
    // NOTE(review): sLevel is never assigned (LPC ints default to 0), so
    // the caught-check below always runs with a 0 skill-level bonus.
    int steal, roll, sLevel,i, amt;
    int platinum, gold, electrum, silver, copper;

    /* Preconditions: caller must be material, unbound, out of combat, and
     * the target must be a stealable living non-wizard other than self. */
    if (TP->query_ghost()) {
        notify_fail("You cannot do that in your immaterial state.\n");
        return 0;
    }
    if (TP->query_bound() || TP->query_tripped()) {
        TP->send_paralyzed_message("info",TP);
        return 1;
    }
/*
    if(!TP->is_class("thief") && !TP->is_class("bard")) {
        notify_fail("Too bad you don't know how to do that.\n");
        return 0;
    }
*/
    if (!str) {
        notify_fail("Pick whom?\n");
        return 0;
    }
    if (TP->query_current_attacker()) {
        notify_fail("You can't do that while in combat!\n");
        return 0;
    }
    ob = present(str, ETP);
    if (!ob) ob = parse_objects(ETP, str);
    if (!ob) {
        notify_fail("No "+str+" here!\n");
        return 0;
    }
    if (!living(ob)) {
        notify_fail(capitalize(str)+" is not a living thing!\n");
        return 0;
    }
    if (wizardp(ob)) {
        notify_fail("That is not adviseable.\n");
        return 0;
    }
    if (ob->is_player() && !interactive(ob)) return 0;
    if (ob==TP) {
        notify_fail("You cannot pick your own purse!\n");
        return 0;
    }
    if(ETP->query_property("no steal")) {
        notify_fail("A magic force prevents you from doing that!\n");
        return 0;
    }
    if(ob->query_property("no steal")) {
        notify_fail((string)ob->query_cap_name()+" cannot be stolen from!\n");
        return 0;
    }
    if (!TP->ok_to_kill(ob)) return notify_fail("Supernatural forces prevent you.\n");

/* Calculations */
// NOTE(review): the block below is a dead, older skill formula kept in a
// comment; the live formula follows it.
/*
    steal = (int)TP->query_thief_skill("pick pockets");
    if (!TP->is_ok_armour("thief")) {
        steal -= 10000; //it just doesn't work
    }
    if (!TP->is_ok_armour("mage"))
        steal -= 30; // Mages can wear clothing magic and nothing at all
    else steal += 5;
    if (ob->query_invis()) steal -= INVIS_PENALTY;
/* Display messages */
    roll = random(100)+1;
//  tell_object(TP,"x = "+roll+" steal = "+steal); */

    /* Opposed check: thievery + d20 (plus armor modifier) versus the
     * target's perception + d20. */
    steal = TP->query_skill("thievery") + roll_dice(1,20);
    if(sizeof(TP->query_armour("torso")))
        steal += TP->skill_armor_mod(TP->query_armour("torso"));
    roll = ob->query_skill("perception") + roll_dice(1,20);

    /* Auto-fail when the target wins the roll, or when this thief was
     * caught by this target within the last 150 seconds. */
    if (roll>steal || (TP->get_static("caught") && time() - (int)((mapping)TP->get_static("caught"))[ob] <= 150)) {
        write("You utterly fail in your attempt to pick from "+ob->query_cap_name()+".");
        check_caught(roll,ob,steal);
        return 1;
    }

    platinum = (int)ob->query_money("platinum");
    gold = (int)ob->query_money("gold");
    electrum = (int)ob->query_money("electrum");
    silver = (int)ob->query_money("silver");
    copper = (int)ob->query_money("copper");
    if (!platinum && !gold && !electrum && !silver && !copper) {
        tell_object(TP,""+ob->query_cap_name()+" is flat broke!\n");
        return 1;
    }

    /* Take a skill-scaled fraction of a tenth of each coin type. */
    platinum = (steal*(platinum/10))/100;
    gold = (steal*(gold/10))/100;
    electrum = (steal*(electrum/10))/100;
    silver = (steal*(silver/10))/100;
    copper = (steal*(copper/10))/100;
    if (!platinum && !gold && !silver && !electrum && !copper) {
        write("You fail to get anything from "+ob->query_cap_name()+"'s purse.");
    }
    else {
        ob->add_money("platinum", -platinum);
        ob->add_money("gold", -gold);
        ob->add_money("electrum", -electrum);
        ob->add_money("silver", -silver);
        ob->add_money("copper", -copper);
        TP->add_money("platinum", platinum);
        TP->add_money("gold", gold);
        TP->add_money("electrum", electrum);
        TP->add_money("silver", silver);
        TP->add_money("copper", copper);
        write("You pick some money from "+ob->query_cap_name()+ "'s purse.");
        /* Report the haul in the largest denomination that is non-zero. */
        amt = gold + platinum*5 + electrum/2 + silver/10 + copper/100;
        amt_string = "gold";
        if(!amt) {
            amt = electrum + silver / 5 + copper / 50;
            amt_string = "electrum";
        }
        if(!amt) {
            amt = silver + copper / 10;
            amt_string = "silver";
        }
        if(!amt) {
            amt = copper;
            amt_string = "copper";
        }
        write("It amounts to "+amt+" "+amt_string+" in various coins.");
    }
    //if (interactive(ob)) log_file("player/theft", TPQN+" stole "+gold+" gold from "+ob->query_name()+" on "+ctime(time())+"\n");

    /* Even a successful lift can still be noticed after the fact. */
    i = check_caught(roll,ob, sLevel);
    if(TP->query("stolen money")){
        TP->set("stolen money",(int)TP->query("stolen money")+amt);
    } else {
        TP->set("stolen money",amt);
    }
    return 1;
}

// Player-facing help text for the pp command.
void help()
{
    write( "
%^CYAN%^NAME%^RESET%^
    pp - pickpocket someone

%^CYAN%^SYNOPSIS%^RESET%^
    pp %^ORANGE%^%^ULINE%^TARGET%^RESET%^

%^CYAN%^DESCRIPTION%^RESET%^
    This command will attempt to steal some money from %^ORANGE%^%^ULINE%^TARGET%^RESET%^.
    If %^ORANGE%^%^ULINE%^TARGET%^RESET%^ discovers you attempt, they will
    be upset at you.

%^CYAN%^SEE ALSO%^RESET%^
    stealth, steal, spy, look, glance, pkilling, flee
");
}

// Decides whether the target notices the theft attempt, and applies all
// consequences (exposure, retaliation, logging, disable timer, pk flag).
// Returns the computed detection threshold.
// FIX(review): return type changed from void to int to match the
// "return test;" below and the assignment in cmd_pp.
int check_caught(int roll, object target, int sLevel){
    int test;
    int weight;
    int intox,condition,busy,bonus;
    string *pkills;
    object * inven;
    int i;

    /* Situational modifiers: a drunker/wearier target and a busier room
     * all make detection less likely. */
    intox = (((int)target->query_intox())/35) - ((int)TP->query_intox())/35;
    condition = (100- (int)target->query_condition_percent()) - (100- (int)TP->query_condition_percent());
    busy = (5 * ( sizeof(all_living(ETP)) -2) ) - 10;
    bonus = intox + condition + busy + sLevel;
    test = 50 + ((int)target->query_highest_level() - bonus);
    if ((100 - roll)<test) {
        /* Caught: break stealth and possibly magical invisibility. */
        TP->set_hidden(0);
        if(TP->query_magic_hidden()) {
            if (TP->is_thief()) bonus = 5;
            else bonus = 0;
            if ((int)target->query_stats("wisdom") > (random(INVIS_CHECK_DIE) + bonus)) {
                TP->force_me("appear");
                TP->set_magic_hidden(0);
            }
        }
        tell_object(target,"You catch "+TPQCN+" with "+TP->query_possessive()+" hand in your pocket.\n");
        //tell_object(target,capitalize(TP->query_subjective())+" was stealing from you.\n");
        tell_object(TP,"You get caught.");
        tell_room(environment(TP),"You see "+target->query_cap_name()+" catch "+TPQCN+" with a hand in "+target->query_possessive()+" pocket.",({TP,target}));
        /* Let every other living thing in the room react to the theft. */
        inven = all_living(ETP);
        for(i=0;i<sizeof(inven);i++){
            if(objectp(inven[i]))
                inven[i]->check_caught(TP,target,roll);
        }
        if (!interactive(target)) target->kill_ob(TP,0);
        else log_file("player/theft", TPQN+"("+sLevel+") was caught stealing from "+target->query_name()+"("+target->query_lowest_level()+") on "+ctime(time())+"\n");
        if (TP->is_singleClass()) {
            TP->set_disable(2,target);
        } else {
            TP->set_disable(2*sizeof(TP->query_classes()),target);
        }
        /* Stealing from a player marks the thief as pkill-able by them. */
        if (interactive(TP)) {
            pkills = TP->query_pkilled();
            if (member_array(target->query_name(),pkills) == -1) {
                pkills += ({target->query_name()});
                TP->set_pkilled(pkills);
            }
        }
        do_caught(target);
    }
    return test;
}

// Records the time this thief was caught by 'victim'; cmd_pp auto-fails
// repeat attempts against that victim for the next 150 seconds.
void do_caught(object victim){
    TP->set_static("caught",([victim:time()]));
}
318317.c
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2018 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Rasmus Lerdorf <rasmus@php.net> | | Stig Bakken <ssb@php.net> | | Jim Winstead <jimw@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* gd 1.2 is copyright 1994, 1995, Quest Protein Database Center, Cold Spring Harbor Labs. 
*/ /* Note that there is no code from the gd package in this file */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/head.h" #include <math.h> #include "SAPI.h" #include "php_gd.h" #include "ext/standard/info.h" #include "php_open_temporary_file.h" #if HAVE_SYS_WAIT_H # include <sys/wait.h> #endif #if HAVE_UNISTD_H # include <unistd.h> #endif #ifdef PHP_WIN32 # include <io.h> # include <fcntl.h> # include <windows.h> # include <Winuser.h> # include <Wingdi.h> #endif #ifdef HAVE_GD_XPM # include <X11/xpm.h> #endif # include "gd_compat.h" static int le_gd, le_gd_font; #include <gd.h> #include <gd_errors.h> #include <gdfontt.h> /* 1 Tiny font */ #include <gdfonts.h> /* 2 Small font */ #include <gdfontmb.h> /* 3 Medium bold font */ #include <gdfontl.h> /* 4 Large font */ #include <gdfontg.h> /* 5 Giant font */ #ifdef ENABLE_GD_TTF # ifdef HAVE_LIBFREETYPE # include <ft2build.h> # include FT_FREETYPE_H # endif #endif #if defined(HAVE_GD_XPM) && defined(HAVE_GD_BUNDLED) # include "X11/xpm.h" #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #ifdef ENABLE_GD_TTF static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int, int); #endif #include "gd_ctx.c" /* as it is not really public, duplicate declaration here to avoid pointless warnings */ int overflow2(int a, int b); /* Section Filters Declarations */ /* IMPORTANT NOTE FOR NEW FILTER * Do not forget to update: * IMAGE_FILTER_MAX: define the last filter index * IMAGE_FILTER_MAX_ARGS: define the biggest amount of arguments * image_filter array in PHP_FUNCTION(imagefilter) * */ #define IMAGE_FILTER_NEGATE 0 #define IMAGE_FILTER_GRAYSCALE 1 #define IMAGE_FILTER_BRIGHTNESS 2 #define IMAGE_FILTER_CONTRAST 3 #define IMAGE_FILTER_COLORIZE 4 #define IMAGE_FILTER_EDGEDETECT 5 #define IMAGE_FILTER_EMBOSS 6 #define IMAGE_FILTER_GAUSSIAN_BLUR 7 #define IMAGE_FILTER_SELECTIVE_BLUR 8 #define IMAGE_FILTER_MEAN_REMOVAL 9 #define IMAGE_FILTER_SMOOTH 10 
#define IMAGE_FILTER_PIXELATE 11 #define IMAGE_FILTER_MAX 11 #define IMAGE_FILTER_MAX_ARGS 6 static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS); static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS); /* End Section filters declarations */ static gdImagePtr _php_image_create_from_string (zval *Data, char *tn, gdImagePtr (*ioctx_func_p)()); static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)()); static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)()); static int _php_image_type(char data[8]); static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type); static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold); /* {{{ arginfo */ ZEND_BEGIN_ARG_INFO(arginfo_gd_info, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imageloadfont, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetstyle, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, styles) /* ARRAY_INFO(0, styles, 0) */ ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatetruecolor, 0) ZEND_ARG_INFO(0, x_size) ZEND_ARG_INFO(0, y_size) ZEND_END_ARG_INFO() 
ZEND_BEGIN_ARG_INFO(arginfo_imageistruecolor, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagetruecolortopalette, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, ditherFlag) ZEND_ARG_INFO(0, colorsWanted) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagepalettetotruecolor, 0) ZEND_ARG_INFO(0, im) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolormatch, 0) ZEND_ARG_INFO(0, im1) ZEND_ARG_INFO(0, im2) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetthickness, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, thickness) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledellipse, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, color) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagefilledarc, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, cx) ZEND_ARG_INFO(0, cy) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, h) ZEND_ARG_INFO(0, s) ZEND_ARG_INFO(0, e) ZEND_ARG_INFO(0, col) ZEND_ARG_INFO(0, style) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagealphablending, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, blend) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesavealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, save) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagelayereffect, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, effect) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocatealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolvealpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosestalpha, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexactalpha, 0) 
ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, red) ZEND_ARG_INFO(0, green) ZEND_ARG_INFO(0, blue) ZEND_ARG_INFO(0, alpha) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresampled, 0) ZEND_ARG_INFO(0, dst_im) ZEND_ARG_INFO(0, src_im) ZEND_ARG_INFO(0, dst_x) ZEND_ARG_INFO(0, dst_y) ZEND_ARG_INFO(0, src_x) ZEND_ARG_INFO(0, src_y) ZEND_ARG_INFO(0, dst_w) ZEND_ARG_INFO(0, dst_h) ZEND_ARG_INFO(0, src_w) ZEND_ARG_INFO(0, src_h) ZEND_END_ARG_INFO() #ifdef PHP_WIN32 ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegrabwindow, 0, 0, 1) ZEND_ARG_INFO(0, handle) ZEND_ARG_INFO(0, client_area) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagegrabscreen, 0) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagerotate, 0, 0, 3) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, angle) ZEND_ARG_INFO(0, bgdcolor) ZEND_ARG_INFO(0, ignoretransparent) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesettile, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, tile) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagesetbrush, 0) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, brush) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreate, 0) ZEND_ARG_INFO(0, x_size) ZEND_ARG_INFO(0, y_size) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagetypes, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromstring, 0) ZEND_ARG_INFO(0, image) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgif, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #ifdef HAVE_GD_JPG ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromjpeg, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_PNG ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefrompng, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_WEBP ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwebp, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxbm, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #if defined(HAVE_GD_XPM) ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxpm, 0) 
ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwbmp, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2part, 0) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, srcX) ZEND_ARG_INFO(0, srcY) ZEND_ARG_INFO(0, width) ZEND_ARG_INFO(0, height) ZEND_END_ARG_INFO() #if defined(HAVE_GD_BMP) ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefrombmp, 0) ZEND_ARG_INFO(0, filename) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagexbm, 0, 0, 2) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, foreground) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegif, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_END_ARG_INFO() #ifdef HAVE_GD_PNG ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepng, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_ARG_INFO(0, quality) ZEND_ARG_INFO(0, filters) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_WEBP ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewebp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_ARG_INFO(0, quality) ZEND_END_ARG_INFO() #endif #ifdef HAVE_GD_JPG ZEND_BEGIN_ARG_INFO_EX(arginfo_imagejpeg, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_ARG_INFO(0, quality) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewbmp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_ARG_INFO(0, foreground) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd2, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) ZEND_ARG_INFO(0, chunk_size) ZEND_ARG_INFO(0, type) ZEND_END_ARG_INFO() #if defined(HAVE_GD_BMP) ZEND_BEGIN_ARG_INFO_EX(arginfo_imagebmp, 0, 0, 1) ZEND_ARG_INFO(0, im) ZEND_ARG_INFO(0, to) 
ZEND_ARG_INFO(0, compressed)
ZEND_END_ARG_INFO()
#endif

/*
 * Arginfo tables describing the parameters of the GD image functions that
 * are registered in gd_functions[] further below. Every argument is passed
 * by value (flag 0); the _EX variants additionally record the number of
 * required arguments. (The two lines above close an arginfo block whose
 * opening lies before this chunk of the file.)
 */

ZEND_BEGIN_ARG_INFO(arginfo_imagedestroy, 0)
	ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocate, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagepalettecopy, 0)
	ZEND_ARG_INFO(0, dst)
	ZEND_ARG_INFO(0, src)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorat, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosest, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosesthwb, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolordeallocate, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolve, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexact, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()

/* 5 required arguments; alpha is optional */
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolorset, 0, 0, 5)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, color)
	ZEND_ARG_INFO(0, red)
	ZEND_ARG_INFO(0, green)
	ZEND_ARG_INFO(0, blue)
	ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorsforindex, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagegammacorrect, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, inputgamma)
	ZEND_ARG_INFO(0, outputgamma)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagesetpixel, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

/* Drawing primitives: line/rectangle variants all take two corner points
 * plus a color. */
ZEND_BEGIN_ARG_INFO(arginfo_imageline, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x1)
	ZEND_ARG_INFO(0, y1)
	ZEND_ARG_INFO(0, x2)
	ZEND_ARG_INFO(0, y2)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagedashedline, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x1)
	ZEND_ARG_INFO(0, y1)
	ZEND_ARG_INFO(0, x2)
	ZEND_ARG_INFO(0, y2)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagerectangle, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x1)
	ZEND_ARG_INFO(0, y1)
	ZEND_ARG_INFO(0, x2)
	ZEND_ARG_INFO(0, y2)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefilledrectangle, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x1)
	ZEND_ARG_INFO(0, y1)
	ZEND_ARG_INFO(0, x2)
	ZEND_ARG_INFO(0, y2)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagearc, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, cx)
	ZEND_ARG_INFO(0, cy)
	ZEND_ARG_INFO(0, w)
	ZEND_ARG_INFO(0, h)
	ZEND_ARG_INFO(0, s)
	ZEND_ARG_INFO(0, e)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageellipse, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, cx)
	ZEND_ARG_INFO(0, cy)
	ZEND_ARG_INFO(0, w)
	ZEND_ARG_INFO(0, h)
	ZEND_ARG_INFO(0, color)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefilltoborder, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, border)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefill, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecolorstotal, 0)
	ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolortransparent, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imageinterlace, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, interlace)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagepolygon, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */
	ZEND_ARG_INFO(0, num_pos)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageopenpolygon, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */
	ZEND_ARG_INFO(0, num_pos)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefilledpolygon, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */
	ZEND_ARG_INFO(0, num_pos)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefontwidth, 0)
	ZEND_ARG_INFO(0, font)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagefontheight, 0)
	ZEND_ARG_INFO(0, font)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagechar, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, font)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, c)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecharup, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, font)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, c)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagestring, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, font)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, str)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagestringup, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, font)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, str)
	ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()

/* Copy/merge family: note imagecopy takes (dst, src, ...) while the merge
 * variants historically declare (src, dst, ...). */
ZEND_BEGIN_ARG_INFO(arginfo_imagecopy, 0)
	ZEND_ARG_INFO(0, dst_im)
	ZEND_ARG_INFO(0, src_im)
	ZEND_ARG_INFO(0, dst_x)
	ZEND_ARG_INFO(0, dst_y)
	ZEND_ARG_INFO(0, src_x)
	ZEND_ARG_INFO(0, src_y)
	ZEND_ARG_INFO(0, src_w)
	ZEND_ARG_INFO(0, src_h)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecopymerge, 0)
	ZEND_ARG_INFO(0, src_im)
	ZEND_ARG_INFO(0, dst_im)
	ZEND_ARG_INFO(0, dst_x)
	ZEND_ARG_INFO(0, dst_y)
	ZEND_ARG_INFO(0, src_x)
	ZEND_ARG_INFO(0, src_y)
	ZEND_ARG_INFO(0, src_w)
	ZEND_ARG_INFO(0, src_h)
	ZEND_ARG_INFO(0, pct)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecopymergegray, 0)
	ZEND_ARG_INFO(0, src_im)
	ZEND_ARG_INFO(0, dst_im)
	ZEND_ARG_INFO(0, dst_x)
	ZEND_ARG_INFO(0, dst_y)
	ZEND_ARG_INFO(0, src_x)
	ZEND_ARG_INFO(0, src_y)
	ZEND_ARG_INFO(0, src_w)
	ZEND_ARG_INFO(0, src_h)
	ZEND_ARG_INFO(0, pct)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresized, 0)
	ZEND_ARG_INFO(0, dst_im)
	ZEND_ARG_INFO(0, src_im)
	ZEND_ARG_INFO(0, dst_x)
	ZEND_ARG_INFO(0, dst_y)
	ZEND_ARG_INFO(0, src_x)
	ZEND_ARG_INFO(0, src_y)
	ZEND_ARG_INFO(0, dst_w)
	ZEND_ARG_INFO(0, dst_h)
	ZEND_ARG_INFO(0, src_w)
	ZEND_ARG_INFO(0, src_h)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagesx, 0)
	ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagesy, 0)
	ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagesetclip, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, x1)
	ZEND_ARG_INFO(0, y1)
	ZEND_ARG_INFO(0, x2)
	ZEND_ARG_INFO(0, y2)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagegetclip, 0)
	ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()

/* FreeType-backed text functions are only compiled in when TTF support is
 * enabled at build time. */
#ifdef ENABLE_GD_TTF
#if HAVE_LIBFREETYPE
ZEND_BEGIN_ARG_INFO_EX(arginfo_imageftbbox, 0, 0, 4)
	ZEND_ARG_INFO(0, size)
	ZEND_ARG_INFO(0, angle)
	ZEND_ARG_INFO(0, font_file)
	ZEND_ARG_INFO(0, text)
	ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefttext, 0, 0, 8)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, size)
	ZEND_ARG_INFO(0, angle)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, col)
	ZEND_ARG_INFO(0, font_file)
	ZEND_ARG_INFO(0, text)
	ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */
ZEND_END_ARG_INFO()
#endif

ZEND_BEGIN_ARG_INFO(arginfo_imagettfbbox, 0)
	ZEND_ARG_INFO(0, size)
	ZEND_ARG_INFO(0, angle)
	ZEND_ARG_INFO(0, font_file)
	ZEND_ARG_INFO(0, text)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagettftext, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, size)
	ZEND_ARG_INFO(0, angle)
	ZEND_ARG_INFO(0, x)
	ZEND_ARG_INFO(0, y)
	ZEND_ARG_INFO(0, col)
	ZEND_ARG_INFO(0, font_file)
	ZEND_ARG_INFO(0, text)
ZEND_END_ARG_INFO()
#endif

ZEND_BEGIN_ARG_INFO_EX(arginfo_image2wbmp, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, filename)
	ZEND_ARG_INFO(0, threshold)
ZEND_END_ARG_INFO()

#if defined(HAVE_GD_JPG)
ZEND_BEGIN_ARG_INFO(arginfo_jpeg2wbmp, 0)
	ZEND_ARG_INFO(0, f_org)
	ZEND_ARG_INFO(0, f_dest)
	ZEND_ARG_INFO(0, d_height)
	ZEND_ARG_INFO(0, d_width)
	ZEND_ARG_INFO(0, d_threshold)
ZEND_END_ARG_INFO()
#endif

#if defined(HAVE_GD_PNG)
ZEND_BEGIN_ARG_INFO(arginfo_png2wbmp, 0)
	ZEND_ARG_INFO(0, f_org)
	ZEND_ARG_INFO(0, f_dest)
	ZEND_ARG_INFO(0, d_height)
	ZEND_ARG_INFO(0, d_width)
	ZEND_ARG_INFO(0, d_threshold)
ZEND_END_ARG_INFO()
#endif

ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefilter, 0, 0, 2)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, filtertype)
	ZEND_ARG_INFO(0, arg1)
	ZEND_ARG_INFO(0, arg2)
	ZEND_ARG_INFO(0, arg3)
	ZEND_ARG_INFO(0, arg4)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageconvolution, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, matrix3x3) /* ARRAY_INFO(0, matrix3x3, 0) */
	ZEND_ARG_INFO(0, div)
	ZEND_ARG_INFO(0, offset)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageflip, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageantialias, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, on)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imagecrop, 0)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, rect)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecropauto, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, mode)
	ZEND_ARG_INFO(0, threshold)
	ZEND_ARG_INFO(0, color)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imagescale, 0, 0, 2)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, new_width)
	ZEND_ARG_INFO(0, new_height)
	ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffine, 0, 0, 2)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, affine)
	ZEND_ARG_INFO(0, clip)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffinematrixget, 0, 0, 1)
	ZEND_ARG_INFO(0, type)
	ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_imageaffinematrixconcat, 0)
	ZEND_ARG_INFO(0, m1)
	ZEND_ARG_INFO(0, m2)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagesetinterpolation, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, method)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_imageresolution, 0, 0, 1)
	ZEND_ARG_INFO(0, im)
	ZEND_ARG_INFO(0, res_x)
	ZEND_ARG_INFO(0, res_y)
ZEND_END_ARG_INFO()
/* }}} */

/* {{{ gd_functions[]
 *
 * Registration table mapping PHP function names to their C implementations
 * and arginfo descriptors. Format-specific entries (PNG, WebP, JPEG, XPM,
 * BMP, FreeType) are conditionally compiled in. */
const zend_function_entry gd_functions[] = {
	PHP_FE(gd_info,                                 arginfo_gd_info)
	PHP_FE(imagearc,								arginfo_imagearc)
	PHP_FE(imageellipse,							arginfo_imageellipse)
	PHP_FE(imagechar,								arginfo_imagechar)
	PHP_FE(imagecharup,								arginfo_imagecharup)
	PHP_FE(imagecolorat,							arginfo_imagecolorat)
	PHP_FE(imagecolorallocate,						arginfo_imagecolorallocate)
	PHP_FE(imagepalettecopy,						arginfo_imagepalettecopy)
	PHP_FE(imagecreatefromstring,					arginfo_imagecreatefromstring)
	PHP_FE(imagecolorclosest,						arginfo_imagecolorclosest)
	PHP_FE(imagecolorclosesthwb,					arginfo_imagecolorclosesthwb)
	PHP_FE(imagecolordeallocate,					arginfo_imagecolordeallocate)
	PHP_FE(imagecolorresolve,						arginfo_imagecolorresolve)
	PHP_FE(imagecolorexact,							arginfo_imagecolorexact)
	PHP_FE(imagecolorset,							arginfo_imagecolorset)
	PHP_FE(imagecolortransparent,					arginfo_imagecolortransparent)
	PHP_FE(imagecolorstotal,						arginfo_imagecolorstotal)
	PHP_FE(imagecolorsforindex,						arginfo_imagecolorsforindex)
	PHP_FE(imagecopy,								arginfo_imagecopy)
	PHP_FE(imagecopymerge,							arginfo_imagecopymerge)
	PHP_FE(imagecopymergegray,						arginfo_imagecopymergegray)
	PHP_FE(imagecopyresized,						arginfo_imagecopyresized)
	PHP_FE(imagecreate,								arginfo_imagecreate)
	PHP_FE(imagecreatetruecolor,					arginfo_imagecreatetruecolor)
	PHP_FE(imageistruecolor,						arginfo_imageistruecolor)
	PHP_FE(imagetruecolortopalette,					arginfo_imagetruecolortopalette)
	PHP_FE(imagepalettetotruecolor,					arginfo_imagepalettetotruecolor)
	PHP_FE(imagesetthickness,						arginfo_imagesetthickness)
	PHP_FE(imagefilledarc,							arginfo_imagefilledarc)
	PHP_FE(imagefilledellipse,						arginfo_imagefilledellipse)
	PHP_FE(imagealphablending,						arginfo_imagealphablending)
	PHP_FE(imagesavealpha,							arginfo_imagesavealpha)
	PHP_FE(imagecolorallocatealpha,					arginfo_imagecolorallocatealpha)
	PHP_FE(imagecolorresolvealpha,					arginfo_imagecolorresolvealpha)
	PHP_FE(imagecolorclosestalpha,					arginfo_imagecolorclosestalpha)
	PHP_FE(imagecolorexactalpha,					arginfo_imagecolorexactalpha)
	PHP_FE(imagecopyresampled,						arginfo_imagecopyresampled)

	/* Screen/window capture is Windows-only. */
#ifdef PHP_WIN32
	PHP_FE(imagegrabwindow,							arginfo_imagegrabwindow)
	PHP_FE(imagegrabscreen,							arginfo_imagegrabscreen)
#endif

	PHP_FE(imagerotate,								arginfo_imagerotate)
	PHP_FE(imageflip,								arginfo_imageflip)
	PHP_FE(imageantialias,							arginfo_imageantialias)
	PHP_FE(imagecrop,								arginfo_imagecrop)
	PHP_FE(imagecropauto,							arginfo_imagecropauto)
	PHP_FE(imagescale,								arginfo_imagescale)
	PHP_FE(imageaffine,								arginfo_imageaffine)
	PHP_FE(imageaffinematrixconcat,					arginfo_imageaffinematrixconcat)
	PHP_FE(imageaffinematrixget,					arginfo_imageaffinematrixget)
	PHP_FE(imagesetinterpolation,					arginfo_imagesetinterpolation)
	PHP_FE(imagesettile,							arginfo_imagesettile)
	PHP_FE(imagesetbrush,							arginfo_imagesetbrush)
	PHP_FE(imagesetstyle,							arginfo_imagesetstyle)

#ifdef HAVE_GD_PNG
	PHP_FE(imagecreatefrompng,						arginfo_imagecreatefrompng)
#endif
#ifdef HAVE_GD_WEBP
	PHP_FE(imagecreatefromwebp,						arginfo_imagecreatefromwebp)
#endif
	PHP_FE(imagecreatefromgif,						arginfo_imagecreatefromgif)
#ifdef HAVE_GD_JPG
	PHP_FE(imagecreatefromjpeg,						arginfo_imagecreatefromjpeg)
#endif
	PHP_FE(imagecreatefromwbmp,						arginfo_imagecreatefromwbmp)
	PHP_FE(imagecreatefromxbm,						arginfo_imagecreatefromxbm)
#if defined(HAVE_GD_XPM)
	PHP_FE(imagecreatefromxpm,						arginfo_imagecreatefromxpm)
#endif
	PHP_FE(imagecreatefromgd,						arginfo_imagecreatefromgd)
	PHP_FE(imagecreatefromgd2,						arginfo_imagecreatefromgd2)
	PHP_FE(imagecreatefromgd2part,					arginfo_imagecreatefromgd2part)
#ifdef HAVE_GD_BMP
	PHP_FE(imagecreatefrombmp,						arginfo_imagecreatefrombmp)
#endif

#ifdef HAVE_GD_PNG
	PHP_FE(imagepng,								arginfo_imagepng)
#endif
#ifdef HAVE_GD_WEBP
	PHP_FE(imagewebp,								arginfo_imagewebp)
#endif
	PHP_FE(imagegif,								arginfo_imagegif)
#ifdef HAVE_GD_JPG
	PHP_FE(imagejpeg,								arginfo_imagejpeg)
#endif
	PHP_FE(imagewbmp,								arginfo_imagewbmp)
	PHP_FE(imagegd,									arginfo_imagegd)
	PHP_FE(imagegd2,								arginfo_imagegd2)
#ifdef HAVE_GD_BMP
	PHP_FE(imagebmp,								arginfo_imagebmp)
#endif

	PHP_FE(imagedestroy,							arginfo_imagedestroy)
	PHP_FE(imagegammacorrect,						arginfo_imagegammacorrect)
	PHP_FE(imagefill,								arginfo_imagefill)
	PHP_FE(imagefilledpolygon,						arginfo_imagefilledpolygon)
	PHP_FE(imagefilledrectangle,					arginfo_imagefilledrectangle)
	PHP_FE(imagefilltoborder,						arginfo_imagefilltoborder)
	PHP_FE(imagefontwidth,							arginfo_imagefontwidth)
	PHP_FE(imagefontheight,							arginfo_imagefontheight)
	PHP_FE(imageinterlace,							arginfo_imageinterlace)
	PHP_FE(imageline,								arginfo_imageline)
	PHP_FE(imageloadfont,							arginfo_imageloadfont)
	PHP_FE(imagepolygon,							arginfo_imagepolygon)
	PHP_FE(imageopenpolygon,						arginfo_imageopenpolygon)
	PHP_FE(imagerectangle,							arginfo_imagerectangle)
	PHP_FE(imagesetpixel,							arginfo_imagesetpixel)
	PHP_FE(imagestring,								arginfo_imagestring)
	PHP_FE(imagestringup,							arginfo_imagestringup)
	PHP_FE(imagesx,									arginfo_imagesx)
	PHP_FE(imagesy,									arginfo_imagesy)
	PHP_FE(imagesetclip,							arginfo_imagesetclip)
	PHP_FE(imagegetclip,							arginfo_imagegetclip)
	PHP_FE(imagedashedline,							arginfo_imagedashedline)

#ifdef ENABLE_GD_TTF
	PHP_FE(imagettfbbox,							arginfo_imagettfbbox)
	PHP_FE(imagettftext,							arginfo_imagettftext)
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
	PHP_FE(imageftbbox,								arginfo_imageftbbox)
	PHP_FE(imagefttext,								arginfo_imagefttext)
#endif
#endif

	PHP_FE(imagetypes,								arginfo_imagetypes)

	/* jpeg2wbmp/png2wbmp are registered as deprecated (PHP_DEP_FE). */
#if defined(HAVE_GD_JPG)
	PHP_DEP_FE(jpeg2wbmp,							arginfo_jpeg2wbmp)
#endif
#if defined(HAVE_GD_PNG)
	PHP_DEP_FE(png2wbmp,							arginfo_png2wbmp)
#endif

	PHP_FE(image2wbmp,								arginfo_image2wbmp)
	PHP_FE(imagelayereffect,						arginfo_imagelayereffect)
	PHP_FE(imagexbm,								arginfo_imagexbm)
	PHP_FE(imagecolormatch,							arginfo_imagecolormatch)

	/* gd filters */
	PHP_FE(imagefilter,								arginfo_imagefilter)
	PHP_FE(imageconvolution,						arginfo_imageconvolution)

	PHP_FE(imageresolution,							arginfo_imageresolution)

	PHP_FE_END
};
/* }}} */

/* Module entry; the initializer continues past this chunk of the file. */
zend_module_entry
gd_module_entry = {
	STANDARD_MODULE_HEADER,
	"gd",
	gd_functions,
	PHP_MINIT(gd),
	NULL,
	NULL,
	/* The request-shutdown hook is only needed when the bundled FreeType
	 * font cache is in use (see PHP_RSHUTDOWN_FUNCTION below). */
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
	PHP_RSHUTDOWN(gd),
#else
	NULL,
#endif
	PHP_MINFO(gd),
	PHP_GD_VERSION,
	STANDARD_MODULE_PROPERTIES
};

#ifdef COMPILE_DL_GD
ZEND_GET_MODULE(gd)
#endif

/* {{{ PHP_INI_BEGIN */
PHP_INI_BEGIN()
	PHP_INI_ENTRY("gd.jpeg_ignore_warning", "1", PHP_INI_ALL, NULL)
PHP_INI_END()
/* }}} */

/* {{{ php_free_gd_image
 * Resource destructor for "gd" image resources. */
static void php_free_gd_image(zend_resource *rsrc)
{
	gdImageDestroy((gdImagePtr) rsrc->ptr);
}
/* }}} */

/* {{{ php_free_gd_font
 * Resource destructor for "gd font" resources: frees the glyph bitmap
 * (if loaded) and then the font struct itself. */
static void php_free_gd_font(zend_resource *rsrc)
{
	gdFontPtr fp = (gdFontPtr) rsrc->ptr;

	if (fp->data) {
		efree(fp->data);
	}

	efree(fp);
}
/* }}} */

/* {{{ php_gd_error_method
 * Routes libgd diagnostics through PHP's error machinery, mapping gd
 * severity levels onto E_NOTICE / E_WARNING / E_ERROR. */
void php_gd_error_method(int type, const char *format, va_list args)
{
	switch (type) {
#ifndef PHP_WIN32
		case GD_DEBUG:
		case GD_INFO:
#endif
		case GD_NOTICE:
			type = E_NOTICE;
			break;
		case GD_WARNING:
			type = E_WARNING;
			break;
		default:
			type = E_ERROR;
	}
	php_verror(NULL, "", type, format, args);
}
/* }}} */

/* {{{ PHP_MINIT_FUNCTION
 * Registers the image/font resource types, installs the gd error handler,
 * and registers all IMG_* / GD_* / PNG_* constants. */
PHP_MINIT_FUNCTION(gd)
{
	le_gd = zend_register_list_destructors_ex(php_free_gd_image, NULL, "gd", module_number);
	le_gd_font = zend_register_list_destructors_ex(php_free_gd_font, NULL, "gd font", module_number);

#if HAVE_GD_BUNDLED && HAVE_LIBFREETYPE
	gdFontCacheMutexSetup();
#endif
	gdSetErrorMethod(php_gd_error_method);

	REGISTER_INI_ENTRIES();

	/* Image format flags (powers of two; IMG_JPG and IMG_JPEG are aliases). */
	REGISTER_LONG_CONSTANT("IMG_GIF", 1, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_JPG", 2, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_JPEG", 2, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_PNG", 4, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_WBMP", 8, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_XPM", 16, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_WEBP", 32, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BMP", 64, CONST_CS | CONST_PERSISTENT);

	/* special colours for gd */
	REGISTER_LONG_CONSTANT("IMG_COLOR_TILED", gdTiled, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_COLOR_STYLED", gdStyled, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_COLOR_BRUSHED", gdBrushed, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_COLOR_STYLEDBRUSHED", gdStyledBrushed, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_COLOR_TRANSPARENT", gdTransparent, CONST_CS | CONST_PERSISTENT);

	/* for imagefilledarc */
	REGISTER_LONG_CONSTANT("IMG_ARC_ROUNDED", gdArc, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_ARC_PIE", gdPie, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_ARC_CHORD", gdChord, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_ARC_NOFILL", gdNoFill, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_ARC_EDGED", gdEdged, CONST_CS | CONST_PERSISTENT);

	/* GD2 image format types */
	REGISTER_LONG_CONSTANT("IMG_GD2_RAW", GD2_FMT_RAW, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_GD2_COMPRESSED", GD2_FMT_COMPRESSED, CONST_CS | CONST_PERSISTENT);

	/* NOTE: GD_FLIP_HORINZONTAL is libgd's own (misspelled) identifier. */
	REGISTER_LONG_CONSTANT("IMG_FLIP_HORIZONTAL", GD_FLIP_HORINZONTAL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FLIP_VERTICAL", GD_FLIP_VERTICAL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FLIP_BOTH", GD_FLIP_BOTH, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_EFFECT_REPLACE", gdEffectReplace, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_EFFECT_ALPHABLEND", gdEffectAlphaBlend, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_EFFECT_NORMAL", gdEffectNormal, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_EFFECT_OVERLAY", gdEffectOverlay, CONST_CS | CONST_PERSISTENT);
#ifdef gdEffectMultiply
	REGISTER_LONG_CONSTANT("IMG_EFFECT_MULTIPLY", gdEffectMultiply, CONST_CS | CONST_PERSISTENT);
#endif

	REGISTER_LONG_CONSTANT("IMG_CROP_DEFAULT", GD_CROP_DEFAULT, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CROP_TRANSPARENT", GD_CROP_TRANSPARENT, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CROP_BLACK", GD_CROP_BLACK, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CROP_WHITE", GD_CROP_WHITE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CROP_SIDES", GD_CROP_SIDES, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CROP_THRESHOLD", GD_CROP_THRESHOLD, CONST_CS | CONST_PERSISTENT);

	/* Interpolation methods for imagesetinterpolation()/imagescale(). */
	REGISTER_LONG_CONSTANT("IMG_BELL", GD_BELL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BESSEL", GD_BESSEL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BILINEAR_FIXED", GD_BILINEAR_FIXED, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BICUBIC", GD_BICUBIC, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BICUBIC_FIXED", GD_BICUBIC_FIXED, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BLACKMAN", GD_BLACKMAN, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BOX", GD_BOX, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_BSPLINE", GD_BSPLINE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_CATMULLROM", GD_CATMULLROM, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_GAUSSIAN", GD_GAUSSIAN, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_GENERALIZED_CUBIC", GD_GENERALIZED_CUBIC, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_HERMITE", GD_HERMITE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_HAMMING", GD_HAMMING, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_HANNING", GD_HANNING, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_MITCHELL", GD_MITCHELL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_POWER", GD_POWER, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_QUADRATIC", GD_QUADRATIC, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_SINC", GD_SINC, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_NEAREST_NEIGHBOUR", GD_NEAREST_NEIGHBOUR, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_WEIGHTED4", GD_WEIGHTED4, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_TRIANGLE", GD_TRIANGLE, CONST_CS | CONST_PERSISTENT);

	/* Affine transform types for imageaffinematrixget(). */
	REGISTER_LONG_CONSTANT("IMG_AFFINE_TRANSLATE", GD_AFFINE_TRANSLATE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_AFFINE_SCALE", GD_AFFINE_SCALE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_AFFINE_ROTATE", GD_AFFINE_ROTATE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_HORIZONTAL", GD_AFFINE_SHEAR_HORIZONTAL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_VERTICAL", GD_AFFINE_SHEAR_VERTICAL, CONST_CS | CONST_PERSISTENT);

#if defined(HAVE_GD_BUNDLED)
	REGISTER_LONG_CONSTANT("GD_BUNDLED", 1, CONST_CS | CONST_PERSISTENT);
#else
	REGISTER_LONG_CONSTANT("GD_BUNDLED", 0, CONST_CS | CONST_PERSISTENT);
#endif

	/* Section Filters */
	REGISTER_LONG_CONSTANT("IMG_FILTER_NEGATE", IMAGE_FILTER_NEGATE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_GRAYSCALE", IMAGE_FILTER_GRAYSCALE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_BRIGHTNESS", IMAGE_FILTER_BRIGHTNESS, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_CONTRAST", IMAGE_FILTER_CONTRAST, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_COLORIZE", IMAGE_FILTER_COLORIZE, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_EDGEDETECT", IMAGE_FILTER_EDGEDETECT, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_GAUSSIAN_BLUR", IMAGE_FILTER_GAUSSIAN_BLUR, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_SELECTIVE_BLUR", IMAGE_FILTER_SELECTIVE_BLUR, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_EMBOSS", IMAGE_FILTER_EMBOSS, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_MEAN_REMOVAL", IMAGE_FILTER_MEAN_REMOVAL, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_SMOOTH", IMAGE_FILTER_SMOOTH, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("IMG_FILTER_PIXELATE", IMAGE_FILTER_PIXELATE, CONST_CS | CONST_PERSISTENT);
	/* End Section Filters */

#ifdef GD_VERSION_STRING
	REGISTER_STRING_CONSTANT("GD_VERSION", GD_VERSION_STRING, CONST_CS | CONST_PERSISTENT);
#endif

#if defined(GD_MAJOR_VERSION) && defined(GD_MINOR_VERSION) && defined(GD_RELEASE_VERSION) && defined(GD_EXTRA_VERSION)
	REGISTER_LONG_CONSTANT("GD_MAJOR_VERSION", GD_MAJOR_VERSION, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("GD_MINOR_VERSION", GD_MINOR_VERSION, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("GD_RELEASE_VERSION", GD_RELEASE_VERSION, CONST_CS | CONST_PERSISTENT);
	REGISTER_STRING_CONSTANT("GD_EXTRA_VERSION", GD_EXTRA_VERSION, CONST_CS | CONST_PERSISTENT);
#endif

#ifdef HAVE_GD_PNG
	/*
	 * cannot include #include "png.h"
	 * /usr/include/pngconf.h:310:2: error: #error png.h already includes setjmp.h with some additional fixup.
	 * as error, use the values for now...
	 */
	REGISTER_LONG_CONSTANT("PNG_NO_FILTER",	    0x00, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_FILTER_NONE",   0x08, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_FILTER_SUB",    0x10, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_FILTER_UP",     0x20, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_FILTER_AVG",    0x40, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_FILTER_PAETH",  0x80, CONST_CS | CONST_PERSISTENT);
	REGISTER_LONG_CONSTANT("PNG_ALL_FILTERS",   0x08 | 0x10 | 0x20 | 0x40 | 0x80, CONST_CS | CONST_PERSISTENT);
#endif

	return SUCCESS;
}
/* }}} */

/* {{{ PHP_RSHUTDOWN_FUNCTION
 * Flushes the FreeType font cache at the end of each request. */
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
PHP_RSHUTDOWN_FUNCTION(gd)
{
	gdFontCacheShutdown();
	return SUCCESS;
}
#endif
/* }}} */

#if defined(HAVE_GD_BUNDLED)
#define PHP_GD_VERSION_STRING "bundled (2.1.0 compatible)"
#else
# define PHP_GD_VERSION_STRING GD_VERSION_STRING
#endif

/* {{{ PHP_MINFO_FUNCTION */
PHP_MINFO_FUNCTION(gd)
{
	php_info_print_table_start();
	php_info_print_table_row(2, "GD Support", "enabled");

	/* need to use a PHPAPI function here because it
is external module in windows */
	/* Report either the bundled library version or the headers/library
	 * versions of an external libgd. */
#if defined(HAVE_GD_BUNDLED)
	php_info_print_table_row(2, "GD Version", PHP_GD_VERSION_STRING);
#else
	php_info_print_table_row(2, "GD headers Version", PHP_GD_VERSION_STRING);
#if defined(HAVE_GD_LIBVERSION)
	php_info_print_table_row(2, "GD library Version", gdVersionString());
#endif
#endif

#ifdef ENABLE_GD_TTF
	php_info_print_table_row(2, "FreeType Support", "enabled");
#if HAVE_LIBFREETYPE
	php_info_print_table_row(2, "FreeType Linkage", "with freetype");
	{
		char tmp[256];

		/* FREETYPE_PATCH only exists in newer FreeType releases, so degrade
		 * the reported version string gracefully. */
#ifdef FREETYPE_PATCH
		snprintf(tmp, sizeof(tmp), "%d.%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR, FREETYPE_PATCH);
#elif defined(FREETYPE_MAJOR)
		snprintf(tmp, sizeof(tmp), "%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR);
#else
		snprintf(tmp, sizeof(tmp), "1.x");
#endif
		php_info_print_table_row(2, "FreeType Version", tmp);
	}
#else
	php_info_print_table_row(2, "FreeType Linkage", "with unknown library");
#endif
#endif

	php_info_print_table_row(2, "GIF Read Support", "enabled");
	php_info_print_table_row(2, "GIF Create Support", "enabled");

#ifdef HAVE_GD_JPG
	{
		php_info_print_table_row(2, "JPEG Support", "enabled");
		php_info_print_table_row(2, "libJPEG Version", gdJpegGetVersionString());
	}
#endif

#ifdef HAVE_GD_PNG
	php_info_print_table_row(2, "PNG Support", "enabled");
	php_info_print_table_row(2, "libPNG Version", gdPngGetVersionString());
#endif

	php_info_print_table_row(2, "WBMP Support", "enabled");

#if defined(HAVE_GD_XPM)
	php_info_print_table_row(2, "XPM Support", "enabled");
	{
		char tmp[12];
		snprintf(tmp, sizeof(tmp), "%d", XpmLibraryVersion());
		php_info_print_table_row(2, "libXpm Version", tmp);
	}
#endif

	php_info_print_table_row(2, "XBM Support", "enabled");

#if defined(USE_GD_JISX0208)
	php_info_print_table_row(2, "JIS-mapped Japanese Font Support", "enabled");
#endif

#ifdef HAVE_GD_WEBP
	php_info_print_table_row(2, "WebP Support", "enabled");
#endif

	php_info_print_table_end();
	DISPLAY_INI_ENTRIES();
}
/* }}} */

/* {{{ proto array gd_info()
 * Returns an associative array describing the capabilities compiled into
 * this build of the extension. */
PHP_FUNCTION(gd_info)
{
	if (zend_parse_parameters_none() == FAILURE) {
		RETURN_FALSE;
	}

	array_init(return_value);

	add_assoc_string(return_value, "GD Version", PHP_GD_VERSION_STRING);

#ifdef ENABLE_GD_TTF
	add_assoc_bool(return_value, "FreeType Support", 1);
#if HAVE_LIBFREETYPE
	add_assoc_string(return_value, "FreeType Linkage", "with freetype");
#else
	add_assoc_string(return_value, "FreeType Linkage", "with unknown library");
#endif
#else
	add_assoc_bool(return_value, "FreeType Support", 0);
#endif
	add_assoc_bool(return_value, "GIF Read Support", 1);
	add_assoc_bool(return_value, "GIF Create Support", 1);
#ifdef HAVE_GD_JPG
	add_assoc_bool(return_value, "JPEG Support", 1);
#else
	add_assoc_bool(return_value, "JPEG Support", 0);
#endif
#ifdef HAVE_GD_PNG
	add_assoc_bool(return_value, "PNG Support", 1);
#else
	add_assoc_bool(return_value, "PNG Support", 0);
#endif
	add_assoc_bool(return_value, "WBMP Support", 1);
#if defined(HAVE_GD_XPM)
	add_assoc_bool(return_value, "XPM Support", 1);
#else
	add_assoc_bool(return_value, "XPM Support", 0);
#endif
	add_assoc_bool(return_value, "XBM Support", 1);
#ifdef HAVE_GD_WEBP
	add_assoc_bool(return_value, "WebP Support", 1);
#else
	add_assoc_bool(return_value, "WebP Support", 0);
#endif
#ifdef HAVE_GD_BMP
	add_assoc_bool(return_value, "BMP Support", 1);
#else
	add_assoc_bool(return_value, "BMP Support", 0);
#endif
#if defined(USE_GD_JISX0208)
	add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 1);
#else
	add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 0);
#endif
}
/* }}} */

/* Need this for cpdf.
See also comment in file.c php3i_get_le_fp() */ PHP_GD_API int phpi_get_le_gd(void) { return le_gd; } /* }}} */ #define FLIPWORD(a) (((a & 0xff000000) >> 24) | ((a & 0x00ff0000) >> 8) | ((a & 0x0000ff00) << 8) | ((a & 0x000000ff) << 24)) /* {{{ proto int imageloadfont(string filename) Load a new font */ PHP_FUNCTION(imageloadfont) { zval *ind; zend_string *file; int hdr_size = sizeof(gdFont) - sizeof(char *); int body_size, n = 0, b, i, body_size_check; gdFontPtr font; php_stream *stream; if (zend_parse_parameters(ZEND_NUM_ARGS(), "P", &file) == FAILURE) { return; } stream = php_stream_open_wrapper(ZSTR_VAL(file), "rb", IGNORE_PATH | IGNORE_URL_WIN | REPORT_ERRORS, NULL); if (stream == NULL) { RETURN_FALSE; } /* Only supports a architecture-dependent binary dump format * at the moment. * The file format is like this on machines with 32-byte integers: * * byte 0-3: (int) number of characters in the font * byte 4-7: (int) value of first character in the font (often 32, space) * byte 8-11: (int) pixel width of each character * byte 12-15: (int) pixel height of each character * bytes 16-: (char) array with character data, one byte per pixel * in each character, for a total of * (nchars*width*height) bytes. 
*/ font = (gdFontPtr) emalloc(sizeof(gdFont)); b = 0; while (b < hdr_size && (n = php_stream_read(stream, (char*)&font[b], hdr_size - b))) { b += n; } if (!n) { efree(font); if (php_stream_eof(stream)) { php_error_docref(NULL, E_WARNING, "End of file while reading header"); } else { php_error_docref(NULL, E_WARNING, "Error while reading header"); } php_stream_close(stream); RETURN_FALSE; } i = php_stream_tell(stream); php_stream_seek(stream, 0, SEEK_END); body_size_check = php_stream_tell(stream) - hdr_size; php_stream_seek(stream, i, SEEK_SET); body_size = font->w * font->h * font->nchars; if (body_size != body_size_check) { font->w = FLIPWORD(font->w); font->h = FLIPWORD(font->h); font->nchars = FLIPWORD(font->nchars); body_size = font->w * font->h * font->nchars; } if (overflow2(font->nchars, font->h) || overflow2(font->nchars * font->h, font->w )) { php_error_docref(NULL, E_WARNING, "Error reading font, invalid font header"); efree(font); php_stream_close(stream); RETURN_FALSE; } if (body_size != body_size_check) { php_error_docref(NULL, E_WARNING, "Error reading font"); efree(font); php_stream_close(stream); RETURN_FALSE; } font->data = emalloc(body_size); b = 0; while (b < body_size && (n = php_stream_read(stream, &font->data[b], body_size - b))) { b += n; } if (!n) { efree(font->data); efree(font); if (php_stream_eof(stream)) { php_error_docref(NULL, E_WARNING, "End of file while reading body"); } else { php_error_docref(NULL, E_WARNING, "Error while reading body"); } php_stream_close(stream); RETURN_FALSE; } php_stream_close(stream); ind = zend_list_insert(font, le_gd_font); /* Adding 5 to the font index so we will never have font indices * that overlap with the old fonts (with indices 1-5). The first * list index given out is always 1. */ RETURN_LONG(Z_RES_HANDLE_P(ind) + 5); } /* }}} */ /* {{{ proto bool imagesetstyle(resource im, array styles) Set the line drawing styles for use with imageline and IMG_COLOR_STYLED. 
 */
PHP_FUNCTION(imagesetstyle)
{
	zval *IM, *styles, *item;
	gdImagePtr im;
	int *stylearr;
	int index = 0;
	uint32_t num_styles;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "ra", &IM, &styles) == FAILURE)  {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	num_styles = zend_hash_num_elements(Z_ARRVAL_P(styles));
	if (num_styles == 0) {
		php_error_docref(NULL, E_WARNING, "styles array must not be empty");
		RETURN_FALSE;
	}

	/* copy the style values in the stylearr */
	stylearr = safe_emalloc(sizeof(int), num_styles, 0);

	ZEND_HASH_FOREACH_VAL(Z_ARRVAL_P(styles), item) {
		stylearr[index++] = zval_get_long(item);
	} ZEND_HASH_FOREACH_END();

	gdImageSetStyle(im, stylearr, index);

	efree(stylearr);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto resource imagecreatetruecolor(int x_size, int y_size)
   Create a new true color image */
PHP_FUNCTION(imagecreatetruecolor)
{
	zend_long x_size, y_size;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll", &x_size, &y_size) == FAILURE) {
		return;
	}

	/* Reject non-positive and INT-overflowing dimensions before handing
	 * them to libgd. */
	if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) {
		php_error_docref(NULL, E_WARNING, "Invalid image dimensions");
		RETURN_FALSE;
	}

	im = gdImageCreateTrueColor(x_size, y_size);

	if (!im) {
		RETURN_FALSE;
	}

	RETURN_RES(zend_register_resource(im, le_gd));
}
/* }}} */

/* {{{ proto bool imageistruecolor(resource im)
   return true if the image uses truecolor */
PHP_FUNCTION(imageistruecolor)
{
	zval *IM;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	RETURN_BOOL(im->trueColor);
}
/* }}} */

/* {{{ proto void imagetruecolortopalette(resource im, bool ditherFlag, int colorsWanted)
   Convert a true color image to a palette based image with a number of colors, optionally using dithering. */
PHP_FUNCTION(imagetruecolortopalette)
{
	zval *IM;
	zend_bool dither;
	zend_long ncolors;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rbl", &IM, &dither, &ncolors) == FAILURE)  {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	if (ncolors <= 0 || ZEND_LONG_INT_OVFL(ncolors)) {
		php_error_docref(NULL, E_WARNING, "Number of colors has to be greater than zero and no more than %d", INT_MAX);
		RETURN_FALSE;
	}

	if (gdImageTrueColorToPalette(im, dither, (int)ncolors)) {
		RETURN_TRUE;
	} else {
		php_error_docref(NULL, E_WARNING, "Couldn't convert to palette");
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto void imagepalettetotruecolor(resource im)
   Convert a palette based image to a true color image. */
PHP_FUNCTION(imagepalettetotruecolor)
{
	zval *IM;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE)  {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	if (gdImagePaletteToTrueColor(im) == 0) {
		RETURN_FALSE;
	}

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagecolormatch(resource im1, resource im2)
   Makes the colors of the palette version of an image more closely match the true color version */
PHP_FUNCTION(imagecolormatch)
{
	zval *IM1, *IM2;
	gdImagePtr im1, im2;
	int result;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rr", &IM1, &IM2) == FAILURE) {
		return;
	}

	if ((im1 = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM1), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}
	if ((im2 = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM2), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	/* gdImageColorMatch reports failures via distinct negative codes;
	 * translate each into a specific warning. */
	result = gdImageColorMatch(im1, im2);
	switch (result) {
		case -1:
			php_error_docref(NULL, E_WARNING, "Image1 must be TrueColor" );
			RETURN_FALSE;
			break;
		case -2:
			php_error_docref(NULL, E_WARNING, "Image2 must be Palette" );
			RETURN_FALSE;
			break;
		case -3:
			php_error_docref(NULL, E_WARNING, "Image1 and Image2 must be the same size" );
			RETURN_FALSE;
			break;
		case -4:
			php_error_docref(NULL, E_WARNING, "Image2 must have at least one color" );
			RETURN_FALSE;
			break;
	}

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagesetthickness(resource im, int thickness)
   Set line thickness for drawing lines, ellipses, rectangles, polygons etc. */
PHP_FUNCTION(imagesetthickness)
{
	zval *IM;
	zend_long thick;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &IM, &thick) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	gdImageSetThickness(im, thick);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagefilledellipse(resource im, int cx, int cy, int w, int h, int color)
   Draw an ellipse */
PHP_FUNCTION(imagefilledellipse)
{
	zval *IM;
	zend_long cx, cy, w, h, color;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	gdImageFilledEllipse(im, cx, cy, w, h, color);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagefilledarc(resource im, int cx, int cy, int w, int h, int s, int e, int col, int style)
   Draw a filled partial ellipse */
PHP_FUNCTION(imagefilledarc)
{
	zval *IM;
	zend_long cx, cy, w, h, ST, E, col, style;
	gdImagePtr im;
	int e, st;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col, &style) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	/* NOTE(review): for negative angles, C's %% keeps the sign, so e/st
	 * stay in (-359..0] rather than being normalized to [0..359] —
	 * presumably relying on libgd to accept negative angles; verify. */
	e = E;
	if (e < 0) {
		e %= 360;
	}

	st = ST;
	if (st < 0) {
		st %= 360;
	}

	gdImageFilledArc(im, cx, cy, w, h, st, e, col, style);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagealphablending(resource im, bool on)
   Turn alpha blending mode on or off for the given image */
PHP_FUNCTION(imagealphablending)
{
	zval *IM;
	zend_bool blend;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rb", &IM, &blend) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	gdImageAlphaBlending(im, blend);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagesavealpha(resource im, bool on)
   Include alpha channel to a saved image */
PHP_FUNCTION(imagesavealpha)
{
	zval *IM;
	zend_bool save;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rb", &IM, &save) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	gdImageSaveAlpha(im, save);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imagelayereffect(resource im, int effect)
   Set the alpha blending flag to use the bundled libgd layering effects */
PHP_FUNCTION(imagelayereffect)
{
	zval *IM;
	zend_long effect;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &IM, &effect) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	/* The IMG_EFFECT_* value is passed through gdImageAlphaBlending; the
	 * blending flag doubles as the layer-effect selector here. */
	gdImageAlphaBlending(im, effect);

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto int imagecolorallocatealpha(resource im, int red, int green, int blue, int alpha)
   Allocate a color with an alpha level.  Works for true color and palette based images */
PHP_FUNCTION(imagecolorallocatealpha)
{
	zval *IM;
	zend_long red, green, blue, alpha;
	gdImagePtr im;
	int ct = (-1);

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
		RETURN_FALSE;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	ct = gdImageColorAllocateAlpha(im, red, green, blue, alpha);
	if (ct < 0) {
		RETURN_FALSE;
	}
	RETURN_LONG((zend_long)ct);
}
/* }}} */

/* {{{ proto int imagecolorresolvealpha(resource im, int red, int green, int blue, int alpha)
   Resolve/Allocate a colour with an alpha level.
Works for true colour and palette based images */ PHP_FUNCTION(imagecolorresolvealpha) { zval *IM; zend_long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorResolveAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto int imagecolorclosestalpha(resource im, int red, int green, int blue, int alpha) Find the closest matching colour with alpha transparency */ PHP_FUNCTION(imagecolorclosestalpha) { zval *IM; zend_long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorClosestAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto int imagecolorexactalpha(resource im, int red, int green, int blue, int alpha) Find exact match for colour with transparency */ PHP_FUNCTION(imagecolorexactalpha) { zval *IM; zend_long red, green, blue, alpha; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorExactAlpha(im, red, green, blue, alpha)); } /* }}} */ /* {{{ proto bool imagecopyresampled(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h) Copy and resize part of an image using resampling to help ensure clarity */ PHP_FUNCTION(imagecopyresampled) { zval *SIM, *DIM; zend_long SX, SY, SW, SH, DX, DY, DW, DH; gdImagePtr im_dst, im_src; int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, 
&SY, &DW, &DH, &SW, &SH) == FAILURE) { return; } if ((im_dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; dstH = DH; dstW = DW; gdImageCopyResampled(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH); RETURN_TRUE; } /* }}} */ #ifdef PHP_WIN32 /* {{{ proto resource imagegrabwindow(int window_handle [, int client_area]) Grab a window or its client area using a windows handle (HWND property in COM instance) */ PHP_FUNCTION(imagegrabwindow) { HWND window; zend_long client_area = 0; RECT rc = {0}; RECT rc_win = {0}; int Width, Height; HDC hdc; HDC memDC; HBITMAP memBM; HBITMAP hOld; zend_long lwindow_handle; gdImagePtr im = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|l", &lwindow_handle, &client_area) == FAILURE) { RETURN_FALSE; } window = (HWND) lwindow_handle; if (!IsWindow(window)) { php_error_docref(NULL, E_NOTICE, "Invalid window handle"); RETURN_FALSE; } hdc = GetDC(0); if (client_area) { GetClientRect(window, &rc); Width = rc.right; Height = rc.bottom; } else { GetWindowRect(window, &rc); Width = rc.right - rc.left; Height = rc.bottom - rc.top; } Width = (Width/4)*4; memDC = CreateCompatibleDC(hdc); memBM = CreateCompatibleBitmap(hdc, Width, Height); hOld = (HBITMAP) SelectObject (memDC, memBM); PrintWindow(window, memDC, (UINT) client_area); im = gdImageCreateTrueColor(Width, Height); if (im) { int x,y; for (y=0; y <= Height; y++) { for (x=0; x <= Width; x++) { int c = GetPixel(memDC, x,y); gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c))); } } } SelectObject(memDC,hOld); DeleteObject(memBM); DeleteDC(memDC); ReleaseDC( 0, hdc ); if (!im) { RETURN_FALSE; } else { RETURN_RES(zend_register_resource(im, le_gd)); } } /* }}} */ /* {{{ proto resource imagegrabscreen() Grab a screenshot */ 
PHP_FUNCTION(imagegrabscreen) { HWND window = GetDesktopWindow(); RECT rc = {0}; int Width, Height; HDC hdc; HDC memDC; HBITMAP memBM; HBITMAP hOld; typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC,UINT); tPrintWindow pPrintWindow = 0; gdImagePtr im; hdc = GetDC(0); if (zend_parse_parameters_none() == FAILURE) { return; } if (!hdc) { RETURN_FALSE; } GetWindowRect(window, &rc); Width = rc.right - rc.left; Height = rc.bottom - rc.top; Width = (Width/4)*4; memDC = CreateCompatibleDC(hdc); memBM = CreateCompatibleBitmap(hdc, Width, Height); hOld = (HBITMAP) SelectObject (memDC, memBM); BitBlt( memDC, 0, 0, Width, Height , hdc, rc.left, rc.top , SRCCOPY ); im = gdImageCreateTrueColor(Width, Height); if (im) { int x,y; for (y=0; y <= Height; y++) { for (x=0; x <= Width; x++) { int c = GetPixel(memDC, x,y); gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c))); } } } SelectObject(memDC,hOld); DeleteObject(memBM); DeleteDC(memDC); ReleaseDC( 0, hdc ); if (!im) { RETURN_FALSE; } else { RETURN_RES(zend_register_resource(im, le_gd)); } } /* }}} */ #endif /* PHP_WIN32 */ /* {{{ proto resource imagerotate(resource src_im, float angle, int bgdcolor [, int ignoretransparent]) Rotate an image using a custom angle */ PHP_FUNCTION(imagerotate) { zval *SIM; gdImagePtr im_dst, im_src; double degrees; zend_long color; zend_long ignoretransparent = 0; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rdl|l", &SIM, &degrees, &color, &ignoretransparent) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } im_dst = gdImageRotateInterpolated(im_src, (const float)degrees, color); if (im_dst != NULL) { RETURN_RES(zend_register_resource(im_dst, le_gd)); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto bool imagesettile(resource image, resource tile) Set the tile image to $tile when filling $image with the "IMG_COLOR_TILED" color */ PHP_FUNCTION(imagesettile) { zval *IM, *TILE; gdImagePtr im, 
tile; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rr", &IM, &TILE) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((tile = (gdImagePtr)zend_fetch_resource(Z_RES_P(TILE), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageSetTile(im, tile); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesetbrush(resource image, resource brush) Set the brush image to $brush when filling $image with the "IMG_COLOR_BRUSHED" color */ PHP_FUNCTION(imagesetbrush) { zval *IM, *TILE; gdImagePtr im, tile; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rr", &IM, &TILE) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((tile = (gdImagePtr)zend_fetch_resource(Z_RES_P(TILE), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageSetBrush(im, tile); RETURN_TRUE; } /* }}} */ /* {{{ proto resource imagecreate(int x_size, int y_size) Create a new image */ PHP_FUNCTION(imagecreate) { zend_long x_size, y_size; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll", &x_size, &y_size) == FAILURE) { return; } if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) { php_error_docref(NULL, E_WARNING, "Invalid image dimensions"); RETURN_FALSE; } im = gdImageCreate(x_size, y_size); if (!im) { RETURN_FALSE; } RETURN_RES(zend_register_resource(im, le_gd)); } /* }}} */ /* {{{ proto int imagetypes(void) Return the types of images supported in a bitfield - 1=GIF, 2=JPEG, 4=PNG, 8=WBMP, 16=XPM */ PHP_FUNCTION(imagetypes) { int ret=0; ret = 1; #ifdef HAVE_GD_JPG ret |= 2; #endif #ifdef HAVE_GD_PNG ret |= 4; #endif ret |= 8; #if defined(HAVE_GD_XPM) ret |= 16; #endif #ifdef HAVE_GD_WEBP ret |= 32; #endif #ifdef HAVE_GD_BMP ret |= 64; #endif if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(ret); } /* }}} */ /* {{{ _php_ctx_getmbi */ static int _php_ctx_getmbi(gdIOCtx *ctx) { int i, mbi = 0; do { i = (ctx->getC)(ctx); if (i 
< 0) { return -1; } mbi = (mbi << 7) | (i & 0x7f); } while (i & 0x80); return mbi; } /* }}} */ /* {{{ _php_image_type */ static const char php_sig_gd2[3] = {'g', 'd', '2'}; static int _php_image_type (char data[8]) { /* Based on ext/standard/image.c */ if (data == NULL) { return -1; } if (!memcmp(data, php_sig_gd2, 3)) { return PHP_GDIMG_TYPE_GD2; } else if (!memcmp(data, php_sig_jpg, 3)) { return PHP_GDIMG_TYPE_JPG; } else if (!memcmp(data, php_sig_png, 3)) { if (!memcmp(data, php_sig_png, 8)) { return PHP_GDIMG_TYPE_PNG; } } else if (!memcmp(data, php_sig_gif, 3)) { return PHP_GDIMG_TYPE_GIF; } else if (!memcmp(data, php_sig_bmp, sizeof(php_sig_bmp))) { return PHP_GDIMG_TYPE_BMP; } else { gdIOCtx *io_ctx; io_ctx = gdNewDynamicCtxEx(8, data, 0); if (io_ctx) { if (_php_ctx_getmbi(io_ctx) == 0 && _php_ctx_getmbi(io_ctx) >= 0) { io_ctx->gd_free(io_ctx); return PHP_GDIMG_TYPE_WBM; } else { io_ctx->gd_free(io_ctx); } } } return -1; } /* }}} */ /* {{{ _php_image_create_from_string */ gdImagePtr _php_image_create_from_string(zval *data, char *tn, gdImagePtr (*ioctx_func_p)()) { gdImagePtr im; gdIOCtx *io_ctx; io_ctx = gdNewDynamicCtxEx(Z_STRLEN_P(data), Z_STRVAL_P(data), 0); if (!io_ctx) { return NULL; } im = (*ioctx_func_p)(io_ctx); if (!im) { php_error_docref(NULL, E_WARNING, "Passed data is not in '%s' format", tn); io_ctx->gd_free(io_ctx); return NULL; } io_ctx->gd_free(io_ctx); return im; } /* }}} */ /* {{{ proto resource imagecreatefromstring(string image) Create a new image from the image stream in the string */ PHP_FUNCTION(imagecreatefromstring) { zval *data; gdImagePtr im; int imtype; char sig[8]; if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &data) == FAILURE) { return; } convert_to_string_ex(data); if (Z_STRLEN_P(data) < 8) { php_error_docref(NULL, E_WARNING, "Empty string or invalid image"); RETURN_FALSE; } memcpy(sig, Z_STRVAL_P(data), 8); imtype = _php_image_type(sig); switch (imtype) { case PHP_GDIMG_TYPE_JPG: #ifdef HAVE_GD_JPG im = 
_php_image_create_from_string(data, "JPEG", gdImageCreateFromJpegCtx); #else php_error_docref(NULL, E_WARNING, "No JPEG support in this PHP build"); RETURN_FALSE; #endif break; case PHP_GDIMG_TYPE_PNG: #ifdef HAVE_GD_PNG im = _php_image_create_from_string(data, "PNG", gdImageCreateFromPngCtx); #else php_error_docref(NULL, E_WARNING, "No PNG support in this PHP build"); RETURN_FALSE; #endif break; case PHP_GDIMG_TYPE_GIF: im = _php_image_create_from_string(data, "GIF", gdImageCreateFromGifCtx); break; case PHP_GDIMG_TYPE_WBM: im = _php_image_create_from_string(data, "WBMP", gdImageCreateFromWBMPCtx); break; case PHP_GDIMG_TYPE_GD2: im = _php_image_create_from_string(data, "GD2", gdImageCreateFromGd2Ctx); break; case PHP_GDIMG_TYPE_BMP: im = _php_image_create_from_string(data, "BMP", gdImageCreateFromBmpCtx); break; default: php_error_docref(NULL, E_WARNING, "Data is not in a recognized format"); RETURN_FALSE; } if (!im) { php_error_docref(NULL, E_WARNING, "Couldn't create GD Image Stream out of Data"); RETURN_FALSE; } RETURN_RES(zend_register_resource(im, le_gd)); } /* }}} */ /* {{{ _php_image_create_from */ static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)()) { char *file; size_t file_len; zend_long srcx, srcy, width, height; gdImagePtr im = NULL; php_stream *stream; FILE * fp = NULL; #ifdef HAVE_GD_JPG long ignore_warning; #endif if (image_type == PHP_GDIMG_TYPE_GD2PART) { if (zend_parse_parameters(ZEND_NUM_ARGS(), "pllll", &file, &file_len, &srcx, &srcy, &width, &height) == FAILURE) { return; } if (width < 1 || height < 1) { php_error_docref(NULL, E_WARNING, "Zero width or height not allowed"); RETURN_FALSE; } } else { if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &file, &file_len) == FAILURE) { return; } } stream = php_stream_open_wrapper(file, "rb", REPORT_ERRORS|IGNORE_PATH|IGNORE_URL_WIN, NULL); if (stream == NULL) { RETURN_FALSE; } /* try and avoid allocating a 
FILE* if the stream is not naturally a FILE* */ if (php_stream_is(stream, PHP_STREAM_IS_STDIO)) { if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO, (void**)&fp, REPORT_ERRORS)) { goto out_err; } } else if (ioctx_func_p) { /* we can create an io context */ gdIOCtx* io_ctx; zend_string *buff; char *pstr; buff = php_stream_copy_to_mem(stream, PHP_STREAM_COPY_ALL, 0); if (!buff) { php_error_docref(NULL, E_WARNING,"Cannot read image data"); goto out_err; } /* needs to be malloc (persistent) - GD will free() it later */ pstr = pestrndup(ZSTR_VAL(buff), ZSTR_LEN(buff), 1); io_ctx = gdNewDynamicCtxEx(ZSTR_LEN(buff), pstr, 0); if (!io_ctx) { pefree(pstr, 1); zend_string_release(buff); php_error_docref(NULL, E_WARNING,"Cannot allocate GD IO context"); goto out_err; } if (image_type == PHP_GDIMG_TYPE_GD2PART) { im = (*ioctx_func_p)(io_ctx, srcx, srcy, width, height); } else { im = (*ioctx_func_p)(io_ctx); } io_ctx->gd_free(io_ctx); pefree(pstr, 1); zend_string_release(buff); } else if (php_stream_can_cast(stream, PHP_STREAM_AS_STDIO)) { /* try and force the stream to be FILE* */ if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO | PHP_STREAM_CAST_TRY_HARD, (void **) &fp, REPORT_ERRORS)) { goto out_err; } } if (!im && fp) { switch (image_type) { case PHP_GDIMG_TYPE_GD2PART: im = (*func_p)(fp, srcx, srcy, width, height); break; #if defined(HAVE_GD_XPM) case PHP_GDIMG_TYPE_XPM: im = gdImageCreateFromXpm(file); break; #endif #ifdef HAVE_GD_JPG case PHP_GDIMG_TYPE_JPG: ignore_warning = INI_INT("gd.jpeg_ignore_warning"); im = gdImageCreateFromJpegEx(fp, ignore_warning); break; #endif default: im = (*func_p)(fp); break; } fflush(fp); } /* register_im: */ if (im) { RETVAL_RES(zend_register_resource(im, le_gd)); php_stream_close(stream); return; } php_error_docref(NULL, E_WARNING, "'%s' is not a valid %s file", file, tn); out_err: php_stream_close(stream); RETURN_FALSE; } /* }}} */ /* {{{ proto resource imagecreatefromgif(string filename) Create a new image from GIF 
file or URL */ PHP_FUNCTION(imagecreatefromgif) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageCreateFromGif, gdImageCreateFromGifCtx); } /* }}} */ #ifdef HAVE_GD_JPG /* {{{ proto resource imagecreatefromjpeg(string filename) Create a new image from JPEG file or URL */ PHP_FUNCTION(imagecreatefromjpeg) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageCreateFromJpeg, gdImageCreateFromJpegCtx); } /* }}} */ #endif /* HAVE_GD_JPG */ #ifdef HAVE_GD_PNG /* {{{ proto resource imagecreatefrompng(string filename) Create a new image from PNG file or URL */ PHP_FUNCTION(imagecreatefrompng) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImageCreateFromPng, gdImageCreateFromPngCtx); } /* }}} */ #endif /* HAVE_GD_PNG */ #ifdef HAVE_GD_WEBP /* {{{ proto resource imagecreatefromwebp(string filename) Create a new image from WEBP file or URL */ PHP_FUNCTION(imagecreatefromwebp) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageCreateFromWebp, gdImageCreateFromWebpCtx); } /* }}} */ #endif /* HAVE_GD_WEBP */ /* {{{ proto resource imagecreatefromxbm(string filename) Create a new image from XBM file or URL */ PHP_FUNCTION(imagecreatefromxbm) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageCreateFromXbm, NULL); } /* }}} */ #if defined(HAVE_GD_XPM) /* {{{ proto resource imagecreatefromxpm(string filename) Create a new image from XPM file or URL */ PHP_FUNCTION(imagecreatefromxpm) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XPM, "XPM", gdImageCreateFromXpm, NULL); } /* }}} */ #endif /* {{{ proto resource imagecreatefromwbmp(string filename) Create a new image from WBMP file or URL */ PHP_FUNCTION(imagecreatefromwbmp) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageCreateFromWBMP, 
gdImageCreateFromWBMPCtx); } /* }}} */ /* {{{ proto resource imagecreatefromgd(string filename) Create a new image from GD file or URL */ PHP_FUNCTION(imagecreatefromgd) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageCreateFromGd, gdImageCreateFromGdCtx); } /* }}} */ /* {{{ proto resource imagecreatefromgd2(string filename) Create a new image from GD2 file or URL */ PHP_FUNCTION(imagecreatefromgd2) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageCreateFromGd2, gdImageCreateFromGd2Ctx); } /* }}} */ /* {{{ proto resource imagecreatefromgd2part(string filename, int srcX, int srcY, int width, int height) Create a new image from a given part of GD2 file or URL */ PHP_FUNCTION(imagecreatefromgd2part) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2PART, "GD2", gdImageCreateFromGd2Part, gdImageCreateFromGd2PartCtx); } /* }}} */ #if defined(HAVE_GD_BMP) /* {{{ proto resource imagecreatefrombmp(string filename) Create a new image from BMP file or URL */ PHP_FUNCTION(imagecreatefrombmp) { _php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_BMP, "BMP", gdImageCreateFromBmp, gdImageCreateFromBmpCtx); } /* }}} */ #endif /* {{{ _php_image_output */ static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)()) { zval *imgind; char *file = NULL; zend_long quality = 0, type = 0; gdImagePtr im; char *fn = NULL; FILE *fp; size_t file_len = 0; int argc = ZEND_NUM_ARGS(); int q = -1, i, t = 1; /* The quality parameter for Wbmp stands for the threshold when called from image2wbmp() */ /* When called from imagewbmp() the quality parameter stands for the foreground color. Default: black. 
*/ /* The quality parameter for gd2 stands for chunk size */ if (zend_parse_parameters(argc, "r|pll", &imgind, &file, &file_len, &quality, &type) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(imgind), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (argc > 1) { fn = file; if (argc >= 3) { q = quality; if (argc == 4) { t = type; } } } if (argc >= 2 && file_len) { PHP_GD_CHECK_OPEN_BASEDIR(fn, "Invalid filename"); fp = VCWD_FOPEN(fn, "wb"); if (!fp) { php_error_docref(NULL, E_WARNING, "Unable to open '%s' for writing", fn); RETURN_FALSE; } switch (image_type) { case PHP_GDIMG_CONVERT_WBM: if (q == -1) { q = 0; } else if (q < 0 || q > 255) { php_error_docref(NULL, E_WARNING, "Invalid threshold value '%d'. It must be between 0 and 255", q); q = 0; } gdImageWBMP(im, q, fp); break; case PHP_GDIMG_TYPE_JPG: (*func_p)(im, fp, q); break; case PHP_GDIMG_TYPE_WBM: for (i = 0; i < gdImageColorsTotal(im); i++) { if (gdImageRed(im, i) == 0) break; } (*func_p)(im, i, fp); break; case PHP_GDIMG_TYPE_GD: (*func_p)(im, fp); break; case PHP_GDIMG_TYPE_GD2: if (q == -1) { q = 128; } (*func_p)(im, fp, q, t); break; default: if (q == -1) { q = 128; } (*func_p)(im, fp, q, t); break; } fflush(fp); fclose(fp); } else { int b; FILE *tmp; char buf[4096]; zend_string *path; tmp = php_open_temporary_file(NULL, NULL, &path); if (tmp == NULL) { php_error_docref(NULL, E_WARNING, "Unable to open temporary file"); RETURN_FALSE; } switch (image_type) { case PHP_GDIMG_CONVERT_WBM: if (q == -1) { q = 0; } else if (q < 0 || q > 255) { php_error_docref(NULL, E_WARNING, "Invalid threshold value '%d'. 
It must be between 0 and 255", q); q = 0; } gdImageWBMP(im, q, tmp); break; case PHP_GDIMG_TYPE_JPG: (*func_p)(im, tmp, q); break; case PHP_GDIMG_TYPE_WBM: for (i = 0; i < gdImageColorsTotal(im); i++) { if (gdImageRed(im, i) == 0) { break; } } (*func_p)(im, q, tmp); break; case PHP_GDIMG_TYPE_GD: (*func_p)(im, tmp); break; case PHP_GDIMG_TYPE_GD2: if (q == -1) { q = 128; } (*func_p)(im, tmp, q, t); break; default: (*func_p)(im, tmp); break; } fseek(tmp, 0, SEEK_SET); #if APACHE && defined(CHARSET_EBCDIC) /* XXX this is unlikely to work any more thies@thieso.net */ /* This is a binary file already: avoid EBCDIC->ASCII conversion */ ap_bsetflag(php3_rqst->connection->client, B_EBCDIC2ASCII, 0); #endif while ((b = fread(buf, 1, sizeof(buf), tmp)) > 0) { php_write(buf, b); } fclose(tmp); VCWD_UNLINK((const char *)ZSTR_VAL(path)); /* make sure that the temporary file is removed */ zend_string_release(path); } RETURN_TRUE; } /* }}} */ /* {{{ proto int imagexbm(int im, string filename [, int foreground]) Output XBM image to browser or file */ PHP_FUNCTION(imagexbm) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageXbmCtx); } /* }}} */ /* {{{ proto bool imagegif(resource im [, mixed to]) Output GIF image to browser or file */ PHP_FUNCTION(imagegif) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageGifCtx); } /* }}} */ #ifdef HAVE_GD_PNG /* {{{ proto bool imagepng(resource im [, mixed to]) Output PNG image to browser or file */ PHP_FUNCTION(imagepng) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImagePngCtxEx); } /* }}} */ #endif /* HAVE_GD_PNG */ #ifdef HAVE_GD_WEBP /* {{{ proto bool imagewebp(resource im [, mixed to[, int quality]] ) Output WEBP image to browser or file */ PHP_FUNCTION(imagewebp) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageWebpCtx); } /* }}} */ #endif /* HAVE_GD_WEBP */ #ifdef 
HAVE_GD_JPG /* {{{ proto bool imagejpeg(resource im [, mixed to [, int quality]]) Output JPEG image to browser or file */ PHP_FUNCTION(imagejpeg) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageJpegCtx); } /* }}} */ #endif /* HAVE_GD_JPG */ /* {{{ proto bool imagewbmp(resource im [, mixed to [, int foreground]]) Output WBMP image to browser or file */ PHP_FUNCTION(imagewbmp) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageWBMPCtx); } /* }}} */ /* {{{ proto bool imagegd(resource im [, mixed to]) Output GD image to browser or file */ PHP_FUNCTION(imagegd) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageGd); } /* }}} */ /* {{{ proto bool imagegd2(resource im [, mixed to [, int chunk_size [, int type]]]) Output GD2 image to browser or file */ PHP_FUNCTION(imagegd2) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageGd2); } /* }}} */ #ifdef HAVE_GD_BMP /* {{{ proto bool imagebmp(resource im [, mixed to [, bool compressed]]) Output BMP image to browser or file */ PHP_FUNCTION(imagebmp) { _php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_BMP, "BMP", gdImageBmpCtx); } /* }}} */ #endif /* {{{ proto bool imagedestroy(resource im) Destroy an image */ PHP_FUNCTION(imagedestroy) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } zend_list_close(Z_RES_P(IM)); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagecolorallocate(resource im, int red, int green, int blue) Allocate a color for an image */ PHP_FUNCTION(imagecolorallocate) { zval *IM; zend_long red, green, blue; gdImagePtr im; int ct = (-1); if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), 
"Image", le_gd)) == NULL) { RETURN_FALSE; } ct = gdImageColorAllocate(im, red, green, blue); if (ct < 0) { RETURN_FALSE; } RETURN_LONG(ct); } /* }}} */ /* {{{ proto void imagepalettecopy(resource dst, resource src) Copy the palette from the src image onto the dst image */ PHP_FUNCTION(imagepalettecopy) { zval *dstim, *srcim; gdImagePtr dst, src; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rr", &dstim, &srcim) == FAILURE) { return; } if ((dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(dstim), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((src = (gdImagePtr)zend_fetch_resource(Z_RES_P(srcim), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImagePaletteCopy(dst, src); } /* }}} */ /* {{{ proto int imagecolorat(resource im, int x, int y) Get the index of the color of a pixel */ PHP_FUNCTION(imagecolorat) { zval *IM; zend_long x, y; gdImagePtr im; ZEND_PARSE_PARAMETERS_START(3, 3) Z_PARAM_RESOURCE(IM) Z_PARAM_LONG(x) Z_PARAM_LONG(y) ZEND_PARSE_PARAMETERS_END(); if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (gdImageTrueColor(im)) { if (im->tpixels && gdImageBoundsSafe(im, x, y)) { RETURN_LONG(gdImageTrueColorPixel(im, x, y)); } else { php_error_docref(NULL, E_NOTICE, "" ZEND_LONG_FMT "," ZEND_LONG_FMT " is out of bounds", x, y); RETURN_FALSE; } } else { if (im->pixels && gdImageBoundsSafe(im, x, y)) { RETURN_LONG(im->pixels[y][x]); } else { php_error_docref(NULL, E_NOTICE, "" ZEND_LONG_FMT "," ZEND_LONG_FMT " is out of bounds", x, y); RETURN_FALSE; } } } /* }}} */ /* {{{ proto int imagecolorclosest(resource im, int red, int green, int blue) Get the index of the closest color to the specified color */ PHP_FUNCTION(imagecolorclosest) { zval *IM; zend_long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorClosest(im, 
red, green, blue)); } /* }}} */ /* {{{ proto int imagecolorclosesthwb(resource im, int red, int green, int blue) Get the index of the color which has the hue, white and blackness nearest to the given color */ PHP_FUNCTION(imagecolorclosesthwb) { zval *IM; zend_long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorClosestHWB(im, red, green, blue)); } /* }}} */ /* {{{ proto bool imagecolordeallocate(resource im, int index) De-allocate a color for an image */ PHP_FUNCTION(imagecolordeallocate) { zval *IM; zend_long index; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &IM, &index) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } /* We can return right away for a truecolor image as deallocating colours is meaningless here */ if (gdImageTrueColor(im)) { RETURN_TRUE; } col = index; if (col >= 0 && col < gdImageColorsTotal(im)) { gdImageColorDeallocate(im, col); RETURN_TRUE; } else { php_error_docref(NULL, E_WARNING, "Color index %d out of range", col); RETURN_FALSE; } } /* }}} */ /* {{{ proto int imagecolorresolve(resource im, int red, int green, int blue) Get the index of the specified color or its closest possible alternative */ PHP_FUNCTION(imagecolorresolve) { zval *IM; zend_long red, green, blue; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorResolve(im, red, green, blue)); } /* }}} */ /* {{{ proto int imagecolorexact(resource im, int red, int green, int blue) Get the index of the specified color */ PHP_FUNCTION(imagecolorexact) { zval *IM; zend_long red, green, blue; 
gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &red, &green, &blue) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorExact(im, red, green, blue)); } /* }}} */ /* {{{ proto void imagecolorset(resource im, int col, int red, int green, int blue) Set the color for the specified palette index */ PHP_FUNCTION(imagecolorset) { zval *IM; zend_long color, red, green, blue, alpha = 0; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll|l", &IM, &color, &red, &green, &blue, &alpha) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } col = color; if (col >= 0 && col < gdImageColorsTotal(im)) { im->red[col] = red; im->green[col] = green; im->blue[col] = blue; im->alpha[col] = alpha; } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto array imagecolorsforindex(resource im, int col) Get the colors for an index */ PHP_FUNCTION(imagecolorsforindex) { zval *IM; zend_long index; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &IM, &index) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } col = index; if ((col >= 0 && gdImageTrueColor(im)) || (!gdImageTrueColor(im) && col >= 0 && col < gdImageColorsTotal(im))) { array_init(return_value); add_assoc_long(return_value,"red", gdImageRed(im,col)); add_assoc_long(return_value,"green", gdImageGreen(im,col)); add_assoc_long(return_value,"blue", gdImageBlue(im,col)); add_assoc_long(return_value,"alpha", gdImageAlpha(im,col)); } else { php_error_docref(NULL, E_WARNING, "Color index %d out of range", col); RETURN_FALSE; } } /* }}} */ /* {{{ proto bool imagegammacorrect(resource im, float inputgamma, float outputgamma) Apply a gamma correction to a GD image */ PHP_FUNCTION(imagegammacorrect) { zval *IM; gdImagePtr im; int i; double input, 
output, gamma; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rdd", &IM, &input, &output) == FAILURE) { return; } if ( input <= 0.0 || output <= 0.0 ) { php_error_docref(NULL, E_WARNING, "Gamma values should be positive"); RETURN_FALSE; } gamma = input / output; if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (gdImageTrueColor(im)) { int x, y, c; for (y = 0; y < gdImageSY(im); y++) { for (x = 0; x < gdImageSX(im); x++) { c = gdImageGetPixel(im, x, y); gdImageSetPixel(im, x, y, gdTrueColorAlpha( (int) ((pow((gdTrueColorGetRed(c) / 255.0), gamma) * 255) + .5), (int) ((pow((gdTrueColorGetGreen(c) / 255.0), gamma) * 255) + .5), (int) ((pow((gdTrueColorGetBlue(c) / 255.0), gamma) * 255) + .5), gdTrueColorGetAlpha(c) ) ); } } RETURN_TRUE; } for (i = 0; i < gdImageColorsTotal(im); i++) { im->red[i] = (int)((pow((im->red[i] / 255.0), gamma) * 255) + .5); im->green[i] = (int)((pow((im->green[i] / 255.0), gamma) * 255) + .5); im->blue[i] = (int)((pow((im->blue[i] / 255.0), gamma) * 255) + .5); } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagesetpixel(resource im, int x, int y, int col) Set a single pixel */ PHP_FUNCTION(imagesetpixel) { zval *IM; zend_long x, y, col; gdImagePtr im; ZEND_PARSE_PARAMETERS_START(4, 4) Z_PARAM_RESOURCE(IM) Z_PARAM_LONG(x) Z_PARAM_LONG(y) Z_PARAM_LONG(col) ZEND_PARSE_PARAMETERS_END(); if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageSetPixel(im, x, y, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imageline(resource im, int x1, int y1, int x2, int y2, int col) Draw a line */ PHP_FUNCTION(imageline) { zval *IM; zend_long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im->AA) { gdImageSetAntiAliased(im, col); col = gdAntiAliased; } 
gdImageLine(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagedashedline(resource im, int x1, int y1, int x2, int y2, int col) Draw a dashed line */ PHP_FUNCTION(imagedashedline) { zval *IM; zend_long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageDashedLine(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagerectangle(resource im, int x1, int y1, int x2, int y2, int col) Draw a rectangle */ PHP_FUNCTION(imagerectangle) { zval *IM; zend_long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageRectangle(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilledrectangle(resource im, int x1, int y1, int x2, int y2, int col) Draw a filled rectangle */ PHP_FUNCTION(imagefilledrectangle) { zval *IM; zend_long x1, y1, x2, y2, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageFilledRectangle(im, x1, y1, x2, y2, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagearc(resource im, int cx, int cy, int w, int h, int s, int e, int col) Draw a partial ellipse */ PHP_FUNCTION(imagearc) { zval *IM; zend_long cx, cy, w, h, ST, E, col; gdImagePtr im; int e, st; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } e = E; if (e < 0) { e %= 360; } st = ST; if (st < 0) { st %= 360; } 
gdImageArc(im, cx, cy, w, h, st, e, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imageellipse(resource im, int cx, int cy, int w, int h, int color) Draw an ellipse */ PHP_FUNCTION(imageellipse) { zval *IM; zend_long cx, cy, w, h, color; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageEllipse(im, cx, cy, w, h, color); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefilltoborder(resource im, int x, int y, int border, int col) Flood fill to specific color */ PHP_FUNCTION(imagefilltoborder) { zval *IM; zend_long x, y, border, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &IM, &x, &y, &border, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageFillToBorder(im, x, y, border, col); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagefill(resource im, int x, int y, int col) Flood fill */ PHP_FUNCTION(imagefill) { zval *IM; zend_long x, y, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlll", &IM, &x, &y, &col) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageFill(im, x, y, col); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagecolorstotal(resource im) Find out the number of colors in an image's palette */ PHP_FUNCTION(imagecolorstotal) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageColorsTotal(im)); } /* }}} */ /* {{{ proto int imagecolortransparent(resource im [, int col]) Define a color as transparent */ PHP_FUNCTION(imagecolortransparent) { zval *IM; zend_long COL = 0; gdImagePtr im; int argc = ZEND_NUM_ARGS(); if 
(zend_parse_parameters(argc, "r|l", &IM, &COL) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (argc > 1) { gdImageColorTransparent(im, COL); } RETURN_LONG(gdImageGetTransparent(im)); } /* }}} */ /* {{{ proto int imageinterlace(resource im [, int interlace]) Enable or disable interlace */ PHP_FUNCTION(imageinterlace) { zval *IM; int argc = ZEND_NUM_ARGS(); zend_long INT = 0; gdImagePtr im; if (zend_parse_parameters(argc, "r|l", &IM, &INT) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (argc > 1) { gdImageInterlace(im, INT); } RETURN_LONG(gdImageGetInterlaced(im)); } /* }}} */ /* {{{ php_imagepolygon arg = -1 open polygon arg = 0 normal polygon arg = 1 filled polygon */ /* im, points, num_points, col */ static void php_imagepolygon(INTERNAL_FUNCTION_PARAMETERS, int filled) { zval *IM, *POINTS; zend_long NPOINTS, COL; zval *var = NULL; gdImagePtr im; gdPointPtr points; int npoints, col, nelem, i; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rall", &IM, &POINTS, &NPOINTS, &COL) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } npoints = NPOINTS; col = COL; nelem = zend_hash_num_elements(Z_ARRVAL_P(POINTS)); if (nelem < 6) { php_error_docref(NULL, E_WARNING, "You must have at least 3 points in your array"); RETURN_FALSE; } if (npoints <= 0) { php_error_docref(NULL, E_WARNING, "You must give a positive number of points"); RETURN_FALSE; } if (nelem < npoints * 2) { php_error_docref(NULL, E_WARNING, "Trying to use %d points in array with only %d points", npoints, nelem/2); RETURN_FALSE; } points = (gdPointPtr) safe_emalloc(npoints, sizeof(gdPoint), 0); for (i = 0; i < npoints; i++) { if ((var = zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2))) != NULL) { points[i].x = zval_get_long(var); } if ((var = zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 
2) + 1)) != NULL) { points[i].y = zval_get_long(var); } } if (im->AA) { gdImageSetAntiAliased(im, col); col = gdAntiAliased; } switch (filled) { case -1: gdImageOpenPolygon(im, points, npoints, col); break; case 0: gdImagePolygon(im, points, npoints, col); break; case 1: gdImageFilledPolygon(im, points, npoints, col); break; } efree(points); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagepolygon(resource im, array point, int num_points, int col) Draw a polygon */ PHP_FUNCTION(imagepolygon) { php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto bool imageopenpolygon(resource im, array point, int num_points, int col) Draw a polygon */ PHP_FUNCTION(imageopenpolygon) { php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, -1); } /* }}} */ /* {{{ proto bool imagefilledpolygon(resource im, array point, int num_points, int col) Draw a filled polygon */ PHP_FUNCTION(imagefilledpolygon) { php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ php_find_gd_font */ static gdFontPtr php_find_gd_font(int size) { gdFontPtr font; switch (size) { case 1: font = gdFontTiny; break; case 2: font = gdFontSmall; break; case 3: font = gdFontMediumBold; break; case 4: font = gdFontLarge; break; case 5: font = gdFontGiant; break; default: { zval *zv = zend_hash_index_find(&EG(regular_list), size - 5); if (!zv || (Z_RES_P(zv))->type != le_gd_font) { if (size < 1) { font = gdFontTiny; } else { font = gdFontGiant; } } else { font = (gdFontPtr)Z_RES_P(zv)->ptr; } } break; } return font; } /* }}} */ /* {{{ php_imagefontsize * arg = 0 ImageFontWidth * arg = 1 ImageFontHeight */ static void php_imagefontsize(INTERNAL_FUNCTION_PARAMETERS, int arg) { zend_long SIZE; gdFontPtr font; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &SIZE) == FAILURE) { return; } font = php_find_gd_font(SIZE); RETURN_LONG(arg ? 
font->h : font->w); } /* }}} */ /* {{{ proto int imagefontwidth(int font) Get font width */ PHP_FUNCTION(imagefontwidth) { php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto int imagefontheight(int font) Get font height */ PHP_FUNCTION(imagefontheight) { php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ php_gdimagecharup * workaround for a bug in gd 1.2 */ static void php_gdimagecharup(gdImagePtr im, gdFontPtr f, int x, int y, int c, int color) { int cx, cy, px, py, fline; cx = 0; cy = 0; if ((c < f->offset) || (c >= (f->offset + f->nchars))) { return; } fline = (c - f->offset) * f->h * f->w; for (py = y; (py > (y - f->w)); py--) { for (px = x; (px < (x + f->h)); px++) { if (f->data[fline + cy * f->w + cx]) { gdImageSetPixel(im, px, py, color); } cy++; } cy = 0; cx++; } } /* }}} */ /* {{{ php_imagechar * arg = 0 ImageChar * arg = 1 ImageCharUp * arg = 2 ImageString * arg = 3 ImageStringUp */ static void php_imagechar(INTERNAL_FUNCTION_PARAMETERS, int mode) { zval *IM; zend_long SIZE, X, Y, COL; char *C; size_t C_len; gdImagePtr im; int ch = 0, col, x, y, size, i, l = 0; unsigned char *str = NULL; gdFontPtr font; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlllsl", &IM, &SIZE, &X, &Y, &C, &C_len, &COL) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } col = COL; if (mode < 2) { ch = (int)((unsigned char)*C); } else { str = (unsigned char *) estrndup(C, C_len); l = strlen((char *)str); } y = Y; x = X; size = SIZE; font = php_find_gd_font(size); switch (mode) { case 0: gdImageChar(im, font, x, y, ch, col); break; case 1: php_gdimagecharup(im, font, x, y, ch, col); break; case 2: for (i = 0; (i < l); i++) { gdImageChar(im, font, x, y, (int) ((unsigned char) str[i]), col); x += font->w; } break; case 3: { for (i = 0; (i < l); i++) { /* php_gdimagecharup(im, font, x, y, (int) str[i], col); */ gdImageCharUp(im, font, x, y, (int) str[i], col); y 
-= font->w; } break; } } if (str) { efree(str); } RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagechar(resource im, int font, int x, int y, string c, int col) Draw a character */ PHP_FUNCTION(imagechar) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto bool imagecharup(resource im, int font, int x, int y, string c, int col) Draw a character rotated 90 degrees counter-clockwise */ PHP_FUNCTION(imagecharup) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ proto bool imagestring(resource im, int font, int x, int y, string str, int col) Draw a string horizontally */ PHP_FUNCTION(imagestring) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2); } /* }}} */ /* {{{ proto bool imagestringup(resource im, int font, int x, int y, string str, int col) Draw a string vertically - rotated 90 degrees counter-clockwise */ PHP_FUNCTION(imagestringup) { php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3); } /* }}} */ /* {{{ proto bool imagecopy(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h) Copy part of an image */ PHP_FUNCTION(imagecopy) { zval *SIM, *DIM; zend_long SX, SY, SW, SH, DX, DY; gdImagePtr im_dst, im_src; int srcH, srcW, srcY, srcX, dstY, dstX; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH) == FAILURE) { return; } if ((im_dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; gdImageCopy(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagecopymerge(resource src_im, resource dst_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct) Merge one part of an image with another */ PHP_FUNCTION(imagecopymerge) { zval *SIM, *DIM; zend_long SX, SY, 
SW, SH, DX, DY, PCT; gdImagePtr im_dst, im_src; int srcH, srcW, srcY, srcX, dstY, dstX, pct; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) { return; } if ((im_dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; pct = PCT; gdImageCopyMerge(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagecopymergegray(resource src_im, resource dst_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct) Merge one part of an image with another */ PHP_FUNCTION(imagecopymergegray) { zval *SIM, *DIM; zend_long SX, SY, SW, SH, DX, DY, PCT; gdImagePtr im_dst, im_src; int srcH, srcW, srcY, srcX, dstY, dstX, pct; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) { return; } if ((im_dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; pct = PCT; gdImageCopyMergeGray(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct); RETURN_TRUE; } /* }}} */ /* {{{ proto bool imagecopyresized(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h) Copy and resize part of an image */ PHP_FUNCTION(imagecopyresized) { zval *SIM, *DIM; zend_long SX, SY, SW, SH, DX, DY, DW, DH; gdImagePtr im_dst, im_src; int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &DW, &DH, &SW, &SH) == FAILURE) { return; } if ((im_dst = 
(gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } srcX = SX; srcY = SY; srcH = SH; srcW = SW; dstX = DX; dstY = DY; dstH = DH; dstW = DW; if (dstW <= 0 || dstH <= 0 || srcW <= 0 || srcH <= 0) { php_error_docref(NULL, E_WARNING, "Invalid image dimensions"); RETURN_FALSE; } gdImageCopyResized(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH); RETURN_TRUE; } /* }}} */ /* {{{ proto int imagesx(resource im) Get image width */ PHP_FUNCTION(imagesx) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageSX(im)); } /* }}} */ /* {{{ proto int imagesy(resource im) Get image height */ PHP_FUNCTION(imagesy) { zval *IM; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &IM) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } RETURN_LONG(gdImageSY(im)); } /* }}} */ /* {{{ proto bool imagesetclip(resource im, int x1, int y1, int x2, int y2) Set the clipping rectangle. */ PHP_FUNCTION(imagesetclip) { zval *im_zval; gdImagePtr im; zend_long x1, y1, x2, y2; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll", &im_zval, &x1, &y1, &x2, &y2) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(im_zval), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageSetClip(im, x1, y1, x2, y2); RETURN_TRUE; } /* }}} */ /* {{{ proto array imagegetclip(resource im) Get the clipping rectangle. 
*/ PHP_FUNCTION(imagegetclip) { zval *im_zval; gdImagePtr im; int x1, y1, x2, y2; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &im_zval) == FAILURE) { return; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(im_zval), "Image", le_gd)) == NULL) { RETURN_FALSE; } gdImageGetClip(im, &x1, &y1, &x2, &y2); array_init(return_value); add_next_index_long(return_value, x1); add_next_index_long(return_value, y1); add_next_index_long(return_value, x2); add_next_index_long(return_value, y2); } /* }}} */ #ifdef ENABLE_GD_TTF #define TTFTEXT_DRAW 0 #define TTFTEXT_BBOX 1 #endif #ifdef ENABLE_GD_TTF #if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE /* {{{ proto array imageftbbox(float size, float angle, string font_file, string text [, array extrainfo]) Give the bounding box of a text using fonts via freetype2 */ PHP_FUNCTION(imageftbbox) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 1); } /* }}} */ /* {{{ proto array imagefttext(resource im, float size, float angle, int x, int y, int col, string font_file, string text [, array extrainfo]) Write text to the image using fonts via freetype2 */ PHP_FUNCTION(imagefttext) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 1); } /* }}} */ #endif /* HAVE_GD_FREETYPE && HAVE_LIBFREETYPE */ /* {{{ proto array imagettfbbox(float size, float angle, string font_file, string text) Give the bounding box of a text using TrueType fonts */ PHP_FUNCTION(imagettfbbox) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 0); } /* }}} */ /* {{{ proto array imagettftext(resource im, float size, float angle, int x, int y, int col, string font_file, string text) Write text to the image using a TrueType font */ PHP_FUNCTION(imagettftext) { php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 0); } /* }}} */ /* {{{ php_imagettftext_common */ static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int mode, int extended) { zval *IM, *EXT = NULL; gdImagePtr 
im=NULL; zend_long col = -1, x = 0, y = 0; size_t str_len, fontname_len; int i, brect[8]; double ptsize, angle; char *str = NULL, *fontname = NULL; char *error = NULL; int argc = ZEND_NUM_ARGS(); gdFTStringExtra strex = {0}; if (mode == TTFTEXT_BBOX) { if (argc < 4 || argc > ((extended) ? 5 : 4)) { ZEND_WRONG_PARAM_COUNT(); } else if (zend_parse_parameters(argc, "ddss|a", &ptsize, &angle, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) { RETURN_FALSE; } } else { if (argc < 8 || argc > ((extended) ? 9 : 8)) { ZEND_WRONG_PARAM_COUNT(); } else if (zend_parse_parameters(argc, "rddlllss|a", &IM, &ptsize, &angle, &x, &y, &col, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) { RETURN_FALSE; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } } /* convert angle to radians */ angle = angle * (M_PI/180); if (extended && EXT) { /* parse extended info */ zval *item; zend_string *key; /* walk the assoc array */ ZEND_HASH_FOREACH_STR_KEY_VAL(Z_ARRVAL_P(EXT), key, item) { if (key == NULL) { continue; } if (strcmp("linespacing", ZSTR_VAL(key)) == 0) { strex.flags |= gdFTEX_LINESPACE; strex.linespacing = zval_get_double(item); } } ZEND_HASH_FOREACH_END(); } #ifdef VIRTUAL_DIR { char tmp_font_path[MAXPATHLEN]; if (!VCWD_REALPATH(fontname, tmp_font_path)) { fontname = NULL; } } #endif /* VIRTUAL_DIR */ PHP_GD_CHECK_OPEN_BASEDIR(fontname, "Invalid font filename"); #ifdef HAVE_GD_FREETYPE if (extended) { error = gdImageStringFTEx(im, brect, col, fontname, ptsize, angle, x, y, str, &strex); } else error = gdImageStringFT(im, brect, col, fontname, ptsize, angle, x, y, str); #endif /* HAVE_GD_FREETYPE */ if (error) { php_error_docref(NULL, E_WARNING, "%s", error); RETURN_FALSE; } array_init(return_value); /* return array with the text's bounding box */ for (i = 0; i < 8; i++) { add_next_index_long(return_value, brect[i]); } } /* }}} */ #endif /* ENABLE_GD_TTF */ /* {{{ proto bool image2wbmp(resource im [, string 
filename [, int threshold]]) Output WBMP image to browser or file */ PHP_FUNCTION(image2wbmp) { _php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_CONVERT_WBM, "WBMP", _php_image_bw_convert); } /* }}} */ #if defined(HAVE_GD_JPG) /* {{{ proto bool jpeg2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold) Convert JPEG image to WBMP image */ PHP_FUNCTION(jpeg2wbmp) { _php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG); } /* }}} */ #endif #if defined(HAVE_GD_PNG) /* {{{ proto bool png2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold) Convert PNG image to WBMP image */ PHP_FUNCTION(png2wbmp) { _php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG); } /* }}} */ #endif /* {{{ _php_image_bw_convert * It converts a gd Image to bw using a threshold value */ static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold) { gdImagePtr im_dest; int white, black; int color, color_org, median; int dest_height = gdImageSY(im_org); int dest_width = gdImageSX(im_org); int x, y; im_dest = gdImageCreate(dest_width, dest_height); if (im_dest == NULL) { php_error_docref(NULL, E_WARNING, "Unable to allocate temporary buffer"); return; } white = gdImageColorAllocate(im_dest, 255, 255, 255); if (white == -1) { php_error_docref(NULL, E_WARNING, "Unable to allocate the colors for the destination buffer"); return; } black = gdImageColorAllocate(im_dest, 0, 0, 0); if (black == -1) { php_error_docref(NULL, E_WARNING, "Unable to allocate the colors for the destination buffer"); return; } if (im_org->trueColor) { if (!gdImageTrueColorToPalette(im_org, 1, 256)) { php_error_docref(NULL, E_WARNING, "Unable to convert to palette"); return; } } for (y = 0; y < dest_height; y++) { for (x = 0; x < dest_width; x++) { color_org = gdImageGetPixel(im_org, x, y); median = (im_org->red[color_org] + im_org->green[color_org] + im_org->blue[color_org]) / 3; if (median < threshold) { color = 
black; } else { color = white; } gdImageSetPixel (im_dest, x, y, color); } } gdImageWBMPCtx (im_dest, black, out); } /* }}} */ /* {{{ _php_image_convert * _php_image_convert converts jpeg/png images to wbmp and resizes them as needed */ static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type ) { char *f_org, *f_dest; size_t f_org_len, f_dest_len; zend_long height, width, threshold; gdImagePtr im_org, im_dest, im_tmp; char *fn_org = NULL; char *fn_dest = NULL; FILE *org, *dest; int dest_height = -1; int dest_width = -1; int org_height, org_width; int white, black; int color, color_org, median; int int_threshold; int x, y; float x_ratio, y_ratio; #ifdef HAVE_GD_JPG zend_long ignore_warning; #endif if (zend_parse_parameters(ZEND_NUM_ARGS(), "pplll", &f_org, &f_org_len, &f_dest, &f_dest_len, &height, &width, &threshold) == FAILURE) { return; } fn_org = f_org; fn_dest = f_dest; dest_height = height; dest_width = width; int_threshold = threshold; /* Check threshold value */ if (int_threshold < 0 || int_threshold > 8) { php_error_docref(NULL, E_WARNING, "Invalid threshold value '%d'", int_threshold); RETURN_FALSE; } /* Check origin file */ PHP_GD_CHECK_OPEN_BASEDIR(fn_org, "Invalid origin filename"); /* Check destination file */ PHP_GD_CHECK_OPEN_BASEDIR(fn_dest, "Invalid destination filename"); /* Open origin file */ org = VCWD_FOPEN(fn_org, "rb"); if (!org) { php_error_docref(NULL, E_WARNING, "Unable to open '%s' for reading", fn_org); RETURN_FALSE; } /* Open destination file */ dest = VCWD_FOPEN(fn_dest, "wb"); if (!dest) { php_error_docref(NULL, E_WARNING, "Unable to open '%s' for writing", fn_dest); fclose(org); RETURN_FALSE; } switch (image_type) { #ifdef HAVE_GD_JPG case PHP_GDIMG_TYPE_JPG: ignore_warning = INI_INT("gd.jpeg_ignore_warning"); im_org = gdImageCreateFromJpegEx(org, ignore_warning); if (im_org == NULL) { php_error_docref(NULL, E_WARNING, "Unable to open '%s' Not a valid JPEG file", fn_dest); fclose(org); fclose(dest); RETURN_FALSE; 
} break; #endif /* HAVE_GD_JPG */ #ifdef HAVE_GD_PNG case PHP_GDIMG_TYPE_PNG: im_org = gdImageCreateFromPng(org); if (im_org == NULL) { php_error_docref(NULL, E_WARNING, "Unable to open '%s' Not a valid PNG file", fn_dest); fclose(org); fclose(dest); RETURN_FALSE; } break; #endif /* HAVE_GD_PNG */ default: php_error_docref(NULL, E_WARNING, "Format not supported"); fclose(org); fclose(dest); RETURN_FALSE; break; } fclose(org); org_width = gdImageSX (im_org); org_height = gdImageSY (im_org); x_ratio = (float) org_width / (float) dest_width; y_ratio = (float) org_height / (float) dest_height; if (x_ratio > 1 && y_ratio > 1) { if (y_ratio > x_ratio) { x_ratio = y_ratio; } else { y_ratio = x_ratio; } dest_width = (int) (org_width / x_ratio); dest_height = (int) (org_height / y_ratio); } else { x_ratio = (float) dest_width / (float) org_width; y_ratio = (float) dest_height / (float) org_height; if (y_ratio < x_ratio) { x_ratio = y_ratio; } else { y_ratio = x_ratio; } dest_width = (int) (org_width * x_ratio); dest_height = (int) (org_height * y_ratio); } im_tmp = gdImageCreate (dest_width, dest_height); if (im_tmp == NULL ) { php_error_docref(NULL, E_WARNING, "Unable to allocate temporary buffer"); fclose(dest); gdImageDestroy(im_org); RETURN_FALSE; } gdImageCopyResized (im_tmp, im_org, 0, 0, 0, 0, dest_width, dest_height, org_width, org_height); gdImageDestroy(im_org); im_dest = gdImageCreate(dest_width, dest_height); if (im_dest == NULL) { php_error_docref(NULL, E_WARNING, "Unable to allocate destination buffer"); fclose(dest); gdImageDestroy(im_tmp); RETURN_FALSE; } white = gdImageColorAllocate(im_dest, 255, 255, 255); if (white == -1) { php_error_docref(NULL, E_WARNING, "Unable to allocate the colors for the destination buffer"); fclose(dest); gdImageDestroy(im_tmp); gdImageDestroy(im_dest); RETURN_FALSE; } black = gdImageColorAllocate(im_dest, 0, 0, 0); if (black == -1) { php_error_docref(NULL, E_WARNING, "Unable to allocate the colors for the destination buffer"); 
fclose(dest); gdImageDestroy(im_tmp); gdImageDestroy(im_dest); RETURN_FALSE; } int_threshold = int_threshold * 32; for (y = 0; y < dest_height; y++) { for (x = 0; x < dest_width; x++) { color_org = gdImageGetPixel (im_tmp, x, y); median = (im_tmp->red[color_org] + im_tmp->green[color_org] + im_tmp->blue[color_org]) / 3; if (median < int_threshold) { color = black; } else { color = white; } gdImageSetPixel (im_dest, x, y, color); } } gdImageDestroy (im_tmp ); gdImageWBMP(im_dest, black , dest); fflush(dest); fclose(dest); gdImageDestroy(im_dest); RETURN_TRUE; } /* }}} */ /* Section Filters */ #define PHP_GD_SINGLE_RES \ zval *SIM; \ gdImagePtr im_src; \ if (zend_parse_parameters(1, "r", &SIM) == FAILURE) { \ RETURN_FALSE; \ } \ if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { \ RETURN_FALSE; \ } static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageNegate(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageGrayScale(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; zend_long brightness, tmp; if (zend_parse_parameters(ZEND_NUM_ARGS(), "zll", &SIM, &tmp, &brightness) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im_src == NULL) { RETURN_FALSE; } if (gdImageBrightness(im_src, (int)brightness) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; zend_long contrast, tmp; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rll", &SIM, &tmp, &contrast) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im_src == NULL) { RETURN_FALSE; } if 
(gdImageContrast(im_src, (int)contrast) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; gdImagePtr im_src; zend_long r,g,b,tmp; zend_long a = 0; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rllll|l", &SIM, &tmp, &r, &g, &b, &a) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im_src == NULL) { RETURN_FALSE; } if (gdImageColor(im_src, (int) r, (int) g, (int) b, (int) a) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageEdgeDetectQuick(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageEmboss(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageGaussianBlur(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageSelectiveBlur(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS) { PHP_GD_SINGLE_RES if (gdImageMeanRemoval(im_src) == 1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS) { zval *SIM; zend_long tmp; gdImagePtr im_src; double weight; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rld", &SIM, &tmp, &weight) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im_src == NULL) { RETURN_FALSE; } if (gdImageSmooth(im_src, (float)weight)==1) { RETURN_TRUE; } RETURN_FALSE; } static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS) { zval *IM; gdImagePtr im; zend_long tmp, blocksize; zend_bool mode = 0; if 
(zend_parse_parameters(ZEND_NUM_ARGS(), "rll|b", &IM, &tmp, &blocksize, &mode) == FAILURE) { RETURN_FALSE; } if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) { RETURN_FALSE; } if (im == NULL) { RETURN_FALSE; } if (gdImagePixelate(im, (int) blocksize, (const unsigned int) mode)) { RETURN_TRUE; } RETURN_FALSE; } /* {{{ proto bool imagefilter(resource src_im, int filtertype[, int arg1 [, int arg2 [, int arg3 [, int arg4 ]]]] ) Applies Filter an image using a custom angle */ PHP_FUNCTION(imagefilter) { zval *tmp; typedef void (*image_filter)(INTERNAL_FUNCTION_PARAMETERS); zend_long filtertype; image_filter filters[] = { php_image_filter_negate , php_image_filter_grayscale, php_image_filter_brightness, php_image_filter_contrast, php_image_filter_colorize, php_image_filter_edgedetect, php_image_filter_emboss, php_image_filter_gaussian_blur, php_image_filter_selective_blur, php_image_filter_mean_removal, php_image_filter_smooth, php_image_filter_pixelate }; if (ZEND_NUM_ARGS() < 2 || ZEND_NUM_ARGS() > IMAGE_FILTER_MAX_ARGS) { WRONG_PARAM_COUNT; } else if (zend_parse_parameters(2, "rl", &tmp, &filtertype) == FAILURE) { return; } if (filtertype >= 0 && filtertype <= IMAGE_FILTER_MAX) { filters[filtertype](INTERNAL_FUNCTION_PARAM_PASSTHRU); } } /* }}} */ /* {{{ proto resource imageconvolution(resource src_im, array matrix3x3, double div, double offset) Apply a 3x3 convolution matrix, using coefficient div and offset */ PHP_FUNCTION(imageconvolution) { zval *SIM, *hash_matrix; zval *var = NULL, *var2 = NULL; gdImagePtr im_src = NULL; double div, offset; int nelem, i, j, res; float matrix[3][3] = {{0,0,0}, {0,0,0}, {0,0,0}}; if (zend_parse_parameters(ZEND_NUM_ARGS(), "radd", &SIM, &hash_matrix, &div, &offset) == FAILURE) { RETURN_FALSE; } if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) { RETURN_FALSE; } nelem = zend_hash_num_elements(Z_ARRVAL_P(hash_matrix)); if (nelem != 3) { php_error_docref(NULL, 
E_WARNING, "You must have 3x3 array");
		RETURN_FALSE;
	}

	/* Pull the 3x3 coefficient matrix out of the nested PHP array. */
	for (i=0; i<3; i++) {
		if ((var = zend_hash_index_find(Z_ARRVAL_P(hash_matrix), (i))) != NULL && Z_TYPE_P(var) == IS_ARRAY) {
			if (zend_hash_num_elements(Z_ARRVAL_P(var)) != 3 ) {
				php_error_docref(NULL, E_WARNING, "You must have 3x3 array");
				RETURN_FALSE;
			}

			for (j=0; j<3; j++) {
				if ((var2 = zend_hash_index_find(Z_ARRVAL_P(var), j)) != NULL) {
					matrix[i][j] = (float) zval_get_double(var2);
				} else {
					php_error_docref(NULL, E_WARNING, "You must have a 3x3 matrix");
					RETURN_FALSE;
				}
			}
		}
		/* NOTE(review): a non-array row is silently skipped (coefficients
		   stay 0.0) rather than rejected — confirm this is intended. */
	}
	res = gdImageConvolution(im_src, matrix, (float)div, (float)offset);

	if (res) {
		RETURN_TRUE;
	} else {
		RETURN_FALSE;
	}
}
/* }}} */
/* End section: Filters */

/* {{{ proto void imageflip(resource im, int mode)
   Flip an image (in place) horizontally, vertically or both directions. */
PHP_FUNCTION(imageflip)
{
	zval *IM;
	zend_long mode;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &IM, &mode) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	switch (mode) {
		case GD_FLIP_VERTICAL:
			gdImageFlipVertical(im);
			break;

		/* NOTE: "HORINZONTAL" is the historical spelling of this libgd
		   constant — do not "fix" it. */
		case GD_FLIP_HORINZONTAL:
			gdImageFlipHorizontal(im);
			break;

		case GD_FLIP_BOTH:
			gdImageFlipBoth(im);
			break;

		default:
			php_error_docref(NULL, E_WARNING, "Unknown flip mode");
			RETURN_FALSE;
	}

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool imageantialias(resource im, bool on)
   Should antialiased functions be used or not */
PHP_FUNCTION(imageantialias)
{
	zval *IM;
	zend_bool alias;
	gdImagePtr im;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rb", &IM, &alias) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}
	/* NOTE(review): the parsed 'alias' flag is never used — the call below
	   always passes 0 (an AA color, not a toggle, in external libgd).
	   Looks like the on/off request is effectively ignored; confirm against
	   the bundled-libgd build and upstream php-src. */
	gdImageSetAntiAliased(im, 0);
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto void imagecrop(resource im, array rect)
   Crop an image using the given coordinates and size, x, y, width and height.
 */
PHP_FUNCTION(imagecrop)
{
	zval *IM;
	gdImagePtr im;
	gdImagePtr im_crop;
	gdRect rect;
	zval *z_rect;
	zval *tmp;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "ra", &IM, &z_rect) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	/* All four rectangle keys are mandatory. */
	if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "x", sizeof("x") -1)) != NULL) {
		rect.x = zval_get_long(tmp);
	} else {
		php_error_docref(NULL, E_WARNING, "Missing x position");
		RETURN_FALSE;
	}

	if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "y", sizeof("y") - 1)) != NULL) {
		rect.y = zval_get_long(tmp);
	} else {
		php_error_docref(NULL, E_WARNING, "Missing y position");
		RETURN_FALSE;
	}

	if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "width", sizeof("width") - 1)) != NULL) {
		rect.width = zval_get_long(tmp);
	} else {
		php_error_docref(NULL, E_WARNING, "Missing width");
		RETURN_FALSE;
	}

	if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "height", sizeof("height") - 1)) != NULL) {
		rect.height = zval_get_long(tmp);
	} else {
		php_error_docref(NULL, E_WARNING, "Missing height");
		RETURN_FALSE;
	}

	im_crop = gdImageCrop(im, &rect);

	if (im_crop == NULL) {
		RETURN_FALSE;
	} else {
		RETURN_RES(zend_register_resource(im_crop, le_gd));
	}
}
/* }}} */

/* {{{ proto void imagecropauto(resource im [, int mode [, float threshold [, int color]]])
   Crop an image automatically using one of the available modes.
 */
PHP_FUNCTION(imagecropauto)
{
	zval *IM;
	zend_long mode = -1;
	zend_long color = -1;
	double threshold = 0.5f;
	gdImagePtr im;
	gdImagePtr im_crop;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|ldl", &IM, &mode, &threshold, &color) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	switch (mode) {
		case -1:
			mode = GD_CROP_DEFAULT;
			/* fallthrough: -1 means "use the default mode" */
		case GD_CROP_DEFAULT:
		case GD_CROP_TRANSPARENT:
		case GD_CROP_BLACK:
		case GD_CROP_WHITE:
		case GD_CROP_SIDES:
			im_crop = gdImageCropAuto(im, mode);
			break;

		case GD_CROP_THRESHOLD:
			/* Threshold mode needs a valid reference color; for palette
			   images the index must also be within the palette. */
			if (color < 0 || (!gdImageTrueColor(im) && color >= gdImageColorsTotal(im))) {
				php_error_docref(NULL, E_WARNING, "Color argument missing with threshold mode");
				RETURN_FALSE;
			}
			im_crop = gdImageCropThreshold(im, color, (float) threshold);
			break;

		default:
			php_error_docref(NULL, E_WARNING, "Unknown crop mode");
			RETURN_FALSE;
	}
	if (im_crop == NULL) {
		RETURN_FALSE;
	} else {
		RETURN_RES(zend_register_resource(im_crop, le_gd));
	}
}
/* }}} */

/* {{{ proto resource imagescale(resource im, int new_width[, int new_height[, int method]])
   Scale an image using the given new width and height.
 */
PHP_FUNCTION(imagescale)
{
	zval *IM;
	gdImagePtr im;
	gdImagePtr im_scaled = NULL;
	int new_width, new_height;
	zend_long tmp_w, tmp_h=-1, tmp_m = GD_BILINEAR_FIXED;
	gdInterpolationMethod method, old_method;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl|ll", &IM, &tmp_w, &tmp_h, &tmp_m) == FAILURE) {
		return;
	}
	method = tmp_m;

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	if (tmp_h < 0) {
		/* preserve ratio */
		long src_x, src_y;

		src_x = gdImageSX(im);
		src_y = gdImageSY(im);
		if (src_x) {
			tmp_h = tmp_w * src_y / src_x;
		}
	}

	/* Reject non-positive or int-overflowing dimensions. */
	if (tmp_h <= 0 || tmp_h > INT_MAX || tmp_w <= 0 || tmp_w > INT_MAX) {
		RETURN_FALSE;
	}

	new_width = tmp_w;
	new_height = tmp_h;

	/* gdImageGetInterpolationMethod() is only available as of GD 2.1.1;
	   save and restore the interpolation id by hand instead. */
	old_method = im->interpolation_id;
	if (gdImageSetInterpolationMethod(im, method)) {
		im_scaled = gdImageScale(im, new_width, new_height);
	}
	gdImageSetInterpolationMethod(im, old_method);

	if (im_scaled == NULL) {
		RETURN_FALSE;
	} else {
		RETURN_RES(zend_register_resource(im_scaled, le_gd));
	}
}
/* }}} */

/* {{{ proto resource imageaffine(resource src, array affine[, array clip])
   Return an image containing the affine transformed src image, using an optional clipping area */
PHP_FUNCTION(imageaffine)
{
	zval *IM;
	gdImagePtr src;
	gdImagePtr dst;
	gdRect rect;
	gdRectPtr pRect = NULL;
	zval *z_rect = NULL;
	zval *z_affine;
	zval *tmp;
	double affine[6];
	int i, nelems;
	zval *zval_affine_elem = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "ra|a", &IM, &z_affine, &z_rect) == FAILURE) {
		return;
	}

	if ((src = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	if ((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_affine))) != 6) {
		php_error_docref(NULL, E_WARNING, "Affine array must have six elements");
		RETURN_FALSE;
	}

	/* Accept long, double or numeric-string matrix entries. */
	for (i = 0; i < nelems; i++) {
		if ((zval_affine_elem = zend_hash_index_find(Z_ARRVAL_P(z_affine), i)) != NULL) {
			switch (Z_TYPE_P(zval_affine_elem)) {
				case IS_LONG:
					affine[i]
 = Z_LVAL_P(zval_affine_elem);
					break;
				case IS_DOUBLE:
					affine[i] = Z_DVAL_P(zval_affine_elem);
					break;
				case IS_STRING:
					affine[i] = zval_get_double(zval_affine_elem);
					break;
				default:
					php_error_docref(NULL, E_WARNING, "Invalid type for element %i", i);
					RETURN_FALSE;
			}
		}
	}

	/* Optional clip rectangle: all four keys mandatory when supplied. */
	if (z_rect != NULL) {
		if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "x", sizeof("x") - 1)) != NULL) {
			rect.x = zval_get_long(tmp);
		} else {
			php_error_docref(NULL, E_WARNING, "Missing x position");
			RETURN_FALSE;
		}

		if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "y", sizeof("y") - 1)) != NULL) {
			rect.y = zval_get_long(tmp);
		} else {
			php_error_docref(NULL, E_WARNING, "Missing y position");
			RETURN_FALSE;
		}

		if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "width", sizeof("width") - 1)) != NULL) {
			rect.width = zval_get_long(tmp);
		} else {
			php_error_docref(NULL, E_WARNING, "Missing width");
			RETURN_FALSE;
		}

		if ((tmp = zend_hash_str_find(Z_ARRVAL_P(z_rect), "height", sizeof("height") - 1)) != NULL) {
			rect.height = zval_get_long(tmp);
		} else {
			php_error_docref(NULL, E_WARNING, "Missing height");
			RETURN_FALSE;
		}
		pRect = &rect;
	} else {
		/* No clip given: default to the whole source image. */
		rect.x = -1;
		rect.y = -1;
		rect.width = gdImageSX(src);
		rect.height = gdImageSY(src);
		pRect = NULL;
	}

	if (gdTransformAffineGetImage(&dst, src, pRect, affine) != GD_TRUE) {
		RETURN_FALSE;
	}

	if (dst == NULL) {
		RETURN_FALSE;
	} else {
		RETURN_RES(zend_register_resource(dst, le_gd));
	}
}
/* }}} */

/* {{{ proto array imageaffinematrixget(int type[, array options])
   Build one of the standard affine matrices (translate/scale/rotate/shear)
   and return it as a 6-element array */
PHP_FUNCTION(imageaffinematrixget)
{
	double affine[6];
	zend_long type;
	zval *options = NULL;
	zval *tmp;
	int res = GD_FALSE, i;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|z", &type, &options) == FAILURE) {
		return;
	}

	switch((gdAffineStandardMatrix)type) {
		case GD_AFFINE_TRANSLATE:
		case GD_AFFINE_SCALE: {
			/* These two take an options array with "x" and "y" keys. */
			double x, y;
			if (!options || Z_TYPE_P(options) != IS_ARRAY) {
				php_error_docref(NULL, E_WARNING, "Array expected as options");
				RETURN_FALSE;
			}
			if ((tmp = zend_hash_str_find(Z_ARRVAL_P(options), "x", sizeof("x") - 1)) != NULL) {
				x = zval_get_double(tmp);
			} else {
				php_error_docref(NULL, E_WARNING, "Missing x position");
				RETURN_FALSE;
			}

			if ((tmp = zend_hash_str_find(Z_ARRVAL_P(options), "y", sizeof("y") - 1)) != NULL) {
				y = zval_get_double(tmp);
			} else {
				php_error_docref(NULL, E_WARNING, "Missing y position");
				RETURN_FALSE;
			}

			if (type == GD_AFFINE_TRANSLATE) {
				res = gdAffineTranslate(affine, x, y);
			} else {
				res = gdAffineScale(affine, x, y);
			}
			break;
		}

		case GD_AFFINE_ROTATE:
		case GD_AFFINE_SHEAR_HORIZONTAL:
		case GD_AFFINE_SHEAR_VERTICAL: {
			/* These take a single numeric option: the angle in degrees. */
			double angle;

			if (!options) {
				php_error_docref(NULL, E_WARNING, "Number is expected as option");
				RETURN_FALSE;
			}

			angle = zval_get_double(options);

			if (type == GD_AFFINE_SHEAR_HORIZONTAL) {
				res = gdAffineShearHorizontal(affine, angle);
			} else if (type == GD_AFFINE_SHEAR_VERTICAL) {
				res = gdAffineShearVertical(affine, angle);
			} else {
				res = gdAffineRotate(affine, angle);
			}
			break;
		}

		default:
			php_error_docref(NULL, E_WARNING, "Invalid type for element " ZEND_LONG_FMT, type);
			RETURN_FALSE;
	}

	if (res == GD_FALSE) {
		RETURN_FALSE;
	} else {
		array_init(return_value);

		for (i = 0; i < 6; i++) {
			add_index_double(return_value, i, affine[i]);
		}
	}
}
/* }}} */

/* {{{ proto array imageaffineconcat(array m1, array m2)
   Concat two matrices (as in doing many ops in one go) */
PHP_FUNCTION(imageaffinematrixconcat)
{
	double m1[6];
	double m2[6];
	double mr[6];

	zval *tmp;
	zval *z_m1;
	zval *z_m2;
	int i, nelems;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "aa", &z_m1, &z_m2) == FAILURE) {
		return;
	}

	if (((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m1))) != 6) || (nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m2))) != 6) {
		php_error_docref(NULL, E_WARNING, "Affine arrays must have six elements");
		RETURN_FALSE;
	}

	/* Copy both matrices, accepting long/double/numeric-string entries. */
	for (i = 0; i < 6; i++) {
		if ((tmp = zend_hash_index_find(Z_ARRVAL_P(z_m1), i)) != NULL) {
			switch (Z_TYPE_P(tmp)) {
				case IS_LONG:
					m1[i] = Z_LVAL_P(tmp);
					break;
				case IS_DOUBLE:
m1[i] = Z_DVAL_P(tmp);
					break;
				case IS_STRING:
					m1[i] = zval_get_double(tmp);
					break;
				default:
					php_error_docref(NULL, E_WARNING, "Invalid type for element %i", i);
					RETURN_FALSE;
			}
		}

		if ((tmp = zend_hash_index_find(Z_ARRVAL_P(z_m2), i)) != NULL) {
			switch (Z_TYPE_P(tmp)) {
				case IS_LONG:
					m2[i] = Z_LVAL_P(tmp);
					break;
				case IS_DOUBLE:
					m2[i] = Z_DVAL_P(tmp);
					break;
				case IS_STRING:
					m2[i] = zval_get_double(tmp);
					break;
				default:
					php_error_docref(NULL, E_WARNING, "Invalid type for element %i", i);
					RETURN_FALSE;
			}
		}
	}

	if (gdAffineConcat (mr, m1, m2) != GD_TRUE) {
		RETURN_FALSE;
	}

	array_init(return_value);
	for (i = 0; i < 6; i++) {
		add_index_double(return_value, i, mr[i]);
	}
}
/* }}} */

/* {{{ proto resource imagesetinterpolation(resource im [, int method]])
   Set the default interpolation method, passing -1 or 0 sets it to the libgd default (bilinear). */
PHP_FUNCTION(imagesetinterpolation)
{
	zval *IM;
	gdImagePtr im;
	zend_long method = GD_BILINEAR_FIXED;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &IM, &method) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	/* -1 is the documented "reset to default" sentinel. */
	if (method == -1) {
		method = GD_BILINEAR_FIXED;
	}
	RETURN_BOOL(gdImageSetInterpolationMethod(im, (gdInterpolationMethod) method));
}
/* }}} */

/* {{{ proto array imageresolution(resource im [, res_x, [res_y]])
   Get or set the resolution of the image in DPI.
 */
PHP_FUNCTION(imageresolution)
{
	zval *IM;
	gdImagePtr im;
	zend_long res_x = GD_RESOLUTION, res_y = GD_RESOLUTION;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|ll", &IM, &res_x, &res_y) == FAILURE) {
		return;
	}

	if ((im = (gdImagePtr)zend_fetch_resource(Z_RES_P(IM), "Image", le_gd)) == NULL) {
		RETURN_FALSE;
	}

	switch (ZEND_NUM_ARGS()) {
		case 3:
			/* Both DPI values supplied: set X and Y independently. */
			gdImageSetResolution(im, res_x, res_y);
			RETURN_TRUE;
		case 2:
			/* Single DPI value supplied: apply it to both axes. */
			gdImageSetResolution(im, res_x, res_x);
			RETURN_TRUE;
		default:
			/* Getter form: return array(res_x, res_y). */
			array_init(return_value);
			add_next_index_long(return_value, gdImageResolutionX(im));
			add_next_index_long(return_value, gdImageResolutionY(im));
	}
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: sw=4 ts=4 fdm=marker
 * vim<600: sw=4 ts=4
 */
158145.c
/* This testcase is part of GDB, the GNU debugger.

   Copyright 2012-2020 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* Empty-bodied breakpoint/step target for the debugger test.  */
void
do_nothing (void)
{
}

int
main ()
{
  int i;

  /* NOTE(review): the loop count and source layout are likely referenced by
     the accompanying .exp script — keep the structure unchanged.  */
  for (i = 0; i < 2; i++)
    do_nothing ();

  return 0;
}
670108.c
/* Return the maximum element of arr[0..n-1].  Assumes n >= 1 (arr[0] is
 * always read, exactly as in the original implementation). */
int largest(int arr[], int n)
{
    int best = arr[0];

    /* arr[0] is already the running maximum, so start scanning at 1. */
    for (int idx = 1; idx < n; idx++) {
        if (arr[idx] > best) {
            best = arr[idx];
        }
    }

    return best;
}
786577.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
 *                  Zoltan Sogor <weth@inf.u-szeged.hu>,
 *                  Patrik Kluba <pajko@halom.u-szeged.hu>,
 *                  University of Szeged, Hungary
 *             2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include "summary.h"

#ifdef CONFIG_JFFS2_SUMMARY
#ifndef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#endif

#include <linux/kernel.h>
#include <linux/slab.h>
#include <mtd_dev.h>
#include <linux/pagemap.h>
#include "los_crc32.h"
#include <linux/compiler.h>
#include "nodelist.h"
#include "debug.h"

/* Allocate c->summary and the buffer used to write summaries to flash.
 * The buffer is capped at MAX_SUMMARY_SIZE (or sector_size, if smaller). */
int jffs2_sum_init(struct jffs2_sb_info *c)
{
	uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);

	c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);

	if (!c->summary) {
		JFFS2_WARNING("Can't allocate memory for summary information!\n");
		return -ENOMEM;
	}

	c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL);

	if (!c->summary->sum_buf) {
		JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n");
		kfree(c->summary);
		return -ENOMEM;
	}

	dbg_summary("returned successfully\n");

	return 0;
}

/* Tear down everything jffs2_sum_init() allocated, dropping any records
 * still collected but not yet written out. */
void jffs2_sum_exit(struct jffs2_sb_info *c)
{
	dbg_summary("called\n");

	jffs2_sum_disable_collecting(c->summary);

	kfree(c->summary->sum_buf);
	c->summary->sum_buf = NULL;

	kfree(c->summary);
	c->summary = NULL;
}

/* Append one collected node record to s's singly-linked list and grow the
 * on-flash size accounting.  Returns 1 for unknown node types, 0 on success.
 * Ownership of 'item' passes to the list (freed when written or cleaned). */
static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
{
	if (!s->sum_list_head)
		s->sum_list_head = (union jffs2_sum_mem *) item;
	if (s->sum_list_tail)
		s->sum_list_tail->u.next = (union jffs2_sum_mem *) item;
	s->sum_list_tail = (union jffs2_sum_mem *) item;

	switch (je16_to_cpu(item->u.nodetype)) {
		case JFFS2_NODETYPE_INODE:
			s->sum_size += JFFS2_SUMMARY_INODE_SIZE;
			s->sum_num++;
			dbg_summary("inode (%u) added to summary\n",
						je32_to_cpu(item->i.inode));
			break;
		case JFFS2_NODETYPE_DIRENT:
			/* Dirent records are variable-sized (name appended). */
			s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize);
			s->sum_num++;
			dbg_summary("dirent (%u) added to summary\n",
						je32_to_cpu(item->d.ino));
			break;
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			s->sum_size += JFFS2_SUMMARY_XATTR_SIZE;
			s->sum_num++;
			dbg_summary("xattr (xid=%u, version=%u) added to summary\n",
						je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version));
			break;
		case JFFS2_NODETYPE_XREF:
			s->sum_size += JFFS2_SUMMARY_XREF_SIZE;
			s->sum_num++;
			dbg_summary("xref added to summary\n");
			break;
#endif
		default:
			JFFS2_WARNING("UNKNOWN node type %u\n",
						je16_to_cpu(item->u.nodetype));
			return 1;
	}
	return 0;
}

/* The following 3 functions are called from scan.c to collect summary info for not closed jeb */

int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size)
{
	dbg_summary("called with %u\n", size);
	s->sum_padded += size;
	return 0;
}

/* Record an inode node at jeb-relative offset 'ofs' in the collection. */
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
				uint32_t ofs)
{
	struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

	if (!temp)
		return -ENOMEM;

	temp->nodetype = ri->nodetype;
	temp->inode = ri->ino;
	temp->version = ri->version;
	temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
	temp->totlen = ri->totlen;
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

/* Record a dirent node (name copied into the trailing flexible area). */
int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd,
				uint32_t ofs)
{
	struct jffs2_sum_dirent_mem *temp =
		kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL);

	if (!temp)
		return -ENOMEM;

	temp->nodetype = rd->nodetype;
	temp->totlen = rd->totlen;
	temp->offset = cpu_to_je32(ofs);	/* relative from the beginning of the jeb */
	temp->pino = rd->pino;
	temp->version = rd->version;
	temp->ino = rd->ino;
	temp->nsize = rd->nsize;
	temp->type = rd->type;
	temp->next = NULL;

	memcpy(temp->name, rd->name, rd->nsize);

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

#ifdef CONFIG_JFFS2_FS_XATTR
int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct
jffs2_raw_xattr *rx, uint32_t ofs)
{
	struct jffs2_sum_xattr_mem *temp;

	temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	temp->nodetype = rx->nodetype;
	temp->xid = rx->xid;
	temp->version = rx->version;
	temp->offset = cpu_to_je32(ofs);
	temp->totlen = rx->totlen;
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

/* Record an xattr-reference node at jeb-relative offset 'ofs'. */
int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
{
	struct jffs2_sum_xref_mem *temp;

	temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	temp->nodetype = rr->nodetype;
	temp->offset = cpu_to_je32(ofs);
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}
#endif

/* Cleanup every collected summary information */
static void jffs2_sum_clean_collected(struct jffs2_summary *s)
{
	union jffs2_sum_mem *temp;

	if (!s->sum_list_head) {
		dbg_summary("already empty\n");
	}
	while (s->sum_list_head) {
		temp = s->sum_list_head;
		s->sum_list_head = s->sum_list_head->u.next;
		kfree(temp);
	}
	s->sum_list_tail = NULL;
	s->sum_padded = 0;
	s->sum_num = 0;
}

/* Drop collected records and reset size so collection starts fresh. */
void jffs2_sum_reset_collected(struct jffs2_summary *s)
{
	dbg_summary("called\n");
	jffs2_sum_clean_collected(s);
	s->sum_size = 0;
}

/* Drop collected records and mark this jeb as "no summary" via the
 * JFFS2_SUMMARY_NOSUM_SIZE sentinel. */
void jffs2_sum_disable_collecting(struct jffs2_summary *s)
{
	dbg_summary("called\n");
	jffs2_sum_clean_collected(s);
	s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
}

int jffs2_sum_is_disabled(struct jffs2_summary *s)
{
	return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE);
}

/* Move the collected summary information into sb (called from scan.c) */
void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s)
{
	dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n",
				c->summary->sum_size, c->summary->sum_num,
				s->sum_size, s->sum_num);

	c->summary->sum_size = s->sum_size;
	c->summary->sum_num = s->sum_num;
	c->summary->sum_padded = s->sum_padded;
	c->summary->sum_list_head = s->sum_list_head;
	c->summary->sum_list_tail = s->sum_list_tail;

	/* Ownership of the list has moved; detach it from 's'. */
	s->sum_list_head = s->sum_list_tail = NULL;
}

/* Called from wbuf.c to collect writed node info */
int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
				unsigned long count, uint32_t ofs)
{
	union jffs2_node_union *node;
	struct jffs2_eraseblock *jeb;

	if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) {
		dbg_summary("Summary is disabled for this jeb! Skipping summary info!\n");
		return 0;
	}

	/* The node header always lives in the first iovec. */
	node = invecs[0].iov_base;
	jeb = &c->blocks[ofs / c->sector_size];
	ofs -= jeb->offset;	/* from here on, 'ofs' is jeb-relative */

	switch (je16_to_cpu(node->u.nodetype)) {
		case JFFS2_NODETYPE_INODE: {
			struct jffs2_sum_inode_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->i.nodetype;
			temp->inode = node->i.ino;
			temp->version = node->i.version;
			temp->offset = cpu_to_je32(ofs);
			temp->totlen = node->i.totlen;
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}

		case JFFS2_NODETYPE_DIRENT: {
			struct jffs2_sum_dirent_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->d.nodetype;
			temp->totlen = node->d.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->pino = node->d.pino;
			temp->version = node->d.version;
			temp->ino = node->d.ino;
			temp->nsize = node->d.nsize;
			temp->type = node->d.type;
			temp->next = NULL;

			/* The dirent name may be inline (count==1) or in a second iovec. */
			switch (count) {
				case 1:
					memcpy(temp->name,node->d.name,node->d.nsize);
					break;

				case 2:
					memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
					break;

				default:
					BUG();	/* impossible count value */
					break;
			}

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR: {
			struct jffs2_sum_xattr_mem *temp;

			temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;

			temp->nodetype = node->x.nodetype;
			temp->xid = node->x.xid;
			temp->version = node->x.version;
			temp->totlen = node->x.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
		case JFFS2_NODETYPE_XREF: {
			struct jffs2_sum_xref_mem *temp;

			temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;
			temp->nodetype = node->r.nodetype;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#endif
		case JFFS2_NODETYPE_PADDING:
			dbg_summary("node PADDING\n");
			c->summary->sum_padded += je32_to_cpu(node->u.totlen);
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			dbg_summary("node CLEANMARKER\n");
			break;

		case JFFS2_NODETYPE_SUMMARY:
			dbg_summary("node SUMMARY\n");
			break;

		default:
			/* If you implement a new node type you should also implement
			   summary support for it or disable summary.
			*/
			BUG();
			break;
	}

	return 0;

no_mem:
	JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
	return -ENOMEM;
}

/* Link a node ref during summary-driven scan; 'ofs' is jeb-relative and may
 * carry REF_* flag bits in its low 2 bits (hence the & ~3 masking below). */
static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c,
						struct jffs2_eraseblock *jeb,
						uint32_t ofs, uint32_t len,
						struct jffs2_inode_cache *ic)
{
	/* If there was a gap, mark it dirty */
	if ((ofs & ~3) > c->sector_size - jeb->free_size) {
		/* Ew.
Summary doesn't actually tell us explicitly about dirty space */
		jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size));
	}

	return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic);
}

/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */

static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
{
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	uintptr_t sp;	/* cursor walking the variable-sized records in summary->sum */
	int i, ino;
	int err;

	sp = (uintptr_t)summary->sum;
#if 0
	PRINTK("summary: %x %x %d %d %x %x %d %x %x %p %p\n",
			je16_to_cpu(summary->magic), je16_to_cpu(summary->nodetype),
			je32_to_cpu(summary->totlen), je32_to_cpu(summary->hdr_crc),
			je32_to_cpu(summary->sum_num), je32_to_cpu(summary->cln_mkr),
			je32_to_cpu(summary->padded), je32_to_cpu(summary->sum_crc),
			je32_to_cpu(summary->node_crc), sp, summary->sum);
#endif
	for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
		dbg_summary("processing summary index %d\n", i);

		cond_resched();

		/* Make sure there's a spare ref for dirty space */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		//PRINTK("sum type %d \n", je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype));
		switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
			case JFFS2_NODETYPE_INODE: {
				struct jffs2_sum_inode_flash *spi;
				spi = (struct jffs2_sum_inode_flash *)sp;
				ino = je32_to_cpu(spi->inode);

				dbg_summary("Inode at 0x%08x-0x%08x\n",
						jeb->offset + je32_to_cpu(spi->offset),
						jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen));

				ic = jffs2_scan_make_ino_cache(c, ino);
				if (!ic) {
					JFFS2_NOTICE("scan_make_ino_cache failed\n");
					return -ENOMEM;
				}

				/* REF_UNCHECKED: CRC not verified yet; checked lazily later. */
				sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED,
						PAD(je32_to_cpu(spi->totlen)), ic);

				*pseudo_random += je32_to_cpu(spi->version);

				sp += JFFS2_SUMMARY_INODE_SIZE;
				//PRINTK("1 sp + %d %p\n", JFFS2_SUMMARY_INODE_SIZE, sp);
				break;
			}

			case JFFS2_NODETYPE_DIRENT: {
				struct jffs2_sum_dirent_flash *spd;
				int checkedlen;
				spd = (struct jffs2_sum_dirent_flash *)sp;
#if 0
				PRINTK("dir: %x %d %d %d %d %d %d %d %d\n", je16_to_cpu(spd->nodetype), je32_to_cpu(spd->totlen),
						je32_to_cpu(spd->offset), je32_to_cpu(spd->pino), je32_to_cpu(spd->version),
						je32_to_cpu(spd->ino), spd->nsize, spd->type, spd->name);
#endif
				dbg_summary("Dirent at 0x%08x-0x%08x\n",
						jeb->offset + je32_to_cpu(spd->offset),
						jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen));

				/* This should never happen, but https://dev.laptop.org/ticket/4184 */
				checkedlen = strnlen((const char *)spd->name, spd->nsize);
				if (!checkedlen) {
					pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n",
						jeb->offset + je32_to_cpu(spd->offset));
					return -EIO;
				}
				if (checkedlen < spd->nsize) {
					pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
						jeb->offset + je32_to_cpu(spd->offset), checkedlen);
				}

				fd = jffs2_alloc_full_dirent(checkedlen+1);
				if (!fd)
					return -ENOMEM;

				memcpy(&fd->name, spd->name, checkedlen);
				fd->name[checkedlen] = 0;
				//PRINTK("add %s \n", fd->name);

				ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
				if (!ic) {
					jffs2_free_full_dirent(fd);
					return -ENOMEM;
				}

				fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED,
						PAD(je32_to_cpu(spd->totlen)), ic);

				fd->next = NULL;
				fd->version = je32_to_cpu(spd->version);
				fd->ino = je32_to_cpu(spd->ino);
				fd->nhash = full_name_hash((const unsigned char *)fd->name, checkedlen);
				fd->type = spd->type;

				jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

				*pseudo_random += je32_to_cpu(spd->version);

				//PRINTK("2 sp before add %p\n", sp);
				sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);
				//PRINTK("2 sp + %d %p\n", JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize), sp);
				break;
			}
#ifdef CONFIG_JFFS2_FS_XATTR
			case JFFS2_NODETYPE_XATTR: {
				struct jffs2_xattr_datum *xd;
				struct jffs2_sum_xattr_flash *spx;
				spx = (struct jffs2_sum_xattr_flash *)sp;

				dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
						jeb->offset + je32_to_cpu(spx->offset),
						jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
						je32_to_cpu(spx->xid), je32_to_cpu(spx->version));

				xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
								je32_to_cpu(spx->version));
				if (IS_ERR(xd))
					return PTR_ERR(xd);
				if (xd->version > je32_to_cpu(spx->version)) {
					/* node is not the newest one */
					struct jffs2_raw_node_ref *raw
						= sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
								PAD(je32_to_cpu(spx->totlen)), NULL);
					raw->next_in_ino = xd->node->next_in_ino;
					xd->node->next_in_ino = raw;
				} else {
					xd->version = je32_to_cpu(spx->version);
					sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
							PAD(je32_to_cpu(spx->totlen)), (void *)xd);
				}
				*pseudo_random += je32_to_cpu(spx->xid);
				sp += JFFS2_SUMMARY_XATTR_SIZE;

				break;
			}
			case JFFS2_NODETYPE_XREF: {
				struct jffs2_xattr_ref *ref;
				struct jffs2_sum_xref_flash *spr;

				spr = (struct jffs2_sum_xref_flash *)sp;
				dbg_summary("xref at %#08x-%#08x\n",
						jeb->offset + je32_to_cpu(spr->offset),
						jeb->offset + je32_to_cpu(spr->offset) +
						(uint32_t)PAD(sizeof(struct jffs2_raw_xref)));

				ref = jffs2_alloc_xattr_ref();
				if (!ref) {
					JFFS2_NOTICE("allocation of xattr_datum failed\n");
					return -ENOMEM;
				}
				ref->next = c->xref_temp;
				c->xref_temp = ref;

				sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
						PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);

				*pseudo_random += ref->node->flash_offset;
				sp += JFFS2_SUMMARY_XREF_SIZE;

				break;
			}
#endif
			default : {
				uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype);
				JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype);
				if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT)
					return -EIO;

				/* For compatible node types, just fall back to the full scan */
				c->wasted_size -= jeb->wasted_size;
				c->free_size += c->sector_size - jeb->free_size;
				c->used_size -= jeb->used_size;
				c->dirty_size -= jeb->dirty_size;
				jeb->wasted_size = jeb->used_size =
					jeb->dirty_size = 0;
				jeb->free_size = c->sector_size;

				jffs2_free_jeb_node_refs(c, jeb);
				return -ENOTRECOVERABLE;
			}
		}
	}
	return 0;
}

/* Process the summary node - called from jffs2_scan_eraseblock() */
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_summary *summary, uint32_t sumsize,
				uint32_t *pseudo_random)
{
	struct jffs2_unknown_node crcnode;
	int ret, ofs;
	uint32_t crc;

	/* The summary node always sits at the very end of the eraseblock. */
	ofs = c->sector_size - sumsize;

	dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
			jeb->offset, jeb->offset + ofs, sumsize);

	/* OK, now check for node validity and CRC */
	crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	crcnode.totlen = summary->totlen;
	crc = crc32(0, &crcnode, sizeof(crcnode)-4);

	if (je32_to_cpu(summary->hdr_crc) != crc) {
		dbg_summary("Summary node header is corrupt (bad CRC or "
				"no summary at all)\n");
		goto crc_err;
	}

	if (je32_to_cpu(summary->totlen) != sumsize) {
		dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
		goto crc_err;
	}

	crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8);

	if (je32_to_cpu(summary->node_crc) != crc) {
		dbg_summary("Summary node is corrupt (bad CRC)\n");
		goto crc_err;
	}

	crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary));

	if (je32_to_cpu(summary->sum_crc) != crc) {
		dbg_summary("Summary node data is corrupt (bad CRC)\n");
		goto crc_err;
	}

	/* Non-zero cln_mkr records the cleanmarker size at block start. */
	if ( je32_to_cpu(summary->cln_mkr) ) {
		dbg_summary("Summary : CLEANMARKER node \n");

		ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
		if (ret)
			return ret;

		if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
				je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else if (jeb->first_node) {
			dbg_summary("CLEANMARKER node not first node in block "
					"(0x%08x)\n", jeb->offset);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else {
			jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL,
					je32_to_cpu(summary->cln_mkr), NULL);
		}
	}

	ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
	/* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full
	   scan of this eraseblock. So return zero */
	if (ret == -ENOTRECOVERABLE)
		return 0;
	if (ret)
		return ret;		/* real error */

	/* for PARANOIA_CHECK */
	ret = jffs2_prealloc_raw_node_refs(c, jeb, 2);
	if (ret)
		return ret;

	/* Account for the summary node itself at the end of the block. */
	sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL);

	if (unlikely(jeb->free_size)) {
		JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n",
				jeb->free_size, jeb->offset);
		jeb->wasted_size += jeb->free_size;
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->free_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);

crc_err:
	/* Corrupt summary is non-fatal: caller falls back to a full scan. */
	JFFS2_WARNING("Summary node crc error, skipping summary information.\n");

	return 0;
}

/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */

static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				uint32_t infosize, uint32_t datasize, int padsize)
{
	struct jffs2_raw_summary isum;
	union jffs2_sum_mem *temp;
	struct jffs2_sum_marker *sm;
	struct kvec vecs[2];
	uint32_t sum_ofs;
	uintptr_t wpage;	/* write cursor into sum_buf */
	int ret;
	size_t retlen;

	if (padsize + datasize > MAX_SUMMARY_SIZE) {
		/* It won't fit in the buffer.
		   Abort summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Summary too big (%d data, %d pad) in eraseblock at %08x\n",
				datasize, padsize, jeb->offset);
		/* Non-fatal */
		return 0;
	}
	/* Is there enough space for summary? */
	if (padsize < 0) {
		/* don't try to write out summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Not enough space for summary, padsize = %d\n",
				padsize);
		/* Non-fatal */
		return 0;
	}

	memset(c->summary->sum_buf, 0xff, datasize);
	memset(&isum, 0, sizeof(isum));

	isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	isum.totlen = cpu_to_je32(infosize);
	isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
	isum.padded = cpu_to_je32(c->summary->sum_padded);
	isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
	isum.sum_num = cpu_to_je32(c->summary->sum_num);
	wpage = (uintptr_t)c->summary->sum_buf;

	/* Serialize the collected record list into sum_buf, freeing as we go. */
	while (c->summary->sum_num) {
		temp = c->summary->sum_list_head;

		switch (je16_to_cpu(temp->u.nodetype)) {
			case JFFS2_NODETYPE_INODE: {
				struct jffs2_sum_inode_flash *sino_ptr = (struct jffs2_sum_inode_flash *)wpage;

				sino_ptr->nodetype = temp->i.nodetype;
				sino_ptr->inode = temp->i.inode;
				sino_ptr->version = temp->i.version;
				sino_ptr->offset = temp->i.offset;
				sino_ptr->totlen = temp->i.totlen;

				wpage += JFFS2_SUMMARY_INODE_SIZE;

				break;
			}

			case JFFS2_NODETYPE_DIRENT: {
				struct jffs2_sum_dirent_flash *sdrnt_ptr = (struct jffs2_sum_dirent_flash *)wpage;

				sdrnt_ptr->nodetype = temp->d.nodetype;
				sdrnt_ptr->totlen = temp->d.totlen;
				sdrnt_ptr->offset = temp->d.offset;
				sdrnt_ptr->pino = temp->d.pino;
				sdrnt_ptr->version = temp->d.version;
				sdrnt_ptr->ino = temp->d.ino;
				sdrnt_ptr->nsize = temp->d.nsize;
				sdrnt_ptr->type = temp->d.type;

				memcpy(sdrnt_ptr->name, temp->d.name,
						temp->d.nsize);

				wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);

				break;
			}
#ifdef CONFIG_JFFS2_FS_XATTR
			case JFFS2_NODETYPE_XATTR: {
				/* NOTE(review): implicit uintptr_t -> pointer conversion here
				   (no cast, unlike the INODE/DIRENT cases above) — a C
				   constraint violation; should be
				   (struct jffs2_sum_xattr_flash *)wpage. Confirm and fix. */
				struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxattr_ptr->nodetype = temp->x.nodetype;
				sxattr_ptr->xid = temp->x.xid;
				sxattr_ptr->version = temp->x.version;
				sxattr_ptr->offset = temp->x.offset;
				sxattr_ptr->totlen = temp->x.totlen;

				wpage += JFFS2_SUMMARY_XATTR_SIZE;
				break;
			}
			case JFFS2_NODETYPE_XREF: {
				/* NOTE(review): same missing cast as the XATTR case above. */
				struct jffs2_sum_xref_flash *sxref_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxref_ptr->nodetype = temp->r.nodetype;
				sxref_ptr->offset = temp->r.offset;

				wpage += JFFS2_SUMMARY_XREF_SIZE;
				break;
			}
#endif
			default : {
				if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
						== JFFS2_FEATURE_RWCOMPAT_COPY) {
					dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
							je16_to_cpu(temp->u.nodetype));
					jffs2_sum_disable_collecting(c->summary);
				} else {
					BUG();	/* unknown node in summary information */
				}
			}
		}

		c->summary->sum_list_head = temp->u.next;
		kfree(temp);

		c->summary->sum_num--;
	}

	jffs2_sum_reset_collected(c->summary);

	wpage += padsize;

	/* Trailing marker: lets the scanner find the summary from block end. */
	sm = (struct jffs2_sum_marker *)wpage;
	sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
	sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);

	isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
	isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));

	vecs[0].iov_base = &isum;
	vecs[0].iov_len = sizeof(isum);
	vecs[1].iov_base = c->summary->sum_buf;
	vecs[1].iov_len = datasize;

	sum_ofs = jeb->offset + c->sector_size - jeb->free_size;

	dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs);

	ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);

	if (ret || (retlen != infosize)) {

		JFFS2_WARNING("Write of %u bytes at 0x%08x failed.
returned %d, retlen %zd\n", infosize, sum_ofs, ret, retlen); if (retlen) { /* Waste remaining space */ spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL); spin_unlock(&c->erase_completion_lock); } c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; return 0; } spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL); spin_unlock(&c->erase_completion_lock); return 0; } /* Write out summary information - called from jffs2_do_reserve_space */ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) //__must_hold(&c->erase_completion_block) { int datasize, infosize, padsize; struct jffs2_eraseblock *jeb; int ret = 0; dbg_summary("called\n"); spin_unlock(&c->erase_completion_lock); jeb = c->nextblock; jffs2_prealloc_raw_node_refs(c, jeb, 1); if (!c->summary->sum_num || !c->summary->sum_list_head) { JFFS2_WARNING("Empty summary info!!!\n"); BUG(); } datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); infosize = sizeof(struct jffs2_raw_summary) + datasize; padsize = jeb->free_size - infosize; infosize += padsize; datasize += padsize; ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); spin_lock(&c->erase_completion_lock); return ret; } #endif
96013.c
//===-- lib/arm/aeabi_frsub.c - Single-precision subtraction --------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #define SINGLE_PRECISION #include "../fp_lib.h" AEABI_RTABI fp_t __aeabi_fsub(fp_t, fp_t); AEABI_RTABI fp_t __aeabi_frsub(fp_t a, fp_t b) { return __aeabi_fsub(b, a); }
39851.c
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*  Fluent Bit
 *  ==========
 *  Copyright (C) 2019 The Fluent Bit Authors
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_sds.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_aws_credentials.h>
#include <fluent-bit/flb_aws_util.h>
#include <fluent-bit/flb_jsmn.h>

#include <stdlib.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>

/* HTTP Credentials Endpoints have a standard set of JSON Keys */
#define AWS_HTTP_RESPONSE_ACCESS_KEY   "AccessKeyId"
#define AWS_HTTP_RESPONSE_SECRET_KEY   "SecretAccessKey"
#define AWS_HTTP_RESPONSE_TOKEN        "Token"
#define AWS_HTTP_RESPONSE_EXPIRATION   "Expiration"

#define ECS_CREDENTIALS_HOST           "169.254.170.2"
#define ECS_CREDENTIALS_HOST_LEN       13
#define ECS_CREDENTIALS_PATH_ENV_VAR   "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"

/* Declarations */
struct flb_aws_provider_http;

static int http_credentials_request(struct flb_aws_provider_http
                                    *implementation);

/*
 * HTTP Credentials Provider - retrieve credentials from a local http server
 * Used to implement the ECS Credentials provider.
 * Equivalent to:
 * https://github.com/aws/aws-sdk-go/tree/master/aws/credentials/endpointcreds
 */
struct flb_aws_provider_http {
    /* Most recently fetched credentials; owned by this struct. */
    struct flb_aws_credentials *creds;
    /* Time at which 'creds' should be refreshed; <= 0 disables auto-refresh. */
    time_t next_refresh;

    /* HTTP client used to reach the credentials endpoint. */
    struct flb_aws_client *client;

    /* Host and Path to request credentials */
    flb_sds_t host;
    flb_sds_t path;
};

/*
 * Returns a deep copy of the cached credentials, refreshing them first when
 * they are stale.  The caller owns the returned structure and must free it
 * with flb_aws_credentials_destroy().  Returns NULL when no credentials are
 * available (e.g. another co-routine holds the provider lock mid-refresh).
 */
struct flb_aws_credentials *get_credentials_fn_http(struct flb_aws_provider
                                                    *provider)
{
    struct flb_aws_credentials *creds = NULL;
    int refresh = FLB_FALSE;
    struct flb_aws_provider_http *implementation = provider->implementation;

    flb_debug("[aws_credentials] Retrieving credentials from the "
              "HTTP provider..");

    /* a negative next_refresh means that auto-refresh is disabled */
    if (implementation->next_refresh > 0
        && time(NULL) > implementation->next_refresh) {
        refresh = FLB_TRUE;
    }

    if (!implementation->creds || refresh == FLB_TRUE) {
        if (try_lock_provider(provider)) {
            http_credentials_request(implementation);
            unlock_provider(provider);
        }
    }

    if (!implementation->creds) {
        /*
         * We failed to lock the provider and creds are unset. This means that
         * another co-routine is performing the refresh.
         */
        flb_warn("[aws_credentials] No cached credentials are available and "
                 "a credential refresh is already in progress. The current "
                 "co-routine will retry.");

        return NULL;
    }

    /*
     * Zero-initialize the copy so that a partial-allocation failure below
     * leaves only NULL pointers for flb_aws_credentials_destroy() to free.
     * (flb_malloc would leave secret_access_key/session_token uninitialized,
     * and the error path would then free garbage pointers.)
     */
    creds = flb_calloc(1, sizeof(struct flb_aws_credentials));
    if (!creds) {
        flb_errno();
        goto error;
    }

    creds->access_key_id = flb_sds_create(implementation->creds->access_key_id);
    if (!creds->access_key_id) {
        flb_errno();
        goto error;
    }

    creds->secret_access_key = flb_sds_create(implementation->creds->
                                              secret_access_key);
    if (!creds->secret_access_key) {
        flb_errno();
        goto error;
    }

    if (implementation->creds->session_token) {
        creds->session_token = flb_sds_create(implementation->creds->
                                              session_token);
        if (!creds->session_token) {
            flb_errno();
            goto error;
        }
    } else {
        creds->session_token = NULL;
    }

    return creds;

error:
    flb_aws_credentials_destroy(creds);
    return NULL;
}

/*
 * Forces a credential refresh (when the provider lock can be taken).
 * Returns 0 on success, negative on failure.
 */
int refresh_fn_http(struct flb_aws_provider *provider)
{
    struct flb_aws_provider_http *implementation = provider->implementation;
    int ret = -1;

    flb_debug("[aws_credentials] Refresh called on the http provider");

    if (try_lock_provider(provider)) {
        ret = http_credentials_request(implementation);
        unlock_provider(provider);
    }

    return ret;
}

/*
 * Initial credential fetch.  Runs with debug_only set on the client so that
 * an expected failure (endpoint not present) is not logged as an error.
 */
int init_fn_http(struct flb_aws_provider *provider)
{
    struct flb_aws_provider_http *implementation = provider->implementation;
    int ret = -1;

    flb_debug("[aws_credentials] Init called on the http provider");

    implementation->client->debug_only = FLB_TRUE;

    if (try_lock_provider(provider)) {
        ret = http_credentials_request(implementation);
        unlock_provider(provider);
    }

    implementation->client->debug_only = FLB_FALSE;

    return ret;
}

/* Switches the provider's upstream connection to synchronous (blocking) I/O. */
void sync_fn_http(struct flb_aws_provider *provider)
{
    struct flb_aws_provider_http *implementation = provider->implementation;

    flb_debug("[aws_credentials] Sync called on the http provider");

    /* remove async flag */
    implementation->client->upstream->flags &= ~(FLB_IO_ASYNC);
}

/* Switches the provider's upstream connection to asynchronous I/O. */
void async_fn_http(struct flb_aws_provider *provider)
{
    struct flb_aws_provider_http *implementation = provider->implementation;

    flb_debug("[aws_credentials] Async called on the http provider");

    /* add async flag */
    implementation->client->upstream->flags |= FLB_IO_ASYNC;
}

/* Associates the provider's upstream with the given output instance. */
void upstream_set_fn_http(struct flb_aws_provider *provider,
                          struct flb_output_instance *ins)
{
    struct flb_aws_provider_http *implementation = provider->implementation;

    flb_debug("[aws_credentials] upstream_set called on the http provider");

    /* set upstream on output */
    flb_output_upstream_set(implementation->client->upstream, ins);
}

/*
 * Frees everything owned by the provider implementation (credentials, HTTP
 * client, host and path strings) and clears provider->implementation.
 */
void destroy_fn_http(struct flb_aws_provider *provider)
{
    struct flb_aws_provider_http *implementation = provider->implementation;

    if (implementation) {
        if (implementation->creds) {
            flb_aws_credentials_destroy(implementation->creds);
        }

        if (implementation->client) {
            flb_aws_client_destroy(implementation->client);
        }

        if (implementation->host) {
            flb_sds_destroy(implementation->host);
        }

        if (implementation->path) {
            flb_sds_destroy(implementation->path);
        }

        flb_free(implementation);
        provider->implementation = NULL;
    }

    return;
}

static struct flb_aws_provider_vtable http_provider_vtable = {
    .get_credentials = get_credentials_fn_http,
    .init = init_fn_http,
    .refresh = refresh_fn_http,
    .destroy = destroy_fn_http,
    .sync = sync_fn_http,
    .async = async_fn_http,
    .upstream_set = upstream_set_fn_http,
};

/*
 * Creates an HTTP credentials provider that fetches credentials from
 * http://<host>:80<path>.  Takes ownership of 'host' and 'path' (they are
 * released by destroy_fn_http via flb_aws_provider_destroy).
 * Returns NULL on failure.
 */
struct flb_aws_provider *flb_http_provider_create(struct flb_config *config,
                                                  flb_sds_t host,
                                                  flb_sds_t path,
                                                  struct
                                                  flb_aws_client_generator
                                                  *generator)
{
    struct flb_aws_provider_http *implementation = NULL;
    struct flb_aws_provider *provider = NULL;
    struct flb_upstream *upstream = NULL;

    flb_debug("[aws_credentials] Configuring HTTP provider with %s:80%s",
              host, path);

    provider = flb_calloc(1, sizeof(struct flb_aws_provider));
    if (!provider) {
        flb_errno();
        return NULL;
    }

    implementation = flb_calloc(1, sizeof(struct flb_aws_provider_http));
    if (!implementation) {
        flb_free(provider);
        flb_errno();
        return NULL;
    }

    provider->provider_vtable = &http_provider_vtable;
    provider->implementation = implementation;

    implementation->host = host;
    implementation->path = path;

    upstream = flb_upstream_create(config, host, 80, FLB_IO_TCP, NULL);

    if (!upstream) {
        /* provider destroy also frees host/path via the implementation */
        flb_aws_provider_destroy(provider);
        flb_error("[aws_credentials] HTTP Provider: connection initialization "
                  "error");
        return NULL;
    }

    upstream->net.connect_timeout = FLB_AWS_CREDENTIAL_NET_TIMEOUT;

    implementation->client = generator->create();
    if (!implementation->client) {
        flb_aws_provider_destroy(provider);
        flb_upstream_destroy(upstream);
        flb_error("[aws_credentials] HTTP Provider: client creation error");
        return NULL;
    }

    /* Endpoint-credentials requests are plain unsigned HTTP GETs. */
    implementation->client->name = "http_provider_client";
    implementation->client->has_auth = FLB_FALSE;
    implementation->client->provider = NULL;
    implementation->client->region = NULL;
    implementation->client->service = NULL;
    implementation->client->port = 80;
    implementation->client->flags = 0;
    implementation->client->proxy = NULL;
    implementation->client->upstream = upstream;

    return provider;
}

/*
 * ECS Provider
 * The ECS Provider is just a wrapper around the HTTP Provider
 * with the ECS credentials endpoint.
 *
 * Returns NULL when AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is unset (not an
 * error: it simply means the process is not running on ECS).
 */
struct flb_aws_provider *flb_ecs_provider_create(struct flb_config *config,
                                                 struct
                                                 flb_aws_client_generator
                                                 *generator)
{
    flb_sds_t host = NULL;
    flb_sds_t path = NULL;
    char *path_var = NULL;

    host = flb_sds_create_len(ECS_CREDENTIALS_HOST, ECS_CREDENTIALS_HOST_LEN);
    if (!host) {
        flb_errno();
        return NULL;
    }

    path_var = getenv(ECS_CREDENTIALS_PATH_ENV_VAR);
    if (path_var && strlen(path_var) > 0) {
        path = flb_sds_create(path_var);
        if (!path) {
            flb_errno();
            /*
             * 'host' is an flb_sds_t: it must be released with
             * flb_sds_destroy(), not flb_free() (the sds header precedes the
             * returned pointer, so flb_free on it frees mid-allocation).
             */
            flb_sds_destroy(host);
            return NULL;
        }

        return flb_http_provider_create(config, host, path, generator);
    } else {
        flb_debug("[aws_credentials] Not initializing ECS Provider because"
                  " %s is not set", ECS_CREDENTIALS_PATH_ENV_VAR);
        flb_sds_destroy(host);
        return NULL;
    }
}

/*
 * Performs one GET against the configured endpoint, parses the JSON payload
 * and, on success, replaces implementation->creds and schedules the next
 * refresh.  Returns 0 on success, -1 on failure (cached creds untouched).
 */
static int http_credentials_request(struct flb_aws_provider_http
                                    *implementation)
{
    char *response = NULL;
    size_t response_len;
    time_t expiration;
    struct flb_aws_credentials *creds = NULL;
    struct flb_aws_client *client = implementation->client;
    struct flb_http_client *c = NULL;

    c = client->client_vtable->request(client, FLB_HTTP_GET,
                                       implementation->path, NULL, 0,
                                       NULL, 0);

    if (!c || c->resp.status != 200) {
        flb_debug("[aws_credentials] http credentials request failed");
        if (c) {
            flb_http_client_destroy(c);
        }
        return -1;
    }

    response = c->resp.payload;
    response_len = c->resp.payload_size;

    creds = flb_parse_http_credentials(response, response_len, &expiration);
    if (!creds) {
        flb_http_client_destroy(c);
        return -1;
    }

    /* destroy existing credentials */
    flb_aws_credentials_destroy(implementation->creds);
    implementation->creds = NULL;

    implementation->creds = creds;
    /* refresh a little before the reported expiration time */
    implementation->next_refresh = expiration - FLB_AWS_REFRESH_WINDOW;
    flb_http_client_destroy(c);
    return 0;
}

/*
 * All HTTP credentials endpoints (IMDS, ECS, custom) follow the same spec:
 * {
 *   "AccessKeyId": "ACCESS_KEY_ID",
 *   "Expiration": "2019-12-18T21:27:58Z",
 *   "SecretAccessKey": "SECRET_ACCESS_KEY",
 *   "Token": "SECURITY_TOKEN_STRING"
 * }
 * (some implementations (IMDS) have additional fields)
 * Returns NULL if any part of parsing was unsuccessful.
 */
struct flb_aws_credentials *flb_parse_http_credentials(char *response,
                                                       size_t response_len,
                                                       time_t *expiration)
{
    jsmntok_t *tokens = NULL;
    const jsmntok_t *t = NULL;
    char *current_token = NULL;
    jsmn_parser parser;
    int tokens_size = 50;
    size_t size;
    int ret;
    struct flb_aws_credentials *creds = NULL;
    int i = 0;
    int len;
    flb_sds_t tmp;

    /*
     * Remove/reset existing value of expiration.
     * Expiration should be in the response, but it is not
     * strictly speaking needed. Fluent Bit logs a warning if it is missing.
     */
    *expiration = -1;

    jsmn_init(&parser);

    size = sizeof(jsmntok_t) * tokens_size;
    tokens = flb_calloc(1, size);
    if (!tokens) {
        goto error;
    }

    ret = jsmn_parse(&parser, response, response_len, tokens, tokens_size);

    if (ret == JSMN_ERROR_INVAL || ret == JSMN_ERROR_PART) {
        flb_error("[aws_credentials] Could not parse http credentials response"
                  " - invalid JSON.");
        goto error;
    }

    /* Shouldn't happen, but just in case, check for too many tokens error */
    if (ret == JSMN_ERROR_NOMEM) {
        flb_error("[aws_credentials] Could not parse http credentials response"
                  " - response contained more tokens than expected.");
        goto error;
    }

    /* return value is number of tokens parsed */
    tokens_size = ret;

    creds = flb_calloc(1, sizeof(struct flb_aws_credentials));
    if (!creds) {
        flb_errno();
        goto error;
    }

    /*
     * jsmn will create an array of tokens like:
     * key, value, key, value
     */
    while (i < (tokens_size - 1)) {
        t = &tokens[i];

        if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)) {
            break;
        }

        if (t->type == JSMN_STRING) {
            current_token = &response[t->start];
            len = t->end - t->start;

            if (strncmp(current_token, AWS_HTTP_RESPONSE_ACCESS_KEY, len) == 0)
            {
                i++;
                t = &tokens[i];
                current_token = &response[t->start];
                len = t->end - t->start;
                creds->access_key_id = flb_sds_create_len(current_token, len);
                if (!creds->access_key_id) {
                    flb_errno();
                    goto error;
                }
                continue;
            }
            if (strncmp(current_token, AWS_HTTP_RESPONSE_SECRET_KEY, len) == 0)
            {
                i++;
                t = &tokens[i];
                current_token = &response[t->start];
                len = t->end - t->start;
                creds->secret_access_key = flb_sds_create_len(current_token,
                                                              len);
                if (!creds->secret_access_key) {
                    flb_errno();
                    goto error;
                }
                continue;
            }
            if (strncmp(current_token, AWS_HTTP_RESPONSE_TOKEN, len) == 0) {
                i++;
                t = &tokens[i];
                current_token = &response[t->start];
                len = t->end - t->start;
                creds->session_token = flb_sds_create_len(current_token, len);
                if (!creds->session_token) {
                    flb_errno();
                    goto error;
                }
                continue;
            }
            if (strncmp(current_token, AWS_HTTP_RESPONSE_EXPIRATION, len) == 0)
            {
                i++;
                t = &tokens[i];
                current_token = &response[t->start];
                len = t->end - t->start;
                tmp = flb_sds_create_len(current_token, len);
                if (!tmp) {
                    flb_errno();
                    goto error;
                }
                *expiration = flb_aws_cred_expiration(tmp);
                flb_sds_destroy(tmp);
                if (*expiration < 0) {
                    flb_warn("[aws_credentials] '%s' was invalid or "
                             "could not be parsed. Disabling auto-refresh of "
                             "credentials.", AWS_HTTP_RESPONSE_EXPIRATION);
                }
            }
        }

        i++;
    }

    if (creds->access_key_id == NULL) {
        flb_error("[aws_credentials] Missing %s field in http"
                  "credentials response", AWS_HTTP_RESPONSE_ACCESS_KEY);
        goto error;
    }

    if (creds->secret_access_key == NULL) {
        flb_error("[aws_credentials] Missing %s field in http"
                  "credentials response", AWS_HTTP_RESPONSE_SECRET_KEY);
        goto error;
    }

    if (creds->session_token == NULL) {
        flb_error("[aws_credentials] Missing %s field in http"
                  "credentials response", AWS_HTTP_RESPONSE_TOKEN);
        goto error;
    }

    flb_free(tokens);
    return creds;

error:
    flb_aws_credentials_destroy(creds);
    flb_free(tokens);
    return NULL;
}
447854.c
//to test the impact of arguments #include <stdio.h> int main (int argc, char ** argv) { int * p; if(argc>0) p = NULL; else if(argc<0) p = &argc; fprintf(stderr, "%d\n", *p); // Here, p must points toward argc return 0; }
146381.c
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alon Levy */ #include <linux/crc32.h> #include "qxl_drv.h" #include "qxl_object.h" #include "drm_crtc_helper.h" static bool qxl_head_enabled(struct qxl_head *head) { return head->width && head->height; } void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) { if (qdev->client_monitors_config && count > qdev->client_monitors_config->count) { kfree(qdev->client_monitors_config); qdev->client_monitors_config = NULL; } if (!qdev->client_monitors_config) { qdev->client_monitors_config = kzalloc( sizeof(struct qxl_monitors_config) + sizeof(struct qxl_head) * count, GFP_KERNEL); if (!qdev->client_monitors_config) { qxl_io_log(qdev, "%s: allocation failure for %u heads\n", __func__, count); return; } } qdev->client_monitors_config->count = count; } static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) { int i; int num_monitors; uint32_t crc; num_monitors = qdev->rom->client_monitors_config.count; crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, sizeof(qdev->rom->client_monitors_config)); if (crc != qdev->rom->client_monitors_config_crc) { qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc, sizeof(qdev->rom->client_monitors_config), qdev->rom->client_monitors_config_crc); return 1; } if (num_monitors > qdev->monitors_config->max_allowed) { DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n", qdev->monitors_config->max_allowed, num_monitors); num_monitors = qdev->monitors_config->max_allowed; } else { num_monitors = qdev->rom->client_monitors_config.count; } qxl_alloc_client_monitors_config(qdev, num_monitors); /* we copy max from the client but it isn't used */ qdev->client_monitors_config->max_allowed = qdev->monitors_config->max_allowed; for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) { struct qxl_urect *c_rect = &qdev->rom->client_monitors_config.heads[i]; struct qxl_head *client_head = &qdev->client_monitors_config->heads[i]; client_head->x 
= c_rect->left; client_head->y = c_rect->top; client_head->width = c_rect->right - c_rect->left; client_head->height = c_rect->bottom - c_rect->top; client_head->surface_id = 0; client_head->id = i; client_head->flags = 0; DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height, client_head->x, client_head->y); } return 0; } void qxl_display_read_client_monitors_config(struct qxl_device *qdev) { while (qxl_display_copy_rom_client_monitors_config(qdev)) { qxl_io_log(qdev, "failed crc check for client_monitors_config," " retrying\n"); } if (!drm_helper_hpd_irq_event(qdev->ddev)) { /* notify that the monitor configuration changed, to adjust at the arbitrary resolution */ drm_kms_helper_hotplug_event(qdev->ddev); } } static int qxl_add_monitors_config_modes(struct drm_connector *connector, unsigned *pwidth, unsigned *pheight) { struct drm_device *dev = connector->dev; struct qxl_device *qdev = dev->dev_private; struct qxl_output *output = drm_connector_to_qxl_output(connector); int h = output->index; struct drm_display_mode *mode = NULL; struct qxl_head *head; if (!qdev->client_monitors_config) return 0; head = &qdev->client_monitors_config->heads[h]; mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, false); mode->type |= DRM_MODE_TYPE_PREFERRED; *pwidth = head->width; *pheight = head->height; drm_mode_probed_add(connector, mode); /* remember the last custom size for mode validation */ qdev->monitors_config_width = mode->hdisplay; qdev->monitors_config_height = mode->vdisplay; return 1; } static struct mode_size { int w; int h; } common_modes[] = { { 640, 480}, { 720, 480}, { 800, 600}, { 848, 480}, {1024, 768}, {1152, 768}, {1280, 720}, {1280, 800}, {1280, 854}, {1280, 960}, {1280, 1024}, {1440, 900}, {1400, 1050}, {1680, 1050}, {1600, 1200}, {1920, 1080}, {1920, 1200} }; static int qxl_add_common_modes(struct drm_connector *connector, unsigned pwidth, unsigned pheight) { struct drm_device *dev = connector->dev; struct 
drm_display_mode *mode = NULL; int i; for (i = 0; i < ARRAY_SIZE(common_modes); i++) { mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); if (common_modes[i].w == pwidth && common_modes[i].h == pheight) mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); } return i - 1; } static void qxl_crtc_destroy(struct drm_crtc *crtc) { struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); drm_crtc_cleanup(crtc); kfree(qxl_crtc); } static int qxl_hide_cursor(struct qxl_device *qdev) { struct qxl_release *release; struct qxl_cursor_cmd *cmd; int ret; ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, &release, NULL); if (ret) return ret; ret = qxl_release_reserve_list(release, true); if (ret) { qxl_release_free(qdev, release); return ret; } cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); return 0; } static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y) { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct drm_gem_object *obj; struct qxl_cursor *cursor; struct qxl_cursor_cmd *cmd; struct qxl_bo *cursor_bo, *user_bo; struct qxl_release *release; void *user_ptr; int size = 64*64*4; int ret = 0; if (!handle) return qxl_hide_cursor(qdev); obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { DRM_ERROR("cannot find cursor object\n"); return -ENOENT; } user_bo = gem_to_qxl_bo(obj); ret = qxl_bo_reserve(user_bo, false); if (ret) goto out_unref; ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); qxl_bo_unreserve(user_bo); if (ret) goto out_unref; ret = qxl_bo_kmap(user_bo, &user_ptr); if (ret) goto 
out_unpin; ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, &release, NULL); if (ret) goto out_kunmap; ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, &cursor_bo); if (ret) goto out_free_release; ret = qxl_release_reserve_list(release, false); if (ret) goto out_free_bo; ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); if (ret) goto out_backoff; cursor->header.unique = 0; cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; cursor->header.width = 64; cursor->header.height = 64; cursor->header.hot_spot_x = hot_x; cursor->header.hot_spot_y = hot_y; cursor->data_size = size; cursor->chunk.next_chunk = 0; cursor->chunk.prev_chunk = 0; cursor->chunk.data_size = size; memcpy(cursor->chunk.data, user_ptr, size); qxl_bo_kunmap(cursor_bo); qxl_bo_kunmap(user_bo); cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; cmd->u.set.position.x = qcrtc->cur_x; cmd->u.set.position.y = qcrtc->cur_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); /* finish with the userspace bo */ ret = qxl_bo_reserve(user_bo, false); if (!ret) { qxl_bo_unpin(user_bo); qxl_bo_unreserve(user_bo); } drm_gem_object_unreference_unlocked(obj); qxl_bo_unref(&cursor_bo); return ret; out_backoff: qxl_release_backoff_reserve_list(release); out_free_bo: qxl_bo_unref(&cursor_bo); out_free_release: qxl_release_free(qdev, release); out_kunmap: qxl_bo_kunmap(user_bo); out_unpin: qxl_bo_unpin(user_bo); out_unref: drm_gem_object_unreference_unlocked(obj); return ret; } static int qxl_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct qxl_release *release; struct qxl_cursor_cmd *cmd; int ret; ret 
= qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, &release, NULL); if (ret) return ret; ret = qxl_release_reserve_list(release, true); if (ret) { qxl_release_free(qdev, release); return ret; } qcrtc->cur_x = x; qcrtc->cur_y = y; cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; cmd->u.position.x = qcrtc->cur_x; cmd->u.position.y = qcrtc->cur_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); return 0; } static const struct drm_crtc_funcs qxl_crtc_funcs = { .cursor_set2 = qxl_crtc_cursor_set2, .cursor_move = qxl_crtc_cursor_move, .set_config = drm_crtc_helper_set_config, .destroy = qxl_crtc_destroy, }; static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); if (qxl_fb->obj) drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); } static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { /* TODO: vmwgfx where this was cribbed from had locking. Why? 
*/ struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); struct qxl_device *qdev = qxl_fb->base.dev->dev_private; struct drm_clip_rect norect; struct qxl_bo *qobj; int inc = 1; qobj = gem_to_qxl_bo(qxl_fb->obj); /* if we aren't primary surface ignore this */ if (!qobj->is_primary) return 0; if (!num_clips) { num_clips = 1; clips = &norect; norect.x1 = norect.y1 = 0; norect.x2 = fb->width; norect.y2 = fb->height; } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { num_clips /= 2; inc = 2; /* skip source rects */ } qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, clips, num_clips, inc); return 0; } static const struct drm_framebuffer_funcs qxl_fb_funcs = { .destroy = qxl_user_framebuffer_destroy, .dirty = qxl_framebuffer_surface_dirty, /* TODO? * .create_handle = qxl_user_framebuffer_create_handle, */ }; int qxl_framebuffer_init(struct drm_device *dev, struct qxl_framebuffer *qfb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; qfb->obj = obj; ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs); if (ret) { qfb->obj = NULL; return ret; } drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd); return 0; } static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode) { } static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n", __func__, mode->hdisplay, mode->vdisplay, adjusted_mode->hdisplay, adjusted_mode->vdisplay); return true; } void qxl_send_monitors_config(struct qxl_device *qdev) { int i; BUG_ON(!qdev->ram_header->monitors_config); if (qdev->monitors_config->count == 0) { qxl_io_log(qdev, "%s: 0 monitors??\n", __func__); return; } for (i = 0 ; i < qdev->monitors_config->count ; ++i) { struct qxl_head *head = &qdev->monitors_config->heads[i]; if (head->y > 8192 || head->x > 8192 || head->width > 8192 || head->height > 8192) { 
DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", i, head->width, head->height, head->x, head->y); return; } } qxl_io_monitors_config(qdev); } static void qxl_monitors_config_set(struct qxl_device *qdev, int index, unsigned x, unsigned y, unsigned width, unsigned height, unsigned surf_id) { DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y); qdev->monitors_config->heads[index].x = x; qdev->monitors_config->heads[index].y = y; qdev->monitors_config->heads[index].width = width; qdev->monitors_config->heads[index].height = height; qdev->monitors_config->heads[index].surface_id = surf_id; } static int qxl_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; struct qxl_mode *m = (void *)mode->private; struct qxl_framebuffer *qfb; struct qxl_bo *bo, *old_bo = NULL; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); bool recreate_primary = false; int ret; int surf_id; if (!crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } if (old_fb) { qfb = to_qxl_framebuffer(old_fb); old_bo = gem_to_qxl_bo(qfb->obj); } qfb = to_qxl_framebuffer(crtc->fb); bo = gem_to_qxl_bo(qfb->obj); if (!m) /* and do we care? 
*/ DRM_DEBUG("%dx%d: not a native mode\n", x, y); else DRM_DEBUG("%dx%d: qxl id %d\n", mode->hdisplay, mode->vdisplay, m->id); DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n", x, y, mode->hdisplay, mode->vdisplay, adjusted_mode->hdisplay, adjusted_mode->vdisplay); if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); return -EINVAL; } ret = qxl_bo_reserve(bo, false); if (ret != 0) return ret; ret = qxl_bo_pin(bo, bo->type, NULL); if (ret != 0) { qxl_bo_unreserve(bo); return -EINVAL; } qxl_bo_unreserve(bo); if (recreate_primary) { qxl_io_destroy_primary(qdev); qxl_io_log(qdev, "recreate primary: %dx%d,%d,%d\n", bo->surf.width, bo->surf.height, bo->surf.stride, bo->surf.format); qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; surf_id = 0; } else { surf_id = bo->surface_id; } if (old_bo && old_bo != bo) { old_bo->is_primary = false; ret = qxl_bo_reserve(old_bo, false); qxl_bo_unpin(old_bo); qxl_bo_unreserve(old_bo); } qxl_monitors_config_set(qdev, qcrtc->index, x, y, mode->hdisplay, mode->vdisplay, surf_id); return 0; } static void qxl_crtc_prepare(struct drm_crtc *crtc) { DRM_DEBUG("current: %dx%d+%d+%d (%d).\n", crtc->mode.hdisplay, crtc->mode.vdisplay, crtc->x, crtc->y, crtc->enabled); } static void qxl_crtc_commit(struct drm_crtc *crtc) { DRM_DEBUG("\n"); } static void qxl_crtc_disable(struct drm_crtc *crtc) { struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; if (crtc->fb) { struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->fb); struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); int ret; ret = qxl_bo_reserve(bo, false); qxl_bo_unpin(bo); qxl_bo_unreserve(bo); crtc->fb = NULL; } qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0); qxl_send_monitors_config(qdev); } static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { .dpms = qxl_crtc_dpms, .disable = 
qxl_crtc_disable, .mode_fixup = qxl_crtc_mode_fixup, .mode_set = qxl_crtc_mode_set, .prepare = qxl_crtc_prepare, .commit = qxl_crtc_commit, }; static int qdev_crtc_init(struct drm_device *dev, int crtc_id) { struct qxl_crtc *qxl_crtc; qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL); if (!qxl_crtc) return -ENOMEM; drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); qxl_crtc->index = crtc_id; drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); return 0; } static void qxl_enc_dpms(struct drm_encoder *encoder, int mode) { DRM_DEBUG("\n"); } static bool qxl_enc_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { DRM_DEBUG("\n"); return true; } static void qxl_enc_prepare(struct drm_encoder *encoder) { DRM_DEBUG("\n"); } static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev, struct drm_encoder *encoder) { int i; struct qxl_output *output = drm_encoder_to_qxl_output(encoder); struct qxl_head *head; struct drm_display_mode *mode; BUG_ON(!encoder); /* TODO: ugly, do better */ i = output->index; if (!qdev->monitors_config || qdev->monitors_config->max_allowed <= i) { DRM_ERROR( "head number too large or missing monitors config: %p, %d", qdev->monitors_config, qdev->monitors_config ? 
qdev->monitors_config->max_allowed : -1); return; } if (!encoder->crtc) { DRM_ERROR("missing crtc on encoder %p\n", encoder); return; } if (i != 0) DRM_DEBUG("missing for multiple monitors: no head holes\n"); head = &qdev->monitors_config->heads[i]; head->id = i; if (encoder->crtc->enabled) { mode = &encoder->crtc->mode; head->width = mode->hdisplay; head->height = mode->vdisplay; head->x = encoder->crtc->x; head->y = encoder->crtc->y; if (qdev->monitors_config->count < i + 1) qdev->monitors_config->count = i + 1; } else { head->width = 0; head->height = 0; head->x = 0; head->y = 0; } DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n", i, head->x, head->y, head->width, head->height, qdev->monitors_config->count); head->flags = 0; /* TODO - somewhere else to call this for multiple monitors * (config_commit?) */ qxl_send_monitors_config(qdev); } static void qxl_enc_commit(struct drm_encoder *encoder) { struct qxl_device *qdev = encoder->dev->dev_private; qxl_write_monitors_config_for_encoder(qdev, encoder); DRM_DEBUG("\n"); } static void qxl_enc_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { DRM_DEBUG("\n"); } static int qxl_conn_get_modes(struct drm_connector *connector) { int ret = 0; struct qxl_device *qdev = connector->dev->dev_private; unsigned pwidth = 1024; unsigned pheight = 768; DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config); /* TODO: what should we do here? only show the configured modes for the * device, or allow the full list, or both? 
*/ if (qdev->monitors_config && qdev->monitors_config->count) { ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight); if (ret < 0) return ret; } ret += qxl_add_common_modes(connector, pwidth, pheight); return ret; } static int qxl_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *ddev = connector->dev; struct qxl_device *qdev = ddev->dev_private; int i; /* TODO: is this called for user defined modes? (xrandr --add-mode) * TODO: check that the mode fits in the framebuffer */ if(qdev->monitors_config_width == mode->hdisplay && qdev->monitors_config_height == mode->vdisplay) return MODE_OK; for (i = 0; i < ARRAY_SIZE(common_modes); i++) { if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay) return MODE_OK; } return MODE_BAD; } static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) { struct qxl_output *qxl_output = drm_connector_to_qxl_output(connector); DRM_DEBUG("\n"); return &qxl_output->enc; } static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = { .dpms = qxl_enc_dpms, .mode_fixup = qxl_enc_mode_fixup, .prepare = qxl_enc_prepare, .mode_set = qxl_enc_mode_set, .commit = qxl_enc_commit, }; static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = { .get_modes = qxl_conn_get_modes, .mode_valid = qxl_conn_mode_valid, .best_encoder = qxl_best_encoder, }; static void qxl_conn_save(struct drm_connector *connector) { DRM_DEBUG("\n"); } static void qxl_conn_restore(struct drm_connector *connector) { DRM_DEBUG("\n"); } static enum drm_connector_status qxl_conn_detect( struct drm_connector *connector, bool force) { struct qxl_output *output = drm_connector_to_qxl_output(connector); struct drm_device *ddev = connector->dev; struct qxl_device *qdev = ddev->dev_private; bool connected = false; /* The first monitor is always connected */ if (!qdev->client_monitors_config) { if (output->index == 0) connected = true; } else connected = 
qdev->client_monitors_config->count > output->index && qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); DRM_DEBUG("#%d connected: %d\n", output->index, connected); if (!connected) qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0); return connected ? connector_status_connected : connector_status_disconnected; } static int qxl_conn_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { DRM_DEBUG("\n"); return 0; } static void qxl_conn_destroy(struct drm_connector *connector) { struct qxl_output *qxl_output = drm_connector_to_qxl_output(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(qxl_output); } static const struct drm_connector_funcs qxl_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = qxl_conn_save, .restore = qxl_conn_restore, .detect = qxl_conn_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = qxl_conn_set_property, .destroy = qxl_conn_destroy, }; static void qxl_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs qxl_enc_funcs = { .destroy = qxl_enc_destroy, }; static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev) { if (qdev->hotplug_mode_update_property) return 0; qdev->hotplug_mode_update_property = drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE, "hotplug_mode_update", 0, 1); return 0; } static int qdev_output_init(struct drm_device *dev, int num_output) { struct qxl_device *qdev = dev->dev_private; struct qxl_output *qxl_output; struct drm_connector *connector; struct drm_encoder *encoder; qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL); if (!qxl_output) return -ENOMEM; qxl_output->index = num_output; connector = &qxl_output->base; encoder = &qxl_output->enc; drm_connector_init(dev, &qxl_output->base, &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); drm_encoder_init(dev, 
&qxl_output->enc, &qxl_enc_funcs, DRM_MODE_ENCODER_VIRTUAL); /* we get HPD via client monitors config */ connector->polled = DRM_CONNECTOR_POLL_HPD; encoder->possible_crtcs = 1 << num_output; drm_mode_connector_attach_encoder(&qxl_output->base, &qxl_output->enc); drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs); drm_connector_helper_add(connector, &qxl_connector_helper_funcs); drm_object_attach_property(&connector->base, qdev->hotplug_mode_update_property, 0); drm_sysfs_connector_add(connector); return 0; } static struct drm_framebuffer * qxl_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct qxl_framebuffer *qxl_fb; int ret; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL); if (qxl_fb == NULL) return NULL; ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj); if (ret) { kfree(qxl_fb); drm_gem_object_unreference_unlocked(obj); return NULL; } return &qxl_fb->base; } static const struct drm_mode_config_funcs qxl_mode_funcs = { .fb_create = qxl_user_framebuffer_create, }; int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; int max_allowed = qxl_num_crtc; int monitors_config_size = sizeof(struct qxl_monitors_config) + max_allowed * sizeof(struct qxl_head); ret = qxl_gem_object_create(qdev, monitors_config_size, 0, QXL_GEM_DOMAIN_VRAM, false, false, NULL, &gobj); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); return -ENOMEM; } qdev->monitors_config_bo = gem_to_qxl_bo(gobj); ret = qxl_bo_reserve(qdev->monitors_config_bo, false); if (ret) return ret; ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL); if (ret) { qxl_bo_unreserve(qdev->monitors_config_bo); return ret; } qxl_bo_unreserve(qdev->monitors_config_bo); qxl_bo_kmap(qdev->monitors_config_bo, NULL); qdev->monitors_config = qdev->monitors_config_bo->kptr; 
qdev->ram_header->monitors_config = qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0); memset(qdev->monitors_config, 0, monitors_config_size); qdev->monitors_config->max_allowed = max_allowed; return 0; } int qxl_destroy_monitors_object(struct qxl_device *qdev) { int ret; qdev->monitors_config = NULL; qdev->ram_header->monitors_config = 0; qxl_bo_kunmap(qdev->monitors_config_bo); ret = qxl_bo_reserve(qdev->monitors_config_bo, false); if (ret) return ret; qxl_bo_unpin(qdev->monitors_config_bo); qxl_bo_unreserve(qdev->monitors_config_bo); qxl_bo_unref(&qdev->monitors_config_bo); return 0; } int qxl_modeset_init(struct qxl_device *qdev) { int i; int ret; drm_mode_config_init(qdev->ddev); ret = qxl_create_monitors_object(qdev); if (ret) return ret; qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs; /* modes will be validated against the framebuffer size */ qdev->ddev->mode_config.min_width = 320; qdev->ddev->mode_config.min_height = 200; qdev->ddev->mode_config.max_width = 8192; qdev->ddev->mode_config.max_height = 8192; qdev->ddev->mode_config.fb_base = qdev->vram_base; qxl_mode_create_hotplug_mode_update_property(qdev); for (i = 0 ; i < qxl_num_crtc; ++i) { qdev_crtc_init(qdev->ddev, i); qdev_output_init(qdev->ddev, i); } qdev->mode_info.mode_config_initialized = true; /* primary surface must be created by this point, to allow * issuing command queue commands and having them read by * spice server. */ qxl_fbdev_init(qdev); return 0; } void qxl_modeset_fini(struct qxl_device *qdev) { qxl_fbdev_fini(qdev); qxl_destroy_monitors_object(qdev); if (qdev->mode_info.mode_config_initialized) { drm_mode_config_cleanup(qdev->ddev); qdev->mode_info.mode_config_initialized = false; } }
180209.c
/*
 * Generated by asn1c-0.9.29 (http://lionet.info/asn1c)
 * From ASN.1 module "EUTRA-UE-Variables"
 * 	found in "/home/lixh/ue_folder/openair2/RRC/LTE/MESSAGES/asn1c/ASN1_files/lte-rrc-14.7.0.asn1"
 * 	`asn1c -pdu=all -fcompound-names -gen-PER -no-gen-OER -no-gen-example -D /home/lixh/ue_folder/cmake_targets/lte_build_oai/build/CMakeFiles/RRC_Rel14`
 *
 * NOTE: auto-generated file. Do not edit by hand; regenerate with asn1c if
 * the ASN.1 source changes. It defines the run-time type descriptor for the
 * LTE VarMeasReportList SEQUENCE OF type.
 */

#include "LTE_VarMeasReportList.h"

/* PER (Packed Encoding Rules) constraints for the list:
 * - value constraint: unconstrained (first row)
 * - size constraint: SIZE(1..32), encoded in 5 bits (second row) */
static asn_per_constraints_t asn_PER_type_LTE_VarMeasReportList_constr_1 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  32 }	/* (SIZE(1..32)) */,
	0, 0	/* No PER value map */
};
/* Member table: a single element type (LTE_VarMeasReport) for the
 * SEQUENCE OF; tagged UNIVERSAL 16 (SEQUENCE). */
static asn_TYPE_member_t asn_MBR_LTE_VarMeasReportList_1[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (16 << 2)),
		0,
		&asn_DEF_LTE_VarMeasReport,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
/* Outer BER tag for the list itself: UNIVERSAL 16 (SEQUENCE). */
static const ber_tlv_tag_t asn_DEF_LTE_VarMeasReportList_tags_1[] = {
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
/* SET OF / SEQUENCE OF specifics: struct size and context offset used by
 * the generic asn_OP_SEQUENCE_OF operations. */
static asn_SET_OF_specifics_t asn_SPC_LTE_VarMeasReportList_specs_1 = {
	sizeof(struct LTE_VarMeasReportList),
	offsetof(struct LTE_VarMeasReportList, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
/* Public type descriptor consumed by the asn1c runtime
 * (encoders/decoders dispatch through asn_OP_SEQUENCE_OF). */
asn_TYPE_descriptor_t asn_DEF_LTE_VarMeasReportList = {
	"VarMeasReportList",
	"VarMeasReportList",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_LTE_VarMeasReportList_tags_1,
	sizeof(asn_DEF_LTE_VarMeasReportList_tags_1)
		/sizeof(asn_DEF_LTE_VarMeasReportList_tags_1[0]), /* 1 */
	asn_DEF_LTE_VarMeasReportList_tags_1,	/* Same as above */
	sizeof(asn_DEF_LTE_VarMeasReportList_tags_1)
		/sizeof(asn_DEF_LTE_VarMeasReportList_tags_1[0]), /* 1 */
	{ 0, &asn_PER_type_LTE_VarMeasReportList_constr_1, SEQUENCE_OF_constraint },
	asn_MBR_LTE_VarMeasReportList_1,
	1,	/* Single element */
	&asn_SPC_LTE_VarMeasReportList_specs_1	/* Additional specs */
};
321695.c
/** ****************************************************************************** * File Name : I2C.c * Description : This file provides code for the configuration * of the I2C instances. ****************************************************************************** ** This notice applies to any and all portions of this file * that are not between comment pairs USER CODE BEGIN and * USER CODE END. Other portions of this file, whether * inserted by the user or by software development tools * are owned by their respective copyright owners. * * COPYRIGHT(c) 2018 STMicroelectronics * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  ******************************************************************************
  */

/* NOTE: STM32CubeMX-generated file. Only edit between USER CODE markers,
 * otherwise regeneration will discard changes. */

/* Includes ------------------------------------------------------------------*/
#include "i2c.h"
#include "gpio.h"

/* USER CODE BEGIN 0 */

/* USER CODE END 0 */

/* Handle for the I2C1 peripheral; shared with the rest of the application. */
I2C_HandleTypeDef hi2c1;

/* I2C1 init function */
/* Configures I2C1 as a 7-bit-address master-capable peripheral with the
 * analogue noise filter enabled and the digital filter disabled (0 stages).
 * On any HAL failure, control is handed to the project error handler. */
void MX_I2C1_Init(void)
{

  hi2c1.Instance = I2C1;
  /* TIMINGR value computed by CubeMX; resulting bus speed depends on the
   * clock tree configured elsewhere - not verifiable from this file. */
  hi2c1.Init.Timing = 0x2000090E;
  hi2c1.Init.OwnAddress1 = 0;
  hi2c1.Init.AddressingMode = I2C_ADDRESSINGMODE_7BIT;
  hi2c1.Init.DualAddressMode = I2C_DUALADDRESS_DISABLE;
  hi2c1.Init.OwnAddress2 = 0;
  hi2c1.Init.OwnAddress2Masks = I2C_OA2_NOMASK;
  hi2c1.Init.GeneralCallMode = I2C_GENERALCALL_DISABLE;
  hi2c1.Init.NoStretchMode = I2C_NOSTRETCH_DISABLE;
  if (HAL_I2C_Init(&hi2c1) != HAL_OK)
  {
    _Error_Handler(__FILE__, __LINE__);
  }

    /**Configure Analogue filter
    */
  if (HAL_I2CEx_ConfigAnalogFilter(&hi2c1, I2C_ANALOGFILTER_ENABLE) != HAL_OK)
  {
    _Error_Handler(__FILE__, __LINE__);
  }

    /**Configure Digital filter
    */
  if (HAL_I2CEx_ConfigDigitalFilter(&hi2c1, 0) != HAL_OK)
  {
    _Error_Handler(__FILE__, __LINE__);
  }

}

/* MSP (low-level) init callback invoked by HAL_I2C_Init(): routes PB8/PB9
 * to I2C1 (open-drain, pull-up, AF4) and enables the I2C1 kernel clock.
 * NOTE(review): GPIOB port clock is assumed to be enabled elsewhere
 * (MX_GPIO_Init in gpio.c) before this runs - confirm against main(). */
void HAL_I2C_MspInit(I2C_HandleTypeDef* i2cHandle)
{

  GPIO_InitTypeDef GPIO_InitStruct;
  if(i2cHandle->Instance==I2C1)
  {
  /* USER CODE BEGIN I2C1_MspInit 0 */

  /* USER CODE END I2C1_MspInit 0 */

    /**I2C1 GPIO Configuration
    PB8     ------> I2C1_SCL
    PB9     ------> I2C1_SDA
    */
    GPIO_InitStruct.Pin = GPIO_PIN_8|GPIO_PIN_9;
    GPIO_InitStruct.Mode = GPIO_MODE_AF_OD;
    GPIO_InitStruct.Pull = GPIO_PULLUP;
    GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH;
    GPIO_InitStruct.Alternate = GPIO_AF4_I2C1;
    HAL_GPIO_Init(GPIOB, &GPIO_InitStruct);

    /* I2C1 clock enable */
    __HAL_RCC_I2C1_CLK_ENABLE();
  /* USER CODE BEGIN I2C1_MspInit 1 */

  /* USER CODE END I2C1_MspInit 1 */
  }
}

/* MSP de-init callback invoked by HAL_I2C_DeInit(): gates the I2C1 clock
 * and returns PB8/PB9 to their reset state. */
void HAL_I2C_MspDeInit(I2C_HandleTypeDef* i2cHandle)
{

  if(i2cHandle->Instance==I2C1)
  {
  /* USER CODE BEGIN I2C1_MspDeInit 0 */

  /* USER CODE END I2C1_MspDeInit 0 */
    /* Peripheral clock disable */
    __HAL_RCC_I2C1_CLK_DISABLE();

    /**I2C1 GPIO Configuration
    PB8     ------> I2C1_SCL
    PB9     ------> I2C1_SDA
    */
    HAL_GPIO_DeInit(GPIOB, GPIO_PIN_8|GPIO_PIN_9);

  /* USER CODE BEGIN I2C1_MspDeInit 1 */

  /* USER CODE END I2C1_MspDeInit 1 */
  }
}

/* USER CODE BEGIN 1 */

/* USER CODE END 1 */

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
741777.c
/*
Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* VAX/DEC CMS REPLACEMENT HISTORY, Element MdsPk.C */
/* *9     2-DEC-1994 16:53:57 JAS "my head waw wedged" */
/* *8    10-NOV-1994 11:34:21 JAS "make it portable to unix" */
/* *7    24-OCT-1994 11:45:58 TWF "Take out ifdef vax" */
/* *6     3-NOV-1993 13:39:41 KKLARE "return after ots$move to avoid clobber of
 * *ppack" */
/* *5     2-NOV-1993 09:38:00 TWF "Fix bug (if (off))" */
/* *4    10-MAY-1993 14:37:05 TWF "Make it shareable" */
/* *3     1-MAR-1993 08:42:11 TWF "Use standard indentation" */
/* *2     3-FEB-1993 11:11:59 TWF "Make it portable" */
/* *1     4-JAN-1993 14:52:41 TWF "CC MdsPk and MdsUnpk for compression" */
/* VAX/DEC CMS REPLACEMENT HISTORY, Element MdsPk.C */
/*      MdsPk.C
        Pack or unpack bits.
        MdsPk(&nbits,&nitems,&pack,&items,&bit)
        Ken Klare, LANL CTR-7   (c)1990
        Limitation: clears high bits of final word.
        Limitation: 32-bit two's-complement longs
        Optimization for Vax: negative left shift is right arithmetic shift.
        VAXstation 3100, 1000 interates, 1000 items
                2       10      31      32 bits
        Pack, signed or unsigned bits and us/item:
                3.63    4.20    5.75    4.32
        Unpack, unsigned bits and us/item:
                3.30    4.04    5.94    4.14
        Unpack, signed bits and us/item:
                4.23    4.94    6.73    4.14
        Pack or unpack 32-bit on byte boundaries 0.88, on long boundary 0.73
        For reference, Pack Macro timings       3.25    3.50    3.40
        Unpack Macro timings                    3.01    3.07    3.02
*/
#include <STATICdef.h>
#include <mdsplus/mdsconfig.h>

/* masks[i] has the low i bits set (masks[0] == 0, masks[32] == all ones);
 * used to isolate an i-bit field within a 32-bit word. */
STATIC_CONSTANT unsigned int masks[33] = {
    0,          0x1,        0x3,       0x7,       0xf,        0x1f,
    0x3f,       0x7f,       0xff,      0x1ff,     0x3ff,      0x7ff,
    0xfff,      0x1fff,     0x3fff,    0x7fff,    0xffff,     0x1ffff,
    0x3ffff,    0x7ffff,    0xfffff,   0x1fffff,  0x3fffff,   0x7fffff,
    0xffffff,   0x1ffffff,  0x3ffffff, 0x7ffffff, 0xfffffff,  0x1fffffff,
    0x3fffffff, 0x7fffffff, 0xffffffff,
};
#include <mdsdescrip.h>
#include <string.h>

/* Read a 32-bit word from the (little-endian on disk/wire) buffer at in_c
 * into host byte order: byte-reverses on big-endian hosts, straight copy
 * otherwise. Copying byte-by-byte also tolerates unaligned in_c. */
STATIC_ROUTINE int SwapBytes(char *in_c)
{
  int out;
  char *out_c = (char *)&out;
  int i;
#ifdef WORDS_BIGENDIAN
  for (i = 0; i < 4; i++)
    out_c[i] = in_c[3 - i];
#else
  for (i = 0; i < 4; i++)
    out_c[i] = in_c[i];
#endif
  return out;
}

/* Convenience macro: fetch the word at the call site's local `ppack`
 * pointer in host order. NOTE: deliberately captures the local variable
 * named `ppack`; only usable inside MdsPk/MdsUnpk-style functions. */
#define getppack SwapBytes((char *)ppack)
/* MdsPk: pack *nitems_ptr integers from items[] into the bit stream pack[],
 * each occupying |*nbits_ptr| bits, starting at bit offset *bit_ptr.
 * *bit_ptr is advanced by size*nitems on success (left untouched when
 * size == 0 or nitems <= 0). The sign of *nbits_ptr is ignored for packing
 * (only the field width matters). Words are emitted in little-endian byte
 * order regardless of host endianness (see the WORDS_BIGENDIAN branches).
 * Limitations (from the file header): clears high bits of the final word;
 * assumes 32-bit two's-complement integers. */
void MdsPk(signed char *nbits_ptr, int *nitems_ptr, int pack[], int items[],
           int *bit_ptr)
{
  int nbits = *nbits_ptr;
  int nitems = *nitems_ptr;
  int *ppack = &pack[*bit_ptr >> 5];  /* word containing the starting bit */
  int *pitems = &items[0];
  int size = nbits >= 0 ? nbits : -nbits;  /* field width in bits */
  int off = *bit_ptr & 31;                 /* bit offset within first word */
  unsigned int mask;
  int test;
#ifdef WORDS_BIGENDIAN
  int i, j;
  signed char *pin;
  signed char *pout;
  unsigned int hold = 0;
  /* Preload the already-written low bits of the first (partial) word so
   * they are preserved when the word is rewritten. */
  if (off) {
    for (i = 0; i < 4; i++)
      ((char *)&hold)[i] = ((char *)ppack)[3 - i];
    hold = hold & masks[off];
  }
#else
  /* hold accumulates bits until a full 32-bit word can be flushed. */
  unsigned int hold = off ? *(unsigned int *)ppack & masks[off] : 0;
#endif
  if (size == 0 || nitems <= 0)
    return;
  *bit_ptr += size * nitems;
  if (size == 32) {
    /* Full-word items: byte-aligned start allows a bulk copy. */
    if ((off & 7) == 0) {
#ifdef WORDS_BIGENDIAN
      /* Byte-swap each word into the output buffer. */
      for (i = 0, pout = ((signed char *)ppack) + (off >> 3),
          pin = (signed char *)pitems;
           i < nitems; i++, pout += 4, pin += 4)
        for (j = 0; j < 4; j++)
          pout[j] = pin[3 - j];
#else
      memcpy(((char *)ppack) + (off >> 3), pitems,
             sizeof(int) * (size_t)nitems);
#endif
      return;
    } else
      /* Unaligned full words: each item straddles two output words. */
      for (; --nitems >= 0;) {
        hold |= *(unsigned int *)pitems << off;
#ifdef __APPLE__
        *ppack++ = SwapBytes((char *)&hold);
#else
#ifdef WORDS_BIGENDIAN
        for (i = 0; i < 4; i++)
          ((char *)ppack)[i] = ((char *)&hold)[3 - i];
        ppack++;
#else
        *ppack++ = (int)hold;
#endif
#endif
        /* Carry the high (32-off) bits of the item into the next word. */
        hold = *(unsigned int *)pitems++ >> (32 - off);
      }
  } else {
    /* General case: fields narrower than a word. */
    mask = masks[size];
    test = 32 - size;
    for (; --nitems >= 0; ++pitems) {
      hold |= (mask & *(unsigned int *)pitems) << off;
      if (off >= test) {
        /* hold is full: flush one output word (little-endian). */
#ifdef __APPLE__
        *ppack++ = SwapBytes((char *)&hold);
#else
#ifdef WORDS_BIGENDIAN
        for (i = 0; i < 4; i++)
          ((char *)ppack)[i] = ((char *)&hold)[3 - i];
        ppack++;
#else
        *ppack++ = (int)hold;
#endif
#endif
        /* Keep the bits of the current item that spilled past the word. */
        hold = (mask & *(unsigned int *)pitems) >> (32 - off);
        off -= test;
      } else
        off += size;
    }
  }
  /* Flush the trailing partial word. Bits above `off` in this word are
   * overwritten with zeros (the documented "clears high bits" limitation). */
  if (off)
#ifdef WORDS_BIGENDIAN
    for (i = 0; i < 4; i++)
      ((char *)ppack)[i] = ((char *)&hold)[3 - i];
#else
    *ppack = (int)hold;
#endif
  return;
}

/*-------------------------------------------------------------*/
/* MdsUnpk: inverse of MdsPk. Extracts *nitems_ptr fields of |*nbits_ptr|
 * bits from the little-endian bit stream pack[], starting at bit *bit_ptr,
 * into items[]. Field interpretation depends on the sign of *nbits_ptr:
 *   nbits < 0 -> sign-extend each field; nbits > 0 -> zero-extend;
 *   nbits == 0 -> fill items[] with zeros.
 * NOTE(review): unlike MdsPk, *bit_ptr is advanced unconditionally, even
 * when size == 0 - callers appear to rely on symmetric offsets; confirm. */
void MdsUnpk(signed char *nbits_ptr, int *nitems_ptr, int pack[], int items[],
             int *bit_ptr)
{
  int nbits = *nbits_ptr;
  int nitems = *nitems_ptr;
  int *ppack = &pack[*bit_ptr >> 5];  /* word containing the starting bit */
  int *pitems = &items[0];
  int size = nbits >= 0 ? nbits : -nbits;  /* field width in bits */
  int off = *bit_ptr & 31;                 /* bit offset within first word */
  unsigned int hold, full, max, mask = masks[size];
  int test = 32 - size;
  *bit_ptr += size * nitems;
  /*32-bit data*/
  if (test == 0) {
    if ((off & 7) == 0) {
      /* Byte-aligned full words: read each word via getppack (handles
       * endianness and unaligned addresses). */
      int i;
      ppack = (int *)(((char *)ppack) + (off >> 3));
      for (i = 0; i < nitems; i++, ppack++)
        pitems[i] = getppack;
    } else
      /* Unaligned full words: stitch each item from two stream words. */
      for (; --nitems >= 0;) {
        hold = ((unsigned int)getppack) >> off;
        ppack++;
        hold |= ((unsigned int)getppack) << (32 - off);
        *pitems++ = (int)hold;
      }
  }
  /*sign extended*/
  else if (nbits < 0) {
    full = mask + 1;      /* 2^size: subtracted to sign-extend */
    max = mask >> 1;      /* largest non-negative field value */
    for (; --nitems >= 0;) {
      if (off >= test) {
        /* Field straddles a word boundary. */
        hold = ((unsigned int)getppack) >> off;
        ppack++;
        hold |= (((unsigned int)getppack) << (32 - off)) & mask;
        if (hold > max)
          *pitems++ = (int)(hold - full);
        else
          *pitems++ = (int)hold;
        off -= test;
      } else {
        hold = (((unsigned int)getppack) >> off) & mask;
        if (hold > max)
          *pitems++ = (int)(hold - full);
        else
          *pitems++ = (int)hold;
        off += size;
      }
    }
  }
  /*zero extended*/
  else if (nbits > 0)
    for (; --nitems >= 0;) {
      if (off >= test) {
        /* Field straddles a word boundary. */
        hold = ((unsigned int)getppack) >> off;
        ppack++;
        hold |= (((unsigned int)getppack) << (32 - off)) & mask;
        *pitems++ = (int)hold;
        off -= test;
      } else {
        hold = (((unsigned int)getppack) >> off) & mask;
        *pitems++ = (int)hold;
        off += size;
      }
    }
  /*zero fill*/
  else
    for (; --nitems >= 0;)
      *pitems++ = 0;
  return;
}
875793.c
/* pru1_statemachine_dma.c: state machine for bus master DMA Copyright (c) 2018, Joerg Hoppe j_hoppe@t-online.de, www.retrocmp.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL JOERG HOPPE BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 29-jun-2019 JH rework: state returns ptr to next state func 12-nov-2018 JH entered beta phase Statemachines to execute multiple masterr DATO or DATI cycles. All references "PDP11BUS handbook 1979" Precondition: BBSY already asserted (arbitration got) Master reponds to INIT by stopping transactions. new state Start: setup dma mailbox setup with startaddr, wordcount, cycle, words[] Then sm_dma_init() ; sm_dma_state = DMA_STATE_RUNNING ; while(sm_dma_state != DMA_STATE_READY) sm_dma_service() ; state is 0 for OK, or 2 for timeout error. 
mailbox.dma.cur_addr is error location Speed: (clpru 2.2, -O3: Example: DATI, time SSYN- active -> (processing) -> MSYN inactive a) 2 states, buslatch_set/get function calls, TIMEOUT_SET/REACHED(75) -> 700ns b) 2 states, buslatch_set/get macro, TIMEOUT_SET/REACHED(75) -> 605ns c) 2 states, no TIMEOUT (75 already met) -> 430ns d) 1 marged state, no TIMEOUT ca. 350ns ! Uses single global timeout, don't run in parallel with other statemachines using timeout ! */ #include <stdlib.h> #include <stdint.h> #include <stdbool.h> #include "iopageregister.h" #include "mailbox.h" #include "pru1_buslatches.h" #include "pru1_utils.h" #include "pru1_timeouts.h" #include "pru1_statemachine_arbitration.h" #include "pru1_statemachine_dma.h" /* sometimes short timeout of 75 and 150ns are required * 75ns between state changes is not necessary, code runs longer * 150ns between state changes is necessary * Overhead for extra state and TIMEOUTSET/REACHED is 100ns */ statemachine_dma_t sm_dma; /********** Master DATA cycles **************/ // forwards ; static statemachine_state_func sm_dma_state_1(void); static statemachine_state_func sm_dma_state_11(void); static statemachine_state_func sm_dma_state_21(void); static statemachine_state_func sm_dma_state_99(void); // dma mailbox setup with // startaddr, wordcount, cycle, words[] ? // "cycle" must be QUNIBUS_CYCLE_DATI or QUNIBUS_CYCLE_DATO // Wait for BBSY, SACK already held asserted // Sorting between device and CPU transfers: unibusadapter request scheduler statemachine_state_func sm_dma_start() { // assert BBSY: latch[1], bit 6 // buslatches_setbits(1, BIT(6), BIT(6)); mailbox.dma.cur_addr = mailbox.dma.startaddr; sm_dma.dataptr = (uint16_t *) mailbox.dma.words; // point to start of data buffer sm_dma.cur_wordsleft = mailbox.dma.wordcount; mailbox.dma.cur_status = DMA_STATE_RUNNING; // do not wait for BBSY here. This is part of Arbitration. 
buslatches_setbits(1, BIT(6), BIT(6)); // assert BBSY // next call to sm_dma.state() starts state machine return (statemachine_state_func) &sm_dma_state_1; } /* // wait for BBSY deasserted, then assert static statemachine_state_func sm_dma_state_1() { if (buslatches_getbyte(1) & BIT(6)) return (statemachine_state_func) &sm_dma_state_1; // wait buslatches_setbits(1, BIT(6), BIT(6)); // assert BBSY return (statemachine_state_func) &sm_dma_state_1; } */ // place address and control bits onto bus, also data for DATO // If slave address is internal (= implemented by UniBone), // fast UNIBUS slave protocol is generated on the bus. static statemachine_state_func sm_dma_state_1() { uint32_t tmpval; uint32_t addr = mailbox.dma.cur_addr; // non-volatile snapshot uint16_t data; uint8_t buscycle = mailbox.dma.buscycle; // uint8_t page_table_entry; // BBSY released if (mailbox.dma.cur_status != DMA_STATE_RUNNING || mailbox.dma.wordcount == 0) return NULL; // still stopped if (sm_dma.cur_wordsleft == 1) { // deassert SACK, enable next arbitration cycle // deassert SACK before deassert BBSY // parallel to last word data transfer buslatches_setbits(1, BIT(5), 0); // SACK = latch[1], bit 5 } sm_dma.state_timeout = 0; //if (addr == 01046) // trigger address // PRU_DEBUG_PIN0(1) ; // trigger to LA. // if M9312 boot vector active: // Don't put addr on bus, read modified addr back and use, // But: use modifed addr internally, clear on external bus, // no UNIBUS member will do another DATI for it. 
addr |= address_overlay ; // addr0..7 = latch[2] buslatches_setbyte(2, addr & 0xff); // addr8..15 = latch[3] buslatches_setbyte(3, addr >> 8); // addr 16,17 = latch[4].0,1 // C0 = latch[4], bit 2 // C1 = latch[4], bit 3 // MSYN = latch[4], bit 4 // SSYN = latch[4], bit 5 if (QUNIBUS_CYCLE_IS_DATO(buscycle)) { bool internal; bool is_datob = (buscycle == QUNIBUS_CYCLE_DATOB); tmpval = (addr >> 16) & 3; if (is_datob) tmpval |= (BIT(3) | BIT(2)); // DATOB: c1=1, c0=1 else tmpval |= BIT(3); // DATO: c1=1, c0=0 // bit 4,5 == 0 -> MSYN,SSYN not asserted buslatches_setbits(4, 0x3f, tmpval); // write data. SSYN may still be active and cleared now? by sm_slave_10 etc? // data = mailbox.dma.words[sm_dma.cur_wordidx]; data = *sm_dma.dataptr; buslatches_setbyte(5, data & 0xff); // DATA[0..7] = latch[5] buslatches_setbyte(6, data >> 8); // DATA[8..15] = latch[6] // wait 150ns, but guaranteed to wait 150ns after SSYN inactive // prev SSYN & DATA may be still on bus, disturbes DATA while (buslatches_getbyte(4) & BIT(5)) ; // wait for SSYN inactive __delay_cycles(NANOSECS(UNIBUS_DMA_MASTER_PRE_MSYN_NS) - 10); // assume 10 cycles for buslatches_getbyte and address test // ADDR, CONTROL (and DATA) stable since 150ns, set MSYN // use 150ns delay to check for internal address // page_table_entry = PAGE_TABLE_ENTRY(deviceregisters,addr); // !!! optimizer may not move this around !!! // try "volatile internal_addr" (__asm(";---") may be rearanged) // MSYN = latch[4], bit 4 buslatches_setbits(4, BIT(4), BIT(4)); // master assert MSYN // DATO to internal slave (fast test). // write data into slave ( if (is_datob) { // A00=1: upper byte, A00=0: lower byte uint8_t b = (addr & 1) ? 
(data >> 8) : (data & 0xff); internal = emulated_addr_write_b(addr, b); // always sucessful, addr already tested } else // DATO internal = emulated_addr_write_w(addr, data); if (internal) { buslatches_setbits(4, BIT(5), BIT(5)); // slave assert SSYN buslatches_setbits(4, BIT(4), 0); // master deassert MSYN buslatches_setbyte(5, 0); // master removes data buslatches_setbyte(6, 0); // perhaps ARM issued ARM2PRU_INTR, request set in parallel state machine. // Arbitrator will GRANT it after DMA ready (SACK deasserted). // assert SSYN after ARM completes "active" register logic // while (mailbox.events.event_deviceregister) ; buslatches_setbits(4, BIT(5), 0); // slave deassert SSYN return (statemachine_state_func) &sm_dma_state_99; // next word } else { // DATO to external slave // wait for a slave SSYN timeout_set(TIMEOUT_DMA, MICROSECS(UNIBUS_TIMEOUT_PERIOD_US)); return (statemachine_state_func) &sm_dma_state_21; // wait SSYN DATAO } } else { // DATI or DATIP tmpval = (addr >> 16) & 3; // bit 2,3,4,5 == 0 -> C0,C1,MSYN,SSYN not asserted buslatches_setbits(4, 0x3f, tmpval); // wait 150ns after MSYN, no distance to SSYN required __delay_cycles(NANOSECS(UNIBUS_DMA_MASTER_PRE_MSYN_NS) - 10); // assume 10 cycles for buslatches_getbyte and address test // ADDR, CONTROL (and DATA) stable since 150ns, set MSYN next // use 150ns delay to check for internal address // page_table_entry = PAGE_TABLE_ENTRY(deviceregisters,addr); // !!! optimizer may not move this around !!! // MSYN = latch[4], bit 4 buslatches_setbits(4, BIT(4), BIT(4)); // master assert MSYN if (emulated_addr_read(addr, &data)) { // DATI to internal slave: put MSYN/SSYN/DATA protocol onto bus, // slave puts data onto bus // DATA[0..7] = latch[5] buslatches_setbyte(5, data & 0xff); // DATA[8..15] = latch[6] buslatches_setbyte(6, data >> 8); // theoretically another bus member could set bits in bus addr & data ... 
// if yes, we would have to read back the bus lines *sm_dma.dataptr = data; // mailbox.dma.words[sm_dma.cur_wordidx] = data; buslatches_setbits(4, BIT(5), BIT(5)); // slave assert SSYN buslatches_setbits(4, BIT(4), 0); // master deassert MSYN buslatches_setbyte(5, 0); // slave removes data buslatches_setbyte(6, 0); // perhaps ARM issued ARM2PRU_INTR, request set in parallel state machine. // Arbitrator will GRANT it after DMA ready (SACK deasserted). // assert SSYN after ARM completes "active" register logic // while (mailbox.events.event_deviceregister) ; buslatches_setbits(4, BIT(5), 0); // slave deassert SSYN return (statemachine_state_func) &sm_dma_state_99; // next word } else { // DATI to external slave // wait for a slave SSYN timeout_set(TIMEOUT_DMA, MICROSECS(UNIBUS_TIMEOUT_PERIOD_US)); return (statemachine_state_func) &sm_dma_state_11; // wait SSYN DATI } } } // DATI to external slave: MSYN set, wait for SSYN or timeout static statemachine_state_func sm_dma_state_11() { uint16_t tmpval; sm_dma.state_timeout = timeout_reached(TIMEOUT_DMA); // SSYN = latch[4], bit 5 if (!sm_dma.state_timeout && !(buslatches_getbyte(4) & BIT(5))) return (statemachine_state_func) &sm_dma_state_11; // no SSYN yet: wait // SSYN set by slave (or timeout). read data __delay_cycles(NANOSECS(75) - 6); // assume 2*3 cycles for buslatches_getbyte // DATA[0..7] = latch[5] tmpval = buslatches_getbyte(5); // DATA[8..15] = latch[6] tmpval |= (buslatches_getbyte(6) << 8); // save in buffer *sm_dma.dataptr = tmpval; // mailbox.dma.words[sm_dma.cur_wordidx] = tmpval; // negate MSYN buslatches_setbits(4, BIT(4), 0); // DATI: remove address,control, MSYN,SSYN from bus, 75ns after MSYN inactive __delay_cycles(NANOSECS(75) - 8); // assume 8 cycles for state change return (statemachine_state_func) &sm_dma_state_99; } // DATO to external slave: wait for SSYN or timeout static statemachine_state_func sm_dma_state_21() { sm_dma.state_timeout = timeout_reached(TIMEOUT_DMA); // SSYN timeout? 
// SSYN = latch[4], bit 5 if (!sm_dma.state_timeout && !(buslatches_getbyte(4) & BIT(5))) return (statemachine_state_func) &sm_dma_state_21; // no SSYN yet: wait // SSYN set by slave (or timeout): negate MSYN, remove DATA from bus buslatches_setbits(4, BIT(4), 0); // deassert MSYN buslatches_setbyte(5, 0); buslatches_setbyte(6, 0); // DATO: remove address,control, MSYN,SSYN from bus, 75ns after MSYN inactive __delay_cycles(NANOSECS(75) - 8); // assume 8 cycles for state change return (statemachine_state_func) &sm_dma_state_99; } // word is transfered, or timeout. static statemachine_state_func sm_dma_state_99() { uint8_t final_dma_state; // from state_12, state_21 // 2 reasons to terminate transfer // - BUS timeout at curent address // - last word transferred if (sm_dma.state_timeout) { final_dma_state = DMA_STATE_TIMEOUTSTOP; // deassert SACK after timeout, independent of remaining word count buslatches_setbits(1, BIT(5), 0); // deassert SACK = latch[1], bit 5 } else { sm_dma.dataptr++; // point to next word in buffer sm_dma.cur_wordsleft--; if (sm_dma.cur_wordsleft == 0) final_dma_state = DMA_STATE_READY; // last word: stop else if (buslatches_getbyte(7) & BIT(3)) { // INIT stops transaction: latch[7], bit 3 // only bus master (=CPU?) can issue INIT final_dma_state = DMA_STATE_INITSTOP; // deassert SACK after INIT, independent of remaining word count buslatches_setbits(1, BIT(5), 0); // deassert SACK = latch[1], bit 5 } else final_dma_state = DMA_STATE_RUNNING; // more words: continue } if (final_dma_state == DMA_STATE_RUNNING) { // dataptr and words_left already incremented mailbox.dma.cur_addr += 2; // signal progress to ARM return (statemachine_state_func) &sm_dma_state_1; // reloop } else { // remove addr and control from bus. 
// clears also address_overlay from bus buslatches_setbyte(2, 0); buslatches_setbyte(3, 0) ; buslatches_setbits(4, 0x3f, 0); // remove BBSY: latch[1], bit 6 buslatches_setbits(1, BIT(6), 0); timeout_cleanup(TIMEOUT_DMA); // SACK already de-asserted at wordcount==1 mailbox.dma.cur_status = final_dma_state; // signal to ARM // device or cpu cycle ended // no concurrent ARM+PRU access // for cpu access: ARM CPU thread ends looping now // test for DMA_STATE_IS_COMPLETE(cur_status) EVENT_SIGNAL(mailbox, dma); // for device DMA: unibusadapter worker() waits for signal if (!mailbox.dma.cpu_access) { // signal to ARM // ARM is clearing this, before requesting new DMA. // no concurrent ARM+PRU access PRU2ARM_INTERRUPT ; } // PRU_DEBUG_PIN0_PULSE(50) ; // CPU20 performace return NULL; // now stopped } }
9449.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reverse-lookup fixtures: loopback addresses and an arbitrary well-known
 * port. `req` is shared by all tests in this file (the tests run one at a
 * time, so reuse is safe). */
static const char* address_ip4 = "127.0.0.1";
static const char* address_ip6 = "::1";
static const int port = 80;

static struct sockaddr_in addr4;
static struct sockaddr_in6 addr6;
static uv_getnameinfo_t req;

/* Completion callback for the async tests: only checks that the lookup
 * succeeded and produced non-NULL hostname/service strings. */
static void getnameinfo_req(uv_getnameinfo_t* handle,
                            int status,
                            const char* hostname,
                            const char* service) {
  ASSERT(handle != NULL);
  ASSERT(status == 0);
  ASSERT(hostname != NULL);
  ASSERT(service != NULL);
}

/* Async reverse lookup of 127.0.0.1:80; result checked in the callback. */
TEST_IMPL(getnameinfo_basic_ip4) {
/* TODO(gengjiawen): Fix test on QEMU. */
#if defined(__QEMU__)
  RETURN_SKIP("Test does not currently work in QEMU");
#endif
  int r;

  r = uv_ip4_addr(address_ip4, port, &addr4);
  ASSERT(r == 0);

  r = uv_getnameinfo(uv_default_loop(),
                     &req,
                     &getnameinfo_req,
                     (const struct sockaddr*)&addr4,
                     0);
  ASSERT(r == 0);

  uv_run(uv_default_loop(), UV_RUN_DEFAULT);

  MAKE_VALGRIND_HAPPY();
  return 0;
}

/* Synchronous variant (NULL callback): uv_getnameinfo() fills `req` in
 * place, so the host and service strings are checked directly. */
TEST_IMPL(getnameinfo_basic_ip4_sync) {
/* TODO(gengjiawen): Fix test on QEMU. */
#if defined(__QEMU__)
  RETURN_SKIP("Test does not currently work in QEMU");
#endif
  ASSERT(0 == uv_ip4_addr(address_ip4, port, &addr4));

  ASSERT(0 == uv_getnameinfo(uv_default_loop(),
                             &req,
                             NULL,
                             (const struct sockaddr*)&addr4,
                             0));
  ASSERT(req.host[0] != '\0');
  ASSERT(req.service[0] != '\0');

  MAKE_VALGRIND_HAPPY();
  return 0;
}

/* Async reverse lookup of [::1]:80; result checked in the callback. */
TEST_IMPL(getnameinfo_basic_ip6) {
/* TODO(gengjiawen): Fix test on QEMU. */
#if defined(__QEMU__)
  RETURN_SKIP("Test does not currently work in QEMU");
#endif
  int r;

  r = uv_ip6_addr(address_ip6, port, &addr6);
  ASSERT(r == 0);

  r = uv_getnameinfo(uv_default_loop(),
                     &req,
                     &getnameinfo_req,
                     (const struct sockaddr*)&addr6,
                     0);
  ASSERT(r == 0);

  uv_run(uv_default_loop(), UV_RUN_DEFAULT);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
766031.c
/**************************************************************************** * net/socket/net_sockif.c * * Copyright (C) 2017-2018 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <gnutt@nuttx.org> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <errno.h>
#include <debug.h>

#include <nuttx/net/net.h>

#include "inet/inet.h"
#include "local/local.h"
#include "pkt/pkt.h"
#include "bluetooth/bluetooth.h"
#include "ieee802154/ieee802154.h"
#include "socket/socket.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: net_sockif
 *
 * Description:
 *   Return the socket interface associated with this address family.
 *
 * Input Parameters:
 *   family   - Socket address family
 *   type     - Socket type
 *   protocol - Socket protocol
 *
 * Returned Value:
 *   On success, a non-NULL instance of struct sock_intf_s is returned.  NULL
 *   is returned only if the address family is not supported.
 *
 ****************************************************************************/

FAR const struct sock_intf_s *
net_sockif(sa_family_t family, int type, int protocol)
{
  FAR const struct sock_intf_s *sockif = NULL;

  /* Get the socket interface.  Each case below is compiled in only when the
   * corresponding protocol family is enabled in the board configuration.
   *
   * REVISIT:  Should also support PF_UNSPEC which would permit the socket
   * to be used for anything.
   */

  switch (family)
    {
#ifdef HAVE_INET_SOCKETS
#ifdef HAVE_PFINET_SOCKETS
      case PF_INET:
#endif
#ifdef HAVE_PFINET6_SOCKETS
      case PF_INET6:
#endif
        /* IPv4/IPv6 share one dispatcher; it selects on (family, type,
         * protocol) internally.
         */

        sockif = inet_sockif(family, type, protocol);
        break;
#endif

#ifdef CONFIG_NET_LOCAL
      case PF_LOCAL:
        sockif = &g_local_sockif;
        break;
#endif

#ifdef CONFIG_NET_PKT
      case PF_PACKET:
        sockif = &g_pkt_sockif;
        break;
#endif

#ifdef CONFIG_NET_BLUETOOTH
      case PF_BLUETOOTH:
        sockif = &g_bluetooth_sockif;
        break;
#endif

#ifdef CONFIG_NET_IEEE802154
      case PF_IEEE802154:
        sockif = &g_ieee802154_sockif;
        break;
#endif

      default:
        /* Unsupported family: log and fall through with sockif == NULL */

        nerr("ERROR: Address family unsupported: %d\n", family);
    }

  return sockif;
}
229568.c
/***************************************************************************/ /* */ /* afmparse.c */ /* */ /* AFM parser (body). */ /* */ /* Copyright 2006, 2007, 2008, 2009 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #include <ft2build.h> #include FT_FREETYPE_H #include FT_INTERNAL_POSTSCRIPT_AUX_H #include "afmparse.h" #include "psconv.h" #include "psauxerr.h" /***************************************************************************/ /* */ /* AFM_Stream */ /* */ /* The use of AFM_Stream is largely inspired by parseAFM.[ch] from t1lib. */ /* */ /* */ enum { AFM_STREAM_STATUS_NORMAL, AFM_STREAM_STATUS_EOC, AFM_STREAM_STATUS_EOL, AFM_STREAM_STATUS_EOF }; typedef struct AFM_StreamRec_ { FT_Byte* cursor; FT_Byte* base; FT_Byte* limit; FT_Int status; } AFM_StreamRec; #ifndef EOF #define EOF -1 #endif /* this works because empty lines are ignored */ #define AFM_IS_NEWLINE( ch ) ( (ch) == '\r' || (ch) == '\n' ) #define AFM_IS_EOF( ch ) ( (ch) == EOF || (ch) == '\x1a' ) #define AFM_IS_SPACE( ch ) ( (ch) == ' ' || (ch) == '\t' ) /* column separator; there is no `column' in the spec actually */ #define AFM_IS_SEP( ch ) ( (ch) == ';' ) #define AFM_GETC() \ ( ( (stream)->cursor < (stream)->limit ) ? 
*(stream)->cursor++ \ : EOF ) #define AFM_STREAM_KEY_BEGIN( stream ) \ (char*)( (stream)->cursor - 1 ) #define AFM_STREAM_KEY_LEN( stream, key ) \ ( (char*)(stream)->cursor - key - 1 ) #define AFM_STATUS_EOC( stream ) \ ( (stream)->status >= AFM_STREAM_STATUS_EOC ) #define AFM_STATUS_EOL( stream ) \ ( (stream)->status >= AFM_STREAM_STATUS_EOL ) #define AFM_STATUS_EOF( stream ) \ ( (stream)->status >= AFM_STREAM_STATUS_EOF ) static int afm_stream_skip_spaces( AFM_Stream stream ) { int ch = 0; /* make stupid compiler happy */ if ( AFM_STATUS_EOC( stream ) ) return ';'; while ( 1 ) { ch = AFM_GETC(); if ( !AFM_IS_SPACE( ch ) ) break; } if ( AFM_IS_NEWLINE( ch ) ) stream->status = AFM_STREAM_STATUS_EOL; else if ( AFM_IS_SEP( ch ) ) stream->status = AFM_STREAM_STATUS_EOC; else if ( AFM_IS_EOF( ch ) ) stream->status = AFM_STREAM_STATUS_EOF; return ch; } /* read a key or value in current column */ static char* afm_stream_read_one( AFM_Stream stream ) { char* str; int ch; afm_stream_skip_spaces( stream ); if ( AFM_STATUS_EOC( stream ) ) return NULL; str = AFM_STREAM_KEY_BEGIN( stream ); while ( 1 ) { ch = AFM_GETC(); if ( AFM_IS_SPACE( ch ) ) break; else if ( AFM_IS_NEWLINE( ch ) ) { stream->status = AFM_STREAM_STATUS_EOL; break; } else if ( AFM_IS_SEP( ch ) ) { stream->status = AFM_STREAM_STATUS_EOC; break; } else if ( AFM_IS_EOF( ch ) ) { stream->status = AFM_STREAM_STATUS_EOF; break; } } return str; } /* read a string (i.e., read to EOL) */ static char* afm_stream_read_string( AFM_Stream stream ) { char* str; int ch; afm_stream_skip_spaces( stream ); if ( AFM_STATUS_EOL( stream ) ) return NULL; str = AFM_STREAM_KEY_BEGIN( stream ); /* scan to eol */ while ( 1 ) { ch = AFM_GETC(); if ( AFM_IS_NEWLINE( ch ) ) { stream->status = AFM_STREAM_STATUS_EOL; break; } else if ( AFM_IS_EOF( ch ) ) { stream->status = AFM_STREAM_STATUS_EOF; break; } } return str; } /*************************************************************************/ /* */ /* AFM_Parser */ /* */ /* */ /* all 
keys defined in Ch. 7-10 of 5004.AFM_Spec.pdf */ typedef enum AFM_Token_ { AFM_TOKEN_ASCENDER, AFM_TOKEN_AXISLABEL, AFM_TOKEN_AXISTYPE, AFM_TOKEN_B, AFM_TOKEN_BLENDAXISTYPES, AFM_TOKEN_BLENDDESIGNMAP, AFM_TOKEN_BLENDDESIGNPOSITIONS, AFM_TOKEN_C, AFM_TOKEN_CC, AFM_TOKEN_CH, AFM_TOKEN_CAPHEIGHT, AFM_TOKEN_CHARWIDTH, AFM_TOKEN_CHARACTERSET, AFM_TOKEN_CHARACTERS, AFM_TOKEN_DESCENDER, AFM_TOKEN_ENCODINGSCHEME, AFM_TOKEN_ENDAXIS, AFM_TOKEN_ENDCHARMETRICS, AFM_TOKEN_ENDCOMPOSITES, AFM_TOKEN_ENDDIRECTION, AFM_TOKEN_ENDFONTMETRICS, AFM_TOKEN_ENDKERNDATA, AFM_TOKEN_ENDKERNPAIRS, AFM_TOKEN_ENDTRACKKERN, AFM_TOKEN_ESCCHAR, AFM_TOKEN_FAMILYNAME, AFM_TOKEN_FONTBBOX, AFM_TOKEN_FONTNAME, AFM_TOKEN_FULLNAME, AFM_TOKEN_ISBASEFONT, AFM_TOKEN_ISCIDFONT, AFM_TOKEN_ISFIXEDPITCH, AFM_TOKEN_ISFIXEDV, AFM_TOKEN_ITALICANGLE, AFM_TOKEN_KP, AFM_TOKEN_KPH, AFM_TOKEN_KPX, AFM_TOKEN_KPY, AFM_TOKEN_L, AFM_TOKEN_MAPPINGSCHEME, AFM_TOKEN_METRICSSETS, AFM_TOKEN_N, AFM_TOKEN_NOTICE, AFM_TOKEN_PCC, AFM_TOKEN_STARTAXIS, AFM_TOKEN_STARTCHARMETRICS, AFM_TOKEN_STARTCOMPOSITES, AFM_TOKEN_STARTDIRECTION, AFM_TOKEN_STARTFONTMETRICS, AFM_TOKEN_STARTKERNDATA, AFM_TOKEN_STARTKERNPAIRS, AFM_TOKEN_STARTKERNPAIRS0, AFM_TOKEN_STARTKERNPAIRS1, AFM_TOKEN_STARTTRACKKERN, AFM_TOKEN_STDHW, AFM_TOKEN_STDVW, AFM_TOKEN_TRACKKERN, AFM_TOKEN_UNDERLINEPOSITION, AFM_TOKEN_UNDERLINETHICKNESS, AFM_TOKEN_VV, AFM_TOKEN_VVECTOR, AFM_TOKEN_VERSION, AFM_TOKEN_W, AFM_TOKEN_W0, AFM_TOKEN_W0X, AFM_TOKEN_W0Y, AFM_TOKEN_W1, AFM_TOKEN_W1X, AFM_TOKEN_W1Y, AFM_TOKEN_WX, AFM_TOKEN_WY, AFM_TOKEN_WEIGHT, AFM_TOKEN_WEIGHTVECTOR, AFM_TOKEN_XHEIGHT, N_AFM_TOKENS, AFM_TOKEN_UNKNOWN } AFM_Token; static const char* const afm_key_table[N_AFM_TOKENS] = { "Ascender", "AxisLabel", "AxisType", "B", "BlendAxisTypes", "BlendDesignMap", "BlendDesignPositions", "C", "CC", "CH", "CapHeight", "CharWidth", "CharacterSet", "Characters", "Descender", "EncodingScheme", "EndAxis", "EndCharMetrics", "EndComposites", "EndDirection", "EndFontMetrics", "EndKernData", 
"EndKernPairs", "EndTrackKern", "EscChar", "FamilyName", "FontBBox", "FontName", "FullName", "IsBaseFont", "IsCIDFont", "IsFixedPitch", "IsFixedV", "ItalicAngle", "KP", "KPH", "KPX", "KPY", "L", "MappingScheme", "MetricsSets", "N", "Notice", "PCC", "StartAxis", "StartCharMetrics", "StartComposites", "StartDirection", "StartFontMetrics", "StartKernData", "StartKernPairs", "StartKernPairs0", "StartKernPairs1", "StartTrackKern", "StdHW", "StdVW", "TrackKern", "UnderlinePosition", "UnderlineThickness", "VV", "VVector", "Version", "W", "W0", "W0X", "W0Y", "W1", "W1X", "W1Y", "WX", "WY", "Weight", "WeightVector", "XHeight" }; /* * `afm_parser_read_vals' and `afm_parser_next_key' provide * high-level operations to an AFM_Stream. The rest of the * parser functions should use them without accessing the * AFM_Stream directly. */ FT_LOCAL_DEF( FT_Int ) afm_parser_read_vals( AFM_Parser parser, AFM_Value vals, FT_UInt n ) { AFM_Stream stream = parser->stream; char* str; FT_UInt i; if ( n > AFM_MAX_ARGUMENTS ) return 0; for ( i = 0; i < n; i++ ) { FT_Offset len; AFM_Value val = vals + i; if ( val->type == AFM_VALUE_TYPE_STRING ) str = afm_stream_read_string( stream ); else str = afm_stream_read_one( stream ); if ( !str ) break; len = AFM_STREAM_KEY_LEN( stream, str ); switch ( val->type ) { case AFM_VALUE_TYPE_STRING: case AFM_VALUE_TYPE_NAME: { FT_Memory memory = parser->memory; FT_Error error; if ( !FT_QALLOC( val->u.s, len + 1 ) ) { ft_memcpy( val->u.s, str, len ); val->u.s[len] = '\0'; } } break; case AFM_VALUE_TYPE_FIXED: val->u.f = PS_Conv_ToFixed( (FT_Byte**)(void*)&str, (FT_Byte*)str + len, 0 ); break; case AFM_VALUE_TYPE_INTEGER: val->u.i = PS_Conv_ToInt( (FT_Byte**)(void*)&str, (FT_Byte*)str + len ); break; case AFM_VALUE_TYPE_BOOL: val->u.b = FT_BOOL( len == 4 && !ft_strncmp( str, "true", 4 ) ); break; case AFM_VALUE_TYPE_INDEX: if ( parser->get_index ) val->u.i = parser->get_index( str, len, parser->user_data ); else val->u.i = 0; break; } } return i; } FT_LOCAL_DEF( 
char* ) afm_parser_next_key( AFM_Parser parser, FT_Bool line, FT_Offset* len ) { AFM_Stream stream = parser->stream; char* key = 0; /* make stupid compiler happy */ if ( line ) { while ( 1 ) { /* skip current line */ if ( !AFM_STATUS_EOL( stream ) ) afm_stream_read_string( stream ); stream->status = AFM_STREAM_STATUS_NORMAL; key = afm_stream_read_one( stream ); /* skip empty line */ if ( !key && !AFM_STATUS_EOF( stream ) && AFM_STATUS_EOL( stream ) ) continue; break; } } else { while ( 1 ) { /* skip current column */ while ( !AFM_STATUS_EOC( stream ) ) afm_stream_read_one( stream ); stream->status = AFM_STREAM_STATUS_NORMAL; key = afm_stream_read_one( stream ); /* skip empty column */ if ( !key && !AFM_STATUS_EOF( stream ) && AFM_STATUS_EOC( stream ) ) continue; break; } } if ( len ) *len = ( key ) ? (FT_Offset)AFM_STREAM_KEY_LEN( stream, key ) : 0; return key; } static AFM_Token afm_tokenize( const char* key, FT_Offset len ) { int n; for ( n = 0; n < N_AFM_TOKENS; n++ ) { if ( *( afm_key_table[n] ) == *key ) { for ( ; n < N_AFM_TOKENS; n++ ) { if ( *( afm_key_table[n] ) != *key ) return AFM_TOKEN_UNKNOWN; if ( ft_strncmp( afm_key_table[n], key, len ) == 0 ) return (AFM_Token) n; } } } return AFM_TOKEN_UNKNOWN; } FT_LOCAL_DEF( FT_Error ) afm_parser_init( AFM_Parser parser, FT_Memory memory, FT_Byte* base, FT_Byte* limit ) { AFM_Stream stream; FT_Error error; if ( FT_NEW( stream ) ) return error; stream->cursor = stream->base = base; stream->limit = limit; /* don't skip the first line during the first call */ stream->status = AFM_STREAM_STATUS_EOL; parser->memory = memory; parser->stream = stream; parser->FontInfo = NULL; parser->get_index = NULL; return PSaux_Err_Ok; } FT_LOCAL( void ) afm_parser_done( AFM_Parser parser ) { FT_Memory memory = parser->memory; FT_FREE( parser->stream ); } FT_LOCAL_DEF( FT_Error ) afm_parser_read_int( AFM_Parser parser, FT_Int* aint ) { AFM_ValueRec val; val.type = AFM_VALUE_TYPE_INTEGER; if ( afm_parser_read_vals( parser, &val, 1 ) 
== 1 )
    {
      *aint = val.u.i;

      return PSaux_Err_Ok;
    }
    else
      return PSaux_Err_Syntax_Error;
  }


  /* Parse a `StartTrackKern' section: reads the declared entry count,     */
  /* allocates fi->TrackKerns, then consumes one `TrackKern' line per      */
  /* entry (degree, min/max point size, min/max kern as fixed values).     */
  /* Stops at EndTrackKern/EndKernData/EndFontMetrics and records the      */
  /* number of entries actually seen in fi->NumTrackKern.                  */
  static FT_Error
  afm_parse_track_kern( AFM_Parser  parser )
  {
    AFM_FontInfo   fi = parser->FontInfo;
    AFM_TrackKern  tk;
    char*          key;
    FT_Offset      len;
    int            n = -1;


    if ( afm_parser_read_int( parser, &fi->NumTrackKern ) )
      goto Fail;

    if ( fi->NumTrackKern )
    {
      FT_Memory  memory = parser->memory;
      FT_Error   error;


      if ( FT_QNEW_ARRAY( fi->TrackKerns, fi->NumTrackKern ) )
        return error;
    }

    while ( ( key = afm_parser_next_key( parser, 1, &len ) ) != 0 )
    {
      AFM_ValueRec  shared_vals[5];


      switch ( afm_tokenize( key, len ) )
      {
      case AFM_TOKEN_TRACKKERN:
        n++;

        /* reject more TrackKern lines than were declared */
        if ( n >= fi->NumTrackKern )
          goto Fail;

        tk = fi->TrackKerns + n;

        shared_vals[0].type = AFM_VALUE_TYPE_INTEGER;
        shared_vals[1].type = AFM_VALUE_TYPE_FIXED;
        shared_vals[2].type = AFM_VALUE_TYPE_FIXED;
        shared_vals[3].type = AFM_VALUE_TYPE_FIXED;
        shared_vals[4].type = AFM_VALUE_TYPE_FIXED;
        if ( afm_parser_read_vals( parser, shared_vals, 5 ) != 5 )
          goto Fail;

        tk->degree     = shared_vals[0].u.i;
        tk->min_ptsize = shared_vals[1].u.f;
        tk->min_kern   = shared_vals[2].u.f;
        tk->max_ptsize = shared_vals[3].u.f;
        tk->max_kern   = shared_vals[4].u.f;

        /* is this correct?
*/ if ( tk->degree < 0 && tk->min_kern > 0 ) tk->min_kern = -tk->min_kern; break; case AFM_TOKEN_ENDTRACKKERN: case AFM_TOKEN_ENDKERNDATA: case AFM_TOKEN_ENDFONTMETRICS: fi->NumTrackKern = n + 1; return PSaux_Err_Ok; case AFM_TOKEN_UNKNOWN: break; default: goto Fail; } } Fail: return PSaux_Err_Syntax_Error; } #undef KERN_INDEX #define KERN_INDEX( g1, g2 ) ( ( (FT_ULong)g1 << 16 ) | g2 ) /* compare two kerning pairs */ FT_CALLBACK_DEF( int ) afm_compare_kern_pairs( const void* a, const void* b ) { AFM_KernPair kp1 = (AFM_KernPair)a; AFM_KernPair kp2 = (AFM_KernPair)b; FT_ULong index1 = KERN_INDEX( kp1->index1, kp1->index2 ); FT_ULong index2 = KERN_INDEX( kp2->index1, kp2->index2 ); if ( index1 > index2 ) return 1; else if ( index1 < index2 ) return -1; else return 0; } static FT_Error afm_parse_kern_pairs( AFM_Parser parser ) { AFM_FontInfo fi = parser->FontInfo; AFM_KernPair kp; char* key; FT_Offset len; int n = -1; if ( afm_parser_read_int( parser, &fi->NumKernPair ) ) goto Fail; if ( fi->NumKernPair ) { FT_Memory memory = parser->memory; FT_Error error; if ( FT_QNEW_ARRAY( fi->KernPairs, fi->NumKernPair ) ) return error; } while ( ( key = afm_parser_next_key( parser, 1, &len ) ) != 0 ) { AFM_Token token = afm_tokenize( key, len ); switch ( token ) { case AFM_TOKEN_KP: case AFM_TOKEN_KPX: case AFM_TOKEN_KPY: { FT_Int r; AFM_ValueRec shared_vals[4]; n++; if ( n >= fi->NumKernPair ) goto Fail; kp = fi->KernPairs + n; shared_vals[0].type = AFM_VALUE_TYPE_INDEX; shared_vals[1].type = AFM_VALUE_TYPE_INDEX; shared_vals[2].type = AFM_VALUE_TYPE_INTEGER; shared_vals[3].type = AFM_VALUE_TYPE_INTEGER; r = afm_parser_read_vals( parser, shared_vals, 4 ); if ( r < 3 ) goto Fail; kp->index1 = shared_vals[0].u.i; kp->index2 = shared_vals[1].u.i; if ( token == AFM_TOKEN_KPY ) { kp->x = 0; kp->y = shared_vals[2].u.i; } else { kp->x = shared_vals[2].u.i; kp->y = ( token == AFM_TOKEN_KP && r == 4 ) ? 
shared_vals[3].u.i : 0; } } break; case AFM_TOKEN_ENDKERNPAIRS: case AFM_TOKEN_ENDKERNDATA: case AFM_TOKEN_ENDFONTMETRICS: fi->NumKernPair = n + 1; ft_qsort( fi->KernPairs, fi->NumKernPair, sizeof( AFM_KernPairRec ), afm_compare_kern_pairs ); return PSaux_Err_Ok; case AFM_TOKEN_UNKNOWN: break; default: goto Fail; } } Fail: return PSaux_Err_Syntax_Error; } static FT_Error afm_parse_kern_data( AFM_Parser parser ) { FT_Error error; char* key; FT_Offset len; while ( ( key = afm_parser_next_key( parser, 1, &len ) ) != 0 ) { switch ( afm_tokenize( key, len ) ) { case AFM_TOKEN_STARTTRACKKERN: error = afm_parse_track_kern( parser ); if ( error ) return error; break; case AFM_TOKEN_STARTKERNPAIRS: case AFM_TOKEN_STARTKERNPAIRS0: error = afm_parse_kern_pairs( parser ); if ( error ) return error; break; case AFM_TOKEN_ENDKERNDATA: case AFM_TOKEN_ENDFONTMETRICS: return PSaux_Err_Ok; case AFM_TOKEN_UNKNOWN: break; default: goto Fail; } } Fail: return PSaux_Err_Syntax_Error; } static FT_Error afm_parser_skip_section( AFM_Parser parser, FT_UInt n, AFM_Token end_section ) { char* key; FT_Offset len; while ( n-- > 0 ) { key = afm_parser_next_key( parser, 1, NULL ); if ( !key ) goto Fail; } while ( ( key = afm_parser_next_key( parser, 1, &len ) ) != 0 ) { AFM_Token token = afm_tokenize( key, len ); if ( token == end_section || token == AFM_TOKEN_ENDFONTMETRICS ) return PSaux_Err_Ok; } Fail: return PSaux_Err_Syntax_Error; } FT_LOCAL_DEF( FT_Error ) afm_parser_parse( AFM_Parser parser ) { FT_Memory memory = parser->memory; AFM_FontInfo fi = parser->FontInfo; FT_Error error = PSaux_Err_Syntax_Error; char* key; FT_Offset len; FT_Int metrics_sets = 0; if ( !fi ) return PSaux_Err_Invalid_Argument; key = afm_parser_next_key( parser, 1, &len ); if ( !key || len != 16 || ft_strncmp( key, "StartFontMetrics", 16 ) != 0 ) return PSaux_Err_Unknown_File_Format; while ( ( key = afm_parser_next_key( parser, 1, &len ) ) != 0 ) { AFM_ValueRec shared_vals[4]; switch ( afm_tokenize( key, len ) ) { 
case AFM_TOKEN_METRICSSETS: if ( afm_parser_read_int( parser, &metrics_sets ) ) goto Fail; if ( metrics_sets != 0 && metrics_sets != 2 ) { error = PSaux_Err_Unimplemented_Feature; goto Fail; } break; case AFM_TOKEN_ISCIDFONT: shared_vals[0].type = AFM_VALUE_TYPE_BOOL; if ( afm_parser_read_vals( parser, shared_vals, 1 ) != 1 ) goto Fail; fi->IsCIDFont = shared_vals[0].u.b; break; case AFM_TOKEN_FONTBBOX: shared_vals[0].type = AFM_VALUE_TYPE_FIXED; shared_vals[1].type = AFM_VALUE_TYPE_FIXED; shared_vals[2].type = AFM_VALUE_TYPE_FIXED; shared_vals[3].type = AFM_VALUE_TYPE_FIXED; if ( afm_parser_read_vals( parser, shared_vals, 4 ) != 4 ) goto Fail; fi->FontBBox.xMin = shared_vals[0].u.f; fi->FontBBox.yMin = shared_vals[1].u.f; fi->FontBBox.xMax = shared_vals[2].u.f; fi->FontBBox.yMax = shared_vals[3].u.f; break; case AFM_TOKEN_ASCENDER: shared_vals[0].type = AFM_VALUE_TYPE_FIXED; if ( afm_parser_read_vals( parser, shared_vals, 1 ) != 1 ) goto Fail; fi->Ascender = shared_vals[0].u.f; break; case AFM_TOKEN_DESCENDER: shared_vals[0].type = AFM_VALUE_TYPE_FIXED; if ( afm_parser_read_vals( parser, shared_vals, 1 ) != 1 ) goto Fail; fi->Descender = shared_vals[0].u.f; break; case AFM_TOKEN_STARTCHARMETRICS: { FT_Int n = 0; if ( afm_parser_read_int( parser, &n ) ) goto Fail; error = afm_parser_skip_section( parser, n, AFM_TOKEN_ENDCHARMETRICS ); if ( error ) return error; } break; case AFM_TOKEN_STARTKERNDATA: error = afm_parse_kern_data( parser ); if ( error ) goto Fail; /* fall through since we only support kern data */ case AFM_TOKEN_ENDFONTMETRICS: return PSaux_Err_Ok; default: break; } } Fail: FT_FREE( fi->TrackKerns ); fi->NumTrackKern = 0; FT_FREE( fi->KernPairs ); fi->NumKernPair = 0; fi->IsCIDFont = 0; return error; } /* END */
526263.c
/*
 * LCD.c
 *
 *  Created on: Jul 10, 2020
 *      Author: Helmy
 */
/* Description !  16x2 Character LCD for chip LMB161A
 *
 */
/**********************************  Header Files INCLUSIONS ****************************/
/* NOTE(review): avr-libc deprecates <avr/delay.h> in favor of <util/delay.h>;
 * confirm against the toolchain in use. */
#include <avr/delay.h>
#include "dio.h"
#include "LCDCONFIG.h"
#include"std_types.h"

/* Overlay of one byte with its eight individual bits, used by the 4-bit-mode
 * routines to drive the four data pins from a nibble one bit at a time. */
union
{
	struct
	{
		unsigned char b0:1;
		unsigned char b1:1;
		unsigned char b2:1;
		unsigned char b3:1;
		unsigned char b4:1;
		unsigned char b5:1;
		unsigned char b6:1;
		unsigned char b7:1;
	}BITs;
	unsigned char LCD_BYTE;
}LCD_INFO;

/*
 -----------                   ----------
| ATmega32  |                 |   LCD    |
|           |                 |          |
|        PD7|---------------->|D7(4bit)  |
|        PD6|---------------->|D6(4bit)  |
|        PD5|---------------->|D5(4bit)  |
|        PD4|---------------->|D4(4bit)  |
|        PD3|---------------->|D3        |
|        PD2|---------------->|D2        |
|        PD1|---------------->|D1        |
|        PD0|---------------->|D0        |
|           |                 |          |
|        PC2|---------------->|E         |
|        PC1|---------------->|RW        |
|        PC0|---------------->|RS        |
 -----------                   ----------
*/

/* Write a command byte to the LCD in 8-bit mode (RS=0, RW=0).
 * The byte is presumably latched on the falling edge of E — standard
 * HD44780-style timing; confirm against the LMB161A datasheet. */
void LCD_Vid_SendCommand(unsigned char command)
{
	DIO_SetPinValue(PORTCONTROL,RS,LOW);   /* RS=0: instruction register */
	DIO_SetPinValue(PORTCONTROL,RW,LOW);   /* RW=0: write */
	DIO_SetPinValue(PORTCONTROL,E,HIGH);
	_delay_ms(2);
	DIO_SetPortValue(PORTDATA,command);    /* whole command on D0..D7 */
	DIO_SetPinValue(PORTCONTROL,E,LOW);    /* E falling edge latches the byte */
	_delay_ms(2);
	DIO_SetPinValue(PORTCONTROL,E,HIGH);
}

/* Write a data (character) byte to the LCD in 8-bit mode (RS=1, RW=0). */
void LCD_Vid_SendChar(unsigned char Data)
{
	DIO_SetPinValue(PORTCONTROL,RS,HIGH);  /* RS=1: data register */
	DIO_SetPinValue(PORTCONTROL,RW,LOW);   /* RW=0: write */
	DIO_SetPinValue(PORTCONTROL,E,HIGH);
	DIO_SetPortValue(PORTDATA,Data);
	DIO_SetPinValue(PORTCONTROL,E,LOW);    /* latch on falling edge */
	_delay_ms(2);
	DIO_SetPinValue(PORTCONTROL,E,HIGH);
}

/* Initialize the LCD for 8-bit operation: configure the control and data
 * pins as outputs, wait for the controller's power-up time, then send the
 * standard init sequence (function set, display on, clear, entry mode). */
void LCD_Vid_8BitInit(void)
{	DIO_SetBinDirection(PORTCONTROL,RS,output);
	DIO_SetBinDirection(PORTCONTROL,RW,output);
	DIO_SetBinDirection(PORTCONTROL,E,output);
	DIO_SetPortdirection(PORTDATA,output);
	_delay_ms(30);                         /* LCD power-on settle time */
	LCD_Vid_SendCommand(FunctionSet_8Bit);
	_delay_ms(2);
	LCD_Vid_SendCommand(DisplayON);
	_delay_ms(2);
	LCD_Vid_SendCommand(DisplayClear);
	_delay_ms(2);
	LCD_Vid_SendCommand(EntryModeSet);
}

/* Experimental 4-bit command sender (body continues below). */
void LCD_Vid_SendCommand4Bit_test(unsigned char command)
{int x;
/* Tail of LCD_Vid_SendCommand4Bit_test() -- the function header lies above this
 * chunk.  Sends a command byte to the LCD in 4-bit mode (RS=0, RW=0): the high
 * nibble is placed on D4..D7 and latched with an E pulse, then the low nibble.
 * NOTE(review): the high nibble is clocked out TWICE below before the low
 * nibble is sent -- presumably an experiment (this is the "_test" variant used
 * only for the FunctionSet command during init); confirm against the regular
 * LCD_Vid_SendCommand4Bit() which sends each nibble once. */
DIO_SetPinValue(PORTCONTROL,RS,LOW);           /* RS=0: instruction register */
DIO_SetPinValue(PORTCONTROL,RW,LOW);           /* RW=0: write */
//x=command>>4;
//x=x&0x0f;
x=command;
x=x&0xf0;                                      /* isolate high nibble */
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);  /* present nibble on D4..D7 */
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,HIGH);           /* pulse E to latch the nibble */
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
/* second transmission of the same high nibble (see NOTE above) */
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
//*****************************************
x=command<<4;                                  /* move low nibble into bits 7..4 */
x=x&0xf0;
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
}

/* Send one command byte to the LCD over the 4-bit interface:
 * high nibble on D4..D7 + E pulse, then low nibble + E pulse. */
void LCD_Vid_SendCommand4Bit(unsigned char command)
{
unsigned int x;
DIO_SetPinValue(PORTCONTROL,RS,LOW);           /* instruction register */
DIO_SetPinValue(PORTCONTROL,RW,LOW);           /* write */
x=command;
x=x&0xf0;                                      /* high nibble first */
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,HIGH);           /* latch nibble */
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
//*****************************************
x=command<<4;                                  /* then low nibble */
x=x&0xf0;
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
}

/* Send one data (character) byte in 4-bit mode: RS=1 selects the data
 * register; nibbles are latched on the falling edge of E here (E is driven
 * HIGH up front, then LOW after the nibble is presented). */
void LCD_Vid_SendChar4Bit(unsigned char Data)
{int x;
DIO_SetPinValue(PORTCONTROL,RS,HIGH);          /* data register */
DIO_SetPinValue(PORTCONTROL,RW,LOW);           /* write */
DIO_SetPinValue(PORTCONTROL,E,HIGH);
x=Data;
x=x&0xf0;                                      /* high nibble */
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,LOW);            /* falling edge latches nibble */
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
//*****************************************
x=Data<<4;                                     /* low nibble */
x=x&0xf0;
LCD_INFO.LCD_BYTE=x;
DIO_SetPinValue(PORTDATA,0,LCD_INFO.BITs.b4);
DIO_SetPinValue(PORTDATA,1,LCD_INFO.BITs.b5);
DIO_SetPinValue(PORTDATA,2,LCD_INFO.BITs.b6);
DIO_SetPinValue(PORTDATA,3,LCD_INFO.BITs.b7);
DIO_SetPinValue(PORTCONTROL,E,LOW);
_delay_ms(2);
DIO_SetPinValue(PORTCONTROL,E,HIGH);
}

/* Initialize the LCD for 4-bit operation: configure control and data pins
 * as outputs, wait for the controller's power-up time, then issue the
 * standard init command sequence (function set, display on, clear, entry
 * mode). */
void LCD_Vid_4BitInit(void)
{
DIO_SetBinDirection(PORTCONTROL,RS,output);
DIO_SetBinDirection(PORTCONTROL,RW,output);
DIO_SetBinDirection(PORTCONTROL,E,output);
DIO_SetBinDirection(PORTDATA,0,output);
DIO_SetBinDirection(PORTDATA,1,output);
DIO_SetBinDirection(PORTDATA,2,output);
DIO_SetBinDirection(PORTDATA,3,output);
_delay_ms(30);                                 /* LCD power-up delay */
LCD_Vid_SendCommand4Bit_test(FunctionSet_4bit);
_delay_ms(2);
LCD_Vid_SendCommand4Bit(DisplayON);
_delay_ms(2);
LCD_Vid_SendCommand4Bit(DisplayClear);
_delay_ms(2);
LCD_Vid_SendCommand4Bit(EntryModeSet);
}

/* Write a NUL-terminated string using the 4-bit data path. */
void LCD_displayString_4bit(const char *Str)
{
uint8 i = 0;
while(Str[i] != '\0')
{
LCD_Vid_SendChar4Bit(Str[i]);
i++;
}
}

/* Write a NUL-terminated string using the 8-bit data path
 * (LCD_Vid_SendChar is defined elsewhere in this file). */
void LCD_displayString(const char *Str)
{
uint8 i = 0;
while(Str[i] != '\0')
{
LCD_Vid_SendChar(Str[i]);
i++;
}
}

/* Move the cursor to (row, col).  DDRAM base addresses per row:
 * 0x00, 0x40, 0x10, 0x50 (16x4 panel layout).
 * NOTE(review): a row value > 3 leaves Address uninitialized -- consider
 * adding a default case. */
void LCD_goToRowColumn(uint8 row,uint8 col)
{
uint8 Address;

/* first of all calculate the required address */
switch(row)
{
case 0:
Address=col;
break;
case 1:
Address=col+0x40;
break;
case 2:
Address=col+0x10;
break;
case 3:
Address=col+0x50;
break;
}
/* to write to a specific address in the LCD
 * we need to apply the corresponding command 0b10000000+Address */
LCD_Vid_SendCommand(Address | SET_CURSOR_LOCATION);
}

/* Position the cursor, then print a string (8-bit path). */
void LCD_displayStringRowColumn(uint8 row,uint8 col,const char *Str)
{
LCD_goToRowColumn(row,col); /* go to to the required LCD position */
LCD_displayString(Str); /* display the string */
}

/* Convert a signed integer to decimal ASCII and print it (8-bit path). */
void LCD_intgerToString(int data)
{
char buff[16]; /* String to hold the ascii result */
itoa(data,buff,10); /* 10 for decimal */
LCD_displayString(buff);
}

/* Clear the whole display (8-bit path). */
void LCD_clearScreen(void)
{
LCD_Vid_SendCommand(DisplayClear); //clear display
}

/* Clear the whole display (4-bit path). */
void LCD_clearScreen_4bit(void)
{
LCD_Vid_SendCommand4Bit(DisplayClear); //clear display
}

/* Convert a signed integer to decimal ASCII and print it (4-bit path). */
void LCD_intgerToString_4bit(int data)
{
char buff[16]; /* String to hold the ascii result */
itoa(data,buff,10); /* 10 for decimal */
LCD_displayString_4bit(buff);
}

/* 4-bit variant of LCD_goToRowColumn; same DDRAM address table.
 * NOTE(review): same uninitialized-Address hazard for row > 3. */
void LCD_goToRowColumn_4bit(uint8 row,uint8 col)
{
uint8 Address;

/* first of all calculate the required address */
switch(row)
{
case 0:
Address=col;
break;
case 1:
Address=col+0x40;
break;
case 2:
Address=col+0x10;
break;
case 3:
Address=col+0x50;
break;
}
/* to write to a specific address in the LCD
 * we need to apply the corresponding command 0b10000000+Address */
LCD_Vid_SendCommand4Bit(Address | SET_CURSOR_LOCATION);
}

/* Move cursor to row y (0 or 1 only) and column x; 0x80/0xc0 are the
 * set-DDRAM-address commands for the two rows.
 * NOTE(review): y > 1 reads past the end of arr[2] -- undefined behavior. */
void LCD_voidGoto(u8 y,u8 x)
{
u8 arr[2]={0x80,0xc0};
LCD_Vid_SendCommand4Bit(arr[y] + x);
}
51213.c
/*
 This program checks the sizeof() as defined in AVR for different data types.

 sizeof(type) evaluates to a number, the number of bytes used to represent type.
*/

#include <uart/uart.h>
#include <stdint.h>

/*
 * Print the size in bytes of common integer types and literal constants
 * over UART.
 *
 * Fix: sizeof yields a size_t, which only accidentally matches the "%u"
 * (unsigned int) conversion on AVR.  Passing a size_t through a variadic
 * "%u" is undefined behavior on any platform where size_t != unsigned int,
 * so every value is cast explicitly to unsigned int.
 */
int main(void){
    init_uart();
    print("\n\n");

    /* basic and fixed-width types */
    print("sizeof(int) = %u\n", (unsigned int) sizeof(int));
    print("sizeof(unsigned int) = %u\n", (unsigned int) sizeof(unsigned int));
    print("sizeof(uint8_t) = %u\n", (unsigned int) sizeof(uint8_t));
    print("sizeof(uint32_t) = %u\n", (unsigned int) sizeof(uint32_t));
    print("sizeof(uint64_t) = %u\n", (unsigned int) sizeof(uint64_t));

    /* integer literal constants: suffix determines the literal's type */
    print("sizeof(7) = %u\n", (unsigned int) sizeof(7));
    print("sizeof(7L) = %u\n", (unsigned int) sizeof(7L));
    print("sizeof(7UL) = %u\n", (unsigned int) sizeof(7UL));
    print("sizeof(7LL) = %u\n", (unsigned int) sizeof(7LL));
    print("sizeof(7ULL) = %u\n", (unsigned int) sizeof(7ULL));

    print("\n");
    return 0;
}
76555.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include "ttm_object.h"

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */
struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	/* dma_buf_ops copy with .release hooked to ttm_prime_dmabuf_release;
	 * the driver's original release callback is saved in @dmabuf_release. */
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
	struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */
struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

/* Take a reference on a ttm_object_file. */
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

/* kref release callback: frees the ttm_object_file. */
static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}

/* Drop a reference on a ttm_object_file, NULLing the caller's pointer. */
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}

/*
 * Initialize a base object: assign it an idr handle on the device and add
 * an initial TTM_REF_USAGE ref object for @tfile.  On success the caller's
 * initial reference is transferred to that ref object (note the
 * ttm_base_object_unref() before returning 0).
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	/* handles start at 1 so 0 can never be a valid handle */
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}

/* kref release callback for base objects: unlink from the device idr and
 * hand destruction over to the object's refcount_release hook. */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

/* Drop a reference on a base object, NULLing the caller's pointer. */
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully it needs to be paired with
 * ttm_base_object_noref_release() and no sleeping- or scheduling functions
 * may be called in between these function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);
	if (ret) {
		rcu_read_unlock();
		return NULL;
	}

	/* The RCU read lock is intentionally left held for the caller
	 * (released by ttm_base_object_noref_release()). */
	__release(RCU);
	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);

/* Look up a base object by handle in @tfile's USAGE ref hash and take a
 * reference on it.  Returns NULL if not found or already doomed. */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}

/* Device-wide lookup by handle through the idr; takes a reference on the
 * found object, or returns NULL. */
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object
 * (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
			   struct ttm_base_object *base)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct drm_hash_item *hash;
	struct ttm_ref_object *ref;

	rcu_read_lock();
	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
		goto out_false;

	/*
	 * Verify that the ref object is really pointing to our base object.
	 * Our base object could actually be dead, and the ref object pointing
	 * to another base object with the same handle.
	 */
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (unlikely(base != ref->obj))
		goto out_false;

	/*
	 * Verify that the ref->obj pointer was actually valid!
	 */
	rmb();
	if (unlikely(kref_read(&ref->kref) == 0))
		goto out_false;

	rcu_read_unlock();
	return true;

 out_false:
	rcu_read_unlock();
	return false;
}

/*
 * Add (or re-reference) a ref object of type @ref_type from @tfile to @base.
 * Retries on a lost race between a dying ref object (kref hit zero but not
 * yet removed from the hash) and insertion of a fresh one, hence the
 * while (ret == -EINVAL) loop.  *existed reports whether a ref object was
 * already present; @require_existed refuses to create a new one.
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed,
		       bool require_existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   &ctx);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		/* Lost the insert race: free and retry the lookup. */
		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}

/* kref release callback for ref objects.  Called with tfile->lock held;
 * temporarily drops it around the release hooks (see sparse annotations). */
static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

/* Drop one reference of type @ref_type that @tfile holds on the base
 * object identified by @key.  Returns -EINVAL if no such ref exists. */
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}

/* Release all ref objects held by @tfile (file-close path), tear down its
 * hash tables and drop the file reference. */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);
	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	ttm_object_file_unref(&tfile);
}

/* Allocate and initialize a per-file object-tracking structure with one
 * ref hash table per ref type.  Returns NULL on allocation failure. */
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	/* unwind only the hash tables created before the failure */
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}

/* Allocate and initialize the per-device object-management structure.
 * The driver's dma_buf_ops are copied and the release hook is interposed
 * with ttm_prime_dmabuf_release().  Returns NULL on failure. */
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	idr_init_base(&tdev->idr, 1);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}

/* Tear down a ttm_object_device; warns if any objects are still alive. */
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
	idr_destroy(&tdev->idr);
	drm_ht_remove(&tdev->object_hash);

	kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf:
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/* only dma-bufs exported by this device can be imported back */
	if (dma_buf->ops != &tdev->ops)
		return -ENOSYS;

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->handle;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false
		};
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   &ctx);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}
501525.c
/*
 * Placeholder for the Send path functions
 *
 * Copyright (c) 2020 Virtuozzo International GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "precomp.h"
#include "viosock.h"

#if defined(EVENT_TRACING)
#include "Tx.tmh"
#endif

#ifdef ALLOC_PRAGMA
#pragma alloc_text (PAGE, VIOSockTxVqInit)
#pragma alloc_text (PAGE, VIOSockTxVqCleanup)
#pragma alloc_text (PAGE, VIOSockWriteQueueInit)
#endif

/* Max number of data pages one Tx packet may reference. */
#define VIOSOCK_DMA_TX_PAGES    BYTES_TO_PAGES(VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)

/* One slot of the DMA-sliced Tx packet pool: virtio vsock header plus either
 * the indirect descriptor area (while queued to the device) or, after
 * completion, a list entry + originating request (the union overlays them). */
typedef struct _VIOSOCK_TX_PKT
{
    VIRTIO_VSOCK_HDR Header;
    PHYSICAL_ADDRESS PhysAddr; //packet addr
    WDFDMATRANSACTION Transaction;
    union
    {
        BYTE IndirectDescs[SIZE_OF_SINGLE_INDIRECT_DESC * (1 + VIOSOCK_DMA_TX_PAGES)]; //Header + sglist
        struct
        {
            SINGLE_LIST_ENTRY ListEntry;
            WDFREQUEST Request;
        };
    };
}VIOSOCK_TX_PKT, *PVIOSOCK_TX_PKT;

/* One queued send: either a WDFREQUEST payload write or a request-less
 * control message backed by a lookaside WDFMEMORY allocation. */
typedef struct _VIOSOCK_TX_ENTRY {
    LIST_ENTRY      ListEntry;
    WDFMEMORY       Memory;
    WDFREQUEST      Request;
    WDFFILEOBJECT   Socket;

    ULONG64         dst_cid;
    ULONG32         src_port;
    ULONG32         dst_port;

    ULONG32         len;
    USHORT          op;
    BOOLEAN         reply;
    ULONG32         flags;
}VIOSOCK_TX_ENTRY, *PVIOSOCK_TX_ENTRY;

WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(VIOSOCK_TX_ENTRY, GetRequestTxContext);

VOID
VIOSockTxDequeue(
    PDEVICE_CONTEXT pContext
);

//////////////////////////////////////////////////////////////////////////
/* Release the Tx packet pool and forget the Tx virtqueue pointer.
 * PAGE-able; called at device teardown. */
VOID
VIOSockTxVqCleanup(
    IN PDEVICE_CONTEXT pContext
)
{
    PAGED_CODE();

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    ASSERT(pContext->TxVq);

    if (pContext->TxPktSliced)
    {
        pContext->TxPktSliced->destroy(pContext->TxPktSliced);
        pContext->TxPktSliced = NULL;
        pContext->TxPktNum = 0;
    }
    pContext->TxVq = NULL;
}

/* Query the Tx virtqueue geometry and allocate one DMA-visible
 * VIOSOCK_TX_PKT slice per queue entry.  PAGE-able init-time code.
 * NOTE(review): on slice-allocation failure status is set but TxPktNum is
 * still assigned; callers appear to treat a failed status as fatal --
 * confirm. */
NTSTATUS
VIOSockTxVqInit(
    IN PDEVICE_CONTEXT pContext
)
{
    NTSTATUS status = STATUS_SUCCESS;
    USHORT uNumEntries;
    ULONG uRingSize, uHeapSize, uBufferSize;

    PAGED_CODE();

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    status = virtio_query_queue_allocation(&pContext->VDevice.VIODevice, VIOSOCK_VQ_TX,
        &uNumEntries, &uRingSize, &uHeapSize);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS, "virtio_query_queue_allocation(VIOSOCK_VQ_TX) failed\n");
        pContext->TxVq = NULL;
        return status;
    }

    uBufferSize = sizeof(VIOSOCK_TX_PKT) * uNumEntries;
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_HW_ACCESS, "Allocating sliced buffer of %u bytes for %u Tx packets\n",
        uBufferSize, uNumEntries);

    pContext->TxPktSliced = VirtIOWdfDeviceAllocDmaMemorySliced(&pContext->VDevice.VIODevice,
        uBufferSize, sizeof(VIOSOCK_TX_PKT));

    ASSERT(pContext->TxPktSliced);
    if (!pContext->TxPktSliced)
    {
        /* "butes" is a typo in the original trace string; left as-is to keep
         * the emitted trace text unchanged. */
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS,
            "VirtIOWdfDeviceAllocDmaMemorySliced(%u butes for TxPackets) failed\n", uBufferSize);
        status = STATUS_INSUFFICIENT_RESOURCES;
    }

    pContext->TxPktNum = uNumEntries;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "<-- %s\n", __FUNCTION__);
    return status;
}

/* Refresh the flow-control counters (fwd_cnt / buf_alloc) that are
 * advertised to the peer in every outgoing packet header. */
__inline
VOID
VIOSockRxIncTxPkt(
    IN PSOCKET_CONTEXT pSocket,
    IN OUT PVIRTIO_VSOCK_HDR pPkt
)
{
    pSocket->last_fwd_cnt = pSocket->fwd_cnt;
    pPkt->fwd_cnt = pSocket->fwd_cnt;
    pPkt->buf_alloc = pSocket->buf_alloc;
}

/* Take one packet slot from the sliced pool and fill its header from the
 * Tx entry.  Returns NULL if the pool is exhausted.
 * NOTE(review): pSocket may be NULL (Socket == WDF_NO_HANDLE), yet
 * pSocket->type is dereferenced unconditionally below -- confirm that
 * request-less entries always carry a valid Socket handle. */
static
PVIOSOCK_TX_PKT
VIOSockTxPktAlloc(
    IN PVIOSOCK_TX_ENTRY pTxEntry
)
{
    PHYSICAL_ADDRESS PA;
    PVIOSOCK_TX_PKT pPkt;
    PSOCKET_CONTEXT pSocket = (pTxEntry->Socket != WDF_NO_HANDLE) ? GetSocketContext(pTxEntry->Socket) : NULL;
    PDEVICE_CONTEXT pContext = GetDeviceContextFromSocket(pSocket);

    ASSERT(pContext->TxPktSliced);
    pPkt = pContext->TxPktSliced->get_slice(pContext->TxPktSliced, &PA);
    if (pPkt)
    {
        pPkt->PhysAddr = PA;
        pPkt->Transaction = WDF_NO_HANDLE;
        if (pSocket)
        {
            VIOSockRxIncTxPkt(pSocket, &pPkt->Header);
        }
        pPkt->Header.src_cid = pContext->Config.guest_cid;
        pPkt->Header.dst_cid = pTxEntry->dst_cid;
        pPkt->Header.src_port = pTxEntry->src_port;
        pPkt->Header.dst_port = pTxEntry->dst_port;
        pPkt->Header.len = pTxEntry->len;
        pPkt->Header.type = (USHORT)pSocket->type;
        pPkt->Header.op = pTxEntry->op;
        pPkt->Header.flags = pTxEntry->flags;
    }
    return pPkt;
}

#define VIOSockTxPktFree(cx,va) (cx)->TxPktSliced->return_slice((cx)->TxPktSliced, va)

//TxLock+
/* Queue one packet (header + optional payload scatter/gather list) to the
 * Tx virtqueue, using an indirect descriptor table when there is payload.
 * The "TxLock+"/"TxLock-" markers bracketing this function appear to mean
 * the caller must hold pContext->TxLock -- both call sites below do take
 * it; confirm before relying on this. */
static
BOOLEAN
VIOSockTxPktInsert(
    IN PDEVICE_CONTEXT pContext,
    IN PVIOSOCK_TX_PKT pPkt,
    IN PVIRTIO_DMA_TRANSACTION_PARAMS pParams OPTIONAL
)
{
    VIOSOCK_SG_DESC sg[VIOSOCK_DMA_TX_PAGES + 1];
    ULONG uElements = 1, uPktLen = 0;
    PVOID va_indirect = NULL;
    ULONGLONG phys_indirect = 0;
    PSCATTER_GATHER_LIST SgList = NULL;

    int ret;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "--> %s\n", __FUNCTION__);

    if (pParams)
    {
        ASSERT(pParams->transaction);
        pPkt->Transaction = pParams->transaction;
        SgList = pParams->sgList;
    }

    /* element 0 is always the vsock header */
    sg[0].length = sizeof(VIRTIO_VSOCK_HDR);
    sg[0].physAddr.QuadPart = pPkt->PhysAddr.QuadPart + FIELD_OFFSET(VIOSOCK_TX_PKT, Header);

    if (SgList)
    {
        ULONG i;

        ASSERT(SgList->NumberOfElements <= VIOSOCK_DMA_TX_PAGES);
        for (i = 0; i < SgList->NumberOfElements; i++)
        {
            sg[i + 1].length = SgList->Elements[i].Length;
            sg[i + 1].physAddr = SgList->Elements[i].Address;

            uPktLen += SgList->Elements[i].Length;
            if (uPktLen >= pPkt->Header.len)
            {
                /* trim the last element to the exact packet length */
                sg[++i].length -= uPktLen - pPkt->Header.len;
                break;
            }
        }

        uElements += i;
    }

    if (uElements > 1)
    {
        /* multi-element packets go through an indirect descriptor table
         * embedded in the packet slot itself */
        va_indirect = &pPkt->IndirectDescs;
        phys_indirect = pPkt->PhysAddr.QuadPart + FIELD_OFFSET(VIOSOCK_TX_PKT, IndirectDescs);
    }

    ret = virtqueue_add_buf(pContext->TxVq, sg, uElements, 0, pPkt, va_indirect, phys_indirect);

    ASSERT(ret >= 0);
    if (ret < 0)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS, "Error adding buffer to queue (ret = %d)\n", ret);
        return FALSE;
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_HW_ACCESS, "<-- %s\n", __FUNCTION__);
    return TRUE;
}
//TxLock-

/* Reap completed Tx packets from the virtqueue: free header-only packets
 * immediately; defer DMA-transaction packets to a local completion list so
 * their requests are completed outside TxLock.  Finally pump the pending
 * send queue via VIOSockTxDequeue(). */
VOID
VIOSockTxVqProcess(
    IN PDEVICE_CONTEXT pContext
)
{
    PVIOSOCK_TX_PKT pPkt;
    UINT len;
    SINGLE_LIST_ENTRY CompletionList;
    PSINGLE_LIST_ENTRY CurrentItem;
    NTSTATUS status;
    WDFREQUEST Request;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> %s\n", __FUNCTION__);

    CompletionList.Next = NULL;

    WdfSpinLockAcquire(pContext->TxLock);
    do
    {
        virtqueue_disable_cb(pContext->TxVq);

        while ((pPkt = (PVIOSOCK_TX_PKT)virtqueue_get_buf(pContext->TxVq, &len)) != NULL)
        {
            if (pPkt->Transaction != WDF_NO_HANDLE)
            {
                pPkt->Request = WdfDmaTransactionGetRequest(pPkt->Transaction);

                //postpone to complete transaction
                PushEntryList(&CompletionList, &pPkt->ListEntry);
            }
            else
            {
                //just free packet
                pPkt->Request = WDF_NO_HANDLE;
                VIOSockTxPktFree(pContext, pPkt);
            }
        }
    } while (!virtqueue_enable_cb(pContext->TxVq));

    WdfSpinLockRelease(pContext->TxLock);

    while ((CurrentItem = PopEntryList(&CompletionList)) != NULL)
    {
        pPkt = CONTAINING_RECORD(CurrentItem, VIOSOCK_TX_PKT, ListEntry);

        if (pPkt->Transaction != WDF_NO_HANDLE)
        {
            VirtIOWdfDeviceDmaTxComplete(&pContext->VDevice.VIODevice, pPkt->Transaction);
            if (pPkt->Request != WDF_NO_HANDLE)
                WdfRequestCompleteWithInformation(pPkt->Request, STATUS_SUCCESS, pPkt->Header.len);
        }
        VIOSockTxPktFree(pContext, pPkt);
    };

    VIOSockTxDequeue(pContext);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__);
}

/* DMA mapping callback for payload sends: inserts the now-mapped packet
 * into the virtqueue under TxLock; on failure completes the request and
 * tears the transaction down, on success kicks the device. */
static
BOOLEAN
VIOSockTxDequeueCallback(
    IN PVIRTIO_DMA_TRANSACTION_PARAMS pParams
)
{
    PVIOSOCK_TX_PKT pPkt = pParams->param1;
    PDEVICE_CONTEXT pContext = GetDeviceContextFromSocket((PSOCKET_CONTEXT)pParams->param2);
    BOOLEAN bRes;

    WdfSpinLockAcquire(pContext->TxLock);

    bRes = VIOSockTxPktInsert(pContext, pPkt, pParams);

    WdfSpinLockRelease(pContext->TxLock);

    if (!bRes)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "VIOSockTxPktInsert failed\n");

        VIOSockTxPktFree(pContext, pPkt);
        VirtIOWdfDeviceDmaTxComplete(&pContext->VDevice.VIODevice, pParams->transaction);
        WdfRequestComplete(pParams->req, STATUS_INSUFFICIENT_RESOURCES);
    }
    else
    {
        virtqueue_kick(pContext->TxVq);
    }

    return bRes;
}

/* Drain pContext->TxList while packet slots are available: request-backed
 * entries start an async DMA transaction (completed in the callback above);
 * request-less control entries are inserted directly.  Also maintains the
 * QueuedReply counter that gates Rx processing.
 * NOTE(review): the local `static volatile LONG lInProgress` is never
 * referenced in the visible code -- looks like leftover scaffolding;
 * confirm before removing.  Also note the file-top forward declaration of
 * this function is non-static while the definition is static -- a linkage
 * mismatch worth confirming with the compiler in use. */
static
VOID
VIOSockTxDequeue(
    PDEVICE_CONTEXT pContext
)
{
    static volatile LONG lInProgress;
    BOOLEAN bKick = FALSE, bReply, bRestartRx = FALSE;

    WdfSpinLockAcquire(pContext->TxLock);

    while (!IsListEmpty(&pContext->TxList))
    {
        PVIOSOCK_TX_ENTRY pTxEntry = CONTAINING_RECORD(pContext->TxList.Flink,
            VIOSOCK_TX_ENTRY, ListEntry);
        PSOCKET_CONTEXT pSocket = GetSocketContext(pTxEntry->Socket);
        PVIOSOCK_TX_PKT pPkt = VIOSockTxPktAlloc(pTxEntry);
        NTSTATUS status;

        //can't allocate packet, stop dequeue
        if (!pPkt)
            break;

        RemoveHeadList(&pContext->TxList);

        bReply = pTxEntry->reply;

        if (pTxEntry->Request)
        {
            //payload write: only data-carrying, non-reply entries have requests
            ASSERT(pTxEntry->len && !bReply);
            status = WdfRequestUnmarkCancelable(pTxEntry->Request);

            if (NT_SUCCESS(status))
            {
                VIRTIO_DMA_TRANSACTION_PARAMS params = { 0 };

                WdfSpinLockRelease(pContext->TxLock);

                status = VIOSockTxValidateSocketState(pSocket);

                if (NT_SUCCESS(status))
                {
                    params.req = pTxEntry->Request;
                    params.param1 = pPkt;
                    params.param2 = pSocket;

                    //create transaction
                    if (!VirtIOWdfDeviceDmaTxAsync(&pContext->VDevice.VIODevice,
                        &params, VIOSockTxDequeueCallback))
                    {
                        if (params.transaction)
                            VirtIOWdfDeviceDmaTxComplete(&pContext->VDevice.VIODevice, params.transaction);
                        status = STATUS_INSUFFICIENT_RESOURCES;
                        WdfRequestComplete(pTxEntry->Request, STATUS_INSUFFICIENT_RESOURCES);
                        VIOSockTxPktFree(pContext, pPkt);
                    }
                }

                if (!NT_SUCCESS(status))
                {
                    WdfRequestComplete(pTxEntry->Request, status);
                    VIOSockTxPktFree(pContext, pPkt);
                }

                WdfSpinLockAcquire(pContext->TxLock);
            }
            else
            {
                //request canceled between queuing and dequeue
                ASSERT(status == STATUS_CANCELLED);
                TraceEvents(TRACE_LEVEL_WARNING, DBG_WRITE, "Write request canceled\n");
            }
        }
        else
        {
            //request-less control message backed by lookaside memory
            ASSERT(pTxEntry->Memory != WDF_NO_HANDLE);

            if (VIOSockTxPktInsert(pContext, pPkt, NULL))
            {
                bKick = TRUE;
                WdfObjectDelete(pTxEntry->Memory);
            }
            else
            {
                ASSERT(FALSE);
                TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS, "VIOSockTxPktInsert failed\n");
                //re-queue the entry and retry later
                InsertHeadList(&pContext->TxList, &pTxEntry->ListEntry);
                VIOSockTxPktFree(pContext, pPkt);
                break;
            }

            if (bReply)
            {
                LONG lVal = --pContext->QueuedReply;
                /* Do we now have resources to resume rx processing? */
                bRestartRx = (lVal + 1 == pContext->RxPktNum);
            }
        }
    }

    WdfSpinLockRelease(pContext->TxLock);

    if (bKick)
        virtqueue_kick(pContext->TxVq);

    if (bRestartRx)
        VIOSockRxVqProcess(pContext);
}

//////////////////////////////////////////////////////////////////////////
/* EvtRequestCancel for queued (not yet dequeued) writes: unlink the Tx
 * entry, return its credit and complete the request as cancelled. */
static
VOID
VIOSockTxEnqueueCancel(
    IN WDFREQUEST Request
)
{
    PSOCKET_CONTEXT pSocket = GetSocketContextFromRequest(Request);
    PDEVICE_CONTEXT pContext = GetDeviceContextFromSocket(pSocket);
    PVIOSOCK_TX_ENTRY pTxEntry = GetRequestTxContext(Request);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> %s\n", __FUNCTION__);

    WdfSpinLockAcquire(pContext->TxLock);
    RemoveEntryList(&pTxEntry->ListEntry);
    VIOSockTxPutCredit(pSocket, pTxEntry->len);
    WdfSpinLockRelease(pContext->TxLock);

    WdfRequestComplete(Request, STATUS_CANCELLED);
}

/* Check that a socket is still in a state that allows sending:
 * STATUS_GRACEFUL_DISCONNECT if either side has shut down the relevant
 * direction while closing, STATUS_CONNECTION_INVALID if not connected. */
NTSTATUS
VIOSockTxValidateSocketState(
    PSOCKET_CONTEXT pSocket
)
{
    NTSTATUS status;

    WdfSpinLockAcquire(pSocket->StateLock);
    if (VIOSockStateGet(pSocket) == VIOSOCK_STATE_CLOSING &&
        (pSocket->PeerShutdown & VIRTIO_VSOCK_SHUTDOWN_RCV ||
            pSocket->Shutdown & VIRTIO_VSOCK_SHUTDOWN_SEND))
    {
        status = STATUS_GRACEFUL_DISCONNECT;
    }
    else if (VIOSockStateGet(pSocket) != VIOSOCK_STATE_CONNECTED)
    {
        status = STATUS_CONNECTION_INVALID;
    }
    else
        status = STATUS_SUCCESS;
    WdfSpinLockRelease(pSocket->StateLock);

    return status;
}

/* Queue a send (payload write or control op) for @pSocket.  Loopback
 * sockets short-circuit to the loopback path.  Flow-control credit is
 * reserved under TxLock; data requests are marked cancelable and the
 * entry is appended to TxList before the queue is pumped.
 * (Definition continues past this view.) */
NTSTATUS
VIOSockTxEnqueue(
    IN PSOCKET_CONTEXT  pSocket,
    IN VIRTIO_VSOCK_OP  Op,
    IN ULONG32          Flags OPTIONAL,
    IN BOOLEAN          Reply,
    IN WDFREQUEST       Request OPTIONAL
)
{
    NTSTATUS status;
    PDEVICE_CONTEXT pContext = GetDeviceContextFromSocket(pSocket);
    PVIOSOCK_TX_ENTRY pTxEntry = NULL;
    ULONG uLen;
    WDFMEMORY Memory = WDF_NO_HANDLE;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> %s\n", __FUNCTION__);

    if (IsLoopbackSocket(pSocket))
        return VIOSockLoopbackTxEnqueue(pSocket, Op, Flags, Request,
            (Request == WDF_NO_HANDLE) ? 0 : GetRequestTxContext(Request)->len);

    if (Request == WDF_NO_HANDLE)
    {
        //control message: allocate a Tx entry from the lookaside list
        status = WdfMemoryCreateFromLookaside(pContext->TxMemoryList, &Memory);
        if (NT_SUCCESS(status))
        {
            pTxEntry = WdfMemoryGetBuffer(Memory, NULL);
            pTxEntry->Memory = Memory;
            pTxEntry->Request = WDF_NO_HANDLE;
            pTxEntry->len = 0;
        }
        else
        {
            TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfMemoryCreateFromLookaside failed: 0x%x\n", status);
        }
    }
    else
    {
        //payload write: the Tx entry lives in the request context
        status = VIOSockTxValidateSocketState(pSocket);

        if (NT_SUCCESS(status))
        {
            pTxEntry = GetRequestTxContext(Request);
            pTxEntry->Request = Request;
            pTxEntry->Memory = WDF_NO_HANDLE;
        }
    }

    if (!NT_SUCCESS(status))
        return status;

    ASSERT(pTxEntry);
    pTxEntry->Socket = pSocket->ThisSocket;
    pTxEntry->src_port = pSocket->src_port;
    pTxEntry->dst_cid = pSocket->dst_cid;
    pTxEntry->dst_port = pSocket->dst_port;
    pTxEntry->op = Op;
    pTxEntry->reply = Reply;
    pTxEntry->flags = Flags;

    WdfSpinLockAcquire(pContext->TxLock);

    //reserve peer-advertised credit for the payload (0 for control msgs)
    uLen = VIOSockTxGetCredit(pSocket, pTxEntry->len);
    if (pTxEntry->len && !uLen)
    {
        ASSERT(pTxEntry->Request);
        TraceEvents(TRACE_LEVEL_INFORMATION, DBG_WRITE, "No free space on peer\n");
        WdfSpinLockRelease(pContext->TxLock);
        return STATUS_BUFFER_TOO_SMALL;
    }

    if (Request != WDF_NO_HANDLE)
    {
        pTxEntry->len = uLen;

        status = WdfRequestMarkCancelableEx(Request, VIOSockTxEnqueueCancel);
        if (!NT_SUCCESS(status))
        {
            ASSERT(status == STATUS_CANCELLED);
            TraceEvents(TRACE_LEVEL_INFORMATION, DBG_WRITE, "WdfRequestMarkCancelableEx failed: 0x%x\n", status);

            VIOSockTxPutCredit(pSocket, pTxEntry->len);
            WdfSpinLockRelease(pContext->TxLock);
            return status;
        }
    }

    if (pTxEntry->reply)
        pContext->QueuedReply++;

    InsertTailList(&pContext->TxList, &pTxEntry->ListEntry);

    WdfSpinLockRelease(pContext->TxLock);

    VIOSockTxVqProcess(pContext);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__);
return status; } ////////////////////////////////////////////////////////////////////////// static VOID VIOSockWriteEnqueue( IN PDEVICE_CONTEXT pContext, IN WDFREQUEST Request, IN size_t stLength ) { NTSTATUS status; WDF_OBJECT_ATTRIBUTES attributes; PSOCKET_CONTEXT pSocket = GetSocketContextFromRequest(Request); PVIOSOCK_TX_ENTRY pRequest; TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> %s\n", __FUNCTION__); if (stLength > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) stLength = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE( &attributes, VIOSOCK_TX_ENTRY ); status = WdfObjectAllocateContext( Request, &attributes, &pRequest ); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfObjectAllocateContext failed: 0x%x\n", status); WdfRequestComplete(Request, status); return; } else { pRequest->len = (ULONG32)stLength; } status = VIOSockSendWrite(pSocket, Request); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "VIOSockSendWrite failed: 0x%x\n", status); WdfRequestComplete(Request, status); } TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__); } static VOID VIOSockWrite( IN WDFQUEUE Queue, IN WDFREQUEST Request, IN size_t Length ) { VIOSockWriteEnqueue(GetDeviceContext(WdfIoQueueGetDevice(Queue)), Request, Length); } static VOID VIOSockWriteIoStop(IN WDFQUEUE Queue, IN WDFREQUEST Request, IN ULONG ActionFlags) { if (ActionFlags & WdfRequestStopActionSuspend) { WdfRequestStopAcknowledge(Request, FALSE); } else if (ActionFlags & WdfRequestStopActionPurge) { if (ActionFlags & WdfRequestStopRequestCancelable) { if (WdfRequestUnmarkCancelable(Request) != STATUS_CANCELLED) { WdfRequestComplete(Request, STATUS_CANCELLED); } } } } NTSTATUS VIOSockWriteQueueInit( IN WDFDEVICE hDevice ) { PDEVICE_CONTEXT pContext = GetDeviceContext(hDevice); WDF_IO_QUEUE_CONFIG queueConfig; NTSTATUS status; WDF_OBJECT_ATTRIBUTES lockAttributes, memAttributes; PAGED_CODE(); TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> 
%s\n", __FUNCTION__); WDF_OBJECT_ATTRIBUTES_INIT(&lockAttributes); lockAttributes.ParentObject = pContext->ThisDevice; status = WdfSpinLockCreate( &lockAttributes, &pContext->TxLock ); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfSpinLockCreate failed: 0x%x\n", status); return FALSE; } WDF_OBJECT_ATTRIBUTES_INIT(&memAttributes); memAttributes.ParentObject = pContext->ThisDevice; status = WdfLookasideListCreate(&memAttributes, sizeof(VIOSOCK_TX_ENTRY), NonPagedPoolNx, &memAttributes, VIOSOCK_DRIVER_MEMORY_TAG, &pContext->TxMemoryList); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_INIT, "WdfLookasideListCreate failed: 0x%x\n", status); return status; } InitializeListHead(&pContext->TxList); WDF_IO_QUEUE_CONFIG_INIT(&queueConfig, WdfIoQueueDispatchParallel ); queueConfig.EvtIoWrite = VIOSockWrite; queueConfig.EvtIoStop = VIOSockWriteIoStop; queueConfig.AllowZeroLengthRequests = WdfFalse; status = WdfIoQueueCreate(hDevice, &queueConfig, WDF_NO_OBJECT_ATTRIBUTES, &pContext->WriteQueue ); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfIoQueueCreate failed (Write Queue): 0x%x\n", status); return status; } status = WdfDeviceConfigureRequestDispatching(hDevice, pContext->WriteQueue, WdfRequestTypeWrite); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfDeviceConfigureRequestDispatching failed (Write Queue): 0x%x\n", status); return status; } TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__); return STATUS_SUCCESS; } ////////////////////////////////////////////////////////////////////////// NTSTATUS VIOSockSendResetNoSock( IN PDEVICE_CONTEXT pContext, IN PVIRTIO_VSOCK_HDR pHeader ) { PVIOSOCK_TX_ENTRY pTxEntry; NTSTATUS status; WDFMEMORY Memory; TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "--> %s\n", __FUNCTION__); /* Send RST only if the original pkt is not a RST pkt */ if (pHeader->op == VIRTIO_VSOCK_OP_RST) return STATUS_SUCCESS; status = 
WdfMemoryCreateFromLookaside(pContext->TxMemoryList, &Memory); if (!NT_SUCCESS(status)) { TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE, "WdfMemoryCreateFromLookaside failed: 0x%x\n", status); return status; } pTxEntry = WdfMemoryGetBuffer(Memory, NULL); pTxEntry->Memory = Memory; pTxEntry->Request = WDF_NO_HANDLE; pTxEntry->len = 0; pTxEntry->src_port = pHeader->dst_port; pTxEntry->dst_cid = pHeader->src_cid; pTxEntry->dst_port = pHeader->src_port; pTxEntry->Socket = WDF_NO_HANDLE; pTxEntry->op = VIRTIO_VSOCK_OP_RST; pTxEntry->reply = FALSE; pTxEntry->flags = 0; WdfSpinLockAcquire(pContext->TxLock); InsertTailList(&pContext->TxList, &pTxEntry->ListEntry); WdfSpinLockRelease(pContext->TxLock); VIOSockTxVqProcess(pContext); TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__); return status; }
181181.c
// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */ #include "core.h" #include "debug.h" #include "mac.h" #include "hw.h" #include "wmi.h" #include "wmi-ops.h" #include "wmi-tlv.h" #include "p2p.h" #include "testmode.h" #include <linux/bitfield.h> /***************/ /* TLV helpers */ /**************/ struct wmi_tlv_policy { size_t min_len; }; static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TLV_TAG_ARRAY_BYTE] = { .min_len = 0 }, [WMI_TLV_TAG_ARRAY_UINT32] = { .min_len = 0 }, [WMI_TLV_TAG_STRUCT_SCAN_EVENT] = { .min_len = sizeof(struct wmi_scan_event) }, [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR] = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) }, [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT] = { .min_len = sizeof(struct wmi_chan_info_event) }, [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT] = { .min_len = sizeof(struct wmi_vdev_start_response_event) }, [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT] = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT] = { .min_len = sizeof(struct wmi_host_swba_event) }, [WMI_TLV_TAG_STRUCT_TIM_INFO] = { .min_len = sizeof(struct wmi_tim_info) }, [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO] = { .min_len = sizeof(struct wmi_p2p_noa_info) }, [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT] = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) }, [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES] = { .min_len = sizeof(struct hal_reg_capabilities) }, [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ] = { .min_len = sizeof(struct wlan_host_mem_req) }, [WMI_TLV_TAG_STRUCT_READY_EVENT] = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT] = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) }, [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT] = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) }, [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT] = { .min_len = 
sizeof(struct wmi_tlv_p2p_noa_ev) }, [WMI_TLV_TAG_STRUCT_ROAM_EVENT] = { .min_len = sizeof(struct wmi_tlv_roam_ev) }, [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO] = { .min_len = sizeof(struct wmi_tlv_wow_event_info) }, [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT] = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) }, }; static int ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len, int (*iter)(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data), void *data) { const void *begin = ptr; const struct wmi_tlv *tlv; u16 tlv_tag, tlv_len; int ret; while (len > 0) { if (len < sizeof(*tlv)) { ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", ptr - begin, len, sizeof(*tlv)); return -EINVAL; } tlv = ptr; tlv_tag = __le16_to_cpu(tlv->tag); tlv_len = __le16_to_cpu(tlv->len); ptr += sizeof(*tlv); len -= sizeof(*tlv); if (tlv_len > len) { ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", tlv_tag, ptr - begin, len, tlv_len); return -EINVAL; } if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && wmi_tlv_policies[tlv_tag].min_len && wmi_tlv_policies[tlv_tag].min_len > tlv_len) { ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n", tlv_tag, ptr - begin, tlv_len, wmi_tlv_policies[tlv_tag].min_len); return -EINVAL; } ret = iter(ar, tlv_tag, tlv_len, ptr, data); if (ret) return ret; ptr += tlv_len; len -= tlv_len; } return 0; } static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data) { const void **tb = data; if (tag < WMI_TLV_TAG_MAX) tb[tag] = ptr; return 0; } static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, const void *ptr, size_t len) { return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse, (void *)tb); } static const void ** ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr, size_t len, gfp_t gfp) { const 
void **tb; int ret; tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp); if (!tb) return ERR_PTR(-ENOMEM); ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); if (ret) { kfree(tb); return ERR_PTR(ret); } return tb; } static u16 ath10k_wmi_tlv_len(const void *ptr) { return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); } /**************/ /* TLV events */ /**************/ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const struct wmi_tlv_bcn_tx_status_ev *ev; struct ath10k_vif *arvif; u32 vdev_id, tx_status; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; if (!ev) { kfree(tb); return -EPROTO; } tx_status = __le32_to_cpu(ev->tx_status); vdev_id = __le32_to_cpu(ev->vdev_id); switch (tx_status) { case WMI_TLV_BCN_TX_STATUS_OK: break; case WMI_TLV_BCN_TX_STATUS_XRETRY: case WMI_TLV_BCN_TX_STATUS_DROP: case WMI_TLV_BCN_TX_STATUS_FILTERED: /* FIXME: It's probably worth telling mac80211 to stop the * interface as it is crippled. 
*/ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d", vdev_id, tx_status); break; } arvif = ath10k_get_arvif(ar, vdev_id); if (arvif && arvif->is_up && arvif->vif->csa_active) ieee80211_queue_work(ar->hw, &arvif->ap_csa_work); kfree(tb); return 0; } static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar, struct sk_buff *skb) { ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n"); complete(&ar->vdev_delete_done); } static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data) { const struct wmi_tlv_peer_stats_info *stat = ptr; struct ieee80211_sta *sta; struct ath10k_sta *arsta; if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO) return -EPROTO; ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n", stat->peer_macaddr.addr, __le32_to_cpu(stat->last_rx_rate_code), __le32_to_cpu(stat->last_rx_bitrate_kbps)); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n", __le32_to_cpu(stat->last_tx_rate_code), __le32_to_cpu(stat->last_tx_bitrate_kbps)); rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL); if (!sta) { rcu_read_unlock(); ath10k_warn(ar, "not found station for peer stats\n"); return -EINVAL; } arsta = (struct ath10k_sta *)sta->drv_priv; arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code); arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps); arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code); arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps); rcu_read_unlock(); return 0; } static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const struct wmi_tlv_peer_stats_info_ev *ev; const void *data; u32 num_peer_stats; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", 
ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT]; data = tb[WMI_TLV_TAG_ARRAY_STRUCT]; if (!ev || !data) { kfree(tb); return -EPROTO; } num_peer_stats = __le32_to_cpu(ev->num_peers); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n", __le32_to_cpu(ev->vdev_id), num_peer_stats, __le32_to_cpu(ev->more_data)); ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data), ath10k_wmi_tlv_parse_peer_stats_info, NULL); if (ret) ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret); kfree(tb); return 0; } static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar, struct sk_buff *skb) { ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n"); ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb); complete(&ar->peer_stats_info_complete); } static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const struct wmi_tlv_diag_data_ev *ev; const struct wmi_tlv_diag_item *item; const void *data; int ret, num_items, len; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]; data = tb[WMI_TLV_TAG_ARRAY_BYTE]; if (!ev || !data) { kfree(tb); return -EPROTO; } num_items = __le32_to_cpu(ev->num_items); len = ath10k_wmi_tlv_len(data); while (num_items--) { if (len == 0) break; if (len < sizeof(*item)) { ath10k_warn(ar, "failed to parse diag data: can't fit item header\n"); break; } item = data; if (len < sizeof(*item) + __le16_to_cpu(item->len)) { ath10k_warn(ar, "failed to parse diag data: item is too long\n"); break; } trace_ath10k_wmi_diag_container(ar, item->type, __le32_to_cpu(item->timestamp), __le32_to_cpu(item->code), __le16_to_cpu(item->len), item->payload); len -= sizeof(*item); len -= roundup(__le16_to_cpu(item->len), 4); data += sizeof(*item); data += 
roundup(__le16_to_cpu(item->len), 4); } if (num_items != -1 || len != 0) ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n", num_items, len); kfree(tb); return 0; } static int ath10k_wmi_tlv_event_diag(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const void *data; int ret, len; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } data = tb[WMI_TLV_TAG_ARRAY_BYTE]; if (!data) { kfree(tb); return -EPROTO; } len = ath10k_wmi_tlv_len(data); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len); trace_ath10k_wmi_diag(ar, data, len); kfree(tb); return 0; } static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const struct wmi_tlv_p2p_noa_ev *ev; const struct wmi_p2p_noa_info *noa; int ret, vdev_id; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]; noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]; if (!ev || !noa) { kfree(tb); return -EPROTO; } vdev_id = __le32_to_cpu(ev->vdev_id); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p noa vdev_id %i descriptors %hhu\n", vdev_id, noa->num_descriptors); ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); kfree(tb); return 0; } static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar, struct sk_buff *skb) { const void **tb; const struct wmi_tlv_tx_pause_ev *ev; int ret, vdev_id; u32 pause_id, action, vdev_map, peer_id, tid_map; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]; if (!ev) { kfree(tb); return -EPROTO; } pause_id = __le32_to_cpu(ev->pause_id); action = __le32_to_cpu(ev->action); vdev_map = 
__le32_to_cpu(ev->vdev_map); peer_id = __le32_to_cpu(ev->peer_id); tid_map = __le32_to_cpu(ev->tid_map); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n", pause_id, action, vdev_map, peer_id, tid_map); switch (pause_id) { case WMI_TLV_TX_PAUSE_ID_MCC: case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: case WMI_TLV_TX_PAUSE_ID_AP_PS: case WMI_TLV_TX_PAUSE_ID_IBSS_PS: for (vdev_id = 0; vdev_map; vdev_id++) { if (!(vdev_map & BIT(vdev_id))) continue; vdev_map &= ~BIT(vdev_id); ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id, action); } break; case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: case WMI_TLV_TX_PAUSE_ID_HOST: ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ignoring unsupported tx pause id %d\n", pause_id); break; default: ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ignoring unknown tx pause vdev %d\n", pause_id); break; } kfree(tb); return 0; } static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar, struct sk_buff *skb) { const struct wmi_tlv_rfkill_state_change_ev *ev; const void **tb; bool radio; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse rfkill state change event: %d\n", ret); return; } ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT]; if (!ev) { kfree(tb); return; } ath10k_dbg(ar, ATH10K_DBG_MAC, "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", __le32_to_cpu(ev->gpio_pin_num), __le32_to_cpu(ev->int_type), __le32_to_cpu(ev->radio_state)); radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON); spin_lock_bh(&ar->data_lock); if (!radio) ar->hw_rfkill_on = true; spin_unlock_bh(&ar->data_lock); /* notify cfg80211 radio state change */ ath10k_mac_rfkill_enable_radio(ar, radio); wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio); } static int 
ath10k_wmi_tlv_event_temperature(struct ath10k *ar, struct sk_buff *skb) { const struct wmi_tlv_pdev_temperature_event *ev; ev = (struct wmi_tlv_pdev_temperature_event *)skb->data; if (WARN_ON(skb->len < sizeof(*ev))) return -EPROTO; ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); return 0; } static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_sta *station; const struct wmi_tlv_tdls_peer_event *ev; const void **tb; struct ath10k_vif *arvif; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ath10k_warn(ar, "tdls peer failed to parse tlv"); return; } ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT]; if (!ev) { kfree(tb); ath10k_warn(ar, "tdls peer NULL event"); return; } switch (__le32_to_cpu(ev->peer_reason)) { case WMI_TDLS_TEARDOWN_REASON_TX: case WMI_TDLS_TEARDOWN_REASON_RSSI: case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: rcu_read_lock(); station = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); if (!station) { ath10k_warn(ar, "did not find station from tdls peer event"); goto exit; } arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id)); ieee80211_tdls_oper_request( arvif->vif, station->addr, NL80211_TDLS_TEARDOWN, WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, GFP_ATOMIC ); break; default: kfree(tb); return; } exit: rcu_read_unlock(); kfree(tb); } static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar, struct sk_buff *skb) { struct wmi_peer_delete_resp_ev_arg *arg; struct wmi_tlv *tlv_hdr; tlv_hdr = (struct wmi_tlv *)skb->data; arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value; ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id); ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n"); complete(&ar->peer_delete_done); return 0; } /***********/ /* TLV ops */ /***********/ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) { 
struct wmi_cmd_hdr *cmd_hdr; enum wmi_tlv_event_id id; bool consumed; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) goto out; trace_ath10k_wmi_event(ar, id, skb->data, skb->len); consumed = ath10k_tm_event_wmi(ar, id, skb); /* Ready event must be handled normally also in UTF mode so that we * know the UTF firmware has booted, others we are just bypass WMI * events to testmode. */ if (consumed && id != WMI_TLV_READY_EVENTID) { ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv testmode consumed 0x%x\n", id); goto out; } switch (id) { case WMI_TLV_MGMT_RX_EVENTID: ath10k_wmi_event_mgmt_rx(ar, skb); /* mgmt_rx() owns the skb now! */ return; case WMI_TLV_SCAN_EVENTID: ath10k_wmi_event_scan(ar, skb); break; case WMI_TLV_CHAN_INFO_EVENTID: ath10k_wmi_event_chan_info(ar, skb); break; case WMI_TLV_ECHO_EVENTID: ath10k_wmi_event_echo(ar, skb); break; case WMI_TLV_DEBUG_MESG_EVENTID: ath10k_wmi_event_debug_mesg(ar, skb); break; case WMI_TLV_UPDATE_STATS_EVENTID: ath10k_wmi_event_update_stats(ar, skb); break; case WMI_TLV_PEER_STATS_INFO_EVENTID: ath10k_wmi_tlv_event_peer_stats_info(ar, skb); break; case WMI_TLV_VDEV_START_RESP_EVENTID: ath10k_wmi_event_vdev_start_resp(ar, skb); break; case WMI_TLV_VDEV_STOPPED_EVENTID: ath10k_wmi_event_vdev_stopped(ar, skb); break; case WMI_TLV_VDEV_DELETE_RESP_EVENTID: ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb); break; case WMI_TLV_PEER_STA_KICKOUT_EVENTID: ath10k_wmi_event_peer_sta_kickout(ar, skb); break; case WMI_TLV_HOST_SWBA_EVENTID: ath10k_wmi_event_host_swba(ar, skb); break; case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID: ath10k_wmi_event_tbttoffset_update(ar, skb); break; case WMI_TLV_PHYERR_EVENTID: ath10k_wmi_event_phyerr(ar, skb); break; case WMI_TLV_ROAM_EVENTID: ath10k_wmi_event_roam(ar, skb); break; case WMI_TLV_PROFILE_MATCH: ath10k_wmi_event_profile_match(ar, skb); break; case WMI_TLV_DEBUG_PRINT_EVENTID: 
ath10k_wmi_event_debug_print(ar, skb); break; case WMI_TLV_PDEV_QVIT_EVENTID: ath10k_wmi_event_pdev_qvit(ar, skb); break; case WMI_TLV_WLAN_PROFILE_DATA_EVENTID: ath10k_wmi_event_wlan_profile_data(ar, skb); break; case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID: ath10k_wmi_event_rtt_measurement_report(ar, skb); break; case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID: ath10k_wmi_event_tsf_measurement_report(ar, skb); break; case WMI_TLV_RTT_ERROR_REPORT_EVENTID: ath10k_wmi_event_rtt_error_report(ar, skb); break; case WMI_TLV_WOW_WAKEUP_HOST_EVENTID: ath10k_wmi_event_wow_wakeup_host(ar, skb); break; case WMI_TLV_DCS_INTERFERENCE_EVENTID: ath10k_wmi_event_dcs_interference(ar, skb); break; case WMI_TLV_PDEV_TPC_CONFIG_EVENTID: ath10k_wmi_event_pdev_tpc_config(ar, skb); break; case WMI_TLV_PDEV_FTM_INTG_EVENTID: ath10k_wmi_event_pdev_ftm_intg(ar, skb); break; case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID: ath10k_wmi_event_gtk_offload_status(ar, skb); break; case WMI_TLV_GTK_REKEY_FAIL_EVENTID: ath10k_wmi_event_gtk_rekey_fail(ar, skb); break; case WMI_TLV_TX_DELBA_COMPLETE_EVENTID: ath10k_wmi_event_delba_complete(ar, skb); break; case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID: ath10k_wmi_event_addba_complete(ar, skb); break; case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID: ath10k_wmi_event_vdev_install_key_complete(ar, skb); break; case WMI_TLV_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); return; case WMI_TLV_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; case WMI_TLV_SERVICE_AVAILABLE_EVENTID: ath10k_wmi_event_service_available(ar, skb); break; case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); break; case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID: ath10k_wmi_tlv_event_diag_data(ar, skb); break; case WMI_TLV_DIAG_EVENTID: ath10k_wmi_tlv_event_diag(ar, skb); break; case WMI_TLV_P2P_NOA_EVENTID: ath10k_wmi_tlv_event_p2p_noa(ar, skb); break; case WMI_TLV_TX_PAUSE_EVENTID: ath10k_wmi_tlv_event_tx_pause(ar, skb); break; case 
WMI_TLV_RFKILL_STATE_CHANGE_EVENTID: ath10k_wmi_tlv_event_rfkill_state_change(ar, skb); break; case WMI_TLV_PDEV_TEMPERATURE_EVENTID: ath10k_wmi_tlv_event_temperature(ar, skb); break; case WMI_TLV_TDLS_PEER_EVENTID: ath10k_wmi_event_tdls_peer(ar, skb); break; case WMI_TLV_PEER_DELETE_RESP_EVENTID: ath10k_wmi_tlv_event_peer_delete_resp(ar, skb); break; case WMI_TLV_MGMT_TX_COMPLETION_EVENTID: ath10k_wmi_event_mgmt_tx_compl(ar, skb); break; case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID: ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb); break; default: ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id); break; } out: dev_kfree_skb(skb); } static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_scan_ev_arg *arg) { const void **tb; const struct wmi_scan_event *ev; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT]; if (!ev) { kfree(tb); return -EPROTO; } arg->event_type = ev->event_type; arg->reason = ev->reason; arg->channel_freq = ev->channel_freq; arg->scan_req_id = ev->scan_req_id; arg->scan_id = ev->scan_id; arg->vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) { const void **tb; const struct wmi_tlv_mgmt_tx_compl_ev *ev; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; arg->desc_id = ev->desc_id; arg->status = ev->status; arg->pdev_id = ev->pdev_id; arg->ppdu_id = ev->ppdu_id; if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) arg->ack_rssi = ev->ack_rssi; kfree(tb); return 0; } struct wmi_tlv_tx_bundle_compl_parse { const __le32 
*num_reports; const __le32 *desc_ids; const __le32 *status; const __le32 *ppdu_ids; const __le32 *ack_rssi; bool desc_ids_done; bool status_done; bool ppdu_ids_done; bool ack_rssi_done; }; static int ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data; switch (tag) { case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT: bundle_tx_compl->num_reports = ptr; break; case WMI_TLV_TAG_ARRAY_UINT32: if (!bundle_tx_compl->desc_ids_done) { bundle_tx_compl->desc_ids_done = true; bundle_tx_compl->desc_ids = ptr; } else if (!bundle_tx_compl->status_done) { bundle_tx_compl->status_done = true; bundle_tx_compl->status = ptr; } else if (!bundle_tx_compl->ppdu_ids_done) { bundle_tx_compl->ppdu_ids_done = true; bundle_tx_compl->ppdu_ids = ptr; } else if (!bundle_tx_compl->ack_rssi_done) { bundle_tx_compl->ack_rssi_done = true; bundle_tx_compl->ack_rssi = ptr; } break; default: break; } return 0; } static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev( struct ath10k *ar, struct sk_buff *skb, struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg) { struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { }; int ret; ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse, &bundle_tx_compl); if (ret) { ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids || !bundle_tx_compl.status) return -EPROTO; arg->num_reports = *bundle_tx_compl.num_reports; arg->desc_ids = bundle_tx_compl.desc_ids; arg->status = bundle_tx_compl.status; arg->ppdu_ids = bundle_tx_compl.ppdu_ids; if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) arg->ack_rssi = bundle_tx_compl.ack_rssi; return 0; } static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) { const void **tb; const struct wmi_tlv_mgmt_rx_ev *ev; const u8 
*frame;
	u32 msdu_len;
	int ret, i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	/* Header fields are copied out still in little-endian form; the
	 * generic WMI layer converts them.
	 */
	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
		arg->rssi[i] = ev->rssi[i];

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* Reject events whose claimed frame length overruns the skb. */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}

/* Pull a channel info event into @arg. mac_clk_mhz is only present on
 * firmware that reports a single chan-info per channel.
 */
static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_ch_info_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_chan_info_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->err_code = ev->err_code;
	arg->freq = ev->freq;
	arg->cmd_flags = ev->cmd_flags;
	arg->noise_floor = ev->noise_floor;
	arg->rx_clear_count = ev->rx_clear_count;
	arg->cycle_count = ev->cycle_count;
	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
		     ar->running_fw->fw_file.fw_features))
		arg->mac_clk_mhz = ev->mac_clk_mhz;

	kfree(tb);
	return 0;
}

/* Pull a vdev start response event into @arg. */
static int
ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar,
				     struct sk_buff *skb,
				     struct wmi_vdev_start_ev_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_start_response_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->req_id = ev->req_id;
	arg->resp_type = ev->resp_type;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}

/* Pull a peer STA kickout event; @arg->mac_addr points into the skb. */
static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
					       struct sk_buff *skb,
					       struct wmi_peer_kick_ev_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Scratch state for parsing a software beacon alert (SWBA) event: the
 * fixed header plus per-vdev TIM and NoA arrays.
 */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;
	bool tim_done;
	bool noa_done;
	size_t n_tim;
	size_t n_noa;
	struct wmi_swba_ev_arg *arg;
};

/* Nested TLV callback: capture one per-vdev TIM info structure. */
static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	struct wmi_tim_info_arg *tim_info_arg;
	const struct wmi_tim_info *tim_info_ev = ptr;

	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
		return -EPROTO;

	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
		return -ENOBUFS;

	/* Bound the advertised TIM length by the structure's bitmap size to
	 * avoid reading past the event.
	 */
	if (__le32_to_cpu(tim_info_ev->tim_len) >
	     sizeof(tim_info_ev->tim_bitmap)) {
		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
		return -EPROTO;
	}

	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
	tim_info_arg->tim_len = tim_info_ev->tim_len;
	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;

	swba->n_tim++;

	return 0;
}

/* Nested TLV callback: capture one per-vdev P2P NoA info pointer.
 * (continues on next lines)
 */
static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;

	if (tag !=
WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
		return -EPROTO;

	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
		return -ENOBUFS;

	swba->arg->noa_info[swba->n_noa++] = ptr;
	return 0;
}

/* Top-level SWBA TLV callback: the first ARRAY_STRUCT holds TIM entries,
 * the second holds NoA entries; each is parsed with its own nested iterator.
 */
static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	int ret;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
		swba->ev = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		if (!swba->tim_done) {
			swba->tim_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_tim_parse,
						  swba);
			if (ret)
				return ret;
		} else if (!swba->noa_done) {
			swba->noa_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_noa_parse,
						  swba);
			if (ret)
				return ret;
		}
		break;
	default:
		break;
	}
	return 0;
}

/* Pull a SWBA event and sanity-check that the number of TIM and NoA entries
 * matches the number of bits set in the vdev map.
 */
static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_swba_ev_arg *arg)
{
	struct wmi_tlv_swba_parse swba = { .arg = arg };
	u32 map;
	size_t n_vdevs;
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_swba_parse, &swba);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	if (!swba.ev)
		return -EPROTO;

	arg->vdev_map = swba.ev->vdev_map;

	/* popcount of the vdev map */
	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
		if (map & BIT(0))
			n_vdevs++;

	if (n_vdevs != swba.n_tim ||
	    n_vdevs != swba.n_noa)
		return -EPROTO;

	return 0;
}

/* Pull the combined PHY error header; @arg->phyerrs points at the raw
 * per-error payload that follows.
 */
static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
						struct sk_buff *skb,
						struct wmi_phyerr_hdr_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_phyerr_ev *ev;
	const void *phyerrs;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !phyerrs) {
		kfree(tb);
		return -EPROTO;
	}

	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
	arg->buf_len = __le32_to_cpu(ev->buf_len);
	arg->phyerrs = phyerrs;

	kfree(tb);
	return 0;
}

/* WMI-TLV ABI version handshake constants exchanged with firmware in the
 * init command and checked against the service ready event.
 */
#define WMI_TLV_ABI_VER_NS0 0x5F414351
#define WMI_TLV_ABI_VER_NS1 0x00004C4D
#define WMI_TLV_ABI_VER_NS2 0x00000000
#define WMI_TLV_ABI_VER_NS3 0x00000000

#define WMI_TLV_ABI_VER0_MAJOR 1
#define WMI_TLV_ABI_VER0_MINOR 0
/* ver0 packs major into the top byte and minor into the low 24 bits */
#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
#define WMI_TLV_ABI_VER1 53

/* Nested TLV callback: append one host memory request to the fixed-size
 * mem_reqs array; -ENOMEM when the array is full.
 */
static int
ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
			      const void *ptr, void *data)
{
	struct wmi_svc_rdy_ev_arg *arg = data;
	int i;

	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
		return -EPROTO;

	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
		if (!arg->mem_reqs[i]) {
			arg->mem_reqs[i] = ptr;
			return 0;
		}
	}

	return -ENOMEM;
}

/* Scratch state for parsing a service ready event. */
struct wmi_tlv_svc_rdy_parse {
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	bool svc_bmap_done;
	bool dbs_hw_mode_done;
};

/* Top-level service-ready TLV callback. The first ARRAY_UINT32 is the
 * service bitmap; a second one (DBS hw mode list) is recognised but ignored.
 */
static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
		svc_rdy->ev = ptr;
		break;
	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
		svc_rdy->reg = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		svc_rdy->mem_reqs = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_UINT32:
		if (!svc_rdy->svc_bmap_done) {
			svc_rdy->svc_bmap_done = true;
			svc_rdy->svc_bmap = ptr;
		} else if (!svc_rdy->dbs_hw_mode_done) {
			svc_rdy->dbs_hw_mode_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

/* Pull the service ready event: verify the WMI-TLV ABI, then copy out
 * capabilities, regulatory info, the service bitmap and host memory
 * requests. (continues on next lines)
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
	int ret;

	ret =
ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = svc_rdy.ev;
	reg = svc_rdy.reg;
	svc_bmap = svc_rdy.svc_bmap;
	mem_reqs = svc_rdy.mem_reqs;

	if (!ev || !reg || !svc_bmap || !mem_reqs)
		return -EPROTO;

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		return -ENOTSUPP;
	}

	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->vht_supp_mcs = ev->vht_supp_mcs;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->low_2ghz_chan = reg->low_2ghz_chan;
	arg->high_2ghz_chan = reg->high_2ghz_chan;
	arg->low_5ghz_chan = reg->low_5ghz_chan;
	arg->high_5ghz_chan = reg->high_5ghz_chan;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
	arg->sys_cap_info = ev->sys_cap_info;

	/* Iterate the nested mem_reqs array into arg->mem_reqs. */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Pull the firmware ready event: version info, status and the device MAC. */
static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
					 struct sk_buff *skb,
					 struct wmi_rdy_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_rdy_ev *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->sw_version = ev->abi.abi_ver0;
	arg->abi_version = ev->abi.abi_ver1;
	arg->status = ev->status;
	arg->mac_addr = ev->mac_addr.addr;

	kfree(tb);
	return 0;
}

/* TLV callback for the service available event: the payload is a length
 * word followed by the extended service bitmap.
 */
static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct wmi_svc_avail_ev_arg *arg = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
		arg->service_map_ext_valid = true;
		arg->service_map_ext_len = *(__le32 *)ptr;
		arg->service_map_ext = ptr + sizeof(__le32);
		return 0;
	default:
		break;
	}

	return 0;
}

/* Pull the service available (extended service bitmap) event. */
static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
					    struct sk_buff *skb,
					    struct wmi_svc_avail_ev_arg *arg)
{
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_svc_avail_parse, arg);

	if (ret) {
		ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Convert one little-endian TLV vdev stats record into host-order
 * fw-stats form. (continues on next lines)
 */
static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
					   struct ath10k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = __le32_to_cpu(src->vdev_id);
	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
	dst->data_snr = __le32_to_cpu(src->data_snr);
	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);

	for (i = 0; i <
ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] =
			__le32_to_cpu(src->num_tx_frames[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] =
			__le32_to_cpu(src->num_tx_frames_retries[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] =
			__le32_to_cpu(src->num_tx_frames_failures[i]);

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] =
			__le32_to_cpu(src->tx_rate_history[i]);

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] =
			__le32_to_cpu(src->beacon_rssi_history[i]);
}

/* Pull a firmware stats event: walks the raw byte array sequentially
 * (pdev, vdev, then peer records), allocating a host-side record for each
 * and appending it to the corresponding list in @stats. Per-record
 * allocation failures are skipped, not fatal.
 */
static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
					   struct sk_buff *skb,
					   struct ath10k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_tlv_stats_ev *ev;
	u32 num_peer_stats_extd;
	const void *data;
	u32 num_pdev_stats;
	u32 num_vdev_stats;
	u32 num_peer_stats;
	u32 num_bcnflt_stats;
	u32 num_chan_stats;
	size_t data_len;
	u32 stats_id;
	int ret;
	int i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	data_len = ath10k_wmi_tlv_len(data);
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
	stats_id = __le32_to_cpu(ev->stats_id);
	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
		   num_pdev_stats, num_vdev_stats, num_peer_stats,
		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = data;
		/* guard against a record count exceeding the payload */
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < num_vdev_stats; i++) {
		const struct wmi_tlv_vdev_stats *src;
		struct ath10k_fw_stats_vdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
			const struct wmi_tlv_peer_stats_extd *extd;
			unsigned long rx_duration_high;

			/* Extended records follow the remaining base peer
			 * records; index the i-th extd record past them.
			 */
			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
			       + sizeof(*extd) * i;

			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
			rx_duration_high = __le32_to_cpu
						(extd->rx_duration_high);

			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
				     &rx_duration_high)) {
				rx_duration_high =
					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
						  rx_duration_high);
				dst->rx_duration |= (u64)rx_duration_high <<
						    WMI_TLV_PEER_RX_DURATION_SHIFT;
			}
		}

		list_add_tail(&dst->list, &stats->peers);
	}

	kfree(tb);
	return 0;
}

/* Pull a roam event into @arg. (continues on next lines) */
static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_roam_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_roam_ev *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data,
skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = ev->vdev_id;
	arg->reason = ev->reason;
	arg->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

/* Pull a wake-on-WLAN event into @arg (host byte order). */
static int
ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_wow_event_info *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
	arg->flag = __le32_to_cpu(ev->flag);
	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
	arg->data_len = __le32_to_cpu(ev->data_len);

	kfree(tb);
	return 0;
}

/* Pull an echo (keep-alive) event into @arg. */
static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_echo_ev_arg *arg)
{
	const void **tb;
	const struct wmi_echo_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->value = ev->value;

	kfree(tb);
	return 0;
}

/* Build a pdev suspend command skb (single TLV). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
	struct wmi_tlv_pdev_suspend *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->opt = __cpu_to_le32(opt);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
	return skb;
}

/* Build a pdev resume command skb. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
{
	struct wmi_tlv_resume_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->reserved = __cpu_to_le32(0);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
	return skb;
}

/* Build a set-regdomain command skb with per-band regd and conformance
 * test limits.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
				  u16 rd, u16 rd2g, u16 rd5g,
				  u16 ctl2g, u16 ctl5g,
				  enum wmi_dfs_region dfs_reg)
{
	struct wmi_tlv_pdev_set_rd_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->regd = __cpu_to_le32(rd);
	cmd->regd_2ghz = __cpu_to_le32(rd2g);
	cmd->regd_5ghz = __cpu_to_le32(rd5g);
	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
	return skb;
}

/* TLV firmware applies TX beamforming config after association. */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}

/* Build a pdev set-param command skb. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
				     u32 param_value)
{
	struct wmi_tlv_pdev_set_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
		   param_id, param_value);
	return skb;
}

static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar,
				   void *host_mem_chunks)
{
	struct host_memory_chunk_tlv *chunk;
	struct wmi_tlv *tlv;
	dma_addr_t paddr;
	int i;
	__le16 tlv_len, tlv_tag;

	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
	tlv_len = __cpu_to_le16(sizeof(*chunk));

	/* Serialize each previously allocated host memory chunk as a
	 * [tlv][chunk] pair into the buffer at @host_mem_chunks.
	 */
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		tlv = host_mem_chunks;
		tlv->tag = tlv_tag;
		tlv->len = tlv_len;
		chunk = (void *)tlv->value;

		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		/* On targets with >32-bit DMA addressing also pass the upper
		 * half of the chunk's physical address.
		 */
		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
			     ar->wmi.svc_map)) {
			paddr = ar->wmi.mem_chunks[i].paddr;
			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
		}

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
			   ar->wmi.mem_chunks[i].req_id);

		host_mem_chunks += sizeof(*tlv);
		host_mem_chunks += sizeof(*chunk);
	}
}

/* Build the WMI init command: ABI version block, resource configuration and
 * the host memory chunk list. (continues on next lines)
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	void *chunks;
	size_t len, chunks_len;
	void *ptr;

	chunks_len = ar->wmi.num_mem_chunks *
		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* ABI handshake — must match what pull_svc_rdy_ev verifies. */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);

	/* hw_params override the TLV defaults when set */
	if (ar->hw_params.num_peers)
		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
	else
		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);

	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	if (ar->hw_params.num_peers)
		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
	else
		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode =
__cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
	cfg->wmi_send_separate = __cpu_to_le32(0);
	cfg->num_ocb_vdevs = __cpu_to_le32(0);
	cfg->num_ocb_channels = __cpu_to_le32(0);
	cfg->num_ocb_schedules = __cpu_to_le32(0);
	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);

	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}

/* Build a start-scan command: fixed params followed by channel, SSID,
 * BSSID and IE arrays, each in its own TLV. (continues on next lines)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
				 const struct wmi_start_scan_arg *arg)
{
	struct wmi_tlv_start_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, chan_len, ssid_len, bssid_len, ie_len;
	__le32 *chans;
	struct wmi_ssid *ssids;
	struct wmi_mac_addr *addrs;
	void *ptr;
	int i, ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ERR_PTR(ret);

	chan_len = arg->n_channels * sizeof(__le32);
	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
	ie_len = roundup(arg->ie_len, 4);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      sizeof(*tlv) + chan_len +
	      sizeof(*tlv) + ssid_len +
	      sizeof(*tlv) + bssid_len +
	      sizeof(*tlv) + ie_len;

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
	cmd->num_channels = __cpu_to_le32(arg->n_channels);
	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
	cmd->ie_len = __cpu_to_le32(arg->ie_len);
	cmd->num_probes = __cpu_to_le32(3);
	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);

	/* FIXME: There are some scan flag inconsistencies across firmwares,
	 * e.g. WMI-TLV inverts the logic behind the following flag.
	 */
	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(chan_len);
	chans = (void *)tlv->value;
	for (i = 0; i < arg->n_channels; i++)
		chans[i] = __cpu_to_le32(arg->channels[i]);

	ptr += sizeof(*tlv);
	ptr += chan_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(ssid_len);
	ssids = (void *)tlv->value;
	for (i = 0; i < arg->n_ssids; i++) {
		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
	}

	ptr += sizeof(*tlv);
	ptr += ssid_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(bssid_len);
	addrs = (void *)tlv->value;
	for (i = 0; i < arg->n_bssids; i++)
		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);

	ptr += sizeof(*tlv);
	ptr += bssid_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ie_len);
	memcpy(tlv->value, arg->ie,
arg->ie_len);

	ptr += sizeof(*tlv);
	ptr += ie_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
	return skb;
}

/* Build a stop-scan command. Scan and requestor ids are limited to 12 bits
 * and get the host prefix ORed in so firmware can tell host-originated
 * scans apart.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
				const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return ERR_PTR(-EINVAL);
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return ERR_PTR(-EINVAL);

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
	return skb;
}

/* Map the generic vdev subtype onto the TLV firmware's subtype values.
 * Non-11s mesh is not supported by TLV firmware.
 */
static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
					      enum wmi_vdev_subtype subtype)
{
	switch (subtype) {
	case WMI_VDEV_SUBTYPE_NONE:
		return WMI_TLV_VDEV_SUBTYPE_NONE;
	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
	case WMI_VDEV_SUBTYPE_P2P_GO:
		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
	case WMI_VDEV_SUBTYPE_PROXY_STA:
		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
	case WMI_VDEV_SUBTYPE_MESH_11S:
		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
		return -ENOTSUPP;
	}
	return -ENOTSUPP;
}

/* Build a vdev create command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
				  u32 vdev_id,
				  enum wmi_vdev_type vdev_type,
				  enum wmi_vdev_subtype vdev_subtype,
				  const u8 mac_addr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(vdev_type);
	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
	return skb;
}

/* Build a vdev delete command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
	return skb;
}

/* Build a vdev start (or restart) command: fixed params, channel struct and
 * an empty NoA descriptor array. (continues on next lines)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period =
__cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}

/* Build a vdev stop command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
	return skb;
}

/* Build a vdev up command carrying the association id and BSSID. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
			      const u8 *bssid)

{
	struct wmi_vdev_up_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
	return skb;
}

/* Build a vdev down command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
	return skb;
}

/* Build a vdev set-param command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
				     u32 param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
		   vdev_id, param_id, param_value);
	return skb;
}

/* Build a vdev install-key command: fixed params plus the key material in a
 * 4-byte-aligned byte array TLV. A non-NONE cipher requires key data and
 * vice versa. (continues on next lines)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    arg->key_data)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    !arg->key_data)
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd =
(void *)tlv->value; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); cmd->key_idx = __cpu_to_le32(arg->key_idx); cmd->key_flags = __cpu_to_le32(arg->key_flags); cmd->key_cipher = __cpu_to_le32(arg->key_cipher); cmd->key_len = __cpu_to_le32(arg->key_len); cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); if (arg->macaddr) ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); ptr += sizeof(*tlv); ptr += sizeof(*cmd); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32))); if (arg->key_data) memcpy(tlv->value, arg->key_data, arg->key_len); ptr += sizeof(*tlv); ptr += roundup(arg->key_len, sizeof(__le32)); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n"); return skb; } static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, const struct wmi_sta_uapsd_auto_trig_arg *arg) { struct wmi_sta_uapsd_auto_trig_param *ac; struct wmi_tlv *tlv; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM); tlv->len = __cpu_to_le16(sizeof(*ac)); ac = (void *)tlv->value; ac->wmm_ac = __cpu_to_le32(arg->wmm_ac); ac->user_priority = __cpu_to_le32(arg->user_priority); ac->service_interval = __cpu_to_le32(arg->service_interval); ac->suspend_interval = __cpu_to_le32(arg->suspend_interval); ac->delay_interval = __cpu_to_le32(arg->delay_interval); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n", ac->wmm_ac, ac->user_priority, ac->service_interval, ac->suspend_interval, ac->delay_interval); return ptr + sizeof(*tlv) + sizeof(*ac); } static struct sk_buff * ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, const u8 peer_addr[ETH_ALEN], const struct wmi_sta_uapsd_auto_trig_arg *args, u32 num_ac) { struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; struct wmi_sta_uapsd_auto_trig_param *ac; struct wmi_tlv *tlv; struct sk_buff 
/* NOTE(review): span reflowed from collapsed text; tokens unchanged. It
 * begins mid-declaration inside ath10k_wmi_tlv_op_gen_vdev_sta_uapsd()
 * and ends mid-statement inside ath10k_wmi_tlv_op_gen_peer_set_param().
 */
	*skb;
	size_t len;
	size_t ac_tlv_len;
	void *ptr;
	int i;

	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + ac_tlv_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->num_ac = __cpu_to_le32(num_ac);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Array TLV containing one auto-trigger param TLV per AC. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(ac_tlv_len);
	ac = (void *)tlv->value;

	ptr += sizeof(*tlv);
	for (i = 0; i < num_ac; i++)
		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
	return skb;
}

/* Emit one WMM params TLV at *ptr; returns pointer just past it. */
static void *ath10k_wmi_tlv_put_wmm(void *ptr,
				    const struct wmi_wmm_params_arg *arg)
{
	struct wmi_wmm_params *wmm;
	struct wmi_tlv *tlv;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
	tlv->len = __cpu_to_le16(sizeof(*wmm));
	wmm = (void *)tlv->value;
	ath10k_wmi_set_wmm_param(wmm, arg);

	return ptr + sizeof(*tlv) + sizeof(*wmm);
}

/* Build a per-vdev WMM configuration command covering all four ACs
 * (BE/BK/VI/VO) in the fixed command struct.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
				    const struct wmi_wmm_params_all_arg *arg)
{
	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
	return skb;
}

/* Build a STA keepalive command: fixed struct TLV plus an ARP-response
 * TLV (source/destination IPv4 addresses and destination MAC).
 * Note: the "KEEPALVE" spelling in the tag matches the WMI header.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
				    const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_tlv_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*arp);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->enabled = __cpu_to_le32(arg->enabled);
	cmd->method = __cpu_to_le32(arg->method);
	cmd->interval = __cpu_to_le32(arg->interval);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
	tlv->len = __cpu_to_le16(sizeof(*arp));
	arp = (void *)tlv->value;
	/* IPv4 addresses are passed through without byte-swapping here. */
	arp->src_ip4_addr = arg->src_ip4_addr;
	arp->dest_ip4_addr = arg->dest_ip4_addr;
	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
	return skb;
}

/* Build a "peer create" command (peer MAC + type) for the given vdev. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
				  const u8 peer_addr[ETH_ALEN],
				  enum wmi_peer_type peer_type)
{
	struct wmi_tlv_peer_create_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->peer_type = __cpu_to_le32(peer_type);
	ether_addr_copy(cmd->peer_addr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
	return skb;
}

/* Build a "peer delete" command for the given vdev and peer MAC. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
				  const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_delete_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
	return skb;
}

/* Build a "peer flush tids" command (TID bitmap selects queues to flush). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
				 const u8 peer_addr[ETH_ALEN],
				 u32 tid_bitmap)
{
	struct wmi_peer_flush_tids_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
	return skb;
}

/* Build a command setting a single per-peer firmware parameter.
 * (Definition continues past this span.)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
				     const u8 *peer_addr,
				     enum wmi_peer_param param_id,
				     u32 param_value)
{
	struct wmi_peer_set_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id =
/* NOTE(review): span reflowed from collapsed text; tokens unchanged. It
 * begins mid-statement inside ath10k_wmi_tlv_op_gen_peer_set_param() and
 * ends mid-declaration inside ath10k_wmi_tlv_op_gen_scan_chan_list().
 */
	__cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
		   vdev_id, peer_addr, param_id, param_value);
	return skb;
}

/* Build a "peer assoc complete" command: fixed struct TLV, then legacy
 * rates (byte array), HT rates (byte array), and a VHT rate-set TLV.
 * Rate counts are validated against MAX_SUPPORTED_RATES up front.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	/* Rate arrays are padded to 4-byte multiples in the TLV stream. */
	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}

/* Build a STA powersave-mode command for the given vdev. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
				 enum wmi_sta_ps_mode psmode)
{
	struct wmi_sta_powersave_mode_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = __cpu_to_le32(psmode);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
	return skb;
}

/* Build a command setting one STA powersave parameter on a vdev. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
				 enum wmi_sta_powersave_param param_id,
				 u32 param_value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
	return skb;
}

/* Build a per-peer AP powersave parameter command; a peer MAC is required. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
				enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	if (!mac)
		return ERR_PTR(-EINVAL);

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
	return skb;
}

/* Build a scan channel-list command: fixed struct TLV plus an array TLV
 * of per-channel TLVs. (Definition continues past this span.)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd
/* NOTE(review): span reflowed from collapsed text; tokens unchanged. It
 * begins mid-declaration inside ath10k_wmi_tlv_op_gen_scan_chan_list()
 * and ends mid-declaration inside ath10k_wmi_tlv_op_gen_mgmt_tx_send().
 */
	*cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Array TLV with one nested channel TLV per scan channel. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;

		ath10k_wmi_put_wmi_channel(ar, ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}

/* Build a command setting the OUI used in randomized probe requests. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
{
	struct wmi_scan_prob_req_oui_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
	return skb;
}

/* Build a "send beacon from host" command referencing an already DMA-mapped
 * beacon buffer (bcn_paddr); only metadata travels in the WMI message.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
				 const void *bcn, size_t bcn_len,
				 u32 bcn_paddr, bool dtim_zero,
				 bool deliver_cab)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	hdr = (struct ieee80211_hdr *)bcn;
	fc = le16_to_cpu(hdr->frame_control);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->data_len = __cpu_to_le32(bcn_len);
	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;

	if (dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
	return skb;
}

/* Build a pdev-wide WMM configuration: empty fixed struct TLV followed by
 * four WMM params TLVs (BE, BK, VI, VO) appended via the helper.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
				   const struct wmi_wmm_params_all_arg *arg)
{
	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
	struct wmi_wmm_params *wmm;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* nothing to set here */

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
	return skb;
}

/* Build a stats request carrying the requested stats-id mask. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct wmi_request_stats_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->stats_id = __cpu_to_le32(stats_mask);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
	return skb;
}

/* Build a peer stats-info request; the peer MAC is only copied for the
 * single-peer request type.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
					      u32 vdev_id,
					      enum wmi_peer_stats_info_request_type type,
					      u8 *addr,
					      u32 reset)
{
	struct wmi_tlv_request_peer_stats_info *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->request_type = __cpu_to_le32(type);

	if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
		ether_addr_copy(cmd->peer_macaddr.addr, addr);

	cmd->reset_after_request = __cpu_to_le32(reset);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
	return skb;
}

/* Drop the pending-tx idr entry for a mgmt frame that will not be sent. */
static int
ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
				       struct sk_buff *msdu)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct ath10k_wmi *wmi = &ar->wmi;

	idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);

	return 0;
}

/* Allocate a pending-tx descriptor id for a management frame, recording its
 * skb and DMA address; returns the idr-allocated id or a negative errno.
 * NOTE(review): pkt_addr is not freed when idr_alloc() fails — looks like a
 * small leak on the error path; verify against upstream expectations.
 */
static int ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
	int ret;

	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
	if (!pkt_addr)
		return -ENOMEM;

	pkt_addr->vaddr = skb;
	pkt_addr->paddr = paddr;

	spin_lock_bh(&ar->data_lock);
	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&ar->data_lock);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n",
		   ret);
	return ret;
}

/* Build a management-frame TX command referencing the DMA-mapped msdu.
 * (Definition continues past this span.)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
{
	struct ath10k_skb_cb
/* NOTE(review): span reflowed from collapsed text; tokens unchanged. It
 * begins mid-declaration inside ath10k_wmi_tlv_op_gen_mgmt_tx_send() and
 * ends mid-statement inside ath10k_wmi_tlv_op_gen_bcn_tmpl().
 */
	*cb = ATH10K_SKB_CB(msdu);
	struct wmi_tlv_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct ath10k_vif *arvif;
	u32 buf_len = msdu->len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int len, desc_id;
	u32 vdev_id;
	void *ptr;

	if (!cb->vif)
		return ERR_PTR(-EINVAL);

	hdr = (struct ieee80211_hdr *)msdu->data;
	arvif = (void *)cb->vif->drv_priv;
	vdev_id = arvif->vdev_id;

	/* Only management frames and (QoS-)nullfunc frames are accepted. */
	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control)))))
		return ERR_PTR(-EINVAL);

	len = sizeof(*cmd) + 2 * sizeof(*tlv);

	/* Protected robust mgmt frames get room for the CCMP MIC appended. */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
	buf_len = round_up(buf_len, 4);

	len += buf_len;
	len = round_up(len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
	if (desc_id < 0)
		goto err_free_skb;

	cb->msdu_id = desc_id;

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->desc_id = __cpu_to_le32(desc_id);
	cmd->chanfreq = 0;
	cmd->buf_len = __cpu_to_le32(buf_len);
	cmd->frame_len = __cpu_to_le32(msdu->len);
	cmd->paddr = __cpu_to_le64(paddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(buf_len);

	ptr += sizeof(*tlv);
	memcpy(ptr, msdu->data, buf_len);

	return skb;

err_free_skb:
	dev_kfree_skb(skb);
	/* desc_id holds the negative errno from msdu-id allocation. */
	return ERR_PTR(desc_id);
}

/* Build a "force firmware hang" command (debug/testing aid). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
				    enum wmi_force_fw_hang_type type,
				    u32 delay_ms)
{
	struct wmi_force_fw_hang_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->type = __cpu_to_le32(type);
	cmd->delay_ms = __cpu_to_le32(delay_ms);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
	return skb;
}

/* Build a firmware debug-log configuration command. A non-zero
 * module_enable selects verbose logging for those modules; otherwise all
 * modules are set to warning level. The log_level parameter is not read
 * in this body.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
				 u32 log_level)
{
	struct wmi_tlv_dbglog_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, bmap_len;
	u32 value;
	void *ptr;

	if (module_enable) {
		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
				module_enable,
				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
	} else {
		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
				WMI_TLV_DBGLOG_ALL_MODULES,
				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
	}

	bmap_len = 0;
	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
	cmd->value = __cpu_to_le32(value);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(bmap_len);

	/* nothing to do here */

	ptr += sizeof(*tlv);
	/* NOTE(review): sizeof(bmap_len) looks like it was meant to be
	 * bmap_len (which is 0). ptr is unused afterwards so this is
	 * harmless here, but verify before reusing this pattern.
	 */
	ptr += sizeof(bmap_len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
	return skb;
}

/* Build a packet-log enable command with the given filter mask. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct wmi_tlv_pktlog_enable *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->filter = __cpu_to_le32(filter);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
		   filter);
	return skb;
}

/* Build a pdev temperature query (no command payload fields to fill). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
{
	struct wmi_tlv_pdev_get_temp_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
	return skb;
}

/* Build a packet-log disable command (empty payload). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
{
	struct wmi_tlv_pktlog_disable *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
	return skb;
}

/* Build a beacon-template command: fixed struct TLV, probe-response info
 * TLV (caps/ERP plus optional IEs), then the beacon payload as a padded
 * byte-array TLV. (Definition continues past this span.)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len
/* NOTE(review): span reflowed from collapsed text; tokens unchanged. It
 * begins mid-statement inside ath10k_wmi_tlv_op_gen_bcn_tmpl() and ends
 * mid-declaration inside ath10k_wmi_tlv_op_gen_tdls_peer_update().
 */
	= __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
	 * then it is then impossible to pass original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

/* Build a probe-response template command: fixed struct TLV, a zeroed
 * probe info TLV, then the template payload as a padded byte-array TLV.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
			       struct sk_buff *prb)
{
	struct wmi_tlv_prb_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) +
	      sizeof(*tlv) + roundup(prb->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->buf_len = __cpu_to_le32(prb->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info));
	info = (void *)tlv->value;
	info->caps = 0;
	info->erp = 0;

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
	memcpy(tlv->value, prb->data, prb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

/* Build a P2P GO beacon-IE command. p2p_ie[1] is the IE length byte, so
 * the copied span is the full IE including its 2-byte header.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
				    const u8 *p2p_ie)
{
	struct wmi_tlv_p2p_go_bcn_ie *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += roundup(p2p_ie[1] + 2, 4);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv p2p go bcn ie for vdev %i\n", vdev_id);
	return skb;
}

/* Build a TDLS state-update command with fixed tuning values; optionally
 * enables the UAPSD buffer-STA option when the firmware advertises it.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
					   enum wmi_tdls_state state)
{
	struct wmi_tdls_set_state_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;
	/* Set to options from wmi_tlv_tdls_options,
	 * for now none of them are enabled.
	 */
	u32 options = 0;

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		options |= WMI_TLV_TDLS_BUFFER_STA_EN;

	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firm will handle TDLS
	 * link inactivity detecting logic.
	 */
	if (state == WMI_TDLS_ENABLE_ACTIVE)
		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->state = __cpu_to_le32(state);
	cmd->notification_interval_ms = __cpu_to_le32(5000);
	cmd->tx_discovery_threshold = __cpu_to_le32(100);
	cmd->tx_teardown_threshold = __cpu_to_le32(5);
	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
	cmd->rssi_delta = __cpu_to_le32(-20);
	cmd->tdls_options = __cpu_to_le32(options);
	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv update fw tdls state %d for vdev %i\n",
		   state, vdev_id);
	return skb;
}

/* Translate mac80211 UAPSD queue flags and service period into the WMI
 * TDLS peer-QoS bitfield.
 */
static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
{
	u32 peer_qos = 0;

	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;

	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);

	return peer_qos;
}

/* Build a TDLS peer-update command: fixed struct TLV, peer-capabilities
 * TLV, then an array TLV of per-channel TLVs.
 * (Definition continues past this span.)
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
struct wmi_tlv *tlv; struct sk_buff *skb; u32 peer_qos; void *ptr; int len; int i; len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + sizeof(*peer_cap) + sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan); skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return ERR_PTR(-ENOMEM); ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD); tlv->len = __cpu_to_le16(sizeof(*cmd)); cmd = (void *)tlv->value; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); cmd->peer_state = __cpu_to_le32(arg->peer_state); ptr += sizeof(*tlv); ptr += sizeof(*cmd); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES); tlv->len = __cpu_to_le16(sizeof(*peer_cap)); peer_cap = (void *)tlv->value; peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues, cap->peer_max_sp); peer_cap->peer_qos = __cpu_to_le32(peer_qos); peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) peer_cap->peer_operclass[i] = cap->peer_operclass[i]; peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); ptr += sizeof(*tlv); ptr += sizeof(*peer_cap); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan)); ptr += sizeof(*tlv); for (i = 0; i < cap->peer_chan_len; i++) { tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); tlv->len = __cpu_to_le16(sizeof(*chan)); chan = (void 
*)tlv->value; ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]); ptr += sizeof(*tlv); ptr += sizeof(*chan); } ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv tdls peer update vdev %i state %d n_chans %u\n", arg->vdev_id, arg->peer_state, cap->peer_chan_len); return skb; } static struct sk_buff * ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration, u32 next_offset, u32 enabled) { struct wmi_tlv_set_quiet_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); if (!skb) return ERR_PTR(-ENOMEM); tlv = (void *)skb->data; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD); tlv->len = __cpu_to_le16(sizeof(*cmd)); cmd = (void *)tlv->value; /* vdev_id is not in use, set to 0 */ cmd->vdev_id = __cpu_to_le32(0); cmd->period = __cpu_to_le32(period); cmd->duration = __cpu_to_le32(duration); cmd->next_start = __cpu_to_le32(next_offset); cmd->enabled = __cpu_to_le32(enabled); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv quiet param: period %u duration %u enabled %d\n", period, duration, enabled); return skb; } static struct sk_buff * ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) { struct wmi_tlv_wow_enable_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; size_t len; len = sizeof(*tlv) + sizeof(*cmd); skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return ERR_PTR(-ENOMEM); tlv = (struct wmi_tlv *)skb->data; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD); tlv->len = __cpu_to_le16(sizeof(*cmd)); cmd = (void *)tlv->value; cmd->enable = __cpu_to_le32(1); if (!ar->bus_param.link_can_suspend) cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n"); return skb; } static struct sk_buff * ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id, enum wmi_wow_wakeup_event event, u32 enable) { struct wmi_tlv_wow_add_del_event_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; size_t len; len = 
sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->is_add = __cpu_to_le32(enable);
	/* single-bit event mask; 'event' selects which wakeup source */
	cmd->event_bitmap = __cpu_to_le32(1 << event);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return skb;
}

/* Build the "host woke up from sleep" indication. The command struct
 * carries no parameters; its body is left as allocated.
 */
static struct sk_buff *
ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct wmi_tlv_wow_host_wakeup_ind *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");

	return skb;
}

/* Build a WoW bitmap-pattern add command. 'pattern'/'bitmask' are both
 * 'pattern_len' bytes; 'pattern_offset' is where matching starts in the
 * frame. Several trailing TLVs are emitted empty (ipv4/ipv6 sync, magic,
 * info timeout) because only bitmap patterns are used here.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));
	/* NOTE(review): no value is written for the ratelimit TLV; this
	 * presumably relies on the skb data being zeroed at allocation —
	 * confirm against ath10k_wmi_alloc_skb().
	 */

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);

	return skb;
}

/* Build a WoW delete-pattern command for a previously added bitmap
 * pattern identified by (vdev_id, pattern_id).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id)
{
	struct wmi_tlv_wow_del_pattern_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return skb;
}

/* Request FW to start PNO operation */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
				       u32 vdev_id,
				       struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *channel_list;
	u16 tlv_len;
	size_t len;
	void *ptr;
	u32 i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	/* TLV place holder for array of uint32 channel_list */

	/* SSID count and channel count are both clamped to firmware limits */
	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
				   WMI_NLO_MAX_CHAN);
	len += sizeof(struct nlo_configured_parameters) *
	       min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* wmi_tlv_wow_nlo_config_cmd parameters*/
	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START |
				   WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	/* copy scan interval */
	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles =
__cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);

	/* Optional MAC randomization for probe requests during PNO scans */
	if (pno->enable_pno_scan_randomization) {
		cmd->flags |=
			__cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
				      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
					       WMI_NLO_MAX_SSIDS));
	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
		  sizeof(struct nlo_configured_parameters);
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(tlv_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
		/* each list element carries its own embedded TLV header */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
					 sizeof(*tlv));

		/* copy ssid and it's length */
		nlo_list[i].ssid.valid = __cpu_to_le32(true);
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* copy rssi threshold */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
			nlo_list[i].rssi_cond.rssi =
				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += __le32_to_cpu(cmd->no_of_ssids) *
	       sizeof(struct nlo_configured_parameters);

	/* copy channel info */
	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
						   pno->a_networks[0].channel_count,
						   WMI_NLO_MAX_CHAN));

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
				 sizeof(u_int32_t));
	ptr += sizeof(*tlv);

	channel_list = (__le32 *)ptr;
	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
		channel_list[i] =
			__cpu_to_le32(pno->a_networks[0].channels[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

/* Request FW to stop ongoing PNO operation */
static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
							     u32 vdev_id)
{
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	/* TLV place holder for array of uint32 channel_list */
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* channel list */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n",
		   vdev_id);
	return skb;
}

/* Dispatch a PNO config request to the start or stop builder. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
				 struct wmi_pno_scan_req *pno_scan)
{
	if (pno_scan->enable)
		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id,
							      pno_scan);
	else
		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
}

/* Build an adaptive OCS (off-channel scheduler) enable/disable command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct wmi_tlv_adaptive_qcs *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb =
ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->enable = __cpu_to_le32(enable ? 1 : 0);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
	return skb;
}

/* Build an echo command; firmware reflects 'value' back in an event. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
{
	struct wmi_echo_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->value = cpu_to_le32(value);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
	return skb;
}

/* Build a vdev spectral-scan configure command from 'arg'; all fields
 * are copied through 1:1 as little-endian u32s.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
					 const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	return skb;
}

/* Build a vdev spectral-scan enable/trigger command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
					   u32 trigger, u32 enable)
{
	struct wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->trigger_cmd = __cpu_to_le32(trigger);
	cmd->enable_cmd = __cpu_to_le32(enable);

	return skb;
}

/****************/
/* TLV mappings */
/****************/

/* Map of abstract WMI command ids to their TLV-firmware values. */
static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.init_cmdid = WMI_TLV_INIT_CMDID,
	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	/* NOTE(review): maps to the same value as .roam_ap_profile above;
	 * verify WMI_TLV_OFL_SCAN_ADD_AP_PROFILE was not intended here.
	 */
	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
	/* remaining commands are not implemented by the TLV firmware ABI */
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};

/* Map of abstract pdev parameter ids to their TLV-firmware values. */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g =
WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* NOTE(review): watermark maps to the TMO_EN param id, the same as
	 * the next entry — confirm this is intentional.
	 */
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period =
			WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period =
			WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period =
			WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
			WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	/* parameters below are not exposed by the TLV firmware ABI */
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};

/* Map of abstract per-peer parameter ids to their TLV-firmware values. */
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
	.smps_state = WMI_TLV_PEER_SMPS_STATE,
	.ampdu = WMI_TLV_PEER_AMPDU,
	.authorize = WMI_TLV_PEER_AUTHORIZE,
	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
	.nss = WMI_TLV_PEER_NSS,
	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
	.membership = WMI_TLV_PEER_MEMBERSHIP,
	.user_pos = WMI_TLV_PEER_USERPOS,
	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
	.phymode = WMI_TLV_PEER_PHYMODE,
	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};

/* Map of abstract vdev parameter ids to their TLV-firmware values. */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble =
WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	/* parameters below are not exposed by the TLV firmware ABI */
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* WMI op table for TLV firmware: event parsers (pull_*) and command
 * builders (gen_*) wired to the TLV implementations above.
 */
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,
	.map_svc_ext = wmi_tlv_svc_map_ext,

	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
	.pull_mgmt_tx_bundle_compl =
				ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_request_peer_stats_info =
				ath10k_wmi_tlv_op_gen_request_peer_stats_info,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_mgmt_tx_send =
ath10k_wmi_tlv_op_gen_mgmt_tx_send, .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode, .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, /* .gen_addba_clear_resp not implemented */ /* .gen_addba_send not implemented */ /* .gen_addba_set_resp not implemented */ /* .gen_delba_send not implemented */ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl, .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl, .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable, .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event, .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern, .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern, .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno, .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, .get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype, .gen_echo = ath10k_wmi_tlv_op_gen_echo, .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf, .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable, }; static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { .auth = WMI_TLV_PEER_AUTH, .qos = WMI_TLV_PEER_QOS, .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY, .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY, .apsd = WMI_TLV_PEER_APSD, .ht = WMI_TLV_PEER_HT, .bw40 = WMI_TLV_PEER_40MHZ, .stbc = 
WMI_TLV_PEER_STBC, .ldbc = WMI_TLV_PEER_LDPC, .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS, .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS, .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX, .vht = WMI_TLV_PEER_VHT, .bw80 = WMI_TLV_PEER_80MHZ, .pmf = WMI_TLV_PEER_PMF, .bw160 = WMI_TLV_PEER_160MHZ, }; /************/ /* TLV init */ /************/ void ath10k_wmi_tlv_attach(struct ath10k *ar) { ar->wmi.cmd = &wmi_tlv_cmd_map; ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; ar->wmi.peer_param = &wmi_tlv_peer_param_map; ar->wmi.ops = &wmi_tlv_ops; ar->wmi.peer_flags = &wmi_tlv_peer_flags_map; }
/* ==== file boundary (concatenation artifact): the following content is from 408550.c ==== */
/*
 * PostScript driver Escape function
 *
 * Copyright 1998 Huw D M Davies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif

#include "windef.h"
#include "winbase.h"
#include "wingdi.h"
#include "wine/wingdi16.h"
#include "winuser.h"
#include "winreg.h"
#include "psdrv.h"
#include "wine/debug.h"
#include "winspool.h"

WINE_DEFAULT_DEBUG_CHANNEL(psdrv);

/* Send @num raw bytes to the print spooler for this device's job.
 * Returns @num on success, or SP_OUTOFDISK if WritePrinter failed or
 * wrote a short count. */
DWORD write_spool( PHYSDEV dev, const void *data, DWORD num )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );
    DWORD written;

    if (!WritePrinter(physDev->job.hprinter, (LPBYTE) data, num, &written) || (written != num))
        return SP_OUTOFDISK;
    return num;
}

/**********************************************************************
 *           ExtEscape  (WINEPS.@)
 *
 * Printer-escape dispatcher for the PostScript driver.  Each case handles
 * one GDI escape code; the return-value conventions follow the Windows PS
 * driver (several quirks below exist to satisfy PageMaker and Photoshop).
 */
INT CDECL PSDRV_ExtEscape( PHYSDEV dev, INT nEscape, INT cbInput, LPCVOID in_data,
                           INT cbOutput, LPVOID out_data )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );

    TRACE("%p,%d,%d,%p,%d,%p\n",
          dev->hdc, nEscape, cbInput, in_data, cbOutput, out_data);

    switch(nEscape)
    {
    case QUERYESCSUPPORT:
        if(cbInput < sizeof(SHORT))
        {
	    WARN("cbInput < sizeof(SHORT) (=%d) for QUERYESCSUPPORT\n", cbInput);
	    return 0;
        } else {
            /* The queried escape code may be passed as a USHORT or a DWORD. */
	    DWORD num = (cbInput < sizeof(DWORD)) ? *(const USHORT *)in_data : *(const DWORD *)in_data;
	    TRACE("QUERYESCSUPPORT for %d\n", num);

	    switch(num) {
	    case NEXTBAND:
	    /*case BANDINFO:*/
	    case SETCOPYCOUNT:
	    case GETTECHNOLOGY:
	    case SETLINECAP:
	    case SETLINEJOIN:
	    case SETMITERLIMIT:
	    case SETCHARSET:
	    case EXT_DEVICE_CAPS:
	    case SET_BOUNDS:
            case EPSPRINTING:
            case POSTSCRIPT_DATA:
            case PASSTHROUGH:
            case POSTSCRIPT_PASSTHROUGH:
            case POSTSCRIPT_IGNORE:
	    case BEGIN_PATH:
	    case CLIP_TO_PATH:
	    case END_PATH:
	    /*case DRAWPATTERNRECT:*/

	    /* PageMaker checks for it */
	    case DOWNLOADHEADER:

	    /* PageMaker doesn't check for DOWNLOADFACE and GETFACENAME but
	     * uses them, they are supposed to be supported by any PS printer.
	     */
	    case DOWNLOADFACE:

	    /* PageMaker checks for these as a part of process of detecting
	     * a "fully compatible" PS printer, but doesn't actually use them.
	     */
	    case OPENCHANNEL:
	    case CLOSECHANNEL:
	        return TRUE;

	    /* Windows PS driver reports 0, but still supports this escape */
	    case GETFACENAME:
	        return FALSE; /* suppress the FIXME below */

	    default:
		FIXME("QUERYESCSUPPORT(%d) - not supported.\n", num);
	        return FALSE;
	    }
	}

    case OPENCHANNEL:
        FIXME("OPENCHANNEL: stub\n");
        return 1;

    case CLOSECHANNEL:
        FIXME("CLOSECHANNEL: stub\n");
        return 1;

    case DOWNLOADHEADER:
        FIXME("DOWNLOADHEADER: stub\n");
        /* should return name of the downloaded procset */
        /* NOTE(review): out_data/cbOutput are not validated here — assumes
         * the caller supplies a writable buffer; confirm against callers. */
        *(char *)out_data = 0;
        return 1;

    case GETFACENAME:
        FIXME("GETFACENAME: stub\n");
        lstrcpynA(out_data, "Courier", cbOutput);
        return 1;

    case DOWNLOADFACE:
        FIXME("DOWNLOADFACE: stub\n");
        return 1;

    case MFCOMMENT:
    {
        FIXME("MFCOMMENT(%p, %d)\n", in_data, cbInput);
        return 1;
    }

    case DRAWPATTERNRECT:
    {
	DRAWPATRECT	*dpr = (DRAWPATRECT*)in_data;

	FIXME("DRAWPATTERNRECT(pos (%d,%d), size %dx%d, style %d, pattern %x), stub!\n",
		dpr->ptPosition.x, dpr->ptPosition.y,
		dpr->ptSize.x, dpr->ptSize.y,
		dpr->wStyle, dpr->wPattern
	);
	return 1;
    }

    case BANDINFO:
    {
	BANDINFOSTRUCT	*ibi = (BANDINFOSTRUCT*)in_data;
	BANDINFOSTRUCT	*obi = (BANDINFOSTRUCT*)out_data;

	FIXME("BANDINFO(graphics %d, text %d, rect %s), stub!\n",
	      ibi->GraphicsFlag, ibi->TextFlag, wine_dbgstr_rect(&ibi->GraphicsRect));
	*obi = *ibi;
	return 1;
    }

    case NEXTBAND:
    {
        RECT *r = out_data;

        /* First call reports the whole page as one band; the second call
         * returns an empty rect and ends the page. */
	if(!physDev->job.banding) {
	    physDev->job.banding = TRUE;
            SetRect(r, 0, 0, physDev->horzRes, physDev->vertRes);
            TRACE("NEXTBAND returning %s\n", wine_dbgstr_rect(r));
	    return 1;
	}
        SetRectEmpty(r);
	TRACE("NEXTBAND rect to 0,0 - 0,0\n" );
	physDev->job.banding = FALSE;
	return EndPage( dev->hdc );
    }

    case SETCOPYCOUNT:
    {
	const INT *NumCopies = in_data;
	INT *ActualCopies = out_data;

	if(cbInput != sizeof(INT)) {
	    WARN("cbInput != sizeof(INT) (=%d) for SETCOPYCOUNT\n", cbInput);
	    return 0;
	}
	TRACE("SETCOPYCOUNT %d\n", *NumCopies);
        /* Only a single copy is ever produced. */
	*ActualCopies = 1;
	return 1;
    }

    case GETTECHNOLOGY:
    {
	LPSTR p = out_data;
	strcpy(p, "PostScript");
	*(p + strlen(p) + 1) = '\0'; /* 2 '\0's at end of string */
	return 1;
    }

    case SETLINECAP:
    {
	INT newCap = *(const INT *)in_data;

	if(cbInput != sizeof(INT)) {
	    WARN("cbInput != sizeof(INT) (=%d) for SETLINECAP\n", cbInput);
	    return 0;
	}
	TRACE("SETLINECAP %d\n", newCap);
	return 0;
    }

    case SETLINEJOIN:
    {
	INT newJoin = *(const INT *)in_data;

	if(cbInput != sizeof(INT)) {
	    WARN("cbInput != sizeof(INT) (=%d) for SETLINEJOIN\n", cbInput);
	    return 0;
	}
	TRACE("SETLINEJOIN %d\n", newJoin);
	return 0;
    }

    case SETMITERLIMIT:
    {
	INT newLimit = *(const INT *)in_data;

	if(cbInput != sizeof(INT)) {
	    WARN("cbInput != sizeof(INT) (=%d) for SETMITERLIMIT\n", cbInput);
	    return 0;
	}
	TRACE("SETMITERLIMIT %d\n", newLimit);
	return 0;
    }

    case SETCHARSET:
      /* Undocumented escape used by winword6.
	 Switches between ANSI and a special charset.
	 If *lpInData == 1 we require that
	 0x91 is quoteleft
	 0x92 is quoteright
	 0x93 is quotedblleft
	 0x94 is quotedblright
	 0x95 is bullet
	 0x96 is endash
	 0x97 is emdash
	 0xa0 is non break space - yeah right.

	 If *lpInData == 0 we get ANSI.
	 Since there's nothing else there, let's just make these the default
	 anyway and see what happens...
      */
        return 1;

    case EXT_DEVICE_CAPS:
    {
	UINT cap = *(const UINT *)in_data;

	if(cbInput != sizeof(UINT)) {
	    WARN("cbInput != sizeof(UINT) (=%d) for EXT_DEVICE_CAPS\n", cbInput);
	    return 0;
	}
	TRACE("EXT_DEVICE_CAPS %d\n", cap);
	return 0;
    }

    case SET_BOUNDS:
    {
	const RECT *r = in_data;

	if(cbInput != sizeof(RECT)) {
	    WARN("cbInput != sizeof(RECT) (=%d) for SET_BOUNDS\n", cbInput);
	    return 0;
	}
	TRACE("SET_BOUNDS %s\n", wine_dbgstr_rect(r));
	return 0;
    }

    case EPSPRINTING:
    {
	UINT epsprint = *(const UINT*)in_data;
	/* FIXME: In this mode we do not need to send page intros and page
	 * ends according to the doc. But I just ignore that detail
	 * for now.
	 */
	TRACE("EPS Printing support %sable.\n",epsprint?"en":"dis");
	return 1;
    }

    case POSTSCRIPT_DATA:
    case PASSTHROUGH:
    case POSTSCRIPT_PASSTHROUGH:
    {
        /* Write directly to spool file, bypassing normal PS driver
         * processing that is done along with writing PostScript code
         * to the spool.
         * We have a WORD before the data counting the size, but
         * cbInput is just this +2.
         * However Photoshop 7 has a bug that sets cbInput to 2 less than the
         * length of the string, rather than 2 more. So we'll use the WORD at
         * in_data[0] instead.
         */
        passthrough_enter(dev);
        return write_spool(dev, ((char*)in_data) + 2, *(const WORD*)in_data);
    }

    case POSTSCRIPT_IGNORE:
    {
        /* Toggle "quiet" mode; returns the previous value. */
	BOOL ret = physDev->job.quiet;
	TRACE("POSTSCRIPT_IGNORE %d\n", *(const short*)in_data);
	physDev->job.quiet = *(const short*)in_data;
	return ret;
    }

    case GETSETPRINTORIENT:
    {
	/* If lpInData is present, it is a 20 byte structure, first 32
	 * bit LONG value is the orientation. if lpInData is NULL, it
	 * returns the current orientation.
	 */
	FIXME("GETSETPRINTORIENT not implemented (data %p)!\n",in_data);
	return 1;
    }

    case BEGIN_PATH:
        TRACE("BEGIN_PATH\n");
        if(physDev->pathdepth)
            FIXME("Nested paths not yet handled\n");
        return ++physDev->pathdepth;

    case END_PATH:
    {
        const struct PATH_INFO *info = (const struct PATH_INFO*)in_data;

        TRACE("END_PATH\n");
        if(!physDev->pathdepth) {
            ERR("END_PATH called without a BEGIN_PATH\n");
            return -1;
        }
        TRACE("RenderMode = %d, FillMode = %d, BkMode = %d\n",
              info->RenderMode, info->FillMode, info->BkMode);
        switch(info->RenderMode) {
        case RENDERMODE_NO_DISPLAY:
            PSDRV_WriteClosePath(dev); /* not sure if this is necessary, but it can't hurt */
            break;
        case RENDERMODE_OPEN:
        case RENDERMODE_CLOSED:
        default:
            FIXME("END_PATH: RenderMode %d, not yet supported\n", info->RenderMode);
            break;
        }
        return --physDev->pathdepth;
    }

    case CLIP_TO_PATH:
    {
        WORD mode = *(const WORD*)in_data;

        switch(mode) {
        case CLIP_SAVE:
            TRACE("CLIP_TO_PATH: CLIP_SAVE\n");
            PSDRV_WriteGSave(dev);
            return 1;
        case CLIP_RESTORE:
            TRACE("CLIP_TO_PATH: CLIP_RESTORE\n");
            PSDRV_WriteGRestore(dev);
            return 1;
        case CLIP_INCLUSIVE:
            TRACE("CLIP_TO_PATH: CLIP_INCLUSIVE\n");
            /* FIXME to clip or eoclip ? (see PATH_INFO.FillMode) */
            PSDRV_WriteClip(dev);
            PSDRV_WriteNewPath(dev);
            return 1;
        case CLIP_EXCLUSIVE:
            FIXME("CLIP_EXCLUSIVE: not implemented\n");
            return 0;
        default:
            FIXME("Unknown CLIP_TO_PATH mode %d\n", mode);
            return 0;
        }
    }
    default:
        FIXME("Unimplemented code %d\n", nEscape);
        return 0;
    }
}

/************************************************************************
 *           PSDRV_StartPage
 *
 * Begin a new page: bumps the page counter and emits the %%Page intro.
 * Returns 1 on success (including the already-started case), 0 on
 * spool-write failure.
 */
INT CDECL PSDRV_StartPage( PHYSDEV dev )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );

    TRACE("%p\n", dev->hdc);

    if(!physDev->job.OutOfPage) {
        FIXME("Already started a page?\n");
	return 1;
    }

    physDev->job.PageNo++;

    if(!PSDRV_WriteNewPage( dev ))
        return 0;
    physDev->job.OutOfPage = FALSE;
    return 1;
}

/************************************************************************
 *           PSDRV_EndPage
 *
 * Finish the current page: leaves passthrough mode, emits the page
 * trailer and drops per-page font downloads.
 */
INT CDECL PSDRV_EndPage( PHYSDEV dev )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );

    TRACE("%p\n", dev->hdc);

    if(physDev->job.OutOfPage) {
        FIXME("Already ended a page?\n");
	return 1;
    }

    passthrough_leave(dev);
    if(!PSDRV_WriteEndPage( dev ))
        return 0;
    PSDRV_EmptyDownloadList(dev, FALSE);
    physDev->job.OutOfPage = TRUE;
    return 1;
}

/************************************************************************
 *           PSDRV_StartDoc
 *
 * Open the printer, start the spool job and write the PostScript header.
 * Returns the spooler job id, or 0 on failure.
 */
INT CDECL PSDRV_StartDoc( PHYSDEV dev, const DOCINFOW *doc )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );
    DOC_INFO_1W di;
    PRINTER_DEFAULTSW prn_def;

    TRACE("(%p, %p) => %s, %s, %s\n", physDev, doc, debugstr_w(doc->lpszDocName),
        debugstr_w(doc->lpszOutput), debugstr_w(doc->lpszDatatype));

    if(physDev->job.id) {
        FIXME("hJob != 0. Now what?\n");
	return 0;
    }

    prn_def.pDatatype = NULL;
    prn_def.pDevMode = &physDev->pi->Devmode->dmPublic;
    prn_def.DesiredAccess = PRINTER_ACCESS_USE;

    if (!OpenPrinterW( physDev->pi->friendly_name, &physDev->job.hprinter, &prn_def ))
    {
        WARN("OpenPrinter(%s, ...) failed: %d\n",
            debugstr_w(physDev->pi->friendly_name), GetLastError());
        return 0;
    }

    di.pDocName = (LPWSTR) doc->lpszDocName;
    di.pDatatype = NULL;

    if(doc->lpszOutput)
        di.pOutputFile = (LPWSTR) doc->lpszOutput;
    else if(physDev->job.output)
        di.pOutputFile = physDev->job.output;
    else
        di.pOutputFile = NULL;

    TRACE("using output: %s\n", debugstr_w(di.pOutputFile));

    /* redirection located in HKCU\Software\Wine\Printing\Spooler
       is done during winspool.drv,ScheduleJob */
    physDev->job.id = StartDocPrinterW(physDev->job.hprinter, 1, (LPBYTE) &di);
    if(!physDev->job.id) {
        WARN("StartDocPrinter() failed: %d\n", GetLastError());
        ClosePrinter(physDev->job.hprinter);
	return 0;
    }

    if (!PSDRV_WriteHeader( dev, doc->lpszDocName )) {
        WARN("Failed to write header\n");
        ClosePrinter(physDev->job.hprinter);
        return 0;
    }

    physDev->job.banding = FALSE;
    physDev->job.OutOfPage = TRUE;
    physDev->job.PageNo = 0;
    physDev->job.quiet = FALSE;
    physDev->job.passthrough_state = passthrough_none;
    physDev->job.doc_name = strdupW( doc->lpszDocName );

    return physDev->job.id;
}

/************************************************************************
 *           PSDRV_EndDoc
 *
 * Close out the current job: forces a dangling page closed, writes the
 * document trailer and releases the printer handle.
 */
INT CDECL PSDRV_EndDoc( PHYSDEV dev )
{
    PSDRV_PDEVICE *physDev = get_psdrv_dev( dev );
    INT ret = 1;

    TRACE("%p\n", dev->hdc);

    if(!physDev->job.id) {
        FIXME("hJob == 0. Now what?\n");
	return 0;
    }

    if(!physDev->job.OutOfPage) {
        WARN("Somebody forgot an EndPage\n");
	PSDRV_EndPage( dev );
    }

    if (physDev->job.PageNo)
        PSDRV_WriteFooter( dev );

    ret = EndDocPrinter(physDev->job.hprinter);
    ClosePrinter(physDev->job.hprinter);
    physDev->job.hprinter = NULL;
    physDev->job.id = 0;
    HeapFree( GetProcessHeap(), 0, physDev->job.doc_name );
    physDev->job.doc_name = NULL;

    return ret;
}
/* ==== file boundary (concatenation artifact): the following content is from 147009.c ==== */
/* * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* We need to use some deprecated APIs */ #define OPENSSL_SUPPRESS_DEPRECATED #include <string.h> #include <openssl/evp.h> #include <openssl/err.h> #include <openssl/provider.h> #include <openssl/safestack.h> #include <openssl/kdf.h> #include <openssl/encoder.h> #include <openssl/decoder.h> #include <openssl/core_names.h> #include "apps.h" #include "app_params.h" #include "progs.h" #include "opt.h" #include "names.h" DEFINE_STACK_OF_CSTRING() static int verbose = 0; static void legacy_cipher_fn(const EVP_CIPHER *c, const char *from, const char *to, void *arg) { if (c != NULL) { BIO_printf(arg, " %s\n", EVP_CIPHER_name(c)); } else { if (from == NULL) from = "<undefined>"; if (to == NULL) to = "<undefined>"; BIO_printf(arg, " %s => %s\n", from, to); } } DEFINE_STACK_OF(EVP_CIPHER) static int cipher_cmp(const EVP_CIPHER * const *a, const EVP_CIPHER * const *b) { int ret = EVP_CIPHER_number(*a) - EVP_CIPHER_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(EVP_CIPHER_provider(*a)), OSSL_PROVIDER_name(EVP_CIPHER_provider(*b))); return ret; } static void collect_ciphers(EVP_CIPHER *cipher, void *stack) { STACK_OF(EVP_CIPHER) *cipher_stack = stack; if (sk_EVP_CIPHER_push(cipher_stack, cipher) > 0) EVP_CIPHER_up_ref(cipher); } static void list_ciphers(void) { STACK_OF(EVP_CIPHER) *ciphers = sk_EVP_CIPHER_new(cipher_cmp); int i; if (ciphers == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Legacy:\n"); EVP_CIPHER_do_all_sorted(legacy_cipher_fn, bio_out); BIO_printf(bio_out, "Provided:\n"); EVP_CIPHER_do_all_provided(NULL, collect_ciphers, ciphers); sk_EVP_CIPHER_sort(ciphers); for (i = 0; i < 
sk_EVP_CIPHER_num(ciphers); i++) { const EVP_CIPHER *c = sk_EVP_CIPHER_value(ciphers, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); EVP_CIPHER_names_do_all(c, collect_names, names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s\n", OSSL_PROVIDER_name(EVP_CIPHER_provider(c))); sk_OPENSSL_CSTRING_free(names); if (verbose) { print_param_types("retrievable algorithm parameters", EVP_CIPHER_gettable_params(c), 4); print_param_types("retrievable operation parameters", EVP_CIPHER_gettable_ctx_params(c), 4); print_param_types("settable operation parameters", EVP_CIPHER_settable_ctx_params(c), 4); } } sk_EVP_CIPHER_pop_free(ciphers, EVP_CIPHER_free); } static void list_md_fn(const EVP_MD *m, const char *from, const char *to, void *arg) { if (m != NULL) { BIO_printf(arg, " %s\n", EVP_MD_name(m)); } else { if (from == NULL) from = "<undefined>"; if (to == NULL) to = "<undefined>"; BIO_printf((BIO *)arg, " %s => %s\n", from, to); } } DEFINE_STACK_OF(EVP_MD) static int md_cmp(const EVP_MD * const *a, const EVP_MD * const *b) { int ret = EVP_MD_number(*a) - EVP_MD_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(EVP_MD_provider(*a)), OSSL_PROVIDER_name(EVP_MD_provider(*b))); return ret; } static void collect_digests(EVP_MD *md, void *stack) { STACK_OF(EVP_MD) *digest_stack = stack; if (sk_EVP_MD_push(digest_stack, md) > 0) EVP_MD_up_ref(md); } static void list_digests(void) { STACK_OF(EVP_MD) *digests = sk_EVP_MD_new(md_cmp); int i; if (digests == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Legacy:\n"); EVP_MD_do_all_sorted(list_md_fn, bio_out); BIO_printf(bio_out, "Provided:\n"); EVP_MD_do_all_provided(NULL, collect_digests, digests); sk_EVP_MD_sort(digests); for (i = 0; i < sk_EVP_MD_num(digests); i++) { const EVP_MD *m = sk_EVP_MD_value(digests, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); EVP_MD_names_do_all(m, collect_names, 
names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s\n", OSSL_PROVIDER_name(EVP_MD_provider(m))); sk_OPENSSL_CSTRING_free(names); if (verbose) { print_param_types("retrievable algorithm parameters", EVP_MD_gettable_params(m), 4); print_param_types("retrievable operation parameters", EVP_MD_gettable_ctx_params(m), 4); print_param_types("settable operation parameters", EVP_MD_settable_ctx_params(m), 4); } } sk_EVP_MD_pop_free(digests, EVP_MD_free); } DEFINE_STACK_OF(EVP_MAC) static int mac_cmp(const EVP_MAC * const *a, const EVP_MAC * const *b) { int ret = EVP_MAC_number(*a) - EVP_MAC_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(EVP_MAC_provider(*a)), OSSL_PROVIDER_name(EVP_MAC_provider(*b))); return ret; } static void collect_macs(EVP_MAC *mac, void *stack) { STACK_OF(EVP_MAC) *mac_stack = stack; if (sk_EVP_MAC_push(mac_stack, mac) > 0) EVP_MAC_up_ref(mac); } static void list_macs(void) { STACK_OF(EVP_MAC) *macs = sk_EVP_MAC_new(mac_cmp); int i; if (macs == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Provided MACs:\n"); EVP_MAC_do_all_provided(NULL, collect_macs, macs); sk_EVP_MAC_sort(macs); for (i = 0; i < sk_EVP_MAC_num(macs); i++) { const EVP_MAC *m = sk_EVP_MAC_value(macs, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); EVP_MAC_names_do_all(m, collect_names, names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s\n", OSSL_PROVIDER_name(EVP_MAC_provider(m))); sk_OPENSSL_CSTRING_free(names); if (verbose) { print_param_types("retrievable algorithm parameters", EVP_MAC_gettable_params(m), 4); print_param_types("retrievable operation parameters", EVP_MAC_gettable_ctx_params(m), 4); print_param_types("settable operation parameters", EVP_MAC_settable_ctx_params(m), 4); } } sk_EVP_MAC_pop_free(macs, EVP_MAC_free); } /* * KDFs and PRFs */ DEFINE_STACK_OF(EVP_KDF) static int kdf_cmp(const EVP_KDF * const *a, const 
EVP_KDF * const *b) { int ret = EVP_KDF_number(*a) - EVP_KDF_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(EVP_KDF_provider(*a)), OSSL_PROVIDER_name(EVP_KDF_provider(*b))); return ret; } static void collect_kdfs(EVP_KDF *kdf, void *stack) { STACK_OF(EVP_KDF) *kdf_stack = stack; sk_EVP_KDF_push(kdf_stack, kdf); EVP_KDF_up_ref(kdf); } static void list_kdfs(void) { STACK_OF(EVP_KDF) *kdfs = sk_EVP_KDF_new(kdf_cmp); int i; if (kdfs == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Provided KDFs and PDFs:\n"); EVP_KDF_do_all_provided(NULL, collect_kdfs, kdfs); sk_EVP_KDF_sort(kdfs); for (i = 0; i < sk_EVP_KDF_num(kdfs); i++) { const EVP_KDF *k = sk_EVP_KDF_value(kdfs, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); EVP_KDF_names_do_all(k, collect_names, names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s\n", OSSL_PROVIDER_name(EVP_KDF_provider(k))); sk_OPENSSL_CSTRING_free(names); if (verbose) { print_param_types("retrievable algorithm parameters", EVP_KDF_gettable_params(k), 4); print_param_types("retrievable operation parameters", EVP_KDF_gettable_ctx_params(k), 4); print_param_types("settable operation parameters", EVP_KDF_settable_ctx_params(k), 4); } } sk_EVP_KDF_pop_free(kdfs, EVP_KDF_free); } /* * RANDs */ DEFINE_STACK_OF(EVP_RAND) static int rand_cmp(const EVP_RAND * const *a, const EVP_RAND * const *b) { int ret = strcasecmp(EVP_RAND_name(*a), EVP_RAND_name(*b)); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(EVP_RAND_provider(*a)), OSSL_PROVIDER_name(EVP_RAND_provider(*b))); return ret; } static void collect_rands(EVP_RAND *rand, void *stack) { STACK_OF(EVP_RAND) *rand_stack = stack; sk_EVP_RAND_push(rand_stack, rand); EVP_RAND_up_ref(rand); } static void list_random_generators(void) { STACK_OF(EVP_RAND) *rands = sk_EVP_RAND_new(rand_cmp); int i; if (rands == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } 
BIO_printf(bio_out, "Provided RNGs and seed sources:\n"); EVP_RAND_do_all_provided(NULL, collect_rands, rands); sk_EVP_RAND_sort(rands); for (i = 0; i < sk_EVP_RAND_num(rands); i++) { const EVP_RAND *m = sk_EVP_RAND_value(rands, i); BIO_printf(bio_out, " %s", EVP_RAND_name(m)); BIO_printf(bio_out, " @ %s\n", OSSL_PROVIDER_name(EVP_RAND_provider(m))); if (verbose) { print_param_types("retrievable algorithm parameters", EVP_RAND_gettable_params(m), 4); print_param_types("retrievable operation parameters", EVP_RAND_gettable_ctx_params(m), 4); print_param_types("settable operation parameters", EVP_RAND_settable_ctx_params(m), 4); } } sk_EVP_RAND_pop_free(rands, EVP_RAND_free); } /* * Encoders */ DEFINE_STACK_OF(OSSL_ENCODER) static int encoder_cmp(const OSSL_ENCODER * const *a, const OSSL_ENCODER * const *b) { int ret = OSSL_ENCODER_number(*a) - OSSL_ENCODER_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(OSSL_ENCODER_provider(*a)), OSSL_PROVIDER_name(OSSL_ENCODER_provider(*b))); return ret; } static void collect_encoders(OSSL_ENCODER *encoder, void *stack) { STACK_OF(OSSL_ENCODER) *encoder_stack = stack; sk_OSSL_ENCODER_push(encoder_stack, encoder); OSSL_ENCODER_up_ref(encoder); } static void list_encoders(void) { STACK_OF(OSSL_ENCODER) *encoders; int i; encoders = sk_OSSL_ENCODER_new(encoder_cmp); if (encoders == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Provided ENCODERs:\n"); OSSL_ENCODER_do_all_provided(NULL, collect_encoders, encoders); sk_OSSL_ENCODER_sort(encoders); for (i = 0; i < sk_OSSL_ENCODER_num(encoders); i++) { OSSL_ENCODER *k = sk_OSSL_ENCODER_value(encoders, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); OSSL_ENCODER_names_do_all(k, collect_names, names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s (%s)\n", OSSL_PROVIDER_name(OSSL_ENCODER_provider(k)), OSSL_ENCODER_properties(k)); sk_OPENSSL_CSTRING_free(names); if (verbose) { 
print_param_types("settable operation parameters", OSSL_ENCODER_settable_ctx_params(k), 4); } } sk_OSSL_ENCODER_pop_free(encoders, OSSL_ENCODER_free); } /* * Decoders */ DEFINE_STACK_OF(OSSL_DECODER) static int decoder_cmp(const OSSL_DECODER * const *a, const OSSL_DECODER * const *b) { int ret = OSSL_DECODER_number(*a) - OSSL_DECODER_number(*b); if (ret == 0) ret = strcmp(OSSL_PROVIDER_name(OSSL_DECODER_provider(*a)), OSSL_PROVIDER_name(OSSL_DECODER_provider(*b))); return ret; } static void collect_decoders(OSSL_DECODER *decoder, void *stack) { STACK_OF(OSSL_DECODER) *decoder_stack = stack; sk_OSSL_DECODER_push(decoder_stack, decoder); OSSL_DECODER_up_ref(decoder); } static void list_decoders(void) { STACK_OF(OSSL_DECODER) *decoders; int i; decoders = sk_OSSL_DECODER_new(decoder_cmp); if (decoders == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); return; } BIO_printf(bio_out, "Provided DECODERs:\n"); OSSL_DECODER_do_all_provided(NULL, collect_decoders, decoders); sk_OSSL_DECODER_sort(decoders); for (i = 0; i < sk_OSSL_DECODER_num(decoders); i++) { OSSL_DECODER *k = sk_OSSL_DECODER_value(decoders, i); STACK_OF(OPENSSL_CSTRING) *names = sk_OPENSSL_CSTRING_new(name_cmp); OSSL_DECODER_names_do_all(k, collect_names, names); BIO_printf(bio_out, " "); print_names(bio_out, names); BIO_printf(bio_out, " @ %s (%s)\n", OSSL_PROVIDER_name(OSSL_DECODER_provider(k)), OSSL_DECODER_properties(k)); sk_OPENSSL_CSTRING_free(names); if (verbose) { print_param_types("settable operation parameters", OSSL_DECODER_settable_ctx_params(k), 4); } } sk_OSSL_DECODER_pop_free(decoders, OSSL_DECODER_free); } static void list_missing_help(void) { const FUNCTION *fp; const OPTIONS *o; for (fp = functions; fp->name != NULL; fp++) { if ((o = fp->help) != NULL) { /* If there is help, list what flags are not documented. 
*/ for ( ; o->name != NULL; o++) { if (o->helpstr == NULL) BIO_printf(bio_out, "%s %s\n", fp->name, o->name); } } else if (fp->func != dgst_main) { /* If not aliased to the dgst command, */ BIO_printf(bio_out, "%s *\n", fp->name); } } } static void list_objects(void) { int max_nid = OBJ_new_nid(0); int i; char *oid_buf = NULL; int oid_size = 0; /* Skip 0, since that's NID_undef */ for (i = 1; i < max_nid; i++) { const ASN1_OBJECT *obj = OBJ_nid2obj(i); const char *sn = OBJ_nid2sn(i); const char *ln = OBJ_nid2ln(i); int n = 0; /* * If one of the retrieved objects somehow generated an error, * we ignore it. The check for NID_undef below will detect the * error and simply skip to the next NID. */ ERR_clear_error(); if (OBJ_obj2nid(obj) == NID_undef) continue; if ((n = OBJ_obj2txt(NULL, 0, obj, 1)) == 0) { BIO_printf(bio_out, "# None-OID object: %s, %s\n", sn, ln); continue; } if (n < 0) break; /* Error */ if (n > oid_size) { oid_buf = OPENSSL_realloc(oid_buf, n + 1); if (oid_buf == NULL) { BIO_printf(bio_err, "ERROR: Memory allocation\n"); break; /* Error */ } oid_size = n + 1; } if (OBJ_obj2txt(oid_buf, oid_size, obj, 1) < 0) break; /* Error */ if (ln == NULL || strcmp(sn, ln) == 0) BIO_printf(bio_out, "%s = %s\n", sn, oid_buf); else BIO_printf(bio_out, "%s = %s, %s\n", sn, ln, oid_buf); } OPENSSL_free(oid_buf); } static void list_options_for_command(const char *command) { const FUNCTION *fp; const OPTIONS *o; for (fp = functions; fp->name != NULL; fp++) if (strcmp(fp->name, command) == 0) break; if (fp->name == NULL) { BIO_printf(bio_err, "Invalid command '%s'; type \"help\" for a list.\n", command); return; } if ((o = fp->help) == NULL) return; for ( ; o->name != NULL; o++) { char c = o->valtype; if (o->name == OPT_PARAM_STR) break; if (o->name == OPT_HELP_STR || o->name == OPT_MORE_STR || o->name == OPT_SECTION_STR || o->name[0] == '\0') continue; BIO_printf(bio_out, "%s %c\n", o->name, c == '\0' ? 
'-' : c); } /* Always output the -- marker since it is sometimes documented. */ BIO_printf(bio_out, "- -\n"); } static void list_type(FUNC_TYPE ft, int one) { FUNCTION *fp; int i = 0; DISPLAY_COLUMNS dc; memset(&dc, 0, sizeof(dc)); if (!one) calculate_columns(functions, &dc); for (fp = functions; fp->name != NULL; fp++) { if (fp->type != ft) continue; if (one) { BIO_printf(bio_out, "%s\n", fp->name); } else { if (i % dc.columns == 0 && i > 0) BIO_printf(bio_out, "\n"); BIO_printf(bio_out, "%-*s", dc.width, fp->name); i++; } } if (!one) BIO_printf(bio_out, "\n\n"); } static void list_pkey(void) { int i; for (i = 0; i < EVP_PKEY_asn1_get_count(); i++) { const EVP_PKEY_ASN1_METHOD *ameth; int pkey_id, pkey_base_id, pkey_flags; const char *pinfo, *pem_str; ameth = EVP_PKEY_asn1_get0(i); EVP_PKEY_asn1_get0_info(&pkey_id, &pkey_base_id, &pkey_flags, &pinfo, &pem_str, ameth); if (pkey_flags & ASN1_PKEY_ALIAS) { BIO_printf(bio_out, "Name: %s\n", OBJ_nid2ln(pkey_id)); BIO_printf(bio_out, "\tAlias for: %s\n", OBJ_nid2ln(pkey_base_id)); } else { BIO_printf(bio_out, "Name: %s\n", pinfo); BIO_printf(bio_out, "\tType: %s Algorithm\n", pkey_flags & ASN1_PKEY_DYNAMIC ? "External" : "Builtin"); BIO_printf(bio_out, "\tOID: %s\n", OBJ_nid2ln(pkey_id)); if (pem_str == NULL) pem_str = "(none)"; BIO_printf(bio_out, "\tPEM string: %s\n", pem_str); } } } #ifndef OPENSSL_NO_DEPRECATED_3_0 static void list_pkey_meth(void) { size_t i; size_t meth_count = EVP_PKEY_meth_get_count(); for (i = 0; i < meth_count; i++) { const EVP_PKEY_METHOD *pmeth = EVP_PKEY_meth_get0(i); int pkey_id, pkey_flags; EVP_PKEY_meth_get0_info(&pkey_id, &pkey_flags, pmeth); BIO_printf(bio_out, "%s\n", OBJ_nid2ln(pkey_id)); BIO_printf(bio_out, "\tType: %s Algorithm\n", pkey_flags & ASN1_PKEY_DYNAMIC ? 
"External" : "Builtin"); } } #endif #ifndef OPENSSL_NO_DEPRECATED_3_0 static void list_engines(void) { # ifndef OPENSSL_NO_ENGINE ENGINE *e; BIO_puts(bio_out, "Engines:\n"); e = ENGINE_get_first(); while (e) { BIO_printf(bio_out, "%s\n", ENGINE_get_id(e)); e = ENGINE_get_next(e); } # else BIO_puts(bio_out, "Engine support is disabled.\n"); # endif } #endif static void list_disabled(void) { BIO_puts(bio_out, "Disabled algorithms:\n"); #ifdef OPENSSL_NO_ARIA BIO_puts(bio_out, "ARIA\n"); #endif #ifdef OPENSSL_NO_BF BIO_puts(bio_out, "BF\n"); #endif #ifdef OPENSSL_NO_BLAKE2 BIO_puts(bio_out, "BLAKE2\n"); #endif #ifdef OPENSSL_NO_CAMELLIA BIO_puts(bio_out, "CAMELLIA\n"); #endif #ifdef OPENSSL_NO_CAST BIO_puts(bio_out, "CAST\n"); #endif #ifdef OPENSSL_NO_CMAC BIO_puts(bio_out, "CMAC\n"); #endif #ifdef OPENSSL_NO_CMS BIO_puts(bio_out, "CMS\n"); #endif #ifdef OPENSSL_NO_COMP BIO_puts(bio_out, "COMP\n"); #endif #ifdef OPENSSL_NO_DES BIO_puts(bio_out, "DES\n"); #endif #ifdef OPENSSL_NO_DGRAM BIO_puts(bio_out, "DGRAM\n"); #endif #ifdef OPENSSL_NO_DH BIO_puts(bio_out, "DH\n"); #endif #ifdef OPENSSL_NO_DSA BIO_puts(bio_out, "DSA\n"); #endif #if defined(OPENSSL_NO_DTLS) BIO_puts(bio_out, "DTLS\n"); #endif #if defined(OPENSSL_NO_DTLS1) BIO_puts(bio_out, "DTLS1\n"); #endif #if defined(OPENSSL_NO_DTLS1_2) BIO_puts(bio_out, "DTLS1_2\n"); #endif #ifdef OPENSSL_NO_EC BIO_puts(bio_out, "EC\n"); #endif #ifdef OPENSSL_NO_EC2M BIO_puts(bio_out, "EC2M\n"); #endif #if defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0) BIO_puts(bio_out, "ENGINE\n"); #endif #ifdef OPENSSL_NO_GOST BIO_puts(bio_out, "GOST\n"); #endif #ifdef OPENSSL_NO_IDEA BIO_puts(bio_out, "IDEA\n"); #endif #ifdef OPENSSL_NO_MD2 BIO_puts(bio_out, "MD2\n"); #endif #ifdef OPENSSL_NO_MD4 BIO_puts(bio_out, "MD4\n"); #endif #ifdef OPENSSL_NO_MD5 BIO_puts(bio_out, "MD5\n"); #endif #ifdef OPENSSL_NO_MDC2 BIO_puts(bio_out, "MDC2\n"); #endif #ifdef OPENSSL_NO_OCB BIO_puts(bio_out, "OCB\n"); #endif #ifdef OPENSSL_NO_OCSP 
BIO_puts(bio_out, "OCSP\n"); #endif #ifdef OPENSSL_NO_PSK BIO_puts(bio_out, "PSK\n"); #endif #ifdef OPENSSL_NO_RC2 BIO_puts(bio_out, "RC2\n"); #endif #ifdef OPENSSL_NO_RC4 BIO_puts(bio_out, "RC4\n"); #endif #ifdef OPENSSL_NO_RC5 BIO_puts(bio_out, "RC5\n"); #endif #ifdef OPENSSL_NO_RMD160 BIO_puts(bio_out, "RMD160\n"); #endif #ifdef OPENSSL_NO_RSA BIO_puts(bio_out, "RSA\n"); #endif #ifdef OPENSSL_NO_SCRYPT BIO_puts(bio_out, "SCRYPT\n"); #endif #ifdef OPENSSL_NO_SCTP BIO_puts(bio_out, "SCTP\n"); #endif #ifdef OPENSSL_NO_SEED BIO_puts(bio_out, "SEED\n"); #endif #ifdef OPENSSL_NO_SM2 BIO_puts(bio_out, "SM2\n"); #endif #ifdef OPENSSL_NO_SM3 BIO_puts(bio_out, "SM3\n"); #endif #ifdef OPENSSL_NO_SM4 BIO_puts(bio_out, "SM4\n"); #endif #ifdef OPENSSL_NO_SOCK BIO_puts(bio_out, "SOCK\n"); #endif #ifdef OPENSSL_NO_SRP BIO_puts(bio_out, "SRP\n"); #endif #ifdef OPENSSL_NO_SRTP BIO_puts(bio_out, "SRTP\n"); #endif #ifdef OPENSSL_NO_SSL3 BIO_puts(bio_out, "SSL3\n"); #endif #ifdef OPENSSL_NO_TLS1 BIO_puts(bio_out, "TLS1\n"); #endif #ifdef OPENSSL_NO_TLS1_1 BIO_puts(bio_out, "TLS1_1\n"); #endif #ifdef OPENSSL_NO_TLS1_2 BIO_puts(bio_out, "TLS1_2\n"); #endif #ifdef OPENSSL_NO_WHIRLPOOL BIO_puts(bio_out, "WHIRLPOOL\n"); #endif #ifndef ZLIB BIO_puts(bio_out, "ZLIB\n"); #endif } /* Unified enum for help and list commands. 
*/ typedef enum HELPLIST_CHOICE { OPT_ERR = -1, OPT_EOF = 0, OPT_HELP, OPT_ONE, OPT_VERBOSE, OPT_COMMANDS, OPT_DIGEST_COMMANDS, OPT_MAC_ALGORITHMS, OPT_OPTIONS, OPT_DIGEST_ALGORITHMS, OPT_CIPHER_COMMANDS, OPT_CIPHER_ALGORITHMS, OPT_PK_ALGORITHMS, OPT_PK_METHOD, OPT_DISABLED, OPT_KDF_ALGORITHMS, OPT_RANDOM_GENERATORS, OPT_ENCODERS, OPT_DECODERS, OPT_MISSING_HELP, OPT_OBJECTS, #ifndef OPENSSL_NO_DEPRECATED_3_0 OPT_ENGINES, #endif OPT_PROV_ENUM } HELPLIST_CHOICE; const OPTIONS list_options[] = { OPT_SECTION("General"), {"help", OPT_HELP, '-', "Display this summary"}, OPT_SECTION("Output"), {"1", OPT_ONE, '-', "List in one column"}, {"verbose", OPT_VERBOSE, '-', "Verbose listing"}, {"commands", OPT_COMMANDS, '-', "List of standard commands"}, {"standard-commands", OPT_COMMANDS, '-', "List of standard commands"}, {"digest-commands", OPT_DIGEST_COMMANDS, '-', "List of message digest commands"}, {"digest-algorithms", OPT_DIGEST_ALGORITHMS, '-', "List of message digest algorithms"}, {"kdf-algorithms", OPT_KDF_ALGORITHMS, '-', "List of key derivation and pseudo random function algorithms"}, {"random-generators", OPT_RANDOM_GENERATORS, '-', "List of random number generators"}, {"mac-algorithms", OPT_MAC_ALGORITHMS, '-', "List of message authentication code algorithms"}, {"cipher-commands", OPT_CIPHER_COMMANDS, '-', "List of cipher commands"}, {"cipher-algorithms", OPT_CIPHER_ALGORITHMS, '-', "List of cipher algorithms"}, {"encoders", OPT_ENCODERS, '-', "List of encoding methods" }, {"decoders", OPT_DECODERS, '-', "List of decoding methods" }, {"public-key-algorithms", OPT_PK_ALGORITHMS, '-', "List of public key algorithms"}, #ifndef OPENSSL_NO_DEPRECATED_3_0 {"public-key-methods", OPT_PK_METHOD, '-', "List of public key methods"}, {"engines", OPT_ENGINES, '-', "List of loaded engines"}, #endif {"disabled", OPT_DISABLED, '-', "List of disabled features"}, {"missing-help", OPT_MISSING_HELP, '-', "List missing detailed help strings"}, {"options", OPT_OPTIONS, 's', "List options 
for specified command"}, {"objects", OPT_OBJECTS, '-', "List built in objects (OID<->name mappings)"}, OPT_PROV_OPTIONS, {NULL} }; int list_main(int argc, char **argv) { char *prog; HELPLIST_CHOICE o; int one = 0, done = 0; struct { unsigned int commands:1; unsigned int random_generators:1; unsigned int digest_commands:1; unsigned int digest_algorithms:1; unsigned int kdf_algorithms:1; unsigned int mac_algorithms:1; unsigned int cipher_commands:1; unsigned int cipher_algorithms:1; unsigned int encoder_algorithms:1; unsigned int decoder_algorithms:1; unsigned int pk_algorithms:1; unsigned int pk_method:1; #ifndef OPENSSL_NO_DEPRECATED_3_0 unsigned int engines:1; #endif unsigned int disabled:1; unsigned int missing_help:1; unsigned int objects:1; unsigned int options:1; } todo = { 0, }; verbose = 0; /* Clear a possible previous call */ prog = opt_init(argc, argv, list_options); while ((o = opt_next()) != OPT_EOF) { switch (o) { case OPT_EOF: /* Never hit, but suppresses warning */ case OPT_ERR: opthelp: BIO_printf(bio_err, "%s: Use -help for summary.\n", prog); return 1; case OPT_HELP: opt_help(list_options); break; case OPT_ONE: one = 1; break; case OPT_COMMANDS: todo.commands = 1; break; case OPT_DIGEST_COMMANDS: todo.digest_commands = 1; break; case OPT_DIGEST_ALGORITHMS: todo.digest_algorithms = 1; break; case OPT_KDF_ALGORITHMS: todo.kdf_algorithms = 1; break; case OPT_RANDOM_GENERATORS: todo.random_generators = 1; break; case OPT_MAC_ALGORITHMS: todo.mac_algorithms = 1; break; case OPT_CIPHER_COMMANDS: todo.cipher_commands = 1; break; case OPT_CIPHER_ALGORITHMS: todo.cipher_algorithms = 1; break; case OPT_ENCODERS: todo.encoder_algorithms = 1; break; case OPT_DECODERS: todo.decoder_algorithms = 1; break; case OPT_PK_ALGORITHMS: todo.pk_algorithms = 1; break; case OPT_PK_METHOD: todo.pk_method = 1; break; #ifndef OPENSSL_NO_DEPRECATED_3_0 case OPT_ENGINES: todo.engines = 1; break; #endif case OPT_DISABLED: todo.disabled = 1; break; case OPT_MISSING_HELP: 
todo.missing_help = 1; break; case OPT_OBJECTS: todo.objects = 1; break; case OPT_OPTIONS: list_options_for_command(opt_arg()); break; case OPT_VERBOSE: verbose = 1; break; case OPT_PROV_CASES: if (!opt_provider(o)) return 1; break; } done = 1; } if (opt_num_rest() != 0) { BIO_printf(bio_err, "Extra arguments given.\n"); goto opthelp; } if (todo.commands) list_type(FT_general, one); if (todo.random_generators) list_random_generators(); if (todo.digest_commands) list_type(FT_md, one); if (todo.digest_algorithms) list_digests(); if (todo.kdf_algorithms) list_kdfs(); if (todo.mac_algorithms) list_macs(); if (todo.cipher_commands) list_type(FT_cipher, one); if (todo.cipher_algorithms) list_ciphers(); if (todo.encoder_algorithms) list_encoders(); if (todo.decoder_algorithms) list_decoders(); if (todo.pk_algorithms) list_pkey(); #ifndef OPENSSL_NO_DEPRECATED_3_0 if (todo.pk_method) list_pkey_meth(); if (todo.engines) list_engines(); #endif if (todo.disabled) list_disabled(); if (todo.missing_help) list_missing_help(); if (todo.objects) list_objects(); if (!done) goto opthelp; return 0; }
648702.c
/*
 * BCM283x timer driver: the ARM-side timer provides the periodic tick
 * interrupt; the free-running 1 MHz system timer provides timestamps.
 */
#include <mm/mmu.h>
#include <dev/timer.h>
#include <kernel/irq.h>
#include <basic_math.h>

#define SYSTEM_TIMER_BASE (_mmio_base+0x3000)
#define SYSTEM_TIMER_LOW  0x0004 // System Timer Counter Lower 32 bits
#define SYSTEM_TIMER_HI   0x0008 // System Timer Counter Upper 32 bits

#define ARM_TIMER_LOD (_mmio_base+0x0B400)
#define ARM_TIMER_VAL (_mmio_base+0x0B404)
#define ARM_TIMER_CTL (_mmio_base+0x0B408)
#define ARM_TIMER_CLI (_mmio_base+0x0B40C)
#define ARM_TIMER_RIS (_mmio_base+0x0B410)
#define ARM_TIMER_MIS (_mmio_base+0x0B414)
#define ARM_TIMER_RLD (_mmio_base+0x0B418)
#define ARM_TIMER_DIV (_mmio_base+0x0B41C)
#define ARM_TIMER_CNT (_mmio_base+0x0B420)

/*
 * Program the ARM timer for a periodic interrupt.
 * @param id                   unused (single ARM timer on this SoC).
 * @param interval_microsecond period between interrupts.
 */
void timer_set_interval(uint32_t id, uint32_t interval_microsecond) {
	(void)id;
	/* Disable the timer while reprogramming; keep the 0x3E pre-divider. */
	put32(ARM_TIMER_CTL, 0x003E0000);
	/* Load/reload: the timer fires after N+1 ticks, hence the -1.
	 * NOTE(review): the *10 scaling presumably matches the pre-divided
	 * timer clock rate -- confirm against the board's core clock setup. */
	put32(ARM_TIMER_LOD, interval_microsecond*10 - 1);
	put32(ARM_TIMER_RLD, interval_microsecond*10 - 1);
	put32(ARM_TIMER_CLI, 0);
	/* Re-enable: 32-bit counter, interrupt enabled, timer enabled. */
	put32(ARM_TIMER_CTL, 0x003E00A2);
	put32(ARM_TIMER_CLI, 0);
}

/* Acknowledge a pending ARM timer interrupt. @param id unused. */
void timer_clear_interrupt(uint32_t id) {
	(void)id;
	put32(ARM_TIMER_CLI, 0);
}

/*
 * Read the free-running system timer in microseconds.
 *
 * The 64-bit counter is exposed as two 32-bit MMIO registers.  The
 * original code read HI then LOW once; if LOW wrapped between the two
 * reads the result was off by ~2^32 us (a torn read).  Re-read HI and
 * retry until it is stable around the LOW read.
 */
uint64_t timer_read_sys_usec(void) { //read microsec
	uint32_t hi, lo;
	do {
		hi = get32(SYSTEM_TIMER_BASE + SYSTEM_TIMER_HI);
		lo = get32(SYSTEM_TIMER_BASE + SYSTEM_TIMER_LOW);
	} while (hi != get32(SYSTEM_TIMER_BASE + SYSTEM_TIMER_HI));
	return (((uint64_t)hi) << 32) | lo;
}
834909.c
#define UNIT #define TRANSA 3 #define ASMNAME ztrmv_RLU #define ASMFNAME ztrmv_RLU_ #define NAME ztrmv_RLU_ #define CNAME ztrmv_RLU #define CHAR_NAME "ztrmv_RLU_" #define CHAR_CNAME "ztrmv_RLU" #define DOUBLE #define COMPLEX #include "/lustre/scratch3/turquoise/rvangara/RD100/distnnmfkcpp_Src/install_dependencies/xianyi-OpenBLAS-6d2da63/driver/level2/ztrmv_L.c"
201238.c
#include "asn1fix_internal.h"
#include "asn1fix_export.h"

/* Fallback arg_t supplying the error-handler and debug callbacks that
 * every *_ex() wrapper below copies into its locally built arg_t. */
extern arg_t a1f_replace_me_with_proper_interface_arg;

/* The asn1p_t registered via asn1f_use_standard_namespaces(); read by
 * the _add_standard_namespaces() callback. */
static asn1p_t *asn1f_ssn_asn_;

/*
 * Namespace callback: look up the ASN1C-UsefulInformationObjectClasses
 * module (by name and OID) in the registered tree and, if found, add it
 * to the given namespace as an auto-imported module.
 */
static void
_add_standard_namespaces(asn1_namespace_t *ns) {
	asn1p_oid_t *uioc_oid;
	asn1p_oid_arc_t arcs[] = {{1, "iso"},
	                          {3, "org"},
	                          {6, "dod"},
	                          {1, "internet"},
	                          {4, "private"},
	                          {1, "enterprise"},
	                          {9363, "spelio"},
	                          {1, "software"},
	                          {5, "asn1c"},
	                          {3, "standard-modules"},
	                          {0, "auto-imported"},
	                          {1, 0}};

	uioc_oid = asn1p_oid_construct(arcs, sizeof(arcs) / sizeof(arcs[0]));

	asn1p_module_t *module = asn1f_lookup_module_ex(
	    asn1f_ssn_asn_, "ASN1C-UsefulInformationObjectClasses", uioc_oid);
	asn1p_oid_free(uioc_oid);
	if(module) {
		asn1_namespace_add_module(ns, module, 0);
	}
}

/* Register `asn` as the tree that standard-namespace lookups use, and
 * install the callback above. */
void asn1f_use_standard_namespaces(asn1p_t *asn) {
	asn1f_ssn_asn_ = asn;
	asn1_namespace_add_standard_namespaces_callback(_add_standard_namespaces);
}

/* Exported wrapper around asn1f_lookup_module(): builds a minimal arg_t
 * (eh/debug taken from the shared fallback) and delegates. */
asn1p_module_t *
asn1f_lookup_module_ex(asn1p_t *asn, const char *module_name,
                       const asn1p_oid_t *oid) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_lookup_module(&arg, module_name, oid);
}

/* Exported wrapper around asn1f_lookup_symbol(): resolves `ref` in the
 * context of `expr` (its module and rhs parameter specs). */
asn1p_expr_t *
asn1f_lookup_symbol_ex(asn1p_t *asn, asn1_namespace_t *ns, asn1p_expr_t *expr,
                       const asn1p_ref_t *ref) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.ns = ns;
	arg.mod = expr->module;
	arg.expr = expr;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_lookup_symbol(&arg, expr->rhs_pspecs, ref);
}

/* Exported wrapper around asn1f_class_access(); note the module is
 * passed explicitly here rather than taken from expr->module. */
asn1p_expr_t *
asn1f_class_access_ex(asn1p_t *asn, asn1p_module_t *mod, asn1_namespace_t *ns,
                      asn1p_expr_t *expr, asn1p_expr_t *rhs_pspecs,
                      const asn1p_ref_t *ref) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.mod = mod;
	arg.ns = ns;
	arg.expr = expr;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_class_access(&arg, rhs_pspecs, ref);
}

/* Exported wrapper around asn1f_find_terminal_type(): chases type
 * references from `expr` down to the terminal (non-reference) type. */
asn1p_expr_t *
asn1f_find_terminal_type_ex(asn1p_t *asn, asn1_namespace_t *ns,
                            asn1p_expr_t *expr) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.ns = ns;
	arg.mod = expr->module;
	arg.expr = expr;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_find_terminal_type(&arg, expr);
}

/* Exported wrapper around asn1f_find_ancestor_type_with_PER_constraint().
 * No namespace is needed for this lookup. */
asn1p_expr_t *
asn1f_find_ancestor_type_with_PER_constraint_ex(asn1p_t *asn,
                                                asn1p_expr_t *expr) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.mod = expr->module;
	arg.expr = expr;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_find_ancestor_type_with_PER_constraint(&arg, expr);
}

/* Exported wrapper around asn1f_fix_dereference_values(); returns the
 * callee's status code. */
int
asn1f_fix_dereference_values_ex(asn1p_t *asn, asn1p_module_t *mod,
                                asn1p_expr_t *expr) {
	arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.asn = asn;
	arg.mod = mod;
	arg.expr = expr;
	arg.eh = a1f_replace_me_with_proper_interface_arg.eh;
	arg.debug = a1f_replace_me_with_proper_interface_arg.debug;

	return asn1f_fix_dereference_values(&arg);
}
79543.c
/*
 *  Timer Interface - main file
 *  Copyright (c) 1998-2001 by Jaroslav Kysela <perex@perex.cz>
 *
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as
 *   published by the Free Software Foundation; either version 2.1 of
 *   the License, or (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public
 *   License along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#define __USE_GNU
#include <fcntl.h>
#include <sys/ioctl.h>
#include "timer_local.h"

#ifndef PIC
/* entry for static linking */
const char *_snd_module_timer_hw = "";
#endif

#define SNDRV_FILE_TIMER	ALSA_DEVICE_DIRECTORY "timer"
#define SNDRV_TIMER_VERSION_MAX	SNDRV_PROTOCOL_VERSION(2, 0, 5)

/* Pre-2.0.x ioctl numbers kept for compatibility with older kernels;
 * chosen at runtime based on the driver protocol version. */
#define SNDRV_TIMER_IOCTL_STATUS_OLD _IOW('T', 0x14, struct sndrv_timer_status)
enum {
	SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
	SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
	SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};

/* Close the timer device fd; returns 0 or -errno. */
static int snd_timer_hw_close(snd_timer_t *handle)
{
	snd_timer_t *tmr = handle;
	int res;

	if (!tmr)
		return -EINVAL;
	res = close(tmr->poll_fd) < 0 ? -errno : 0;
	return res;
}

/* Toggle O_NONBLOCK on the timer fd. */
static int snd_timer_hw_nonblock(snd_timer_t *timer, int nonblock)
{
	long flags;

	assert(timer);
	if ((flags = fcntl(timer->poll_fd, F_GETFL)) < 0)
		return -errno;
	if (nonblock)
		flags |= O_NONBLOCK;
	else
		flags &= ~O_NONBLOCK;
	if (fcntl(timer->poll_fd, F_SETFL, flags) < 0)
		return -errno;
	return 0;
}

/* Enable (sig >= 0) or disable (sig < 0) async SIGIO-style notification.
 * When enabling, route signal `sig` to process/group `pid`. */
static int snd_timer_hw_async(snd_timer_t *timer, int sig, pid_t pid)
{
	long flags;
	int fd;

	assert(timer);
	fd = timer->poll_fd;
	if ((flags = fcntl(fd, F_GETFL)) < 0) {
		SYSERR("F_GETFL failed");
		return -errno;
	}
	if (sig >= 0)
		flags |= O_ASYNC;
	else
		flags &= ~O_ASYNC;
	if (fcntl(fd, F_SETFL, flags) < 0) {
		SYSERR("F_SETFL for O_ASYNC failed");
		return -errno;
	}
	if (sig < 0)
		return 0;
	if (fcntl(fd, F_SETSIG, (long)sig) < 0) {
		SYSERR("F_SETSIG failed");
		return -errno;
	}
	if (fcntl(fd, F_SETOWN, (long)pid) < 0) {
		SYSERR("F_SETOWN failed");
		return -errno;
	}
	return 0;
}

/* Query static timer information from the driver. */
static int snd_timer_hw_info(snd_timer_t *handle, snd_timer_info_t * info)
{
	snd_timer_t *tmr;

	tmr = handle;
	if (!tmr || !info)
		return -EINVAL;
	if (ioctl(tmr->poll_fd, SNDRV_TIMER_IOCTL_INFO, info) < 0)
		return -errno;
	return 0;
}

/* Set timer parameters (resolution, flags, ...). */
static int snd_timer_hw_params(snd_timer_t *handle, snd_timer_params_t * params)
{
	snd_timer_t *tmr;

	tmr = handle;
	if (!tmr || !params)
		return -EINVAL;
	if (ioctl(tmr->poll_fd, SNDRV_TIMER_IOCTL_PARAMS, params) < 0)
		return -errno;
	return 0;
}

/* Query runtime status; falls back to the pre-2.0.1 ioctl layout on
 * older drivers. */
static int snd_timer_hw_status(snd_timer_t *handle, snd_timer_status_t * status)
{
	snd_timer_t *tmr;
	int cmd;

	tmr = handle;
	if (!tmr || !status)
		return -EINVAL;
	if (tmr->version < SNDRV_PROTOCOL_VERSION(2, 0, 1))
		cmd = SNDRV_TIMER_IOCTL_STATUS_OLD;
	else
		cmd = SNDRV_TIMER_IOCTL_STATUS;
	if (ioctl(tmr->poll_fd, cmd, status) < 0)
		return -errno;
	return 0;
}

/* Start the timer (old ioctl number before protocol 2.0.4). */
static int snd_timer_hw_start(snd_timer_t *handle)
{
	snd_timer_t *tmr;
	unsigned int cmd;

	tmr = handle;
	if (!tmr)
		return -EINVAL;
	if (tmr->version < SNDRV_PROTOCOL_VERSION(2, 0, 4))
		cmd = SNDRV_TIMER_IOCTL_START_OLD;
	else
		cmd = SNDRV_TIMER_IOCTL_START;
	if (ioctl(tmr->poll_fd, cmd) < 0)
		return -errno;
	return 0;
}

/* Stop the timer (old ioctl number before protocol 2.0.4). */
static int snd_timer_hw_stop(snd_timer_t *handle)
{
	snd_timer_t *tmr;
	unsigned int cmd;

	tmr = handle;
	if (!tmr)
		return -EINVAL;
	if (tmr->version < SNDRV_PROTOCOL_VERSION(2, 0, 4))
		cmd = SNDRV_TIMER_IOCTL_STOP_OLD;
	else
		cmd = SNDRV_TIMER_IOCTL_STOP;
	if (ioctl(tmr->poll_fd, cmd) < 0)
		return -errno;
	return 0;
}

/* Continue a stopped timer (old ioctl number before protocol 2.0.4). */
static int snd_timer_hw_continue(snd_timer_t *handle)
{
	snd_timer_t *tmr;
	unsigned int cmd;

	tmr = handle;
	if (!tmr)
		return -EINVAL;
	if (tmr->version < SNDRV_PROTOCOL_VERSION(2, 0, 4))
		cmd = SNDRV_TIMER_IOCTL_CONTINUE_OLD;
	else
		cmd = SNDRV_TIMER_IOCTL_CONTINUE;
	if (ioctl(tmr->poll_fd, cmd) < 0)
		return -errno;
	return 0;
}

/* Read timer events from the device fd; returns bytes read or -errno. */
static ssize_t snd_timer_hw_read(snd_timer_t *handle, void *buffer, size_t size)
{
	snd_timer_t *tmr;
	ssize_t result;

	tmr = handle;
	if (!tmr || (!buffer && size > 0))
		return -EINVAL;
	result = read(tmr->poll_fd, buffer, size);
	if (result < 0)
		return -errno;
	return result;
}

/* Dispatch table installed into snd_timer_t for the hw backend. */
static const snd_timer_ops_t snd_timer_hw_ops = {
	.close = snd_timer_hw_close,
	.nonblock = snd_timer_hw_nonblock,
	.async = snd_timer_hw_async,
	.info = snd_timer_hw_info,
	.params = snd_timer_hw_params,
	.status = snd_timer_hw_status,
	.rt_start = snd_timer_hw_start,
	.rt_stop = snd_timer_hw_stop,
	.rt_continue = snd_timer_hw_continue,
	.read = snd_timer_hw_read,
};

/*
 * Open the kernel timer device, negotiate the protocol version,
 * optionally enable extended ("tread") event reads, select the timer
 * identified by class/sclass/card/device/subdevice, and hand back a
 * heap-allocated snd_timer_t (owned by the caller, freed via close).
 * Returns 0 or a negative error code; *handle is NULL on failure.
 */
int snd_timer_hw_open(snd_timer_t **handle, const char *name, int dev_class, int dev_sclass, int card, int device, int subdevice, int mode)
{
	int fd, ver, tmode, ret;
	snd_timer_t *tmr;
	struct sndrv_timer_select sel;

	*handle = NULL;

	tmode = O_RDONLY;
	if (mode & SND_TIMER_OPEN_NONBLOCK)
		tmode |= O_NONBLOCK;
	fd = snd_open_device(SNDRV_FILE_TIMER, tmode);
	if (fd < 0)
		return -errno;
#if 0
	/*
	 * this is bogus, an application have to care about open filedescriptors
	 */
	if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
		SYSERR("fcntl FD_CLOEXEC failed");
		ret = -errno;
		close(fd);
		return ret;
	}
#endif
	if (ioctl(fd, SNDRV_TIMER_IOCTL_PVERSION, &ver) < 0) {
		ret = -errno;
		close(fd);
		return ret;
	}
	if (SNDRV_PROTOCOL_INCOMPATIBLE(ver, SNDRV_TIMER_VERSION_MAX)) {
		close(fd);
		return -SND_ERROR_INCOMPATIBLE_VERSION;
	}
	if (mode & SND_TIMER_OPEN_TREAD) {
		int arg = 1;

		/* Extended reads need protocol >= 2.0.3; both failure paths
		 * converge on the __no_tread cleanup label below. */
		if (ver < SNDRV_PROTOCOL_VERSION(2, 0, 3)) {
			ret = -ENOTTY;
			goto __no_tread;
		}
		if (ioctl(fd, SNDRV_TIMER_IOCTL_TREAD, &arg) < 0) {
			ret = -errno;
		      __no_tread:
			close(fd);
			SNDMSG("extended read is not supported (SNDRV_TIMER_IOCTL_TREAD)");
			return ret;
		}
	}
	memset(&sel, 0, sizeof(sel));
	sel.id.dev_class = dev_class;
	sel.id.dev_sclass = dev_sclass;
	sel.id.card = card;
	sel.id.device = device;
	sel.id.subdevice = subdevice;
	if (ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel) < 0) {
		ret = -errno;
		close(fd);
		return ret;
	}
	tmr = (snd_timer_t *) calloc(1, sizeof(snd_timer_t));
	if (tmr == NULL) {
		close(fd);
		return -ENOMEM;
	}
	tmr->type = SND_TIMER_TYPE_HW;
	tmr->version = ver;
	tmr->mode = tmode;
	tmr->name = strdup(name);
	tmr->poll_fd = fd;
	tmr->ops = &snd_timer_hw_ops;
	INIT_LIST_HEAD(&tmr->async_handlers);
	*handle = tmr;
	return 0;
}

/*
 * Config-tree entry point: parse the "timer.hw" configuration node
 * (class/sclass/card/device/subdevice fields; card may be an index or
 * a card name) and open the corresponding hw timer.
 */
int _snd_timer_hw_open(snd_timer_t **timer, char *name,
		       snd_config_t *root ATTRIBUTE_UNUSED, snd_config_t *conf, int mode)
{
	snd_config_iterator_t i, next;
	long dev_class = SND_TIMER_CLASS_GLOBAL, dev_sclass = SND_TIMER_SCLASS_NONE;
	long card = 0, device = 0, subdevice = 0;
	const char *str;
	int err;

	snd_config_for_each(i, next, conf) {
		snd_config_t *n = snd_config_iterator_entry(i);
		const char *id;
		if (snd_config_get_id(n, &id) < 0)
			continue;
		if (strcmp(id, "comment") == 0)
			continue;
		if (strcmp(id, "type") == 0)
			continue;
		if (strcmp(id, "class") == 0) {
			err = snd_config_get_integer(n, &dev_class);
			if (err < 0)
				return err;
			continue;
		}
		if (strcmp(id, "sclass") == 0) {
			err = snd_config_get_integer(n, &dev_sclass);
			if (err < 0)
				return err;
			continue;
		}
		if (strcmp(id, "card") == 0) {
			/* Accept either an integer index or a card name. */
			err = snd_config_get_integer(n, &card);
			if (err < 0) {
				err = snd_config_get_string(n, &str);
				if (err < 0)
					return -EINVAL;
				card = snd_card_get_index(str);
				if (card < 0)
					return card;
			}
			continue;
		}
		if (strcmp(id, "device") == 0) {
			err = snd_config_get_integer(n, &device);
			if (err < 0)
				return err;
			continue;
		}
		if (strcmp(id, "subdevice") == 0) {
			err = snd_config_get_integer(n, &subdevice);
			if (err < 0)
				return err;
			continue;
		}
		SNDERR("Unexpected field %s", id);
		return -EINVAL;
	}
	if (card < 0)
		return -EINVAL;
	return snd_timer_hw_open(timer, name, dev_class, dev_sclass, card, device, subdevice, mode);
}
SND_DLSYM_BUILD_VERSION(_snd_timer_hw_open, SND_TIMER_DLSYM_VERSION);
529275.c
/*
 * f: trivial helper.  The original body was `int f(){ }` -- control fell
 * off the end of a non-void function whose return value is then used by
 * main(), which is undefined behavior (C11 6.9.1p12).  Return an explicit
 * success code instead.
 */
int f(void) {
	return 0;
}

/* Program entry point: exits with f()'s status (0 on success). */
int main(void) {
	return f();
}
769027.c
/* USER CODE BEGIN Header */ /** ****************************************************************************** * @file : main.c * @brief : Main program body ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2021 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "main.h" /* Private includes ----------------------------------------------------------*/ /* USER CODE BEGIN Includes */ /* USER CODE END Includes */ /* Private typedef -----------------------------------------------------------*/ /* USER CODE BEGIN PTD */ /* USER CODE END PTD */ /* Private define ------------------------------------------------------------*/ /* USER CODE BEGIN PD */ /* USER CODE END PD */ /* Private macro -------------------------------------------------------------*/ /* USER CODE BEGIN PM */ /* USER CODE END PM */ /* Private variables ---------------------------------------------------------*/ CEC_HandleTypeDef hcec; I2C_HandleTypeDef hi2c1; I2S_HandleTypeDef hi2s1; I2S_HandleTypeDef hi2s2; SAI_HandleTypeDef hsai_BlockA1; SAI_HandleTypeDef hsai_BlockB1; SPDIFRX_HandleTypeDef hspdif; UART_HandleTypeDef huart3; PCD_HandleTypeDef hpcd_USB_OTG_FS; uint8_t cec_receive_buffer[16]; /* USER CODE BEGIN PV */ /* USER CODE END PV */ /* Private function prototypes -----------------------------------------------*/ void SystemClock_Config(void); static void MX_GPIO_Init(void); static void MX_HDMI_CEC_Init(void); static void MX_I2C1_Init(void); static void MX_I2S1_Init(void); static void 
MX_USART3_UART_Init(void); static void MX_USB_OTG_FS_PCD_Init(void); static void MX_SAI1_Init(void); static void MX_I2S2_Init(void); static void MX_SPDIFRX_Init(void); /* USER CODE BEGIN PFP */ /* USER CODE END PFP */ /* Private user code ---------------------------------------------------------*/ /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /** * @brief The application entry point. * @retval int */ int main(void) { /* USER CODE BEGIN 1 */ /* USER CODE END 1 */ /* MCU Configuration--------------------------------------------------------*/ /* Reset of all peripherals, Initializes the Flash interface and the Systick. */ HAL_Init(); /* USER CODE BEGIN Init */ /* USER CODE END Init */ /* Configure the system clock */ SystemClock_Config(); /* USER CODE BEGIN SysInit */ /* USER CODE END SysInit */ /* Initialize all configured peripherals */ MX_GPIO_Init(); MX_HDMI_CEC_Init(); MX_I2C1_Init(); MX_I2S1_Init(); MX_USART3_UART_Init(); MX_USB_OTG_FS_PCD_Init(); MX_SAI1_Init(); MX_I2S2_Init(); MX_SPDIFRX_Init(); /* USER CODE BEGIN 2 */ /* USER CODE END 2 */ /* Infinite loop */ /* USER CODE BEGIN WHILE */ while (1) { /* USER CODE END WHILE */ /* USER CODE BEGIN 3 */ } /* USER CODE END 3 */ } /** * @brief System Clock Configuration * @retval None */ void SystemClock_Config(void) { RCC_OscInitTypeDef RCC_OscInitStruct = {0}; RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0}; /** Configure the main internal regulator output voltage */ __HAL_RCC_PWR_CLK_ENABLE(); __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3); /** Initializes the RCC Oscillators according to the specified parameters * in the RCC_OscInitTypeDef structure. 
*/ RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI|RCC_OSCILLATORTYPE_HSE; RCC_OscInitStruct.HSEState = RCC_HSE_ON; RCC_OscInitStruct.HSIState = RCC_HSI_ON; RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT; RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; RCC_OscInitStruct.PLL.PLLM = 4; RCC_OscInitStruct.PLL.PLLN = 50; RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; RCC_OscInitStruct.PLL.PLLQ = 3; RCC_OscInitStruct.PLL.PLLR = 2; if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) { Error_Handler(); } /** Initializes the CPU, AHB and APB buses clocks */ RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2; RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV2; RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_1) != HAL_OK) { Error_Handler(); } PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1|RCC_PERIPHCLK_I2S_APB2 |RCC_PERIPHCLK_SAI1|RCC_PERIPHCLK_SPDIFRX |RCC_PERIPHCLK_CLK48|RCC_PERIPHCLK_CEC; PeriphClkInitStruct.PLLSAI.PLLSAIM = 4; PeriphClkInitStruct.PLLSAI.PLLSAIN = 96; PeriphClkInitStruct.PLLSAI.PLLSAIQ = 2; PeriphClkInitStruct.PLLSAI.PLLSAIP = RCC_PLLSAIP_DIV4; PeriphClkInitStruct.PLLSAIDivQ = 1; PeriphClkInitStruct.Clk48ClockSelection = RCC_CLK48CLKSOURCE_PLLSAIP; PeriphClkInitStruct.SpdifClockSelection = RCC_SPDIFRXCLKSOURCE_PLLR; PeriphClkInitStruct.I2sApb2ClockSelection = RCC_I2SAPB2CLKSOURCE_EXT; PeriphClkInitStruct.Sai1ClockSelection = RCC_SAI1CLKSOURCE_EXT; PeriphClkInitStruct.I2sApb1ClockSelection = RCC_I2SAPB1CLKSOURCE_EXT; PeriphClkInitStruct.CecClockSelection = RCC_CECCLKSOURCE_HSI; if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK) { Error_Handler(); } HAL_RCC_MCOConfig(RCC_MCO1, RCC_MCO1SOURCE_HSI, RCC_MCODIV_1); } /** * @brief 
HDMI_CEC Initialization Function
  * @param None
  * @retval None
  */
static void MX_HDMI_CEC_Init(void)
{

  /* USER CODE BEGIN HDMI_CEC_Init 0 */

  /* USER CODE END HDMI_CEC_Init 0 */

  /* USER CODE BEGIN HDMI_CEC_Init 1 */

  /* USER CODE END HDMI_CEC_Init 1 */
  /* Configure the CEC peripheral: listen-only-capable full listening mode,
     no own address claimed, RX into the file-scope cec_receive_buffer. */
  hcec.Instance = CEC;
  hcec.Init.SignalFreeTime = CEC_DEFAULT_SFT;
  hcec.Init.Tolerance = CEC_STANDARD_TOLERANCE;
  hcec.Init.BRERxStop = CEC_RX_STOP_ON_BRE;
  hcec.Init.BREErrorBitGen = CEC_BRE_ERRORBIT_NO_GENERATION;
  hcec.Init.LBPEErrorBitGen = CEC_LBPE_ERRORBIT_NO_GENERATION;
  hcec.Init.BroadcastMsgNoErrorBitGen = CEC_BROADCASTERROR_ERRORBIT_GENERATION;
  hcec.Init.SignalFreeTimeOption = CEC_SFT_START_ON_TXSOM;
  hcec.Init.ListenMode = CEC_FULL_LISTENING_MODE;
  hcec.Init.OwnAddress = CEC_OWN_ADDRESS_NONE;
  hcec.Init.RxBuffer = cec_receive_buffer;
  if (HAL_CEC_Init(&hcec) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN HDMI_CEC_Init 2 */

  /* USER CODE END HDMI_CEC_Init 2 */

}

/**
  * @brief I2C1 Initialization Function
  * @param None
  * @retval None
  */
static void MX_I2C1_Init(void)
{

  /* USER CODE BEGIN I2C1_Init 0 */

  /* USER CODE END I2C1_Init 0 */

  /* USER CODE BEGIN I2C1_Init 1 */

  /* USER CODE END I2C1_Init 1 */
  /* I2C1 as a 100 kHz (standard-mode) 7-bit-address master; no dual
     addressing, general call, or clock-stretch suppression. */
  hi2c1.Instance = I2C1;
  hi2c1.Init.ClockSpeed = 100000;
  hi2c1.Init.DutyCycle = I2C_DUTYCYCLE_2;
  hi2c1.Init.OwnAddress1 = 0;
  hi2c1.Init.AddressingMode = I2C_ADDRESSINGMODE_7BIT;
  hi2c1.Init.DualAddressMode = I2C_DUALADDRESS_DISABLE;
  hi2c1.Init.OwnAddress2 = 0;
  hi2c1.Init.GeneralCallMode = I2C_GENERALCALL_DISABLE;
  hi2c1.Init.NoStretchMode = I2C_NOSTRETCH_DISABLE;
  if (HAL_I2C_Init(&hi2c1) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN I2C1_Init 2 */

  /* USER CODE END I2C1_Init 2 */

}

/**
  * @brief I2S1 Initialization Function
  * @param None
  * @retval None
  */
static void MX_I2S1_Init(void)
{

  /* USER CODE BEGIN I2S1_Init 0 */

  /* USER CODE END I2S1_Init 0 */

  /* USER CODE BEGIN I2S1_Init 1 */

  /* USER CODE END I2S1_Init 1 */
  /* I2S1 (on SPI1): master TX, Philips standard, 16-bit data, 8 kHz,
     externally supplied I2S clock, no MCLK output. */
  hi2s1.Instance = SPI1;
  hi2s1.Init.Mode = I2S_MODE_MASTER_TX;
  hi2s1.Init.Standard = I2S_STANDARD_PHILIPS;
  hi2s1.Init.DataFormat = I2S_DATAFORMAT_16B;
  hi2s1.Init.MCLKOutput = I2S_MCLKOUTPUT_DISABLE;
  hi2s1.Init.AudioFreq = I2S_AUDIOFREQ_8K;
  hi2s1.Init.CPOL = I2S_CPOL_LOW;
  hi2s1.Init.ClockSource = I2S_CLOCK_EXTERNAL;
  hi2s1.Init.FullDuplexMode = I2S_FULLDUPLEXMODE_DISABLE;
  if (HAL_I2S_Init(&hi2s1) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN I2S1_Init 2 */

  /* USER CODE END I2S1_Init 2 */

}

/**
  * @brief I2S2 Initialization Function
  * @param None
  * @retval None
  */
static void MX_I2S2_Init(void)
{

  /* USER CODE BEGIN I2S2_Init 0 */

  /* USER CODE END I2S2_Init 0 */

  /* USER CODE BEGIN I2S2_Init 1 */

  /* USER CODE END I2S2_Init 1 */
  /* I2S2 (on SPI2): master TX, Philips standard, 24-bit data, 96 kHz,
     externally supplied I2S clock, with MCLK output enabled. */
  hi2s2.Instance = SPI2;
  hi2s2.Init.Mode = I2S_MODE_MASTER_TX;
  hi2s2.Init.Standard = I2S_STANDARD_PHILIPS;
  hi2s2.Init.DataFormat = I2S_DATAFORMAT_24B;
  hi2s2.Init.MCLKOutput = I2S_MCLKOUTPUT_ENABLE;
  hi2s2.Init.AudioFreq = I2S_AUDIOFREQ_96K;
  hi2s2.Init.CPOL = I2S_CPOL_LOW;
  hi2s2.Init.ClockSource = I2S_CLOCK_EXTERNAL;
  hi2s2.Init.FullDuplexMode = I2S_FULLDUPLEXMODE_DISABLE;
  if (HAL_I2S_Init(&hi2s2) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN I2S2_Init 2 */

  /* USER CODE END I2S2_Init 2 */

}

/**
  * @brief SAI1 Initialization Function
  * @param None
  * @retval None
  */
static void MX_SAI1_Init(void)
{

  /* USER CODE BEGIN SAI1_Init 0 */

  /* USER CODE END SAI1_Init 0 */

  /* USER CODE BEGIN SAI1_Init 1 */

  /* USER CODE END SAI1_Init 1 */
  /* SAI1 block A: free-protocol asynchronous slave receiver, 8-bit MSB-first.
     NOTE(review): SlotActive is 0x00000000 (no slots enabled) — confirm this
     is intentional and not a CubeMX placeholder. */
  hsai_BlockA1.Instance = SAI1_Block_A;
  hsai_BlockA1.Init.Protocol = SAI_FREE_PROTOCOL;
  hsai_BlockA1.Init.AudioMode = SAI_MODESLAVE_RX;
  hsai_BlockA1.Init.DataSize = SAI_DATASIZE_8;
  hsai_BlockA1.Init.FirstBit = SAI_FIRSTBIT_MSB;
  hsai_BlockA1.Init.ClockStrobing = SAI_CLOCKSTROBING_FALLINGEDGE;
  hsai_BlockA1.Init.Synchro = SAI_ASYNCHRONOUS;
  hsai_BlockA1.Init.OutputDrive = SAI_OUTPUTDRIVE_DISABLE;
  hsai_BlockA1.Init.FIFOThreshold = SAI_FIFOTHRESHOLD_EMPTY;
  hsai_BlockA1.Init.SynchroExt = SAI_SYNCEXT_DISABLE;
  hsai_BlockA1.Init.MonoStereoMode = SAI_STEREOMODE;
  hsai_BlockA1.Init.CompandingMode = SAI_NOCOMPANDING;
  hsai_BlockA1.Init.TriState = SAI_OUTPUT_NOTRELEASED;
  hsai_BlockA1.FrameInit.FrameLength = 8;
  hsai_BlockA1.FrameInit.ActiveFrameLength = 1;
  hsai_BlockA1.FrameInit.FSDefinition = SAI_FS_STARTFRAME;
  hsai_BlockA1.FrameInit.FSPolarity = SAI_FS_ACTIVE_LOW;
  hsai_BlockA1.FrameInit.FSOffset = SAI_FS_FIRSTBIT;
  hsai_BlockA1.SlotInit.FirstBitOffset = 0;
  hsai_BlockA1.SlotInit.SlotSize = SAI_SLOTSIZE_DATASIZE;
  hsai_BlockA1.SlotInit.SlotNumber = 1;
  hsai_BlockA1.SlotInit.SlotActive = 0x00000000;
  if (HAL_SAI_Init(&hsai_BlockA1) != HAL_OK)
  {
    Error_Handler();
  }
  /* SAI1 block B: free-protocol asynchronous master transmitter at 192 kHz,
     otherwise mirroring block A's frame/slot layout. */
  hsai_BlockB1.Instance = SAI1_Block_B;
  hsai_BlockB1.Init.Protocol = SAI_FREE_PROTOCOL;
  hsai_BlockB1.Init.AudioMode = SAI_MODEMASTER_TX;
  hsai_BlockB1.Init.DataSize = SAI_DATASIZE_8;
  hsai_BlockB1.Init.FirstBit = SAI_FIRSTBIT_MSB;
  hsai_BlockB1.Init.ClockStrobing = SAI_CLOCKSTROBING_FALLINGEDGE;
  hsai_BlockB1.Init.Synchro = SAI_ASYNCHRONOUS;
  hsai_BlockB1.Init.OutputDrive = SAI_OUTPUTDRIVE_DISABLE;
  hsai_BlockB1.Init.NoDivider = SAI_MASTERDIVIDER_ENABLE;
  hsai_BlockB1.Init.FIFOThreshold = SAI_FIFOTHRESHOLD_EMPTY;
  hsai_BlockB1.Init.ClockSource = SAI_CLKSOURCE_NA;
  hsai_BlockB1.Init.AudioFrequency = SAI_AUDIO_FREQUENCY_192K;
  hsai_BlockB1.Init.SynchroExt = SAI_SYNCEXT_DISABLE;
  hsai_BlockB1.Init.MonoStereoMode = SAI_STEREOMODE;
  hsai_BlockB1.Init.CompandingMode = SAI_NOCOMPANDING;
  hsai_BlockB1.Init.TriState = SAI_OUTPUT_NOTRELEASED;
  hsai_BlockB1.FrameInit.FrameLength = 8;
  hsai_BlockB1.FrameInit.ActiveFrameLength = 1;
  hsai_BlockB1.FrameInit.FSDefinition = SAI_FS_STARTFRAME;
  hsai_BlockB1.FrameInit.FSPolarity = SAI_FS_ACTIVE_LOW;
  hsai_BlockB1.FrameInit.FSOffset = SAI_FS_FIRSTBIT;
  hsai_BlockB1.SlotInit.FirstBitOffset = 0;
  hsai_BlockB1.SlotInit.SlotSize = SAI_SLOTSIZE_DATASIZE;
  hsai_BlockB1.SlotInit.SlotNumber = 1;
  hsai_BlockB1.SlotInit.SlotActive = 0x00000000;
  if (HAL_SAI_Init(&hsai_BlockB1) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN SAI1_Init 2 */

  /* USER CODE END SAI1_Init 2 */

}

/**
  * @brief SPDIFRX Initialization Function
  * @param
None
  * @retval None
  */
static void MX_SPDIFRX_Init(void)
{

  /* USER CODE BEGIN SPDIFRX_Init 0 */

  /* USER CODE END SPDIFRX_Init 0 */

  /* USER CODE BEGIN SPDIFRX_Init 1 */

  /* USER CODE END SPDIFRX_Init 1 */
  /* S/PDIF receiver on input IN1, channel A, LSB data alignment; all
     optional masks (preamble/status/validity/parity) disabled. */
  hspdif.Instance = SPDIFRX;
  hspdif.Init.InputSelection = SPDIFRX_INPUT_IN1;
  hspdif.Init.Retries = SPDIFRX_MAXRETRIES_NONE;
  hspdif.Init.WaitForActivity = SPDIFRX_WAITFORACTIVITY_OFF;
  hspdif.Init.ChannelSelection = SPDIFRX_CHANNEL_A;
  hspdif.Init.DataFormat = SPDIFRX_DATAFORMAT_LSB;
  hspdif.Init.StereoMode = SPDIFRX_STEREOMODE_DISABLE;
  hspdif.Init.PreambleTypeMask = SPDIFRX_PREAMBLETYPEMASK_OFF;
  hspdif.Init.ChannelStatusMask = SPDIFRX_CHANNELSTATUS_OFF;
  hspdif.Init.ValidityBitMask = SPDIFRX_VALIDITYMASK_OFF;
  hspdif.Init.ParityErrorMask = SPDIFRX_PARITYERRORMASK_OFF;
  if (HAL_SPDIFRX_Init(&hspdif) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN SPDIFRX_Init 2 */

  /* USER CODE END SPDIFRX_Init 2 */

}

/**
  * @brief USART3 Initialization Function
  * @param None
  * @retval None
  */
static void MX_USART3_UART_Init(void)
{

  /* USER CODE BEGIN USART3_Init 0 */

  /* USER CODE END USART3_Init 0 */

  /* USER CODE BEGIN USART3_Init 1 */

  /* USER CODE END USART3_Init 1 */
  /* Standard 115200-8-N-1 UART, TX+RX, no hardware flow control. */
  huart3.Instance = USART3;
  huart3.Init.BaudRate = 115200;
  huart3.Init.WordLength = UART_WORDLENGTH_8B;
  huart3.Init.StopBits = UART_STOPBITS_1;
  huart3.Init.Parity = UART_PARITY_NONE;
  huart3.Init.Mode = UART_MODE_TX_RX;
  huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE;
  huart3.Init.OverSampling = UART_OVERSAMPLING_16;
  if (HAL_UART_Init(&huart3) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN USART3_Init 2 */

  /* USER CODE END USART3_Init 2 */

}

/**
  * @brief USB_OTG_FS Initialization Function
  * @param None
  * @retval None
  */
static void MX_USB_OTG_FS_PCD_Init(void)
{

  /* USER CODE BEGIN USB_OTG_FS_Init 0 */

  /* USER CODE END USB_OTG_FS_Init 0 */

  /* USER CODE BEGIN USB_OTG_FS_Init 1 */

  /* USER CODE END USB_OTG_FS_Init 1 */
  /* USB OTG FS device controller: embedded full-speed PHY, 6 endpoints,
     DMA / SOF / LPM / VBUS-sensing all disabled. */
  hpcd_USB_OTG_FS.Instance = USB_OTG_FS;
  hpcd_USB_OTG_FS.Init.dev_endpoints = 6;
  hpcd_USB_OTG_FS.Init.speed = PCD_SPEED_FULL;
  hpcd_USB_OTG_FS.Init.dma_enable = DISABLE;
  hpcd_USB_OTG_FS.Init.phy_itface = PCD_PHY_EMBEDDED;
  hpcd_USB_OTG_FS.Init.Sof_enable = DISABLE;
  hpcd_USB_OTG_FS.Init.low_power_enable = DISABLE;
  hpcd_USB_OTG_FS.Init.lpm_enable = DISABLE;
  hpcd_USB_OTG_FS.Init.vbus_sensing_enable = DISABLE;
  hpcd_USB_OTG_FS.Init.use_dedicated_ep1 = DISABLE;
  if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK)
  {
    Error_Handler();
  }
  /* USER CODE BEGIN USB_OTG_FS_Init 2 */

  /* USER CODE END USB_OTG_FS_Init 2 */

}

/**
  * @brief GPIO Initialization Function
  * @param None
  * @retval None
  */
static void MX_GPIO_Init(void)
{
  GPIO_InitTypeDef GPIO_InitStruct = {0};

  /* GPIO Ports Clock Enable */
  __HAL_RCC_GPIOH_CLK_ENABLE();
  __HAL_RCC_GPIOC_CLK_ENABLE();
  __HAL_RCC_GPIOA_CLK_ENABLE();
  __HAL_RCC_GPIOB_CLK_ENABLE();

  /*Configure GPIO pin : PC9 */
  /* PC9 routed to alternate function AF5 (SPI1 group on this family). */
  GPIO_InitStruct.Pin = GPIO_PIN_9;
  GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
  GPIO_InitStruct.Pull = GPIO_NOPULL;
  GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
  GPIO_InitStruct.Alternate = GPIO_AF5_SPI1;
  HAL_GPIO_Init(GPIOC, &GPIO_InitStruct);

  /*Configure GPIO pin : PA8 */
  /* PA8 routed to AF0 (MCO clock output). */
  GPIO_InitStruct.Pin = GPIO_PIN_8;
  GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
  GPIO_InitStruct.Pull = GPIO_NOPULL;
  GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
  GPIO_InitStruct.Alternate = GPIO_AF0_MCO;
  HAL_GPIO_Init(GPIOA, &GPIO_InitStruct);

}

/* USER CODE BEGIN 4 */

/* USER CODE END 4 */

/**
  * @brief  This function is executed in case of error occurrence.
  * @retval None
  */
void Error_Handler(void)
{
  /* USER CODE BEGIN Error_Handler_Debug */
  /* User can add his own implementation to report the HAL error return state */
  /* Interrupts are masked and execution parks here forever. */
  __disable_irq();
  while (1)
  {
  }
  /* USER CODE END Error_Handler_Debug */
}

#ifdef  USE_FULL_ASSERT
/**
  * @brief  Reports the name of the source file and the source line number
  *         where the assert_param error has occurred.
  * @param  file: pointer to the source file name
  * @param  line: assert_param error line source number
  * @retval None
  */
void assert_failed(uint8_t *file, uint32_t line)
{
  /* USER CODE BEGIN 6 */
  /* User can add his own implementation to report the file name and line number,
     ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
  /* USER CODE END 6 */
}
#endif /* USE_FULL_ASSERT */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
660052.c
// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx512.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>

// F32 depthwise convolution microkernel, 25 taps (e.g. a 5x5 kernel),
// processing up to 16 channels per AVX-512 vector iteration with two
// accumulators ("acc2") to break the FMA dependency chain.
//
// Weight layout per 16-channel group (grounded in the w offsets below):
// 16 bias floats followed by 25 x 16 tap weights, i.e. 416 floats total.
// A channel remainder (1..15) is handled with AVX-512 write/read masks.
void xnn_f32_dwconv_minmax_ukernel_up16x25__avx512f_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Output clamping bounds; four floats from params broadcast to all lanes.
  const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
  const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
  do {
    // One indirection pointer per tap; all 25 must be non-NULL.
    const float* i0 = input[0];
    assert(i0 != NULL);
    const float* i1 = input[1];
    assert(i1 != NULL);
    const float* i2 = input[2];
    assert(i2 != NULL);
    const float* i3 = input[3];
    assert(i3 != NULL);
    const float* i4 = input[4];
    assert(i4 != NULL);
    const float* i5 = input[5];
    assert(i5 != NULL);
    const float* i6 = input[6];
    assert(i6 != NULL);
    const float* i7 = input[7];
    assert(i7 != NULL);
    const float* i8 = input[8];
    assert(i8 != NULL);
    const float* i9 = input[9];
    assert(i9 != NULL);
    const float* i10 = input[10];
    assert(i10 != NULL);
    const float* i11 = input[11];
    assert(i11 != NULL);
    const float* i12 = input[12];
    assert(i12 != NULL);
    const float* i13 = input[13];
    assert(i13 != NULL);
    const float* i14 = input[14];
    assert(i14 != NULL);
    const float* i15 = input[15];
    assert(i15 != NULL);
    const float* i16 = input[16];
    assert(i16 != NULL);
    const float* i17 = input[17];
    assert(i17 != NULL);
    const float* i18 = input[18];
    assert(i18 != NULL);
    const float* i19 = input[19];
    assert(i19 != NULL);
    const float* i20 = input[20];
    assert(i20 != NULL);
    const float* i21 = input[21];
    assert(i21 != NULL);
    const float* i22 = input[22];
    assert(i22 != NULL);
    const float* i23 = input[23];
    assert(i23 != NULL);
    const float* i24 = input[24];
    assert(i24 != NULL);
    // Advance the indirection buffer to the next output pixel's pointers.
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
    // Main loop: full 16-channel groups. Even taps go into accumulator p0,
    // odd taps into p1; p1 starts with a plain multiply at tap 1.
    for (; c >= 16; c -= 16) {
      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);  // bias

      const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
      i0 += 16;
      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
      i1 += 16;
      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
      i2 += 16;
      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
      i3 += 16;
      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
      i4 += 16;
      const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
      i5 += 16;
      const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
      i6 += 16;
      const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
      i7 += 16;
      const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
      i8 += 16;
      const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi9x0123456789ABCDEF = _mm512_loadu_ps(i9);
      i9 += 16;
      const __m512 vk9x0123456789ABCDEF = _mm512_load_ps(w + 160);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi10x0123456789ABCDEF = _mm512_loadu_ps(i10);
      i10 += 16;
      const __m512 vk10x0123456789ABCDEF = _mm512_load_ps(w + 176);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi11x0123456789ABCDEF = _mm512_loadu_ps(i11);
      i11 += 16;
      const __m512 vk11x0123456789ABCDEF = _mm512_load_ps(w + 192);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi12x0123456789ABCDEF = _mm512_loadu_ps(i12);
      i12 += 16;
      const __m512 vk12x0123456789ABCDEF = _mm512_load_ps(w + 208);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi13x0123456789ABCDEF = _mm512_loadu_ps(i13);
      i13 += 16;
      const __m512 vk13x0123456789ABCDEF = _mm512_load_ps(w + 224);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi14x0123456789ABCDEF = _mm512_loadu_ps(i14);
      i14 += 16;
      const __m512 vk14x0123456789ABCDEF = _mm512_load_ps(w + 240);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi15x0123456789ABCDEF = _mm512_loadu_ps(i15);
      i15 += 16;
      const __m512 vk15x0123456789ABCDEF = _mm512_load_ps(w + 256);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi16x0123456789ABCDEF = _mm512_loadu_ps(i16);
      i16 += 16;
      const __m512 vk16x0123456789ABCDEF = _mm512_load_ps(w + 272);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi17x0123456789ABCDEF = _mm512_loadu_ps(i17);
      i17 += 16;
      const __m512 vk17x0123456789ABCDEF = _mm512_load_ps(w + 288);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi18x0123456789ABCDEF = _mm512_loadu_ps(i18);
      i18 += 16;
      const __m512 vk18x0123456789ABCDEF = _mm512_load_ps(w + 304);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi19x0123456789ABCDEF = _mm512_loadu_ps(i19);
      i19 += 16;
      const __m512 vk19x0123456789ABCDEF = _mm512_load_ps(w + 320);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi20x0123456789ABCDEF = _mm512_loadu_ps(i20);
      i20 += 16;
      const __m512 vk20x0123456789ABCDEF = _mm512_load_ps(w + 336);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi21x0123456789ABCDEF = _mm512_loadu_ps(i21);
      i21 += 16;
      const __m512 vk21x0123456789ABCDEF = _mm512_load_ps(w + 352);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi22x0123456789ABCDEF = _mm512_loadu_ps(i22);
      i22 += 16;
      const __m512 vk22x0123456789ABCDEF = _mm512_load_ps(w + 368);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi23x0123456789ABCDEF = _mm512_loadu_ps(i23);
      i23 += 16;
      const __m512 vk23x0123456789ABCDEF = _mm512_load_ps(w + 384);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi24x0123456789ABCDEF = _mm512_loadu_ps(i24);
      i24 += 16;
      const __m512 vk24x0123456789ABCDEF = _mm512_load_ps(w + 400);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      w += 416;

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

      // Clamp to [vmin, vmax] and store.
      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);
      _mm512_storeu_ps(output, vacc0123456789ABCDEF);
      output += 16;
    }
    // Remainder: 1..15 channels handled with masked loads/stores; the input
    // pointers are not advanced (this is the last group for this pixel).
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on nc).
      const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));

      __m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);

      const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
      const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
      const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
      const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
      const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
      const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
      const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
      const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
      const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
      const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i9);
      const __m512 vk9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 160);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i10);
      const __m512 vk10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 176);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i11);
      const __m512 vk11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 192);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i12);
      const __m512 vk12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 208);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i13);
      const __m512 vk13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 224);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i14);
      const __m512 vk14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 240);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i15);
      const __m512 vk15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 256);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i16);
      const __m512 vk16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 272);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i17);
      const __m512 vk17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 288);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i18);
      const __m512 vk18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 304);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i19);
      const __m512 vk19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 320);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i20);
      const __m512 vk20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 336);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i21);
      const __m512 vk21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 352);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i22);
      const __m512 vk22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 368);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i23);
      const __m512 vk23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 384);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i24);
      const __m512 vk24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 400);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);
      _mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
      output += c;
    }
    // Skip any per-row padding the caller left between output pixels.
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
647714.c
/* * Copyright (c) 1997 - 2003 Kungliga Tekniska Högskolan * (Royal Institute of Technology, Stockholm, Sweden). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "gsskrb5_locl.h" /* * Return initiator subkey, or if that doesn't exists, the subkey. 
*/ krb5_error_code _gsskrb5i_get_initiator_subkey(const gsskrb5_ctx ctx, krb5_context context, krb5_keyblock **key) { krb5_error_code ret; *key = NULL; if (ctx->more_flags & LOCAL) { ret = krb5_auth_con_getlocalsubkey(context, ctx->auth_context, key); } else { ret = krb5_auth_con_getremotesubkey(context, ctx->auth_context, key); } if (ret == 0 && *key == NULL) ret = krb5_auth_con_getkey(context, ctx->auth_context, key); if (ret == 0 && *key == NULL) { krb5_set_error_message(context, 0, "No initiator subkey available"); return GSS_KRB5_S_KG_NO_SUBKEY; } return ret; } krb5_error_code _gsskrb5i_get_acceptor_subkey(const gsskrb5_ctx ctx, krb5_context context, krb5_keyblock **key) { krb5_error_code ret; *key = NULL; if (ctx->more_flags & LOCAL) { ret = krb5_auth_con_getremotesubkey(context, ctx->auth_context, key); } else { ret = krb5_auth_con_getlocalsubkey(context, ctx->auth_context, key); } if (ret == 0 && *key == NULL) { krb5_set_error_message(context, 0, "No acceptor subkey available"); return GSS_KRB5_S_KG_NO_SUBKEY; } return ret; } OM_uint32 _gsskrb5i_get_token_key(const gsskrb5_ctx ctx, krb5_context context, krb5_keyblock **key) { _gsskrb5i_get_acceptor_subkey(ctx, context, key); if(*key == NULL) { /* * Only use the initiator subkey or ticket session key if an * acceptor subkey was not required. 
*/ if ((ctx->more_flags & ACCEPTOR_SUBKEY) == 0) _gsskrb5i_get_initiator_subkey(ctx, context, key); } if (*key == NULL) { krb5_set_error_message(context, 0, "No token key available"); return GSS_KRB5_S_KG_NO_SUBKEY; } return 0; } static OM_uint32 sub_wrap_size ( OM_uint32 req_output_size, OM_uint32 * max_input_size, int blocksize, int extrasize ) { size_t len, total_len; len = 8 + req_output_size + blocksize + extrasize; _gsskrb5_encap_length(len, &len, &total_len, GSS_KRB5_MECHANISM); total_len -= req_output_size; /* token length */ if (total_len < req_output_size) { *max_input_size = (req_output_size - total_len); (*max_input_size) &= (~(OM_uint32)(blocksize - 1)); } else { *max_input_size = 0; } return GSS_S_COMPLETE; } OM_uint32 GSSAPI_CALLCONV _gsskrb5_wrap_size_limit ( OM_uint32 * minor_status, const gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, OM_uint32 req_output_size, OM_uint32 * max_input_size ) { krb5_context context; krb5_keyblock *key; OM_uint32 ret; krb5_keytype keytype; const gsskrb5_ctx ctx = (const gsskrb5_ctx) context_handle; GSSAPI_KRB5_INIT (&context); if (ctx->more_flags & IS_CFX) return _gssapi_wrap_size_cfx(minor_status, ctx, context, conf_req_flag, qop_req, req_output_size, max_input_size); HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex); ret = _gsskrb5i_get_token_key(ctx, context, &key); HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex); if (ret) { *minor_status = ret; return GSS_S_FAILURE; } krb5_enctype_to_keytype (context, key->keytype, &keytype); switch (keytype) { case KEYTYPE_DES : #ifdef HEIM_WEAK_CRYPTO ret = sub_wrap_size(req_output_size, max_input_size, 8, 22); #else ret = GSS_S_FAILURE; #endif break; case ENCTYPE_ARCFOUR_HMAC_MD5: case ENCTYPE_ARCFOUR_HMAC_MD5_56: ret = _gssapi_wrap_size_arcfour(minor_status, ctx, context, conf_req_flag, qop_req, req_output_size, max_input_size, key); break; case KEYTYPE_DES3 : ret = sub_wrap_size(req_output_size, max_input_size, 8, 34); break; default : abort(); break; } krb5_free_keyblock 
(context, key); *minor_status = 0; return ret; } #ifdef HEIM_WEAK_CRYPTO static OM_uint32 wrap_des (OM_uint32 * minor_status, const gsskrb5_ctx ctx, krb5_context context, int conf_req_flag, gss_qop_t qop_req, const gss_buffer_t input_message_buffer, int * conf_state, gss_buffer_t output_message_buffer, krb5_keyblock *key ) { u_char *p; EVP_MD_CTX *md5; u_char hash[16]; DES_key_schedule schedule; EVP_CIPHER_CTX *des_ctx; DES_cblock deskey; DES_cblock zero; size_t i; int32_t seq_number; size_t len, total_len, padlength, datalen; if (IS_DCE_STYLE(ctx)) { padlength = 0; datalen = input_message_buffer->length; len = 22 + 8; _gsskrb5_encap_length (len, &len, &total_len, GSS_KRB5_MECHANISM); total_len += datalen; datalen += 8; } else { padlength = 8 - (input_message_buffer->length % 8); datalen = input_message_buffer->length + padlength + 8; len = datalen + 22; _gsskrb5_encap_length (len, &len, &total_len, GSS_KRB5_MECHANISM); } output_message_buffer->length = total_len; output_message_buffer->value = malloc (total_len); if (output_message_buffer->value == NULL) { output_message_buffer->length = 0; *minor_status = ENOMEM; return GSS_S_FAILURE; } p = _gsskrb5_make_header(output_message_buffer->value, len, "\x02\x01", /* TOK_ID */ GSS_KRB5_MECHANISM); /* SGN_ALG */ memcpy (p, "\x00\x00", 2); p += 2; /* SEAL_ALG */ if(conf_req_flag) memcpy (p, "\x00\x00", 2); else memcpy (p, "\xff\xff", 2); p += 2; /* Filler */ memcpy (p, "\xff\xff", 2); p += 2; /* fill in later */ memset (p, 0, 16); p += 16; /* confounder + data + pad */ krb5_generate_random_block(p, 8); memcpy (p + 8, input_message_buffer->value, input_message_buffer->length); memset (p + 8 + input_message_buffer->length, padlength, padlength); /* checksum */ md5 = EVP_MD_CTX_create(); EVP_DigestInit_ex(md5, EVP_md5(), NULL); EVP_DigestUpdate(md5, p - 24, 8); EVP_DigestUpdate(md5, p, datalen); EVP_DigestFinal_ex(md5, hash, NULL); EVP_MD_CTX_destroy(md5); memset (&zero, 0, sizeof(zero)); memcpy (&deskey, key->keyvalue.data, 
sizeof(deskey)); DES_set_key_unchecked (&deskey, &schedule); DES_cbc_cksum ((void *)hash, (void *)hash, sizeof(hash), &schedule, &zero); memcpy (p - 8, hash, 8); des_ctx = EVP_CIPHER_CTX_new(); if (des_ctx == NULL) { memset (deskey, 0, sizeof(deskey)); memset (&schedule, 0, sizeof(schedule)); free(output_message_buffer->value); output_message_buffer->value = NULL; output_message_buffer->length = 0; *minor_status = ENOMEM; return GSS_S_FAILURE; } /* sequence number */ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex); krb5_auth_con_getlocalseqnumber (context, ctx->auth_context, &seq_number); p -= 16; p[0] = (seq_number >> 0) & 0xFF; p[1] = (seq_number >> 8) & 0xFF; p[2] = (seq_number >> 16) & 0xFF; p[3] = (seq_number >> 24) & 0xFF; memset (p + 4, (ctx->more_flags & LOCAL) ? 0 : 0xFF, 4); EVP_CipherInit_ex(des_ctx, EVP_des_cbc(), NULL, key->keyvalue.data, p + 8, 1); EVP_Cipher(des_ctx, p, p, 8); krb5_auth_con_setlocalseqnumber (context, ctx->auth_context, ++seq_number); HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex); /* encrypt the data */ p += 16; if(conf_req_flag) { memcpy (&deskey, key->keyvalue.data, sizeof(deskey)); for (i = 0; i < sizeof(deskey); ++i) deskey[i] ^= 0xf0; EVP_CIPHER_CTX_reset(des_ctx); EVP_CipherInit_ex(des_ctx, EVP_des_cbc(), NULL, deskey, zero, 1); EVP_Cipher(des_ctx, p, p, datalen); } EVP_CIPHER_CTX_free(des_ctx); memset (deskey, 0, sizeof(deskey)); memset (&schedule, 0, sizeof(schedule)); if(conf_state != NULL) *conf_state = conf_req_flag; *minor_status = 0; return GSS_S_COMPLETE; } #endif static OM_uint32 wrap_des3 (OM_uint32 * minor_status, const gsskrb5_ctx ctx, krb5_context context, int conf_req_flag, gss_qop_t qop_req, const gss_buffer_t input_message_buffer, int * conf_state, gss_buffer_t output_message_buffer, krb5_keyblock *key ) { u_char *p; u_char seq[8]; int32_t seq_number; size_t len, total_len, padlength, datalen; uint32_t ret; krb5_crypto crypto; Checksum cksum; krb5_data encdata; if (IS_DCE_STYLE(ctx)) { padlength = 0; datalen = 
input_message_buffer->length; len = 34 + 8; _gsskrb5_encap_length (len, &len, &total_len, GSS_KRB5_MECHANISM); total_len += datalen; datalen += 8; } else { padlength = 8 - (input_message_buffer->length % 8); datalen = input_message_buffer->length + padlength + 8; len = datalen + 34; _gsskrb5_encap_length (len, &len, &total_len, GSS_KRB5_MECHANISM); } output_message_buffer->length = total_len; output_message_buffer->value = malloc (total_len); if (output_message_buffer->value == NULL) { output_message_buffer->length = 0; *minor_status = ENOMEM; return GSS_S_FAILURE; } p = _gsskrb5_make_header(output_message_buffer->value, len, "\x02\x01", /* TOK_ID */ GSS_KRB5_MECHANISM); /* SGN_ALG */ memcpy (p, "\x04\x00", 2); /* HMAC SHA1 DES3-KD */ p += 2; /* SEAL_ALG */ if(conf_req_flag) memcpy (p, "\x02\x00", 2); /* DES3-KD */ else memcpy (p, "\xff\xff", 2); p += 2; /* Filler */ memcpy (p, "\xff\xff", 2); p += 2; /* calculate checksum (the above + confounder + data + pad) */ memcpy (p + 20, p - 8, 8); krb5_generate_random_block(p + 28, 8); memcpy (p + 28 + 8, input_message_buffer->value, input_message_buffer->length); memset (p + 28 + 8 + input_message_buffer->length, padlength, padlength); ret = krb5_crypto_init(context, key, 0, &crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } ret = krb5_create_checksum (context, crypto, KRB5_KU_USAGE_SIGN, 0, p + 20, datalen + 8, &cksum); krb5_crypto_destroy (context, crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } /* zero out SND_SEQ + SGN_CKSUM in case */ memset (p, 0, 28); memcpy (p + 8, cksum.checksum.data, cksum.checksum.length); free_Checksum (&cksum); HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex); /* sequence number */ krb5_auth_con_getlocalseqnumber (context, ctx->auth_context, &seq_number); 
seq[0] = (seq_number >> 0) & 0xFF; seq[1] = (seq_number >> 8) & 0xFF; seq[2] = (seq_number >> 16) & 0xFF; seq[3] = (seq_number >> 24) & 0xFF; memset (seq + 4, (ctx->more_flags & LOCAL) ? 0 : 0xFF, 4); ret = krb5_crypto_init(context, key, ETYPE_DES3_CBC_NONE, &crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } { DES_cblock ivec; memcpy (&ivec, p + 8, 8); ret = krb5_encrypt_ivec (context, crypto, KRB5_KU_USAGE_SEQ, seq, 8, &encdata, &ivec); } krb5_crypto_destroy (context, crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } assert (encdata.length == 8); memcpy (p, encdata.data, encdata.length); krb5_data_free (&encdata); krb5_auth_con_setlocalseqnumber (context, ctx->auth_context, ++seq_number); HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex); /* encrypt the data */ p += 28; if(conf_req_flag) { krb5_data tmp; ret = krb5_crypto_init(context, key, ETYPE_DES3_CBC_NONE, &crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } ret = krb5_encrypt(context, crypto, KRB5_KU_USAGE_SEAL, p, datalen, &tmp); krb5_crypto_destroy(context, crypto); if (ret) { free (output_message_buffer->value); output_message_buffer->length = 0; output_message_buffer->value = NULL; *minor_status = ret; return GSS_S_FAILURE; } assert (tmp.length == datalen); memcpy (p, tmp.data, datalen); krb5_data_free(&tmp); } if(conf_state != NULL) *conf_state = conf_req_flag; *minor_status = 0; return GSS_S_COMPLETE; } OM_uint32 GSSAPI_CALLCONV _gsskrb5_wrap (OM_uint32 * minor_status, const gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, const gss_buffer_t input_message_buffer, int * conf_state, gss_buffer_t output_message_buffer ) { 
krb5_context context; krb5_keyblock *key; OM_uint32 ret; krb5_keytype keytype; const gsskrb5_ctx ctx = (const gsskrb5_ctx) context_handle; output_message_buffer->value = NULL; output_message_buffer->length = 0; GSSAPI_KRB5_INIT (&context); if (ctx->more_flags & IS_CFX) return _gssapi_wrap_cfx (minor_status, ctx, context, conf_req_flag, input_message_buffer, conf_state, output_message_buffer); HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex); ret = _gsskrb5i_get_token_key(ctx, context, &key); HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex); if (ret) { *minor_status = ret; return GSS_S_FAILURE; } krb5_enctype_to_keytype (context, key->keytype, &keytype); switch (keytype) { case KEYTYPE_DES : #ifdef HEIM_WEAK_CRYPTO ret = wrap_des (minor_status, ctx, context, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, key); #else ret = GSS_S_FAILURE; #endif break; case KEYTYPE_DES3 : ret = wrap_des3 (minor_status, ctx, context, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, key); break; case KEYTYPE_ARCFOUR: case KEYTYPE_ARCFOUR_56: ret = _gssapi_wrap_arcfour (minor_status, ctx, context, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, key); break; default : abort(); break; } krb5_free_keyblock (context, key); return ret; }
407546.c
/* vim:expandtab:shiftwidth=2:tabstop=2:smarttab: * * Libmemcached library * * Copyright (C) 2011 Data Differential, http://datadifferential.com/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * * The names of its contributors may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 * */

/* -*- Mode: C; tab-width: 2; c-basic-offset: 2; indent-tabs-mode: nil -*- */

#include <libmemcached/protocol/common.h>

#include <sys/types.h>
#include <inttypes.h>

/* Abort the enclosing predicate with false as soon as one check fails. */
#define ensure(a) if (!(a)) { return false; }

/**
 * Pedantically verify that a binary-protocol request header is
 * self-consistent: correct magic and datatype, zeroed reserved bytes,
 * and per-opcode constraints on extlen, keylen, bodylen and cas.
 *
 * @param request request header (multi-byte fields in network byte order)
 * @return true if the header passes all checks, false otherwise
 */
bool memcached_binary_protocol_pedantic_check_request(const protocol_binary_request_header *request)
{
  ensure(request->request.magic == PROTOCOL_BINARY_REQ);
  ensure(request->request.datatype == PROTOCOL_BINARY_RAW_BYTES);
  /* bytes 6-7 must be zero — presumably the reserved/vbucket field of the
   * header; confirm against the binary protocol specification. */
  ensure(request->bytes[6] == 0);
  ensure(request->bytes[7] == 0);

  uint8_t opcode= request->request.opcode;
  /* keylen and bodylen arrive in network byte order; convert before use. */
  uint16_t keylen= ntohs(request->request.keylen);
  uint8_t extlen= request->request.extlen;
  uint32_t bodylen= ntohl(request->request.bodylen);

  /* The body always contains at least the extras and the key. */
  ensure(bodylen >= (keylen + extlen));

  switch (opcode) {
  case PROTOCOL_BINARY_CMD_GET:
  case PROTOCOL_BINARY_CMD_GETK:
  case PROTOCOL_BINARY_CMD_GETKQ:
  case PROTOCOL_BINARY_CMD_GETQ:
    /* GET variants: key only, no extras, no value, no cas. */
    ensure(extlen == 0);
    ensure(keylen > 0);
    ensure(keylen == bodylen);
    ensure(request->request.cas == 0);
    break;

  case PROTOCOL_BINARY_CMD_ADD:
  case PROTOCOL_BINARY_CMD_ADDQ:
    /* it makes no sense to run add with a cas value */
    ensure(request->request.cas == 0);
    /* FALLTHROUGH */
  case PROTOCOL_BINARY_CMD_SET:
  case PROTOCOL_BINARY_CMD_SETQ:
  case PROTOCOL_BINARY_CMD_REPLACE:
  case PROTOCOL_BINARY_CMD_REPLACEQ:
    /* Storage commands carry 8 bytes of extras (flags + expiration). */
    ensure(keylen > 0);
    ensure(extlen == 8);
    break;

  case PROTOCOL_BINARY_CMD_DELETE:
  case PROTOCOL_BINARY_CMD_DELETEQ:
    /* Delete: key only. */
    ensure(extlen == 0);
    ensure(keylen > 0);
    ensure(keylen == bodylen);
    break;

  case PROTOCOL_BINARY_CMD_INCREMENT:
  case PROTOCOL_BINARY_CMD_INCREMENTQ:
  case PROTOCOL_BINARY_CMD_DECREMENT:
  case PROTOCOL_BINARY_CMD_DECREMENTQ:
    /* Arithmetic commands carry 20 bytes of extras and a key, no value. */
    ensure(extlen == 20);
    ensure(keylen > 0);
    ensure(keylen + extlen == bodylen);
    break;

  case PROTOCOL_BINARY_CMD_QUIT:
  case PROTOCOL_BINARY_CMD_QUITQ:
  case PROTOCOL_BINARY_CMD_NOOP:
  case PROTOCOL_BINARY_CMD_VERSION:
    /* Bare commands: completely empty body. */
    ensure(extlen == 0);
    ensure(keylen == 0);
    ensure(bodylen == 0);
    break;

  case PROTOCOL_BINARY_CMD_FLUSH:
  case PROTOCOL_BINARY_CMD_FLUSHQ:
    /* Flush optionally carries a 4-byte expiration in the extras. */
    ensure(extlen == 0 || extlen == 4);
    ensure(keylen == 0);
    ensure(bodylen == extlen);
    break;

  case PROTOCOL_BINARY_CMD_STAT:
    ensure(extlen == 0);
    /* May have key, but not value */
    ensure(keylen == bodylen);
    break;

  case PROTOCOL_BINARY_CMD_APPEND:
  case PROTOCOL_BINARY_CMD_APPENDQ:
  case PROTOCOL_BINARY_CMD_PREPEND:
  case PROTOCOL_BINARY_CMD_PREPENDQ:
    /* Concatenation commands: key plus value, never extras. */
    ensure(extlen == 0);
    ensure(keylen > 0);
    break;
  default:
    /* Unknown command */
    ;
  }

  return true;
}

/**
 * Pedantically verify that a binary-protocol response header is
 * self-consistent and matches its originating request: magic, datatype,
 * echoed opaque, per-opcode constraints on keylen/extlen/bodylen/cas for
 * successful responses, and empty extras/cas for error responses.
 *
 * @param request the request header this response answers
 * @param response response header (multi-byte fields in network byte order)
 * @return true if the header passes all checks, false otherwise
 */
bool memcached_binary_protocol_pedantic_check_response(const protocol_binary_request_header *request,
                                                       const protocol_binary_response_header *response)
{
  ensure(response->response.magic == PROTOCOL_BINARY_RES);
  ensure(response->response.datatype == PROTOCOL_BINARY_RAW_BYTES);
  /* The server must echo the client's opaque value untouched. */
  ensure(response->response.opaque == request->request.opaque);

  uint16_t status= ntohs(response->response.status);
  uint8_t opcode= response->response.opcode;

  if (status == PROTOCOL_BINARY_RESPONSE_SUCCESS)
  {
    switch (opcode) {
    case PROTOCOL_BINARY_CMD_ADDQ:
    case PROTOCOL_BINARY_CMD_APPENDQ:
    case PROTOCOL_BINARY_CMD_DECREMENTQ:
    case PROTOCOL_BINARY_CMD_DELETEQ:
    case PROTOCOL_BINARY_CMD_FLUSHQ:
    case PROTOCOL_BINARY_CMD_INCREMENTQ:
    case PROTOCOL_BINARY_CMD_PREPENDQ:
    case PROTOCOL_BINARY_CMD_QUITQ:
    case PROTOCOL_BINARY_CMD_REPLACEQ:
    case PROTOCOL_BINARY_CMD_SETQ:
      /* Quiet command shouldn't return on success */
      return false;
    default:
      break;
    }

    switch (opcode) {
    case PROTOCOL_BINARY_CMD_ADD:
    case PROTOCOL_BINARY_CMD_REPLACE:
    case PROTOCOL_BINARY_CMD_SET:
    case PROTOCOL_BINARY_CMD_APPEND:
    case PROTOCOL_BINARY_CMD_PREPEND:
      /* Mutations return only a (non-zero) cas, no payload. */
      ensure(response->response.keylen == 0);
      ensure(response->response.extlen == 0);
      ensure(response->response.bodylen == 0);
      ensure(response->response.cas != 0);
      break;
    case PROTOCOL_BINARY_CMD_FLUSH:
    case PROTOCOL_BINARY_CMD_NOOP:
    case PROTOCOL_BINARY_CMD_QUIT:
    case PROTOCOL_BINARY_CMD_DELETE:
      /* These return a completely empty success response. */
      ensure(response->response.keylen == 0);
      ensure(response->response.extlen == 0);
      ensure(response->response.bodylen == 0);
      ensure(response->response.cas == 0);
      break;
    case PROTOCOL_BINARY_CMD_DECREMENT:
    case PROTOCOL_BINARY_CMD_INCREMENT:
      /* Arithmetic results are an 8-byte value in the body. */
      ensure(response->response.keylen == 0);
      ensure(response->response.extlen == 0);
      ensure(ntohl(response->response.bodylen) == 8);
      ensure(response->response.cas != 0);
      break;
    case PROTOCOL_BINARY_CMD_STAT:
      ensure(response->response.extlen == 0);
      /* key and value exists in all packets except in the terminating */
      ensure(response->response.cas == 0);
      break;
    case PROTOCOL_BINARY_CMD_VERSION:
      /* Version string travels in the body. */
      ensure(response->response.keylen == 0);
      ensure(response->response.extlen == 0);
      ensure(response->response.bodylen != 0);
      ensure(response->response.cas == 0);
      break;
    case PROTOCOL_BINARY_CMD_GET:
    case PROTOCOL_BINARY_CMD_GETQ:
      /* GET returns 4 bytes of extras (flags) and no key. */
      ensure(response->response.keylen == 0);
      ensure(response->response.extlen == 4);
      ensure(response->response.cas != 0);
      break;
    case PROTOCOL_BINARY_CMD_GETK:
    case PROTOCOL_BINARY_CMD_GETKQ:
      /* GETK additionally echoes the key back. */
      ensure(response->response.keylen != 0);
      ensure(response->response.extlen == 4);
      ensure(response->response.cas != 0);
      break;
    default:
      /* Undefined command code */
      break;
    }
  }
  else
  {
    /* Error responses never carry a cas or extras; GETK errors may
     * still echo the key. */
    ensure(response->response.cas == 0);
    ensure(response->response.extlen == 0);
    if (opcode != PROTOCOL_BINARY_CMD_GETK)
    {
      ensure(response->response.keylen == 0);
    }
  }

  return true;
}
609998.c
/*-------------------------------------------------------------------------
 *
 * date.c
 *	  implements DATE and TIME data types specified in SQL-92 standard
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.138.2.1 2008/07/07 18:09:53 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <ctype.h>
#include <limits.h>
#include <float.h>
#include <time.h>

#include "access/hash.h"
#include "libpq/pqformat.h"
#include "miscadmin.h"
#include "parser/scansup.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/nabstime.h"

/*
 * gcc's -ffast-math switch breaks routines that expect exact results from
 * expressions like timeval / SECS_PER_HOUR, where timeval is double.
 */
#ifdef __FAST_MATH__
#error -ffast-math is known to break this code
#endif

/* Forward declarations for local time <-> struct pg_tm conversion helpers. */
static int	time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec);
static int	timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp);
static int	tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result);
static int	tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result);
static void AdjustTimeForTypmod(TimeADT *time, int32 typmod);


/* common code for timetypmodin and timetztypmodin */
/* Validate a TIME/TIMETZ typmod array: exactly one non-negative precision,
 * clamped (with a WARNING) to MAX_TIME_PRECISION. */
static int32
anytime_typmodin(bool istz, ArrayType *ta)
{
	int32		typmod;
	int32	   *tl;
	int			n;

	tl = ArrayGetIntegerTypmods(ta, &n);

	/*
	 * we're not too tense about good error message here because grammar
	 * shouldn't allow wrong number of modifiers for TIME
	 */
	if (n != 1)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid type modifier")));

	if (*tl < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("TIME(%d)%s precision must not be negative",
						*tl, (istz ? " WITH TIME ZONE" : ""))));
	if (*tl > MAX_TIME_PRECISION)
	{
		ereport(WARNING,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("TIME(%d)%s precision reduced to maximum allowed, %d",
						*tl, (istz ? " WITH TIME ZONE" : ""),
						MAX_TIME_PRECISION)));
		typmod = MAX_TIME_PRECISION;
	}
	else
		typmod = *tl;

	return typmod;
}

/* common code for timetypmodout and timetztypmodout */
/* Render a typmod as "(p) with[out] time zone" into a palloc'd string. */
static char *
anytime_typmodout(bool istz, int32 typmod)
{
	char	   *res = (char *) palloc(64);
	const char *tz = istz ? " with time zone" : " without time zone";

	if (typmod >= 0)
		snprintf(res, 64, "(%d)%s", (int) typmod, tz);
	else
		snprintf(res, 64, "%s", tz);
	return res;
}


/*****************************************************************************
 *	 Date ADT
 *****************************************************************************/


/* date_in()
 * Given date text string, convert to internal date format
 * (days relative to the Postgres epoch, 2000-01-01).
 */
Datum
date_in(PG_FUNCTION_ARGS)
{
	char	   *str = PG_GETARG_CSTRING(0);
	DateADT		date;
	fsec_t		fsec;
	struct pg_tm tt,
			   *tm = &tt;
	int			tzp;
	int			dtype;
	int			nf;
	int			dterr;
	char	   *field[MAXDATEFIELDS];
	int			ftype[MAXDATEFIELDS];
	char		workbuf[MAXDATELEN + 1];

	dterr = ParseDateTime(str, workbuf, sizeof(workbuf),
						  field, ftype, MAXDATEFIELDS, &nf);
	if (dterr == 0)
		dterr = DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tzp);
	if (dterr != 0)
		DateTimeParseError(dterr, str, "date");

	switch (dtype)
	{
		case DTK_DATE:
			break;

		case DTK_CURRENT:
			/* rejected above via ereport; the call below is not reached */
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
			  errmsg("date/time value \"current\" is no longer supported")));

			GetCurrentDateTime(tm);
			break;

		case DTK_EPOCH:
			GetEpochTime(tm);
			break;

		default:
			DateTimeParseError(DTERR_BAD_FORMAT, str, "date");
			break;
	}

	/* guard date2j() against out-of-range Julian dates */
	if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday))
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("date out of range: \"%s\"", str)));

	date = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE;

	PG_RETURN_DATEADT(date);
}

/* date_out()
 * Given internal format date, convert to text
 string. */
Datum
date_out(PG_FUNCTION_ARGS)
{
	DateADT		date = PG_GETARG_DATEADT(0);
	char	   *result;
	struct pg_tm tt,
			   *tm = &tt;
	char		buf[MAXDATELEN + 1];

	/* decompose days-since-2000 into year/month/day */
	j2date(date + POSTGRES_EPOCH_JDATE,
		   &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));

	EncodeDateOnly(tm, DateStyle, buf);

	result = pstrdup(buf);
	PG_RETURN_CSTRING(result);
}

/*
 *		date_recv			- converts external binary format to date
 */
Datum
date_recv(PG_FUNCTION_ARGS)
{
	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);

	PG_RETURN_DATEADT((DateADT) pq_getmsgint(buf, sizeof(DateADT)));
}

/*
 *		date_send			- converts date to binary format
 */
Datum
date_send(PG_FUNCTION_ARGS)
{
	DateADT		date = PG_GETARG_DATEADT(0);
	StringInfoData buf;

	pq_begintypsend(&buf);
	pq_sendint(&buf, date, sizeof(date));
	PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}

/*
 * Comparison functions for dates
 *
 * DateADT is a plain integer day count, so direct comparison is exact.
 */

Datum
date_eq(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 == dateVal2);
}

Datum
date_ne(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 != dateVal2);
}

Datum
date_lt(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 < dateVal2);
}

Datum
date_le(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 <= dateVal2);
}

Datum
date_gt(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 > dateVal2);
}

Datum
date_ge(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_BOOL(dateVal1 >= dateVal2);
}

/* Three-way comparison for btree support: -1 / 0 / +1. */
Datum
date_cmp(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	if (dateVal1 < dateVal2)
		PG_RETURN_INT32(-1);
	else if (dateVal1 > dateVal2)
		PG_RETURN_INT32(1);
	PG_RETURN_INT32(0);
}

Datum
date_larger(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_DATEADT((dateVal1 > dateVal2) ? dateVal1 : dateVal2);
}

Datum
date_smaller(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_DATEADT((dateVal1 < dateVal2) ? dateVal1 : dateVal2);
}

/* Compute difference between two dates in days.
 */
Datum
date_mi(PG_FUNCTION_ARGS)
{
	DateADT		dateVal1 = PG_GETARG_DATEADT(0);
	DateADT		dateVal2 = PG_GETARG_DATEADT(1);

	PG_RETURN_INT32(date_diff(dateVal1, dateVal2));
}

/* Add a number of days to a date, giving a new date.
 * Must handle both positive and negative numbers of days.
 */
Datum
date_pli(PG_FUNCTION_ARGS)
{
	DateADT		dateVal = PG_GETARG_DATEADT(0);
	int32		days = PG_GETARG_INT32(1);

	PG_RETURN_DATEADT(date_pl_days(dateVal, days));
}

/* Subtract a number of days from a date, giving a new date.
 *
 * NOTE(review): date_pli goes through date_pl_days() but this subtracts
 * directly — presumably equivalent; confirm against date_pl_days().
 */
Datum
date_mii(PG_FUNCTION_ARGS)
{
	DateADT		dateVal = PG_GETARG_DATEADT(0);
	int32		days = PG_GETARG_INT32(1);

	PG_RETURN_DATEADT(dateVal - days);
}

/*
 * Internal routines for promoting date to timestamp and timestamp with
 * time zone
 */

static Timestamp
date2timestamp(DateADT dateVal)
{
	Timestamp	result;

#ifdef HAVE_INT64_TIMESTAMP
	/* date is days since 2000, timestamp is microseconds since same... */
	result = dateVal * USECS_PER_DAY;

	/* Date's range is wider than timestamp's, so must check for overflow */
	if (result / USECS_PER_DAY != dateVal)
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("date out of range for timestamp")));
#else
	/* date is days since 2000, timestamp is seconds since same...
*/ result = dateVal * (double) SECS_PER_DAY; #endif return result; } static TimestampTz date2timestamptz(DateADT dateVal) { TimestampTz result; struct pg_tm tt, *tm = &tt; int tz; j2date(dateVal + POSTGRES_EPOCH_JDATE, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday)); tm->tm_hour = 0; tm->tm_min = 0; tm->tm_sec = 0; tz = DetermineTimeZoneOffset(tm, session_timezone); #ifdef HAVE_INT64_TIMESTAMP result = dateVal * USECS_PER_DAY + tz * USECS_PER_SEC; /* Date's range is wider than timestamp's, so must check for overflow */ if ((result - tz * USECS_PER_SEC) / USECS_PER_DAY != dateVal) ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("date out of range for timestamp"))); #else result = dateVal * (double) SECS_PER_DAY + tz; #endif return result; } /* * date2timestamp_no_overflow * * This is chartered to produce a double value that is numerically * equivalent to the corresponding Timestamp value, if the date is in the * valid range of Timestamps, but in any case not throw an overflow error. * We can do this since the numerical range of double is greater than * that of non-erroneous timestamps. The results are currently only * used for statistical estimation purposes. */ double date2timestamp_no_overflow(DateADT dateVal) { double result; #ifdef HAVE_INT64_TIMESTAMP /* date is days since 2000, timestamp is microseconds since same... */ result = dateVal * (double) USECS_PER_DAY; #else /* date is days since 2000, timestamp is seconds since same... 
*/ result = dateVal * (double) SECS_PER_DAY; #endif return result; } /* * Crosstype comparison functions for dates */ Datum date_eq_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); } Datum date_ne_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); } Datum date_lt_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); } Datum date_gt_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); } Datum date_le_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); } Datum date_ge_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); } Datum date_cmp_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); Timestamp dt1; dt1 = date2timestamp(dateVal); PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); } Datum date_eq_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); } Datum date_ne_timestamptz(PG_FUNCTION_ARGS) { DateADT 
dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); } Datum date_lt_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); } Datum date_gt_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); } Datum date_le_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); } Datum date_ge_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); } Datum date_cmp_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); TimestampTz dt1; dt1 = date2timestamptz(dateVal); PG_RETURN_INT32(timestamptz_cmp_internal(dt1, dt2)); } Datum timestamp_eq_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); } Datum timestamp_ne_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); } Datum timestamp_lt_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); 
PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); } Datum timestamp_gt_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); } Datum timestamp_le_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); } Datum timestamp_ge_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); } Datum timestamp_cmp_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); Timestamp dt2; dt2 = date2timestamp(dateVal); PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); } Datum timestamptz_eq_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); } Datum timestamptz_ne_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); } Datum timestamptz_lt_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); } Datum timestamptz_gt_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); } Datum timestamptz_le_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = 
PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); } Datum timestamptz_ge_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); } Datum timestamptz_cmp_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); TimestampTz dt2; dt2 = date2timestamptz(dateVal); PG_RETURN_INT32(timestamptz_cmp_internal(dt1, dt2)); } /* Add an interval to a date, giving a new date. * Must handle both positive and negative intervals. * * We implement this by promoting the date to timestamp (without time zone) * and then using the timestamp plus interval function. */ Datum date_pl_interval(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Interval *span = PG_GETARG_INTERVAL_P(1); Timestamp dateStamp; dateStamp = date2timestamp(dateVal); return DirectFunctionCall2(timestamp_pl_interval, TimestampGetDatum(dateStamp), PointerGetDatum(span)); } /* Subtract an interval from a date, giving a new date. * Must handle both positive and negative intervals. * * We implement this by promoting the date to timestamp (without time zone) * and then using the timestamp minus interval function. */ Datum date_mi_interval(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Interval *span = PG_GETARG_INTERVAL_P(1); Timestamp dateStamp; dateStamp = date2timestamp(dateVal); return DirectFunctionCall2(timestamp_mi_interval, TimestampGetDatum(dateStamp), PointerGetDatum(span)); } /* date_timestamp() * Convert date to timestamp data type. */ Datum date_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp result; result = date2timestamp(dateVal); PG_RETURN_TIMESTAMP(result); } /* timestamp_date() * Convert timestamp to date data type. 
*/ Datum timestamp_date(PG_FUNCTION_ARGS) { Timestamp timestamp = PG_GETARG_TIMESTAMP(0); DateADT result; struct pg_tm tt, *tm = &tt; fsec_t fsec; if (TIMESTAMP_NOT_FINITE(timestamp)) PG_RETURN_NULL(); if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0) ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; PG_RETURN_DATEADT(result); } /* date_timestamptz() * Convert date to timestamp with time zone data type. */ Datum date_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz result; result = date2timestamptz(dateVal); PG_RETURN_TIMESTAMP(result); } /* timestamptz_date() * Convert timestamp with time zone to date data type. */ Datum timestamptz_date(PG_FUNCTION_ARGS) { TimestampTz timestamp = PG_GETARG_TIMESTAMP(0); DateADT result; struct pg_tm tt, *tm = &tt; fsec_t fsec; int tz; char *tzn; if (TIMESTAMP_NOT_FINITE(timestamp)) PG_RETURN_NULL(); if (timestamp2tm(timestamp, &tz, tm, &fsec, &tzn, NULL) != 0) ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; PG_RETURN_DATEADT(result); } /* abstime_date() * Convert abstime to date data type. 
*/ Datum abstime_date(PG_FUNCTION_ARGS) { AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0); DateADT result; struct pg_tm tt, *tm = &tt; int tz; switch (abstime) { case INVALID_ABSTIME: case NOSTART_ABSTIME: case NOEND_ABSTIME: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot convert reserved abstime value to date"))); /* * pretend to drop through to make compiler think that result will * be set */ default: abstime2tm(abstime, &tz, tm, NULL); result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; break; } PG_RETURN_DATEADT(result); } /***************************************************************************** * Time ADT *****************************************************************************/ Datum time_in(PG_FUNCTION_ARGS) { char *str = PG_GETARG_CSTRING(0); #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif int32 typmod = PG_GETARG_INT32(2); TimeADT result; fsec_t fsec; struct pg_tm tt, *tm = &tt; int tz; int nf; int dterr; char workbuf[MAXDATELEN + 1]; char *field[MAXDATEFIELDS]; int dtype; int ftype[MAXDATEFIELDS]; dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr == 0) dterr = DecodeTimeOnly(field, ftype, nf, &dtype, tm, &fsec, &tz); if (dterr != 0) DateTimeParseError(dterr, str, "time"); tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, typmod); PG_RETURN_TIMEADT(result); } /* tm2time() * Convert a tm structure to a time data type. */ static int tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result) { #ifdef HAVE_INT64_TIMESTAMP *result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) * USECS_PER_SEC) + fsec; #else *result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec; #endif return 0; } /* time2tm() * Convert time data type to POSIX time structure. * * For dates within the range of pg_time_t, convert to the local time zone. 
* If out of this range, leave as UTC (in practice that could only happen * if pg_time_t is just 32 bits) - thomas 97/05/27 */ static int time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec) { #ifdef HAVE_INT64_TIMESTAMP tm->tm_hour = time / USECS_PER_HOUR; time -= tm->tm_hour * USECS_PER_HOUR; tm->tm_min = time / USECS_PER_MINUTE; time -= tm->tm_min * USECS_PER_MINUTE; tm->tm_sec = time / USECS_PER_SEC; time -= tm->tm_sec * USECS_PER_SEC; *fsec = time; #else double trem; recalc: trem = time; TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR); TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE); TMODULO(trem, tm->tm_sec, 1.0); trem = TIMEROUND(trem); /* roundoff may need to propagate to higher-order fields */ if (trem >= 1.0) { time = ceil(time); goto recalc; } *fsec = trem; #endif return 0; } Datum time_out(PG_FUNCTION_ARGS) { TimeADT time = PG_GETARG_TIMEADT(0); char *result; struct pg_tm tt, *tm = &tt; fsec_t fsec; char buf[MAXDATELEN + 1]; time2tm(time, tm, &fsec); EncodeTimeOnly(tm, fsec, NULL, DateStyle, buf); result = pstrdup(buf); PG_RETURN_CSTRING(result); } /* * time_recv - converts external binary format to time * * We make no attempt to provide compatibility between int and float * time representations ... 
*/ Datum time_recv(PG_FUNCTION_ARGS) { StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif int32 typmod = PG_GETARG_INT32(2); TimeADT result; #ifdef HAVE_INT64_TIMESTAMP result = pq_getmsgint64(buf); #else result = pq_getmsgfloat8(buf); #endif AdjustTimeForTypmod(&result, typmod); PG_RETURN_TIMEADT(result); } /* * time_send - converts time to binary format */ Datum time_send(PG_FUNCTION_ARGS) { TimeADT time = PG_GETARG_TIMEADT(0); StringInfoData buf; pq_begintypsend(&buf); #ifdef HAVE_INT64_TIMESTAMP pq_sendint64(&buf, time); #else pq_sendfloat8(&buf, time); #endif PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } Datum timetypmodin(PG_FUNCTION_ARGS) { ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0); PG_RETURN_INT32(anytime_typmodin(false, ta)); } Datum timetypmodout(PG_FUNCTION_ARGS) { int32 typmod = PG_GETARG_INT32(0); PG_RETURN_CSTRING(anytime_typmodout(false, typmod)); } /* time_scale() * Adjust time type for specified scale factor. * Used by PostgreSQL type system to stuff columns. */ Datum time_scale(PG_FUNCTION_ARGS) { TimeADT time = PG_GETARG_TIMEADT(0); int32 typmod = PG_GETARG_INT32(1); TimeADT result; result = time; AdjustTimeForTypmod(&result, typmod); PG_RETURN_TIMEADT(result); } /* AdjustTimeForTypmod() * Force the precision of the time value to a specified value. * Uses *exactly* the same code as in AdjustTimestampForTypemod() * but we make a separate copy because those types do not * have a fundamental tie together but rather a coincidence of * implementation. 
 - thomas */

static void
AdjustTimeForTypmod(TimeADT *time, int32 typmod)
{
#ifdef HAVE_INT64_TIMESTAMP
	/* scale divisors and round-to-nearest offsets, indexed by precision */
	static const int64 TimeScales[MAX_TIME_PRECISION + 1] = {
		INT64CONST(1000000),
		INT64CONST(100000),
		INT64CONST(10000),
		INT64CONST(1000),
		INT64CONST(100),
		INT64CONST(10),
		INT64CONST(1)
	};

	static const int64 TimeOffsets[MAX_TIME_PRECISION + 1] = {
		INT64CONST(500000),
		INT64CONST(50000),
		INT64CONST(5000),
		INT64CONST(500),
		INT64CONST(50),
		INT64CONST(5),
		INT64CONST(0)
	};
#else
	/* note MAX_TIME_PRECISION differs in this case */
	static const double TimeScales[MAX_TIME_PRECISION + 1] = {
		1.0,
		10.0,
		100.0,
		1000.0,
		10000.0,
		100000.0,
		1000000.0,
		10000000.0,
		100000000.0,
		1000000000.0,
		10000000000.0
	};
#endif

	if (typmod >= 0 && typmod <= MAX_TIME_PRECISION)
	{
		/*
		 * Note: this round-to-nearest code is not completely consistent about
		 * rounding values that are exactly halfway between integral values.
		 * On most platforms, rint() will implement round-to-nearest-even, but
		 * the integer code always rounds up (away from zero).  Is it worth
		 * trying to be consistent?
		 */
#ifdef HAVE_INT64_TIMESTAMP
		if (*time >= INT64CONST(0))
			*time = ((*time + TimeOffsets[typmod]) / TimeScales[typmod]) *
				TimeScales[typmod];
		else
			*time = -((((-*time) + TimeOffsets[typmod]) / TimeScales[typmod]) *
					  TimeScales[typmod]);
#else
		*time = rint((double) *time * TimeScales[typmod]) / TimeScales[typmod];
#endif
	}
}

/* time_eq() ... time_ge()
 * Simple comparison operators; TimeADT is directly comparable in both the
 * int64 and float representations.
 */
Datum
time_eq(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 == time2);
}

Datum
time_ne(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 != time2);
}

Datum
time_lt(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 < time2);
}

Datum
time_le(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 <= time2);
}

Datum
time_gt(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 > time2);
}

Datum
time_ge(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_BOOL(time1 >= time2);
}

/* time_cmp()
 * Three-way comparison support function for btree indexes.
 */
Datum
time_cmp(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	if (time1 < time2)
		PG_RETURN_INT32(-1);
	if (time1 > time2)
		PG_RETURN_INT32(1);
	PG_RETURN_INT32(0);
}

Datum
time_hash(PG_FUNCTION_ARGS)
{
	/* We can use either hashint8 or hashfloat8 directly */
#ifdef HAVE_INT64_TIMESTAMP
	return hashint8(fcinfo);
#else
	return hashfloat8(fcinfo);
#endif
}

/* time_larger() / time_smaller()
 * Support for MAX()/MIN() aggregates.
 */
Datum
time_larger(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_TIMEADT((time1 > time2) ? time1 : time2);
}

Datum
time_smaller(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);

	PG_RETURN_TIMEADT((time1 < time2) ? time1 : time2);
}

/* overlaps_time() --- implements the SQL92 OVERLAPS operator.
 *
 * Algorithm is per SQL92 spec.  This is much harder than you'd think
 * because the spec requires us to deliver a non-null answer in some cases
 * where some of the inputs are null.
 */
Datum
overlaps_time(PG_FUNCTION_ARGS)
{
	/*
	 * The arguments are TimeADT, but we leave them as generic Datums to avoid
	 * dereferencing nulls (TimeADT is pass-by-reference!)
	 */
	Datum		ts1 = PG_GETARG_DATUM(0);
	Datum		te1 = PG_GETARG_DATUM(1);
	Datum		ts2 = PG_GETARG_DATUM(2);
	Datum		te2 = PG_GETARG_DATUM(3);
	bool		ts1IsNull = PG_ARGISNULL(0);
	bool		te1IsNull = PG_ARGISNULL(1);
	bool		ts2IsNull = PG_ARGISNULL(2);
	bool		te2IsNull = PG_ARGISNULL(3);

#define TIMEADT_GT(t1,t2) \
	(DatumGetTimeADT(t1) > DatumGetTimeADT(t2))
#define TIMEADT_LT(t1,t2) \
	(DatumGetTimeADT(t1) < DatumGetTimeADT(t2))

	/*
	 * If both endpoints of interval 1 are null, the result is null (unknown).
	 * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
	 * take ts1 as the lesser endpoint.
	 */
	if (ts1IsNull)
	{
		if (te1IsNull)
			PG_RETURN_NULL();
		/* swap null for non-null */
		ts1 = te1;
		te1IsNull = true;
	}
	else if (!te1IsNull)
	{
		if (TIMEADT_GT(ts1, te1))
		{
			Datum		tt = ts1;

			ts1 = te1;
			te1 = tt;
		}
	}

	/* Likewise for interval 2. */
	if (ts2IsNull)
	{
		if (te2IsNull)
			PG_RETURN_NULL();
		/* swap null for non-null */
		ts2 = te2;
		te2IsNull = true;
	}
	else if (!te2IsNull)
	{
		if (TIMEADT_GT(ts2, te2))
		{
			Datum		tt = ts2;

			ts2 = te2;
			te2 = tt;
		}
	}

	/*
	 * At this point neither ts1 nor ts2 is null, so we can consider three
	 * cases: ts1 > ts2, ts1 < ts2, ts1 = ts2
	 */
	if (TIMEADT_GT(ts1, ts2))
	{
		/*
		 * This case is ts1 < te2 OR te1 < te2, which may look redundant but
		 * in the presence of nulls it's not quite completely so.
		 */
		if (te2IsNull)
			PG_RETURN_NULL();
		if (TIMEADT_LT(ts1, te2))
			PG_RETURN_BOOL(true);
		if (te1IsNull)
			PG_RETURN_NULL();

		/*
		 * If te1 is not null then we had ts1 <= te1 above, and we just found
		 * ts1 >= te2, hence te1 >= te2.
		 */
		PG_RETURN_BOOL(false);
	}
	else if (TIMEADT_LT(ts1, ts2))
	{
		/* This case is ts2 < te1 OR te2 < te1 */
		if (te1IsNull)
			PG_RETURN_NULL();
		if (TIMEADT_LT(ts2, te1))
			PG_RETURN_BOOL(true);
		if (te2IsNull)
			PG_RETURN_NULL();

		/*
		 * If te2 is not null then we had ts2 <= te2 above, and we just found
		 * ts2 >= te1, hence te2 >= te1.
		 */
		PG_RETURN_BOOL(false);
	}
	else
	{
		/*
		 * For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
		 * rather silly way of saying "true if both are nonnull, else null".
		 */
		if (te1IsNull || te2IsNull)
			PG_RETURN_NULL();
		PG_RETURN_BOOL(true);
	}

#undef TIMEADT_GT
#undef TIMEADT_LT
}

/* timestamp_time()
 * Convert timestamp to time data type.
 */
Datum
timestamp_time(PG_FUNCTION_ARGS)
{
	Timestamp	timestamp = PG_GETARG_TIMESTAMP(0);
	TimeADT		result;
	struct pg_tm tt,
			   *tm = &tt;
	fsec_t		fsec;

	/* infinite timestamps have no time-of-day component */
	if (TIMESTAMP_NOT_FINITE(timestamp))
		PG_RETURN_NULL();

	if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("timestamp out of range")));

#ifdef HAVE_INT64_TIMESTAMP

	/*
	 * Could also do this with time = (timestamp / USECS_PER_DAY *
	 * USECS_PER_DAY) - timestamp;
	 */
	result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) +
			   tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
	result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) +
		tm->tm_sec + fsec;
#endif

	PG_RETURN_TIMEADT(result);
}

/* timestamptz_time()
 * Convert timestamptz to time data type.
 */
Datum
timestamptz_time(PG_FUNCTION_ARGS)
{
	TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
	TimeADT		result;
	struct pg_tm tt,
			   *tm = &tt;
	int			tz;
	fsec_t		fsec;
	char	   *tzn;

	/* infinite timestamps have no time-of-day component */
	if (TIMESTAMP_NOT_FINITE(timestamp))
		PG_RETURN_NULL();

	/* decompose in the session time zone */
	if (timestamp2tm(timestamp, &tz, tm, &fsec, &tzn, NULL) != 0)
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("timestamp out of range")));

#ifdef HAVE_INT64_TIMESTAMP

	/*
	 * Could also do this with time = (timestamp / USECS_PER_DAY *
	 * USECS_PER_DAY) - timestamp;
	 */
	result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) +
			   tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
	result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) +
		tm->tm_sec + fsec;
#endif

	PG_RETURN_TIMEADT(result);
}

/* datetime_timestamp()
 * Convert date and time to timestamp data type.
 */
Datum
datetime_timestamp(PG_FUNCTION_ARGS)
{
	DateADT		date = PG_GETARG_DATEADT(0);
	TimeADT		time = PG_GETARG_TIMEADT(1);
	Timestamp	result;

	/* date contributes whole days; time supplies the intra-day offset */
	result = DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
												   DateADTGetDatum(date)));
	result += time;

	PG_RETURN_TIMESTAMP(result);
}

/* time_interval()
 * Convert time to interval data type.
 */
Datum
time_interval(PG_FUNCTION_ARGS)
{
	TimeADT		time = PG_GETARG_TIMEADT(0);
	Interval   *result;

	result = (Interval *) palloc(sizeof(Interval));

	result->time = time;
	result->day = 0;
	result->month = 0;

	PG_RETURN_INTERVAL_P(result);
}

/* interval_time()
 * Convert interval to time data type.
 *
 * This is defined as producing the fractional-day portion of the interval.
 * Therefore, we can just ignore the months field.  It is not real clear
 * what to do with negative intervals, but we choose to subtract the floor,
 * so that, say, '-2 hours' becomes '22:00:00'.
 */
Datum
interval_time(PG_FUNCTION_ARGS)
{
	Interval   *span = PG_GETARG_INTERVAL_P(0);
	TimeADT		result;

#ifdef HAVE_INT64_TIMESTAMP
	int64		days;

	result = span->time;
	if (result >= USECS_PER_DAY)
	{
		days = result / USECS_PER_DAY;
		result -= days * USECS_PER_DAY;
	}
	else if (result < 0)
	{
		/* round the day count up so the remainder lands in [0, one day) */
		days = (-result + USECS_PER_DAY - 1) / USECS_PER_DAY;
		result += days * USECS_PER_DAY;
	}
#else
	result = span->time;
	if (result >= (double) SECS_PER_DAY || result < 0)
		result -= floor(result / (double) SECS_PER_DAY) * (double) SECS_PER_DAY;
#endif

	PG_RETURN_TIMEADT(result);
}

/* time_mi_time()
 * Subtract two times to produce an interval.
 */
Datum
time_mi_time(PG_FUNCTION_ARGS)
{
	TimeADT		time1 = PG_GETARG_TIMEADT(0);
	TimeADT		time2 = PG_GETARG_TIMEADT(1);
	Interval   *result;

	result = (Interval *) palloc(sizeof(Interval));

	result->month = 0;
	result->day = 0;
	result->time = time1 - time2;

	PG_RETURN_INTERVAL_P(result);
}

/* time_pl_interval_internal()
 * Common code to add interval to time.
 * The result is wrapped into the range [0, one day).
 */
static inline TimeADT
time_pl_interval_internal(TimeADT time, Interval *span)
{
	TimeADT		result;

#ifdef HAVE_INT64_TIMESTAMP
	result = time + span->time;
	result -= result / USECS_PER_DAY * USECS_PER_DAY;
	if (result < INT64CONST(0))
		result += USECS_PER_DAY;
#else
	TimeADT		time1;

	result = time + span->time;
	TMODULO(result, time1, (double) SECS_PER_DAY);
	if (result < 0)
		result += SECS_PER_DAY;
#endif

	return result;
}

/* time_pl_interval()
 * Add interval to time.
 */
Datum
time_pl_interval(PG_FUNCTION_ARGS)
{
	TimeADT		time = PG_GETARG_TIMEADT(0);
	Interval   *span = PG_GETARG_INTERVAL_P(1);
	TimeADT		result = time_pl_interval_internal(time, span);

	PG_RETURN_TIMEADT(result);
}

/*
 * time_li_fraction
 *
 * What fraction of interval <x0, x1> does <x0, x> represent?
 */
float8
time_li_fraction(TimeADT x, TimeADT x0, TimeADT x1,
				 bool *eq_bounds, bool *eq_abscissas)
{
	float8		result;
	Interval	diffx;
	Interval	diffx1;

	Assert(eq_bounds && eq_abscissas);
	*eq_bounds = false;
	*eq_abscissas = false;

	/* express both differences as intervals so interval division applies */
	diffx.time = x - x0;
	diffx.month = 0;
	diffx.day = 0;

	diffx1.time = x1 - x0;
	diffx1.month = 0;
	diffx1.day = 0;

	if (!interval_div_internal(&diffx, &diffx1, &result, NULL))
	{
		/* degenerate interval: x0 == x1; report which flavor and return NaN */
		*eq_bounds = true;
		*eq_abscissas = (x == x0);
		result = NAN;
	}

	return result;
}

/*
 * time_li_value
 *
 * What interval value lies fraction <f> of the way into interval
 * <y0, y1>?
 *
 * Note
 *		li_value(0.0, y0, y1) --> y0
 *		li_value(1.0, y0, y1) --> y1
 */
Timestamp
time_li_value(float8 f, TimeADT y0, TimeADT y1)
{
	TimeADT		y;
	Interval	diffy;
	Interval   *offset;

	diffy.month = 0;
	diffy.day = 0;
	diffy.time = y1 - y0;

	/* scale the span by f, then add it onto y0 (wrapping within one day) */
	offset = DatumGetIntervalP(DirectFunctionCall2(interval_mul,
												   IntervalPGetDatum(&diffy),
												   Float8GetDatum(f)));
	y = time_pl_interval_internal(y0, offset);
	pfree(offset);

	return y;
}

/* time_mi_interval()
 * Subtract interval from time; result is wrapped into [0, one day).
 */
Datum
time_mi_interval(PG_FUNCTION_ARGS)
{
	TimeADT		time = PG_GETARG_TIMEADT(0);
	Interval   *span = PG_GETARG_INTERVAL_P(1);
	TimeADT		result;

#ifdef HAVE_INT64_TIMESTAMP
	result = time - span->time;
	result -= result / USECS_PER_DAY * USECS_PER_DAY;
	if (result < INT64CONST(0))
		result += USECS_PER_DAY;
#else
	TimeADT		time1;

	result = time - span->time;
	TMODULO(result, time1, (double) SECS_PER_DAY);
	if (result < 0)
		result += SECS_PER_DAY;
#endif

	PG_RETURN_TIMEADT(result);
}

/* time_part()
 * Extract specified field from time type.
 */
Datum
time_part(PG_FUNCTION_ARGS)
{
	text	   *units = PG_GETARG_TEXT_P(0);
	TimeADT		time = PG_GETARG_TIMEADT(1);
	float8		result;
	int			type,
				val;
	char	   *lowunits;

	/* normalize the units keyword for lookup */
	lowunits = downcase_truncate_identifier(VARDATA(units),
											VARSIZE(units) - VARHDRSZ,
											false);

	type = DecodeUnits(0, lowunits, &val);
	if (type == UNKNOWN_FIELD)
		type = DecodeSpecial(0, lowunits, &val);

	if (type == UNITS)
	{
		fsec_t		fsec;
		struct pg_tm tt,
				   *tm = &tt;

		time2tm(time, tm, &fsec);

		switch (val)
		{
			case DTK_MICROSEC:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec * USECS_PER_SEC + fsec;
#else
				result = (tm->tm_sec + fsec) * 1000000;
#endif
				break;

			case DTK_MILLISEC:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec * INT64CONST(1000) + fsec / INT64CONST(1000);
#else
				result = (tm->tm_sec + fsec) * 1000;
#endif
				break;

			case DTK_SECOND:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec + fsec / USECS_PER_SEC;
#else
				result = tm->tm_sec + fsec;
#endif
				break;

			case DTK_MINUTE:
				result = tm->tm_min;
				break;

			case DTK_HOUR:
				result = tm->tm_hour;
				break;

			/* date-oriented and tz fields make no sense for plain TIME */
			case DTK_TZ:
			case DTK_TZ_MINUTE:
			case DTK_TZ_HOUR:
			case DTK_DAY:
			case DTK_MONTH:
			case DTK_QUARTER:
			case DTK_YEAR:
			case DTK_DECADE:
			case DTK_CENTURY:
			case DTK_MILLENNIUM:
			case DTK_ISOYEAR:
			default:
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("\"time\" units \"%s\" not recognized",
								DatumGetCString(DirectFunctionCall1(textout,
												 PointerGetDatum(units))))));
				result = 0;
		}
	}
	else if (type == RESERV && val == DTK_EPOCH)
	{
		/* epoch for TIME is just seconds since midnight */
#ifdef HAVE_INT64_TIMESTAMP
		result = time / 1000000.0;
#else
		result = time;
#endif
	}
	else
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("\"time\" units \"%s\" not recognized",
						DatumGetCString(DirectFunctionCall1(textout,
												 PointerGetDatum(units))))));
		result = 0;
	}

	PG_RETURN_FLOAT8(result);
}


/*****************************************************************************
 *	 Time With Time Zone ADT
 *****************************************************************************/


/* tm2timetz()
 * Convert a tm structure to a time data
type. */

static int
tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
	result->time = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) *
					  SECS_PER_MINUTE) + tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
	result->time = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) *
					SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
	result->zone = tz;

	return 0;
}

/* timetz_in()
 * Input function for TIME WITH TIME ZONE: parse the string, then round to
 * the declared precision (typmod).
 */
Datum
timetz_in(PG_FUNCTION_ARGS)
{
	char	   *str = PG_GETARG_CSTRING(0);

#ifdef NOT_USED
	Oid			typelem = PG_GETARG_OID(1);
#endif
	int32		typmod = PG_GETARG_INT32(2);
	TimeTzADT  *result;
	fsec_t		fsec;
	struct pg_tm tt,
			   *tm = &tt;
	int			tz;
	int			nf;
	int			dterr;
	char		workbuf[MAXDATELEN + 1];
	char	   *field[MAXDATEFIELDS];
	int			dtype;
	int			ftype[MAXDATEFIELDS];

	dterr = ParseDateTime(str, workbuf, sizeof(workbuf),
						  field, ftype, MAXDATEFIELDS, &nf);
	if (dterr == 0)
		dterr = DecodeTimeOnly(field, ftype, nf, &dtype, tm, &fsec, &tz);
	if (dterr != 0)
		DateTimeParseError(dterr, str, "time with time zone");

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));
	tm2timetz(tm, fsec, tz, result);
	AdjustTimeForTypmod(&(result->time), typmod);

	PG_RETURN_TIMETZADT_P(result);
}

/* timetz_out()
 * Output function for TIME WITH TIME ZONE, honoring DateStyle.
 */
Datum
timetz_out(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(0);
	char	   *result;
	struct pg_tm tt,
			   *tm = &tt;
	fsec_t		fsec;
	int			tz;
	char		buf[MAXDATELEN + 1];

	timetz2tm(time, tm, &fsec, &tz);
	EncodeTimeOnly(tm, fsec, &tz, DateStyle, buf);

	result = pstrdup(buf);
	PG_RETURN_CSTRING(result);
}

/*
 *		timetz_recv			- converts external binary format to timetz
 */
Datum
timetz_recv(PG_FUNCTION_ARGS)
{
	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);

#ifdef NOT_USED
	Oid			typelem = PG_GETARG_OID(1);
#endif
	int32		typmod = PG_GETARG_INT32(2);
	TimeTzADT  *result;

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

#ifdef HAVE_INT64_TIMESTAMP
	result->time = pq_getmsgint64(buf);
#else
	result->time = pq_getmsgfloat8(buf);
#endif
	result->zone = pq_getmsgint(buf, sizeof(result->zone));

	AdjustTimeForTypmod(&(result->time), typmod);

	PG_RETURN_TIMETZADT_P(result);
}

/*
 *
 timetz_send - converts timetz to binary format
 */
Datum
timetz_send(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(0);
	StringInfoData buf;

	pq_begintypsend(&buf);
#ifdef HAVE_INT64_TIMESTAMP
	pq_sendint64(&buf, time->time);
#else
	pq_sendfloat8(&buf, time->time);
#endif
	pq_sendint(&buf, time->zone, sizeof(time->zone));
	PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}

/* timetztypmodin()
 * Parse a typmod specification for TIMETZ (precision), true = with tz.
 */
Datum
timetztypmodin(PG_FUNCTION_ARGS)
{
	ArrayType  *ta = PG_GETARG_ARRAYTYPE_P(0);

	PG_RETURN_INT32(anytime_typmodin(true, ta));
}

/* timetztypmodout()
 * Render a TIMETZ typmod back to its textual form.
 */
Datum
timetztypmodout(PG_FUNCTION_ARGS)
{
	int32		typmod = PG_GETARG_INT32(0);

	PG_RETURN_CSTRING(anytime_typmodout(true, typmod));
}

/* timetz2tm()
 * Convert TIME WITH TIME ZONE data type to POSIX time structure.
 */
static int
timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
#ifdef HAVE_INT64_TIMESTAMP
	int64		trem = time->time;

	tm->tm_hour = trem / USECS_PER_HOUR;
	trem -= tm->tm_hour * USECS_PER_HOUR;
	tm->tm_min = trem / USECS_PER_MINUTE;
	trem -= tm->tm_min * USECS_PER_MINUTE;
	tm->tm_sec = trem / USECS_PER_SEC;
	*fsec = trem - tm->tm_sec * USECS_PER_SEC;
#else
	double		trem = time->time;

recalc:
	TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
	TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
	TMODULO(trem, tm->tm_sec, 1.0);
	trem = TIMEROUND(trem);
	/* roundoff may need to propagate to higher-order fields */
	if (trem >= 1.0)
	{
		trem = ceil(time->time);
		goto recalc;
	}
	*fsec = trem;
#endif

	if (tzp != NULL)
		*tzp = time->zone;

	return 0;
}

/* timetz_scale()
 * Adjust time type for specified scale factor.
 * Used by PostgreSQL type system to stuff columns.
 */
Datum
timetz_scale(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(0);
	int32		typmod = PG_GETARG_INT32(1);
	TimeTzADT  *result;

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	result->time = time->time;
	result->zone = time->zone;

	AdjustTimeForTypmod(&(result->time), typmod);

	PG_RETURN_TIMETZADT_P(result);
}

/* timetz_cmp_internal()
 * Three-way comparison shared by all timetz comparison operators.
 */
static int
timetz_cmp_internal(TimeTzADT *time1, TimeTzADT *time2)
{
	/* Primary sort is by true (GMT-equivalent) time */
#ifdef HAVE_INT64_TIMESTAMP
	int64		t1,
				t2;

	t1 = time1->time + (time1->zone * USECS_PER_SEC);
	t2 = time2->time + (time2->zone * USECS_PER_SEC);
#else
	double		t1,
				t2;

	t1 = time1->time + time1->zone;
	t2 = time2->time + time2->zone;
#endif

	if (t1 > t2)
		return 1;
	if (t1 < t2)
		return -1;

	/*
	 * If same GMT time, sort by timezone; we only want to say that two
	 * timetz's are equal if both the time and zone parts are equal.
	 */
	if (time1->zone > time2->zone)
		return 1;
	if (time1->zone < time2->zone)
		return -1;

	return 0;
}

/* timetz_eq() ... timetz_ge()
 * Comparison operators, all delegating to timetz_cmp_internal().
 */
Datum
timetz_eq(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) == 0);
}

Datum
timetz_ne(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) != 0);
}

Datum
timetz_lt(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) < 0);
}

Datum
timetz_le(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) <= 0);
}

Datum
timetz_gt(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) > 0);
}

Datum
timetz_ge(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_BOOL(timetz_cmp_internal(time1, time2) >= 0);
}

/* timetz_cmp()
 * Btree support: three-way comparison.
 */
Datum
timetz_cmp(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);

	PG_RETURN_INT32(timetz_cmp_internal(time1, time2));
}

Datum
timetz_hash(PG_FUNCTION_ARGS)
{
	TimeTzADT  *key = PG_GETARG_TIMETZADT_P(0);
	uint32		thash;

	/*
	 * To avoid any problems with padding bytes in the struct, we figure the
	 * field hashes separately and XOR them.  This also provides a convenient
	 * framework for dealing with the fact that the time field might be either
	 * double or int64.
	 */
#ifdef HAVE_INT64_TIMESTAMP
	thash = DatumGetUInt32(DirectFunctionCall1(hashint8,
											   Int64GetDatumFast(key->time)));
#else
	thash = DatumGetUInt32(DirectFunctionCall1(hashfloat8,
											 Float8GetDatumFast(key->time)));
#endif
	thash ^= DatumGetUInt32(hash_uint32(key->zone));
	PG_RETURN_UINT32(thash);
}

/* timetz_larger() / timetz_smaller()
 * Support for MAX()/MIN() aggregates.
 */
Datum
timetz_larger(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);
	TimeTzADT  *result;

	if (timetz_cmp_internal(time1, time2) > 0)
		result = time1;
	else
		result = time2;
	PG_RETURN_TIMETZADT_P(result);
}

Datum
timetz_smaller(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time1 = PG_GETARG_TIMETZADT_P(0);
	TimeTzADT  *time2 = PG_GETARG_TIMETZADT_P(1);
	TimeTzADT  *result;

	if (timetz_cmp_internal(time1, time2) < 0)
		result = time1;
	else
		result = time2;
	PG_RETURN_TIMETZADT_P(result);
}

/* timetz_pl_interval()
 * Add interval to timetz.
 */
Datum
timetz_pl_interval(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(0);
	Interval   *span = PG_GETARG_INTERVAL_P(1);
	TimeTzADT  *result;

#ifndef HAVE_INT64_TIMESTAMP
	TimeTzADT	time1;
#endif

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	/* wrap the time-of-day into [0, one day); zone is unchanged */
#ifdef HAVE_INT64_TIMESTAMP
	result->time = time->time + span->time;
	result->time -= result->time / USECS_PER_DAY * USECS_PER_DAY;
	if (result->time < INT64CONST(0))
		result->time += USECS_PER_DAY;
#else
	result->time = time->time + span->time;
	TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
	if (result->time < 0)
		result->time += SECS_PER_DAY;
#endif

	result->zone = time->zone;

	PG_RETURN_TIMETZADT_P(result);
}

/* timetz_mi_interval()
 * Subtract interval from timetz.
 */
Datum
timetz_mi_interval(PG_FUNCTION_ARGS)
{
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(0);
	Interval   *span = PG_GETARG_INTERVAL_P(1);
	TimeTzADT  *result;

#ifndef HAVE_INT64_TIMESTAMP
	TimeTzADT	time1;
#endif

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	/* wrap the time-of-day into [0, one day); zone is unchanged */
#ifdef HAVE_INT64_TIMESTAMP
	result->time = time->time - span->time;
	result->time -= result->time / USECS_PER_DAY * USECS_PER_DAY;
	if (result->time < INT64CONST(0))
		result->time += USECS_PER_DAY;
#else
	result->time = time->time - span->time;
	TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
	if (result->time < 0)
		result->time += SECS_PER_DAY;
#endif

	result->zone = time->zone;

	PG_RETURN_TIMETZADT_P(result);
}

/* overlaps_timetz() --- implements the SQL92 OVERLAPS operator.
 *
 * Algorithm is per SQL92 spec.  This is much harder than you'd think
 * because the spec requires us to deliver a non-null answer in some cases
 * where some of the inputs are null.
 */
Datum
overlaps_timetz(PG_FUNCTION_ARGS)
{
	/*
	 * The arguments are TimeTzADT *, but we leave them as generic Datums for
	 * convenience of notation --- and to avoid dereferencing nulls.
	 */
	Datum		ts1 = PG_GETARG_DATUM(0);
	Datum		te1 = PG_GETARG_DATUM(1);
	Datum		ts2 = PG_GETARG_DATUM(2);
	Datum		te2 = PG_GETARG_DATUM(3);
	bool		ts1IsNull = PG_ARGISNULL(0);
	bool		te1IsNull = PG_ARGISNULL(1);
	bool		ts2IsNull = PG_ARGISNULL(2);
	bool		te2IsNull = PG_ARGISNULL(3);

#define TIMETZ_GT(t1,t2) \
	DatumGetBool(DirectFunctionCall2(timetz_gt,t1,t2))
#define TIMETZ_LT(t1,t2) \
	DatumGetBool(DirectFunctionCall2(timetz_lt,t1,t2))

	/*
	 * If both endpoints of interval 1 are null, the result is null (unknown).
	 * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
	 * take ts1 as the lesser endpoint.
	 */
	if (ts1IsNull)
	{
		if (te1IsNull)
			PG_RETURN_NULL();
		/* swap null for non-null */
		ts1 = te1;
		te1IsNull = true;
	}
	else if (!te1IsNull)
	{
		if (TIMETZ_GT(ts1, te1))
		{
			Datum		tt = ts1;

			ts1 = te1;
			te1 = tt;
		}
	}

	/* Likewise for interval 2. */
	if (ts2IsNull)
	{
		if (te2IsNull)
			PG_RETURN_NULL();
		/* swap null for non-null */
		ts2 = te2;
		te2IsNull = true;
	}
	else if (!te2IsNull)
	{
		if (TIMETZ_GT(ts2, te2))
		{
			Datum		tt = ts2;

			ts2 = te2;
			te2 = tt;
		}
	}

	/*
	 * At this point neither ts1 nor ts2 is null, so we can consider three
	 * cases: ts1 > ts2, ts1 < ts2, ts1 = ts2
	 */
	if (TIMETZ_GT(ts1, ts2))
	{
		/*
		 * This case is ts1 < te2 OR te1 < te2, which may look redundant but
		 * in the presence of nulls it's not quite completely so.
		 */
		if (te2IsNull)
			PG_RETURN_NULL();
		if (TIMETZ_LT(ts1, te2))
			PG_RETURN_BOOL(true);
		if (te1IsNull)
			PG_RETURN_NULL();

		/*
		 * If te1 is not null then we had ts1 <= te1 above, and we just found
		 * ts1 >= te2, hence te1 >= te2.
		 */
		PG_RETURN_BOOL(false);
	}
	else if (TIMETZ_LT(ts1, ts2))
	{
		/* This case is ts2 < te1 OR te2 < te1 */
		if (te1IsNull)
			PG_RETURN_NULL();
		if (TIMETZ_LT(ts2, te1))
			PG_RETURN_BOOL(true);
		if (te2IsNull)
			PG_RETURN_NULL();

		/*
		 * If te2 is not null then we had ts2 <= te2 above, and we just found
		 * ts2 >= te1, hence te2 >= te1.
		 */
		PG_RETURN_BOOL(false);
	}
	else
	{
		/*
		 * For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
		 * rather silly way of saying "true if both are nonnull, else null".
		 */
		if (te1IsNull || te2IsNull)
			PG_RETURN_NULL();
		PG_RETURN_BOOL(true);
	}

#undef TIMETZ_GT
#undef TIMETZ_LT
}

/* timetz_time()
 * Discard the zone field and return just the time-of-day.
 */
Datum
timetz_time(PG_FUNCTION_ARGS)
{
	TimeTzADT  *timetz = PG_GETARG_TIMETZADT_P(0);
	TimeADT		result;

	/* swallow the time zone and just return the time */
	result = timetz->time;

	PG_RETURN_TIMEADT(result);
}

/* time_timetz()
 * Attach the session's current time zone offset (for today's date) to a
 * plain time value.
 */
Datum
time_timetz(PG_FUNCTION_ARGS)
{
	TimeADT		time = PG_GETARG_TIMEADT(0);
	TimeTzADT  *result;
	struct pg_tm tt,
			   *tm = &tt;
	fsec_t		fsec;
	int			tz;

	GetCurrentDateTime(tm);
	time2tm(time, tm, &fsec);
	tz = DetermineTimeZoneOffset(tm, session_timezone);

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	result->time = time;
	result->zone = tz;

	PG_RETURN_TIMETZADT_P(result);
}

/* timestamptz_timetz()
 * Convert timestamp to timetz data type.
 */
Datum
timestamptz_timetz(PG_FUNCTION_ARGS)
{
	TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
	TimeTzADT  *result;
	struct pg_tm tt,
			   *tm = &tt;
	int			tz;
	fsec_t		fsec;
	char	   *tzn;

	if (TIMESTAMP_NOT_FINITE(timestamp))
		PG_RETURN_NULL();

	if (timestamp2tm(timestamp, &tz, tm, &fsec, &tzn, NULL) != 0)
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("timestamp out of range")));

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	tm2timetz(tm, fsec, tz, result);

	PG_RETURN_TIMETZADT_P(result);
}

/* datetimetz_timestamptz()
 * Convert date and timetz to timestamp with time zone data type.
 * Timestamp is stored in GMT, so add the time zone
 * stored with the timetz to the result.
 * - thomas 2000-03-10
 */
Datum
datetimetz_timestamptz(PG_FUNCTION_ARGS)
{
	DateADT		date = PG_GETARG_DATEADT(0);
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(1);
	TimestampTz result;

#ifdef HAVE_INT64_TIMESTAMP
	result = date * USECS_PER_DAY + time->time + time->zone * USECS_PER_SEC;
#else
	result = date * (double) SECS_PER_DAY + time->time + time->zone;
#endif

	PG_RETURN_TIMESTAMP(result);
}

/* timetz_part()
 * Extract specified field from time type.
 */
Datum
timetz_part(PG_FUNCTION_ARGS)
{
	text	   *units = PG_GETARG_TEXT_P(0);
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(1);
	float8		result;
	int			type,
				val;
	char	   *lowunits;

	lowunits = downcase_truncate_identifier(VARDATA(units),
											VARSIZE(units) - VARHDRSZ,
											false);

	type = DecodeUnits(0, lowunits, &val);
	if (type == UNKNOWN_FIELD)
		type = DecodeSpecial(0, lowunits, &val);

	if (type == UNITS)
	{
		double		dummy;
		int			tz;
		fsec_t		fsec;
		struct pg_tm tt,
				   *tm = &tt;

		timetz2tm(time, tm, &fsec, &tz);

		switch (val)
		{
			/* zone is stored negated relative to the SQL sign convention */
			case DTK_TZ:
				result = -tz;
				break;

			case DTK_TZ_MINUTE:
				result = -tz;
				result /= SECS_PER_MINUTE;
				FMODULO(result, dummy, (double) SECS_PER_MINUTE);
				break;

			case DTK_TZ_HOUR:
				dummy = -tz;
				FMODULO(dummy, result, (double) SECS_PER_HOUR);
				break;

			case DTK_MICROSEC:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec * USECS_PER_SEC + fsec;
#else
				result = (tm->tm_sec + fsec) * 1000000;
#endif
				break;

			case DTK_MILLISEC:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec * INT64CONST(1000) + fsec / INT64CONST(1000);
#else
				result = (tm->tm_sec + fsec) * 1000;
#endif
				break;

			case DTK_SECOND:
#ifdef HAVE_INT64_TIMESTAMP
				result = tm->tm_sec + fsec / USECS_PER_SEC;
#else
				result = tm->tm_sec + fsec;
#endif
				break;

			case DTK_MINUTE:
				result = tm->tm_min;
				break;

			case DTK_HOUR:
				result = tm->tm_hour;
				break;

			/* date-oriented fields make no sense for TIMETZ */
			case DTK_DAY:
			case DTK_MONTH:
			case DTK_QUARTER:
			case DTK_YEAR:
			case DTK_DECADE:
			case DTK_CENTURY:
			case DTK_MILLENNIUM:
			default:
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				errmsg("\"time with time zone\" units \"%s\" not recognized",
					   DatumGetCString(DirectFunctionCall1(textout,
												 PointerGetDatum(units))))));
				result = 0;
		}
	}
	else if (type == RESERV && val == DTK_EPOCH)
	{
		/* epoch includes the zone displacement (GMT-equivalent seconds) */
#ifdef HAVE_INT64_TIMESTAMP
		result = time->time / 1000000.0 + time->zone;
#else
		result = time->time + time->zone;
#endif
	}
	else
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("\"time with time zone\" units \"%s\" not recognized",
						DatumGetCString(DirectFunctionCall1(textout,
												 PointerGetDatum(units))))));
		result = 0;
	}

	PG_RETURN_FLOAT8(result);
}

/* timetz_zone()
 * Encode time with time zone type with specified time zone.
 * Applies DST rules as of the current date.
 */
Datum
timetz_zone(PG_FUNCTION_ARGS)
{
	text	   *zone = PG_GETARG_TEXT_P(0);
	TimeTzADT  *t = PG_GETARG_TIMETZADT_P(1);
	TimeTzADT  *result;
	int			tz;
	char		tzname[TZ_STRLEN_MAX + 1];
	int			len;
	char	   *lowzone;
	int			type,
				val;
	pg_tz	   *tzp;

	/*
	 * Look up the requested timezone.  First we look in the date token table
	 * (to handle cases like "EST"), and if that fails, we look in the
	 * timezone database (to handle cases like "America/New_York").  (This
	 * matches the order in which timestamp input checks the cases; it's
	 * important because the timezone database unwisely uses a few zone names
	 * that are identical to offset abbreviations.)
	 */
	lowzone = downcase_truncate_identifier(VARDATA(zone),
										   VARSIZE(zone) - VARHDRSZ,
										   false);
	type = DecodeSpecial(0, lowzone, &val);

	if (type == TZ || type == DTZ)
		tz = val * 60;
	else
	{
		len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
		memcpy(tzname, VARDATA(zone), len);
		tzname[len] = '\0';
		tzp = pg_tzset(tzname);
		if (tzp)
		{
			/* Get the offset-from-GMT that is valid today for the zone */
			pg_time_t	now;
			struct pg_tm *tm;

			now = time(NULL);
			tm = pg_localtime(&now, tzp);
			tz = -tm->tm_gmtoff;
		}
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("time zone \"%s\" not recognized", tzname)));
			tz = 0;				/* keep compiler quiet */
		}
	}

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	/* rebase the stored time to the new zone, wrapping into [0, one day) */
#ifdef HAVE_INT64_TIMESTAMP
	result->time = t->time + (t->zone - tz) * USECS_PER_SEC;
	while (result->time < INT64CONST(0))
		result->time += USECS_PER_DAY;
	while (result->time >= USECS_PER_DAY)
		result->time -= USECS_PER_DAY;
#else
	result->time = t->time + (t->zone - tz);
	while (result->time < 0)
		result->time += SECS_PER_DAY;
	while (result->time >= SECS_PER_DAY)
		result->time -= SECS_PER_DAY;
#endif

	result->zone = tz;

	PG_RETURN_TIMETZADT_P(result);
}

/* timetz_izone()
 * Encode time with time zone type with specified time interval as time zone.
 */
Datum
timetz_izone(PG_FUNCTION_ARGS)
{
	Interval   *zone = PG_GETARG_INTERVAL_P(0);
	TimeTzADT  *time = PG_GETARG_TIMETZADT_P(1);
	TimeTzADT  *result;
	int			tz;

	/* a zone offset cannot contain a months component */
	if (zone->month != 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("\"interval\" time zone \"%s\" not valid",
						DatumGetCString(DirectFunctionCall1(interval_out,
												  PointerGetDatum(zone))))));

#ifdef HAVE_INT64_TIMESTAMP
	tz = -(zone->time / USECS_PER_SEC);
#else
	tz = -(zone->time);
#endif

	result = (TimeTzADT *) palloc(sizeof(TimeTzADT));

	/* rebase the stored time to the new zone, wrapping into [0, one day) */
#ifdef HAVE_INT64_TIMESTAMP
	result->time = time->time + (time->zone - tz) * USECS_PER_SEC;
	while (result->time < INT64CONST(0))
		result->time += USECS_PER_DAY;
	while (result->time >= USECS_PER_DAY)
		result->time -= USECS_PER_DAY;
#else
	result->time = time->time + (time->zone - tz);
	while (result->time < 0)
		result->time += SECS_PER_DAY;
	while (result->time >= SECS_PER_DAY)
		result->time -= SECS_PER_DAY;
#endif

	result->zone = tz;

	PG_RETURN_TIMETZADT_P(result);
}
420200.c
// // Created by overseven on 21.06.2021. // #include <stdint.h> #include "network.h" static const char* NETWORK_SORA = "XOR"; static const char* NETWORK_SORA_TESTNET = "XOR"; static const char* GENESIS_SORA = "7e4e32d0feafd4f9c9414b0be86373f9a1efa904809b683453a9af6856d38ad5"; static const char* GENESIS_SORA_TESTNET = "1a0983c6c9fd3178ae24656bcfca4510a439ab7b90c83e360a1671609752b09a"; static const char* NETWORK_ERROR = "ERROR"; static const char* GENESIS_ERROR = "00"; const char *get_network_name(uint8_t id) { switch (id) { case Network_SORA: return NETWORK_SORA; case Network_SORA_TESTNET: return NETWORK_SORA_TESTNET; default: return NETWORK_ERROR; } } // Full list of der. paths: https://github.com/satoshilabs/slips/blob/master/slip-0044.md uint32_t get_network_derivation_path(uint8_t id) { switch (id) { case Network_SORA: return (0x80000000 | 0x269); // 617 = m/44/617/0/0/0 case Network_SORA_TESTNET: return (0x80000000 | 0x269); // 617 = m/44/617/0/0/0 default: return 0; } } const char* get_network_genesis_hash(uint8_t id){ switch (id) { case Network_SORA: return GENESIS_SORA; case Network_SORA_TESTNET: return GENESIS_SORA_TESTNET; default: return GENESIS_ERROR; } } // https://polkadot.subscan.io/tools/ss58_transform uint8_t get_network_address_type(uint8_t id){ switch (id) { case Network_SORA: return 69; case Network_SORA_TESTNET: return 69; default: return 0; } }
36847.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE GGG IIIII SSSSS TTTTT RRRR Y Y % % R R E G I SS T R R Y Y % % RRRR EEE G GGG I SSS T RRRR Y % % R R E G G I SS T R R Y % % R R EEEEE GGG IIIII SSSSS T R R Y % % % % % % MagickCore Registry Methods % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/registry.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/utility.h" /* Typedef declarations. */ typedef struct _RegistryInfo { RegistryType type; void *value; size_t signature; } RegistryInfo; /* Static declarations. 
*/

/* Process-global registry (lazily created splay tree) and its guard. */
static SplayTreeInfo
  *registry = (SplayTreeInfo *) NULL;

static SemaphoreInfo
  *registry_semaphore = (SemaphoreInfo *) NULL;

/*
  DefineImageRegistry() associates a key/value pair with the image registry.
  The option has the form "key=value"; it is split at the first '=' and
  forwarded to SetImageRegistry().

    o type: the registry value type.
    o option: the "key=value" string.
    o exception: reports any error.
*/
MagickExport MagickBooleanType DefineImageRegistry(const RegistryType type,
  const char *option,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent],
    value[MaxTextExtent];

  register char
    *p;

  assert(option != (const char *) NULL);
  (void) CopyMagickString(key,option,MaxTextExtent);
  /* Find the first '='; everything after it is the value (empty if none). */
  for (p=key; *p != '\0'; p++)
    if (*p == '=')
      break;
  *value='\0';
  if (*p == '=')
    (void) CopyMagickString(value,p+1,MaxTextExtent);
  *p='\0';  /* truncate key at the '=' (or at end of string) */
  return(SetImageRegistry(type,key,value,exception));
}

/*
  DeleteImageRegistry() deletes a key (and destroys its value) from the image
  registry.  Returns MagickFalse if the registry does not exist or the key is
  not found.

    o key: the registry key.
*/
MagickExport MagickBooleanType DeleteImageRegistry(const char *key)
{
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",key);
  if (registry == (void *) NULL)
    return(MagickFalse);
  return(DeleteNodeFromSplayTree(registry,key));
}

/*
  GetImageRegistry() returns a freshly allocated copy of the value associated
  with an image registry key, converted to the requested type, or NULL if the
  key is absent or the conversion is not supported.  The caller owns the
  returned clone.

    o type: the requested value type.
    o key: the registry key.
    o exception: reports any error.
*/
MagickExport void *GetImageRegistry(const RegistryType type,const char *key,
  ExceptionInfo *exception)
{
  void
    *value;

  RegistryInfo
    *registry_info;

  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",key);
  if (registry == (void *) NULL)
    return((void *) NULL);
  registry_info=(RegistryInfo *) GetValueFromSplayTree(registry,key);
  if (registry_info == (void *) NULL)
    return((void *) NULL);
  value=(void *) NULL;
  switch (type)
  {
    case ImageRegistryType:
    {
      /* Image is only returned as an image: no cross-type conversion. */
      if (type == registry_info->type)
        value=(void *) CloneImageList((Image *) registry_info->value,exception);
      break;
    }
    case ImageInfoRegistryType:
    {
      if (type == registry_info->type)
        value=(void *) CloneImageInfo((ImageInfo *) registry_info->value);
      break;
    }
    case StringRegistryType:
    {
      /* Any stored type can be rendered as a string. */
      switch (registry_info->type)
      {
        case ImageRegistryType:
        {
          /* For images, the string form is the image filename. */
          value=(Image *) ConstantString(((Image *)
            registry_info->value)->filename);
          break;
        }
        case ImageInfoRegistryType:
        {
          value=(Image *) ConstantString(((ImageInfo *)
            registry_info->value)->filename);
          break;
        }
        case StringRegistryType:
        {
          value=(void *) ConstantString((char *) registry_info->value);
          break;
        }
        default:
          break;
      }
      break;
    }
    default:
      break;
  }
  return(value);
}

/*
  GetNextImageRegistry() returns the key at the registry iterator's current
  position and advances it; NULL when the registry is empty, absent, or
  exhausted.  Use with ResetImageRegistryIterator().
*/
MagickExport char *GetNextImageRegistry(void)
{
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (registry == (void *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree(registry));
}

/*
  RegistryComponentGenesis() instantiates the registry component: it creates
  the semaphore that serializes lazy creation of the registry tree.
*/
MagickExport MagickBooleanType RegistryComponentGenesis(void)
{
  if (registry_semaphore == (SemaphoreInfo *) NULL)
    registry_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}

/*
  RegistryComponentTerminus() destroys the registry component.
*/
MagickExport void RegistryComponentTerminus(void)
{
  /* Re-create the guard if terminus runs without a prior genesis. */
  if (registry_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&registry_semaphore);
  LockSemaphoreInfo(registry_semaphore);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Destroying the tree runs DestroyRegistryNode on every stored value. */
  if (registry != (void *) NULL)
    registry=DestroySplayTree(registry);
  UnlockSemaphoreInfo(registry_semaphore);
  DestroySemaphoreInfo(&registry_semaphore);
}

/*
  RemoveImageRegistry() removes a key from the image registry and returns its
  value; ownership of the value transfers to the caller.  NULL if the registry
  does not exist or the key is absent.

    o key: the registry key.
*/
MagickExport void *RemoveImageRegistry(const char *key)
{
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",key);
  if (registry == (void *) NULL)
    return((void *) NULL);
  return(RemoveNodeFromSplayTree(registry,key));
}

/*
  ResetImageRegistryIterator() resets the registry iterator.  Use it in
  conjunction with GetNextImageRegistry() to iterate over all the keys in the
  image registry.
*/
MagickExport void ResetImageRegistryIterator(void)
{
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (registry == (void *) NULL)
    return;
  ResetSplayTreeIterator(registry);
}

/*
  SetImageRegistry() associates a value with an image registry key.  The value
  is cloned, so the caller retains ownership of its argument.

    o type: the value type.
    o key: the registry key (copied).
    o value: the value to store (cloned).
    o exception: reports any error.
*/

/* Splay-tree value destructor: frees a RegistryInfo node and its payload. */
static void *DestroyRegistryNode(void *registry_info)
{
  register RegistryInfo
    *p;

  p=(RegistryInfo *) registry_info;
  switch (p->type)
  {
    case StringRegistryType:
    default:
    {
      p->value=RelinquishMagickMemory(p->value);
      break;
    }
    case ImageRegistryType:
    {
      p->value=(void *) DestroyImageList((Image *) p->value);
      break;
    }
    case ImageInfoRegistryType:
    {
      p->value=(void *) DestroyImageInfo((ImageInfo *) p->value);
      break;
    }
  }
  return(RelinquishMagickMemory(p));
}

MagickExport MagickBooleanType SetImageRegistry(const RegistryType type,
  const char *key,const void *value,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  RegistryInfo
    *registry_info;

  void
    *clone_value;

  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",key);
  if (value == (const void *) NULL)
    return(MagickFalse);
  /* Clone the caller's value so the registry owns its own copy. */
  clone_value=(void *) NULL;
  switch (type)
  {
    case StringRegistryType:
    default:
    {
      const char
        *string;

      string=(const char *) value;
      clone_value=(void *) ConstantString(string);
      break;
    }
    case ImageRegistryType:
    {
      const Image
        *image;

      image=(const Image *) value;
      /* Reject objects that fail the signature sanity check. */
      if (image->signature != MagickCoreSignature)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),RegistryError,
            "UnableToSetRegistry","%s",key);
          return(MagickFalse);
        }
      clone_value=(void *) CloneImageList(image,exception);
      break;
    }
    case ImageInfoRegistryType:
    {
      const ImageInfo
        *image_info;

      image_info=(const ImageInfo *) value;
      if (image_info->signature != MagickCoreSignature)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),RegistryError,
            "UnableToSetRegistry","%s",key);
          return(MagickFalse);
        }
      clone_value=(void *) CloneImageInfo(image_info);
      break;
    }
  }
  if (clone_value == (void *) NULL)
    return(MagickFalse);
  registry_info=(RegistryInfo *) AcquireCriticalMemory(sizeof(*registry_info));
  (void) ResetMagickMemory(registry_info,0,sizeof(*registry_info));
  registry_info->type=type;
  registry_info->value=clone_value;
  registry_info->signature=MagickCoreSignature;
  /* Lazily create the registry tree; double-checked under the semaphore. */
  if (registry == (SplayTreeInfo *) NULL)
    {
      if (registry_semaphore == (SemaphoreInfo *) NULL)
        ActivateSemaphoreInfo(&registry_semaphore);
      LockSemaphoreInfo(registry_semaphore);
      if (registry == (SplayTreeInfo *) NULL)
        registry=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
          DestroyRegistryNode);
      UnlockSemaphoreInfo(registry_semaphore);
    }
  status=AddValueToSplayTree(registry,ConstantString(key),registry_info);
  return(status);
}
99005.c
// Options: --arrays --no-bitfields --checksum --no-float --no-inline-function --max-array-dim 3 --max-array-len-per-dim 10 --max-block-depth 5 --max-block-size 4 --max-expr-complexity 2 --max-funcs 1 --safe-math --no-packed-struct --no-pointers --no-structs --no-unions --no-volatiles --no-volatile-pointers --no-const-pointers --concise #include "csmith.h" static long __undefined; static int32_t g_2[8] = {(-4L),(-4L),(-4L),(-4L),(-4L),(-4L),(-4L),(-4L)}; static int32_t g_5 = 1L; static int8_t g_17 = (-5L); static uint32_t g_20 = 18446744073709551615UL; static int16_t g_29[8] = {0xA7C7L,0xA7C7L,0xA7C7L,0xA7C7L,0xA7C7L,0xA7C7L,0xA7C7L,0xA7C7L}; static uint32_t g_30 = 0xC0C7FD57L; static int32_t g_42 = 0x28C51F7EL; static int32_t g_78 = (-1L); static int8_t g_104 = 0L; static int8_t g_114 = 0x48L; static uint32_t g_129 = 0x2273286AL; static int16_t g_135 = 8L; static uint16_t g_156 = 0x057CL; static uint32_t g_218 = 4294967291UL; static int64_t g_223[9] = {0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL,0xD1B266F61C0A8395LL}; static uint16_t g_226 = 0UL; static const uint8_t func_1(void); static const uint8_t func_1(void) { int16_t l_9 = 0L; int32_t l_19 = 0x7C422D5FL; int16_t l_39 = (-1L); int32_t l_144 = (-1L); int16_t l_159 = (-1L); int32_t l_194 = 7L; uint32_t l_199 = 1UL; lbl_160: for (g_2[1] = 0; (g_2[1] >= (-20)); g_2[1] = safe_sub_func_uint16_t_u_u(g_2[1], 3)) { uint16_t l_10 = 0xE056L; int32_t l_11[8] = {8L,8L,0xF7921E12L,8L,8L,0xF7921E12L,8L,8L}; int32_t l_48 = 0xA975881DL; uint64_t l_126 = 18446744073709551615UL; int16_t l_139 = (-3L); int i; for (g_5 = 17; (g_5 == 16); g_5--) { uint16_t l_8 = 0x9F84L; if (g_2[0]) break; l_8 |= g_2[1]; } g_5 = (((l_9 && 0x44AD374006310FECLL) < l_10) & g_2[2]); l_11[4] ^= (1L ^ 0L); if (g_2[5]) { int8_t l_12 = 0L; int32_t l_16[6][1] = 
{{0x3A38D067L},{0x05D76270L},{0x3A38D067L},{0x05D76270L},{0x3A38D067L},{0x05D76270L}}; int32_t l_28[10] = {1L,1L,0L,0x1E2CD849L,0L,1L,1L,0L,0xF199E1F9L,0L}; int i, j; if (l_12) { uint64_t l_13 = 0xD816F93BD5DE1FC6LL; int32_t l_18 = 0x3DEC995FL; l_13 = (-1L); if (l_13) continue; for (g_5 = 0; (g_5 != 25); g_5 = safe_add_func_uint32_t_u_u(g_5, 9)) { uint64_t l_41 = 18446744073709551615UL; int32_t l_43 = (-4L); --g_20; if ((l_28[3] |= ((safe_rshift_func_int16_t_s_s((safe_div_func_int8_t_s_s((((safe_unary_minus_func_int8_t_s((1UL < 0xE91CL))) <= 0x4B693050L) & 1UL), 0x0FL)), l_16[3][0])) && l_18))) { uint16_t l_40 = 0x2DB6L; g_30--; l_40 = (safe_add_func_uint64_t_u_u(((safe_div_func_int32_t_s_s(((((safe_sub_func_uint64_t_u_u(((g_29[6] | l_39) | 0x3FL), g_29[3])) && 0xE011L) >= g_5) & 65535UL), g_2[1])) | l_39), g_2[1])); l_41 = l_40; } else { g_42 = (((l_11[2] && g_29[6]) , l_18) , (-6L)); l_19 = g_42; l_43 &= ((-7L) && l_16[1][0]); l_16[5][0] = (safe_mul_func_int8_t_s_s((safe_div_func_uint32_t_u_u((l_18 = l_19), l_10)), l_10)); } if (g_29[6]) continue; l_18 |= (g_29[6] && g_29[6]); } } else { uint32_t l_49 = 18446744073709551615UL; l_49--; return g_29[3]; } } else { uint64_t l_76[7][4] = {{18446744073709551615UL,18446744073709551615UL,0x9D740F48B4F12B15LL,0UL},{0x4433F2F19166C0C0LL,0x33A031552AE0D08FLL,0x4433F2F19166C0C0LL,0x9D740F48B4F12B15LL},{0x4433F2F19166C0C0LL,0x9D740F48B4F12B15LL,0x9D740F48B4F12B15LL,0x4433F2F19166C0C0LL},{18446744073709551615UL,0x9D740F48B4F12B15LL,0UL,0x9D740F48B4F12B15LL},{0x9D740F48B4F12B15LL,0x33A031552AE0D08FLL,0UL,0UL},{18446744073709551615UL,18446744073709551615UL,0x9D740F48B4F12B15LL,0UL},{0x4433F2F19166C0C0LL,0x33A031552AE0D08FLL,0x4433F2F19166C0C0LL,0x9D740F48B4F12B15LL}}; const uint32_t l_77 = 0x8D4E4AF0L; int32_t l_107[9][9][3] = 
{{{0x7CEBB1B1L,7L,0L},{0x3DFEA10FL,0x5D179A93L,(-1L)},{(-7L),9L,0x49FC4A87L},{6L,5L,0x2964CB19L},{(-7L),0x94CD19B6L,1L},{0x3DFEA10FL,0x12C06D41L,(-2L)},{0x7CEBB1B1L,(-4L),0xE31D7495L},{0xC3585974L,(-2L),0x5A7CFDFCL},{0x1196D9F7L,0xFC800275L,0x5D179A93L}},{{0x94CD19B6L,0x65199CEAL,0L},{0xFC800275L,0x65199CEAL,0x7CEBB1B1L},{5L,0xFC800275L,0x7DE0C385L},{0x4B99A093L,(-2L),7L},{(-4L),(-4L),5L},{0x7DE0C385L,0x12C06D41L,(-4L)},{9L,0x94CD19B6L,(-1L)},{0x49FC4A87L,5L,7L},{0xE31D7495L,9L,(-1L)}},{{(-10L),0x5D179A93L,(-4L)},{(-6L),7L,5L},{(-1L),(-1L),7L},{0x5D179A93L,(-1L),0x7DE0C385L},{0L,0x7DE0C385L,0x7CEBB1B1L},{(-10L),0L,0L},{(-10L),(-9L),0x5D179A93L},{0L,6L,0x5A7CFDFCL},{0x5D179A93L,0xE31D7495L,0xE31D7495L}},{{(-1L),0x1196D9F7L,(-2L)},{(-6L),(-10L),1L},{(-10L),0x07506916L,0x2964CB19L},{0xE31D7495L,0xC3585974L,0x49FC4A87L},{0x49FC4A87L,0x07506916L,(-1L)},{9L,(-10L),0L},{0x7DE0C385L,0x1196D9F7L,0xC3585974L},{(-4L),0xE31D7495L,(-6L)},{0x4B99A093L,6L,0x65199CEAL}},{{5L,(-9L),0x3DFEA10FL},{0xFC800275L,0L,0x3DFEA10FL},{0x94CD19B6L,0x7DE0C385L,0x65199CEAL},{0x1196D9F7L,(-1L),(-6L)},{0xC3585974L,(-1L),0xC3585974L},{0x7CEBB1B1L,7L,0L},{0x3DFEA10FL,0x5D179A93L,(-1L)},{(-7L),9L,0x49FC4A87L},{6L,5L,0x2964CB19L}},{{(-7L),0x94CD19B6L,1L},{0x3DFEA10FL,0x12C06D41L,(-2L)},{0x7CEBB1B1L,(-4L),0xE31D7495L},{0xC3585974L,(-2L),0x5A7CFDFCL},{0x1196D9F7L,0xFC800275L,0x5D179A93L},{0x94CD19B6L,0x65199CEAL,0L},{0xFC800275L,0x65199CEAL,0x7CEBB1B1L},{5L,0xFC800275L,0x7DE0C385L},{0x4B99A093L,0x07506916L,1L}},{{5L,5L,0x23E8202AL},{5L,(-10L),5L},{0x12C06D41L,6L,(-10L)},{(-6L),(-2L),0x2964CB19L},{0x5D179A93L,0x12C06D41L,(-10L)},{0x1196D9F7L,(-7L),5L},{(-1L),0x2964CB19L,0x23E8202AL},{5L,0x7DE0C385L,1L},{(-7L),(-10L),5L}},{{(-1L),5L,7L},{0x94CD19B6L,0x7D9254FFL,0x33BC337EL},{0x94CD19B6L,0xC3585974L,(-7L)},{(-1L),0x49FC4A87L,(-4L)},{(-7L),0x5D179A93L,0x5D179A93L},{5L,7L,0x07506916L},{(-1L),0x1196D9F7L,0x4B99A093L},{0x1196D9F7L,0L,0L},{0x5D179A93L,9L,(-6L)}},{{(-6L),0L,0x7DE0C385L},{0x12C06D41L,0x1196D9F7L,0
x7D9254FFL},{5L,7L,9L},{5L,0x5D179A93L,(-1L)},{0x3DFEA10FL,0x49FC4A87L,(-9L)},{0x23E8202AL,0xC3585974L,0x7CEBB1B1L},{0x5A7CFDFCL,0x7D9254FFL,0x7CEBB1B1L},{6L,5L,(-9L)},{7L,(-10L),(-1L)}}}; uint32_t l_148 = 4294967288UL; int i, j, k; if ((safe_mul_func_int16_t_s_s((g_78 &= (safe_add_func_int32_t_s_s((((+(g_29[1] = (((safe_lshift_func_int8_t_s_s((safe_lshift_func_int16_t_s_s(((safe_div_func_int64_t_s_s(((safe_lshift_func_uint16_t_u_u(((safe_lshift_func_uint8_t_u_u((((safe_lshift_func_int8_t_s_s((g_17 = (safe_sub_func_int16_t_s_s(((((safe_sub_func_uint8_t_u_u(((!(safe_sub_func_uint64_t_u_u(((-1L) | 9UL), l_76[3][3]))) & g_42), 0x2CL)) || g_17) ^ 0x0AFDD742L) ^ 8UL), g_29[7]))), 0)) && 0xE8488E6FL) >= g_29[0]), g_2[0])) > g_5), l_76[3][3])) > l_11[1]), l_77)) != l_48), l_77)), 7)) >= g_42) ^ g_2[1]))) || 1L) != g_2[2]), g_2[1]))), l_48))) { int64_t l_89 = 0xA4024FFAFD2A9E32LL; l_89 = ((safe_unary_minus_func_uint64_t_u((((((safe_lshift_func_int8_t_s_u((safe_mul_func_uint16_t_u_u((((+(safe_add_func_int8_t_s_s((safe_sub_func_uint8_t_u_u(l_11[7], 0xFDL)), g_2[0]))) ^ 0x3E519B98L) > 0x187EA63FL), l_19)), 2)) | 1L) < g_29[6]) != 0x1715C743L) & l_48))) & l_11[1]); } else { int32_t l_94 = 0x717EFFEBL; int32_t l_113 = 0xD3AFC3C3L; if ((safe_lshift_func_uint8_t_u_s((safe_mod_func_uint32_t_u_u(0x20E1BCB0L, 4294967287UL)), l_94))) { uint32_t l_101[2][2][4] = {{{1UL,0UL,0UL,1UL},{0UL,1UL,0UL,0UL}},{{1UL,1UL,4294967295UL,1UL},{1UL,0UL,0UL,1UL}}}; int64_t l_111 = 0xF186003B377FB2C3LL; int32_t l_112[3]; int i, j, k; for (i = 0; i < 3; i++) l_112[i] = 1L; l_101[1][1][2] = (safe_lshift_func_int8_t_s_s((safe_sub_func_uint8_t_u_u(((safe_div_func_int16_t_s_s(g_17, (-1L))) ^ l_94), l_39)), 6)); g_42 = (safe_add_func_int32_t_s_s(0x24A76911L, g_104)); if ((((((safe_div_func_uint32_t_u_u((((l_11[2] = (0x7BF5L <= g_30)) , 1UL) ^ 0xB0F4L), g_78)) , 1UL) ^ g_20) <= g_29[6]) <= 2UL)) { uint64_t l_108[3][9] = 
{{18446744073709551607UL,18446744073709551614UL,18446744073709551607UL,18446744073709551607UL,18446744073709551614UL,18446744073709551607UL,18446744073709551607UL,18446744073709551614UL,18446744073709551607UL},{0xBCA39D249FF5904ALL,0x5D717EF146544C23LL,0xBCA39D249FF5904ALL,0xBCA39D249FF5904ALL,0x5D717EF146544C23LL,0xBCA39D249FF5904ALL,0xBCA39D249FF5904ALL,0x5D717EF146544C23LL,0xBCA39D249FF5904ALL},{18446744073709551607UL,18446744073709551614UL,18446744073709551607UL,18446744073709551607UL,18446744073709551614UL,18446744073709551607UL,18446744073709551607UL,18446744073709551614UL,18446744073709551607UL}}; int i, j; l_108[2][3]++; } else { uint32_t l_115 = 0x330DB192L; if (g_42) break; ++l_115; } l_112[1] = (g_5 ^= l_11[4]); } else { int64_t l_122[5] = {0L,0L,0L,0L,0L}; int32_t l_125 = 0x1D16DB7CL; int32_t l_134 = 6L; int i; l_107[7][7][2] = ((safe_mod_func_int16_t_s_s((((safe_div_func_uint32_t_u_u(((1L || l_122[3]) <= 0x4958L), l_9)) , g_29[7]) < l_48), l_39)) , 0x2C83D21EL); for (l_19 = 0; (l_19 <= 2); l_19 += 1) { l_11[1] = 3L; } if ((safe_mul_func_uint8_t_u_u(0xD3L, l_122[3]))) { if (l_122[4]) break; return l_11[5]; } else { --l_126; ++g_129; g_5 |= 0x013F98BFL; } for (l_113 = 0; (l_113 >= (-13)); l_113--) { uint64_t l_136 = 0x8E61E940A49417E0LL; l_136--; } } l_139 = (l_113 | g_129); if (l_39) continue; for (l_19 = 0; (l_19 <= (-14)); l_19--) { int16_t l_155 = (-2L); for (g_30 = 26; (g_30 < 34); g_30 = safe_add_func_int16_t_s_s(g_30, 2)) { uint64_t l_145 = 0x04D593052EA8D45ALL; l_11[4] |= 0L; g_42 = ((g_42 , g_42) < 0x786C87C1L); --l_145; } g_5 |= (l_148 = 2L); if (l_19) continue; if ((safe_rshift_func_uint8_t_u_u(0x39L, 1))) { l_107[7][1][2] &= (safe_rshift_func_int16_t_s_u(((((safe_lshift_func_int8_t_s_u((0x47A7A27731F37730LL ^ g_129), g_129)) == g_2[5]) < l_11[4]) && g_114), 5)); if (g_114) goto lbl_160; } else { g_42 = 0xE6B62B2DL; --g_156; l_94 = (l_159 >= g_42); if (l_155) continue; } } } return l_19; } } lbl_175: l_144 &= 
(safe_mul_func_int16_t_s_s((((safe_add_func_uint16_t_u_u((safe_add_func_uint16_t_u_u((safe_mod_func_int32_t_s_s((((((safe_lshift_func_uint16_t_u_s((safe_add_func_int16_t_s_s(((0x53B31187L > g_156) ^ 0x99L), g_129)), 12)) >= g_78) & g_29[2]) == (-2L)) , g_78), l_159)), l_19)), g_17)) | 249UL) <= g_30), l_9)); if (l_19) goto lbl_160; for (l_159 = 0; (l_159 >= 6); l_159 = safe_add_func_int64_t_s_s(l_159, 7)) { uint32_t l_176[9][3][9] = {{{4294967289UL,0x7DD29350L,0xE8A5D0D5L,0xC171E5B3L,0xE8A5D0D5L,0x7DD29350L,4294967289UL,0x9C82483CL,0x7DD29350L},{0xC171E5B3L,0x7DD29350L,4294967295UL,0UL,0x1E81E1FAL,4294967288UL,8UL,0x1E81E1FAL,0x7DD29350L},{4294967289UL,0xE8A5D0D5L,0x9C82483CL,8UL,4294967295UL,4294967295UL,8UL,0x9C82483CL,0xE8A5D0D5L}},{{0xF213F659L,0x9C82483CL,4294967288UL,7UL,4294967295UL,4294967288UL,4294967289UL,0xE8A5D0D5L,0x9C82483CL},{0UL,0x1E81E1FAL,4294967288UL,8UL,0x1E81E1FAL,0x7DD29350L,7UL,0x7DD29350L,0x1E81E1FAL},{0UL,0x9C82483CL,0x9C82483CL,0UL,0xE8A5D0D5L,0x1E81E1FAL,0xF213F659L,0x7DD29350L,0x9C82483CL}},{{0xF213F659L,0xE8A5D0D5L,4294967295UL,0xC171E5B3L,4294967291UL,6UL,0xE8A5D0D5L,0x06CFD441L,0x06CFD441L},{0x1E81E1FAL,0x0A783F81L,0x06CFD441L,4294967288UL,0x06CFD441L,0x0A783F81L,0x1E81E1FAL,4294967292UL,0x0A783F81L},{4294967288UL,0x0A783F81L,0xCD555FCFL,0x9C82483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L}},{{0x1E81E1FAL,0x06CFD441L,4294967292UL,4294967295UL,0xCD555FCFL,0xCD555FCFL,4294967295UL,4294967292UL,0x06CFD441L},{4294967291UL,4294967292UL,1UL,0xE8A5D0D5L,0xCD555FCFL,1UL,0x1E81E1FAL,0x06CFD441L,4294967292UL},{0x9C82483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L,0xE8A5D0D5L,0x0A783F81L,6UL}},{{0x9C82483CL,4294967292UL,4294967292UL,0x9C82483CL,0x06CFD441L,6UL,4294967291UL,0x0A783F81L,4294967292UL},{4294967291UL,0x06CFD441L,0xCD555FCFL,4294967288UL,0x836A7604L,6UL,0xE8A5D0D5L,0x06CFD441L,0x06CFD441L},{0x1E81E1FAL,0x0A783F81L,0x06CFD441L,4294967288UL,0x06CFD441L,0x0A783F81L,0x1E81E1FAL,4294967292UL,0x0A783F81L}},{{4294967288UL,0x0A783F81L,0xCD555FCFL,0x9C8
2483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L},{0x1E81E1FAL,0x06CFD441L,4294967292UL,4294967295UL,0xCD555FCFL,0xCD555FCFL,4294967295UL,4294967292UL,0x06CFD441L},{4294967291UL,4294967292UL,1UL,0xE8A5D0D5L,0xCD555FCFL,1UL,0x1E81E1FAL,0x06CFD441L,4294967292UL}},{{0x9C82483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L,0xE8A5D0D5L,0x0A783F81L,6UL},{0x9C82483CL,4294967292UL,4294967292UL,0x9C82483CL,0x06CFD441L,6UL,4294967291UL,0x0A783F81L,4294967292UL},{4294967291UL,0x06CFD441L,0xCD555FCFL,4294967288UL,0x836A7604L,6UL,0xE8A5D0D5L,0x06CFD441L,0x06CFD441L}},{{0x1E81E1FAL,0x0A783F81L,0x06CFD441L,4294967288UL,0x06CFD441L,0x0A783F81L,0x1E81E1FAL,4294967292UL,0x0A783F81L},{4294967288UL,0x0A783F81L,0xCD555FCFL,0x9C82483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L},{0x1E81E1FAL,0x06CFD441L,4294967292UL,4294967295UL,0xCD555FCFL,0xCD555FCFL,4294967295UL,4294967292UL,0x06CFD441L}},{{4294967291UL,4294967292UL,1UL,0xE8A5D0D5L,0xCD555FCFL,1UL,0x1E81E1FAL,0x06CFD441L,4294967292UL},{0x9C82483CL,6UL,1UL,4294967295UL,6UL,0x0A783F81L,0xE8A5D0D5L,0x0A783F81L,6UL},{0x9C82483CL,4294967292UL,4294967292UL,0x9C82483CL,0x06CFD441L,6UL,4294967291UL,0x0A783F81L,4294967292UL}}}; int32_t l_193[10]; uint32_t l_208 = 18446744073709551611UL; int i, j, k; for (i = 0; i < 10; i++) l_193[i] = 0x6987CFA7L; if (l_159) goto lbl_175; g_2[1] ^= 0x3FEDF429L; l_176[0][0][3]--; for (l_39 = 0; (l_39 <= 7); l_39 += 1) { int32_t l_221 = 0xE40D1DC7L; int32_t l_222 = 0x4737F3D3L; int i; if ((((safe_rshift_func_uint16_t_u_s(((l_194 = (safe_sub_func_uint32_t_u_u((safe_rshift_func_int8_t_s_s((safe_mod_func_uint16_t_u_u((safe_mod_func_uint64_t_u_u(((safe_div_func_uint64_t_u_u((safe_rshift_func_int16_t_s_u(((l_193[4] = 0UL) , g_29[l_39]), g_5)), l_176[0][0][3])) ^ 0L), g_29[l_39])), 0xDAB5L)), g_78)), 0x70141F53L))) > 1L), 9)) != (-3L)) <= g_29[l_39])) { g_5 &= (((g_29[1] == l_193[4]) == 0L) , l_176[0][0][5]); if (l_193[4]) continue; } else { return g_156; } if ((safe_add_func_uint64_t_u_u((safe_div_func_uint32_t_u_u(0xA24D4EC0L, 
l_193[3])), g_104))) { uint16_t l_202 = 0x9608L; if (g_5) break; if (g_17) break; l_199--; l_202++; } else { uint32_t l_207 = 4294967286UL; int32_t l_224 = 0x7E5EAF47L; g_2[2] |= (((safe_add_func_uint32_t_u_u(l_207, l_208)) > 0L) | g_29[3]); for (l_194 = 2; (l_194 <= 9); l_194 += 1) { int i; g_5 = ((((((safe_mul_func_uint8_t_u_u((l_193[6] = g_17), 0xF4L)) <= g_2[1]) , 0x93L) || g_20) == g_29[4]) <= g_29[l_39]); return g_29[l_39]; } if (((safe_mul_func_uint8_t_u_u(l_19, 0x33L)) == 6UL)) { for (l_208 = 0; (l_208 <= 9); l_208 += 1) { int i; l_193[(l_39 + 2)] = 0x99CF0CBDL; if (l_193[l_208]) break; g_42 &= (~(g_218 |= (safe_add_func_int16_t_s_s(((safe_mod_func_int8_t_s_s((g_17 = g_2[l_39]), g_29[1])) , g_5), 0x746CL)))); return g_218; } if (g_5) continue; if (g_156) goto lbl_175; } else { int32_t l_225 = 1L; for (g_20 = 0; (g_20 == 26); g_20++) { return g_42; } --g_226; g_5 |= (g_2[1] = (g_42 &= g_29[5])); } g_42 = (safe_mul_func_int8_t_s_s(l_159, 1L)); } } } return l_39; } int main (int argc, char* argv[]) { int i; int print_hash_value = 0; if (argc == 2 && strcmp(argv[1], "1") == 0) print_hash_value = 1; platform_main_begin(); crc32_gentab(); func_1(); for (i = 0; i < 8; i++) { transparent_crc(g_2[i], "g_2[i]", print_hash_value); if (print_hash_value) printf("index = [%d]\n", i); } transparent_crc(g_5, "g_5", print_hash_value); transparent_crc(g_17, "g_17", print_hash_value); transparent_crc(g_20, "g_20", print_hash_value); for (i = 0; i < 8; i++) { transparent_crc(g_29[i], "g_29[i]", print_hash_value); if (print_hash_value) printf("index = [%d]\n", i); } transparent_crc(g_30, "g_30", print_hash_value); transparent_crc(g_42, "g_42", print_hash_value); transparent_crc(g_78, "g_78", print_hash_value); transparent_crc(g_104, "g_104", print_hash_value); transparent_crc(g_114, "g_114", print_hash_value); transparent_crc(g_129, "g_129", print_hash_value); transparent_crc(g_135, "g_135", print_hash_value); transparent_crc(g_156, "g_156", print_hash_value); 
transparent_crc(g_218, "g_218", print_hash_value); for (i = 0; i < 9; i++) { transparent_crc(g_223[i], "g_223[i]", print_hash_value); if (print_hash_value) printf("index = [%d]\n", i); } transparent_crc(g_226, "g_226", print_hash_value); platform_main_end(crc32_context ^ 0xFFFFFFFFUL, print_hash_value); return 0; }
541837.c
#include <stdio.h>

/*
 * Prints "recursive" once per countdown level from i down to 1.
 * NOTE(review): the name "sort" is misleading -- nothing is sorted --
 * but it is kept unchanged to preserve the external interface.
 *
 * Fixes vs. the original:
 *  - the non-void function previously fell off the end on the
 *    recursive path (undefined behavior per C11 6.9.1p12 if the
 *    caller ever used the result); it now returns 0 on every path.
 *  - i <= 0 is treated as the base case, so a negative input no
 *    longer recurses forever (stack overflow); previously only
 *    i == 0 terminated the recursion.
 *
 * Returns: 0 always.
 */
int sort(int i)
{
    if (i <= 0)
        return 0;
    puts("recursive");
    return sort(i - 1);
}

int main(void)
{
    int n;

    /* Reject malformed input: the original left n uninitialized
     * (undefined behavior) when scanf failed to match an integer. */
    if (scanf("%d", &n) != 1)
        return 1;
    sort(n);
    return 0;
}
789561.c
/** @file evolve.c @brief This file contains all the core VPLANET integration routines including the timestepping algorithm and the Runge-Kutta Integration scheme. @author Rory Barnes ([RoryBarnes](https://github.com/RoryBarnes/)) @date May 2014 */ #include <stdio.h> #include <math.h> #include <assert.h> #include <stdlib.h> #include "vplanet.h" void PropsAuxGeneral(BODY *body,CONTROL *control) { /* Recompute the mean motion, necessary for most modules */ int iBody; // Dummy counting variable for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) { if (iBody != 0 && body[iBody].bBinary == 0) { body[iBody].dMeanMotion = fdSemiToMeanMotion(body[iBody].dSemi,(body[0].dMass+body[iBody].dMass)); } } } void PropertiesAuxiliary(BODY *body,CONTROL *control,UPDATE *update) { /* Evaluate single and multi-module auxialliary functions to update parameters * of interest such as mean motion. */ int iBody,iModule; // Dummy counter variables PropsAuxGeneral(body,control); /* Get properties from each module */ for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) { // Uni-module properties for (iModule=0;iModule<control->Evolve.iNumModules[iBody];iModule++) control->fnPropsAux[iBody][iModule](body,&control->Evolve,update,iBody); // Multi-module properties for (iModule=0;iModule<control->iNumMultiProps[iBody];iModule++) control->fnPropsAuxMulti[iBody][iModule](body,&control->Evolve,update,iBody); } } /* * Integration Control */ double AssignDt(double dMin,double dNextOutput,double dEta) { /* Compute the next timestep, dt, making sure it's not larger than the output * cadence */ dMin = dEta * dMin; if (dNextOutput < dMin) { dMin = dNextOutput; } return dMin; } double fdNextOutput(double dTime,double dOutputInterval) { /* Compute when the next timestep occurs. 
*/ int nSteps; // Number of outputs so far /* Number of output so far */ nSteps = (int)(dTime/dOutputInterval); /* Next output is one more */ return (nSteps+1.0)*dOutputInterval; } double fdGetTimeStep(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate) { /* Fills the Update arrays with the derivatives * or new values. It returns the smallest timescale for use * in variable timestepping. Uses either a 4th order Runge-Kutte integrator or * an Euler step. */ int iBody,iVar,iEqn; // Dummy counting variables EVOLVE integr; // Dummy EVOLVE struct so we don't have to dereference control a lot double dVarNow,dMinNow,dMin=HUGE,dVarTotal; // Intermediate storage variables integr = control->Evolve; // XXX Change Eqn to Proc? for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) { if (update[iBody].iNumVars > 0) { for (iVar=0;iVar<update[iBody].iNumVars;iVar++) { // The parameter does not require a derivative, but is calculated explicitly as a function of age. if (update[iBody].iaType[iVar][0] == 0) { dVarNow = *update[iBody].pdVar[iVar]; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); } if (control->Evolve.bFirstStep) { dMin = integr.dTimeStep; control->Evolve.bFirstStep = 0; } else { /* Sum over all equations giving new value of the variable */ dVarTotal = 0.; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { dVarTotal += update[iBody].daDerivProc[iVar][iEqn]; } // Prevent division by zero if (dVarNow != dVarTotal) { dMinNow = fabs(dVarNow/((dVarNow - dVarTotal)/integr.dTimeStep)); if (dMinNow < dMin) dMin = dMinNow; } } } /* The parameter does not require a derivative, but is calculated explicitly as a function of age and can oscillate through 0 (like circumbinary position, velocities). 10 because binary! 
*/ else if (update[iBody].iaType[iVar][0] == 10) { dVarNow = *update[iBody].pdVar[iVar]; // Something like amp = body.CBPAmp[iVar]; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); } if (control->Evolve.bFirstStep) { dMin = integr.dTimeStep; control->Evolve.bFirstStep = 0; } else { /* Sum over all equations giving new value of the variable */ dVarTotal = 0.; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { dVarTotal += update[iBody].daDerivProc[iVar][iEqn]; } // Prevent division by zero if (fabs(dVarNow - dVarTotal) > 1.0e-5) { dMinNow = fabs(dVarNow/((dVarNow - dVarTotal)/integr.dTimeStep)); if (dMinNow < dMin && dMinNow > DAYSEC) // Don't resolve things on < 1 day scales (dflemin3 ad-hoc assumption) { dMin = dMinNow; } } } } /* The parameter does not require a derivative, but is calculated explicitly as a function of age and is a sinusoidal quantity (e.g. h,k,p,q in DistOrb) */ else if (update[iBody].iaType[iVar][0] == 3) { dVarNow = *update[iBody].pdVar[iVar]; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); } if (control->Evolve.bFirstStep) { dMin = integr.dTimeStep; control->Evolve.bFirstStep = 0; } else { /* Sum over all equations giving new value of the variable */ dVarTotal = 0.; for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { dVarTotal += update[iBody].daDerivProc[iVar][iEqn]; } // Prevent division by zero if (dVarNow != dVarTotal) { dMinNow = fabs(1.0/((dVarNow - dVarTotal)/integr.dTimeStep)); if (dMinNow < dMin) dMin = dMinNow; } } } /* The parameter is a "polar/sinusoidal quantity" and controlled by a time derivative */ else { for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { if (update[iBody].iaType[iVar][iEqn] == 2) { update[iBody].daDerivProc[iVar][iEqn] = 
fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); //if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) { if (update[iBody].daDerivProc[iVar][iEqn] != 0) { /* ?Obl require special treatment because they can overconstrain obliquity and PrecA */ if (iVar == update[iBody].iXobl || iVar == update[iBody].iYobl || iVar == update[iBody].iZobl) { if (body[iBody].dObliquity != 0) dMinNow = fabs(sin(body[iBody].dObliquity)/update[iBody].daDerivProc[iVar][iEqn]); } else if (iVar == update[iBody].iHecc || iVar == update[iBody].iKecc) { if (body[iBody].dEcc != 0) dMinNow = fabs(body[iBody].dEcc/update[iBody].daDerivProc[iVar][iEqn]); } else { dMinNow = fabs(1.0/update[iBody].daDerivProc[iVar][iEqn]); } if (dMinNow < dMin) dMin = dMinNow; } } // enforce a minimum step size for ice sheets, otherwise dDt -> 0 real fast else if (update[iBody].iaType[iVar][iEqn] == 9) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) { dMinNow = fabs((*(update[iBody].pdVar[iVar]))/update[iBody].daDerivProc[iVar][iEqn]); if (dMinNow < dMin) { if (dMinNow < control->Halt[iBody].iMinIceDt*(2*PI/body[iBody].dMeanMotion)/control->Evolve.dEta) { dMin = control->Halt[iBody].iMinIceDt*(2*PI/body[iBody].dMeanMotion)/control->Evolve.dEta; } else { dMin = dMinNow; } } } } // SpiNBody timestep: semi-temporary hack XXX // dt = r^2/v^2 // r: Position vector // v: Velocity vector // Inefficient? 
else if (update[iBody].iaType[iVar][iEqn] == 7) { if ( (control->Evolve.bSpiNBodyDistOrb==0) || (control->Evolve.bUsingSpiNBody==1) ) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); dMinNow = sqrt((body[iBody].dPositionX*body[iBody].dPositionX+body[iBody].dPositionY*body[iBody].dPositionY+body[iBody].dPositionZ*body[iBody].dPositionZ) /(body[iBody].dVelX*body[iBody].dVelX+body[iBody].dVelY*body[iBody].dVelY+body[iBody].dVelZ*body[iBody].dVelZ)); if (dMinNow < dMin) dMin = dMinNow; } } else { // The parameter is controlled by a time derivative update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); if (!bFloatComparison(update[iBody].daDerivProc[iVar][iEqn],0.0) && !bFloatComparison(*(update[iBody].pdVar[iVar]),0.0)) { //if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) { // Obselete float comparison dMinNow = fabs((*(update[iBody].pdVar[iVar]))/update[iBody].daDerivProc[iVar][iEqn]); if (dMinNow < dMin) dMin = dMinNow; } } } } } } } return dMin; } void fdGetUpdateInfo(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate) { /* Fills the Update arrays with the derivatives * or new values.. */ int iBody,iVar,iEqn,iNumBodies,iNumVars,iNumEqns; // Dummy counting variables EVOLVE integr; // Dummy EVOLVE struct so we don't have to dereference control a lot double dVarNow,dMinNow,dMin=HUGE,dVarTotal; // Intermediate storage variables integr = control->Evolve; // XXX Change Eqn to Proc? 
iNumBodies = control->Evolve.iNumBodies; for (iBody=0;iBody<iNumBodies;iBody++) { if (update[iBody].iNumVars > 0) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn=0;iEqn<iNumEqns;iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]); } } } } } void EulerStep(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,double *dDt,int iDir) { /* Compute and apply an Euler update step to a given parameter (x = dx/dt * dt) */ int iBody,iVar,iEqn; double dTimeOut,dFoo; /* Adjust dt? */ if (control->Evolve.bVarDt) { dTimeOut = fdNextOutput(control->Evolve.dTime,control->Io.dOutputTime); /* This is minimum dynamical timescale */ *dDt = fdGetTimeStep(body,control,system,update,fnUpdate); *dDt = AssignDt(*dDt,(dTimeOut - control->Evolve.dTime),control->Evolve.dEta); } for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) { for (iVar=0;iVar<update[iBody].iNumVars;iVar++) { for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) { if (update[iBody].iaType[iVar][iEqn] == 0) /* XXX This looks broken */ *(update[iBody].pdVar[iVar]) = update[iBody].daDerivProc[iVar][iEqn]; else { /* Update the parameter in the BODY struct! Be careful! */ *(update[iBody].pdVar[iVar]) += iDir*update[iBody].daDerivProc[iVar][iEqn]*(*dDt); } } } } } void RungeKutta4Step(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,double *dDt,int iDir) { /* Compute and apply a 4th order Runge-Kutta update step a given parameter. 
*/ int iBody,iVar,iEqn,iSubStep,iNumBodies,iNumVars,iNumEqns; double dTimeOut,dFoo,dDelta; EVOLVE *evolve = &(control->Evolve); // Save Evolve as a variable for speed and legibility /* Create a copy of BODY array */ BodyCopy(evolve->tmpBody,body,&control->Evolve); /* Verify that rotation angles behave correctly in an eqtide-only run if (evolve->tmpBody[1].dPrecA != 0) printf("PrecA = %e\n",evolve->tmpBody[1].dPrecA); XXX */ /* Derivatives at start */ *dDt = fdGetTimeStep(body,control,system,evolve->tmpUpdate,fnUpdate); /* Adjust dt? */ if (evolve->bVarDt) { dTimeOut = fdNextOutput(evolve->dTime,control->Io.dOutputTime); /* This is minimum dynamical timescale */ *dDt = AssignDt(*dDt,(dTimeOut - evolve->dTime),evolve->dEta); } else *dDt = evolve->dTimeStep; evolve->dCurrentDt = *dDt; /* XXX Should each eqn be updated separately? Each parameter at a midpoint is moved by all the modules operating on it together. Does RK4 require the equations to be independent over the full step? */ iNumBodies = evolve->iNumBodies; for (iBody=0;iBody<iNumBodies;iBody++) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { evolve->daDeriv[0][iBody][iVar] = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn=0;iEqn<iNumEqns;iEqn++) { // XXX Set update.dDxDtModule here? 
evolve->daDeriv[0][iBody][iVar] += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; //evolve->daTmpVal[0][iBody][iVar] += (*dDt)*iDir*evolve->tmpUpdate[iBody].daDeriv[iVar][iEqn]; } if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){ // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be // evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the midpoint of the timestep */ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5*(*dDt)*evolve->daDeriv[0][iBody][iVar]; } } } /* First midpoint derivative.*/ PropertiesAuxiliary(evolve->tmpBody,control,update); /* Don't need this timestep info, so assign output to dFoo */ fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate); for (iBody=0;iBody<iNumBodies;iBody++) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { evolve->daDeriv[1][iBody][iVar] = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn=0;iEqn<iNumEqns;iEqn++) { evolve->daDeriv[1][iBody][iVar] += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){ // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be // evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[1][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the midpoint of the timestep based on the midpoint derivative. 
*/ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5*(*dDt)*evolve->daDeriv[1][iBody][iVar]; } } } /* Second midpoint derivative */ PropertiesAuxiliary(evolve->tmpBody,control,update); fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate); for (iBody=0;iBody<iNumBodies;iBody++) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { evolve->daDeriv[2][iBody][iVar] = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn=0;iEqn<iNumEqns;iEqn++) { evolve->daDeriv[2][iBody][iVar] += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){ // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be // evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[2][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the end of the timestep based on the second midpoint derivative. */ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + *dDt*evolve->daDeriv[2][iBody][iVar]; } } } /* Full step derivative */ PropertiesAuxiliary(evolve->tmpBody,control,update); fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate); for (iBody=0;iBody<iNumBodies;iBody++) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){ // NOTHING! } else { evolve->daDeriv[3][iBody][iVar] = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn=0;iEqn<iNumEqns;iEqn++) { evolve->daDeriv[3][iBody][iVar] += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } } } } /* Now do the update -- Note the pointer to the home of the actual variables!!! 
*/ for (iBody=0;iBody<iNumBodies;iBody++) { iNumVars = update[iBody].iNumVars; for (iVar=0;iVar<iNumVars;iVar++) { update[iBody].daDeriv[iVar] = 1./6*(evolve->daDeriv[0][iBody][iVar] + 2*evolve->daDeriv[1][iBody][iVar] + 2*evolve->daDeriv[2][iBody][iVar] + evolve->daDeriv[3][iBody][iVar]); if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){ // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest *(update[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar]; } else { *(update[iBody].pdVar[iVar]) += update[iBody].daDeriv[iVar]*(*dDt); } } } } /* * Evolution Subroutine */ void Evolve(BODY *body,CONTROL *control,FILES *files,MODULE *module,OUTPUT *output,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,fnWriteOutput *fnWrite,fnIntegrate fnOneStep) { /* Master evolution routine that controls the simulation integration. */ int iDir,iBody,iModule,nSteps; // Dummy counting variables double dTimeOut; // When to output next double dDt,dFoo; // Next timestep, dummy variable double dEqSpinRate; // Store the equilibrium spin rate control->Evolve.nSteps=0; nSteps=0; if (control->Evolve.bDoForward) iDir=1; else iDir=-1; dTimeOut = fdNextOutput(control->Evolve.dTime,control->Io.dOutputTime); PropertiesAuxiliary(body,control,update); // Get derivatives at start, useful for logging dDt = fdGetTimeStep(body,control,system,update,fnUpdate); /* Adjust dt? */ if (control->Evolve.bVarDt) { /* Get time to next output */ dTimeOut = fdNextOutput(control->Evolve.dTime,control->Io.dOutputTime); /* Now choose the correct timestep */ dDt = AssignDt(dDt,(dTimeOut - control->Evolve.dTime),control->Evolve.dEta); } else dDt = control->Evolve.dTimeStep; /* Write out initial conditions */ WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,dDt); /* If Runge-Kutta need to copy actual update to that in control->Evolve. 
This transfer all the meta-data about the struct. */ UpdateCopy(control->Evolve.tmpUpdate,update,control->Evolve.iNumBodies); /* * * Main loop begins here * */ while (control->Evolve.dTime < control->Evolve.dStopTime) { /* Take one step */ fnOneStep(body,control,system,update,fnUpdate,&dDt,iDir); for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) { for (iModule=0;iModule<control->Evolve.iNumModules[iBody];iModule++) control->fnForceBehavior[iBody][iModule](body,module,&control->Evolve,&control->Io,system,update,fnUpdate,iBody,iModule); for (iModule=0;iModule<control->iNumMultiForce[iBody];iModule++) control->fnForceBehaviorMulti[iBody][iModule](body,module,&control->Evolve,&control->Io,system,update,fnUpdate,iModule,iBody); } /* Halt? */ if (fbCheckHalt(body,control,update)) { /* Use dummy variable as dDt is used for the integration. * Here we just want the instantaneous derivatives. * This should make the output self-consistent. */ fdGetUpdateInfo(body,control,system,update,fnUpdate); WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,control->Io.dOutputTime/control->Evolve.nSteps); return; } for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) body[iBody].dAge += iDir*dDt; control->Evolve.dTime += dDt; nSteps++; /* Time for Output? */ if (control->Evolve.dTime >= dTimeOut) { fdGetUpdateInfo(body,control,system,update,fnUpdate); WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,control->Io.dOutputTime/control->Evolve.nSteps); dTimeOut = fdNextOutput(control->Evolve.dTime,control->Io.dOutputTime); control->Evolve.nSteps += nSteps; nSteps=0; } /* Get auxiliary properties for next step -- first call was prior to loop. */ PropertiesAuxiliary(body,control,update); // If control->Evolve.bFirstStep hasn't been switched off by now, do so. 
if (control->Evolve.bFirstStep) { control->Evolve.bFirstStep = 0; } } if (control->Io.iVerbose >= VERBPROG) printf("Evolution completed.\n"); // printf("%d\n",body[1].iBadImpulse); }
680514.c
/* * Project name: Wireless Water Level and Hot Water Temperature Indicator * Copyright: Cagan Cerkez, August 2014 * Description: Water level is measured using a self-made 10-level reed switch float level probe. Reed switches are sealed inside a plastic pipe, so no contact with water. A magnet, also sealed inside a float around the vertically placed reed switch pipe turns on reed switches, whcih are read to find the water level. Hot water temperature is measured using an MCP9700A temperature sensor. Values are transmitted via a Microchip MRF89XAM8A Transciever. Device is battery powered. Schematic is in the project folder. Timer1 and 32.768kHz crystal is used during sleep for improved/consistent sleep periods so the RX unit can synchronize reception more successfully. * Configuration: MCU: PIC16F883 Oscillator: 8.000 MHz (using internal oscillator) SW: mikroC PRO for PIC * Connections: Battery: x1 D battery Level probe: AN11 & AN13 MCP9700A: Vs (RB1), Vout (RB0, AN12) MRF89XAM8A: CSCON (RC6), CSDATA (RC2), SPI (RC3 RC4 RC5) Low-voltage ICSP: RB6 and RB7 reserved for PGD and PGC * NOTES: - Send 203 at the 1st, 2nd, 9th and 10th packets to indicate continuous transmit mode during the initial power on - RB3 should be grounded during low voltage ICSP */ // Static constant for the initial continuous transmit mode, will continue for 10 minutes, PIC will transmit every ~4 seconds // so the counter will need to count till 150 (5 min = 600 seconds / 4 = 150) static const TX_mode_counter = 5; static const TMR1_sleep = 19; // TRM1 overflows every 16s, TRM1_sleep = 18 is about 04:48, plus 1/2 TMR1_sleep will be assigned to TRM1H:L so total sleep_time -> 04:56 static const SYNC_word1 = 0xCC; // Network address btye 1 (between 0-255) static const SYNC_word2 = 0xCC; // Network address btye 2 (between 0-255) static const SYNC_word3 = 0xCC; // Network address btye 3 (between 0-255) static const SYNC_word4 = 0xCC; // Network address btye 4 (between 0-255) static const 
unique_node_adrs = 10; // Node address (between 0-255) static const payload_lngth = 0b00000110; // Payload length set to 6 (1 node address byte, level byte, temp byte, battery chck byte, transmit # byte, TX mode byte. Bit 7 = 0 disables Manchester encoding // ADC reference voltage is 3.3V (3300 mv / 1024 steps = 3.22) static const lowbat_threshold = 310; // ADC results: Battery voltage 1.6V (497), 1.5V(465),1.0V(310), 0.9V(279), 0.8V(248), 0.75V(232), 0.7V(216), 0.65V(201), 0.6V(186) unsigned short TMR1_counter = 0; // Counter for TIMER1 #include "built_in.h" #include "Registers.h" #include "blink_led.h" #include "TRX_config.h" #include "water_level.h" #include "temperature.h" #include "battery.h" #include "transmit_data.h" // Interrupt service routine void interrupt() { if (TMR1IF_bit) { // Check if interrupt is due to TMR1 overflow TMR1_counter++; // Counter incremented on every TRM1 overflow if (TMR1_counter == (TMR1_sleep - 1)) { TMR1H = 0b10000000; // Move TMR1H:L to the mid value so the next overflow will be 8s later (instead of 16s) TMR1L = 0b00000000; T1CON.T1CKPS0 = 1; // TMR1 prescaler set to 1:8, TRM1 will interrupt every 16 seconds with 32.768 kHz crystal T1CON.T1CKPS1 = 1; } TMR1IF_bit = 0; // Clear TMR1IF //TMR1H = 0x00; // Refer to AN580 (Using Timer1 in Asynchronous Clock Mode) for TMR1 & Sleep mode //TMR1L = 0x00; // !!!! LOADING TRM1H & TMR1L with anything clears Timer1 prescaler to 1!!!!!!! Be careful if prescaler is set differently. 
} } void main(){ ANSEL = 0b00000010; // RA1 (AN1) (battery voltage) analog, rest digital ANSELH = 0b00110000; // RB0 (AN12) (temp sensor), RB5 (AN13)(reed level sensor) analog, rest digital TRISA = 0b00000010; // RA1 (AN1) (battery voltage) input, rest output TRISB = 0b00100001; // RB5 (reed level sensor) & RB0 input (temp sensor), rest output TRISC = 0b00010011; // RC0 & RC1 (TMR1 crystal) and RC4 (SDI) input, rest output // TRISD = 0x00; // Set direction to be output // TRISE = 0x00; // Set direction to be output T_pow_Direction = 0; // Port to Temp sensor power pin set as output Tin_Direction = 1; // Port to Temp sensor output pin set as input PORTA = 0x00; PORTB = 0x00; //PORTC = 0x00; // Do not set SPI ports to 0! //PORTD = 0x00; //PORTE = 0x00; OSCCON = 0x71; // Set internal oscillator to 8Mhz (p. 64) // page 187/261 of 'PIC Microcontrollers Book' //OSCCON = 0x61; // Set internal oscillator to 4Mhz (p. 64) CSCON = 1; // Deselect chips CSDAT = 1; //IRQ0_Direction = 1; //IRQ0 pin direction input //IRQ1_Direction = 1; //IRQ1 pin direction input //RST_Direction = 0; //RST pin direction output CSCON_Direction = 0; //CSCON pin direction output CSDAT_Direction = 0; //CSDAT pin direction output // Setup Interrupts INTCON.GIE = 1; // Enable global interrupt INTCON.PEIE = 1; // Enable peripheral interrupt // Setup TMR0 INTCON.T0IE = 0; // Timer0 disabled // Setup Comparator1 CM1CON0.C1ON = 0; // Comparator 1 off (p.90) // Setup Comparator2 CM2CON0.C2ON = 0; // Comparator 2 off (p.90) // Set Comparator Voltage Reference VRCON.VREN = 0; // Disable Comparator1 Voltage Reference // TMR1 setup PIE1.TMR1IE = 1; // Enable Timer1 interrupt INTCON.PEIE = 1; INTCON.GIE = 1; // Enable Global interrupts T1CON.TMR1CS = 1; // TMR1 clock source is supplied externally (the 32.768 kHz crystal) T1CON.TMR1GE = 0; // Disable TMR1 Gate control T1CON.T1OSCEN = 1; // Enable external crystal connection on RA6 (T1OSO) & RA7 (T1OSI) T1CON.T1SYNC = 1; // Do NOT synchronize TRM1 clock with 
internal clock so TMR1 works during sleep T1CON.TMR1ON = 0; // Timer1 off TMR1H = 0x00; // !!!! LOADING TRM1H & TMR1L with anything clears Timer1 prescaler to 1!!!!!!! Be careful if prescaler is set differently. TMR1L = 0x00; T1CON.T1CKPS0 = 1; // TMR1 prescaler set to 1:8, TRM1 will interrupt every 16 seconds with 32.768 kHz crystal T1CON.T1CKPS1 = 1; Delay_ms(50); // Delay to wait for TMR1 LP oscillator stabilizaton // PS. minimum 10ms delay at POR or manual reset blink_led(); blink_led(); // Initialize SPI module // Max SPI clock for CONFIG mode is 6 MHz, for DATA mode (to read/write FIFO) is 1 MHz // for Fosc = 8MHz, _SPI_MASTER_OSC_DIV16 gives SPI clock of 0.5Mhz // Data is received by TRX through SDI pin and is clocked on the rising edge of SCK. // MRF89XA sends data through SDO pin and is clocked out on the falling edge of SCK SPI1_Init_Advanced(_SPI_MASTER_OSC_DIV16, _SPI_DATA_SAMPLE_MIDDLE, _SPI_CLK_IDLE_LOW, _SPI_LOW_2_HIGH); Delay_ms(100); initialize_TRX(); // Initialize TRX module //PLL_lock(); // Wait for the PLL lock, puts TRX into Freq Synth mode set_chip_mode(4); // 4 (Standby mode) Delay_ms(15); // Short delay while(1){ measure_temperature(); // Measure temperature measure_level(); // Measure water level check_battery(); // Check battery voltage set_chip_mode(4); // 4 (Standby mode) Delay_ms(15); // Short delay PLL_lock(); // Wait for the PLL lock, puts TRX into Freq Synth mode // FIRST DATA TRANSMISSION transmit_data1(); // FIRST TRANSMISSION blink_led(); // Going to Sleep mode clears FIFO set_chip_mode(4); // 4 (Standby mode) // SECONDS DATA TRANSMISSION if normal TX mode began if (initial_TX_mode >= TX_mode_counter) { Delay_ms(2500); // Second transmission about 3 seconds later PLL_lock(); // Wait for the PLL lock, puts TRX into Freq Synth mode transmit_data2(); // SECOND TRANSMISSION blink_led(); // Going to Sleep mode clears FIFO } set_chip_mode(5); // 5 (TRX put to Sleep mode) // Initial continuous transmit mode will continue for 10 minutes, 
PIC will transmit every 3 seconds // so the counter will need to count till 200 (600 seconds / 3 = 200) if (initial_TX_mode >= TX_mode_counter) { // Go to SLEEP T1CON.TMR1ON = 1; // Timer1 on while (TMR1_counter < TMR1_sleep) { asm {sleep}; // Go to sleep } TMR1_counter = 0; } else { Delay_ms(3500); // Wait for ~3-4 seconds and transmit again initial_TX_mode++; // Increase the counter T1CON.TMR1ON = 0; // Timer1 off if (initial_TX_mode >= TX_mode_counter) { blink_led(); blink_led(); blink_led(); blink_led(); T1CON.TMR1ON = 1; // Timer1 on } } } }
421188.c
/* * STMicroelectronics ConneXt (STA2X11) GPIO driver * * Copyright 2012 ST Microelectronics (Alessandro Rubini) * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd. * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/mfd/sta2x11-mfd.h> struct gsta_regs { u32 dat; /* 0x00 */ u32 dats; u32 datc; u32 pdis; u32 dir; /* 0x10 */ u32 dirs; u32 dirc; u32 unused_1c; u32 afsela; /* 0x20 */ u32 unused_24[7]; u32 rimsc; /* 0x40 */ u32 fimsc; u32 is; u32 ic; }; struct gsta_gpio { spinlock_t lock; struct device *dev; void __iomem *reg_base; struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS]; struct gpio_chip gpio; int irq_base; /* FIXME: save the whole config here (AF, ...) 
*/ unsigned irq_type[GSTA_NR_GPIO]; }; static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr) { return chip->regs[nr / GSTA_GPIO_PER_BLOCK]; } static inline u32 __bit(int nr) { return 1U << (nr % GSTA_GPIO_PER_BLOCK); } /* * gpio methods */ static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) { struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio); struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); if (val) writel(bit, &regs->dats); else writel(bit, &regs->datc); } static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr) { struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio); struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); return readl(&regs->dat) & bit; } static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, int val) { struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio); struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); writel(bit, &regs->dirs); /* Data register after direction, otherwise pullup/down is selected */ if (val) writel(bit, &regs->dats); else writel(bit, &regs->datc); return 0; } static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) { struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio); struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); writel(bit, &regs->dirc); return 0; } static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset) { struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio); return chip->irq_base + offset; } static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */ { struct gpio_chip *gpio = &chip->gpio; /* * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts * from the end. 
However, for compatibility, we need the first * ConneXt device to start from gpio 0: it's the main chipset * on most boards so documents and drivers assume gpio0..gpio127 */ static int gpio_base; gpio->label = dev_name(chip->dev); gpio->owner = THIS_MODULE; gpio->direction_input = gsta_gpio_direction_input; gpio->get = gsta_gpio_get; gpio->direction_output = gsta_gpio_direction_output; gpio->set = gsta_gpio_set; gpio->dbg_show = NULL; gpio->base = gpio_base; gpio->ngpio = GSTA_NR_GPIO; gpio->can_sleep = 0; gpio->to_irq = gsta_gpio_to_irq; /* * After the first device, turn to dynamic gpio numbers. * For example, with ARCH_NR_GPIOS = 256 we can fit two cards */ if (!gpio_base) gpio_base = -1; } /* * Special method: alternate functions and pullup/pulldown. This is only * invoked on startup to configure gpio's according to platform data. * FIXME : this functionality shall be managed (and exported to other drivers) * via the pin control subsystem. */ static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg) { struct gsta_regs __iomem *regs = __regs(chip, nr); unsigned long flags; u32 bit = __bit(nr); u32 val; int err = 0; pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg); if (cfg == PINMUX_TYPE_NONE) return; /* Alternate function or not? 
*/ spin_lock_irqsave(&chip->lock, flags); val = readl(&regs->afsela); if (cfg == PINMUX_TYPE_FUNCTION) val |= bit; else val &= ~bit; writel(val | bit, &regs->afsela); if (cfg == PINMUX_TYPE_FUNCTION) { spin_unlock_irqrestore(&chip->lock, flags); return; } /* not alternate function: set details */ switch (cfg) { case PINMUX_TYPE_OUTPUT_LOW: writel(bit, &regs->dirs); writel(bit, &regs->datc); break; case PINMUX_TYPE_OUTPUT_HIGH: writel(bit, &regs->dirs); writel(bit, &regs->dats); break; case PINMUX_TYPE_INPUT: writel(bit, &regs->dirc); val = readl(&regs->pdis) | bit; writel(val, &regs->pdis); break; case PINMUX_TYPE_INPUT_PULLUP: writel(bit, &regs->dirc); val = readl(&regs->pdis) & ~bit; writel(val, &regs->pdis); writel(bit, &regs->dats); break; case PINMUX_TYPE_INPUT_PULLDOWN: writel(bit, &regs->dirc); val = readl(&regs->pdis) & ~bit; writel(val, &regs->pdis); writel(bit, &regs->datc); break; default: err = 1; } spin_unlock_irqrestore(&chip->lock, flags); if (err) pr_err("%s: chip %p, pin %i, cfg %i is invalid\n", __func__, chip, nr, cfg); } /* * Irq methods */ static void gsta_irq_disable(struct irq_data *data) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct gsta_gpio *chip = gc->private; int nr = data->irq - chip->irq_base; struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); u32 val; unsigned long flags; spin_lock_irqsave(&chip->lock, flags); if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) { val = readl(&regs->rimsc) & ~bit; writel(val, &regs->rimsc); } if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) { val = readl(&regs->fimsc) & ~bit; writel(val, &regs->fimsc); } spin_unlock_irqrestore(&chip->lock, flags); return; } static void gsta_irq_enable(struct irq_data *data) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct gsta_gpio *chip = gc->private; int nr = data->irq - chip->irq_base; struct gsta_regs __iomem *regs = __regs(chip, nr); u32 bit = __bit(nr); u32 val; int type; unsigned long flags; type 
= chip->irq_type[nr]; spin_lock_irqsave(&chip->lock, flags); val = readl(&regs->rimsc); if (type & IRQ_TYPE_EDGE_RISING) writel(val | bit, &regs->rimsc); else writel(val & ~bit, &regs->rimsc); val = readl(&regs->rimsc); if (type & IRQ_TYPE_EDGE_FALLING) writel(val | bit, &regs->fimsc); else writel(val & ~bit, &regs->fimsc); spin_unlock_irqrestore(&chip->lock, flags); return; } static int gsta_irq_type(struct irq_data *d, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct gsta_gpio *chip = gc->private; int nr = d->irq - chip->irq_base; /* We only support edge interrupts */ if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { pr_debug("%s: unsupported type 0x%x\n", __func__, type); return -EINVAL; } chip->irq_type[nr] = type; /* used for enable/disable */ gsta_irq_enable(d); return 0; } static irqreturn_t gsta_gpio_handler(int irq, void *dev_id) { struct gsta_gpio *chip = dev_id; struct gsta_regs __iomem *regs; u32 is; int i, nr, base; irqreturn_t ret = IRQ_NONE; for (i = 0; i < GSTA_NR_BLOCKS; i++) { regs = chip->regs[i]; base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK; while ((is = readl(&regs->is))) { nr = __ffs(is); irq = base + nr; generic_handle_irq(irq); writel(1 << nr, &regs->ic); ret = IRQ_HANDLED; } } return ret; } static void gsta_alloc_irq_chip(struct gsta_gpio *chip) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base, chip->reg_base, handle_simple_irq); gc->private = chip; ct = gc->chip_types; ct->chip.irq_set_type = gsta_irq_type; ct->chip.irq_disable = gsta_irq_disable; ct->chip.irq_enable = gsta_irq_enable; /* FIXME: this makes at most 32 interrupts. 
Request 0 by now */ irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0, IRQ_NOREQUEST | IRQ_NOPROBE, 0); /* Set up all all 128 interrupts: code from setup_generic_chip */ { struct irq_chip_type *ct = gc->chip_types; int i, j; for (j = 0; j < GSTA_NR_GPIO; j++) { i = chip->irq_base + j; irq_set_chip_and_handler(i, &ct->chip, ct->handler); irq_set_chip_data(i, gc); irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0); } gc->irq_cnt = i - gc->irq_base; } } /* The platform device used here is instantiated by the MFD device */ static int gsta_probe(struct platform_device *dev) { int i, err; struct pci_dev *pdev; struct sta2x11_gpio_pdata *gpio_pdata; struct gsta_gpio *chip; struct resource *res; pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev); gpio_pdata = dev_get_platdata(&pdev->dev); if (gpio_pdata == NULL) dev_err(&dev->dev, "no gpio config\n"); pr_debug("gpio config: %p\n", gpio_pdata); res = platform_get_resource(dev, IORESOURCE_MEM, 0); chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->dev = &dev->dev; chip->reg_base = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(chip->reg_base)) return PTR_ERR(chip->reg_base); for (i = 0; i < GSTA_NR_BLOCKS; i++) { chip->regs[i] = chip->reg_base + i * 4096; /* disable all irqs */ writel(0, &chip->regs[i]->rimsc); writel(0, &chip->regs[i]->fimsc); writel(~0, &chip->regs[i]->ic); } spin_lock_init(&chip->lock); gsta_gpio_setup(chip); if (gpio_pdata) for (i = 0; i < GSTA_NR_GPIO; i++) gsta_set_config(chip, i, gpio_pdata->pinconfig[i]); /* 384 was used in previous code: be compatible for other drivers */ err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE); if (err < 0) { dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n", -err); return err; } chip->irq_base = err; gsta_alloc_irq_chip(chip); err = request_irq(pdev->irq, gsta_gpio_handler, IRQF_SHARED, KBUILD_MODNAME, chip); if (err < 0) { dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n", 
-err); goto err_free_descs; } err = gpiochip_add(&chip->gpio); if (err < 0) { dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n", -err); goto err_free_irq; } platform_set_drvdata(dev, chip); return 0; err_free_irq: free_irq(pdev->irq, chip); err_free_descs: irq_free_descs(chip->irq_base, GSTA_NR_GPIO); return err; } static struct platform_driver sta2x11_gpio_platform_driver = { .driver = { .name = "sta2x11-gpio", .owner = THIS_MODULE, }, .probe = gsta_probe, }; module_platform_driver(sta2x11_gpio_platform_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
149397.c
/*
ibm/mii.c

Created:	Nov 2004 by Philip Homburg <philip@f-mnx.phicoh.com>

Media Independent (Ethernet) Interface functions
*/

#include <minix/drivers.h>
#if __minix_vmd
#include "config.h"
#endif

#include "mii.h"

/*===========================================================================*
 *				mii_print_stat_speed			     *
 *===========================================================================*/
/*
 * Print a human-readable summary of the link speeds/duplex modes advertised
 * in the MII status register (stat) and, for gigabit rates, the extended
 * status register (extstat).  Output is a comma-separated list of speed
 * groups ("1000 Mbps: ...", "100 Mbps: ...", "10 Mbps: ...").
 *
 * fs tracks whether this is the first speed group printed (controls the
 * ", " separator between groups); ft tracks the first technology printed
 * within the current group (controls the ", " separator inside a group).
 */
void mii_print_stat_speed(u16_t stat, u16_t extstat)
{
	int fs, ft;
	fs= 1;
	if (stat & MII_STATUS_EXT_STAT)
	{
		/* Gigabit capabilities live in the extended status register */
		if (extstat & (MII_ESTAT_1000XFD | MII_ESTAT_1000XHD |
			MII_ESTAT_1000TFD | MII_ESTAT_1000THD))
		{
			printf("1000 Mbps: ");
			fs= 0;
			ft= 1;
			if (extstat & (MII_ESTAT_1000XFD | MII_ESTAT_1000XHD))
			{
				ft= 0;
				printf("X-");
				switch(extstat &
					(MII_ESTAT_1000XFD|MII_ESTAT_1000XHD))
				{
				case MII_ESTAT_1000XFD: printf("FD"); break;
				case MII_ESTAT_1000XHD: printf("HD"); break;
				/* both bits set: full and half duplex */
				default: printf("FD/HD"); break;
				}
			}
			if (extstat & (MII_ESTAT_1000TFD | MII_ESTAT_1000THD))
			{
				if (!ft)
					printf(", ");
				ft= 0;
				printf("T-");
				switch(extstat &
					(MII_ESTAT_1000TFD|MII_ESTAT_1000THD))
				{
				case MII_ESTAT_1000TFD: printf("FD"); break;
				case MII_ESTAT_1000THD: printf("HD"); break;
				default: printf("FD/HD"); break;
				}
			}
		}
	}
	if (stat & (MII_STATUS_100T4 |
		MII_STATUS_100XFD | MII_STATUS_100XHD |
		MII_STATUS_100T2FD | MII_STATUS_100T2HD))
	{
		if (!fs)
			printf(", ");
		fs= 0;
		printf("100 Mbps: ");
		ft= 1;
		if (stat & MII_STATUS_100T4)
		{
			printf("T4");
			ft= 0;
		}
		if (stat & (MII_STATUS_100XFD | MII_STATUS_100XHD))
		{
			if (!ft)
				printf(", ");
			ft= 0;
			printf("TX-");
			switch(stat & (MII_STATUS_100XFD|MII_STATUS_100XHD))
			{
			case MII_STATUS_100XFD: printf("FD"); break;
			case MII_STATUS_100XHD: printf("HD"); break;
			default: printf("FD/HD"); break;
			}
		}
		if (stat & (MII_STATUS_100T2FD | MII_STATUS_100T2HD))
		{
			if (!ft)
				printf(", ");
			ft= 0;
			printf("T2-");
			switch(stat & (MII_STATUS_100T2FD|MII_STATUS_100T2HD))
			{
			case MII_STATUS_100T2FD: printf("FD"); break;
			case MII_STATUS_100T2HD: printf("HD"); break;
			default: printf("FD/HD"); break;
			}
		}
	}
	if (stat & (MII_STATUS_10FD | MII_STATUS_10HD))
	{
		if (!fs)
			printf(", ");
		printf("10 Mbps: ");
		fs= 0;
		printf("T-");
		switch(stat & (MII_STATUS_10FD|MII_STATUS_10HD))
		{
		case MII_STATUS_10FD: printf("FD"); break;
		case MII_STATUS_10HD: printf("HD"); break;
		default: printf("FD/HD"); break;
		}
	}
}

/*===========================================================================*
 *				mii_print_techab			     *
 *===========================================================================*/
/*
 * Print a human-readable summary of an MII auto-negotiation technology
 * ability field (ANA/ANLPA register contents).  Only the IEEE 802.3
 * selector is decoded; any other selector is dumped raw.  The same
 * fs/ft separator bookkeeping as in mii_print_stat_speed() is used.
 */
void mii_print_techab(u16_t techab)
{
	int fs, ft;

	if ((techab & MII_ANA_SEL_M) != MII_ANA_SEL_802_3)
	{
		/* Unknown selector: print raw value instead of decoding */
		printf("strange selector 0x%x, value 0x%x",
			techab & MII_ANA_SEL_M,
			(techab & MII_ANA_TAF_M) >> MII_ANA_TAF_S);
		return;
	}
	fs= 1;
	if (techab & (MII_ANA_100T4 | MII_ANA_100TXFD | MII_ANA_100TXHD))
	{
		printf("100 Mbps: ");
		fs= 0;
		ft= 1;
		if (techab & MII_ANA_100T4)
		{
			printf("T4");
			ft= 0;
		}
		if (techab & (MII_ANA_100TXFD | MII_ANA_100TXHD))
		{
			if (!ft)
				printf(", ");
			ft= 0;
			printf("TX-");
			switch(techab & (MII_ANA_100TXFD|MII_ANA_100TXHD))
			{
			case MII_ANA_100TXFD: printf("FD"); break;
			case MII_ANA_100TXHD: printf("HD"); break;
			default: printf("FD/HD"); break;
			}
		}
	}
	if (techab & (MII_ANA_10TFD | MII_ANA_10THD))
	{
		if (!fs)
			printf(", ");
		printf("10 Mbps: ");
		fs= 0;
		printf("T-");
		switch(techab & (MII_ANA_10TFD|MII_ANA_10THD))
		{
		case MII_ANA_10TFD: printf("FD"); break;
		case MII_ANA_10THD: printf("HD"); break;
		default: printf("FD/HD"); break;
		}
	}
	/* Flow-control (PAUSE) advertisement bits */
	if (techab & MII_ANA_PAUSE_SYM)
	{
		if (!fs)
			printf(", ");
		fs= 0;
		printf("pause(SYM)");
	}
	if (techab & MII_ANA_PAUSE_ASYM)
	{
		if (!fs)
			printf(", ");
		fs= 0;
		printf("pause(ASYM)");
	}
	/* Reserved technology-ability bits: dump raw if any are set */
	if (techab & MII_ANA_TAF_RES)
	{
		if (!fs)
			printf(", ");
		fs= 0;
		printf("0x%x", (techab & MII_ANA_TAF_RES) >> MII_ANA_TAF_S);
	}
}

/*
 * $PchId: mii.c,v 1.2 2005/01/31 22:17:26 philip Exp $
 */
448842.c
/*
 * Interworking (IEEE 802.11u)
 * Copyright (c) 2011-2013, Qualcomm Atheros, Inc.
 * Copyright (c) 2011-2014, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"

#include "common.h"
#include "common/ieee802_11_defs.h"
#include "common/gas.h"
#include "common/wpa_ctrl.h"
#include "utils/pcsc_funcs.h"
#include "utils/eloop.h"
#include "drivers/driver.h"
#include "eap_common/eap_defs.h"
#include "eap_peer/eap.h"
#include "eap_peer/eap_methods.h"
#include "eapol_supp/eapol_supp_sm.h"
#include "rsn_supp/wpa.h"
#include "wpa_supplicant_i.h"
#include "config.h"
#include "config_ssid.h"
#include "bss.h"
#include "scan.h"
#include "notify.h"
#include "driver_i.h"
#include "gas_query.h"
#include "hs20_supplicant.h"
#include "interworking.h"


/*
 * 3GPP (SIM/AKA/AKA') credential support is available if any of the
 * corresponding EAP methods is built in (statically or dynamically).
 * Flattened from a nested #if chain that used bitwise '|' on the
 * defined() results; logical '||' is the idiomatic preprocessor form
 * and evaluates identically here.
 */
#if defined(EAP_SIM) || defined(EAP_SIM_DYNAMIC) || \
	defined(EAP_AKA) || defined(EAP_AKA_DYNAMIC) || \
	defined(EAP_AKA_PRIME) || defined(EAP_AKA_PRIME_DYNAMIC)
#define INTERWORKING_3GPP
#endif

static void interworking_next_anqp_fetch(struct wpa_supplicant *wpa_s);
static struct wpa_cred * interworking_credentials_available_realm(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded);
static struct wpa_cred * interworking_credentials_available_3gpp(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded);


/*
 * Compare two credentials by priority, falling back to sp_priority when
 * both belong to the same provisioning SP.  Returns >0 if a wins, <0 if
 * b wins, 0 if equal.  Note that sp_priority uses inverted order (lower
 * value means higher priority).
 */
static int cred_prio_cmp(const struct wpa_cred *a, const struct wpa_cred *b)
{
	if (a->priority > b->priority)
		return 1;
	if (a->priority < b->priority)
		return -1;
	if (a->provisioning_sp == NULL || b->provisioning_sp == NULL ||
	    os_strcmp(a->provisioning_sp, b->provisioning_sp) != 0)
		return 0;
	if (a->sp_priority < b->sp_priority)
		return 1;
	if (a->sp_priority > b->sp_priority)
		return -1;
	return 0;
}


/*
 * Trigger a (re)connection attempt after network selection: disconnect
 * from the current AP if associated, then try a fast association using
 * existing scan results once before falling back to a fresh scan.
 */
static void interworking_reconnect(struct wpa_supplicant *wpa_s)
{
	unsigned int tried;

	if (wpa_s->wpa_state >= WPA_AUTHENTICATING) {
		wpa_supplicant_cancel_sched_scan(wpa_s);
		wpa_s->own_disconnect_req = 1;
		wpa_supplicant_deauthenticate(wpa_s,
					      WLAN_REASON_DEAUTH_LEAVING);
	}
	wpa_s->disconnected = 0;
	wpa_s->reassociate = 1;
	tried = wpa_s->interworking_fast_assoc_tried;
	wpa_s->interworking_fast_assoc_tried = 1;

	if (!tried && wpa_supplicant_fast_associate(wpa_s) >= 0)
		return;

	wpa_s->interworking_fast_assoc_tried = 0;
	wpa_supplicant_req_scan(wpa_s, 0, 0);
}


/*
 * Build a GAS/ANQP initial request frame containing an ANQP Query List
 * with the given Info IDs, optionally followed by extra (e.g. vendor
 * specific) elements.  Returns NULL on allocation failure; caller owns
 * the returned wpabuf.
 */
static struct wpabuf * anqp_build_req(u16 info_ids[], size_t num_ids,
				      struct wpabuf *extra)
{
	struct wpabuf *buf;
	size_t i;
	u8 *len_pos;

	buf = gas_anqp_build_initial_req(0, 4 + num_ids * 2 +
					 (extra ? wpabuf_len(extra) : 0));
	if (buf == NULL)
		return NULL;

	if (num_ids > 0) {
		len_pos = gas_anqp_add_element(buf, ANQP_QUERY_LIST);
		for (i = 0; i < num_ids; i++)
			wpabuf_put_le16(buf, info_ids[i]);
		gas_anqp_set_element_len(buf, len_pos);
	}
	if (extra)
		wpabuf_put_buf(buf, extra);

	gas_anqp_set_len(buf);

	return buf;
}


/*
 * GAS query completion callback: forward the response to the generic
 * ANQP response handler and continue with the next queued ANQP fetch.
 */
static void interworking_anqp_resp_cb(void *ctx, const u8 *dst,
				      u8 dialog_token,
				      enum gas_query_result result,
				      const struct wpabuf *adv_proto,
				      const struct wpabuf *resp,
				      u16 status_code)
{
	struct wpa_supplicant *wpa_s = ctx;

	wpa_printf(MSG_DEBUG, "ANQP: Response callback dst=" MACSTR
		   " dialog_token=%u result=%d status_code=%u",
		   MAC2STR(dst), dialog_token, result, status_code);
	anqp_resp_cb(wpa_s, dst, dialog_token, result, adv_proto, resp,
		     status_code);
	interworking_next_anqp_fetch(wpa_s);
}


/* Does any configured credential use roaming consortium OIs? */
static int cred_with_roaming_consortium(struct wpa_supplicant *wpa_s)
{
	struct wpa_cred *cred;

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		if (cred->roaming_consortium_len)
			return 1;
		if (cred->required_roaming_consortium_len)
			return 1;
	}
	return 0;
}


/* Does any configured credential use SIM/USIM (3GPP) identification? */
static int cred_with_3gpp(struct wpa_supplicant *wpa_s)
{
	struct wpa_cred *cred;

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		if (cred->pcsc || cred->imsi)
			return 1;
	}
	return 0;
}


static int cred_with_nai_realm(struct wpa_supplicant *wpa_s)
{
	struct wpa_cred *cred;
for (cred = wpa_s->conf->cred; cred; cred = cred->next) { if (cred->pcsc || cred->imsi) continue; if (!cred->eap_method) return 1; if (cred->realm && cred->roaming_consortium_len == 0) return 1; } return 0; } static int cred_with_domain(struct wpa_supplicant *wpa_s) { struct wpa_cred *cred; for (cred = wpa_s->conf->cred; cred; cred = cred->next) { if (cred->domain || cred->pcsc || cred->imsi || cred->roaming_partner) return 1; } return 0; } #ifdef CONFIG_HS20 static int cred_with_min_backhaul(struct wpa_supplicant *wpa_s) { struct wpa_cred *cred; for (cred = wpa_s->conf->cred; cred; cred = cred->next) { if (cred->min_dl_bandwidth_home || cred->min_ul_bandwidth_home || cred->min_dl_bandwidth_roaming || cred->min_ul_bandwidth_roaming) return 1; } return 0; } static int cred_with_conn_capab(struct wpa_supplicant *wpa_s) { struct wpa_cred *cred; for (cred = wpa_s->conf->cred; cred; cred = cred->next) { if (cred->num_req_conn_capab) return 1; } return 0; } #endif /* CONFIG_HS20 */ static int additional_roaming_consortiums(struct wpa_bss *bss) { const u8 *ie; ie = wpa_bss_get_ie(bss, WLAN_EID_ROAMING_CONSORTIUM); if (ie == NULL || ie[1] == 0) return 0; return ie[2]; /* Number of ANQP OIs */ } static void interworking_continue_anqp(void *eloop_ctx, void *sock_ctx) { struct wpa_supplicant *wpa_s = eloop_ctx; interworking_next_anqp_fetch(wpa_s); } static int interworking_anqp_send_req(struct wpa_supplicant *wpa_s, struct wpa_bss *bss) { struct wpabuf *buf; int ret = 0; int res; u16 info_ids[8]; size_t num_info_ids = 0; struct wpabuf *extra = NULL; int all = wpa_s->fetch_all_anqp; wpa_msg(wpa_s, MSG_DEBUG, "Interworking: ANQP Query Request to " MACSTR, MAC2STR(bss->bssid)); wpa_s->interworking_gas_bss = bss; info_ids[num_info_ids++] = ANQP_CAPABILITY_LIST; if (all) { info_ids[num_info_ids++] = ANQP_VENUE_NAME; info_ids[num_info_ids++] = ANQP_NETWORK_AUTH_TYPE; } if (all || (cred_with_roaming_consortium(wpa_s) && additional_roaming_consortiums(bss))) info_ids[num_info_ids++] 
= ANQP_ROAMING_CONSORTIUM; if (all) info_ids[num_info_ids++] = ANQP_IP_ADDR_TYPE_AVAILABILITY; if (all || cred_with_nai_realm(wpa_s)) info_ids[num_info_ids++] = ANQP_NAI_REALM; if (all || cred_with_3gpp(wpa_s)) { info_ids[num_info_ids++] = ANQP_3GPP_CELLULAR_NETWORK; wpa_supplicant_scard_init(wpa_s, NULL); } if (all || cred_with_domain(wpa_s)) info_ids[num_info_ids++] = ANQP_DOMAIN_NAME; wpa_hexdump(MSG_DEBUG, "Interworking: ANQP Query info", (u8 *) info_ids, num_info_ids * 2); #ifdef CONFIG_HS20 if (wpa_bss_get_vendor_ie(bss, HS20_IE_VENDOR_TYPE)) { u8 *len_pos; extra = wpabuf_alloc(100); if (!extra) return -1; len_pos = gas_anqp_add_element(extra, ANQP_VENDOR_SPECIFIC); wpabuf_put_be24(extra, OUI_WFA); wpabuf_put_u8(extra, HS20_ANQP_OUI_TYPE); wpabuf_put_u8(extra, HS20_STYPE_QUERY_LIST); wpabuf_put_u8(extra, 0); /* Reserved */ wpabuf_put_u8(extra, HS20_STYPE_CAPABILITY_LIST); if (all) wpabuf_put_u8(extra, HS20_STYPE_OPERATOR_FRIENDLY_NAME); if (all || cred_with_min_backhaul(wpa_s)) wpabuf_put_u8(extra, HS20_STYPE_WAN_METRICS); if (all || cred_with_conn_capab(wpa_s)) wpabuf_put_u8(extra, HS20_STYPE_CONNECTION_CAPABILITY); if (all) wpabuf_put_u8(extra, HS20_STYPE_OPERATING_CLASS); if (all) wpabuf_put_u8(extra, HS20_STYPE_OSU_PROVIDERS_LIST); gas_anqp_set_element_len(extra, len_pos); } #endif /* CONFIG_HS20 */ buf = anqp_build_req(info_ids, num_info_ids, extra); wpabuf_free(extra); if (buf == NULL) return -1; res = gas_query_req(wpa_s->gas, bss->bssid, bss->freq, 0, buf, interworking_anqp_resp_cb, wpa_s); if (res < 0) { wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Failed to send Query Request"); wpabuf_free(buf); ret = -1; eloop_register_timeout(0, 0, interworking_continue_anqp, wpa_s, NULL); } else wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Query started with dialog token %u", res); return ret; } struct nai_realm_eap { u8 method; u8 inner_method; enum nai_realm_eap_auth_inner_non_eap inner_non_eap; u8 cred_type; u8 tunneled_cred_type; }; struct nai_realm { u8 encoding; char *realm; u8 
eap_count; struct nai_realm_eap *eap; }; static void nai_realm_free(struct nai_realm *realms, u16 count) { u16 i; if (realms == NULL) return; for (i = 0; i < count; i++) { os_free(realms[i].eap); os_free(realms[i].realm); } os_free(realms); } static const u8 * nai_realm_parse_eap(struct nai_realm_eap *e, const u8 *pos, const u8 *end) { u8 elen, auth_count, a; const u8 *e_end; if (end - pos < 3) { wpa_printf(MSG_DEBUG, "No room for EAP Method fixed fields"); return NULL; } elen = *pos++; if (elen > end - pos || elen < 2) { wpa_printf(MSG_DEBUG, "No room for EAP Method subfield"); return NULL; } e_end = pos + elen; e->method = *pos++; auth_count = *pos++; wpa_printf(MSG_DEBUG, "EAP Method: len=%u method=%u auth_count=%u", elen, e->method, auth_count); for (a = 0; a < auth_count; a++) { u8 id, len; if (end - pos < 2) { wpa_printf(MSG_DEBUG, "No room for Authentication Parameter subfield header"); return NULL; } id = *pos++; len = *pos++; if (len > end - pos) { wpa_printf(MSG_DEBUG, "No room for Authentication Parameter subfield"); return NULL; } switch (id) { case NAI_REALM_EAP_AUTH_NON_EAP_INNER_AUTH: if (len < 1) break; e->inner_non_eap = *pos; if (e->method != EAP_TYPE_TTLS) break; switch (*pos) { case NAI_REALM_INNER_NON_EAP_PAP: wpa_printf(MSG_DEBUG, "EAP-TTLS/PAP"); break; case NAI_REALM_INNER_NON_EAP_CHAP: wpa_printf(MSG_DEBUG, "EAP-TTLS/CHAP"); break; case NAI_REALM_INNER_NON_EAP_MSCHAP: wpa_printf(MSG_DEBUG, "EAP-TTLS/MSCHAP"); break; case NAI_REALM_INNER_NON_EAP_MSCHAPV2: wpa_printf(MSG_DEBUG, "EAP-TTLS/MSCHAPV2"); break; } break; case NAI_REALM_EAP_AUTH_INNER_AUTH_EAP_METHOD: if (len < 1) break; e->inner_method = *pos; wpa_printf(MSG_DEBUG, "Inner EAP method: %u", e->inner_method); break; case NAI_REALM_EAP_AUTH_CRED_TYPE: if (len < 1) break; e->cred_type = *pos; wpa_printf(MSG_DEBUG, "Credential Type: %u", e->cred_type); break; case NAI_REALM_EAP_AUTH_TUNNELED_CRED_TYPE: if (len < 1) break; e->tunneled_cred_type = *pos; wpa_printf(MSG_DEBUG, "Tunneled EAP 
Method Credential " "Type: %u", e->tunneled_cred_type); break; default: wpa_printf(MSG_DEBUG, "Unsupported Authentication " "Parameter: id=%u len=%u", id, len); wpa_hexdump(MSG_DEBUG, "Authentication Parameter " "Value", pos, len); break; } pos += len; } return e_end; } static const u8 * nai_realm_parse_realm(struct nai_realm *r, const u8 *pos, const u8 *end) { u16 len; const u8 *f_end; u8 realm_len, e; if (end - pos < 4) { wpa_printf(MSG_DEBUG, "No room for NAI Realm Data " "fixed fields"); return NULL; } len = WPA_GET_LE16(pos); /* NAI Realm Data field Length */ pos += 2; if (len > end - pos || len < 3) { wpa_printf(MSG_DEBUG, "No room for NAI Realm Data " "(len=%u; left=%u)", len, (unsigned int) (end - pos)); return NULL; } f_end = pos + len; r->encoding = *pos++; realm_len = *pos++; if (realm_len > f_end - pos) { wpa_printf(MSG_DEBUG, "No room for NAI Realm " "(len=%u; left=%u)", realm_len, (unsigned int) (f_end - pos)); return NULL; } wpa_hexdump_ascii(MSG_DEBUG, "NAI Realm", pos, realm_len); r->realm = dup_binstr(pos, realm_len); if (r->realm == NULL) return NULL; pos += realm_len; if (f_end - pos < 1) { wpa_printf(MSG_DEBUG, "No room for EAP Method Count"); return NULL; } r->eap_count = *pos++; wpa_printf(MSG_DEBUG, "EAP Count: %u", r->eap_count); if (r->eap_count * 3 > f_end - pos) { wpa_printf(MSG_DEBUG, "No room for EAP Methods"); return NULL; } r->eap = os_calloc(r->eap_count, sizeof(struct nai_realm_eap)); if (r->eap == NULL) return NULL; for (e = 0; e < r->eap_count; e++) { pos = nai_realm_parse_eap(&r->eap[e], pos, f_end); if (pos == NULL) return NULL; } return f_end; } static struct nai_realm * nai_realm_parse(struct wpabuf *anqp, u16 *count) { struct nai_realm *realm; const u8 *pos, *end; u16 i, num; size_t left; if (anqp == NULL) return NULL; left = wpabuf_len(anqp); if (left < 2) return NULL; pos = wpabuf_head_u8(anqp); end = pos + left; num = WPA_GET_LE16(pos); wpa_printf(MSG_DEBUG, "NAI Realm Count: %u", num); pos += 2; left -= 2; if (num > left 
/ 5) { wpa_printf(MSG_DEBUG, "Invalid NAI Realm Count %u - not " "enough data (%u octets) for that many realms", num, (unsigned int) left); return NULL; } realm = os_calloc(num, sizeof(struct nai_realm)); if (realm == NULL) return NULL; for (i = 0; i < num; i++) { pos = nai_realm_parse_realm(&realm[i], pos, end); if (pos == NULL) { nai_realm_free(realm, num); return NULL; } } *count = num; return realm; } static int nai_realm_match(struct nai_realm *realm, const char *home_realm) { char *tmp, *pos, *end; int match = 0; if (realm->realm == NULL || home_realm == NULL) return 0; if (os_strchr(realm->realm, ';') == NULL) return os_strcasecmp(realm->realm, home_realm) == 0; tmp = os_strdup(realm->realm); if (tmp == NULL) return 0; pos = tmp; while (*pos) { end = os_strchr(pos, ';'); if (end) *end = '\0'; if (os_strcasecmp(pos, home_realm) == 0) { match = 1; break; } if (end == NULL) break; pos = end + 1; } os_free(tmp); return match; } static int nai_realm_cred_username(struct wpa_supplicant *wpa_s, struct nai_realm_eap *eap) { if (eap_get_name(EAP_VENDOR_IETF, eap->method) == NULL) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: EAP method not supported: %d", eap->method); return 0; /* method not supported */ } if (eap->method != EAP_TYPE_TTLS && eap->method != EAP_TYPE_PEAP && eap->method != EAP_TYPE_FAST) { /* Only tunneled methods with username/password supported */ wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: Method: %d is not TTLS, PEAP, or FAST", eap->method); return 0; } if (eap->method == EAP_TYPE_PEAP || eap->method == EAP_TYPE_FAST) { if (eap->inner_method && eap_get_name(EAP_VENDOR_IETF, eap->inner_method) == NULL) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: PEAP/FAST: Inner method not supported: %d", eap->inner_method); return 0; } if (!eap->inner_method && eap_get_name(EAP_VENDOR_IETF, EAP_TYPE_MSCHAPV2) == NULL) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: MSCHAPv2 not supported"); return 0; } } if (eap->method == 
EAP_TYPE_TTLS) { if (eap->inner_method == 0 && eap->inner_non_eap == 0) return 1; /* Assume TTLS/MSCHAPv2 is used */ if (eap->inner_method && eap_get_name(EAP_VENDOR_IETF, eap->inner_method) == NULL) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: TTLS, but inner not supported: %d", eap->inner_method); return 0; } if (eap->inner_non_eap && eap->inner_non_eap != NAI_REALM_INNER_NON_EAP_PAP && eap->inner_non_eap != NAI_REALM_INNER_NON_EAP_CHAP && eap->inner_non_eap != NAI_REALM_INNER_NON_EAP_MSCHAP && eap->inner_non_eap != NAI_REALM_INNER_NON_EAP_MSCHAPV2) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: TTLS, inner-non-eap not supported: %d", eap->inner_non_eap); return 0; } } if (eap->inner_method && eap->inner_method != EAP_TYPE_GTC && eap->inner_method != EAP_TYPE_MSCHAPV2) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-username: inner-method not GTC or MSCHAPv2: %d", eap->inner_method); return 0; } return 1; } static int nai_realm_cred_cert(struct wpa_supplicant *wpa_s, struct nai_realm_eap *eap) { if (eap_get_name(EAP_VENDOR_IETF, eap->method) == NULL) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-cert: Method not supported: %d", eap->method); return 0; /* method not supported */ } if (eap->method != EAP_TYPE_TLS) { /* Only EAP-TLS supported for credential authentication */ wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-cred-cert: Method not TLS: %d", eap->method); return 0; } return 1; } static struct nai_realm_eap * nai_realm_find_eap(struct wpa_supplicant *wpa_s, struct wpa_cred *cred, struct nai_realm *realm) { u8 e; if (cred->username == NULL || cred->username[0] == '\0' || ((cred->password == NULL || cred->password[0] == '\0') && (cred->private_key == NULL || cred->private_key[0] == '\0'))) { wpa_msg(wpa_s, MSG_DEBUG, "nai-realm-find-eap: incomplete cred info: username: %s password: %s private_key: %s", cred->username ? cred->username : "NULL", cred->password ? cred->password : "NULL", cred->private_key ? 
cred->private_key : "NULL"); return NULL; } for (e = 0; e < realm->eap_count; e++) { struct nai_realm_eap *eap = &realm->eap[e]; if (cred->password && cred->password[0] && nai_realm_cred_username(wpa_s, eap)) return eap; if (cred->private_key && cred->private_key[0] && nai_realm_cred_cert(wpa_s, eap)) return eap; } return NULL; } #ifdef INTERWORKING_3GPP static int plmn_id_match(struct wpabuf *anqp, const char *imsi, int mnc_len) { u8 plmn[3], plmn2[3]; const u8 *pos, *end; u8 udhl; /* * See Annex A of 3GPP TS 24.234 v8.1.0 for description. The network * operator is allowed to include only two digits of the MNC, so allow * matches based on both two and three digit MNC assumptions. Since some * SIM/USIM cards may not expose MNC length conveniently, we may be * provided the default MNC length 3 here and as such, checking with MNC * length 2 is justifiable even though 3GPP TS 24.234 does not mention * that case. Anyway, MCC/MNC pair where both 2 and 3 digit MNC is used * with otherwise matching values would not be good idea in general, so * this should not result in selecting incorrect networks. 
*/ /* Match with 3 digit MNC */ plmn[0] = (imsi[0] - '0') | ((imsi[1] - '0') << 4); plmn[1] = (imsi[2] - '0') | ((imsi[5] - '0') << 4); plmn[2] = (imsi[3] - '0') | ((imsi[4] - '0') << 4); /* Match with 2 digit MNC */ plmn2[0] = (imsi[0] - '0') | ((imsi[1] - '0') << 4); plmn2[1] = (imsi[2] - '0') | 0xf0; plmn2[2] = (imsi[3] - '0') | ((imsi[4] - '0') << 4); if (anqp == NULL) return 0; pos = wpabuf_head_u8(anqp); end = pos + wpabuf_len(anqp); if (end - pos < 2) return 0; if (*pos != 0) { wpa_printf(MSG_DEBUG, "Unsupported GUD version 0x%x", *pos); return 0; } pos++; udhl = *pos++; if (udhl > end - pos) { wpa_printf(MSG_DEBUG, "Invalid UDHL"); return 0; } end = pos + udhl; wpa_printf(MSG_DEBUG, "Interworking: Matching against MCC/MNC alternatives: %02x:%02x:%02x or %02x:%02x:%02x (IMSI %s, MNC length %d)", plmn[0], plmn[1], plmn[2], plmn2[0], plmn2[1], plmn2[2], imsi, mnc_len); while (end - pos >= 2) { u8 iei, len; const u8 *l_end; iei = *pos++; len = *pos++ & 0x7f; if (len > end - pos) break; l_end = pos + len; if (iei == 0 && len > 0) { /* PLMN List */ u8 num, i; wpa_hexdump(MSG_DEBUG, "Interworking: PLMN List information element", pos, len); num = *pos++; for (i = 0; i < num; i++) { if (l_end - pos < 3) break; if (os_memcmp(pos, plmn, 3) == 0 || os_memcmp(pos, plmn2, 3) == 0) return 1; /* Found matching PLMN */ pos += 3; } } else { wpa_hexdump(MSG_DEBUG, "Interworking: Unrecognized 3GPP information element", pos, len); } pos = l_end; } return 0; } static int build_root_nai(char *nai, size_t nai_len, const char *imsi, size_t mnc_len, char prefix) { const char *sep, *msin; char *end, *pos; size_t msin_len, plmn_len; /* * TS 23.003, Clause 14 (3GPP to WLAN Interworking) * Root NAI: * <aka:0|sim:1><IMSI>@wlan.mnc<MNC>.mcc<MCC>.3gppnetwork.org * <MNC> is zero-padded to three digits in case two-digit MNC is used */ if (imsi == NULL || os_strlen(imsi) > 16) { wpa_printf(MSG_DEBUG, "No valid IMSI available"); return -1; } sep = os_strchr(imsi, '-'); if (sep) { plmn_len = 
sep - imsi; msin = sep + 1; } else if (mnc_len && os_strlen(imsi) >= 3 + mnc_len) { plmn_len = 3 + mnc_len; msin = imsi + plmn_len; } else return -1; if (plmn_len != 5 && plmn_len != 6) return -1; msin_len = os_strlen(msin); pos = nai; end = nai + nai_len; if (prefix) *pos++ = prefix; os_memcpy(pos, imsi, plmn_len); pos += plmn_len; os_memcpy(pos, msin, msin_len); pos += msin_len; pos += os_snprintf(pos, end - pos, "@wlan.mnc"); if (plmn_len == 5) { *pos++ = '0'; *pos++ = imsi[3]; *pos++ = imsi[4]; } else { *pos++ = imsi[3]; *pos++ = imsi[4]; *pos++ = imsi[5]; } os_snprintf(pos, end - pos, ".mcc%c%c%c.3gppnetwork.org", imsi[0], imsi[1], imsi[2]); return 0; } static int set_root_nai(struct wpa_ssid *ssid, const char *imsi, char prefix) { char nai[100]; if (build_root_nai(nai, sizeof(nai), imsi, 0, prefix) < 0) return -1; return wpa_config_set_quoted(ssid, "identity", nai); } #endif /* INTERWORKING_3GPP */ static int already_connected(struct wpa_supplicant *wpa_s, struct wpa_cred *cred, struct wpa_bss *bss) { struct wpa_ssid *ssid, *sel_ssid; struct wpa_bss *selected; if (wpa_s->wpa_state < WPA_ASSOCIATED || wpa_s->current_ssid == NULL) return 0; ssid = wpa_s->current_ssid; if (ssid->parent_cred != cred) return 0; if (ssid->ssid_len != bss->ssid_len || os_memcmp(ssid->ssid, bss->ssid, bss->ssid_len) != 0) return 0; sel_ssid = NULL; selected = wpa_supplicant_pick_network(wpa_s, &sel_ssid); if (selected && sel_ssid && sel_ssid->priority > ssid->priority) return 0; /* higher priority network in scan results */ return 1; } static void remove_duplicate_network(struct wpa_supplicant *wpa_s, struct wpa_cred *cred, struct wpa_bss *bss) { struct wpa_ssid *ssid; for (ssid = wpa_s->conf->ssid; ssid; ssid = ssid->next) { if (ssid->parent_cred != cred) continue; if (ssid->ssid_len != bss->ssid_len || os_memcmp(ssid->ssid, bss->ssid, bss->ssid_len) != 0) continue; break; } if (ssid == NULL) return; wpa_printf(MSG_DEBUG, "Interworking: Remove duplicate network entry for the same 
credential"); if (ssid == wpa_s->current_ssid) { wpa_sm_set_config(wpa_s->wpa, NULL); eapol_sm_notify_config(wpa_s->eapol, NULL, NULL); wpa_s->own_disconnect_req = 1; wpa_supplicant_deauthenticate(wpa_s, WLAN_REASON_DEAUTH_LEAVING); } wpas_notify_network_removed(wpa_s, ssid); wpa_config_remove_network(wpa_s->conf, ssid->id); } static int interworking_set_hs20_params(struct wpa_supplicant *wpa_s, struct wpa_ssid *ssid) { const char *key_mgmt = NULL; #ifdef CONFIG_IEEE80211R int res; struct wpa_driver_capa capa; res = wpa_drv_get_capa(wpa_s, &capa); if (res == 0 && capa.key_mgmt & WPA_DRIVER_CAPA_KEY_MGMT_FT) { key_mgmt = wpa_s->conf->pmf != NO_MGMT_FRAME_PROTECTION ? "WPA-EAP WPA-EAP-SHA256 FT-EAP" : "WPA-EAP FT-EAP"; } #endif /* CONFIG_IEEE80211R */ if (!key_mgmt) key_mgmt = wpa_s->conf->pmf != NO_MGMT_FRAME_PROTECTION ? "WPA-EAP WPA-EAP-SHA256" : "WPA-EAP"; if (wpa_config_set(ssid, "key_mgmt", key_mgmt, 0) < 0 || wpa_config_set(ssid, "proto", "RSN", 0) < 0 || wpa_config_set(ssid, "pairwise", "CCMP", 0) < 0) return -1; return 0; } static int interworking_connect_3gpp(struct wpa_supplicant *wpa_s, struct wpa_cred *cred, struct wpa_bss *bss, int only_add) { #ifdef INTERWORKING_3GPP struct wpa_ssid *ssid; int eap_type; int res; char prefix; if (bss->anqp == NULL || bss->anqp->anqp_3gpp == NULL) return -1; wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Connect with " MACSTR " (3GPP)", MAC2STR(bss->bssid)); if (already_connected(wpa_s, cred, bss)) { wpa_msg(wpa_s, MSG_INFO, INTERWORKING_ALREADY_CONNECTED MACSTR, MAC2STR(bss->bssid)); return wpa_s->current_ssid->id; } remove_duplicate_network(wpa_s, cred, bss); ssid = wpa_config_add_network(wpa_s->conf); if (ssid == NULL) return -1; ssid->parent_cred = cred; wpas_notify_network_added(wpa_s, ssid); wpa_config_set_network_defaults(ssid); ssid->priority = cred->priority; ssid->temporary = 1; ssid->ssid = os_zalloc(bss->ssid_len + 1); if (ssid->ssid == NULL) goto fail; os_memcpy(ssid->ssid, bss->ssid, bss->ssid_len); 
/* interworking_connect_3gpp() continued */
	ssid->ssid_len = bss->ssid_len;
	ssid->eap.sim_num = cred->sim_num;

	if (interworking_set_hs20_params(wpa_s, ssid) < 0)
		goto fail;

	/* Select EAP method: SIM by default, AKA when the smart card supports
	 * UMTS; an explicit SIM/AKA/AKA' method in the credential overrides
	 * this. */
	eap_type = EAP_TYPE_SIM;
	if (cred->pcsc && wpa_s->scard && scard_supports_umts(wpa_s->scard))
		eap_type = EAP_TYPE_AKA;
	if (cred->eap_method && cred->eap_method[0].vendor == EAP_VENDOR_IETF) {
		if (cred->eap_method[0].method == EAP_TYPE_SIM ||
		    cred->eap_method[0].method == EAP_TYPE_AKA ||
		    cred->eap_method[0].method == EAP_TYPE_AKA_PRIME)
			eap_type = cred->eap_method[0].method;
	}

	/* The leading digit of the root NAI encodes the EAP method */
	switch (eap_type) {
	case EAP_TYPE_SIM:
		prefix = '1';
		res = wpa_config_set(ssid, "eap", "SIM", 0);
		break;
	case EAP_TYPE_AKA:
		prefix = '0';
		res = wpa_config_set(ssid, "eap", "AKA", 0);
		break;
	case EAP_TYPE_AKA_PRIME:
		prefix = '6';
		res = wpa_config_set(ssid, "eap", "AKA'", 0);
		break;
	default:
		res = -1;
		break;
	}
	if (res < 0) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Selected EAP method (%d) not supported", eap_type);
		goto fail;
	}

	if (!cred->pcsc && set_root_nai(ssid, cred->imsi, prefix) < 0) {
		wpa_msg(wpa_s, MSG_DEBUG, "Failed to set Root NAI");
		goto fail;
	}

	if (cred->milenage && cred->milenage[0]) {
		if (wpa_config_set_quoted(ssid, "password",
					  cred->milenage) < 0)
			goto fail;
	} else if (cred->pcsc) {
		if (wpa_config_set_quoted(ssid, "pcsc", "") < 0)
			goto fail;
		if (wpa_s->conf->pcsc_pin &&
		    wpa_config_set_quoted(ssid, "pin", wpa_s->conf->pcsc_pin)
		    < 0)
			goto fail;
	}

	wpa_s->next_ssid = ssid;
	wpa_config_update_prio_list(wpa_s->conf);
	if (!only_add)
		interworking_reconnect(wpa_s);

	return ssid->id;

fail:
	wpas_notify_network_removed(wpa_s, ssid);
	wpa_config_remove_network(wpa_s->conf, ssid->id);
#endif /* INTERWORKING_3GPP */
	return -1;
}


/* Match a roaming consortium OI against the Roaming Consortium element
 * from Beacon/Probe Response frames. */
static int roaming_consortium_element_match(const u8 *ie, const u8 *rc_id,
					    size_t rc_len)
{
	const u8 *pos, *end;
	u8 lens;

	if (ie == NULL)
		return 0;

	pos = ie + 2;
	end = ie + 2 + ie[1];

	/* Roaming Consortium element:
	 * Number of ANQP OIs
	 * OI #1 and #2 lengths
	 * OI #1, [OI #2], [OI #3]
	 */

	if (end - pos < 2)
		return 0;

	pos++; /* skip Number of ANQP OIs */

	/* OI #1 length (low nibble) and OI #2 length (high nibble) */
	lens = *pos++;
	if ((lens & 0x0f) + (lens >> 4) > end - pos)
		return 0;

	if ((lens & 0x0f) == rc_len && os_memcmp(pos, rc_id, rc_len) == 0)
		return 1;
	pos += lens & 0x0f;

	if ((lens >> 4) == rc_len && os_memcmp(pos, rc_id, rc_len) == 0)
		return 1;
	pos += lens >> 4;

	/* OI #3, if present, occupies the remainder of the element */
	if (pos < end && (size_t) (end - pos) == rc_len &&
	    os_memcmp(pos, rc_id, rc_len) == 0)
		return 1;

	return 0;
}


/* Match a roaming consortium OI against the ANQP Roaming Consortium list. */
static int roaming_consortium_anqp_match(const struct wpabuf *anqp,
					 const u8 *rc_id, size_t rc_len)
{
	const u8 *pos, *end;
	u8 len;

	if (anqp == NULL)
		return 0;

	pos = wpabuf_head(anqp);
	end = pos + wpabuf_len(anqp);

	/* Set of <OI Length, OI> duples */
	while (pos < end) {
		len = *pos++;
		if (len > end - pos)
			break;
		if (len == rc_len && os_memcmp(pos, rc_id, rc_len) == 0)
			return 1;
		pos += len;
	}

	return 0;
}


/* Match an OI against either advertisement source (element or ANQP). */
static int roaming_consortium_match(const u8 *ie, const struct wpabuf *anqp,
				    const u8 *rc_id, size_t rc_len)
{
	return roaming_consortium_element_match(ie, rc_id, rc_len) ||
		roaming_consortium_anqp_match(anqp, rc_id, rc_len);
}


/* Return 1 if the credential requires a roaming consortium OI that this BSS
 * does not advertise. */
static int cred_no_required_oi_match(struct wpa_cred *cred, struct wpa_bss *bss)
{
	const u8 *ie;

	if (cred->required_roaming_consortium_len == 0)
		return 0;

	ie = wpa_bss_get_ie(bss, WLAN_EID_ROAMING_CONSORTIUM);

	if (ie == NULL &&
	    (bss->anqp == NULL || bss->anqp->roaming_consortium == NULL))
		return 1;

	return !roaming_consortium_match(ie,
					 bss->anqp ?
/* cred_no_required_oi_match() continued */
					 bss->anqp->roaming_consortium : NULL,
					 cred->required_roaming_consortium,
					 cred->required_roaming_consortium_len);
}


/* Return 1 if the BSS SSID is on the credential's excluded SSID list. */
static int cred_excluded_ssid(struct wpa_cred *cred, struct wpa_bss *bss)
{
	size_t i;

	if (!cred->excluded_ssid)
		return 0;

	for (i = 0; i < cred->num_excluded_ssid; i++) {
		struct excluded_ssid *e = &cred->excluded_ssid[i];
		if (bss->ssid_len == e->ssid_len &&
		    os_memcmp(bss->ssid, e->ssid, e->ssid_len) == 0)
			return 1;
	}

	return 0;
}


/* Return 1 if the HS 2.0 WAN Metrics for this BSS indicate less estimated
 * backhaul bandwidth than the credential's minimum (home or roaming limits
 * selected based on home SP match). */
static int cred_below_min_backhaul(struct wpa_supplicant *wpa_s,
				   struct wpa_cred *cred, struct wpa_bss *bss)
{
#ifdef CONFIG_HS20
	int res;
	unsigned int dl_bandwidth, ul_bandwidth;
	const u8 *wan;
	u8 wan_info, dl_load, ul_load;
	u16 lmd;
	u32 ul_speed, dl_speed;

	if (!cred->min_dl_bandwidth_home &&
	    !cred->min_ul_bandwidth_home &&
	    !cred->min_dl_bandwidth_roaming &&
	    !cred->min_ul_bandwidth_roaming)
		return 0; /* No bandwidth constraint specified */

	if (bss->anqp == NULL || bss->anqp->hs20_wan_metrics == NULL)
		return 0; /* No WAN Metrics known - ignore constraint */

	wan = wpabuf_head(bss->anqp->hs20_wan_metrics);
	wan_info = wan[0];
	if (wan_info & BIT(3))
		return 1; /* WAN link at capacity */
	lmd = WPA_GET_LE16(wan + 11);
	if (lmd == 0)
		return 0; /* Downlink/Uplink Load was not measured */
	dl_speed = WPA_GET_LE32(wan + 1);
	ul_speed = WPA_GET_LE32(wan + 5);
	dl_load = wan[9];
	ul_load = wan[10];

	/* Estimate available bandwidth as speed * (255 - load) / 255;
	 * divide first for large speeds to avoid 32-bit overflow. */
	if (dl_speed >= 0xffffff)
		dl_bandwidth = dl_speed / 255 * (255 - dl_load);
	else
		dl_bandwidth = dl_speed * (255 - dl_load) / 255;

	if (ul_speed >= 0xffffff)
		ul_bandwidth = ul_speed / 255 * (255 - ul_load);
	else
		ul_bandwidth = ul_speed * (255 - ul_load) / 255;

	res = interworking_home_sp_cred(wpa_s, cred, bss->anqp ?
					bss->anqp->domain_name : NULL);
	if (res > 0) {
		if (cred->min_dl_bandwidth_home > dl_bandwidth)
			return 1;
		if (cred->min_ul_bandwidth_home > ul_bandwidth)
			return 1;
	} else {
		if (cred->min_dl_bandwidth_roaming > dl_bandwidth)
			return 1;
		if (cred->min_ul_bandwidth_roaming > ul_bandwidth)
			return 1;
	}
#endif /* CONFIG_HS20 */

	return 0;
}


/* Return 1 if this home-SP BSS advertises a higher channel utilization than
 * the credential's max_bss_load limit. */
static int cred_over_max_bss_load(struct wpa_supplicant *wpa_s,
				  struct wpa_cred *cred, struct wpa_bss *bss)
{
	const u8 *ie;
	int res;

	if (!cred->max_bss_load)
		return 0; /* No BSS Load constraint specified */

	ie = wpa_bss_get_ie(bss, WLAN_EID_BSS_LOAD);
	if (ie == NULL || ie[1] < 3)
		return 0; /* No BSS Load advertised */

	res = interworking_home_sp_cred(wpa_s, cred, bss->anqp ?
					bss->anqp->domain_name : NULL);
	if (res <= 0)
		return 0; /* Not a home network */

	return ie[4] > cred->max_bss_load;
}


#ifdef CONFIG_HS20

/* Scan Connection Capability tuples for an open status for this protocol. */
static int has_proto_match(const u8 *pos, const u8 *end, u8 proto)
{
	while (end - pos >= 4) {
		if (pos[0] == proto && pos[3] == 1 /* Open */)
			return 1;
		pos += 4;
	}

	return 0;
}


/* As has_proto_match(), but also require a specific port number. */
static int has_proto_port_match(const u8 *pos, const u8 *end, u8 proto,
				u16 port)
{
	while (end - pos >= 4) {
		if (pos[0] == proto && WPA_GET_LE16(&pos[1]) == port &&
		    pos[3] == 1 /* Open */)
			return 1;
		pos += 4;
	}

	return 0;
}

#endif /* CONFIG_HS20 */


/* Return 1 if any of the credential's required connection capabilities
 * (proto/port) are not open on this (non-home) BSS. */
static int cred_conn_capab_missing(struct wpa_supplicant *wpa_s,
				   struct wpa_cred *cred, struct wpa_bss *bss)
{
#ifdef CONFIG_HS20
	int res;
	const u8 *capab, *end;
	unsigned int i, j;
	int *ports;

	if (!cred->num_req_conn_capab)
		return 0; /* No connection capability constraint specified */

	if (bss->anqp == NULL || bss->anqp->hs20_connection_capability == NULL)
		return 0; /* No Connection Capability known - ignore constraint */

	res = interworking_home_sp_cred(wpa_s, cred, bss->anqp ?
/* cred_conn_capab_missing() continued */
					bss->anqp->domain_name : NULL);
	if (res > 0)
		return 0; /* No constraint in home network */

	capab = wpabuf_head(bss->anqp->hs20_connection_capability);
	end = capab + wpabuf_len(bss->anqp->hs20_connection_capability);

	for (i = 0; i < cred->num_req_conn_capab; i++) {
		ports = cred->req_conn_capab_port[i];
		if (!ports) {
			if (!has_proto_match(capab, end,
					     cred->req_conn_capab_proto[i]))
				return 1;
		} else {
			/* Port list is terminated by a negative entry */
			for (j = 0; ports[j] > -1; j++) {
				if (!has_proto_port_match(
					    capab, end,
					    cred->req_conn_capab_proto[i],
					    ports[j]))
					return 1;
			}
		}
	}
#endif /* CONFIG_HS20 */

	return 0;
}


/* Find the best credential with a roaming consortium OI match for this BSS;
 * when ignore_bw, skip bandwidth/load/capability constraints. *excluded is
 * set when the only match was on the credential's excluded SSID list. */
static struct wpa_cred * interworking_credentials_available_roaming_consortium(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded)
{
	struct wpa_cred *cred, *selected = NULL;
	const u8 *ie;
	int is_excluded = 0;

	ie = wpa_bss_get_ie(bss, WLAN_EID_ROAMING_CONSORTIUM);

	if (ie == NULL &&
	    (bss->anqp == NULL || bss->anqp->roaming_consortium == NULL))
		return NULL;

	if (wpa_s->conf->cred == NULL)
		return NULL;

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		if (cred->roaming_consortium_len == 0)
			continue;

		if (!roaming_consortium_match(ie,
					      bss->anqp ?
					      bss->anqp->roaming_consortium :
					      NULL,
					      cred->roaming_consortium,
					      cred->roaming_consortium_len))
			continue;

		if (cred_no_required_oi_match(cred, bss))
			continue;
		if (!ignore_bw && cred_below_min_backhaul(wpa_s, cred, bss))
			continue;
		if (!ignore_bw && cred_over_max_bss_load(wpa_s, cred, bss))
			continue;
		if (!ignore_bw && cred_conn_capab_missing(wpa_s, cred, bss))
			continue;
		if (cred_excluded_ssid(cred, bss)) {
			/* Remember an excluded match only as a fallback */
			if (excluded == NULL)
				continue;
			if (selected == NULL) {
				selected = cred;
				is_excluded = 1;
			}
		} else {
			if (selected == NULL || is_excluded ||
			    cred_prio_cmp(selected, cred) < 0) {
				selected = cred;
				is_excluded = 0;
			}
		}
	}

	if (excluded)
		*excluded = is_excluded;

	return selected;
}


/* Fill in EAP parameters (identity, passwords, certificates, phase1/2) on
 * the profile from the credential; ttls selects anonymous NAI handling. */
static int interworking_set_eap_params(struct wpa_ssid *ssid,
				       struct wpa_cred *cred, int ttls)
{
	if (cred->eap_method) {
		ttls = cred->eap_method->vendor == EAP_VENDOR_IETF &&
			cred->eap_method->method == EAP_TYPE_TTLS;

		os_free(ssid->eap.eap_methods);
		ssid->eap.eap_methods =
			os_malloc(sizeof(struct eap_method_type) * 2);
		if (ssid->eap.eap_methods == NULL)
			return -1;
		os_memcpy(ssid->eap.eap_methods, cred->eap_method,
			  sizeof(*cred->eap_method));
		ssid->eap.eap_methods[1].vendor = EAP_VENDOR_IETF;
		ssid->eap.eap_methods[1].method = EAP_TYPE_NONE;
	}

	if (ttls && cred->username && cred->username[0]) {
		const char *pos;
		char *anon;
		/* Use anonymous NAI in Phase 1 */
		pos = os_strchr(cred->username, '@');
		if (pos) {
			size_t buflen = 9 + os_strlen(pos) + 1;
			anon = os_malloc(buflen);
			if (anon == NULL)
				return -1;
			os_snprintf(anon, buflen, "anonymous%s", pos);
		} else if (cred->realm) {
			size_t buflen = 10 + os_strlen(cred->realm) + 1;
			anon = os_malloc(buflen);
			if (anon == NULL)
				return -1;
			os_snprintf(anon, buflen, "anonymous@%s", cred->realm);
		} else {
			anon = os_strdup("anonymous");
			if (anon == NULL)
				return -1;
		}
		if (wpa_config_set_quoted(ssid, "anonymous_identity", anon) <
		    0) {
			os_free(anon);
			return -1;
		}
		os_free(anon);
	}

	if (!ttls && cred->username && cred->username[0] && cred->realm &&
	    !os_strchr(cred->username,
/* interworking_set_eap_params() continued: build identity as user@realm
 * when no TTLS anonymous identity is used */
	       '@')) {
		char *id;
		size_t buflen;
		int res;

		buflen = os_strlen(cred->username) + 1 +
			os_strlen(cred->realm) + 1;

		id = os_malloc(buflen);
		if (!id)
			return -1;
		os_snprintf(id, buflen, "%s@%s", cred->username, cred->realm);
		res = wpa_config_set_quoted(ssid, "identity", id);
		os_free(id);
		if (res < 0)
			return -1;
	} else if (cred->username && cred->username[0] &&
		   wpa_config_set_quoted(ssid, "identity", cred->username) < 0)
		return -1;

	if (cred->password && cred->password[0]) {
		/* ext_password stores a reference, not a quoted literal */
		if (cred->ext_password &&
		    wpa_config_set(ssid, "password", cred->password, 0) < 0)
			return -1;
		if (!cred->ext_password &&
		    wpa_config_set_quoted(ssid, "password", cred->password) <
		    0)
			return -1;
	}

	if (cred->client_cert && cred->client_cert[0] &&
	    wpa_config_set_quoted(ssid, "client_cert", cred->client_cert) < 0)
		return -1;

#ifdef ANDROID
	if (cred->private_key &&
	    os_strncmp(cred->private_key, "keystore://", 11) == 0) {
		/* Use OpenSSL engine configuration for Android keystore */
		if (wpa_config_set_quoted(ssid, "engine_id", "keystore") < 0 ||
		    wpa_config_set_quoted(ssid, "key_id",
					  cred->private_key + 11) < 0 ||
		    wpa_config_set(ssid, "engine", "1", 0) < 0)
			return -1;
	} else
#endif /* ANDROID */
	if (cred->private_key && cred->private_key[0] &&
	    wpa_config_set_quoted(ssid, "private_key", cred->private_key) < 0)
		return -1;

	if (cred->private_key_passwd && cred->private_key_passwd[0] &&
	    wpa_config_set_quoted(ssid, "private_key_passwd",
				  cred->private_key_passwd) < 0)
		return -1;

	if (cred->phase1) {
		os_free(ssid->eap.phase1);
		ssid->eap.phase1 = os_strdup(cred->phase1);
	}
	if (cred->phase2) {
		os_free(ssid->eap.phase2);
		ssid->eap.phase2 = os_strdup(cred->phase2);
	}

	if (cred->ca_cert && cred->ca_cert[0] &&
	    wpa_config_set_quoted(ssid, "ca_cert", cred->ca_cert) < 0)
		return -1;

	if (cred->domain_suffix_match && cred->domain_suffix_match[0] &&
	    wpa_config_set_quoted(ssid, "domain_suffix_match",
				  cred->domain_suffix_match) < 0)
		return -1;

	ssid->eap.ocsp = cred->ocsp;

	return 0;
}


/* Create a temporary network profile for this BSS based on a roaming
 * consortium credential match. Returns the network id or -1. */
static int interworking_connect_roaming_consortium(
	struct wpa_supplicant *wpa_s, struct wpa_cred *cred,
	struct wpa_bss *bss, int only_add)
{
	struct wpa_ssid *ssid;

	wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Connect with " MACSTR
		" based on roaming consortium match", MAC2STR(bss->bssid));

	if (already_connected(wpa_s, cred, bss)) {
		wpa_msg(wpa_s, MSG_INFO, INTERWORKING_ALREADY_CONNECTED MACSTR,
			MAC2STR(bss->bssid));
		return wpa_s->current_ssid->id;
	}

	remove_duplicate_network(wpa_s, cred, bss);

	ssid = wpa_config_add_network(wpa_s->conf);
	if (ssid == NULL)
		return -1;
	ssid->parent_cred = cred;
	wpas_notify_network_added(wpa_s, ssid);
	wpa_config_set_network_defaults(ssid);
	ssid->priority = cred->priority;
	ssid->temporary = 1;
	ssid->ssid = os_zalloc(bss->ssid_len + 1);
	if (ssid->ssid == NULL)
		goto fail;
	os_memcpy(ssid->ssid, bss->ssid, bss->ssid_len);
	ssid->ssid_len = bss->ssid_len;

	if (interworking_set_hs20_params(wpa_s, ssid) < 0)
		goto fail;

	if (cred->eap_method == NULL) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: No EAP method set for credential using roaming consortium");
		goto fail;
	}

	if (interworking_set_eap_params(
		    ssid, cred,
		    cred->eap_method->vendor == EAP_VENDOR_IETF &&
		    cred->eap_method->method == EAP_TYPE_TTLS) < 0)
		goto fail;

	wpa_s->next_ssid = ssid;
	wpa_config_update_prio_list(wpa_s->conf);
	if (!only_add)
		interworking_reconnect(wpa_s);

	return ssid->id;

fail:
	wpas_notify_network_removed(wpa_s, ssid);
	wpa_config_remove_network(wpa_s->conf, ssid->id);
	return -1;
}


/* Connect (or just add a profile, when only_add) to the given BSS using the
 * highest priority matching credential (roaming consortium, 3GPP, or NAI
 * Realm). Returns the created network id, 0 if already connected, or -1. */
int interworking_connect(struct wpa_supplicant *wpa_s, struct wpa_bss *bss,
			 int only_add)
{
	struct wpa_cred *cred, *cred_rc, *cred_3gpp;
	struct wpa_ssid *ssid;
	struct nai_realm *realm;
	struct nai_realm_eap *eap = NULL;
	u16 count, i;
	char buf[100];
	int excluded = 0, *excl = &excluded;
	const char *name;

	if (wpa_s->conf->cred == NULL || bss == NULL)
		return -1;
	if (disallowed_bssid(wpa_s, bss->bssid) ||
	    disallowed_ssid(wpa_s, bss->ssid, bss->ssid_len)) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Reject connection to disallowed BSS "
			MACSTR, MAC2STR(bss->bssid));
/* interworking_connect() continued */
		return -1;
	}

	wpa_printf(MSG_DEBUG, "Interworking: Considering BSS " MACSTR
		   " for connection", MAC2STR(bss->bssid));

	if (!wpa_bss_get_ie(bss, WLAN_EID_RSN)) {
		/*
		 * We currently support only HS 2.0 networks and those are
		 * required to use WPA2-Enterprise.
		 */
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Network does not use RSN");
		return -1;
	}

	/* First pass: enforce bandwidth/load/capability constraints */
	cred_rc = interworking_credentials_available_roaming_consortium(
		wpa_s, bss, 0, excl);
	if (cred_rc) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Highest roaming consortium matching credential priority %d sp_priority %d",
			cred_rc->priority, cred_rc->sp_priority);
		if (excl && !(*excl))
			excl = NULL;
	}

	cred = interworking_credentials_available_realm(wpa_s, bss, 0, excl);
	if (cred) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Highest NAI Realm list matching credential priority %d sp_priority %d",
			cred->priority, cred->sp_priority);
		if (excl && !(*excl))
			excl = NULL;
	}

	cred_3gpp = interworking_credentials_available_3gpp(wpa_s, bss, 0,
							    excl);
	if (cred_3gpp) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Highest 3GPP matching credential priority %d sp_priority %d",
			cred_3gpp->priority, cred_3gpp->sp_priority);
		if (excl && !(*excl))
			excl = NULL;
	}

	if (!cred_rc && !cred && !cred_3gpp) {
		/* Second pass: relax the BW/load constraints */
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: No full credential matches - consider options without BW(etc.) limits");
		cred_rc = interworking_credentials_available_roaming_consortium(
			wpa_s, bss, 1, excl);
		if (cred_rc) {
			wpa_msg(wpa_s, MSG_DEBUG,
				"Interworking: Highest roaming consortium matching credential priority %d sp_priority %d (ignore BW)",
				cred_rc->priority, cred_rc->sp_priority);
			if (excl && !(*excl))
				excl = NULL;
		}

		cred = interworking_credentials_available_realm(wpa_s, bss, 1,
								excl);
		if (cred) {
			wpa_msg(wpa_s, MSG_DEBUG,
				"Interworking: Highest NAI Realm list matching credential priority %d sp_priority %d (ignore BW)",
				cred->priority, cred->sp_priority);
			if (excl && !(*excl))
				excl = NULL;
		}

		cred_3gpp = interworking_credentials_available_3gpp(wpa_s, bss,
								    1, excl);
		if (cred_3gpp) {
			wpa_msg(wpa_s, MSG_DEBUG,
				"Interworking: Highest 3GPP matching credential priority %d sp_priority %d (ignore BW)",
				cred_3gpp->priority, cred_3gpp->sp_priority);
			if (excl && !(*excl))
				excl = NULL;
		}
	}

	/* Dispatch to the highest priority credential type */
	if (cred_rc &&
	    (cred == NULL || cred_prio_cmp(cred_rc, cred) >= 0) &&
	    (cred_3gpp == NULL || cred_prio_cmp(cred_rc, cred_3gpp) >= 0))
		return interworking_connect_roaming_consortium(wpa_s, cred_rc,
							       bss, only_add);

	if (cred_3gpp &&
	    (cred == NULL || cred_prio_cmp(cred_3gpp, cred) >= 0)) {
		return interworking_connect_3gpp(wpa_s, cred_3gpp, bss,
						 only_add);
	}

	if (cred == NULL) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: No matching credentials found for "
			MACSTR, MAC2STR(bss->bssid));
		return -1;
	}

	/* NAI Realm credential: pick a realm entry with a usable EAP method */
	realm = nai_realm_parse(bss->anqp ? bss->anqp->nai_realm : NULL,
				&count);
	if (realm == NULL) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Could not parse NAI Realm list from "
			MACSTR, MAC2STR(bss->bssid));
		return -1;
	}

	for (i = 0; i < count; i++) {
		if (!nai_realm_match(&realm[i], cred->realm))
			continue;
		eap = nai_realm_find_eap(wpa_s, cred, &realm[i]);
		if (eap)
			break;
	}

	if (!eap) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: No matching credentials and EAP method found for "
			MACSTR, MAC2STR(bss->bssid));
		nai_realm_free(realm, count);
		return -1;
	}

	wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Connect with " MACSTR,
		MAC2STR(bss->bssid));

	if (already_connected(wpa_s, cred, bss)) {
		wpa_msg(wpa_s, MSG_INFO, INTERWORKING_ALREADY_CONNECTED MACSTR,
			MAC2STR(bss->bssid));
		nai_realm_free(realm, count);
		return 0;
	}

	remove_duplicate_network(wpa_s, cred, bss);

	ssid = wpa_config_add_network(wpa_s->conf);
	if (ssid == NULL) {
		nai_realm_free(realm, count);
		return -1;
	}
	ssid->parent_cred = cred;
	wpas_notify_network_added(wpa_s, ssid);
	wpa_config_set_network_defaults(ssid);
	ssid->priority = cred->priority;
	ssid->temporary = 1;
	ssid->ssid = os_zalloc(bss->ssid_len + 1);
	if (ssid->ssid == NULL)
		goto fail;
	os_memcpy(ssid->ssid, bss->ssid, bss->ssid_len);
	ssid->ssid_len = bss->ssid_len;

	if (interworking_set_hs20_params(wpa_s, ssid) < 0)
		goto fail;

	if (wpa_config_set(ssid, "eap",
			   eap_get_name(EAP_VENDOR_IETF, eap->method), 0) < 0)
		goto fail;

	/* Configure Phase 2 based on the advertised inner method */
	switch (eap->method) {
	case EAP_TYPE_TTLS:
		if (eap->inner_method) {
			name = eap_get_name(EAP_VENDOR_IETF, eap->inner_method);
			if (!name)
				goto fail;
			os_snprintf(buf, sizeof(buf), "\"autheap=%s\"", name);
			if (wpa_config_set(ssid, "phase2", buf, 0) < 0)
				goto fail;
			break;
		}
		switch (eap->inner_non_eap) {
		case NAI_REALM_INNER_NON_EAP_PAP:
			if (wpa_config_set(ssid, "phase2", "\"auth=PAP\"", 0) <
			    0)
				goto fail;
			break;
		case NAI_REALM_INNER_NON_EAP_CHAP:
			if (wpa_config_set(ssid, "phase2", "\"auth=CHAP\"", 0)
			    < 0)
				goto fail;
			break;
		case NAI_REALM_INNER_NON_EAP_MSCHAP:
			if (wpa_config_set(ssid, "phase2", "\"auth=MSCHAP\"",
					   0) < 0)
				goto fail;
			break;
		case NAI_REALM_INNER_NON_EAP_MSCHAPV2:
			if (wpa_config_set(ssid, "phase2", "\"auth=MSCHAPV2\"",
					   0) < 0)
				goto fail;
			break;
		default:
			/* EAP params were not set - assume TTLS/MSCHAPv2 */
			if (wpa_config_set(ssid, "phase2", "\"auth=MSCHAPV2\"",
					   0) < 0)
				goto fail;
			break;
		}
		break;
	case EAP_TYPE_PEAP:
	case EAP_TYPE_FAST:
		if (wpa_config_set(ssid, "phase1", "\"fast_provisioning=2\"",
				   0) < 0)
			goto fail;
		if (wpa_config_set(ssid, "pac_file",
				   "\"blob://pac_interworking\"", 0) < 0)
			goto fail;
		name = eap_get_name(EAP_VENDOR_IETF,
				    eap->inner_method ? eap->inner_method :
				    EAP_TYPE_MSCHAPV2);
		if (name == NULL)
			goto fail;
		os_snprintf(buf, sizeof(buf), "\"auth=%s\"", name);
		if (wpa_config_set(ssid, "phase2", buf, 0) < 0)
			goto fail;
		break;
	case EAP_TYPE_TLS:
		break;
	}

	if (interworking_set_eap_params(ssid, cred,
					eap->method == EAP_TYPE_TTLS) < 0)
		goto fail;

	nai_realm_free(realm, count);

	wpa_s->next_ssid = ssid;
	wpa_config_update_prio_list(wpa_s->conf);
	if (!only_add)
		interworking_reconnect(wpa_s);

	return ssid->id;

fail:
	wpas_notify_network_removed(wpa_s, ssid);
	wpa_config_remove_network(wpa_s->conf, ssid->id);
	nai_realm_free(realm, count);
	return -1;
}


#ifdef PCSC_FUNCS
/* Read and cache the IMSI and MNC length from the smart card; on failure the
 * scard context is torn down. */
static int interworking_pcsc_read_imsi(struct wpa_supplicant *wpa_s)
{
	size_t len;

	if (wpa_s->imsi[0] && wpa_s->mnc_len)
		return 0;

	len = sizeof(wpa_s->imsi) - 1;
	if (scard_get_imsi(wpa_s->scard, wpa_s->imsi, &len)) {
		scard_deinit(wpa_s->scard);
		wpa_s->scard = NULL;
		wpa_msg(wpa_s, MSG_ERROR, "Could not read IMSI");
		return -1;
	}

	wpa_s->imsi[len] = '\0';
	wpa_s->mnc_len = scard_get_mnc_len(wpa_s->scard);
	wpa_printf(MSG_DEBUG, "SCARD: IMSI %s (MNC length %d)",
		   wpa_s->imsi, wpa_s->mnc_len);

	return 0;
}
#endif /* PCSC_FUNCS */


/* Find the best credential whose IMSI PLMN matches the BSS 3GPP ANQP info;
 * *excluded is set when the only match was on an excluded SSID. */
static struct wpa_cred * interworking_credentials_available_3gpp(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded)
{
	struct wpa_cred *selected = NULL;
#ifdef INTERWORKING_3GPP
	struct wpa_cred *cred;
	int ret;
	int is_excluded = 0;

	if
/* interworking_credentials_available_3gpp() continued */
	    (bss->anqp == NULL || bss->anqp->anqp_3gpp == NULL) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"interworking-avail-3gpp: not avail, anqp: %p anqp_3gpp: %p",
			bss->anqp, bss->anqp ? bss->anqp->anqp_3gpp : NULL);
		return NULL;
	}

#ifdef CONFIG_EAP_PROXY
	if (!wpa_s->imsi[0]) {
		size_t len;
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: IMSI not available - try to read again through eap_proxy");
		wpa_s->mnc_len = eapol_sm_get_eap_proxy_imsi(wpa_s->eapol, -1,
							     wpa_s->imsi,
							     &len);
		if (wpa_s->mnc_len > 0) {
			wpa_s->imsi[len] = '\0';
			wpa_msg(wpa_s, MSG_DEBUG,
				"eap_proxy: IMSI %s (MNC length %d)",
				wpa_s->imsi, wpa_s->mnc_len);
		} else {
			wpa_msg(wpa_s, MSG_DEBUG,
				"eap_proxy: IMSI not available");
		}
	}
#endif /* CONFIG_EAP_PROXY */

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		char *sep;
		const char *imsi;
		int mnc_len;
		char imsi_buf[16];
		size_t msin_len;

#ifdef PCSC_FUNCS
		if (cred->pcsc && wpa_s->scard) {
			if (interworking_pcsc_read_imsi(wpa_s) < 0)
				continue;
			imsi = wpa_s->imsi;
			mnc_len = wpa_s->mnc_len;
			goto compare;
		}
#endif /* PCSC_FUNCS */
#ifdef CONFIG_EAP_PROXY
		if (cred->pcsc && wpa_s->mnc_len > 0 && wpa_s->imsi[0]) {
			imsi = wpa_s->imsi;
			mnc_len = wpa_s->mnc_len;
			goto compare;
		}
#endif /* CONFIG_EAP_PROXY */

		if (cred->imsi == NULL || !cred->imsi[0] ||
		    (!wpa_s->conf->external_sim &&
		     (cred->milenage == NULL || !cred->milenage[0])))
			continue;

		/* Credential IMSI is stored as <MCC+MNC>-<MSIN>; rebuild a
		 * plain IMSI string without the separator into imsi_buf. */
		sep = os_strchr(cred->imsi, '-');
		if (sep == NULL ||
		    (sep - cred->imsi != 5 && sep - cred->imsi != 6))
			continue;
		mnc_len = sep - cred->imsi - 3;
		os_memcpy(imsi_buf, cred->imsi, 3 + mnc_len);
		sep++;
		/* NOTE(review): msin_len is measured from the full cred->imsi
		 * (prefix + '-' + MSIN) but the copy below reads from sep,
		 * the MSIN substring only. The clamp against sizeof(imsi_buf)
		 * bounds the write into imsi_buf, but when it does not kick
		 * in the os_memcpy() can read past the end of the MSIN
		 * string - presumably this was meant to be os_strlen(sep);
		 * confirm against upstream hostap. */
		msin_len = os_strlen(cred->imsi);
		if (3 + mnc_len + msin_len >= sizeof(imsi_buf) - 1)
			msin_len = sizeof(imsi_buf) - 3 - mnc_len - 1;
		os_memcpy(&imsi_buf[3 + mnc_len], sep, msin_len);
		imsi_buf[3 + mnc_len + msin_len] = '\0';
		imsi = imsi_buf;

#if defined(PCSC_FUNCS) || defined(CONFIG_EAP_PROXY)
	compare:
#endif /* PCSC_FUNCS || CONFIG_EAP_PROXY */
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Parsing 3GPP info from " MACSTR,
			MAC2STR(bss->bssid));
		ret = plmn_id_match(bss->anqp->anqp_3gpp, imsi, mnc_len);
		wpa_msg(wpa_s, MSG_DEBUG, "PLMN match %sfound",
			ret ? "" : "not ");
		if (ret) {
			if (cred_no_required_oi_match(cred, bss))
				continue;
			if (!ignore_bw &&
			    cred_below_min_backhaul(wpa_s, cred, bss))
				continue;
			if (!ignore_bw &&
			    cred_over_max_bss_load(wpa_s, cred, bss))
				continue;
			if (!ignore_bw &&
			    cred_conn_capab_missing(wpa_s, cred, bss))
				continue;
			if (cred_excluded_ssid(cred, bss)) {
				if (excluded == NULL)
					continue;
				if (selected == NULL) {
					selected = cred;
					is_excluded = 1;
				}
			} else {
				if (selected == NULL || is_excluded ||
				    cred_prio_cmp(selected, cred) < 0) {
					selected = cred;
					is_excluded = 0;
				}
			}
		}
	}

	if (excluded)
		*excluded = is_excluded;
#endif /* INTERWORKING_3GPP */

	return selected;
}


/* Find the best credential whose realm matches an advertised NAI Realm with
 * a usable EAP method; *excluded reports excluded-SSID-only matches. */
static struct wpa_cred * interworking_credentials_available_realm(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded)
{
	struct wpa_cred *cred, *selected = NULL;
	struct nai_realm *realm;
	u16 count, i;
	int is_excluded = 0;

	if (bss->anqp == NULL || bss->anqp->nai_realm == NULL)
		return NULL;

	if (wpa_s->conf->cred == NULL)
		return NULL;

	wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Parsing NAI Realm list from "
		MACSTR, MAC2STR(bss->bssid));
	realm = nai_realm_parse(bss->anqp->nai_realm, &count);
	if (realm == NULL) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Could not parse NAI Realm list from "
			MACSTR, MAC2STR(bss->bssid));
		return NULL;
	}

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		if (cred->realm == NULL)
			continue;

		for (i = 0; i < count; i++) {
			if (!nai_realm_match(&realm[i], cred->realm))
				continue;
			if (nai_realm_find_eap(wpa_s, cred, &realm[i])) {
				if (cred_no_required_oi_match(cred, bss))
					continue;
				if (!ignore_bw &&
				    cred_below_min_backhaul(wpa_s, cred, bss))
					continue;
				if (!ignore_bw &&
				    cred_over_max_bss_load(wpa_s, cred, bss))
					continue;
				if (!ignore_bw &&
				    cred_conn_capab_missing(wpa_s, cred, bss))
					continue;
				if (cred_excluded_ssid(cred, bss)) {
					if (excluded == NULL)
						continue;
					if (selected == NULL) {
						selected
/* interworking_credentials_available_realm() continued */
						= cred;
						is_excluded = 1;
					}
				} else {
					if (selected == NULL || is_excluded ||
					    cred_prio_cmp(selected, cred) < 0)
					{
						selected = cred;
						is_excluded = 0;
					}
				}
				break;
			} else {
				wpa_msg(wpa_s, MSG_DEBUG,
					"Interworking: realm-find-eap returned false");
			}
		}
	}

	nai_realm_free(realm, count);

	if (excluded)
		*excluded = is_excluded;

	return selected;
}


/* Combine the realm, 3GPP, and roaming consortium credential searches and
 * return the best overall match (preferring non-excluded matches, then
 * higher credential priority). */
static struct wpa_cred * interworking_credentials_available_helper(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int ignore_bw,
	int *excluded)
{
	struct wpa_cred *cred, *cred2;
	int excluded1, excluded2 = 0;

	if (disallowed_bssid(wpa_s, bss->bssid) ||
	    disallowed_ssid(wpa_s, bss->ssid, bss->ssid_len)) {
		wpa_printf(MSG_DEBUG,
			   "Interworking: Ignore disallowed BSS " MACSTR,
			   MAC2STR(bss->bssid));
		return NULL;
	}

	cred = interworking_credentials_available_realm(wpa_s, bss, ignore_bw,
							&excluded1);
	cred2 = interworking_credentials_available_3gpp(wpa_s, bss, ignore_bw,
							&excluded2);
	if (cred && cred2 &&
	    (cred_prio_cmp(cred2, cred) >= 0 || (!excluded2 && excluded1))) {
		cred = cred2;
		excluded1 = excluded2;
	}
	if (!cred) {
		cred = cred2;
		excluded1 = excluded2;
	}

	cred2 = interworking_credentials_available_roaming_consortium(
		wpa_s, bss, ignore_bw, &excluded2);
	if (cred && cred2 &&
	    (cred_prio_cmp(cred2, cred) >= 0 || (!excluded2 && excluded1))) {
		cred = cred2;
		excluded1 = excluded2;
	}
	if (!cred) {
		cred = cred2;
		excluded1 = excluded2;
	}

	if (excluded)
		*excluded = excluded1;
	return cred;
}


/* Find a matching credential for the BSS; try first with constraints
 * enforced and fall back to ignoring bandwidth/load limits. */
static struct wpa_cred * interworking_credentials_available(
	struct wpa_supplicant *wpa_s, struct wpa_bss *bss, int *excluded)
{
	struct wpa_cred *cred;

	if (excluded)
		*excluded = 0;
	cred = interworking_credentials_available_helper(wpa_s, bss, 0,
							 excluded);
	if (cred)
		return cred;
	return interworking_credentials_available_helper(wpa_s, bss, 1,
							 excluded);
}


/* Return 1 if the ANQP Domain Name list contains the given domain, or,
 * unless exact_match, a FQDN for which domain is a parent domain. */
int domain_name_list_contains(struct wpabuf *domain_names,
			      const char *domain, int exact_match)
{
	const u8 *pos, *end;
	size_t len;

	len = os_strlen(domain);
	pos = wpabuf_head(domain_names);
	end = pos + wpabuf_len(domain_names);

	/* List of <length, name> entries */
	while (end - pos > 1) {
		u8 elen;

		elen = *pos++;
		if (elen > end - pos)
			break;

		wpa_hexdump_ascii(MSG_DEBUG, "Interworking: AP domain name",
				  pos, elen);
		if (elen == len &&
		    os_strncasecmp(domain, (const char *) pos, len) == 0)
			return 1;

		if (!exact_match && elen > len && pos[elen - len - 1] == '.') {
			/* Sub-domain match: "<something>.<domain>" */
			const char *ap = (const char *) pos;
			int offset = elen - len;

			if (os_strncasecmp(domain, ap + offset, len) == 0)
				return 1;
		}

		pos += elen;
	}

	return 0;
}


/* Determine whether this credential identifies the AP as the home SP.
 * Returns 1 for a home SP match, 0 for a determined non-match (roaming),
 * and -1 when no determination could be made. */
int interworking_home_sp_cred(struct wpa_supplicant *wpa_s,
			      struct wpa_cred *cred,
			      struct wpabuf *domain_names)
{
	size_t i;
	int ret = -1;
#ifdef INTERWORKING_3GPP
	char nai[100], *realm;

	char *imsi = NULL;
	int mnc_len = 0;
	if (cred->imsi)
		imsi = cred->imsi;
#ifdef PCSC_FUNCS
	else if (cred->pcsc && wpa_s->scard) {
		if (interworking_pcsc_read_imsi(wpa_s) < 0)
			return -1;
		imsi = wpa_s->imsi;
		mnc_len = wpa_s->mnc_len;
	}
#endif /* PCSC_FUNCS */
#ifdef CONFIG_EAP_PROXY
	else if (cred->pcsc && wpa_s->mnc_len > 0 && wpa_s->imsi[0]) {
		imsi = wpa_s->imsi;
		mnc_len = wpa_s->mnc_len;
	}
#endif /* CONFIG_EAP_PROXY */
	if (domain_names && imsi &&
	    build_root_nai(nai, sizeof(nai), imsi, mnc_len, 0) == 0) {
		/* Compare the derived 3GPP realm against the AP domain list */
		realm = os_strchr(nai, '@');
		if (realm)
			realm++;
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Search for match with SIM/USIM domain %s",
			realm);
		if (realm &&
		    domain_name_list_contains(domain_names, realm, 1))
			return 1;
		if (realm)
			ret = 0;
	}
#endif /* INTERWORKING_3GPP */

	if (domain_names == NULL || cred->domain == NULL)
		return ret;

	for (i = 0; i < cred->num_domain; i++) {
		wpa_msg(wpa_s, MSG_DEBUG,
			"Interworking: Search for match with home SP FQDN %s",
			cred->domain[i]);
		if (domain_name_list_contains(domain_names, cred->domain[i],
					      1))
			return 1;
	}

	return 0;
}


/* Check all configured credentials for a home SP match with the AP's
 * advertised domain names. */
static int interworking_home_sp(struct wpa_supplicant *wpa_s,
				struct wpabuf *domain_names)
{
	struct wpa_cred *cred;

	if (domain_names == NULL || wpa_s->conf->cred == NULL)
		return -1;

	for (cred = wpa_s->conf->cred; cred; cred = cred->next) {
		int res =
/* interworking_home_sp() continued */
			interworking_home_sp_cred(wpa_s, cred, domain_names);
		if (res)
			return res;
	}

	return 0;
}


/* Return 1 if any enabled configured network profile matches a scanned
 * infrastructure BSS (SSID comparison only). */
static int interworking_find_network_match(struct wpa_supplicant *wpa_s)
{
	struct wpa_bss *bss;
	struct wpa_ssid *ssid;

	dl_list_for_each(bss, &wpa_s->bss, struct wpa_bss, list) {
		for (ssid = wpa_s->conf->ssid; ssid; ssid = ssid->next) {
			if (wpas_network_disabled(wpa_s, ssid) ||
			    ssid->mode != WPAS_MODE_INFRA)
				continue;
			if (ssid->ssid_len != bss->ssid_len ||
			    os_memcmp(ssid->ssid, bss->ssid,
				      ssid->ssid_len) != 0)
				continue;
			/*
			 * TODO: Consider more accurate matching of security
			 * configuration similarly to what is done in events.c
			 */
			return 1;
		}
	}

	return 0;
}


/* Return 1 if this roaming partner entry matches the AP's domain names. */
static int roaming_partner_match(struct wpa_supplicant *wpa_s,
				 struct roaming_partner *partner,
				 struct wpabuf *domain_names)
{
	wpa_printf(MSG_DEBUG,
		   "Interworking: Comparing roaming_partner info fqdn='%s' exact_match=%d priority=%u country='%s'",
		   partner->fqdn, partner->exact_match, partner->priority,
		   partner->country);
	wpa_hexdump_ascii(MSG_DEBUG, "Interworking: Domain names",
			  wpabuf_head(domain_names),
			  wpabuf_len(domain_names));
	if (!domain_name_list_contains(domain_names, partner->fqdn,
				       partner->exact_match))
		return 0;
	/* TODO: match Country */
	return 1;
}


/* Determine roaming-partner priority for a BSS: 0 = home SP (most
 * preferred), 128 = default when no preference information is available,
 * otherwise the configured roaming partner priority. */
static u8 roaming_prio(struct wpa_supplicant *wpa_s, struct wpa_cred *cred,
		       struct wpa_bss *bss)
{
	size_t i;

	if (bss->anqp == NULL || bss->anqp->domain_name == NULL) {
		wpa_printf(MSG_DEBUG,
			   "Interworking: No ANQP domain name info -> use default roaming partner priority 128");
		return 128; /* cannot check preference with domain name */
	}

	if (interworking_home_sp_cred(wpa_s, cred,
				      bss->anqp->domain_name) > 0) {
		wpa_printf(MSG_DEBUG,
			   "Interworking: Determined to be home SP -> use maximum preference 0 as roaming partner priority");
		return 0; /* max preference for home SP network */
	}

	for (i = 0; i < cred->num_roaming_partner; i++) {
		if (roaming_partner_match(wpa_s, &cred->roaming_partner[i],
					  bss->anqp->domain_name)) {
			wpa_printf(MSG_DEBUG,
				   "Interworking: Roaming partner preference match - priority %u",
				   cred->roaming_partner[i].priority);
			return cred->roaming_partner[i].priority;
		}
	}

	wpa_printf(MSG_DEBUG,
		   "Interworking: No roaming partner preference match - use default roaming partner priority 128");
	return 128;
}


/* Check whether another BSS is operated by a more preferred (lower priority
 * value) roaming partner; only replace the current selection when it does
 * not introduce new constraint mismatches. */
static struct wpa_bss * pick_best_roaming_partner(struct wpa_supplicant *wpa_s,
						  struct wpa_bss *selected,
						  struct wpa_cred *cred)
{
	struct wpa_bss *bss;
	u8 best_prio, prio;
	struct wpa_cred *cred2;

	/*
	 * Check if any other BSS is operated by a more preferred roaming
	 * partner.
	 */

	best_prio = roaming_prio(wpa_s, cred, selected);
	wpa_printf(MSG_DEBUG,
		   "Interworking: roaming_prio=%u for selected BSS " MACSTR
		   " (cred=%d)",
		   best_prio, MAC2STR(selected->bssid), cred->id);

	dl_list_for_each(bss, &wpa_s->bss, struct wpa_bss, list) {
		if (bss == selected)
			continue;
		cred2 = interworking_credentials_available(wpa_s, bss, NULL);
		if (!cred2)
			continue;

		if (!wpa_bss_get_ie(bss, WLAN_EID_RSN))
			continue;

		prio = roaming_prio(wpa_s, cred2, bss);
		wpa_printf(MSG_DEBUG,
			   "Interworking: roaming_prio=%u for BSS " MACSTR
			   " (cred=%d)",
			   prio, MAC2STR(bss->bssid), cred2->id);
		if (prio < best_prio) {
			int bh1, bh2, load1, load2, conn1, conn2;

			bh1 = cred_below_min_backhaul(wpa_s, cred, selected);
			load1 = cred_over_max_bss_load(wpa_s, cred, selected);
			conn1 = cred_conn_capab_missing(wpa_s, cred, selected);
			bh2 = cred_below_min_backhaul(wpa_s, cred2, bss);
			load2 = cred_over_max_bss_load(wpa_s, cred2, bss);
			conn2 = cred_conn_capab_missing(wpa_s, cred2, bss);
			wpa_printf(MSG_DEBUG,
				   "Interworking: old: %d %d %d new: %d %d %d",
				   bh1, load1, conn1, bh2, load2, conn2);
			if (bh1 || load1 || conn1 ||
			    !(bh2 || load2 || conn2)) {
				wpa_printf(MSG_DEBUG,
					   "Interworking: Better roaming partner " MACSTR " selected",
					   MAC2STR(bss->bssid));
				best_prio = prio;
				selected = bss;
			}
		}
	}

	return selected;
}


/* Evaluate all scanned BSSes against configured credentials and pick the
 * network to use: prefer home SP networks, then credential priority;
 * BSSes failing BW/load/capability limits are kept as secondary fallbacks
 * (selected2/selected2_home). */
static void interworking_select_network(struct wpa_supplicant *wpa_s)
{
	struct wpa_bss *bss, *selected = NULL, *selected_home = NULL;
	struct wpa_bss *selected2 = NULL, *selected2_home = NULL;
	unsigned int count = 0;
	const char *type;
	int res;
	struct wpa_cred *cred, *selected_cred = NULL;
	struct wpa_cred *selected_home_cred = NULL;
	struct wpa_cred *selected2_cred = NULL;
	struct wpa_cred *selected2_home_cred = NULL;

	wpa_s->network_select = 0;

	wpa_printf(MSG_DEBUG,
		   "Interworking: Select network (auto_select=%d)",
		   wpa_s->auto_select);
	dl_list_for_each(bss, &wpa_s->bss, struct wpa_bss, list) {
		int excluded = 0;
		int bh, bss_load, conn_capab;

		cred = interworking_credentials_available(wpa_s, bss,
							  &excluded);
		if (!cred)
			continue;

		if (!wpa_bss_get_ie(bss, WLAN_EID_RSN)) {
			/*
			 * We currently support only HS 2.0 networks and those
			 * are required to use WPA2-Enterprise.
			 */
			wpa_msg(wpa_s, MSG_DEBUG,
				"Interworking: Credential match with " MACSTR
				" but network does not use RSN",
				MAC2STR(bss->bssid));
			continue;
		}
		if (!excluded)
			count++;
		res = interworking_home_sp(wpa_s, bss->anqp ?
					   bss->anqp->domain_name : NULL);
		if (res > 0)
			type = "home";
		else if (res == 0)
			type = "roaming";
		else
			type = "unknown";
		bh = cred_below_min_backhaul(wpa_s, cred, bss);
		bss_load = cred_over_max_bss_load(wpa_s, cred, bss);
		conn_capab = cred_conn_capab_missing(wpa_s, cred, bss);
		wpa_msg(wpa_s, MSG_INFO, "%s" MACSTR
			" type=%s%s%s%s id=%d priority=%d sp_priority=%d",
			excluded ? INTERWORKING_BLACKLISTED : INTERWORKING_AP,
			MAC2STR(bss->bssid), type,
			bh ? " below_min_backhaul=1" : "",
			bss_load ? " over_max_bss_load=1" : "",
			conn_capab ? " conn_capab_missing=1" : "",
			cred->id, cred->priority, cred->sp_priority);
		if (excluded)
			continue;
		if (wpa_s->auto_select ||
		    (wpa_s->conf->auto_interworking &&
		     wpa_s->auto_network_select)) {
			if (bh || bss_load || conn_capab) {
				/* Constraint mismatch: keep as fallback */
				if (selected2_cred == NULL ||
				    cred_prio_cmp(cred, selected2_cred) > 0) {
					wpa_printf(MSG_DEBUG,
						   "Interworking: Mark as selected2");
					selected2 = bss;
					selected2_cred = cred;
				}
				if (res > 0 &&
				    (selected2_home_cred == NULL ||
				     cred_prio_cmp(cred, selected2_home_cred) >
				     0)) {
					wpa_printf(MSG_DEBUG,
						   "Interworking: Mark as selected2_home");
					selected2_home = bss;
					selected2_home_cred = cred;
				}
			} else {
				if (selected_cred == NULL ||
				    cred_prio_cmp(cred, selected_cred) > 0) {
					wpa_printf(MSG_DEBUG,
						   "Interworking: Mark as selected");
					selected = bss;
					selected_cred = cred;
				}
				if (res > 0 &&
				    (selected_home_cred == NULL ||
				     cred_prio_cmp(cred, selected_home_cred) >
				     0)) {
					wpa_printf(MSG_DEBUG,
						   "Interworking: Mark as selected_home");
					selected_home = bss;
					selected_home_cred = cred;
				}
			}
		}
	}

	if (selected_home && selected_home != selected &&
	    selected_home_cred &&
	    (selected_cred == NULL ||
	     cred_prio_cmp(selected_home_cred, selected_cred) >= 0)) {
		/* Prefer network operated by the Home SP */
		wpa_printf(MSG_DEBUG,
			   "Interworking: Overrided selected with selected_home");
		selected = selected_home;
		selected_cred = selected_home_cred;
	}

	if (!selected) {
		if (selected2_home) {
			wpa_printf(MSG_DEBUG,
				   "Interworking: Use home BSS with BW limit mismatch since no other network could be selected");
			selected = selected2_home;
			selected_cred = selected2_home_cred;
		} else if (selected2) {
			wpa_printf(MSG_DEBUG,
				   "Interworking: Use visited BSS with BW limit mismatch since no other network could be selected");
			selected = selected2;
			selected_cred = selected2_cred;
		}
	}

	if (count == 0) {
		/*
		 * No matching network was found based on configured
		 * credentials. Check whether any of the enabled network blocks
		 * have matching APs.
*/ if (interworking_find_network_match(wpa_s)) { wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Possible BSS match for enabled network configurations"); if (wpa_s->auto_select) { interworking_reconnect(wpa_s); return; } } if (wpa_s->auto_network_select) { wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Continue scanning after ANQP fetch"); wpa_supplicant_req_scan(wpa_s, wpa_s->scan_interval, 0); return; } wpa_msg(wpa_s, MSG_INFO, INTERWORKING_NO_MATCH "No network " "with matching credentials found"); if (wpa_s->wpa_state == WPA_SCANNING) wpa_supplicant_set_state(wpa_s, WPA_DISCONNECTED); } if (selected) { wpa_printf(MSG_DEBUG, "Interworking: Selected " MACSTR, MAC2STR(selected->bssid)); selected = pick_best_roaming_partner(wpa_s, selected, selected_cred); wpa_printf(MSG_DEBUG, "Interworking: Selected " MACSTR " (after best roaming partner selection)", MAC2STR(selected->bssid)); wpa_msg(wpa_s, MSG_INFO, INTERWORKING_SELECTED MACSTR, MAC2STR(selected->bssid)); interworking_connect(wpa_s, selected, 0); } else if (wpa_s->wpa_state == WPA_SCANNING) wpa_supplicant_set_state(wpa_s, WPA_DISCONNECTED); } static struct wpa_bss_anqp * interworking_match_anqp_info(struct wpa_supplicant *wpa_s, struct wpa_bss *bss) { struct wpa_bss *other; if (is_zero_ether_addr(bss->hessid)) return NULL; /* Cannot be in the same homegenous ESS */ dl_list_for_each(other, &wpa_s->bss, struct wpa_bss, list) { if (other == bss) continue; if (other->anqp == NULL) continue; if (other->anqp->roaming_consortium == NULL && other->anqp->nai_realm == NULL && other->anqp->anqp_3gpp == NULL && other->anqp->domain_name == NULL) continue; if (!(other->flags & WPA_BSS_ANQP_FETCH_TRIED)) continue; if (os_memcmp(bss->hessid, other->hessid, ETH_ALEN) != 0) continue; if (bss->ssid_len != other->ssid_len || os_memcmp(bss->ssid, other->ssid, bss->ssid_len) != 0) continue; wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Share ANQP data with already fetched BSSID " MACSTR " and " MACSTR, MAC2STR(other->bssid), MAC2STR(bss->bssid)); 
other->anqp->users++;
		return other->anqp; /* share the previously fetched ANQP data */
	}

	return NULL; /* no already-fetched peer in the same homogeneous ESS */
}


/*
 * Pick the next Interworking-capable BSS that has not yet been queried and
 * start an ANQP fetch for it. When no candidates remain, finish the fetch
 * cycle: optionally continue with HS 2.0 OSU provider/icon fetching and,
 * if network selection was requested, run it.
 */
static void interworking_next_anqp_fetch(struct wpa_supplicant *wpa_s)
{
	struct wpa_bss *bss;
	int found = 0;
	const u8 *ie;

	wpa_printf(MSG_DEBUG, "Interworking: next_anqp_fetch - "
		   "fetch_anqp_in_progress=%d fetch_osu_icon_in_progress=%d",
		   wpa_s->fetch_anqp_in_progress,
		   wpa_s->fetch_osu_icon_in_progress);

	if (eloop_terminated() || !wpa_s->fetch_anqp_in_progress) {
		/* eloop is shutting down or the fetch was cancelled */
		wpa_printf(MSG_DEBUG, "Interworking: Stop next-ANQP-fetch");
		return;
	}

#ifdef CONFIG_HS20
	if (wpa_s->fetch_osu_icon_in_progress) {
		wpa_printf(MSG_DEBUG, "Interworking: Next icon (in progress)");
		hs20_next_osu_icon(wpa_s);
		return;
	}
#endif /* CONFIG_HS20 */

	dl_list_for_each(bss, &wpa_s->bss, struct wpa_bss, list) {
		if (!(bss->caps & IEEE80211_CAP_ESS))
			continue;
		/* Extended Capabilities bit 31 (Interworking) must be set */
		ie = wpa_bss_get_ie(bss, WLAN_EID_EXT_CAPAB);
		if (ie == NULL || ie[1] < 4 || !(ie[5] & 0x80))
			continue; /* AP does not support Interworking */
		if (disallowed_bssid(wpa_s, bss->bssid) ||
		    disallowed_ssid(wpa_s, bss->ssid, bss->ssid_len))
			continue; /* Disallowed BSS */

		if (!(bss->flags & WPA_BSS_ANQP_FETCH_TRIED)) {
			if (bss->anqp == NULL) {
				/* Try to reuse data fetched for another BSS
				 * in the same homogeneous ESS */
				bss->anqp = interworking_match_anqp_info(wpa_s,
									 bss);
				if (bss->anqp) {
					/* Shared data already fetched */
					continue;
				}
				bss->anqp = wpa_bss_anqp_alloc();
				if (bss->anqp == NULL)
					break;
			}
			found++;
			bss->flags |= WPA_BSS_ANQP_FETCH_TRIED;
			wpa_msg(wpa_s, MSG_INFO, "Starting ANQP fetch for "
				MACSTR, MAC2STR(bss->bssid));
			/* One query at a time; the response callback
			 * continues the loop */
			interworking_anqp_send_req(wpa_s, bss);
			break;
		}
	}

	if (found == 0) {
#ifdef CONFIG_HS20
		if (wpa_s->fetch_osu_info) {
			if (wpa_s->num_prov_found == 0 &&
			    wpa_s->fetch_osu_waiting_scan &&
			    wpa_s->num_osu_scans < 3) {
				wpa_printf(MSG_DEBUG, "HS 2.0: No OSU providers seen - try to scan again");
				hs20_start_osu_scan(wpa_s);
				return;
			}
			wpa_printf(MSG_DEBUG, "Interworking: Next icon");
			hs20_osu_icon_fetch(wpa_s);
			return;
		}
#endif /* CONFIG_HS20 */
		wpa_msg(wpa_s, MSG_INFO, "ANQP fetch completed");
		wpa_s->fetch_anqp_in_progress = 0;
		if (wpa_s->network_select)
			interworking_select_network(wpa_s);
	}
}


/*
 * Clear the per-BSS fetch-tried markers and schedule the ANQP fetch loop.
 */
void interworking_start_fetch_anqp(struct wpa_supplicant *wpa_s)
{
	struct wpa_bss *bss;

	dl_list_for_each(bss, &wpa_s->bss, struct wpa_bss, list)
		bss->flags &= ~WPA_BSS_ANQP_FETCH_TRIED;

	wpa_s->fetch_anqp_in_progress = 1;

	/*
	 * Start actual ANQP operation from eloop call to make sure the loop
	 * does not end up using excessive recursion.
	 */
	eloop_register_timeout(0, 0, interworking_continue_anqp, wpa_s, NULL);
}


/*
 * Request a full ANQP fetch for all BSSes (no network selection).
 * No-op (returns 0) if a fetch or network selection is already running.
 */
int interworking_fetch_anqp(struct wpa_supplicant *wpa_s)
{
	if (wpa_s->fetch_anqp_in_progress || wpa_s->network_select)
		return 0;

	wpa_s->network_select = 0;
	wpa_s->fetch_all_anqp = 1;
	wpa_s->fetch_osu_info = 0;

	interworking_start_fetch_anqp(wpa_s);

	return 0;
}


/* Cancel an ongoing ANQP fetch cycle, if any. */
void interworking_stop_fetch_anqp(struct wpa_supplicant *wpa_s)
{
	if (!wpa_s->fetch_anqp_in_progress)
		return;

	wpa_s->fetch_anqp_in_progress = 0;
}


/*
 * Build and send an ANQP Query Request (optionally with HS 2.0 and MBO
 * vendor-specific elements) to the given BSS via GAS.
 * Returns 0 on success, -1 on failure.
 */
int anqp_send_req(struct wpa_supplicant *wpa_s, const u8 *dst,
		  u16 info_ids[], size_t num_ids, u32 subtypes,
		  u32 mbo_subtypes)
{
	struct wpabuf *buf;
	struct wpabuf *extra_buf = NULL;
	int ret = 0;
	int freq;
	struct wpa_bss *bss;
	int res;

	bss = wpa_bss_get_bssid(wpa_s, dst);
	if (!bss) {
		wpa_printf(MSG_WARNING,
			   "ANQP: Cannot send query to unknown BSS "
			   MACSTR, MAC2STR(dst));
		return -1;
	}

	/* Make sure this BSS has its own (non-shared) ANQP data storage */
	wpa_bss_anqp_unshare_alloc(bss);
	freq = bss->freq;

	wpa_msg(wpa_s, MSG_DEBUG,
		"ANQP: Query Request to " MACSTR " for %u id(s)",
		MAC2STR(dst), (unsigned int) num_ids);

#ifdef CONFIG_HS20
	if (subtypes != 0) {
		extra_buf = wpabuf_alloc(100);
		if (extra_buf == NULL)
			return -1;
		hs20_put_anqp_req(subtypes, NULL, 0, extra_buf);
	}
#endif /* CONFIG_HS20 */

#ifdef CONFIG_MBO
	if (mbo_subtypes) {
		struct wpabuf *mbo;

		mbo = mbo_build_anqp_buf(wpa_s, bss, mbo_subtypes);
		if (mbo) {
			if (wpabuf_resize(&extra_buf, wpabuf_len(mbo))) {
				wpabuf_free(extra_buf);
				wpabuf_free(mbo);
				return -1;
			}
			wpabuf_put_buf(extra_buf, mbo);
			wpabuf_free(mbo);
		}
	}
#endif /* CONFIG_MBO */

	buf = anqp_build_req(info_ids, num_ids, extra_buf);
	wpabuf_free(extra_buf);
	if (buf == NULL)
		return
-1; res = gas_query_req(wpa_s->gas, dst, freq, 0, buf, anqp_resp_cb, wpa_s); if (res < 0) { wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Failed to send Query Request"); wpabuf_free(buf); ret = -1; } else { wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Query started with dialog token %u", res); } return ret; } static void anqp_add_extra(struct wpa_supplicant *wpa_s, struct wpa_bss_anqp *anqp, u16 info_id, const u8 *data, size_t slen) { struct wpa_bss_anqp_elem *tmp, *elem = NULL; if (!anqp) return; dl_list_for_each(tmp, &anqp->anqp_elems, struct wpa_bss_anqp_elem, list) { if (tmp->infoid == info_id) { elem = tmp; break; } } if (!elem) { elem = os_zalloc(sizeof(*elem)); if (!elem) return; elem->infoid = info_id; dl_list_add(&anqp->anqp_elems, &elem->list); } else { wpabuf_free(elem->payload); } elem->payload = wpabuf_alloc_copy(data, slen); if (!elem->payload) { dl_list_del(&elem->list); os_free(elem); } } static void interworking_parse_rx_anqp_resp(struct wpa_supplicant *wpa_s, struct wpa_bss *bss, const u8 *sa, u16 info_id, const u8 *data, size_t slen, u8 dialog_token) { const u8 *pos = data; struct wpa_bss_anqp *anqp = NULL; u8 type; if (bss) anqp = bss->anqp; switch (info_id) { case ANQP_CAPABILITY_LIST: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " ANQP Capability list", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: Capability list", pos, slen); if (anqp) { wpabuf_free(anqp->capability_list); anqp->capability_list = wpabuf_alloc_copy(pos, slen); } break; case ANQP_VENUE_NAME: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " Venue Name", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: Venue Name", pos, slen); if (anqp) { wpabuf_free(anqp->venue_name); anqp->venue_name = wpabuf_alloc_copy(pos, slen); } break; case ANQP_NETWORK_AUTH_TYPE: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " Network Authentication Type information", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: Network Authentication " "Type", pos, slen); if (anqp) { wpabuf_free(anqp->network_auth_type); anqp->network_auth_type = 
wpabuf_alloc_copy(pos, slen); } break; case ANQP_ROAMING_CONSORTIUM: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " Roaming Consortium list", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: Roaming Consortium", pos, slen); if (anqp) { wpabuf_free(anqp->roaming_consortium); anqp->roaming_consortium = wpabuf_alloc_copy(pos, slen); } break; case ANQP_IP_ADDR_TYPE_AVAILABILITY: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " IP Address Type Availability information", MAC2STR(sa)); wpa_hexdump(MSG_MSGDUMP, "ANQP: IP Address Availability", pos, slen); if (anqp) { wpabuf_free(anqp->ip_addr_type_availability); anqp->ip_addr_type_availability = wpabuf_alloc_copy(pos, slen); } break; case ANQP_NAI_REALM: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " NAI Realm list", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: NAI Realm", pos, slen); if (anqp) { wpabuf_free(anqp->nai_realm); anqp->nai_realm = wpabuf_alloc_copy(pos, slen); } break; case ANQP_3GPP_CELLULAR_NETWORK: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " 3GPP Cellular Network information", MAC2STR(sa)); wpa_hexdump_ascii(MSG_DEBUG, "ANQP: 3GPP Cellular Network", pos, slen); if (anqp) { wpabuf_free(anqp->anqp_3gpp); anqp->anqp_3gpp = wpabuf_alloc_copy(pos, slen); } break; case ANQP_DOMAIN_NAME: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " Domain Name list", MAC2STR(sa)); wpa_hexdump_ascii(MSG_MSGDUMP, "ANQP: Domain Name", pos, slen); if (anqp) { wpabuf_free(anqp->domain_name); anqp->domain_name = wpabuf_alloc_copy(pos, slen); } break; #ifdef CONFIG_FILS case ANQP_FILS_REALM_INFO: wpa_msg(wpa_s, MSG_INFO, RX_ANQP MACSTR " FILS Realm Information", MAC2STR(sa)); wpa_hexdump_ascii(MSG_MSGDUMP, "ANQP: FILS Realm Information", pos, slen); if (anqp) { wpabuf_free(anqp->fils_realm_info); anqp->fils_realm_info = wpabuf_alloc_copy(pos, slen); } break; #endif /* CONFIG_FILS */ case ANQP_VENDOR_SPECIFIC: if (slen < 3) return; switch (WPA_GET_BE24(pos)) { case OUI_WFA: pos += 3; slen -= 3; if (slen < 1) return; type = *pos++; slen--; switch (type) { 
#ifdef CONFIG_HS20 case HS20_ANQP_OUI_TYPE: hs20_parse_rx_hs20_anqp_resp(wpa_s, bss, sa, pos, slen, dialog_token); break; #endif /* CONFIG_HS20 */ #ifdef CONFIG_MBO case MBO_ANQP_OUI_TYPE: mbo_parse_rx_anqp_resp(wpa_s, bss, sa, pos, slen); break; #endif /* CONFIG_MBO */ default: wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Unsupported ANQP vendor type %u", type); break; } break; default: wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Unsupported vendor-specific ANQP OUI %06x", WPA_GET_BE24(pos)); return; } break; default: wpa_msg(wpa_s, MSG_DEBUG, "Interworking: Unsupported ANQP Info ID %u", info_id); anqp_add_extra(wpa_s, anqp, info_id, data, slen); break; } } void anqp_resp_cb(void *ctx, const u8 *dst, u8 dialog_token, enum gas_query_result result, const struct wpabuf *adv_proto, const struct wpabuf *resp, u16 status_code) { struct wpa_supplicant *wpa_s = ctx; const u8 *pos; const u8 *end; u16 info_id; u16 slen; struct wpa_bss *bss = NULL, *tmp; const char *anqp_result = "SUCCESS"; wpa_printf(MSG_DEBUG, "Interworking: anqp_resp_cb dst=" MACSTR " dialog_token=%u result=%d status_code=%u", MAC2STR(dst), dialog_token, result, status_code); if (result != GAS_QUERY_SUCCESS) { #ifdef CONFIG_HS20 if (wpa_s->fetch_osu_icon_in_progress) hs20_icon_fetch_failed(wpa_s); #endif /* CONFIG_HS20 */ anqp_result = "FAILURE"; goto out; } pos = wpabuf_head(adv_proto); if (wpabuf_len(adv_proto) < 4 || pos[0] != WLAN_EID_ADV_PROTO || pos[1] < 2 || pos[3] != ACCESS_NETWORK_QUERY_PROTOCOL) { wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Unexpected Advertisement Protocol in response"); #ifdef CONFIG_HS20 if (wpa_s->fetch_osu_icon_in_progress) hs20_icon_fetch_failed(wpa_s); #endif /* CONFIG_HS20 */ anqp_result = "INVALID_FRAME"; goto out; } /* * If possible, select the BSS entry based on which BSS entry was used * for the request. This can help in cases where multiple BSS entries * may exist for the same AP. 
*/
	dl_list_for_each_reverse(tmp, &wpa_s->bss, struct wpa_bss, list) {
		if (tmp == wpa_s->interworking_gas_bss &&
		    os_memcmp(tmp->bssid, dst, ETH_ALEN) == 0) {
			bss = tmp; /* exact BSS entry used for the request */
			break;
		}
	}
	if (bss == NULL)
		bss = wpa_bss_get_bssid(wpa_s, dst);

	/* Parse the ANQP response payload: a sequence of
	 * (Info ID, Length, Value) elements with little-endian headers */
	pos = wpabuf_head(resp);
	end = pos + wpabuf_len(resp);

	while (pos < end) {
		unsigned int left = end - pos;

		if (left < 4) {
			wpa_msg(wpa_s, MSG_DEBUG, "ANQP: Invalid element");
			anqp_result = "INVALID_FRAME";
			goto out_parse_done;
		}
		info_id = WPA_GET_LE16(pos);
		pos += 2;
		slen = WPA_GET_LE16(pos);
		pos += 2;
		left -= 4;
		if (left < slen) {
			wpa_msg(wpa_s, MSG_DEBUG,
				"ANQP: Invalid element length for Info ID %u",
				info_id);
			anqp_result = "INVALID_FRAME";
			goto out_parse_done;
		}
		interworking_parse_rx_anqp_resp(wpa_s, bss, dst, info_id, pos,
						slen, dialog_token);
		pos += slen;
	}

out_parse_done:
#ifdef CONFIG_HS20
	hs20_notify_parse_done(wpa_s);
#endif /* CONFIG_HS20 */

out:
	wpa_msg(wpa_s, MSG_INFO, ANQP_QUERY_DONE "addr=" MACSTR " result=%s",
		MAC2STR(dst), anqp_result);
}


/* Scan results are available - start the ANQP fetch cycle. */
static void interworking_scan_res_handler(struct wpa_supplicant *wpa_s,
					  struct wpa_scan_results *scan_res)
{
	wpa_msg(wpa_s, MSG_DEBUG,
		"Interworking: Scan results available - start ANQP fetch");
	interworking_start_fetch_anqp(wpa_s);
}


/*
 * Start Interworking network selection: trigger a scan whose results feed
 * the ANQP fetch and, when auto_select is non-zero, automatic connection.
 * freqs ownership is transferred to wpa_s->manual_scan_freqs (may be NULL).
 * Returns 0.
 */
int interworking_select(struct wpa_supplicant *wpa_s, int auto_select,
			int *freqs)
{
	interworking_stop_fetch_anqp(wpa_s);
	wpa_s->network_select = 1;
	wpa_s->auto_network_select = 0;
	wpa_s->auto_select = !!auto_select;
	wpa_s->fetch_all_anqp = 0;
	wpa_s->fetch_osu_info = 0;
	wpa_msg(wpa_s, MSG_DEBUG,
		"Interworking: Start scan for network selection");
	wpa_s->scan_res_handler = interworking_scan_res_handler;
	wpa_s->normal_scans = 0;
	wpa_s->scan_req = MANUAL_SCAN_REQ;
	os_free(wpa_s->manual_scan_freqs);
	wpa_s->manual_scan_freqs = freqs; /* takes ownership */
	wpa_s->after_wps = 0;
	wpa_s->known_wps_freq = 0;
	wpa_supplicant_req_scan(wpa_s, 0, 0);

	return 0;
}


/*
 * Generic GAS query response callback: keep a copy of the most recent and
 * the previous response so that they can be fetched over the control
 * interface later.
 */
static void gas_resp_cb(void *ctx, const u8 *addr, u8 dialog_token,
			enum gas_query_result result,
			const struct wpabuf *adv_proto,
			const struct wpabuf *resp, u16 status_code)
{
	struct wpa_supplicant *wpa_s = ctx;
	struct wpabuf *n;

	wpa_msg(wpa_s, MSG_INFO, GAS_RESPONSE_INFO "addr=" MACSTR
		" dialog_token=%d status_code=%d resp_len=%d",
		MAC2STR(addr), dialog_token, status_code,
		resp ? (int) wpabuf_len(resp) : -1);
	if (!resp)
		return;

	n = wpabuf_dup(resp);
	if (n == NULL)
		return;

	/* Rotate the previously stored response into the prev_* slots */
	wpabuf_free(wpa_s->prev_gas_resp);
	wpa_s->prev_gas_resp = wpa_s->last_gas_resp;
	os_memcpy(wpa_s->prev_gas_addr, wpa_s->last_gas_addr, ETH_ALEN);
	wpa_s->prev_gas_dialog_token = wpa_s->last_gas_dialog_token;

	wpa_s->last_gas_resp = n;
	os_memcpy(wpa_s->last_gas_addr, addr, ETH_ALEN);
	wpa_s->last_gas_dialog_token = dialog_token;
}


/*
 * Build and send a GAS Initial Request frame carrying the given
 * Advertisement Protocol ID and query payload to the destination peer.
 * Returns 0 on success, -1 on failure.
 */
int gas_send_request(struct wpa_supplicant *wpa_s, const u8 *dst,
		     const struct wpabuf *adv_proto,
		     const struct wpabuf *query)
{
	struct wpabuf *buf;
	int ret = 0;
	int freq;
	struct wpa_bss *bss;
	int res;
	size_t len;
	u8 query_resp_len_limit = 0; /* 0 = no response length limit */

	freq = wpa_s->assoc_freq;
	bss = wpa_bss_get_bssid(wpa_s, dst);
	if (bss)
		freq = bss->freq;
	if (freq <= 0)
		return -1; /* operating frequency of the peer is unknown */

	wpa_msg(wpa_s, MSG_DEBUG, "GAS request to " MACSTR " (freq %d MHz)",
		MAC2STR(dst), freq);
	wpa_hexdump_buf(MSG_DEBUG, "Advertisement Protocol ID", adv_proto);
	wpa_hexdump_buf(MSG_DEBUG, "GAS Query", query);

	/* Adv Proto IE header (3) + payload + Query Length field (2) */
	len = 3 + wpabuf_len(adv_proto) + 2;
	if (query)
		len += wpabuf_len(query);
	buf = gas_build_initial_req(0, len);
	if (buf == NULL)
		return -1;

	/* Advertisement Protocol IE */
	wpabuf_put_u8(buf, WLAN_EID_ADV_PROTO);
	wpabuf_put_u8(buf, 1 + wpabuf_len(adv_proto)); /* Length */
	wpabuf_put_u8(buf, query_resp_len_limit & 0x7f);
	wpabuf_put_buf(buf, adv_proto);

	/* GAS Query */
	if (query) {
		wpabuf_put_le16(buf, wpabuf_len(query));
		wpabuf_put_buf(buf, query);
	} else
		wpabuf_put_le16(buf, 0);

	res = gas_query_req(wpa_s->gas, dst, freq, 0, buf, gas_resp_cb,
			    wpa_s);
	if (res < 0) {
		wpa_msg(wpa_s, MSG_DEBUG, "GAS: Failed to send Query Request");
		wpabuf_free(buf);
		ret = -1;
	} else
		wpa_msg(wpa_s, MSG_DEBUG,
			"GAS: Query started with dialog token %u", res);

	return ret;
}
972478.c
/** @file Legacy Region Support Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. Modified by dmazar with support for different chipsets and added newer ones. **/ #include "LegacyRegion2.h" // // Current chipset's tables // UINT32 mVendorDeviceId = 0; STATIC PAM_REGISTER_VALUE *mRegisterValues = NULL; UINT8 mPamPciBus = 0; UINT8 mPamPciDev = 0; UINT8 mPamPciFunc = 0; // // Intel 830 Chipset and similar // // // 440 PAM map. // // PAM Range Offset Bits Operation // =============== ====== ==== =============================================================== // 0xC0000-0xC3FFF 0x5a 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC4000-0xC7FFF 0x5a 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC8000-0xCBFFF 0x5b 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xCC000-0xCFFFF 0x5b 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD0000-0xD3FFF 0x5c 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD4000-0xD7FFF 0x5c 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD8000-0xDBFFF 0x5d 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xDC000-0xDFFFF 0x5d 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE0000-0xE3FFF 0x5e 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE4000-0xE7FFF 0x5e 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE8000-0xEBFFF 0x5f 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 
0xEC000-0xEFFFF 0x5f 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xF0000-0xFFFFF 0x59 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // STATIC LEGACY_MEMORY_SECTION_INFO mSectionArray[] = { { 0xC0000, SIZE_16KB, FALSE, FALSE }, { 0xC4000, SIZE_16KB, FALSE, FALSE }, { 0xC8000, SIZE_16KB, FALSE, FALSE }, { 0xCC000, SIZE_16KB, FALSE, FALSE }, { 0xD0000, SIZE_16KB, FALSE, FALSE }, { 0xD4000, SIZE_16KB, FALSE, FALSE }, { 0xD8000, SIZE_16KB, FALSE, FALSE }, { 0xDC000, SIZE_16KB, FALSE, FALSE }, { 0xE0000, SIZE_16KB, FALSE, FALSE }, { 0xE4000, SIZE_16KB, FALSE, FALSE }, { 0xE8000, SIZE_16KB, FALSE, FALSE }, { 0xEC000, SIZE_16KB, FALSE, FALSE }, { 0xF0000, SIZE_64KB, FALSE, FALSE } }; STATIC PAM_REGISTER_VALUE mRegisterValues830[] = { { REG_PAM1_OFFSET_830, 0x01, 0x02 }, { REG_PAM1_OFFSET_830, 0x10, 0x20 }, { REG_PAM2_OFFSET_830, 0x01, 0x02 }, { REG_PAM2_OFFSET_830, 0x10, 0x20 }, { REG_PAM3_OFFSET_830, 0x01, 0x02 }, { REG_PAM3_OFFSET_830, 0x10, 0x20 }, { REG_PAM4_OFFSET_830, 0x01, 0x02 }, { REG_PAM4_OFFSET_830, 0x10, 0x20 }, { REG_PAM5_OFFSET_830, 0x01, 0x02 }, { REG_PAM5_OFFSET_830, 0x10, 0x20 }, { REG_PAM6_OFFSET_830, 0x01, 0x02 }, { REG_PAM6_OFFSET_830, 0x10, 0x20 }, { REG_PAM0_OFFSET_830, 0x10, 0x20 } }; // // Intel 4 Series Chipset and similar // // // PAM map. 
// // PAM Range Offset Bits Operation // =============== ====== ==== =============================================================== // 0xC0000-0xC3FFF 0x91 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC4000-0xC7FFF 0x91 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC8000-0xCBFFF 0x92 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xCC000-0xCFFFF 0x92 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD0000-0xD3FFF 0x93 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD4000-0xD7FFF 0x93 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD8000-0xDBFFF 0x94 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xDC000-0xDFFFF 0x94 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE0000-0xE3FFF 0x95 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE4000-0xE7FFF 0x95 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE8000-0xEBFFF 0x96 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xEC000-0xEFFFF 0x96 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xF0000-0xFFFFF 0x90 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // STATIC PAM_REGISTER_VALUE mRegisterValuesS4[] = { { REG_PAM1_OFFSET_S4, 0x01, 0x02 }, { REG_PAM1_OFFSET_S4, 0x10, 0x20 }, { REG_PAM2_OFFSET_S4, 0x01, 0x02 }, { REG_PAM2_OFFSET_S4, 0x10, 0x20 }, { REG_PAM3_OFFSET_S4, 0x01, 0x02 }, { REG_PAM3_OFFSET_S4, 0x10, 0x20 }, { REG_PAM4_OFFSET_S4, 0x01, 0x02 }, { REG_PAM4_OFFSET_S4, 0x10, 0x20 }, { REG_PAM5_OFFSET_S4, 0x01, 0x02 }, { REG_PAM5_OFFSET_S4, 0x10, 0x20 }, { REG_PAM6_OFFSET_S4, 0x01, 0x02 }, { REG_PAM6_OFFSET_S4, 0x10, 0x20 }, { REG_PAM0_OFFSET_S4, 0x10, 0x20 } }; // // Core processors // // // PAM map. 
// // PAM Range Offset Bits Operation // =============== ====== ==== =============================================================== // 0xC0000-0xC3FFF 0x81 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC4000-0xC7FFF 0x81 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xC8000-0xCBFFF 0x82 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xCC000-0xCFFFF 0x82 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD0000-0xD3FFF 0x83 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD4000-0xD7FFF 0x83 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xD8000-0xDBFFF 0x84 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xDC000-0xDFFFF 0x84 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE0000-0xE3FFF 0x85 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE4000-0xE7FFF 0x85 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xE8000-0xEBFFF 0x86 1:0 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xEC000-0xEFFFF 0x86 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // 0xF0000-0xFFFFF 0x80 5:4 00 = DRAM Disabled, 01= Read Only, 10 = Write Only, 11 = Normal // STATIC PAM_REGISTER_VALUE mRegisterValuesCP[] = { { REG_PAM1_OFFSET_CP, 0x01, 0x02 }, { REG_PAM1_OFFSET_CP, 0x10, 0x20 }, { REG_PAM2_OFFSET_CP, 0x01, 0x02 }, { REG_PAM2_OFFSET_CP, 0x10, 0x20 }, { REG_PAM3_OFFSET_CP, 0x01, 0x02 }, { REG_PAM3_OFFSET_CP, 0x10, 0x20 }, { REG_PAM4_OFFSET_CP, 0x01, 0x02 }, { REG_PAM4_OFFSET_CP, 0x10, 0x20 }, { REG_PAM5_OFFSET_CP, 0x01, 0x02 }, { REG_PAM5_OFFSET_CP, 0x10, 0x20 }, { REG_PAM6_OFFSET_CP, 0x01, 0x02 }, { REG_PAM6_OFFSET_CP, 0x10, 0x20 }, { REG_PAM0_OFFSET_CP, 0x10, 0x20 } }; STATIC PAM_REGISTER_VALUE mRegisterValuesNH[] = { { REG_PAM1_OFFSET_NH, 0x01, 0x02 }, { REG_PAM1_OFFSET_NH, 0x10, 0x20 }, { REG_PAM2_OFFSET_NH, 
0x01, 0x02 }, { REG_PAM2_OFFSET_NH, 0x10, 0x20 }, { REG_PAM3_OFFSET_NH, 0x01, 0x02 }, { REG_PAM3_OFFSET_NH, 0x10, 0x20 }, { REG_PAM4_OFFSET_NH, 0x01, 0x02 }, { REG_PAM4_OFFSET_NH, 0x10, 0x20 }, { REG_PAM5_OFFSET_NH, 0x01, 0x02 }, { REG_PAM5_OFFSET_NH, 0x10, 0x20 }, { REG_PAM6_OFFSET_NH, 0x01, 0x02 }, { REG_PAM6_OFFSET_NH, 0x10, 0x20 }, { REG_PAM0_OFFSET_NH, 0x10, 0x20 } }; // // NForce chipset // STATIC PAM_REGISTER_VALUE mRegisterValuesNV[] = { { REG_PAM1_OFFSET_NV, 0x01, 0x02 }, { REG_PAM1_OFFSET_NV, 0x10, 0x20 }, { REG_PAM2_OFFSET_NV, 0x01, 0x02 }, { REG_PAM2_OFFSET_NV, 0x10, 0x20 }, { REG_PAM3_OFFSET_NV, 0x01, 0x02 }, { REG_PAM3_OFFSET_NV, 0x10, 0x20 }, { REG_PAM4_OFFSET_NV, 0x01, 0x02 }, { REG_PAM4_OFFSET_NV, 0x10, 0x20 }, { REG_PAM5_OFFSET_NV, 0x01, 0x02 }, { REG_PAM5_OFFSET_NV, 0x10, 0x20 }, { REG_PAM6_OFFSET_NV, 0x01, 0x02 }, { REG_PAM6_OFFSET_NV, 0x10, 0x20 }, { REG_PAM0_OFFSET_NV, 0x10, 0x20 } }; // // Handle used to install the Legacy Region Protocol // STATIC EFI_HANDLE mHandle = NULL; // // Instance of the Legacy Region Protocol to install into the handle database // STATIC EFI_LEGACY_REGION2_PROTOCOL mLegacyRegion2 = { LegacyRegion2Decode, LegacyRegion2Lock, LegacyRegion2BootLock, LegacyRegion2Unlock, LegacyRegionGetInfo }; STATIC EFI_STATUS LegacyRegionManipulationInternal ( IN UINT32 Start, IN UINT32 Length, IN BOOLEAN *ReadEnable, IN BOOLEAN *WriteEnable, OUT UINT32 *Granularity ) { UINT32 EndAddress; UINTN Index; UINTN StartIndex; // // Validate input parameters. // if ((Length == 0) || (Granularity == NULL)) { return EFI_INVALID_PARAMETER; } EndAddress = Start + Length - 1; if ((Start < PAM_BASE_ADDRESS) || (EndAddress > PAM_LIMIT_ADDRESS)) { return EFI_INVALID_PARAMETER; } // // Loop to find the start PAM. 
// StartIndex = 0; for (Index = 0; Index < (sizeof (mSectionArray) / sizeof (mSectionArray[0])); Index++) { if ((Start >= mSectionArray[Index].Start) && (Start < (mSectionArray[Index].Start + mSectionArray[Index].Length))) { StartIndex = Index; break; } } ASSERT (Index < (sizeof (mSectionArray) / sizeof (mSectionArray[0]))); // // Program PAM until end PAM is encountered // for (Index = StartIndex; Index < (sizeof (mSectionArray) / sizeof (mSectionArray[0])); Index++) { if (ReadEnable != NULL) { if (*ReadEnable) { PciOr8 ( PCI_LIB_ADDRESS (mPamPciBus, mPamPciDev, mPamPciFunc, mRegisterValues[Index].PAMRegOffset), mRegisterValues[Index].ReadEnableData ); } else { PciAnd8 ( PCI_LIB_ADDRESS (mPamPciBus, mPamPciDev, mPamPciFunc, mRegisterValues[Index].PAMRegOffset), (UINT8)(~mRegisterValues[Index].ReadEnableData) ); } } if (WriteEnable != NULL) { if (*WriteEnable) { PciOr8 ( PCI_LIB_ADDRESS (mPamPciBus, mPamPciDev, mPamPciFunc, mRegisterValues[Index].PAMRegOffset), mRegisterValues[Index].WriteEnableData ); } else { PciAnd8 ( PCI_LIB_ADDRESS (mPamPciBus, mPamPciDev, mPamPciFunc, mRegisterValues[Index].PAMRegOffset), (UINT8)(~mRegisterValues[Index].WriteEnableData) ); } } // // If the end PAM is encountered, record its length as granularity and jump out. // if ((EndAddress >= mSectionArray[Index].Start) && (EndAddress < (mSectionArray[Index].Start + mSectionArray[Index].Length))) { *Granularity = mSectionArray[Index].Length; break; } } ASSERT (Index < (sizeof (mSectionArray) / sizeof (mSectionArray[0]))); return EFI_SUCCESS; } STATIC EFI_STATUS LegacyRegionGetInfoInternal ( OUT UINT32 *DescriptorCount, OUT LEGACY_MEMORY_SECTION_INFO **Descriptor ) { UINTN Index; UINT8 PamValue; // // Check input parameters // if ((DescriptorCount == NULL) || (Descriptor == NULL)) { return EFI_INVALID_PARAMETER; } // // Fill in current status of legacy region. 
// *DescriptorCount = (sizeof (mSectionArray) / sizeof (mSectionArray[0])); for (Index = 0; Index < *DescriptorCount; Index++) { PamValue = PciRead8 (PCI_LIB_ADDRESS (mPamPciBus, mPamPciDev, mPamPciFunc, mRegisterValues[Index].PAMRegOffset)); mSectionArray[Index].ReadEnabled = FALSE; if ((PamValue & mRegisterValues[Index].ReadEnableData) != 0) { mSectionArray[Index].ReadEnabled = TRUE; } mSectionArray[Index].WriteEnabled = FALSE; if ((PamValue & mRegisterValues[Index].WriteEnableData) != 0) { mSectionArray[Index].WriteEnabled = TRUE; } } *Descriptor = mSectionArray; return EFI_SUCCESS; } /** Modify the hardware to allow (decode) or disallow (not decode) memory reads in a region. If the On parameter evaluates to TRUE, this function enables memory reads in the address range Start to (Start + Length - 1). If the On parameter evaluates to FALSE, this function disables memory reads in the address range Start to (Start + Length - 1). @param This[in] Indicates the EFI_LEGACY_REGION_PROTOCOL instance. @param Start[in] The beginning of the physical address of the region whose attributes should be modified. @param Length[in] The number of bytes of memory whose attributes should be modified. The actual number of bytes modified may be greater than the number specified. @param Granularity[out] The number of bytes in the last region affected. This may be less than the total number of bytes affected if the starting address was not aligned to a region's starting address or if the length was greater than the number of bytes in the first region. @param On[in] Decode / Non-Decode flag. @retval EFI_SUCCESS The region's attributes were successfully modified. @retval EFI_INVALID_PARAMETER If Start or Length describe an address not in the Legacy Region. 
**/
EFI_STATUS
EFIAPI
LegacyRegion2Decode (
  IN  EFI_LEGACY_REGION2_PROTOCOL  *This,
  IN  UINT32                       Start,
  IN  UINT32                       Length,
  OUT UINT32                       *Granularity,
  IN  BOOLEAN                      *On
  )
{
  //
  // Forward directly to the PAM manipulation helper: *On selects whether
  // reads are enabled (decoded) or disabled for [Start, Start+Length-1].
  //
  return LegacyRegionManipulationInternal (Start, Length, On, NULL, Granularity);
}

/**
  Modify the hardware to disallow memory attribute changes in a region.

  Boot-locking is not supported by the PAM-based chipsets this driver
  handles, so this function only validates the range and then reports
  EFI_UNSUPPORTED.

  @param  This[in]         Indicates the EFI_LEGACY_REGION2_PROTOCOL instance.
  @param  Start[in]        Physical start address of the region to modify.
  @param  Length[in]       Number of bytes to modify.
  @param  Granularity[out] Number of bytes in the last region affected.

  @retval EFI_INVALID_PARAMETER  Start/Length is outside 0xC0000..0xFFFFF.
  @retval EFI_UNSUPPORTED        The chipset does not support boot-locking.
**/
EFI_STATUS
EFIAPI
LegacyRegion2BootLock (
  IN EFI_LEGACY_REGION2_PROTOCOL  *This,
  IN  UINT32                      Start,
  IN  UINT32                      Length,
  OUT UINT32                      *Granularity
  )
{
  //
  // Validate the range lies within the legacy region (0xC0000-0xFFFFF)
  // before reporting that locking is unsupported.
  //
  if ((Start < 0xC0000) || ((Start + Length - 1) > 0xFFFFF)) {
    return EFI_INVALID_PARAMETER;
  }

  return EFI_UNSUPPORTED;
}

/**
  Modify the hardware to disallow memory writes in a region.

  @param  This[in]         Indicates the EFI_LEGACY_REGION2_PROTOCOL instance.
  @param  Start[in]        Physical start address of the region to modify.
  @param  Length[in]       Number of bytes to modify.
  @param  Granularity[out] Number of bytes in the last region affected.

  @retval EFI_SUCCESS            The region's attributes were modified.
  @retval EFI_INVALID_PARAMETER  Start/Length is not in the Legacy Region.
**/
EFI_STATUS
EFIAPI
LegacyRegion2Lock (
  IN EFI_LEGACY_REGION2_PROTOCOL  *This,
  IN  UINT32                      Start,
  IN  UINT32                      Length,
  OUT UINT32                      *Granularity
  )
{
  BOOLEAN  WriteEnable;

  // Disable writes only; read decode is left unchanged (NULL).
  WriteEnable = FALSE;
  return LegacyRegionManipulationInternal (Start, Length, NULL, &WriteEnable, Granularity);
}

/**
  Modify the hardware to allow memory writes in a region.

  @param  This[in]         Indicates the EFI_LEGACY_REGION2_PROTOCOL instance.
  @param  Start[in]        Physical start address of the region to modify.
  @param  Length[in]       Number of bytes to modify.
  @param  Granularity[out] Number of bytes in the last region affected.

  @retval EFI_SUCCESS            The region's attributes were modified.
  @retval EFI_INVALID_PARAMETER  Start/Length is not in the Legacy Region.
**/
EFI_STATUS
EFIAPI
LegacyRegion2Unlock (
  IN EFI_LEGACY_REGION2_PROTOCOL  *This,
  IN  UINT32                      Start,
  IN  UINT32                      Length,
  OUT UINT32                      *Granularity
  )
{
  BOOLEAN  WriteEnable;

  // Enable writes only; read decode is left unchanged (NULL).
  WriteEnable = TRUE;
  return LegacyRegionManipulationInternal (Start, Length, NULL, &WriteEnable, Granularity);
}

/**
  Get region information for the attributes of the Legacy Region.

  Builds, in a freshly allocated buffer, three descriptors per legacy
  memory section: one for read decode, one for writeability, and one
  reporting the (always unlocked) lock state.

  @param  This[in]             Indicates the EFI_LEGACY_REGION2_PROTOCOL instance.
  @param  DescriptorCount[out] Number of descriptors returned.
  @param  Descriptor[out]      Returned pointer to the descriptor buffer,
                               allocated here and owned by the caller.

  @retval EFI_SUCCESS           Information returned.
  @retval EFI_OUT_OF_RESOURCES  Descriptor buffer allocation failed.
**/
EFI_STATUS
EFIAPI
LegacyRegionGetInfo (
  IN  EFI_LEGACY_REGION2_PROTOCOL   *This,
  OUT UINT32                        *DescriptorCount,
  OUT EFI_LEGACY_REGION_DESCRIPTOR  **Descriptor
  )
{
  LEGACY_MEMORY_SECTION_INFO    *SectionInfo;
  UINT32                        SectionCount;
  EFI_LEGACY_REGION_DESCRIPTOR  *DescriptorArray;
  UINTN                         Index;
  UINTN                         DescriptorIndex;

  //
  // Get section numbers and information
  //
  LegacyRegionGetInfoInternal (&SectionCount, &SectionInfo);

  //
  // Each section has 3 descriptors, corresponding to readability,
  // writeability, and lock status.
  //
  DescriptorArray = AllocatePool (sizeof (EFI_LEGACY_REGION_DESCRIPTOR) * SectionCount * 3);
  if (DescriptorArray == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  DescriptorIndex = 0;
  for (Index = 0; Index < SectionCount; Index++) {
    //
    // Descriptor for read decode state.
    //
    DescriptorArray[DescriptorIndex].Start       = SectionInfo[Index].Start;
    DescriptorArray[DescriptorIndex].Length      = SectionInfo[Index].Length;
    DescriptorArray[DescriptorIndex].Granularity = SectionInfo[Index].Length;
    if (SectionInfo[Index].ReadEnabled) {
      DescriptorArray[DescriptorIndex].Attribute = LegacyRegionDecoded;
    } else {
      DescriptorArray[DescriptorIndex].Attribute = LegacyRegionNotDecoded;
    }
    DescriptorIndex++;

    //
    // Create descriptor for writeability, according to lock status
    //
    DescriptorArray[DescriptorIndex].Start       = SectionInfo[Index].Start;
    DescriptorArray[DescriptorIndex].Length      = SectionInfo[Index].Length;
    DescriptorArray[DescriptorIndex].Granularity = SectionInfo[Index].Length;
    if (SectionInfo[Index].WriteEnabled) {
      DescriptorArray[DescriptorIndex].Attribute = LegacyRegionWriteEnabled;
    } else {
      DescriptorArray[DescriptorIndex].Attribute = LegacyRegionWriteDisabled;
    }
    DescriptorIndex++;

    //
    // Chipset does not support bootlock.
    //
    DescriptorArray[DescriptorIndex].Start       = SectionInfo[Index].Start;
    DescriptorArray[DescriptorIndex].Length      = SectionInfo[Index].Length;
    DescriptorArray[DescriptorIndex].Granularity = SectionInfo[Index].Length;
    DescriptorArray[DescriptorIndex].Attribute   = LegacyRegionNotLocked;
    DescriptorIndex++;
  }

  *DescriptorCount = (UINT32) DescriptorIndex;
  *Descriptor      = DescriptorArray;
  return EFI_SUCCESS;
}

/**
  Detects chipset and initialize PAM support tables.

  Reads the vendor/device ID of the host bridge at bus 0, dev 0, func 0
  and selects the matching PAM register table; for some families it also
  relocates the PAM PCI bus/device/function used for later accesses.

  @retval EFI_SUCCESS    A supported chipset was detected.
  @retval EFI_NOT_FOUND  The chipset is unknown; no PAM table selected.
**/
EFI_STATUS
DetectChipset (
  VOID
  )
{
  UINT16 VID = 0;
  UINT16 DID = 0;

  mRegisterValues = NULL;

  mVendorDeviceId = PciRead32 (PCI_LIB_ADDRESS (PAM_PCI_BUS, PAM_PCI_DEV, PAM_PCI_FUNC, 0));
  switch (mVendorDeviceId) {
    //
    // Intel 830 and similar.
    // Copied from 915 resolution created by steve tomljenovic,
    // Resolution module by Evan Lojewski.
    //
    case 0x35758086:   // 830
    case 0x35808086:   // 855GM
      /// Intel 830 and similar (PAM 0x59-0x5f).
      mRegisterValues = mRegisterValues830;
      break;

    case 0x25C08086:   // 5000
    case 0x25D48086:   // 5000V
    case 0x65C08086:   // 5100
      /// Intel 5000 and similar (PAM 0x59-0x5f), with PAM on device 16.
      mRegisterValues = mRegisterValues830;
      mPamPciDev = 16;
      break;

    //
    // Intel Series 4 and similar.
    // Copied from 915 resolution created by steve tomljenovic,
    // Resolution module by Evan Lojewski.
    //
    case 0x25608086:   // 845G
    case 0x25708086:   // 865G
    case 0x25808086:   // 915G
    case 0x25908086:   // 915GM
    case 0x27708086:   // 945G
    case 0x27748086:   // 955X
    case 0x277c8086:   // 975X
    case 0x27a08086:   // 945GM - Dell D430 Offset 090: 10 11 11 00 00
    case 0x27ac8086:   // 945GME
    case 0x29208086:   // G45
    case 0x29708086:   // 946GZ
    case 0x29808086:   // G965
    case 0x29908086:   // Q965
    case 0x29a08086:   // P965
    case 0x29b08086:   // R845
    case 0x29c08086:   // G31/P35
    case 0x29d08086:   // Q33
    case 0x29e08086:   // X38/X48
    case 0x2a008086:   // 965GM
    case 0x2a108086:   // GME965/GLE960
    case 0x2a408086:   // PM/GM45/47
    case 0x2e008086:   // Eaglelake
    case 0x2e108086:   // B43
    case 0x2e208086:   // P45
    case 0x2e308086:   // G41
    case 0x2e408086:   // B43 Base
    case 0x2e908086:   // B43 Soft Sku
    case 0x81008086:   // 500
    case 0xA0008086:   // 3150
      /// Intel Series 4 and similar (PAM 0x90-0x96).
      mRegisterValues = mRegisterValuesS4;
      break;

    //
    // Core processors
    // http://pci-ids.ucw.cz/read/PC/8086
    //
    case 0x01008086:   // 2nd Generation Core Processor Family DRAM Controller
    case 0x01048086:   // 2nd Generation Core Processor Family DRAM Controller
    case 0x01088086:   // Xeon E3-1200 2nd Generation Core Processor Family DRAM Controller
    case 0x010c8086:   // Xeon E3-1200 2nd Generation Core Processor Family DRAM Controller
    case 0x01508086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x01548086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x01588086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x015c8086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x01608086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x01648086:   // 3rd Generation Core Processor Family DRAM Controller
    case 0x0C008086:   // 4rd Generation Core Processor Family DRAM Controller
    case 0x0C048086:   // 4rd Generation M-Processor Series
    case 0x0C088086:   // 4rd Generation Haswell Xeon
    case 0x0A048086:   // 4rd Generation U-Processor Series
    case 0x0D048086:   // 4rd Generation H-Processor Series (BGA) with GT3 Graphics
    case 0x16048086:   // 5th Generation Core Processor Family DRAM Controller
    case 0x191f8086:   // 6th Generation (Skylake) DRAM Controller (Z170X)
    case 0x0F008086:   // Bay Trail Family DRAM Controller
      /// Next Generation Core processors (PAM 0x80-0x86).
      mRegisterValues = mRegisterValuesCP;
      break;

    //
    // 1st gen i7 - Nehalem
    //
    case 0x00408086:   // Core Processor DRAM Controller
    case 0x00448086:   // Core Processor DRAM Controller - Arrandale
    case 0x00488086:   // Core Processor DRAM Controller
    case 0x00698086:   // Core Processor DRAM Controller
    case 0xD1308086:   // Xeon(R) CPU L3426 Processor DRAM Controller
    case 0xD1318086:   // Core-i Processor DRAM Controller
    case 0xD1328086:   // PM55 i7-720QM DRAM Controller
    case 0x34008086:   // Core-i Processor DRAM Controller
    case 0x34018086:   // Core-i Processor DRAM Controller
    case 0x34028086:   // Core-i Processor DRAM Controller
    case 0x34038086:   // Core-i Processor DRAM Controller
    case 0x34048086:   // Core-i Processor DRAM Controller
    case 0x34058086:   // X58 Core-i Processor DRAM Controller
    case 0x34068086:   // Core-i Processor DRAM Controller
    case 0x34078086:   // Core-i Processor DRAM Controller
      /// Core i7 processors (PAM 0x40-0x47).
      mRegisterValues = mRegisterValuesNH;
      //
      // On Nehalem the PAM registers live behind a hidden bus whose number
      // is probed here by halving candidates from 0xFF until an Intel
      // device with DID > 0x2c00 is found at dev 0, func 1.
      //
      mPamPciBus = 0xFF;
      for (mPamPciBus = 0xFF; mPamPciBus > 0x1F; mPamPciBus >>= 1) {
        VID = PciRead16 (PCI_LIB_ADDRESS (mPamPciBus, 0, 1, 0x00));
        if (VID != 0x8086) {
          continue;
        }
        DID = PciRead16 (PCI_LIB_ADDRESS (mPamPciBus, 0, 1, 0x02));
        if (DID > 0x2c00) {
          break;
        }
      }
      if ((VID != 0x8086) || (DID < 0x2c00)) {
        //
        // Nehalem bus is not found, assume 0.
        //
        mPamPciBus = 0;
      } else {
        mPamPciFunc = 1;
      }
      break;

    case 0x3C008086:   // Xeon E5 Processor
      /// Xeon E5 processors (PAM 0x40-0x47); bus number is read from
      /// config offset 0x109 of device 5 on bus 0.
      mRegisterValues = mRegisterValuesNH;
      mPamPciBus  = PciRead8 (PCI_LIB_ADDRESS (0, 5, 0, 0x109));
      mPamPciDev  = 12;
      mPamPciFunc = 6;
      break;

    case 0x0a8210de:
    case 0x0a8610de:
      /// NForce MCP79 and similar (PAM 0xC0-0xC7).
      mRegisterValues = mRegisterValuesNV;
      break;

    default:
      //
      // Unknown chipset.
      //
      break;
  }

  return mRegisterValues != NULL ? EFI_SUCCESS : EFI_NOT_FOUND;
}

/**
  Initialize Legacy Region support.

  Refuses to install if a Legacy Region (or Legacy Region 2) protocol is
  already present, or if the chipset is not recognized; otherwise installs
  EFI_LEGACY_REGION2_PROTOCOL on a new handle.

  @retval EFI_SUCCESS      Successfully initialized.
  @retval EFI_UNSUPPORTED  Protocol already present or chipset unsupported.
**/
EFI_STATUS
EFIAPI
LegacyRegion2Install (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS  Status;
  VOID        *Protocol;

  //
  // Check for presence of gEfiLegacyRegionProtocolGuid
  // and gEfiLegacyRegion2ProtocolGuid
  //
  Status = gBS->LocateProtocol (&gEfiLegacyRegionProtocolGuid, NULL, (VOID **)&Protocol);
  if (Status == EFI_SUCCESS) {
    return EFI_UNSUPPORTED;
  }
  Status = gBS->LocateProtocol (&gEfiLegacyRegion2ProtocolGuid, NULL, (VOID **)&Protocol);
  if (Status == EFI_SUCCESS) {
    return EFI_UNSUPPORTED;
  }

  Status = DetectChipset ();
  if (EFI_ERROR (Status)) {
    return EFI_UNSUPPORTED;
  }

  //
  // Install the Legacy Region Protocol on a new handle
  //
  Status = gBS->InstallMultipleProtocolInterfaces (
                  &mHandle,
                  &gEfiLegacyRegion2ProtocolGuid, &mLegacyRegion2,
                  NULL
                  );
  return Status;
}
884007.c
/* * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2009 HNR Consulting. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 *
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

/* Queue depths, tunable at module load via send/recv_queue_size. */
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

/* Slab cache for ib_mad_private buffers. */
static struct kmem_cache *ib_mad_cache;

/* List of per-device/per-port MAD state, guarded by ib_mad_port_list_lock. */
static struct list_head ib_mad_port_list;
/* Monotonically increasing high TID assigned to each new MAD agent. */
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port; takes ib_mad_port_list_lock itself.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

/* Map a special QP type to its index (0 = SMI, 1 = GSI), or -1. */
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

/* Index of a vendor class within the vendor range 2 table. */
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

/* Nonzero when mgmt_class falls inside vendor range 2. */
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

/* Nonzero when the 3-byte OUI is not all zero. */
static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

/*
 * Check whether any method in mad_reg_req collides with an existing
 * registration under the matching OUI of this vendor class.
 */
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

/*
 * Nonzero when the MAD is a response: response method bit set, a trap
 * repress, or a BM MAD with the response attribute-modifier bit.
 */
int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to
 *			   send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

/* Nonzero when snoop flags request send-side completion snooping. */
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

/* Nonzero when snoop flags request receive-side snooping. */
static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

/*
 * Insert a snoop agent into the QP's snoop table, growing the table by
 * one slot if it is full.  Returns the slot index or -ENOMEM.
 */
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array.
	 */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

/*
 * Register a snoop-only MAD agent on a special QP; it observes traffic
 * per mad_snoop_flags but cannot send MADs itself.
 */
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

/* Drop an agent reference; wake the unregistering thread on last put. */
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

/* Drop a snoop-agent reference; wake the unregistering thread on last put. */
static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

/*
 * Tear down a MAD agent: cancel outstanding work, remove the class
 * registration, wait for all references to drain, then free.
 */
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

/*
 * Tear down a snoop agent: clear its table slot, wait for in-flight
 * snoop callbacks to drop their references, then free.
 */
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

/* Unlink a posted MAD from its work queue under the queue lock. */
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

/*
 * Deliver a send completion to every snoop agent whose flags match.
 * The snoop lock is dropped around each callback; the per-agent
 * refcount keeps the entry alive across the unlocked call.
 */
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

/*
 * Deliver a received MAD to every snoop agent whose flags match; same
 * lock-drop/refcount discipline as snoop_send().
 */
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;
atomic_inc(&mad_snoop_priv->refcount); spin_unlock_irqrestore(&qp_info->snoop_lock, flags); mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, mad_recv_wc); deref_snoop_agent(mad_snoop_priv); spin_lock_irqsave(&qp_info->snoop_lock, flags); } spin_unlock_irqrestore(&qp_info->snoop_lock, flags); } static void build_smp_wc(struct ib_qp *qp, u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, struct ib_wc *wc) { memset(wc, 0, sizeof *wc); wc->wr_id = wr_id; wc->status = IB_WC_SUCCESS; wc->opcode = IB_WC_RECV; wc->pkey_index = pkey_index; wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); wc->src_qp = IB_QP0; wc->qp = qp; wc->slid = slid; wc->sl = 0; wc->dlid_path_bits = 0; wc->port_num = port_num; } /* * Return 0 if SMP is to be sent * Return 1 if SMP was consumed locally (whether or not solicited) * Return < 0 if error */ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr) { int ret = 0; struct ib_smp *smp = mad_send_wr->send_buf.mad; unsigned long flags; struct ib_mad_local_private *local; struct ib_mad_private *mad_priv; struct ib_mad_port_private *port_priv; struct ib_mad_agent_private *recv_mad_agent = NULL; struct ib_device *device = mad_agent_priv->agent.device; u8 port_num; struct ib_wc mad_wc; struct ib_send_wr *send_wr = &mad_send_wr->send_wr; if (device->node_type == RDMA_NODE_IB_SWITCH && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) port_num = send_wr->wr.ud.port_num; else port_num = mad_agent_priv->agent.port_num; /* * Directed route handling starts if the initial LID routed part of * a request or the ending LID routed part of a response is empty. * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. */ if ((ib_get_smp_direction(smp) ? 
smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && smi_handle_dr_smp_send(smp, device->node_type, port_num) == IB_SMI_DISCARD) { ret = -EINVAL; printk(KERN_ERR PFX "Invalid directed route\n"); goto out; } /* Check to post send on QP or process locally */ if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) goto out; local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { ret = -ENOMEM; printk(KERN_ERR PFX "No memory for ib_mad_local_private\n"); goto out; } local->mad_priv = NULL; local->recv_mad_agent = NULL; mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; printk(KERN_ERR PFX "No memory for local response MAD\n"); kfree(local); goto out; } build_smp_wc(mad_agent_priv->agent.qp, send_wr->wr_id, be16_to_cpu(smp->dr_slid), send_wr->wr.ud.pkey_index, send_wr->wr.ud.port_num, &mad_wc); /* No GRH for DR SMP */ ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, (struct ib_mad *)smp, (struct ib_mad *)&mad_priv->mad); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: if (ib_response_mad(&mad_priv->mad.mad) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; /* * Reference MAD agent until receive * side of local completion handled */ atomic_inc(&mad_agent_priv->refcount); } else kmem_cache_free(ib_mad_cache, mad_priv); break; case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: kmem_cache_free(ib_mad_cache, mad_priv); break; case IB_MAD_RESULT_SUCCESS: /* Treat like an incoming receive MAD */ port_priv = ib_get_mad_port(mad_agent_priv->agent.device, mad_agent_priv->agent.port_num); if (port_priv) { memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad)); recv_mad_agent = find_mad_agent(port_priv, &mad_priv->mad.mad); } if (!port_priv || !recv_mad_agent) { /* * No receiving agent so drop packet and * generate send completion. 
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;	/* SMP was consumed locally (see function header) */
out:
	return ret;
}

/*
 * Number of trailing pad bytes required so that data_len fills whole
 * RMPP segments of size (sizeof(struct ib_mad) - hdr_len).  Returns 0
 * when the data already ends exactly on a segment boundary, and a full
 * segment when there is no data at all.
 */
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

/* Release every RMPP data segment queued on this send WR. */
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

/*
 * Allocate the per-segment buffers for an RMPP send and initialize the
 * RMPP header of the first MAD.  Assumes send_buf->hdr_len, data_len and
 * send_wr->pad were already set by the caller (ib_create_send_mad does
 * this).  On allocation failure all previously allocated segments are
 * freed and -ENOMEM is returned.
 */
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments.
	 */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		/* Segment numbers start at 1 and count up */
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding (pad bytes live at the tail of the last segment) */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	/* Position the cursor used by ib_get_rmpp_segment() at segment 1 */
	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

/*
 * Allocate and initialize a MAD send buffer plus its private send WR
 * state.  Rejects (a) RMPP requests on agents without an RMPP version
 * and (b) messages larger than a single MAD when RMPP is not active.
 */
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	/* RMPP sends keep only the header in buf; data lives in segments */
	size = rmpp_active ?
hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Private send WR state is placed immediately after the MAD buffer */
	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	/* sge[0] covers the header, sge[1] the payload */
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	/* Agent reference held until ib_free_send_mad() */
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

/* Offset of the class-specific data area within a MAD, by mgmt class. */
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

/* Nonzero if the management class may use RMPP segmentation. */
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START)
	     && (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

/*
 * Return the data buffer of RMPP segment seg_num.  Walks forward or
 * backward from the cached cur_seg cursor, updating the cursor so that
 * sequential lookups are cheap.
 */
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

/* Payload address for the current send: RMPP segment or in-MAD data. */
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

/*
 * Free a send buffer created by ib_create_send_mad() and drop the
 * agent reference taken there.
 */
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

/*
 * DMA-map the header and payload of a send WR and post it to the QP's
 * send queue, or park it on the overflow list when the queue is full.
 */
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr =
ib_dma_map_single(mad_agent->device, mad_send_wr->send_buf.mad, sge[0].length, DMA_TO_DEVICE); mad_send_wr->header_mapping = sge[0].addr; sge[1].addr = ib_dma_map_single(mad_agent->device, ib_get_payload(mad_send_wr), sge[1].length, DMA_TO_DEVICE); mad_send_wr->payload_mapping = sge[1].addr; spin_lock_irqsave(&qp_info->send_queue.lock, flags); if (qp_info->send_queue.count < qp_info->send_queue.max_active) { ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, &bad_send_wr); list = &qp_info->send_queue.list; } else { ret = 0; list = &qp_info->overflow_list; } if (!ret) { qp_info->send_queue.count++; list_add_tail(&mad_send_wr->mad_list.list, list); } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); if (ret) { ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping, sge[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_agent->device, mad_send_wr->payload_mapping, sge[1].length, DMA_TO_DEVICE); } return ret; } /* * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated * with the registered client */ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, struct ib_mad_send_buf **bad_send_buf) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_buf *next_send_buf; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int ret = -EINVAL; /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { ret = -EINVAL; goto error; } if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { if (mad_agent_priv->agent.rmpp_version) { ret = -EINVAL; goto error; } } /* * Save pointer to next work request to post in case the * current one completes, and the user modifies the work * request associated with the completion */ 
next_send_buf = send_buf->next; mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { ret = handle_outgoing_dr_smp(mad_agent_priv, mad_send_wr); if (ret < 0) /* error */ goto error; else if (ret == 1) /* locally consumed */ continue; } mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; /* Timeout will be updated after send completes */ mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); mad_send_wr->max_retries = send_buf->retries; mad_send_wr->retries_left = send_buf->retries; send_buf->retries = 0; /* Reference for work request to QP + response */ mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); mad_send_wr->status = IB_WC_SUCCESS; /* Reference MAD agent until send completes */ atomic_inc(&mad_agent_priv->refcount); spin_lock_irqsave(&mad_agent_priv->lock, flags); list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_agent_priv->agent.rmpp_version) { ret = ib_send_rmpp_mad(mad_send_wr); if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) ret = ib_send_mad(mad_send_wr); } else ret = ib_send_mad(mad_send_wr); if (ret < 0) { /* Fail send request */ spin_lock_irqsave(&mad_agent_priv->lock, flags); list_del(&mad_send_wr->agent_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); atomic_dec(&mad_agent_priv->refcount); goto error; } } return 0; error: if (bad_send_buf) *bad_send_buf = send_buf; return ret; } EXPORT_SYMBOL(ib_post_send_mad); /* * ib_free_recv_mad - Returns data buffers used to receive * a MAD to the access layer */ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *priv; struct list_head free_list; INIT_LIST_HEAD(&free_list); list_splice_init(&mad_recv_wc->rmpp_list, &free_list); list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, 
&free_list, list) {
		/* Recover the containing ib_mad_private for each buffer */
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

/* QP redirection is not supported; always returns -EINVAL. */
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

/* Stub: logs an error and reports success without processing the WC. */
int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

/*
 * Return -EINVAL if any method requested in mad_reg_req already has a
 * registered agent in *method; 0 when all requested slots are free.
 */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

/* Allocate a zeroed management method table; 0 on success, -ENOMEM. */
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

/* Nonzero if any OUI slot of this vendor class still has a method table. */
static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

/* Return the slot index of the 3-byte OUI in vendor_class, or -1. */
static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

/* Nonzero if any vendor class in the range-2 table is still populated. */
static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}

/* Clear every method slot in the table that points at this agent. */
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

/*
 * Register the agent's requested methods for a non-OUI (non-vendor)
 * management class, allocating the per-version class table and the
 * per-class method table on demand.  Error paths unwind whatever this
 * call allocated (error2/error3) but never tables that pre-existed.
 */
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see
if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; goto error1; error2: kfree(*class); *class = NULL; error1: return ret; } static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_vendor_class_table **vendor_table; struct ib_mad_mgmt_vendor_class_table *vendor = NULL; struct ib_mad_mgmt_vendor_class *vendor_class = NULL; struct ib_mad_mgmt_method_table **method; int i, ret = -ENOMEM; u8 vclass; /* "New" vendor (with OUI) class */ vclass = vendor_class_index(mad_reg_req->mgmt_class); port_priv = agent_priv->qp_info->port_priv; vendor_table = &port_priv->version[ mad_reg_req->mgmt_class_version].vendor; if (!*vendor_table) { /* Allocate mgmt vendor class table for "new" class version */ vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); if (!vendor) { printk(KERN_ERR PFX "No memory for " "ib_mad_mgmt_vendor_class_table\n"); goto error1; } *vendor_table = vendor; } if (!(*vendor_table)->vendor_class[vclass]) { /* Allocate table for this management vendor class */ vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); if (!vendor_class) { printk(KERN_ERR PFX "No memory for " "ib_mad_mgmt_vendor_class\n"); goto error2; } (*vendor_table)->vendor_class[vclass] = vendor_class; } for (i = 0; i < MAX_MGMT_OUI; i++) { /* Is there matching OUI for this vendor class ? */ if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3)) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; BUG_ON(!*method); goto check_in_use; } } for (i = 0; i < MAX_MGMT_OUI; i++) { /* OUI slot available ? 
*/ if (!is_vendor_oui((*vendor_table)->vendor_class[ vclass]->oui[i])) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; BUG_ON(*method); /* Allocate method table for this OUI */ if ((ret = allocate_method_table(method))) goto error3; memcpy((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3); goto check_in_use; } } printk(KERN_ERR PFX "All OUI slots in use\n"); goto error3; check_in_use: /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error4; /* Finally, add in methods being registered */ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) (*method)->agent[i] = agent_priv; return 0; error4: /* Remove any methods for this mad agent */ remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; error3: if (vendor_class) { (*vendor_table)->vendor_class[vclass] = NULL; kfree(vendor_class); } error2: if (vendor) { *vendor_table = NULL; kfree(vendor); } error1: return ret; } static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; int index; u8 mgmt_class; /* * Was MAD registration request supplied * with original registration ? 
*/ if (!agent_priv->reg_req) { goto out; } port_priv = agent_priv->qp_info->port_priv; mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); class = port_priv->version[ agent_priv->reg_req->mgmt_class_version].class; if (!class) goto vendor_check; method = class->method_table[mgmt_class]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* Now, check to see if there are any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); class->method_table[mgmt_class] = NULL; /* Any management classes left ? */ if (!check_class_table(class)) { /* If not, release management class table */ kfree(class); port_priv->version[ agent_priv->reg_req-> mgmt_class_version].class = NULL; } } } vendor_check: if (!is_vendor_class(mgmt_class)) goto out; /* normalize mgmt_class to vendor range 2 */ mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); vendor = port_priv->version[ agent_priv->reg_req->mgmt_class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[mgmt_class]; if (vendor_class) { index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); if (index < 0) goto out; method = vendor_class->method_table[index]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* * Now, check to see if there are * any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); vendor_class->method_table[index] = NULL; memset(vendor_class->oui[index], 0, 3); /* Any OUIs left ? */ if (!check_vendor_class(vendor_class)) { /* If not, release vendor class table */ kfree(vendor_class); vendor->vendor_class[mgmt_class] = NULL; /* Any other vendor classes left ? */ if (!check_vendor_table(vendor)) { kfree(vendor); port_priv->version[ agent_priv->reg_req-> mgmt_class_version]. 
vendor = NULL; } } } } } out: return; } static struct ib_mad_agent_private * find_mad_agent(struct ib_mad_port_private *port_priv, struct ib_mad *mad) { struct ib_mad_agent_private *mad_agent = NULL; unsigned long flags; spin_lock_irqsave(&port_priv->reg_lock, flags); if (ib_response_mad(mad)) { u32 hi_tid; struct ib_mad_agent_private *entry; /* * Routing is based on high 32 bits of transaction ID * of MAD. */ hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; list_for_each_entry(entry, &port_priv->agent_list, agent_list) { if (entry->agent.hi_tid == hi_tid) { mad_agent = entry; break; } } } else { struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; struct ib_vendor_mad *vendor_mad; int index; /* * Routing is based on version, class, and method * For "newer" vendor MADs, also based on OUI */ if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) goto out; if (!is_vendor_class(mad->mad_hdr.mgmt_class)) { class = port_priv->version[ mad->mad_hdr.class_version].class; if (!class) goto out; if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= ARRAY_SIZE(class->method_table)) goto out; method = class->method_table[convert_mgmt_class( mad->mad_hdr.mgmt_class)]; if (method) mad_agent = method->agent[mad->mad_hdr.method & ~IB_MGMT_METHOD_RESP]; } else { vendor = port_priv->version[ mad->mad_hdr.class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[vendor_class_index( mad->mad_hdr.mgmt_class)]; if (!vendor_class) goto out; /* Find matching OUI */ vendor_mad = (struct ib_vendor_mad *)mad; index = find_vendor_oui(vendor_class, vendor_mad->oui); if (index == -1) goto out; method = vendor_class->method_table[index]; if (method) { mad_agent = method->agent[mad->mad_hdr.method & ~IB_MGMT_METHOD_RESP]; } } } if (mad_agent) { if (mad_agent->agent.recv_handler) atomic_inc(&mad_agent->refcount); else { printk(KERN_NOTICE PFX "No receive handler 
for client " "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

/*
 * Sanity-check a received MAD for the given QP: the base version must
 * be supported, SMI classes are only valid on QP0, and all other (GSI)
 * classes only on QPs other than 0.  Returns 1 if valid, 0 otherwise.
 */
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

/*
 * Nonzero when the MAD carries data (as opposed to an RMPP control
 * packet): true if the agent has no RMPP version, RMPP is not active in
 * the header, or the RMPP type is DATA.
 */
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

/* Sent WR and received WC carry the same management class. */
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

/*
 * Decide whether a received MAD came from the peer the send WR was
 * addressed to, comparing either LID + path bits (no GRH) or GIDs
 * (GRH present).  Only meaningful for a request/response pair; two
 * requests or two responses always compare unequal.  Any query failure
 * is treated as "not equal" to avoid false positives.
 */
static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc )
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives.
		 */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			/* LMC low bits may vary per path; mask them off */
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

/*
 * Locate the send WR matching a received response by TID, class and
 * (for LID-routed MADs) source GID/path.  Searches the wait list first,
 * then the send list, since the response may arrive before the send
 * completion.  Returns NULL when no match or the request was canceled.
 */
struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid && wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ?
wr : NULL; } return NULL; } void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) { mad_send_wr->timeout = 0; if (mad_send_wr->refcount == 1) list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->done_list); } static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (mad_agent_priv->agent.rmpp_version) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, mad_recv_wc); if (!mad_recv_wc) { deref_mad_agent(mad_agent_priv); return; } } /* Complete corresponding request */ if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; } ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Defined behavior is to complete response before request */ mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, mad_recv_wc); atomic_dec(&mad_agent_priv->refcount); mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); } else { mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, mad_recv_wc); deref_mad_agent(mad_agent_priv); } } static bool generate_unmatched_resp(struct ib_mad_private *recv, struct ib_mad_private *response) { if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { memcpy(response, recv, sizeof *response); response->header.recv_wc.wc = &response->header.wc; 
response->header.recv_wc.recv_buf.mad = &response->mad.mad; response->header.recv_wc.recv_buf.grh = &response->grh; response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; response->mad.mad.mad_hdr.status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; return true; } else { return false; } } static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { struct ib_mad_qp_info *qp_info; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv, *response = NULL; struct ib_mad_list_head *mad_list; struct ib_mad_agent_private *mad_agent; int port_num; int ret = IB_MAD_RESULT_SUCCESS; mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; qp_info = mad_list->mad_queue->qp_info; dequeue_mad(mad_list); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); ib_dma_unmap_single(port_priv->device, recv->header.mapping, sizeof(struct ib_mad_private) - sizeof(struct ib_mad_private_header), DMA_FROM_DEVICE); /* Setup MAD receive work completion from "normal" work completion */ recv->header.wc = *wc; recv->header.recv_wc.wc = &recv->header.wc; recv->header.recv_wc.mad_len = sizeof(struct ib_mad); recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; recv->header.recv_wc.recv_buf.grh = &recv->grh; if (atomic_read(&qp_info->snoop_count)) snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); /* Validate MAD */ if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) goto out; response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); if (!response) { printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " "for response buffer\n"); goto out; } if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) port_num = wc->port_num; else port_num = port_priv->port_num; if 
(recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { enum smi_forward_action retsmi; if (smi_handle_dr_smp_recv(&recv->mad.smp, port_priv->device->node_type, port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) goto out; retsmi = smi_check_forward_dr_smp(&recv->mad.smp); if (retsmi == IB_SMI_LOCAL) goto local; if (retsmi == IB_SMI_SEND) { /* don't forward */ if (smi_handle_dr_smp_send(&recv->mad.smp, port_priv->device->node_type, port_num) == IB_SMI_DISCARD) goto out; if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) goto out; } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { /* forward case for switches */ memcpy(response, recv, sizeof(*response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.mad = &response->mad.mad; response->header.recv_wc.recv_buf.grh = &response->grh; agent_send_response(&response->mad.mad, &response->grh, wc, port_priv->device, smi_get_fwd_port(&recv->mad.smp), qp_info->qp->qp_num); goto out; } } local: /* Give driver "right of first refusal" on incoming MAD */ if (port_priv->device->process_mad) { ret = port_priv->device->process_mad(port_priv->device, 0, port_priv->port_num, wc, &recv->grh, &recv->mad.mad, &response->mad.mad); if (ret & IB_MAD_RESULT_SUCCESS) { if (ret & IB_MAD_RESULT_CONSUMED) goto out; if (ret & IB_MAD_RESULT_REPLY) { agent_send_response(&response->mad.mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num); goto out; } } } mad_agent = find_mad_agent(port_priv, &recv->mad.mad); if (mad_agent) { ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); /* * recv is freed up in error cases in ib_mad_complete_recv * or via recv_handler in ib_mad_complete_recv() */ recv = NULL; } else if ((ret & IB_MAD_RESULT_SUCCESS) && generate_unmatched_resp(recv, response)) { agent_send_response(&response->mad.mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num); } out: /* Post another receive 
request for this QP */ if (response) { ib_mad_post_receive_mads(qp_info, response); if (recv) kmem_cache_free(ib_mad_cache, recv); } else ib_mad_post_receive_mads(qp_info, recv); } static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *mad_send_wr; unsigned long delay; if (list_empty(&mad_agent_priv->wait_list)) { cancel_delayed_work(&mad_agent_priv->timed_work); } else { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_agent_priv->timeout, mad_send_wr->timeout)) { mad_agent_priv->timeout = mad_send_wr->timeout; delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } } } static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *temp_mad_send_wr; struct list_head *list_item; unsigned long delay; mad_agent_priv = mad_send_wr->mad_agent_priv; list_del(&mad_send_wr->agent_list); delay = mad_send_wr->timeout; mad_send_wr->timeout += jiffies; if (delay) { list_for_each_prev(list_item, &mad_agent_priv->wait_list) { temp_mad_send_wr = list_entry(list_item, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, temp_mad_send_wr->timeout)) break; } } else list_item = &mad_agent_priv->wait_list; list_add(&mad_send_wr->agent_list, list_item); /* Reschedule a work item if we have a shorter timeout */ if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, int timeout_ms) { mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); wait_for_response(mad_send_wr); } /* * Process a send work completion */ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private 
*mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_agent_private *mad_agent_priv; unsigned long flags; int ret; mad_agent_priv = mad_send_wr->mad_agent_priv; spin_lock_irqsave(&mad_agent_priv->lock, flags); if (mad_agent_priv->agent.rmpp_version) { ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); if (ret == IB_RMPP_RESULT_CONSUMED) goto done; } else ret = IB_RMPP_RESULT_UNHANDLED; if (mad_send_wc->status != IB_WC_SUCCESS && mad_send_wr->status == IB_WC_SUCCESS) { mad_send_wr->status = mad_send_wc->status; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } if (--mad_send_wr->refcount > 0) { if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && mad_send_wr->status == IB_WC_SUCCESS) { wait_for_response(mad_send_wr); } goto done; } /* Remove send from MAD agent and notify client of completion */ list_del(&mad_send_wr->agent_list); adjust_timeout(mad_agent_priv); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_send_wr->status != IB_WC_SUCCESS ) mad_send_wc->status = mad_send_wr->status; if (ret == IB_RMPP_RESULT_INTERNAL) ib_rmpp_send_handler(mad_send_wc); else mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, mad_send_wc); /* Release reference on agent taken when sending */ deref_mad_agent(mad_agent_priv); return; done: spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; struct ib_mad_list_head *mad_list; struct ib_mad_qp_info *qp_info; struct ib_mad_queue *send_queue; struct ib_send_wr *bad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; int ret; mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); send_queue = mad_list->mad_queue; qp_info = send_queue->qp_info; retry: ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 
mad_send_wr->header_mapping, mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, mad_send_wr->payload_mapping, mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); queued_send_wr = NULL; spin_lock_irqsave(&send_queue->lock, flags); list_del(&mad_list->list); /* Move queued send to the send queue */ if (send_queue->count-- > send_queue->max_active) { mad_list = container_of(qp_info->overflow_list.next, struct ib_mad_list_head, list); queued_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); list_move_tail(&mad_list->list, &send_queue->list); } spin_unlock_irqrestore(&send_queue->lock, flags); mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_send_wc.status = wc->status; mad_send_wc.vendor_err = wc->vendor_err; if (atomic_read(&qp_info->snoop_count)) snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); if (queued_send_wr) { ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, &bad_send_wr); if (ret) { printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); mad_send_wr = queued_send_wr; wc->status = IB_WC_LOC_QP_OP_ERR; goto retry; } } } static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_list_head *mad_list; unsigned long flags; spin_lock_irqsave(&qp_info->send_queue.lock, flags); list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); mad_send_wr->retry = 1; } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); } static void mad_error_handler(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { struct ib_mad_list_head *mad_list; struct ib_mad_qp_info *qp_info; struct ib_mad_send_wr_private *mad_send_wr; int ret; /* Determine if failure was a send or receive */ mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 
qp_info = mad_list->mad_queue->qp_info; if (mad_list->mad_queue == &qp_info->recv_queue) /* * Receive errors indicate that the QP has entered the error * state - error handling/shutdown code will cleanup */ return; /* * Send errors will transition the QP to SQE - move * QP to RTS and repost flushed work requests */ mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); if (wc->status == IB_WC_WR_FLUSH_ERR) { if (mad_send_wr->retry) { /* Repost send */ struct ib_send_wr *bad_send_wr; mad_send_wr->retry = 0; ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, &bad_send_wr); if (ret) ib_mad_send_done_handler(port_priv, wc); } else ib_mad_send_done_handler(port_priv, wc); } else { struct ib_qp_attr *attr; /* Transition QP to RTS and fail offending send */ attr = kmalloc(sizeof *attr, GFP_KERNEL); if (attr) { attr->qp_state = IB_QPS_RTS; attr->cur_qp_state = IB_QPS_SQE; ret = ib_modify_qp(qp_info->qp, attr, IB_QP_STATE | IB_QP_CUR_STATE); kfree(attr); if (ret) printk(KERN_ERR PFX "mad_error_handler - " "ib_modify_qp to RTS : %d\n", ret); else mark_sends_for_retry(qp_info); } ib_mad_send_done_handler(port_priv, wc); } } /* * IB MAD completion callback */ static void ib_mad_completion_handler(struct work_struct *work) { struct ib_mad_port_private *port_priv; struct ib_wc wc; port_priv = container_of(work, struct ib_mad_port_private, work); ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: ib_mad_send_done_handler(port_priv, &wc); break; case IB_WC_RECV: ib_mad_recv_done_handler(port_priv, &wc); break; default: BUG_ON(1); break; } } else mad_error_handler(port_priv, &wc); } } static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) { unsigned long flags; struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; struct ib_mad_send_wc mad_send_wc; struct list_head cancel_list; INIT_LIST_HEAD(&cancel_list); 
spin_lock_irqsave(&mad_agent_priv->lock, flags); list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (mad_send_wr->status == IB_WC_SUCCESS) { mad_send_wr->status = IB_WC_WR_FLUSH_ERR; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } } /* Empty wait list to prevent receives from finding a request */ list_splice_init(&mad_agent_priv->wait_list, &cancel_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Report all cancelled requests */ mad_send_wc.status = IB_WC_WR_FLUSH_ERR; mad_send_wc.vendor_err = 0; list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &cancel_list, agent_list) { mad_send_wc.send_buf = &mad_send_wr->send_buf; list_del(&mad_send_wr->agent_list); mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); atomic_dec(&mad_agent_priv->refcount); } } static struct ib_mad_send_wr_private* find_send_wr(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_buf *send_buf) { struct ib_mad_send_wr_private *mad_send_wr; list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, agent_list) { if (&mad_send_wr->send_buf == send_buf) return mad_send_wr; } list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && &mad_send_wr->send_buf == send_buf) return mad_send_wr; } return NULL; } int ib_modify_mad(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf, u32 timeout_ms) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int active; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = find_send_wr(mad_agent_priv, send_buf); if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return -EINVAL; } active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); if (!timeout_ms) { 
mad_send_wr->status = IB_WC_WR_FLUSH_ERR; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } mad_send_wr->send_buf.timeout_ms = timeout_ms; if (active) mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); else ib_reset_mad_timeout(mad_send_wr, timeout_ms); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return 0; } EXPORT_SYMBOL(ib_modify_mad); void ib_cancel_mad(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf) { ib_modify_mad(mad_agent, send_buf, 0); } EXPORT_SYMBOL(ib_cancel_mad); static void local_completions(struct work_struct *work) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_local_private *local; struct ib_mad_agent_private *recv_mad_agent; unsigned long flags; int free_mad; struct ib_wc wc; struct ib_mad_send_wc mad_send_wc; mad_agent_priv = container_of(work, struct ib_mad_agent_private, local_work); spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->local_list)) { local = list_entry(mad_agent_priv->local_list.next, struct ib_mad_local_private, completion_list); list_del(&local->completion_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); free_mad = 0; if (local->mad_priv) { recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); free_mad = 1; goto local_send_completion; } /* * Defined behavior is to complete response * before request */ build_smp_wc(recv_mad_agent->agent.qp, (unsigned long) local->mad_send_wr, be16_to_cpu(IB_LID_PERMISSIVE), 0, recv_mad_agent->agent.port_num, &wc); local->mad_priv->header.recv_wc.wc = &wc; local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); list_add(&local->mad_priv->header.recv_wc.recv_buf.list, &local->mad_priv->header.recv_wc.rmpp_list); local->mad_priv->header.recv_wc.recv_buf.grh = NULL; local->mad_priv->header.recv_wc.recv_buf.mad = &local->mad_priv->mad.mad; if 
(atomic_read(&recv_mad_agent->qp_info->snoop_count)) snoop_recv(recv_mad_agent->qp_info, &local->mad_priv->header.recv_wc, IB_MAD_SNOOP_RECVS); recv_mad_agent->agent.recv_handler( &recv_mad_agent->agent, &local->mad_priv->header.recv_wc); spin_lock_irqsave(&recv_mad_agent->lock, flags); atomic_dec(&recv_mad_agent->refcount); spin_unlock_irqrestore(&recv_mad_agent->lock, flags); } local_send_completion: /* Complete send */ mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &local->mad_send_wr->send_buf; if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) snoop_send(mad_agent_priv->qp_info, &local->mad_send_wr->send_buf, &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); spin_lock_irqsave(&mad_agent_priv->lock, flags); atomic_dec(&mad_agent_priv->refcount); if (free_mad) kmem_cache_free(ib_mad_cache, local->mad_priv); kfree(local); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) { int ret; if (!mad_send_wr->retries_left) return -ETIMEDOUT; mad_send_wr->retries_left--; mad_send_wr->send_buf.retries++; mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { ret = ib_retry_rmpp(mad_send_wr); switch (ret) { case IB_RMPP_RESULT_UNHANDLED: ret = ib_send_mad(mad_send_wr); break; case IB_RMPP_RESULT_CONSUMED: ret = 0; break; default: ret = -ECOMM; break; } } else ret = ib_send_mad(mad_send_wr); if (!ret) { mad_send_wr->refcount++; list_add_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } return ret; } static void timeout_sends(struct work_struct *work) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags, delay; mad_agent_priv = container_of(work, struct ib_mad_agent_private, timed_work.work); 
mad_send_wc.vendor_err = 0; spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->wait_list)) { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, jiffies)) { delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; queue_delayed_work(mad_agent_priv->qp_info-> port_priv->wq, &mad_agent_priv->timed_work, delay); break; } list_del(&mad_send_wr->agent_list); if (mad_send_wr->status == IB_WC_SUCCESS && !retry_send(mad_send_wr)) continue; spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_send_wr->status == IB_WC_SUCCESS) mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; else mad_send_wc.status = mad_send_wr->status; mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); atomic_dec(&mad_agent_priv->refcount); spin_lock_irqsave(&mad_agent_priv->lock, flags); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) { struct ib_mad_port_private *port_priv = cq->cq_context; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); if (!list_empty(&port_priv->port_list)) queue_work(port_priv->wq, &port_priv->work); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); } /* * Allocate receive MADs and post receive WRs for them */ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr, *bad_recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; /* Initialize common scatter list fields */ sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; sg_list.lkey = (*qp_info->port_priv->mr).lkey; /* Initialize common receive WR fields */ recv_wr.next = NULL; recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; do { /* Allocate 
and map receive buffer */ if (mad) { mad_priv = mad; mad = NULL; } else { mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); if (!mad_priv) { printk(KERN_ERR PFX "No memory for receive buffer\n"); ret = -ENOMEM; break; } } sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, &mad_priv->grh, sizeof *mad_priv - sizeof mad_priv->header, DMA_FROM_DEVICE); mad_priv->header.mapping = sg_list.addr; recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; mad_priv->header.mad_list.mad_queue = recv_queue; /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); post = (++recv_queue->count < recv_queue->max_active); list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); ib_dma_unmap_single(qp_info->port_priv->device, mad_priv->header.mapping, sizeof *mad_priv - sizeof mad_priv->header, DMA_FROM_DEVICE); kmem_cache_free(ib_mad_cache, mad_priv); printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); break; } } while (post); return ret; } /* * Return all the posted receive MADs */ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) { struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv; struct ib_mad_list_head *mad_list; if (!qp_info->qp) return; while (!list_empty(&qp_info->recv_queue.list)) { mad_list = list_entry(qp_info->recv_queue.list.next, struct ib_mad_list_head, list); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); /* Remove from posted receive MAD list */ list_del(&mad_list->list); ib_dma_unmap_single(qp_info->port_priv->device, recv->header.mapping, sizeof(struct ib_mad_private) - sizeof(struct ib_mad_private_header), 
DMA_FROM_DEVICE); kmem_cache_free(ib_mad_cache, recv); } qp_info->recv_queue.count = 0; } /* * Start the port */ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) { int ret, i; struct ib_qp_attr *attr; struct ib_qp *qp; attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); return -ENOMEM; } for (i = 0; i < IB_MAD_QPS_CORE; i++) { qp = port_priv->qp_info[i].qp; if (!qp) continue; /* * PKey index for QP1 is irrelevant but * one is needed for the Reset to Init transition */ attr->qp_state = IB_QPS_INIT; attr->pkey_index = 0; attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY); if (ret) { printk(KERN_ERR PFX "Couldn't change QP%d state to " "INIT: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { printk(KERN_ERR PFX "Couldn't change QP%d state to " "RTR: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTS; attr->sq_psn = IB_MAD_SEND_Q_PSN; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { printk(KERN_ERR PFX "Couldn't change QP%d state to " "RTS: %d\n", i, ret); goto out; } } ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); if (ret) { printk(KERN_ERR PFX "Failed to request completion " "notification: %d\n", ret); goto out; } for (i = 0; i < IB_MAD_QPS_CORE; i++) { if (!port_priv->qp_info[i].qp) continue; ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); if (ret) { printk(KERN_ERR PFX "Couldn't post receive WRs\n"); goto out; } } out: kfree(attr); return ret; } static void qp_event_handler(struct ib_event *event, void *qp_context) { struct ib_mad_qp_info *qp_info = qp_context; /* It's worse than that! He's dead, Jim! 
*/ printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", event->event, qp_info->qp->qp_num); } static void init_mad_queue(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *mad_queue) { mad_queue->qp_info = qp_info; mad_queue->count = 0; spin_lock_init(&mad_queue->lock); INIT_LIST_HEAD(&mad_queue->list); } static void init_mad_qp(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info) { qp_info->port_priv = port_priv; init_mad_queue(qp_info, &qp_info->send_queue); init_mad_queue(qp_info, &qp_info->recv_queue); INIT_LIST_HEAD(&qp_info->overflow_list); spin_lock_init(&qp_info->snoop_lock); qp_info->snoop_table = NULL; qp_info->snoop_table_size = 0; atomic_set(&qp_info->snoop_count, 0); } static int create_mad_qp(struct ib_mad_qp_info *qp_info, enum ib_qp_type qp_type) { struct ib_qp_init_attr qp_init_attr; int ret; memset(&qp_init_attr, 0, sizeof qp_init_attr); qp_init_attr.send_cq = qp_info->port_priv->cq; qp_init_attr.recv_cq = qp_info->port_priv->cq; qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; qp_init_attr.cap.max_send_wr = mad_sendq_size; qp_init_attr.cap.max_recv_wr = mad_recvq_size; qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; qp_init_attr.qp_type = qp_type; qp_init_attr.port_num = qp_info->port_priv->port_num; qp_init_attr.qp_context = qp_info; qp_init_attr.event_handler = qp_event_handler; qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); if (IS_ERR(qp_info->qp)) { printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", get_spl_qp_index(qp_type)); ret = PTR_ERR(qp_info->qp); goto error; } /* Use minimum queue sizes unless the CQ is resized */ qp_info->send_queue.max_active = mad_sendq_size; qp_info->recv_queue.max_active = mad_recvq_size; return 0; error: return ret; } static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) { if (!qp_info->qp) return; ib_destroy_qp(qp_info->qp); kfree(qp_info->snoop_table); } /* * Open the port * Create the QP, PD, MR, and 
CQ if needed */ static int ib_mad_port_open(struct ib_device *device, int port_num) { int ret, cq_size; struct ib_mad_port_private *port_priv; unsigned long flags; char name[sizeof "ib_mad123"]; int has_smi; /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); return -ENOMEM; } port_priv->device = device; port_priv->port_num = port_num; spin_lock_init(&port_priv->reg_lock); INIT_LIST_HEAD(&port_priv->agent_list); init_mad_qp(port_priv, &port_priv->qp_info[0]); init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND; if (has_smi) cq_size *= 2; port_priv->cq = ib_create_cq(port_priv->device, ib_mad_thread_completion_handler, NULL, port_priv, cq_size, 0); if (IS_ERR(port_priv->cq)) { printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); goto error3; } port_priv->pd = ib_alloc_pd(device); if (IS_ERR(port_priv->pd)) { printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); ret = PTR_ERR(port_priv->pd); goto error4; } port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(port_priv->mr)) { printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); ret = PTR_ERR(port_priv->mr); goto error5; } if (has_smi) { ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); if (ret) goto error6; } ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); if (ret) goto error7; snprintf(name, sizeof name, "ib_mad%d", port_num); port_priv->wq = create_singlethread_workqueue(name); if (!port_priv->wq) { ret = -ENOMEM; goto error8; } INIT_WORK(&port_priv->work, ib_mad_completion_handler); spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_mad_port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); ret = ib_mad_port_start(port_priv); if (ret) { printk(KERN_ERR PFX "Couldn't start 
port\n"); goto error9; } return 0; error9: spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); error8: destroy_mad_qp(&port_priv->qp_info[1]); error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: ib_dereg_mr(port_priv->mr); error5: ib_dealloc_pd(port_priv->pd); error4: ib_destroy_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); error3: kfree(port_priv); return ret; } /* * Close the port * If there are no classes using the port, free the port * resources (CQ, MR, PD, QP) and remove the port's info structure */ static int ib_mad_port_close(struct ib_device *device, int port_num) { struct ib_mad_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); printk(KERN_ERR PFX "Port %d not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); ib_dereg_mr(port_priv->mr); ib_dealloc_pd(port_priv->pd); ib_destroy_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ kfree(port_priv); return 0; } static void ib_mad_init_device(struct ib_device *device) { int start, end, i; if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) return; if (device->node_type == RDMA_NODE_IB_SWITCH) { start = 0; end = 0; } else { start = 1; end = device->phys_port_cnt; } for (i = start; i <= end; i++) { if (ib_mad_port_open(device, i)) { printk(KERN_ERR PFX "Couldn't open %s port %d\n", device->name, i); goto error; } if 
(ib_agent_port_open(device, i)) { printk(KERN_ERR PFX "Couldn't open %s port %d " "for agents\n", device->name, i); goto error_agent; } } return; error_agent: if (ib_mad_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", device->name, i); error: i--; while (i >= start) { if (ib_agent_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d " "for agents\n", device->name, i); if (ib_mad_port_close(device, i)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", device->name, i); i--; } } static void ib_mad_remove_device(struct ib_device *device) { int i, num_ports, cur_port; if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) return; if (device->node_type == RDMA_NODE_IB_SWITCH) { num_ports = 1; cur_port = 0; } else { num_ports = device->phys_port_cnt; cur_port = 1; } for (i = 0; i < num_ports; i++, cur_port++) { if (ib_agent_port_close(device, cur_port)) printk(KERN_ERR PFX "Couldn't close %s port %d " "for agents\n", device->name, cur_port); if (ib_mad_port_close(device, cur_port)) printk(KERN_ERR PFX "Couldn't close %s port %d\n", device->name, cur_port); } } static struct ib_client mad_client = { .name = "mad", .add = ib_mad_init_device, .remove = ib_mad_remove_device }; static int __init ib_mad_init_module(void) { int ret; mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); ib_mad_cache = kmem_cache_create("ib_mad", sizeof(struct ib_mad_private), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ib_mad_cache) { printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); ret = -ENOMEM; goto error1; } INIT_LIST_HEAD(&ib_mad_port_list); if (ib_register_client(&mad_client)) { printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); ret = -EINVAL; goto error2; } return 0; error2: kmem_cache_destroy(ib_mad_cache); error1: return ret; } static void __exit 
ib_mad_cleanup_module(void) { ib_unregister_client(&mad_client); kmem_cache_destroy(ib_mad_cache); } module_init(ib_mad_init_module); module_exit(ib_mad_cleanup_module);
/* ==== concatenated file boundary: 41349.c (criterion unit tests) ==== */
/*
** EPITECH PROJECT, 2017
** how_long_tab
** File description:
** test
*/

#include <criterion/criterion.h>
#include <stdlib.h>
#include <stdio.h>

/* Under test: counts entries in a NULL-terminated array of strings. */
int how_long_tab(const char **tab);

/*
** All fixtures are stack arrays instead of the previous unchecked,
** leaked malloc() allocations: the sizes are compile-time constants,
** so heap allocation added failure modes (NULL deref on OOM) and
** leaked every test's buffer without adding any coverage.
*/

/* One entry followed by the NULL terminator -> length 1. */
Test(how_long_tab, test)
{
    const char *tab[] = {"la", NULL};

    cr_assert_eq(tab[1], NULL);
    cr_assert_eq(how_long_tab(tab), 1);
}

/* Two identical entries -> length 2. */
Test(how_long_tab, test2)
{
    const char *tab[] = {"la", "la", NULL};

    cr_assert_eq(tab[2], NULL);
    cr_assert_eq(how_long_tab(tab), 2);
}

/* Four entries -> length 4. */
Test(how_long_tab, test3)
{
    const char *tab[] = {"la", "la", "la", "la", NULL};

    cr_assert_eq(how_long_tab(tab), 4);
}

/* Environment-style KEY=value strings -> length 2. */
Test(how_long_tab, test4)
{
    char *env[] = {"FIRST=first\0", "SECOND=second\0", NULL};

    cr_assert_eq(how_long_tab((const char **)env), 2);
}

/* NULL array pointer must be handled and reported as length 0. */
Test(how_long_tab, test5)
{
    char **tab = NULL;

    cr_assert_eq(how_long_tab((const char **)tab), 0);
}
/* ==== concatenated file boundary: 536339.c (HiSax isdnl1.c) ==== */
/* $Id: isdnl1.c,v 2.46.2.5 2004/02/11 13:21:34 Exp $
 *
 * common low level stuff for Siemens Chipsetbased isdn cards
 *
 * Author       Karsten Keil
 *              based on the teles driver from Jan den Ouden
 * Copyright    by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For changes and modifications please read
 * Documentation/isdn/HiSax.cert
 *
 * Thanks to    Jan den Ouden
 *              Fritz Elfert
 *              Beat Doebeli
 */

#include <linux/init.h>
#include <linux/gfp.h>
#include "hisax.h"
#include "isdnl1.h"

const char *l1_revision = "$Revision: 2.46.2.5 $";

/* T3 activation supervision timeout in milliseconds (ITU-T I.430). */
#define TIMER3_VALUE 7000

/* Shared state machines: one for B-channels, one for the S/T D-channel. */
static struct Fsm l1fsm_b;
static struct Fsm l1fsm_s;

/* Layer-1 states of the S/T interface (F2..F8 per ITU-T I.430). */
enum {
	ST_L1_F2,
	ST_L1_F3,
	ST_L1_F4,
	ST_L1_F5,
	ST_L1_F6,
	ST_L1_F7,
	ST_L1_F8,
};

#define L1S_STATE_COUNT (ST_L1_F8+1)

/* State names for FSM debug output; indexed by the enum above. */
static char *strL1SState[] =
{
	"ST_L1_F2",
	"ST_L1_F3",
	"ST_L1_F4",
	"ST_L1_F5",
	"ST_L1_F6",
	"ST_L1_F7",
	"ST_L1_F8",
};

#ifdef HISAX_UINTERFACE
/* FSM for the U-interface variant; fields are filled in by Isdnl1New(). */
static struct Fsm l1fsm_u =
{NULL, 0, 0, NULL, NULL};

/* Layer-1 states of the U interface. */
enum {
	ST_L1_RESET,
	ST_L1_DEACT,
	ST_L1_SYNC2,
	ST_L1_TRANS,
};

#define L1U_STATE_COUNT (ST_L1_TRANS+1)

static char *strL1UState[] =
{
	"ST_L1_RESET",
	"ST_L1_DEACT",
	"ST_L1_SYNC2",
	"ST_L1_TRANS",
};
#endif

/* Layer-1 states of a B-channel (activation is timer driven only). */
enum {
	ST_L1_NULL,
	ST_L1_WAIT_ACT,
	ST_L1_WAIT_DEACT,
	ST_L1_ACTIV,
};

#define L1B_STATE_COUNT (ST_L1_ACTIV+1)

static char *strL1BState[] =
{
	"ST_L1_NULL",
	"ST_L1_WAIT_ACT",
	"ST_L1_WAIT_DEACT",
	"ST_L1_ACTIV",
};

/* Events fed into all three layer-1 state machines. */
enum {
	EV_PH_ACTIVATE,
	EV_PH_DEACTIVATE,
	EV_RESET_IND,
	EV_DEACT_CNF,
	EV_DEACT_IND,
	EV_POWER_UP,
	EV_RSYNC_IND,
	EV_INFO2_IND,
	EV_INFO4_IND,
	EV_TIMER_DEACT,
	EV_TIMER_ACT,
	EV_TIMER3,
};

#define L1_EVENT_COUNT (EV_TIMER3 + 1)

static char *strL1Event[] =
{
	"EV_PH_ACTIVATE",
	"EV_PH_DEACTIVATE",
	"EV_RESET_IND",
	"EV_DEACT_CNF",
	"EV_DEACT_IND",
	"EV_POWER_UP",
	"EV_RSYNC_IND",
	"EV_INFO2_IND",
	"EV_INFO4_IND",
	"EV_TIMER_DEACT",
	"EV_TIMER_ACT",
	"EV_TIMER3",
};

/*
 * Emit a debug line for a card, prefixed with "Card<n> ".
 * NOTE(review): tmp[8] only holds "Card%d " plus NUL for cardnr + 1 <= 99;
 * larger card numbers would overflow the buffer - confirm the cardnr range.
 */
void
debugl1(struct IsdnCardState *cs, char *fmt, ...)
{
	va_list args;
	char tmp[8];

	va_start(args, fmt);
	sprintf(tmp, "Card%d ", cs->cardnr + 1);
	VHiSax_putstatus(cs, tmp, fmt, args);
	va_end(args);
}

/* FSM debug callback: same output as debugl1(), but reached via FsmInst. */
static void
l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;
	struct IsdnCardState *cs = st->l1.hardware;
	char tmp[8];

	va_start(args, fmt);
	sprintf(tmp, "Card%d ", cs->cardnr + 1);
	VHiSax_putstatus(cs, tmp, fmt, args);
	va_end(args);
}

/*
 * Notify every protocol stack on this card that layer 1 is up.
 * Stacks that requested the activation get a CONFIRM, the rest an
 * INDICATION.
 */
static void
L1activated(struct IsdnCardState *cs)
{
	struct PStack *st;

	st = cs->stlist;
	while (st) {
		if (test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
			st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
		else
			st->l1.l1l2(st, PH_ACTIVATE | INDICATION, NULL);
		st = st->next;
	}
}

/*
 * Notify every protocol stack on this card that layer 1 went down;
 * a pending D-channel transmit (FLG_L1_DBUSY) is reported as PH_PAUSE
 * first, then the flag is cleared once for the card.
 */
static void
L1deactivated(struct IsdnCardState *cs)
{
	struct PStack *st;

	st = cs->stlist;
	while (st) {
		if (test_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			st->l1.l1l2(st, PH_PAUSE | CONFIRM, NULL);
		st->l1.l1l2(st, PH_DEACTIVATE | INDICATION, NULL);
		st = st->next;
	}
	test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags);
}

/*
 * D-channel transmit bottom half: if the transmitter is idle, grant the
 * first stack with a pending PH_PULL request permission to send.
 */
void
DChannel_proc_xmt(struct IsdnCardState *cs)
{
	struct PStack *stptr;

	if (cs->tx_skb)
		return;

	stptr = cs->stlist;
	while (stptr != NULL) {
		if (test_and_clear_bit(FLG_L1_PULL_REQ, &stptr->l1.Flags)) {
			stptr->l1.l1l2(stptr, PH_PULL | CONFIRM, NULL);
			break;
		} else
			stptr = stptr->next;
	}
}

/*
 * D-channel receive bottom half: validate each queued LAPD frame
 * (minimum length, EA bits) and dispatch it by SAPI/TEI - broadcast
 * frames are cloned to every stack, point-to-point frames go to the
 * stack owning the TEI, everything else is dropped.
 */
void
DChannel_proc_rcv(struct IsdnCardState *cs)
{
	struct sk_buff *skb, *nskb;
	struct PStack *stptr = cs->stlist;
	int found, tei, sapi;

	/* a received frame proves the line is active: finish activation */
	if (stptr)
		if (test_bit(FLG_L1_ACTTIMER, &stptr->l1.Flags))
			FsmEvent(&stptr->l1.l1m, EV_TIMER_ACT, NULL);
	while ((skb = skb_dequeue(&cs->rq))) {
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			Logl2Frame(cs, skb, "PH_DATA", 1);
#endif
		stptr = cs->stlist;
		if (skb->len < 3) {
			debugl1(cs, "D-channel frame too short(%d)", skb->len);
			dev_kfree_skb(skb);
			return;
		}
		if ((skb->data[0] & 1) || !(skb->data[1] & 1)) {
			debugl1(cs, "D-channel frame wrong EA0/EA1");
			dev_kfree_skb(skb);
			return;
		}
		sapi = skb->data[0] >> 2;
		tei = skb->data[1] >> 1;
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 1);
		if (tei == GROUP_TEI) {
			if (sapi == CTRL_SAPI) { /* sapi 0 */
				/* broadcast to the L2 entity of every stack */
				while (stptr != NULL) {
					if ((nskb = skb_clone(skb, GFP_ATOMIC)))
						stptr->l1.l1l2(stptr, PH_DATA | INDICATION, nskb);
					else
						printk(KERN_WARNING "HiSax: isdn broadcast buffer shortage\n");
					stptr = stptr->next;
				}
			} else if (sapi == TEI_SAPI) {
				/* broadcast to the TEI management entity */
				while (stptr != NULL) {
					if ((nskb = skb_clone(skb, GFP_ATOMIC)))
						stptr->l1.l1tei(stptr, PH_DATA | INDICATION, nskb);
					else
						printk(KERN_WARNING "HiSax: tei broadcast buffer shortage\n");
					stptr = stptr->next;
				}
			}
			dev_kfree_skb(skb);
		} else if (sapi == CTRL_SAPI) { /* sapi 0 */
			/* point-to-point: hand the skb to the owning TEI */
			found = 0;
			while (stptr != NULL)
				if (tei == stptr->l2.tei) {
					stptr->l1.l1l2(stptr, PH_DATA | INDICATION, skb);
					found = !0;
					break;
				} else
					stptr = stptr->next;
			if (!found)
				dev_kfree_skb(skb);
		} else
			dev_kfree_skb(skb);
	}
}

/*
 * B-channel transmit bottom half: grant a pending PH_PULL and, when the
 * channel is no longer active and fully drained, confirm deactivation.
 */
static void
BChannel_proc_xmt(struct BCState *bcs)
{
	struct PStack *st = bcs->st;

	if (test_bit(BC_FLG_BUSY, &bcs->Flag)) {
		debugl1(bcs->cs, "BC_BUSY Error");
		return;
	}

	if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags))
		st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
	if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) {
		if (!test_bit(BC_FLG_BUSY, &bcs->Flag) &&
		    skb_queue_empty(&bcs->squeue)) {
			st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
		}
	}
}

/*
 * B-channel receive bottom half: data arriving while still waiting for
 * activation counts as activation; then deliver all queued frames upward.
 */
static void
BChannel_proc_rcv(struct BCState *bcs)
{
	struct sk_buff *skb;

	if (bcs->st->l1.l1m.state == ST_L1_WAIT_ACT) {
		FsmDelTimer(&bcs->st->l1.timer, 4);
		FsmEvent(&bcs->st->l1.l1m, EV_TIMER_ACT, NULL);
	}
	while ((skb = skb_dequeue(&bcs->rqueue))) {
		bcs->st->l1.l1l2(bcs->st, PH_DATA | INDICATION, skb);
	}
}

/*
 * B-channel ack bottom half: atomically collect the byte count
 * acknowledged by the hardware and wake up the line interface.
 */
static void
BChannel_proc_ack(struct BCState *bcs)
{
	u_long flags;
	int ack;

	spin_lock_irqsave(&bcs->aclock, flags);
	ack = bcs->ackcnt;
	bcs->ackcnt = 0;
	spin_unlock_irqrestore(&bcs->aclock, flags);
	if (ack)
		lli_writewakeup(bcs->st, ack);
}

/* B-channel work handler: dispatch the event bits set by the ISR. */
void
BChannel_bh(struct work_struct *work)
{
	struct BCState *bcs = container_of(work, struct BCState, tqueue);

	if (!bcs)
		return;
	if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
		BChannel_proc_rcv(bcs);
	if (test_and_clear_bit(B_XMTBUFREADY, &bcs->event))
		BChannel_proc_xmt(bcs);
	if (test_and_clear_bit(B_ACKPENDING, &bcs->event))
		BChannel_proc_ack(bcs);
}

/* Prepend a protocol stack to the card's singly linked stack list. */
void
HiSax_addlist(struct IsdnCardState *cs,
	      struct PStack *st)
{
	st->next = cs->stlist;
	cs->stlist = st;
}

/* Remove a protocol stack from the card's list, stopping its L1 timer. */
void
HiSax_rmlist(struct IsdnCardState *cs,
	     struct PStack *st)
{
	struct PStack *p;

	FsmDelTimer(&st->l1.timer, 0);
	if (cs->stlist == st)
		cs->stlist = st->next;
	else {
		p = cs->stlist;
		while (p)
			if (p->next == st) {
				p->next = st->next;
				return;
			} else
				p = p->next;
	}
}

/* Initialize the per-B-channel state embedded in the card state. */
void
init_bcstate(struct IsdnCardState *cs, int bc)
{
	struct BCState *bcs = cs->bcs + bc;

	bcs->cs = cs;
	bcs->channel = bc;
	INIT_WORK(&bcs->tqueue, BChannel_bh);
	spin_lock_init(&bcs->aclock);
	bcs->BC_SetStack = NULL;
	bcs->BC_Close = NULL;
	bcs->Flag = 0;
}

#ifdef L2FRAME_DEBUG		/* psa */

/* Decode a LAPD control byte (poll/final bit masked) into a mnemonic. */
static char *
l2cmd(u_char cmd)
{
	switch (cmd & ~0x10) {
	case 1:
		return "RR";
	case 5:
		return "RNR";
	case 9:
		return "REJ";
	case 0x6f:
		return "SABME";
	case 0x0f:
		return "DM";
	case 3:
		return "UI";
	case 0x43:
		return "DISC";
	case 0x63:
		return "UA";
	case 0x87:
		return "FRMR";
	case 0xaf:
		return "XID";
	default:
		if (!(cmd & 1))
			return "I";
		else
			return "invalid command";
	}
}

/* Static scratch buffer for l2frames(); debug-only, not reentrant. */
static char tmpdeb[32];

/* Format the control field of a LAPD frame for the debug log. */
static char *
l2frames(u_char *ptr)
{
	switch (ptr[2] & ~0x10) {
	case 1:
	case 5:
	case 9:
		/* S-frames carry N(R) in the fourth byte */
		sprintf(tmpdeb, "%s[%d](nr %d)", l2cmd(ptr[2]), ptr[3] & 1, ptr[3] >> 1);
		break;
	case 0x6f:
	case 0x0f:
	case 3:
	case 0x43:
	case 0x63:
	case 0x87:
	case 0xaf:
		/* U-frames: show the P/F bit */
		sprintf(tmpdeb, "%s[%d]", l2cmd(ptr[2]), (ptr[2] & 0x10) >> 4);
		break;
	default:
		if (!(ptr[2] & 1)) {
			/* I-frames carry N(S) and N(R) */
			sprintf(tmpdeb, "I[%d](ns %d, nr %d)", ptr[3] & 1, ptr[2] >> 1, ptr[3] >> 1);
			break;
		} else
			return "invalid command";
	}
	return tmpdeb;
}

/* Log one LAPD frame with direction, command/response and address info. */
void
Logl2Frame(struct IsdnCardState *cs, struct sk_buff *skb, char *buf, int dir)
{
	u_char *ptr;

	ptr = skb->data;

	if (ptr[0] & 1 || !(ptr[1] & 1))
		debugl1(cs, "Address not LAPD");
	else
		debugl1(cs, "%s %s: %s%c (sapi %d, tei %d)",
			(dir ? "<-" : "->"), buf, l2frames(ptr),
			((ptr[0] & 2) >> 1) == dir ? 'C' : 'R', ptr[0] >> 2, ptr[1] >> 1);
}
#endif

/* --- S/T interface state machine handlers ------------------------------ */

/* Hardware reset indication: fall back to the deactivated state F3. */
static void
l1_reset(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L1_F3);
}

/* Deactivation confirmed: go to F3; restart activation if one is pending. */
static void
l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_F3);
	if (test_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
		st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
}

/* Deactivation indicated by hardware: start the deactivation guard timer. */
static void
l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_F3);
	FsmRestartTimer(&st->l1.timer, 550, EV_TIMER_DEACT, NULL, 2);
	test_and_set_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
}

/* Power-up complete: if activating, send INFO3 and arm timer T3. */
static void
l1_power_up_s(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_L1_ACTIVATING, &st->l1.Flags)) {
		FsmChangeState(fi, ST_L1_F4);
		st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
		FsmRestartTimer(&st->l1.timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
		test_and_set_bit(FLG_L1_T3RUN, &st->l1.Flags);
	} else
		FsmChangeState(fi, ST_L1_F3);
}

/* Lost framing while pending activation: enter F5. */
static void
l1_go_F5(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L1_F5);
}

/* Lost framing while activated/synchronized: enter F8. */
static void
l1_go_F8(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L1_F8);
}

/* INFO2 received: answer with INFO3 and move to F6 (or SYNC2 on U). */
static void
l1_info2_ind(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

#ifdef HISAX_UINTERFACE
	if (test_bit(FLG_L1_UINT, &st->l1.Flags))
		FsmChangeState(fi, ST_L1_SYNC2);
	else
#endif
		FsmChangeState(fi, ST_L1_F7);
	st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
}

/*
 * INFO4 received - the line is activated. Enter F7 (TRANS on U), stop
 * T3 and the deactivation timer, and debounce the activation with a
 * short EV_TIMER_ACT timer before reporting it upward.
 */
static void
l1_info4_ind(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

#ifdef HISAX_UINTERFACE
	if (test_bit(FLG_L1_UINT, &st->l1.Flags))
		FsmChangeState(fi, ST_L1_TRANS);
	else
#endif
		FsmChangeState(fi, ST_L1_F7);
	st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
	if (test_and_clear_bit(FLG_L1_DEACTTIMER, &st->l1.Flags))
		FsmDelTimer(&st->l1.timer, 4);
	if (!test_bit(FLG_L1_ACTIVATED, &st->l1.Flags)) {
		if (test_and_clear_bit(FLG_L1_T3RUN, &st->l1.Flags))
			FsmDelTimer(&st->l1.timer, 3);
		FsmRestartTimer(&st->l1.timer, 110, EV_TIMER_ACT, NULL, 2);
		test_and_set_bit(FLG_L1_ACTTIMER, &st->l1.Flags);
	}
}

/* Timer T3 expired: activation failed; report deactivation and re-enable. */
static void
l1_timer3(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_clear_bit(FLG_L1_T3RUN, &st->l1.Flags);
	if (test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
		L1deactivated(st->l1.hardware);

#ifdef HISAX_UINTERFACE
	if (!test_bit(FLG_L1_UINT, &st->l1.Flags))
#endif
		if (st->l1.l1m.state != ST_L1_F6) {
			FsmChangeState(fi, ST_L1_F3);
			st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
		}
}

/* Activation debounce timer fired: layer 1 is now considered active. */
static void
l1_timer_act(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_clear_bit(FLG_L1_ACTTIMER, &st->l1.Flags);
	test_and_set_bit(FLG_L1_ACTIVATED, &st->l1.Flags);
	L1activated(st->l1.hardware);
}

/* Deactivation guard timer fired: finalize deactivation toward L2 and HW. */
static void
l1_timer_deact(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_clear_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
	test_and_clear_bit(FLG_L1_ACTIVATED, &st->l1.Flags);
	L1deactivated(st->l1.hardware);
	st->l1.l1hw(st, HW_DEACTIVATE | RESPONSE, NULL);
}

/* PH_ACTIVATE in F3: kick the hardware with a reset request. */
static void
l1_activate_s(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->l1.l1hw(st, HW_RESET | REQUEST, NULL);
}

/*
 * PH_ACTIVATE while the line cannot be activated (F6/F8): report the
 * failure, but only if no deactivation or T3 timer is still running.
 */
static void
l1_activate_no(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if ((!test_bit(FLG_L1_DEACTTIMER, &st->l1.Flags)) && (!test_bit(FLG_L1_T3RUN, &st->l1.Flags))) {
		test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags);
		L1deactivated(st->l1.hardware);
	}
}

/* Event/state transition table for the S/T interface FSM. */
static struct FsmNode L1SFnList[] __initdata =
{
	{ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
	{ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
	{ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
	{ST_L1_F3, EV_RESET_IND, l1_reset},
	{ST_L1_F4, EV_RESET_IND, l1_reset},
	{ST_L1_F5, EV_RESET_IND, l1_reset},
	{ST_L1_F6, EV_RESET_IND, l1_reset},
	{ST_L1_F7, EV_RESET_IND, l1_reset},
	{ST_L1_F8, EV_RESET_IND, l1_reset},
	{ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
	{ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
	{ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
	{ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
	{ST_L1_F3, EV_POWER_UP, l1_power_up_s},
	{ST_L1_F4, EV_RSYNC_IND, l1_go_F5},
	{ST_L1_F6, EV_RSYNC_IND, l1_go_F8},
	{ST_L1_F7, EV_RSYNC_IND, l1_go_F8},
	{ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_F3, EV_TIMER3, l1_timer3},
	{ST_L1_F4, EV_TIMER3, l1_timer3},
	{ST_L1_F5, EV_TIMER3, l1_timer3},
	{ST_L1_F6, EV_TIMER3, l1_timer3},
	{ST_L1_F8, EV_TIMER3, l1_timer3},
	{ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
	{ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
};

#ifdef HISAX_UINTERFACE

/* --- U interface state machine handlers -------------------------------- */

/* Deactivation indicated: reset state, start guard timer, re-enable HW. */
static void
l1_deact_req_u(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_RESET);
	FsmRestartTimer(&st->l1.timer, 550, EV_TIMER_DEACT, NULL, 2);
	test_and_set_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
	st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
}

/* Power-up on U interface: only arm timer T3, no INFO3 needed. */
static void
l1_power_up_u(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmRestartTimer(&st->l1.timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
	test_and_set_bit(FLG_L1_T3RUN, &st->l1.Flags);
}

/* INFO0 (deactivation confirmed): enter the deactivated state. */
static void
l1_info0_ind(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L1_DEACT);
}

/* PH_ACTIVATE on U interface: request line activation via INFO1. */
static void
l1_activate_u(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->l1.l1hw(st, HW_INFO1 | REQUEST, NULL);
}

/* Event/state transition table for the U interface FSM. */
static struct FsmNode L1UFnList[] __initdata =
{
	{ST_L1_RESET, EV_DEACT_IND, l1_deact_req_u},
	{ST_L1_DEACT, EV_DEACT_IND, l1_deact_req_u},
	{ST_L1_SYNC2, EV_DEACT_IND, l1_deact_req_u},
	{ST_L1_TRANS, EV_DEACT_IND, l1_deact_req_u},
	{ST_L1_DEACT, EV_PH_ACTIVATE, l1_activate_u},
	{ST_L1_DEACT, EV_POWER_UP, l1_power_up_u},
	{ST_L1_DEACT, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_TRANS, EV_INFO2_IND, l1_info2_ind},
	{ST_L1_RESET, EV_DEACT_CNF, l1_info0_ind},
	{ST_L1_DEACT, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_SYNC2, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_RESET, EV_INFO4_IND, l1_info4_ind},
	{ST_L1_DEACT, EV_TIMER3, l1_timer3},
	{ST_L1_SYNC2, EV_TIMER3, l1_timer3},
	{ST_L1_TRANS, EV_TIMER_ACT, l1_timer_act},
	{ST_L1_DEACT, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_SYNC2, EV_TIMER_DEACT, l1_timer_deact},
	{ST_L1_RESET, EV_TIMER_DEACT, l1_timer_deact},
};

#endif

/* --- B-channel state machine handlers ---------------------------------- */

/* PH_ACTIVATE on a B-channel: activation completes after a short delay. */
static void
l1b_activate(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_WAIT_ACT);
	FsmRestartTimer(&st->l1.timer, st->l1.delay, EV_TIMER_ACT, NULL, 2);
}

/* PH_DEACTIVATE on a B-channel: deactivation completes after 10 ms. */
static void
l1b_deactivate(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_WAIT_DEACT);
	FsmRestartTimer(&st->l1.timer, 10, EV_TIMER_DEACT, NULL, 2);
}

/* B-channel activation delay elapsed: confirm activation to L2. */
static void
l1b_timer_act(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_ACTIV);
	st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
}

/* B-channel deactivation delay elapsed: confirm deactivation to L2. */
static void
l1b_timer_deact(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L1_NULL);
	st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
}

/* Event/state transition table for the B-channel FSM. */
static struct FsmNode L1BFnList[] __initdata =
{
	{ST_L1_NULL, EV_PH_ACTIVATE, l1b_activate},
	{ST_L1_WAIT_ACT, EV_TIMER_ACT, l1b_timer_act},
	{ST_L1_ACTIV, EV_PH_DEACTIVATE, l1b_deactivate},
	{ST_L1_WAIT_DEACT, EV_TIMER_DEACT, l1b_timer_deact},
};

/*
 * Module init: build the shared layer-1 state machines.
 * On failure every FSM created so far is freed again.
 */
int __init
Isdnl1New(void)
{
	int retval;

	l1fsm_s.state_count = L1S_STATE_COUNT;
	l1fsm_s.event_count = L1_EVENT_COUNT;
	l1fsm_s.strEvent = strL1Event;
	l1fsm_s.strState = strL1SState;
	retval = FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
	if (retval)
		return retval;
	l1fsm_b.state_count = L1B_STATE_COUNT;
	l1fsm_b.event_count = L1_EVENT_COUNT;
	l1fsm_b.strEvent = strL1Event;
	l1fsm_b.strState = strL1BState;
	retval = FsmNew(&l1fsm_b, L1BFnList, ARRAY_SIZE(L1BFnList));
	if (retval) {
		FsmFree(&l1fsm_s);
		return retval;
	}
#ifdef HISAX_UINTERFACE
	l1fsm_u.state_count = L1U_STATE_COUNT;
	l1fsm_u.event_count = L1_EVENT_COUNT;
	l1fsm_u.strEvent = strL1Event;
	l1fsm_u.strState = strL1UState;
	retval = FsmNew(&l1fsm_u, L1UFnList, ARRAY_SIZE(L1UFnList));
	if (retval) {
		FsmFree(&l1fsm_s);
		FsmFree(&l1fsm_b);
		return retval;
	}
#endif
	return 0;
}

/* Module exit: release the shared layer-1 state machines. */
void Isdnl1Free(void)
{
#ifdef HISAX_UINTERFACE
	FsmFree(&l1fsm_u);
#endif
	FsmFree(&l1fsm_s);
	FsmFree(&l1fsm_b);
}

/*
 * L2-to-L1 entry point for the D-channel: pass data/pull primitives to
 * the hardware, run activation through the FSM, and decode test-loop
 * requests (bit 0 = B1, bit 1 = B2 loop).
 */
static void
dch_l2l1(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;

	switch (pr) {
	case (PH_DATA | REQUEST):
	case (PH_PULL | REQUEST):
	case (PH_PULL | INDICATION):
		st->l1.l1hw(st, pr, arg);
		break;
	case (PH_ACTIVATE | REQUEST):
		if (cs->debug)
			debugl1(cs, "PH_ACTIVATE_REQ %s",
				st->l1.l1m.fsm->strState[st->l1.l1m.state]);
		if (test_bit(FLG_L1_ACTIVATED, &st->l1.Flags))
			st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
		else {
			test_and_set_bit(FLG_L1_ACTIVATING, &st->l1.Flags);
			FsmEvent(&st->l1.l1m, EV_PH_ACTIVATE, arg);
		}
		break;
	case (PH_TESTLOOP | REQUEST):
		if (1 & (long) arg)
			debugl1(cs, "PH_TEST_LOOP B1");
		if (2 & (long) arg)
			debugl1(cs, "PH_TEST_LOOP B2");
		if (!(3 & (long) arg))
			debugl1(cs, "PH_TEST_LOOP DISABLED");
		st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
		break;
	default:
		if (cs->debug)
			debugl1(cs, "dch_l2l1 msg %04X unhandled", pr);
		break;
	}
}

/*
 * Hardware-to-L1 entry point: translate hardware indications into FSM
 * events and feed them to every stack registered on the card.
 */
void
l1_msg(struct IsdnCardState *cs, int pr, void *arg) {
	struct PStack *st;

	st = cs->stlist;

	while (st) {
		switch (pr) {
		case (HW_RESET | INDICATION):
			FsmEvent(&st->l1.l1m, EV_RESET_IND, arg);
			break;
		case (HW_DEACTIVATE | CONFIRM):
			FsmEvent(&st->l1.l1m, EV_DEACT_CNF, arg);
			break;
		case (HW_DEACTIVATE | INDICATION):
			FsmEvent(&st->l1.l1m, EV_DEACT_IND, arg);
			break;
		case (HW_POWERUP | CONFIRM):
			FsmEvent(&st->l1.l1m, EV_POWER_UP, arg);
			break;
		case (HW_RSYNC | INDICATION):
			FsmEvent(&st->l1.l1m, EV_RSYNC_IND, arg);
			break;
		case (HW_INFO2 | INDICATION):
			FsmEvent(&st->l1.l1m, EV_INFO2_IND, arg);
			break;
		case (HW_INFO4_P8 | INDICATION):
		case (HW_INFO4_P10 | INDICATION):
			FsmEvent(&st->l1.l1m, EV_INFO4_IND, arg);
			break;
		default:
			if (cs->debug)
				debugl1(cs, "l1msg %04X unhandled", pr);
			break;
		}
		st = st->next;
	}
}

/* B-channel variant of l1_msg(): only activate/deactivate requests. */
void
l1_msg_b(struct PStack *st, int pr, void *arg) {
	switch (pr) {
	case (PH_ACTIVATE | REQUEST):
		FsmEvent(&st->l1.l1m, EV_PH_ACTIVATE, NULL);
		break;
	case (PH_DEACTIVATE | REQUEST):
		FsmEvent(&st->l1.l1m, EV_PH_DEACTIVATE, NULL);
		break;
	}
}

/*
 * Attach a protocol stack to a card's D-channel: select the S/T (or U)
 * FSM, initialize the L1 timer and debug hooks, and let the hardware
 * driver install its own D-channel callbacks.
 */
void
setstack_HiSax(struct PStack *st, struct IsdnCardState *cs)
{
	st->l1.hardware = cs;
	st->protocol = cs->protocol;
	st->l1.l1m.fsm = &l1fsm_s;
	st->l1.l1m.state = ST_L1_F3;
	st->l1.Flags = 0;
#ifdef HISAX_UINTERFACE
	if (test_bit(FLG_HW_L1_UINT, &cs->HW_Flags)) {
		st->l1.l1m.fsm = &l1fsm_u;
		st->l1.l1m.state = ST_L1_RESET;
		st->l1.Flags = FLG_L1_UINT;
	}
#endif
	st->l1.l1m.debug = cs->debug;
	st->l1.l1m.userdata = st;
	st->l1.l1m.userint = 0;
	st->l1.l1m.printdebug = l1m_debug;
	FsmInitTimer(&st->l1.l1m, &st->l1.timer);
	setstack_tei(st);
	setstack_manager(st);
	st->l1.stlistp = &(cs->stlist);
	st->l2.l2l1 = dch_l2l1;
	if (cs->setstack_d)
		cs->setstack_d(st, cs);
}

/* Attach a protocol stack to a B-channel: plain timer-driven B FSM. */
void
setstack_l1_B(struct PStack *st)
{
	struct IsdnCardState *cs = st->l1.hardware;

	st->l1.l1m.fsm = &l1fsm_b;
	st->l1.l1m.state = ST_L1_NULL;
	st->l1.l1m.debug = cs->debug;
	st->l1.l1m.userdata = st;
	st->l1.l1m.userint = 0;
	st->l1.l1m.printdebug = l1m_debug;
	st->l1.Flags = 0;
	FsmInitTimer(&st->l1.l1m, &st->l1.timer);
}
/* ==== concatenation boundary: next file is 239432.c (lpt_ssio.c) ==== */
/* $OpenBSD: lpt_ssio.c,v 1.1 2007/06/20 18:22:15 kettenis Exp $ */ /* * Copyright (c) 2007 Mark Kettenis * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/param.h> #include <sys/systm.h> #include <sys/device.h> #include <machine/bus.h> #include <dev/ic/lptreg.h> #include <dev/ic/lptvar.h> #include <hppa/dev/ssiovar.h> int lpt_ssio_match(struct device *, void *, void *); void lpt_ssio_attach(struct device *, struct device *, void *); struct cfattach lpt_ssio_ca = { sizeof(struct lpt_softc), lpt_ssio_match, lpt_ssio_attach }; int lpt_ssio_match(struct device *parent, void *match, void *aux) { struct cfdata *cf = match; struct ssio_attach_args *saa = aux; if (strcmp(saa->saa_name, "lpt") != 0) return (0); /* Check locators. */ if (cf->ssiocf_irq != SSIO_UNK_IRQ && cf->ssiocf_irq != saa->saa_irq) return (0); return (1); } void lpt_ssio_attach(struct device *parent, struct device *self, void *aux) { struct lpt_softc *sc = (void *)self; struct ssio_attach_args *saa = aux; sc->sc_iot = saa->saa_iot; if (bus_space_map(sc->sc_iot, saa->saa_iobase, LPT_NPORTS, 0, &sc->sc_ioh)) { printf(": cannot map io space\n"); return; } lpt_attach_common(sc); sc->sc_ih = ssio_intr_establish(IPL_TTY, saa->saa_irq, lptintr, sc, sc->sc_dev.dv_xname); }
/* ==== concatenation boundary: next file is 170615.c (panel-dsi-cm.c) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic DSI Command Mode panel driver
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 */

/* #define DEBUG */

#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_connector.h>

#include <video/mipi_display.h>
#include <video/of_display_timing.h>

#include "../dss/omapdss.h"

/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0

/* DCS commands used by this panel beyond <video/mipi_display.h>. */
#define DCS_READ_NUM_ERRORS	0x05
#define DCS_BRIGHTNESS		0x51
#define DCS_CTRL_DISPLAY	0x53
#define DCS_GET_ID1		0xda
#define DCS_GET_ID2		0xdb
#define DCS_GET_ID3		0xdc

/* Per-panel driver state; embedded dssdev makes it an omapdss device. */
struct panel_drv_data {
	struct omap_dss_device dssdev;
	struct omap_dss_device *src;

	struct videomode vm;

	struct platform_device *pdev;

	struct mutex lock;

	struct backlight_device *bldev;
	struct backlight_device *extbldev;

	unsigned long	hw_guard_end;	/* next value of jiffies when we can
					 * issue the next sleep in/out command
					 */
	unsigned long	hw_guard_wait;	/* max guard time in jiffies */

	/* panel HW configuration from DT or platform data */
	struct gpio_desc *reset_gpio;
	struct gpio_desc *ext_te_gpio;

	struct regulator *vpnl;
	struct regulator *vddi;

	bool use_dsi_backlight;

	int width_mm;
	int height_mm;

	struct omap_dsi_pin_config pin_config;

	/* runtime variables */
	bool enabled;

	bool te_enabled;

	atomic_t do_update;
	int channel;

	struct delayed_work te_timeout_work;

	bool intro_printed;

	struct workqueue_struct *workqueue;

	bool ulps_enabled;
	unsigned int ulps_timeout;
	struct delayed_work ulps_work;
};

#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)

static irqreturn_t dsicm_te_isr(int irq, void *data);
static void dsicm_te_timeout_work_callback(struct work_struct *work);
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);

static int dsicm_panel_reset(struct panel_drv_data *ddata);

static void dsicm_ulps_work(struct work_struct *work);

/*
 * Power the backlight device (internal DSI one preferred, else the
 * external one) up or down via the backlight sysfs properties.
 * NOTE(review): on enable, "props.state = ~(...)" assigns the complement
 * instead of clearing bits ("&= ~"), setting all other state bits -
 * looks unintended, confirm against the backlight core.
 */
static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
{
	struct backlight_device *backlight;

	if (ddata->bldev)
		backlight = ddata->bldev;
	else if (ddata->extbldev)
		backlight = ddata->extbldev;
	else
		return;

	if (enable) {
		backlight->props.fb_blank = FB_BLANK_UNBLANK;
		backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
		backlight->props.power = FB_BLANK_UNBLANK;
	} else {
		backlight->props.fb_blank = FB_BLANK_NORMAL;
		backlight->props.power = FB_BLANK_POWERDOWN;
		backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
	}

	backlight_update_status(backlight);
}

/* Start the guard interval after a DCS sleep in/out command. */
static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
{
	ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
	ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
}

/* Sleep until the current guard interval has elapsed (if still running). */
static void hw_guard_wait(struct panel_drv_data *ddata)
{
	unsigned long wait = ddata->hw_guard_end - jiffies;

	if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(wait);
	}
}

/* Read a single byte returned by a DCS read command. */
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
	struct omap_dss_device *src = ddata->src;
	int r;
	u8 buf[1];

	r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);

	if (r < 0)
		return r;

	*data = buf[0];

	return 0;
}

/* Send a parameterless DCS command. */
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
	struct omap_dss_device *src = ddata->src;

	return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}

/* Send a DCS command with one parameter byte. */
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
	struct omap_dss_device *src = ddata->src;
	u8 buf[2] = { dcs_cmd, param };

	return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
}

/* Enter sleep mode, honouring the 120 ms guard between sleep commands. */
static int dsicm_sleep_in(struct panel_drv_data *ddata)

{
	struct omap_dss_device *src = ddata->src;
	u8 cmd;
	int r;

	hw_guard_wait(ddata); /* wait for sleep out interval */

	cmd = MIPI_DCS_ENTER_SLEEP_MODE;
	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
	if (r)
		return r;

	hw_guard_start(ddata, 120);

	usleep_range(5000, 10000);

	return 0;
}

/* Exit sleep mode, honouring the 120 ms guard between sleep commands. */
static int dsicm_sleep_out(struct panel_drv_data *ddata)
{
	int r;

	hw_guard_wait(ddata); /* wait for sleep in interval */

	r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
	if (r)
		return r;

	hw_guard_start(ddata, 120);

	usleep_range(5000, 10000);

	return 0;
}

/* Read the three DCS panel-revision ID bytes. */
static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
{
	int r;

	r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
	if (r)
		return r;
	r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
	if (r)
		return r;
	r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
	if (r)
		return r;

	return 0;
}

/* Program the column/page address window for a partial update. */
static int dsicm_set_update_window(struct panel_drv_data *ddata,
				   u16 x, u16 y, u16 w, u16 h)
{
	struct omap_dss_device *src = ddata->src;
	int r;
	u16 x1 = x;
	u16 x2 = x + w - 1;
	u16 y1 = y;
	u16 y2 = y + h - 1;

	u8 buf[5];

	buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
	buf[1] = (x1 >> 8) & 0xff;
	buf[2] = (x1 >> 0) & 0xff;
	buf[3] = (x2 >> 8) & 0xff;
	buf[4] = (x2 >> 0) & 0xff;

	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
	if (r)
		return r;

	buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
	buf[1] = (y1 >> 8) & 0xff;
	buf[2] = (y1 >> 0) & 0xff;
	buf[3] = (y2 >> 8) & 0xff;
	buf[4] = (y2 >> 0) & 0xff;

	r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
	if (r)
		return r;

	/* flush the two writes with a bus-turn-around before returning */
	src->ops->dsi.bta_sync(src, ddata->channel);

	return r;
}

/* (Re)arm the delayed ULPS entry if a timeout is configured. */
static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
{
	if (ddata->ulps_timeout > 0)
		queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
				   msecs_to_jiffies(ddata->ulps_timeout));
}

/* Cancel a pending delayed ULPS entry. */
static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
{
	cancel_delayed_work(&ddata->ulps_work);
}

/*
 * Put the DSI link into ULPS (ultra low power state): disable TE and
 * its interrupt, then disable the link. On failure the panel is reset
 * and the ULPS timer re-armed.
 */
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
	struct omap_dss_device *src = ddata->src;
	int r;

	if (ddata->ulps_enabled)
		return 0;

	dsicm_cancel_ulps_work(ddata);

	r = _dsicm_enable_te(ddata, false);
	if (r)
		goto err;

	if (ddata->ext_te_gpio)
		disable_irq(gpiod_to_irq(ddata->ext_te_gpio));

	src->ops->dsi.disable(src, false, true);

	ddata->ulps_enabled = true;

	return 0;

err:
	dev_err(&ddata->pdev->dev, "enter ULPS failed");
	dsicm_panel_reset(ddata);

	ddata->ulps_enabled = false;

	dsicm_queue_ulps_work(ddata);

	return r;
}

/*
 * Bring the DSI link back from ULPS and re-enable TE; falls back to a
 * full panel reset if re-enabling TE fails.
 */
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
	struct omap_dss_device *src = ddata->src;
	int r;

	if (!ddata->ulps_enabled)
		return 0;

	src->ops->enable(src);
	src->ops->dsi.enable_hs(src, ddata->channel, true);

	r = _dsicm_enable_te(ddata, true);
	if (r) {
		dev_err(&ddata->pdev->dev, "failed to re-enable TE");
		goto err2;
	}

	if (ddata->ext_te_gpio)
		enable_irq(gpiod_to_irq(ddata->ext_te_gpio));

	dsicm_queue_ulps_work(ddata);

	ddata->ulps_enabled = false;

	return 0;

err2:
	dev_err(&ddata->pdev->dev, "failed to exit ULPS");

	r = dsicm_panel_reset(ddata);
	if (!r) {
		if (ddata->ext_te_gpio)
			enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
		ddata->ulps_enabled = false;
	}

	dsicm_queue_ulps_work(ddata);

	return r;
}

/* Exit ULPS if entered, otherwise just restart the ULPS idle timer. */
static int dsicm_wake_up(struct panel_drv_data *ddata)
{
	if (ddata->ulps_enabled)
		return dsicm_exit_ulps(ddata);

	dsicm_cancel_ulps_work(ddata);
	dsicm_queue_ulps_work(ddata);
	return 0;
}

/* Backlight op: push the requested brightness to the panel over DCS. */
static int dsicm_bl_update_status(struct backlight_device *dev)
{
	struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
	struct omap_dss_device *src = ddata->src;
	int r = 0;
	int level;

	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
	    dev->props.power == FB_BLANK_UNBLANK)
		level = dev->props.brightness;
	else
		level = 0;

	dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);

	mutex_lock(&ddata->lock);

	if (ddata->enabled) {
		src->ops->dsi.bus_lock(src);

		r = dsicm_wake_up(ddata);
		if (!r)
			r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);

		src->ops->dsi.bus_unlock(src);
	}

	mutex_unlock(&ddata->lock);

	return r;
}

/* Backlight op: report the effective brightness (0 when blanked). */
static int dsicm_bl_get_intensity(struct backlight_device *dev)
{
	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
	    dev->props.power == FB_BLANK_UNBLANK)
		return dev->props.brightness;

	return 0;
}

static const struct backlight_ops dsicm_bl_ops = {
	.get_brightness = dsicm_bl_get_intensity,
	.update_status  = dsicm_bl_update_status,
};

/* sysfs: read the panel's DCS error counter (panel must be enabled). */
static ssize_t dsicm_num_errors_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	struct omap_dss_device *src = ddata->src;
	u8 errors = 0;
	int r;

	mutex_lock(&ddata->lock);

	if (ddata->enabled) {
		src->ops->dsi.bus_lock(src);

		r = dsicm_wake_up(ddata);
		if (!r)
			r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
					     &errors);

		src->ops->dsi.bus_unlock(src);
	} else {
		r = -ENODEV;
	}

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", errors);
}

/* sysfs: read the three panel-revision ID bytes. */
static ssize_t dsicm_hw_revision_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	struct omap_dss_device *src = ddata->src;
	u8 id1, id2, id3;
	int r;

	mutex_lock(&ddata->lock);

	if (ddata->enabled) {
		src->ops->dsi.bus_lock(src);

		r = dsicm_wake_up(ddata);
		if (!r)
			r = dsicm_get_id(ddata, &id1, &id2, &id3);

		src->ops->dsi.bus_unlock(src);
	} else {
		r = -ENODEV;
	}

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
}

/* sysfs: force ULPS entry (non-zero) or exit (zero). */
static ssize_t dsicm_store_ulps(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	struct omap_dss_device *src = ddata->src;
	unsigned long t;
	int r;

	r = kstrtoul(buf, 0, &t);
	if (r)
		return r;

	mutex_lock(&ddata->lock);

	if (ddata->enabled) {
		src->ops->dsi.bus_lock(src);

		if (t)
			r = dsicm_enter_ulps(ddata);
		else
			r = dsicm_wake_up(ddata);

		src->ops->dsi.bus_unlock(src);
	}

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return count;
}

/* sysfs: report whether the link is currently in ULPS. */
static ssize_t dsicm_show_ulps(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	unsigned int t;

	mutex_lock(&ddata->lock);
	t = ddata->ulps_enabled;
	mutex_unlock(&ddata->lock);

	return snprintf(buf, PAGE_SIZE, "%u\n", t);
}

/* sysfs: set the idle timeout (ms) before automatic ULPS entry. */
static ssize_t dsicm_store_ulps_timeout(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	struct omap_dss_device *src = ddata->src;
	unsigned long t;
	int r;

	r = kstrtoul(buf, 0, &t);
	if (r)
		return r;

	mutex_lock(&ddata->lock);
	ddata->ulps_timeout = t;

	if (ddata->enabled) {
		/* dsicm_wake_up will restart the timer */
		src->ops->dsi.bus_lock(src);
		r = dsicm_wake_up(ddata);
		src->ops->dsi.bus_unlock(src);
	}

	mutex_unlock(&ddata->lock);

	if (r)
		return r;

	return count;
}

/* sysfs: report the configured ULPS idle timeout (ms). */
static ssize_t dsicm_show_ulps_timeout(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);
	unsigned int t;

	mutex_lock(&ddata->lock);
	t = ddata->ulps_timeout;
	mutex_unlock(&ddata->lock);

	return snprintf(buf, PAGE_SIZE, "%u\n", t);
}

static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
		   dsicm_show_ulps, dsicm_store_ulps);
static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
		   dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);

static struct attribute *dsicm_attrs[] = {
	&dev_attr_num_dsi_errors.attr,
	&dev_attr_hw_revision.attr,
	&dev_attr_ulps.attr,
	&dev_attr_ulps_timeout.attr,
	NULL,
};

static const struct attribute_group dsicm_attr_group = {
	.attrs = dsicm_attrs,
};

/* Pulse the panel reset GPIO: release, assert, release again. */
static void dsicm_hw_reset(struct panel_drv_data *ddata)
{
	gpiod_set_value(ddata->reset_gpio, 1);
	udelay(10);
	/* reset the panel */
	gpiod_set_value(ddata->reset_gpio, 0);
	/* assert reset */
	udelay(10);
	gpiod_set_value(ddata->reset_gpio, 1);
	/* wait after releasing reset */
	usleep_range(5000, 10000);
}

/*
 * Full power-up sequence: regulators, DSI pin/clock configuration,
 * hardware reset, panel initialization over low-speed DCS, then switch
 * the link to HS mode. Unwinds regulators on failure.
 */
static int dsicm_power_on(struct panel_drv_data *ddata)
{
	struct omap_dss_device *src = ddata->src;
	u8 id1, id2, id3;
	int r;
	struct omap_dss_dsi_config dsi_config = {
		.mode = OMAP_DSS_DSI_CMD_MODE,
		.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
		.vm = &ddata->vm,
		.hs_clk_min = 150000000,
		.hs_clk_max = 300000000,
		.lp_clk_min = 7000000,
		.lp_clk_max = 10000000,
	};

	if (ddata->vpnl) {
		r = regulator_enable(ddata->vpnl);
		if (r) {
			dev_err(&ddata->pdev->dev,
				"failed to enable VPNL: %d\n", r);
			return r;
		}
	}

	if (ddata->vddi) {
		r = regulator_enable(ddata->vddi);
		if (r) {
			dev_err(&ddata->pdev->dev,
				"failed to enable VDDI: %d\n", r);
			goto err_vpnl;
		}
	}

	if (ddata->pin_config.num_pins > 0) {
		r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
		if (r) {
			dev_err(&ddata->pdev->dev,
				"failed to configure DSI pins\n");
			goto err_vddi;
		}
	}

	r = src->ops->dsi.set_config(src, &dsi_config);
	if (r) {
		dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
		goto err_vddi;
	}

	src->ops->enable(src);

	dsicm_hw_reset(ddata);

	src->ops->dsi.enable_hs(src, ddata->channel, false);

	r = dsicm_sleep_out(ddata);
	if (r)
		goto err;

	r = dsicm_get_id(ddata, &id1, &id2, &id3);
	if (r)
		goto err;

	r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
	if (r)
		goto err;

	r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
			      (1<<2) | (1<<5));	/* BL | BCTRL */
	if (r)
		goto err;

	r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
			      MIPI_DCS_PIXEL_FMT_24BIT);
	if (r)
		goto err;

	r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
	if (r)
		goto err;

	r = _dsicm_enable_te(ddata, ddata->te_enabled);
	if (r)
		goto err;

	r = src->ops->dsi.enable_video_output(src, ddata->channel);
	if (r)
		goto err;

	ddata->enabled = true;

	if (!ddata->intro_printed) {
		dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
			 id1, id2, id3);
		ddata->intro_printed = true;
	}

	src->ops->dsi.enable_hs(src, ddata->channel, true);

	return 0;
err:
	dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");

	dsicm_hw_reset(ddata);

	src->ops->dsi.disable(src, true, false);
err_vddi:
	if (ddata->vddi)
		regulator_disable(ddata->vddi);
err_vpnl:
	if (ddata->vpnl)
		regulator_disable(ddata->vpnl);

	return r;
}

/*
 * Power-down sequence: stop video output, blank and sleep the panel
 * (HW reset on failure), disable the link and the regulators.
 */
static void dsicm_power_off(struct panel_drv_data *ddata)
{
	struct omap_dss_device *src = ddata->src;
	int r;

	src->ops->dsi.disable_video_output(src, ddata->channel);

	r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
	if (!r)
		r = dsicm_sleep_in(ddata);

	if (r) {
		dev_err(&ddata->pdev->dev,
			"error disabling panel, issuing HW reset\n");
		dsicm_hw_reset(ddata);
	}

	src->ops->dsi.disable(src, true, false);

	if (ddata->vddi)
		regulator_disable(ddata->vddi);
	if (ddata->vpnl)
		regulator_disable(ddata->vpnl);

	ddata->enabled = false;
}

/* Full recovery cycle: power off, hardware reset, power on again. */
static int dsicm_panel_reset(struct panel_drv_data *ddata)
{
	dev_err(&ddata->pdev->dev, "performing LCD reset\n");

	dsicm_power_off(ddata);
	dsicm_hw_reset(ddata);
	return dsicm_power_on(ddata);
}

/* omapdss connect: claim a DSI virtual channel and bind its VC id. */
static int dsicm_connect(struct omap_dss_device *src,
			 struct omap_dss_device *dst)
{
	struct panel_drv_data *ddata = to_panel_data(dst);
	struct device *dev = &ddata->pdev->dev;
	int r;

	r = src->ops->dsi.request_vc(src, &ddata->channel);
	if (r) {
		dev_err(dev, "failed to get virtual channel\n");
		return r;
	}

	r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
	if (r) {
		dev_err(dev, "failed to set VC_ID\n");
		src->ops->dsi.release_vc(src, ddata->channel);
		return r;
	}

	ddata->src = src;
	return 0;
}

/* omapdss disconnect: release the virtual channel claimed in connect. */
static void dsicm_disconnect(struct omap_dss_device *src,
			     struct omap_dss_device *dst)
{
	struct panel_drv_data *ddata = to_panel_data(dst);

	src->ops->dsi.release_vc(src, ddata->channel);
	ddata->src = NULL;
}

/* omapdss enable: power the panel on and raise the backlight. */
static void dsicm_enable(struct omap_dss_device *dssdev)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct omap_dss_device *src = ddata->src;
	int r;

	mutex_lock(&ddata->lock);

	src->ops->dsi.bus_lock(src);

	r = dsicm_power_on(ddata);

	src->ops->dsi.bus_unlock(src);

	if (r)
		goto err;

	mutex_unlock(&ddata->lock);

	dsicm_bl_power(ddata, true);

	return;
err:
	dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
	mutex_unlock(&ddata->lock);
}

/* omapdss disable: drop the backlight, leave ULPS, power the panel off. */
static void dsicm_disable(struct omap_dss_device *dssdev)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct omap_dss_device *src = ddata->src;
	int r;

	dsicm_bl_power(ddata, false);

	mutex_lock(&ddata->lock);

	dsicm_cancel_ulps_work(ddata);

	src->ops->dsi.bus_lock(src);

	r = dsicm_wake_up(ddata);
	if (!r)
		dsicm_power_off(ddata);

	src->ops->dsi.bus_unlock(src);

	mutex_unlock(&ddata->lock);
}

/* DSI frame-done callback: releases the bus lock taken by the update. */
static void dsicm_framedone_cb(int err, void *data)
{
	struct panel_drv_data *ddata = data;
	struct omap_dss_device *src = ddata->src;

	dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
	src->ops->dsi.bus_unlock(src);
}

/*
 * External TE interrupt: if an update was armed (do_update), cancel the
 * TE timeout and start the DSI frame transfer now.
 */
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
	struct panel_drv_data *ddata = data;
	struct omap_dss_device *src = ddata->src;
	int old;
	int r;

	old = atomic_cmpxchg(&ddata->do_update, 1, 0);

	if (old) {
		cancel_delayed_work(&ddata->te_timeout_work);

		r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
					 ddata);
		if (r)
			goto err;
	}

	return IRQ_HANDLED;
err:
	dev_err(&ddata->pdev->dev, "start update failed\n");
	src->ops->dsi.bus_unlock(src);
	return IRQ_HANDLED;
}

/* TE watchdog: no TE pulse arrived in time; abort and release the bus. */
static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
	struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
						    te_timeout_work.work);
	struct omap_dss_device *src = ddata->src;

	dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");

	atomic_set(&ddata->do_update, 0);
	src->ops->dsi.bus_unlock(src);
}

/*
 * Start a frame update. With an external TE GPIO the transfer is armed
 * and started from the TE ISR (guarded by a 250 ms watchdog); otherwise
 * it is started immediately. The DSI bus lock is held until the
 * frame-done callback runs.
 */
static int dsicm_update(struct omap_dss_device *dssdev,
			u16 x, u16 y, u16 w, u16 h)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct omap_dss_device *src = ddata->src;
	int r;

	dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);

	mutex_lock(&ddata->lock);
	src->ops->dsi.bus_lock(src);

	r = dsicm_wake_up(ddata);
	if (r)
		goto err;

	if (!ddata->enabled) {
		r = 0;
		goto err;
	}

	/* XXX no need to send this every frame, but dsi break if not done */
	r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
				    ddata->vm.vactive);
	if (r)
		goto err;

	if (ddata->te_enabled && ddata->ext_te_gpio) {
		schedule_delayed_work(&ddata->te_timeout_work,
				      msecs_to_jiffies(250));
		atomic_set(&ddata->do_update, 1);
	} else {
		r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
					 ddata);
		if (r)
			goto err;
	}

	/* note: no bus_unlock here. unlock is src framedone_cb */
	mutex_unlock(&ddata->lock);
	return 0;
err:
	src->ops->dsi.bus_unlock(src);
	mutex_unlock(&ddata->lock);
	return r;
}

/* Wait for a pending update by cycling the DSI bus lock. */
static int dsicm_sync(struct omap_dss_device *dssdev)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct omap_dss_device *src = ddata->src;

	dev_dbg(&ddata->pdev->dev, "sync\n");

	mutex_lock(&ddata->lock);
	src->ops->dsi.bus_lock(src);
	src->ops->dsi.bus_unlock(src);
	mutex_unlock(&ddata->lock);

	dev_dbg(&ddata->pdev->dev, "sync done\n");

	return 0;
}

/*
 * Switch panel-side tearing effect on/off; the DSS-side TE signal is
 * only used when no external TE GPIO is wired.
 */
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
	struct omap_dss_device *src = ddata->src;
	int r;

	if (enable)
		r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
	else
		r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);

	if (!ddata->ext_te_gpio)
		src->ops->dsi.enable_te(src, enable);

	/* possible panel bug */
	msleep(100);

	return r;
}

/* Public TE toggle: update the cached flag and the panel if enabled. */
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct omap_dss_device *src = ddata->src;
	int r;

	mutex_lock(&ddata->lock);

	if (ddata->te_enabled == enable)
		goto end;

	src->ops->dsi.bus_lock(src);

	if (ddata->enabled) {
		r = dsicm_wake_up(ddata);
		if (r)
			goto err;

		r = _dsicm_enable_te(ddata, enable);
		if (r)
			goto err;
	}

	ddata->te_enabled = enable;

	src->ops->dsi.bus_unlock(src);
end:
	mutex_unlock(&ddata->lock);

	return 0;
err:
	src->ops->dsi.bus_unlock(src);
	mutex_unlock(&ddata->lock);

	return r;
}

/* Report the cached TE-enabled state. */
static int dsicm_get_te(struct omap_dss_device *dssdev)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	int r;

	mutex_lock(&ddata->lock);
	r = ddata->te_enabled;
	mutex_unlock(&ddata->lock);

	return r;
}

/* (definition continues beyond this chunk) */
static int dsicm_memory_read(struct omap_dss_device *dssdev,
			     void *buf, size_t size,
			     u16 x, u16 y, u16 w, u16 h)
{
	struct panel_drv_data *ddata = to_panel_data(dssdev);
	struct
omap_dss_device *src = ddata->src; int r; int first = 1; int plen; unsigned int buf_used = 0; if (size < w * h * 3) return -ENOMEM; mutex_lock(&ddata->lock); if (!ddata->enabled) { r = -ENODEV; goto err1; } size = min((u32)w * h * 3, ddata->vm.hactive * ddata->vm.vactive * 3); src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (r) goto err2; /* plen 1 or 2 goes into short packet. until checksum error is fixed, * use short packets. plen 32 works, but bigger packets seem to cause * an error. */ if (size % 2) plen = 1; else plen = 2; dsicm_set_update_window(ddata, x, y, w, h); r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen); if (r) goto err2; while (buf_used < size) { u8 dcs_cmd = first ? 0x2e : 0x3e; first = 0; r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { dev_err(dssdev->dev, "read error\n"); goto err3; } buf_used += r; if (r < plen) { dev_err(&ddata->pdev->dev, "short read\n"); break; } if (signal_pending(current)) { dev_err(&ddata->pdev->dev, "signal pending, " "aborting memory read\n"); r = -ERESTARTSYS; goto err3; } } r = buf_used; err3: src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1); err2: src->ops->dsi.bus_unlock(src); err1: mutex_unlock(&ddata->lock); return r; } static void dsicm_ulps_work(struct work_struct *work) { struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, ulps_work.work); struct omap_dss_device *dssdev = &ddata->dssdev; struct omap_dss_device *src = ddata->src; mutex_lock(&ddata->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) { mutex_unlock(&ddata->lock); return; } src->ops->dsi.bus_lock(src); dsicm_enter_ulps(ddata); src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); } static int dsicm_get_modes(struct omap_dss_device *dssdev, struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); connector->display_info.width_mm = ddata->width_mm; 
connector->display_info.height_mm = ddata->height_mm; return omapdss_display_get_modes(connector, &ddata->vm); } static int dsicm_check_timings(struct omap_dss_device *dssdev, struct drm_display_mode *mode) { struct panel_drv_data *ddata = to_panel_data(dssdev); int ret = 0; if (mode->hdisplay != ddata->vm.hactive) ret = -EINVAL; if (mode->vdisplay != ddata->vm.vactive) ret = -EINVAL; if (ret) { dev_warn(dssdev->dev, "wrong resolution: %d x %d", mode->hdisplay, mode->vdisplay); dev_warn(dssdev->dev, "panel resolution: %d x %d", ddata->vm.hactive, ddata->vm.vactive); } return ret; } static const struct omap_dss_device_ops dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, .enable = dsicm_enable, .disable = dsicm_disable, .get_modes = dsicm_get_modes, .check_timings = dsicm_check_timings, }; static const struct omap_dss_driver dsicm_dss_driver = { .update = dsicm_update, .sync = dsicm_sync, .enable_te = dsicm_enable_te, .get_te = dsicm_get_te, .memory_read = dsicm_memory_read, }; static int dsicm_probe_of(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct backlight_device *backlight; struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct display_timing timing; int err; ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ddata->reset_gpio)) { err = PTR_ERR(ddata->reset_gpio); dev_err(&pdev->dev, "reset gpio request failed: %d", err); return err; } ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te", GPIOD_IN); if (IS_ERR(ddata->ext_te_gpio)) { err = PTR_ERR(ddata->ext_te_gpio); dev_err(&pdev->dev, "TE gpio request failed: %d", err); return err; } err = of_get_display_timing(node, "panel-timing", &timing); if (!err) { videomode_from_timing(&timing, &ddata->vm); if (!ddata->vm.pixelclock) ddata->vm.pixelclock = ddata->vm.hactive * ddata->vm.vactive * 60; } else { dev_warn(&pdev->dev, "failed to get video timing, using defaults\n"); } ddata->width_mm = 0; 
of_property_read_u32(node, "width-mm", &ddata->width_mm); ddata->height_mm = 0; of_property_read_u32(node, "height-mm", &ddata->height_mm); ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl"); if (IS_ERR(ddata->vpnl)) { err = PTR_ERR(ddata->vpnl); if (err == -EPROBE_DEFER) return err; ddata->vpnl = NULL; } ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi"); if (IS_ERR(ddata->vddi)) { err = PTR_ERR(ddata->vddi); if (err == -EPROBE_DEFER) return err; ddata->vddi = NULL; } backlight = devm_of_find_backlight(&pdev->dev); if (IS_ERR(backlight)) return PTR_ERR(backlight); /* If no backlight device is found assume native backlight support */ if (backlight) ddata->extbldev = backlight; else ddata->use_dsi_backlight = true; /* TODO: ulps */ return 0; } static int dsicm_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct backlight_device *bldev = NULL; struct device *dev = &pdev->dev; struct omap_dss_device *dssdev; int r; dev_dbg(dev, "probe\n"); ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); ddata->pdev = pdev; ddata->vm.hactive = 864; ddata->vm.vactive = 480; ddata->vm.pixelclock = 864 * 480 * 60; r = dsicm_probe_of(pdev); if (r) return r; dssdev = &ddata->dssdev; dssdev->dev = dev; dssdev->ops = &dsicm_ops; dssdev->driver = &dsicm_dss_driver; dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_port = 0; dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; omapdss_display_init(dssdev); omapdss_device_register(dssdev); mutex_init(&ddata->lock); atomic_set(&ddata->do_update, 0); if (ddata->ext_te_gpio) { r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio), dsicm_te_isr, IRQF_TRIGGER_RISING, "taal vsync", ddata); if (r) { dev_err(dev, "IRQ request failed\n"); goto err_reg; } INIT_DEFERRABLE_WORK(&ddata->te_timeout_work, 
dsicm_te_timeout_work_callback); dev_dbg(dev, "Using GPIO TE\n"); } ddata->workqueue = create_singlethread_workqueue("dsicm_wq"); if (!ddata->workqueue) { r = -ENOMEM; goto err_reg; } INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work); dsicm_hw_reset(ddata); if (ddata->use_dsi_backlight) { struct backlight_properties props = { 0 }; props.max_brightness = 255; props.type = BACKLIGHT_RAW; bldev = devm_backlight_device_register(dev, dev_name(dev), dev, ddata, &dsicm_bl_ops, &props); if (IS_ERR(bldev)) { r = PTR_ERR(bldev); goto err_bl; } ddata->bldev = bldev; } r = sysfs_create_group(&dev->kobj, &dsicm_attr_group); if (r) { dev_err(dev, "failed to create sysfs files\n"); goto err_bl; } return 0; err_bl: destroy_workqueue(ddata->workqueue); err_reg: if (ddata->extbldev) put_device(&ddata->extbldev->dev); return r; } static int __exit dsicm_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; dev_dbg(&pdev->dev, "remove\n"); omapdss_device_unregister(dssdev); if (omapdss_device_is_enabled(dssdev)) dsicm_disable(dssdev); omapdss_device_disconnect(ddata->src, dssdev); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); if (ddata->extbldev) put_device(&ddata->extbldev->dev); dsicm_cancel_ulps_work(ddata); destroy_workqueue(ddata->workqueue); /* reset, to be sure that the panel is in a valid state */ dsicm_hw_reset(ddata); return 0; } static const struct of_device_id dsicm_of_match[] = { { .compatible = "omapdss,panel-dsi-cm", }, {}, }; MODULE_DEVICE_TABLE(of, dsicm_of_match); static struct platform_driver dsicm_driver = { .probe = dsicm_probe, .remove = __exit_p(dsicm_remove), .driver = { .name = "panel-dsi-cm", .of_match_table = dsicm_of_match, .suppress_bind_attrs = true, }, }; module_platform_driver(dsicm_driver); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver"); MODULE_LICENSE("GPL");
658658.c
/*****************************************************************************
* Product: DPP example, NXP mbed-LPC1768 board, QK kernel
* Last Updated for Version: 5.4.0
* Date of the Last Update: 2015-04-06
*
*                    Q u a n t u m     L e a P s
*                    ---------------------------
*                    innovating embedded systems
*
* Copyright (C) Quantum Leaps, LLC. state-machine.com.
*
* This program is open source software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alternatively, this program may be distributed and modified under the
* terms of Quantum Leaps commercial licenses, which expressly supersede
* the GNU General Public License and are specifically designed for
* licensees interested in retaining the proprietary status of their code.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Contact information:
* Web : http://www.state-machine.com
* Email: info@state-machine.com
*****************************************************************************/
#include "qpc.h"
#include "dpp.h"
#include "bsp.h"

#include "LPC17xx.h" /* CMSIS-compliant header file for the MCU used */
/* add other drivers if necessary... */

Q_DEFINE_THIS_FILE

/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
* Assign a priority to EVERY ISR explicitly by calling NVIC_SetPriority().
* DO NOT LEAVE THE ISR PRIORITIES AT THE DEFAULT VALUE!
*/
enum KernelUnawareISRs { /* see NOTE00 */
    /* ... */
    MAX_KERNEL_UNAWARE_CMSIS_PRI /* keep always last */
};
/* "kernel-unaware" interrupts can't overlap "kernel-aware" interrupts */
Q_ASSERT_COMPILE(MAX_KERNEL_UNAWARE_CMSIS_PRI <= QF_AWARE_ISR_CMSIS_PRI);

enum KernelAwareISRs {
    EINT0_PRIO = QF_AWARE_ISR_CMSIS_PRI, /* see NOTE00 */
    SYSTICK_PRIO,
    /* ... */
    MAX_KERNEL_AWARE_CMSIS_PRI /* keep always last */
};
/* "kernel-aware" interrupts should not overlap the PendSV priority */
Q_ASSERT_COMPILE(MAX_KERNEL_AWARE_CMSIS_PRI <= (0xFF >>(8-__NVIC_PRIO_BITS)));

/* ISRs defined in this BSP ------------------------------------------------*/
void SysTick_Handler(void);
void EINT0_IRQHandler(void);

/* Local-scope objects -----------------------------------------------------*/
/* LEDs available on the board */
#define LED_1    (1U << 18)  /* P1.18 */
#define LED_2    (1U << 20)  /* P1.20 */
#define LED_3    (1U << 21)  /* P1.21 */
#define LED_4    (1U << 23)  /* P1.23 */

/* Push-Button wired externally to DIP8 (P0.6) */
#define BTN_EXT  (1U << 6)   /* P0.6 */

static uint32_t l_rnd; /* random seed */

#ifdef Q_SPY

    QSTimeCtr QS_tickTime_;
    QSTimeCtr QS_tickPeriod_;

    /* event-source identifiers used for tracing */
    static uint8_t l_SysTick_Handler;
    static uint8_t l_EINT0_IRQHandler;

    #define UART_BAUD_RATE      115200U
    #define UART_FR_TXFE        0x80U
    #define UART_TXFIFO_DEPTH   16U

    enum AppRecords { /* application-specific trace records */
        PHILO_STAT = QS_USER
    };

#endif

/* ISRs used in the application ==========================================*/
/* System clock tick ISR: drives QF time events and button debouncing. */
void SysTick_Handler(void) {
    /* state of the button debouncing, see below */
    static struct ButtonsDebouncing {
        uint32_t depressed;
        uint32_t previous;
    } buttons = { ~0U, ~0U };
    uint32_t current;
    uint32_t tmp;

    QK_ISR_ENTRY();   /* inform QK about entering an ISR */
#ifdef Q_SPY
    {
        tmp = SysTick->CTRL; /* clear CTRL_COUNTFLAG */
        QS_tickTime_ += QS_tickPeriod_; /* account for the clock rollover */
    }
#endif

    QF_TICK_X(0U, &l_SysTick_Handler); /* process time events for rate 0 */

    /* get state of the buttons */
    /* Perform the debouncing of buttons. The algorithm for debouncing
    * adapted from the book "Embedded Systems Dictionary" by Jack Ganssle
    * and Michael Barr, page 71.
    */
    current = ~LPC_GPIO0->FIOPIN; /* read P0 with the state of the Buttons */
    tmp = buttons.depressed; /* save the debounced depressed buttons */
    buttons.depressed |= (buttons.previous & current); /* set depressed */
    buttons.depressed &= (buttons.previous | current); /* clear released */
    buttons.previous = current; /* update the history */
    tmp ^= buttons.depressed; /* changed debounced depressed */
    if ((tmp & BTN_EXT) != 0U) { /* debounced BTN_EXT state changed? */
        if ((buttons.depressed & BTN_EXT) != 0U) { /* is BTN_EXT depressed? */
            static QEvt const pauseEvt = { PAUSE_SIG, 0U, 0U};
            QF_PUBLISH(&pauseEvt, &l_SysTick_Handler);
        }
        else { /* the button is released */
            static QEvt const serveEvt = { SERVE_SIG, 0U, 0U};
            QF_PUBLISH(&serveEvt, &l_SysTick_Handler);
        }
    }

    QK_ISR_EXIT();  /* inform QK about exiting an ISR */
}
/*..........................................................................*/
/* External interrupt 0 ISR: posts a test event directly to the Table AO. */
void EINT0_IRQHandler(void) {
    QK_ISR_ENTRY(); /* inform QK about entering an ISR */

    QACTIVE_POST(AO_Table, Q_NEW(QEvt, MAX_PUB_SIG), /* for testing... */
                 &l_EINT0_IRQHandler);

    QK_ISR_EXIT();  /* inform QK about exiting an ISR */
}

/* BSP functions ===========================================================*/
/* Configure the system clock, LED/button GPIOs and QS software tracing. */
void BSP_init(void) {
    /* NOTE: SystemInit() has been already called from the startup code
    *  but SystemCoreClock needs to be updated
    */
    SystemCoreClockUpdate();

    /* turn the GPIO clock on */
    LPC_SC->PCONP |= (1U << 15);

    /* setup the GPIO pin functions for the LEDs... */
    LPC_PINCON->PINSEL3 &= ~(3U <<  4); /* LED_1: function P1.18 to GPIO */
    LPC_PINCON->PINSEL3 &= ~(3U <<  8); /* LED_2: function P1.20 to GPIO */
    LPC_PINCON->PINSEL3 &= ~(3U << 10); /* LED_3: function P1.21 to GPIO */
    LPC_PINCON->PINSEL3 &= ~(3U << 14); /* LED_4: function P1.23 to GPIO */

    /* Set GPIO-P1 LED pins to output */
    LPC_GPIO1->FIODIR |= (LED_1 | LED_2 | LED_3 | LED_4);

    /* setup the GPIO pin function for the Button... */
    LPC_PINCON->PINSEL0 &= ~(3U << 12); /* function P0.6 to GPIO, pull-up */

    /* Set GPIO-P0 Button pin as input */
    LPC_GPIO0->FIODIR &= ~BTN_EXT;

    BSP_randomSeed(1234U);

    if (QS_INIT((void *)0) == 0U) { /* initialize the QS software tracing */
        Q_ERROR();
    }
    QS_OBJ_DICTIONARY(&l_SysTick_Handler);
    QS_OBJ_DICTIONARY(&l_EINT0_IRQHandler);
    QS_USR_DICTIONARY(PHILO_STAT);
}
/*..........................................................................*/
/* Show a philosopher's status ('h'ungry/'e'ating) on LEDs 1-2 and in QS. */
void BSP_displayPhilStat(uint8_t n, char const *stat) {
    if (stat[0] == 'h') {
        LPC_GPIO1->FIOSET = LED_1;  /* turn LED on  */
    }
    else {
        LPC_GPIO1->FIOCLR = LED_1;  /* turn LED off */
    }
    if (stat[0] == 'e') {
        LPC_GPIO1->FIOSET = LED_2;  /* turn LED on  */
    }
    else {
        LPC_GPIO1->FIOCLR = LED_2;  /* turn LED off */
    }

    QS_BEGIN(PHILO_STAT, AO_Philo[n]) /* application-specific record begin */
        QS_U8(1, n);  /* Philosopher number */
        QS_STR(stat); /* Philosopher status */
    QS_END()
}
/*..........................................................................*/
/* Reflect the "paused" state of the application on LED 3. */
void BSP_displayPaused(uint8_t paused) {
    if (paused != (uint8_t)0) {
        LPC_GPIO1->FIOSET = LED_3;  /* turn LED on  */
    }
    else {
        LPC_GPIO1->FIOCLR = LED_3;  /* turn LED off */
    }
}
/*..........................................................................*/
uint32_t BSP_random(void) { /* a very cheap pseudo-random-number generator */
    /* "Super-Duper" Linear Congruential Generator (LCG)
    * LCG(2^32, 3*7*11*13*23, 0, seed)
    */
    l_rnd = l_rnd * (3U*7U*11U*13U*23U);
    return l_rnd >> 8;
}
/*..........................................................................*/
/* Seed the LCG used by BSP_random(). */
void BSP_randomSeed(uint32_t seed) {
    l_rnd = seed;
}
/*..........................................................................*/
void BSP_terminate(int16_t result) {
    (void)result;
}

/* QF callbacks ============================================================*/
/* Framework startup: arm SysTick and assign/enable all NVIC priorities. */
void QF_onStartup(void) {
    /* set up the SysTick timer to fire at BSP_TICKS_PER_SEC rate */
    SysTick_Config(SystemCoreClock / BSP_TICKS_PER_SEC);

    /* set priorities of ALL ISRs used in the system, see NOTE00
    *
    * !!!!!!!!!!!!!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    * Assign a priority to EVERY ISR explicitly by calling NVIC_SetPriority().
    * DO NOT LEAVE THE ISR PRIORITIES AT THE DEFAULT VALUE!
    */
    NVIC_SetPriority(SysTick_IRQn, SYSTICK_PRIO);
    NVIC_SetPriority(EINT0_IRQn, EINT0_PRIO);
    /* ... */

    /* enable IRQs in the NVIC... */
    NVIC_EnableIRQ(EINT0_IRQn);
}
/*..........................................................................*/
void QF_onCleanup(void) {
}
/*..........................................................................*/
/* QK idle callback: blink LED_4 and either drain the QS TX FIFO or sleep. */
void QK_onIdle(void) {
    /* toggle the User LED on and then off, see NOTE01 */
    QF_INT_DISABLE();
    LPC_GPIO1->FIOSET = LED_4;  /* turn LED on  */
    __NOP();   /* a couple of NOPs to actually see the LED glow */
    __NOP();
    __NOP();
    __NOP();
    LPC_GPIO1->FIOCLR = LED_4;  /* turn LED off */
    QF_INT_ENABLE();

#ifdef Q_SPY
    if ((LPC_UART0->LSR & 0x20U) != 0U) {  /* TX Holding Register empty? */
        uint16_t fifo = UART_TXFIFO_DEPTH; /* max bytes we can accept */
        uint8_t const *block;

        QF_INT_DISABLE();
        block = QS_getBlock(&fifo); /* try to get next block to transmit */
        QF_INT_ENABLE();

        while (fifo-- != 0) { /* any bytes in the block? */
            LPC_UART0->THR = *block++; /* put into the FIFO */
        }
    }
#elif defined NDEBUG
    /* Put the CPU and peripherals to the low-power mode.
    * you might need to customize the clock management for your application,
    * see the datasheet for your particular Cortex-M MCU.
    */
    __WFI(); /* Wait-For-Interrupt */
#endif
}

/*..........................................................................*/
/* NOTE Q_onAssert() defined in assembly in startup_TM4C123GH6PM.s */

/* QS callbacks ============================================================*/
#ifdef Q_SPY

static void UART0_setBaudrate(uint32_t baud); /* helper function */

/*..........................................................................*/
/* Initialize the QS trace buffer, UART0 pins/peripheral and trace filters. */
uint8_t QS_onStartup(void const *arg) {
    static uint8_t qsBuf[2*1024]; /* buffer for Quantum Spy */

    QS_initBuf(qsBuf, sizeof(qsBuf));

    // setup the P0_2 UART0 TX pin
    LPC_PINCON->PINSEL0  &= ~(3U << 4); /* clear P0_2 function */
    LPC_PINCON->PINSEL0  |=  (1U << 4); /* P0_2 to UART function (TX) */
    LPC_PINCON->PINMODE0 &= ~(3U << 4); /* P0_2 pull-up register */

    // setup the P0_3 UART0 RX pin
    LPC_PINCON->PINSEL0  &= ~(3U << 6); /* clear P0_3 function */
    LPC_PINCON->PINSEL0  |=  (1U << 6); /* P0_3 to UART function (RX) */
    LPC_PINCON->PINMODE0 &= ~(3U << 6); /* P0_3 pull-up register */

    /* enable power to UART0 */
    LPC_SC->PCONP |= (1U << 3);

    /* enable FIFOs and default RX trigger level */
    LPC_UART0->FCR =
        (1U << 0)    /* FIFO Enable - 0 = Disables, 1 = Enabled */
        | (0U << 1)  /* Rx Fifo Reset */
        | (0U << 2)  /* Tx Fifo Reset */
        | (0U << 6); /* Rx irq trig: 0=1char, 1=4chars, 2=8chars, 3=14chars */

    /* disable IRQs */
    LPC_UART0->IER =
        (0U << 0)    /* Rx Data available IRQ disable */
        | (0U << 1)  /* Tx Fifo empty IRQ disable */
        | (0U << 2); /* Rx Line Status IRQ disable */

    // set default baud rate
    UART0_setBaudrate(115200U);

    // format 8-data-bits, 1-stop-bit, parity-none
    LPC_UART0->LCR =
        (3U << 0)    /* 8-data-bits */
        | (0U << 2)  /* 1 stop-bit */
        | (0U << 3)  /* parity disable */
        | (0U << 4); /* parity none */

    QS_tickPeriod_ = SystemCoreClock / BSP_TICKS_PER_SEC;
    QS_tickTime_ = QS_tickPeriod_; /* to start the timestamp at zero */

    /* setup the QS filters... */
    QS_FILTER_ON(QS_QEP_STATE_ENTRY);
    QS_FILTER_ON(QS_QEP_STATE_EXIT);
    QS_FILTER_ON(QS_QEP_STATE_INIT);
    QS_FILTER_ON(QS_QEP_INIT_TRAN);
    QS_FILTER_ON(QS_QEP_INTERN_TRAN);
    QS_FILTER_ON(QS_QEP_TRAN);
    QS_FILTER_ON(QS_QEP_IGNORED);
    QS_FILTER_ON(QS_QEP_DISPATCH);
    QS_FILTER_ON(QS_QEP_UNHANDLED);

//    QS_FILTER_ON(QS_QF_ACTIVE_ADD);
//    QS_FILTER_ON(QS_QF_ACTIVE_REMOVE);
//    QS_FILTER_ON(QS_QF_ACTIVE_SUBSCRIBE);
//    QS_FILTER_ON(QS_QF_ACTIVE_UNSUBSCRIBE);
//    QS_FILTER_ON(QS_QF_ACTIVE_POST_FIFO);
//    QS_FILTER_ON(QS_QF_ACTIVE_POST_LIFO);
//    QS_FILTER_ON(QS_QF_ACTIVE_GET);
//    QS_FILTER_ON(QS_QF_ACTIVE_GET_LAST);
//    QS_FILTER_ON(QS_QF_EQUEUE_INIT);
//    QS_FILTER_ON(QS_QF_EQUEUE_POST_FIFO);
//    QS_FILTER_ON(QS_QF_EQUEUE_POST_LIFO);
//    QS_FILTER_ON(QS_QF_EQUEUE_GET);
//    QS_FILTER_ON(QS_QF_EQUEUE_GET_LAST);
//    QS_FILTER_ON(QS_QF_MPOOL_INIT);
//    QS_FILTER_ON(QS_QF_MPOOL_GET);
//    QS_FILTER_ON(QS_QF_MPOOL_PUT);
//    QS_FILTER_ON(QS_QF_PUBLISH);
//    QS_FILTER_ON(QS_QF_RESERVED8);
//    QS_FILTER_ON(QS_QF_NEW);
//    QS_FILTER_ON(QS_QF_GC_ATTEMPT);
//    QS_FILTER_ON(QS_QF_GC);
    QS_FILTER_ON(QS_QF_TICK);
//    QS_FILTER_ON(QS_QF_TIMEEVT_ARM);
//    QS_FILTER_ON(QS_QF_TIMEEVT_AUTO_DISARM);
//    QS_FILTER_ON(QS_QF_TIMEEVT_DISARM_ATTEMPT);
//    QS_FILTER_ON(QS_QF_TIMEEVT_DISARM);
//    QS_FILTER_ON(QS_QF_TIMEEVT_REARM);
//    QS_FILTER_ON(QS_QF_TIMEEVT_POST);
//    QS_FILTER_ON(QS_QF_TIMEEVT_CTR);
//    QS_FILTER_ON(QS_QF_CRIT_ENTRY);
//    QS_FILTER_ON(QS_QF_CRIT_EXIT);
//    QS_FILTER_ON(QS_QF_ISR_ENTRY);
//    QS_FILTER_ON(QS_QF_ISR_EXIT);
//    QS_FILTER_ON(QS_QF_INT_DISABLE);
//    QS_FILTER_ON(QS_QF_INT_ENABLE);
//    QS_FILTER_ON(QS_QF_ACTIVE_POST_ATTEMPT);
//    QS_FILTER_ON(QS_QF_EQUEUE_POST_ATTEMPT);
//    QS_FILTER_ON(QS_QF_MPOOL_GET_ATTEMPT);
//    QS_FILTER_ON(QS_QF_RESERVED1);
//    QS_FILTER_ON(QS_QF_RESERVED0);
//    QS_FILTER_ON(QS_QK_MUTEX_LOCK);
//    QS_FILTER_ON(QS_QK_MUTEX_UNLOCK);
//    QS_FILTER_ON(QS_QK_SCHEDULE);
//    QS_FILTER_ON(QS_QK_RESERVED1);
//    QS_FILTER_ON(QS_QK_RESERVED0);
//    QS_FILTER_ON(QS_QEP_TRAN_HIST);
//    QS_FILTER_ON(QS_QEP_TRAN_EP);
//    QS_FILTER_ON(QS_QEP_TRAN_XP);
//    QS_FILTER_ON(QS_QEP_RESERVED1);
//    QS_FILTER_ON(QS_QEP_RESERVED0);
    QS_FILTER_ON(QS_SIG_DICT);
    QS_FILTER_ON(QS_OBJ_DICT);
    QS_FILTER_ON(QS_FUN_DICT);
    QS_FILTER_ON(QS_USR_DICT);
    QS_FILTER_ON(QS_EMPTY);
    QS_FILTER_ON(QS_RESERVED3);
    QS_FILTER_ON(QS_RESERVED2);
    QS_FILTER_ON(QS_TEST_RUN);
    QS_FILTER_ON(QS_TEST_FAIL);
    QS_FILTER_ON(QS_ASSERT_FAIL);

    return (uint8_t)1; /* return success */
}
/*..........................................................................*/
void QS_onCleanup(void) {
}
/*..........................................................................*/
/* Trace timestamp from SysTick; compensates for a pending rollover. */
QSTimeCtr QS_onGetTime(void) { /* NOTE: invoked with interrupts DISABLED */
    if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0) { /* not set? */
        return QS_tickTime_ - (QSTimeCtr)SysTick->VAL;
    }
    else { /* the rollover occured, but the SysTick_ISR did not run yet */
        return QS_tickTime_ + QS_tickPeriod_ - (QSTimeCtr)SysTick->VAL;
    }
}
/*..........................................................................*/
/* Busy-wait flush of the QS trace buffer out of UART0. */
void QS_onFlush(void) {
    uint16_t b;

    QF_INT_DISABLE();
    while ((b = QS_getByte()) != QS_EOD) { /* while not End-Of-Data... */
        QF_INT_ENABLE();
        /* busy-wait until the TX Holding Register is empty... */
        while ((LPC_UART0->LSR & 0x20U) == 0U) {
        }
        LPC_UART0->THR = (b & 0xFFU); /* put into the DR register */
    }
    QF_INT_ENABLE();
}
/*..........................................................................*/
/*
* Set the LPC UART0 barud-rate generator according to
* Section 14.4.12 in LPC176x Manual (document UM10360)
*/
static void UART0_setBaudrate(uint32_t baud) {
    /* First we check to see if the basic divide with no DivAddVal/MulVal
    * ratio gives us an integer result. If it does, we set DivAddVal = 0,
    * MulVal = 1. Otherwise, we search the valid ratio value range to find
    * the closest match. This could be more elegant, using search methods
    * and/or lookup tables, but the brute force method is not that much
    * slower, and is more maintainable.
    */
    uint32_t PCLK     = SystemCoreClock; /* divider /1 set below */
    uint16_t DL       = PCLK / (16U * baud);
    uint8_t DivAddVal = 0U;
    uint8_t MulVal    = 1U;

    /* set PCLK divider to 1 */
    LPC_SC->PCLKSEL0 &= ~(0x3U << 6); /* clear divider bits */
    LPC_SC->PCLKSEL0 |=  (0x1U << 6); /* set divider to 1 */

    if ((PCLK % (16U * baud)) != 0U) { /* non zero remainder? */
        uint32_t err_best = baud;
        bool found = false;
        uint32_t b;
        uint8_t mv;
        for (mv = 1U; mv < 16U && !found; mv++) {
            uint16_t dlv;
            uint8_t dav;
            for (dav = 0U; dav < mv; ++dav) {
                /*
                * baud = PCLK / (16 * dlv * (1 + (DivAdd / Mul))
                * solving for dlv, we get
                * dlv = mul * PCLK / (16 * baud * (divadd + mul))
                * mul has 4 bits, PCLK has 27 so we have 1 bit headroom,
                * which can be used for rounding for many values of mul
                * and PCLK we have 2 or more bits of headroom which can
                * be used to improve precision
                * note: X / 32 doesn't round correctly.
                * Instead, we use ((X / 16) + 1) / 2 for correct rounding
                */
                if ((mv*PCLK*2U) & 0x80000000U) { /* 1 bit headroom */
                    dlv = ((((2U*mv*PCLK) / (baud*(dav + mv)))/16U) + 1U)/2U;
                }
                else { /* 2 bits headroom, use more precision */
                    dlv = ((((4U*mv*PCLK) / (baud*(dav+mv)))/32U) + 1U)/2U;
                }

                /* datasheet says if DLL==DLM==0, then 1 is used instead */
                if (dlv == 0U) {
                    dlv = 1U;
                }

                /* datasheet says if dav > 0 then DL must be >= 2 */
                if ((dav > 0U) && (dlv < 2U)) {
                    dlv = 2U;
                }

                /* integer rearrangement of baud equation (with rounding) */
                b = ((PCLK*mv / (dlv*(dav + mv)*8U)) + 1U)/2U;
                b = (b >= baud) ? (b - baud) : (baud - b);

                /* check to see how we did */
                if (b < err_best) {
                    err_best  = b;
                    DL        = dlv;
                    MulVal    = mv;
                    DivAddVal = dav;

                    if (b == baud) {
                        found = true;
                        break; /* break out of the inner for-loop */
                    }
                }
            }
        }
    }

    // set LCR[DLAB] to enable writing to divider registers
    LPC_UART0->LCR |= (1U << 7);

    // set divider values
    LPC_UART0->DLM = (DL >> 8) & 0xFFU;
    LPC_UART0->DLL = (DL >> 0) & 0xFFU;
    LPC_UART0->FDR = ((uint32_t)DivAddVal << 0) | ((uint32_t)MulVal << 4);

    // clear LCR[DLAB]
    LPC_UART0->LCR &= ~(1U << 7);
}
#endif /* Q_SPY */
/*--------------------------------------------------------------------------*/

/*****************************************************************************
* NOTE00:
* The QF_AWARE_ISR_CMSIS_PRI constant from the QF port specifies the highest
* ISR priority that is disabled by the QF framework. The value is suitable
* for the NVIC_SetPriority() CMSIS function.
*
* Only ISRs prioritized at or below the QF_AWARE_ISR_CMSIS_PRI level (i.e.,
* with the numerical values of priorities equal or higher than
* QF_AWARE_ISR_CMSIS_PRI) are allowed to call the QK_ISR_ENTRY/QK_ISR_ENTRY
* macros or any other QF/QK services. These ISRs are "QF-aware".
*
* Conversely, any ISRs prioritized above the QF_AWARE_ISR_CMSIS_PRI priority
* level (i.e., with the numerical values of priorities less than
* QF_AWARE_ISR_CMSIS_PRI) are never disabled and are not aware of the kernel.
* Such "QF-unaware" ISRs cannot call any QF/QK services. In particular they
* can NOT call the macros QK_ISR_ENTRY/QK_ISR_ENTRY. The only mechanism
* by which a "QF-unaware" ISR can communicate with the QF framework is by
* triggering a "QF-aware" ISR, which can post/publish events.
*
* NOTE01:
* The User LED is used to visualize the idle loop activity. The brightness
* of the LED is proportional to the frequency of invcations of the idle loop.
* Please note that the LED is toggled with interrupts locked, so no interrupt
* execution time contributes to the brightness of the User LED.
*/
157153.c
/******************************************************************************
 *                     Code generated with sympy 0.7.6                        *
 *                                                                            *
 *              See http://www.sympy.org/ for more information.               *
 *                                                                            *
 *                       This file is part of 'project'                       *
 ******************************************************************************/
#include "pinky_prox_index_metacarpal_side_1.h"
#include <math.h>

/*
 * Auto-generated evaluation of the "pinky_prox_index_metacarpal_side_1"
 * expression. The symbolic expression simplified to the constant zero,
 * so this always returns 0.0.
 */
double pinky_prox_index_metacarpal_side_1() {
   return 0.0;
}
317301.c
#include <stdio.h>

/*
 * Copy stdin to stdout, replacing each run of one or more blanks with a
 * single blank (K&R exercise 1-9).
 *
 * Fix: the original squeezed blanks by reading ahead inside the loop, but
 * did not check for EOF after the read-ahead. If the input ended with
 * trailing blanks, `putchar(c)` was called with c == EOF, emitting a
 * spurious byte (typically 0xFF). We now stop cleanly after printing the
 * single collapsed blank. Also removed the stale `/ * '\t' * /` leftover
 * in the loop condition and added the conventional `return 0;`.
 */
int main(void) {
    int c;

    while ((c = getchar()) != EOF) {
        if (c != ' ') {
            putchar(c);
        } else {
            /* consume the rest of the blank run */
            while ((c = getchar()) == ' ') {
                ;
            }
            putchar(' ');        /* emit exactly one blank for the run */
            if (c == EOF) {
                break;           /* input ended inside a blank run */
            }
            putchar(c);          /* first non-blank after the run */
        }
    }
    return 0;
}
922830.c
/** Title screen improvements. */
#include "main.h"
#include "revolution/os.h"
#include "revolution/pad.h"

/* original title-screen run method, saved so titleHook() can chain to it */
static int (*oldTitleHook)();

/*
 * Replacement run method for the title screen DLL. Loads the Krystal
 * assets each call and, during a window while the title pans away from
 * the intro movie, lets the player press R to load save slot 0 and jump
 * straight into the game.
 */
int titleHook() {
    //do this here due to memory starvation at startup
    enableKrystal = 1;
    krystal_loadAssets();

    //check current and previous frame
    u16 buttons = controllerStates[0].button | controllerStates[4].button;
    //debugPrintf("saveStatus = %d frameCount = %d\n", saveStatus, frameCount);
    //doing it too soon will crash
    if(frameCount > 20 && frameCount < 300
    && titleScreen_panAwayFromMovieTimer > 0 && buttons & PAD_TRIGGER_R) {
        //free some memory. XXX does this actually do any good?
        mapUnload(0x3D, 0x2000);

        //ensure text is loaded properly
        gameTextLoadDir(GAMETEXT_DIR_Link);
        while(isDvdDriveBusy) waitNextFrame();

        //OSReport("Loading save 1\n");
        titleScreenActive = false;
        //load into the game
        titleScreen_panAwayFromMovieTimer = 0;
        titleLoadSaveFiles(); //to get the savegame settings
        //interesting: calling this during the game still works, and replaces your current save
        //data, so things like your items are reset, but you don't reload or respawn...
        saveGame_load(0); //load the actual save file
        loadSaveSettings(); //apply the settings
    }
    return oldTitleHook();
}

/*
void saveInfoHook(void *unused, u32 alpha) {
    saveSelect_drawText(unused, alpha);
    //we can try to show more info here, but we need to also patch saveGame_prepareAndWrite
    //or some such to actually preserve that info.
    SaveGame *save = &saveData.curSaveGame;
    debugPrintf("%f %f %f %d: %02X (%02X)\n",
        save->charPos->pos.x, save->charPos->pos.y, save->charPos->pos.z,
        save->charPos->mapLayer, save->charPos->mapId,
        mapCoordsToId((int)save->charPos->pos.x / MAP_CELL_SCALE,
            (int)save->charPos->pos.z / MAP_CELL_SCALE,
            save->charPos->mapLayer)
    );
}
*/

/*
 * Hooked save-file loader: chains to saveGame_load() and, when START is
 * held, overrides the loaded character's spawn position with hard-coded
 * AnimTest coordinates as a recovery path for corrupted save positions.
 */
int titleSaveLoadHook(int slot) { //not sure about return type...
    int res = saveGame_load(slot);
    PlayerCharPos *pos = &pCurSaveGame->charPos[pCurSaveGame->character];
    DPRINT("Loading char %d pos: %f, %f, %f", pCurSaveGame->character,
        pos->pos.x, pos->pos.y, pos->pos.z);
    if(getButtonsHeld(0) & PAD_BUTTON_START) {
        //go to AnimTest, in case save file is buggered.
        pos->pos.x = -9495;
        pos->pos.y = -127;
        pos->pos.z = -19015;
        pos->mapLayer = 0;
    }
    return res;
}

/*
 * Install the title-screen patches. The raw addresses are specific to
 * this game build; 0x8031a320 holds the title screen DLL's run-method
 * pointer which we save and replace.
 */
void titleHooksInit() {
    //hook into the run method of the title screen DLL
    OSReport("Install title hook...\n");
    //NOTE(review): reads a u32 into a function pointer; relies on the
    //target toolchain accepting the implicit conversion - confirm.
    oldTitleHook = *(u32*)0x8031a320;
    WRITE32(0x8031a320, titleHook);
    //hookBranch(0x8011ab74, saveInfoHook, 1);
    hookBranch(0x8011af00, titleSaveLoadHook, 1);
}
330447.c
/* * Copyright 2014-2018 Real Logic Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if defined(__linux__) #define _BSD_SOURCE #define _GNU_SOURCE #endif #include <time.h> #include <stdio.h> #include <inttypes.h> #include "util/aeron_strutil.h" void aeron_format_date(char *str, size_t count, int64_t timestamp) { char time_buffer[80]; char msec_buffer[8]; char tz_buffer[8]; struct tm time; time_t just_seconds = timestamp / 1000; int64_t msec_after_sec = timestamp % 1000; localtime_r(&just_seconds, &time); strftime(time_buffer, sizeof(time_buffer) - 1, "%Y-%m-%d %H:%M:%S.", &time); snprintf(msec_buffer, sizeof(msec_buffer) - 1, "%03" PRId64, msec_after_sec); strftime(tz_buffer, sizeof(tz_buffer) - 1, "%z", &time); snprintf(str, count, "%s%s%s", time_buffer, msec_buffer, tz_buffer); } void aeron_format_to_hex(char *str, size_t str_length, uint8_t *data, size_t data_len) { static char table[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; size_t j = 0; for (size_t i = 0; i < data_len && j < str_length; i++) { char c_high = table[(data[i] >> 4) & 0x0F]; char c_low = table[data[i] & 0x0F]; str[j++] = c_high; str[j++] = c_low; } str[j] = '\0'; } extern uint64_t aeron_fnv_64a_buf(uint8_t *buf, size_t len);
864541.c
/***
 * Copyright 1987 by AT&T Bell Laboratories.
 * All rights reserved.
 * Note: Lucent Technologies is now Alcatel-Lucent; some copyrights
 * may have been assigned by AT&T to Alcatel-Lucent.
 ***/

/*
 * awkcc type-tracking stacks: maintains, per nesting level, the inferred
 * type of each optimizable variable ("cur" = current, "rt" = runtime) so
 * the compiler can merge types across branches, loops, break/continue and
 * function-call boundaries.
 */

#include <stdio.h>
#include "awkcc.h"

int cfstkp;
uchar *cfstk[MAX_LEVELS]; /* keep track of recursion */
int cbfstkp;
int cbfstk[MAX_LEVELS]; /* keep track for "return" */
int ccstkp;
int ccstk[MAX_LEVELS]; /* keep track for "continue" */
int cbstkp;
int cbstk[MAX_LEVELS]; /* keep track for "break" */
int currpush;        /* current nesting depth into the per-variable type stacks */
extern VARINFOP *symtab;

/*
 * Allocate and initialize a fresh VARINFO record: unknown type (YUNK) at
 * every level up to the current nesting depth, empty string/func names,
 * and an empty expression list. Returns the new record.
 * NOTE(review): calloc results are not checked — matches the rest of this
 * 1987 code base; confirm before hardening.
 */
VARINFOP make_varinfo()
{
	VARINFOP retval;
	int i;

	retval=(VARINFOP)calloc(1, sizeof(VARINFO));
	retval->string=(uchar *)"";
	for (i=0; i<=currpush; i++) {
		(retval->cur)[i]=YUNK;
		(retval->rt)[i]=YUNK;
	}
	retval->exprs=(Nodep *)calloc(1, sizeof(Nodep));
	retval->ne=0;
	retval->atyped=0;
	retval->func=(uchar *)"";
	retval->type=(-1);
	return(retval);
}

/*
 * Variable-type stack operation dispatcher.
 * type selects the operation; off is the extra operand used by the
 * AFFECT/AFFECTF cases (distinguishes break vs continue, or a return
 * offset). ER ... SX is the project's fatal-error macro pair.
 */
vstkop(type, off)
int type;
short off;
{
	register int i;
	register VARINFOP tmp;
	int save;

	switch(type) {
	case NULLIFY:
		/* Forget everything known at the current level. */
		for (i=0; tmp=symtab[i]; i++) {
			if (OPTVAR(tmp->type)) {
				(tmp->cur)[currpush]=YUNK;
				(tmp->rt)[currpush]=YUNK;
			}
		}
		break;
	case PUSH:
		/* Enter a nesting level, copying the parent level's types down. */
		currpush++;
		if (currpush>=MAX_LEVELS)
			ER "Too many nested statements" SX;
		for (i=0; tmp=symtab[i]; i++)
			if (OPTVAR(tmp->type)) {
				(tmp->cur)[currpush]=(tmp->cur)[currpush-1];
				(tmp->rt)[currpush]=(tmp->rt)[currpush-1];
			}
		break;
	case INVERT:
		/* Swap the top two levels (used for else-branches and the like). */
		for (i=0; tmp=symtab[i]; i++)
			if (OPTVAR(tmp->type)) {
				save=(tmp->cur)[currpush];
				(tmp->cur)[currpush]=(tmp->cur)[currpush-1];
				(tmp->cur)[currpush-1]=save;
				save=(tmp->rt)[currpush];
				(tmp->rt)[currpush]=(tmp->rt)[currpush-1];
				(tmp->rt)[currpush-1]=save;
			}
		break;
	case AFFECTF:
		/* Merge current types into the level recorded for a "return".
		 * NOTE(review): this indexes cbfstk (return stack) with cbstkp
		 * (break-stack depth) — looks like a slip for cbfstkp-1; confirm
		 * against upstream before changing. */
		for (i=0; tmp=symtab[i]; i++)
			if (OPTVAR(tmp->type))
				resolve(tmp, currpush, off+cbfstk[cbstkp-1]);
		break;
	case AFFECT:
		/* Merge current types into the target level of a break (off!=0)
		 * or continue (off==0).
		 * NOTE(review): the two error strings appear swapped relative to
		 * which stack `off` selects — confirm before changing. */
		i=off?cbstkp:ccstkp;
		if (i<=0)
			ER off?"Illegal continue":"Illegal break" SX;
		save=off?cbstk[cbstkp-1]:ccstk[ccstkp-1];
		for (i=0; tmp=symtab[i]; i++)
			if (OPTVAR(tmp->type))
				resolve(tmp, currpush, save);
		break;
	case POP:
		/* Leave a level, discarding its type information. */
		currpush--;
		break;
	case POPR:
		/* Leave a level, merging its types into the parent level. */
		currpush--;
		if (currpush<0)
			fprintf(stderr, "CURRPUSH <0 (%d)!\n", currpush);
		for (i=0; tmp=symtab[i]; i++)
			if (OPTVAR(tmp->type))
				resolve(tmp, currpush+1, currpush);
		break;
	default:
		fprintf(stderr, "operation %d.\n", type);
		ER "Invalid stk operation" SX;
	}
}

/*
 * Function-name stack: tracks the chain of functions currently being
 * compiled so recursion can be detected (FIND) and nesting limited.
 * Returns 1/0 for FIND and CHECKOK queries, 0 otherwise.
 */
fstkop(type, str)
int type;
uchar *str;
{
	int i;

	switch(type) {
	case PUSH:
		if (cfstkp>=MAX_LEVELS)
			ER "Too many nested function calls" SX;
		cfstk[cfstkp++]=str;
		break;
	case POP:
		--cfstkp;
		break;
	case FIND:
		/* Is str already somewhere on the call chain? */
		for (i=0; i<cfstkp; i++)
			if (!strcmp(str, cfstk[i]))
				return(1);
		return (0);
		break;
	case CHECKOK:
		/* True while at most one function is being compiled. */
		return (cfstkp<2);
		break;
	default:
		ER "Invalid fstk operation" SX;
		break;
	}
	return(0);
}

/*
 * Generic push/pop on one of the three level stacks (return/break/continue),
 * recording the nesting depth (currpush) active when the construct began.
 */
genstkop(type, stk)
int type;
int stk;
{
	int *rstk, *stkp;

	switch(stk) {
	case CBFSTACK:
		rstk=cbfstk;
		stkp= &cbfstkp;
		break;
	case CBSTACK:
		rstk=cbstk;
		stkp= &cbstkp;
		break;
	case CSTACK:
		rstk=ccstk;
		stkp= &ccstkp;
		break;
	}
	switch(type) {
	case PUSH:
		if (*stkp>=MAX_LEVELS)
			ER "TOO MANY NESTS" SX;
		rstk[*stkp]=currpush;
		(*stkp)+=1;
		break;
	case POP:
		(*stkp)-=1;
		break;
	default:
		ER "INTERNAL: Invalid stk operation" SX;
	}
}

/* Type-merge lattice for "cur" types: ttab[from][to] is the merged type. */
static int ttab[7][7]= {
/*YNUM     YINT     YSTR     YBTH     YSU      YNU      YUNK */
{ YNUM,    YNUM,    YUNK,    YNUMUNK, YUNK,    YNUMUNK, YUNK },
{ YNUM,    YINT,    YUNK,    YNUMUNK, YUNK,    YNUMUNK, YUNK },
{ YUNK,    YUNK,    YSTR,    YSTRUNK, YSTRUNK, YUNK,    YUNK },
{ YNUMUNK, YNUMUNK, YSTRUNK, YBTH,    YSTRUNK, YNUMUNK, YUNK },
{ YUNK,    YUNK,    YSTRUNK, YSTRUNK, YSTRUNK, YUNK,    YUNK },
{ YNUMUNK, YNUMUNK, YUNK,    YNUMUNK, YUNK,    YNUMUNK, YUNK },
{ YUNK,    YUNK,    YUNK,    YUNK,    YUNK,    YUNK,    YUNK }
};

/* Merge lattice for "rt" (runtime) types; -1 marks combinations that are
 * expected never to occur (caught by the sanity checks in resolve()). */
static int rttab[7][7]= {
{ YNUM, (-1), YUNK, (-1), (-1), (-1), YUNK },
{ (-1), (-1), (-1), (-1), (-1), (-1), (-1) },
{ YUNK, (-1), YSTR, (-1), (-1), (-1), YUNK },
{ (-1), (-1), (-1), (-1), (-1), (-1), (-1) },
{ (-1), (-1), (-1), (-1), (-1), (-1), (-1) },
{ (-1), (-1), (-1), (-1), (-1), (-1), (-1) },
{ YUNK, (-1), YUNK, (-1), (-1), (-1), YUNK }
};

/*
 * Merge the type information of levels `from` and `to` for variable tmp,
 * storing the result at level `to`. Both the rt and cur lattices are
 * applied, with fatal errors on any combination marked impossible above.
 */
resolve(tmp, from, to)
VARINFOP tmp;
int from;
int to;
{
	int fromval, toval;

	fromval=(tmp->rt)[from];
	toval=(tmp->rt)[to];
	(tmp->rt)[to]=rttab[fromval][toval];
	if (fromval==9 || toval==9)
		ER "Missed one" SX;
	switch ((tmp->rt)[to]) {
	case YUNK:
	case YSTR:
	case YNUM:
		break;
	default:
		fprintf(stderr, "(%d and %d) rt is now %d.\n", fromval, toval, (tmp->rt)[to]);
		ER "Bad news in 'resolve'" SX;
		break;
	}
	fromval=(tmp->cur)[from];
	toval=(tmp->cur)[to];
	(tmp->cur)[to]=ttab[fromval][toval];
	if ((tmp->rt)[to]<0 || (tmp->cur)[to]<0) {
		fprintf(stderr, "rt: %d.\n", (tmp->rt)[to]);
		fprintf(stderr, "(%d and %d, ttab=%d.\n", fromval, toval, ttab[fromval][toval]);
		ER "Bad news in 'resolve'" SX;
	}
}
86149.c
#include <ccan/tcon/tcon.h>
#include <ccan/tap/tap.h>
#include <stdlib.h>

/* An int carrying a type canary, exercised through TCON_WRAP. */
typedef TCON_WRAP(int, char *canary) canaried_int;

int main(void)
{
	canaried_int wrapped = TCON_WRAP_INIT(0);
	int *payload = tcon_unwrap(&wrapped);

	plan_tests(2);

	/* A freshly wrapped value reads back as its initializer. */
	ok1(*payload == 0);

	/* A write through the unwrapped pointer is visible on re-read. */
	*payload = 17;
	ok1(*payload == 17);

	return exit_status();
}
843207.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <arpa/inet.h>
#include <fcntl.h> // for open
#include <unistd.h> // for close
#include <pthread.h>

/* Serializes each complete upload; required because seteuid()/setegid()
 * change process-wide state, so only one request may be in flight. */
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Copy srcPath to dstPath in-process under the current effective uid/gid.
 * Returns 0 on success, -1 on any error.
 *
 * SECURITY FIX: the original ran system("cp <src> <dst>") with a
 * network-supplied destination path, allowing arbitrary shell command
 * injection. Copying with stdio involves no shell at all.
 */
static int copyFile(const char *srcPath, const char *dstPath)
{
    FILE *src = fopen(srcPath, "r");
    if (src == NULL) {
        return -1;
    }
    FILE *dst = fopen(dstPath, "w");
    if (dst == NULL) {
        fclose(src);
        return -1;
    }

    char chunk[4096];
    size_t got;
    int rc = 0;
    while ((got = fread(chunk, 1, sizeof chunk, src)) > 0) {
        if (fwrite(chunk, 1, got, dst) != got) {
            rc = -1;
            break;
        }
    }
    if (ferror(src)) {
        rc = -1;
    }
    fclose(src);
    if (fclose(dst) != 0) {   /* fclose flushes; a failed flush is a failed copy */
        rc = -1;
    }
    return rc;
}

/*
 * Per-connection worker. Protocol (each message a NUL-terminated string):
 *   1. file contents  -> "Success: File Received"
 *   2. file name      -> saved as /tmp/<name>, "Success: File Saved"
 *   3. "uid:gid"      -> "Success: Permission Received"
 *   4. destination    -> copy as uid:gid, "Success: File Moved \n" or
 *                        "Failed: Permission Denied \n"
 *
 * arg is a heap-allocated int holding the connected socket fd; this thread
 * takes ownership and frees it. (BUG FIX: the original passed the address
 * of a single stack variable in main(), so a fast second accept() could
 * overwrite the fd before this thread read it.)
 */
void * socketThread(void *arg)
{
    FILE *fp;
    char filePath[2000];
    char fileBuffer[2000];
    char fileName[2000];
    char tempPath[2000];
    char permissions[2000];
    char response[2000];
    int groupid;
    int userid;

    int threadSocket = *((int *)arg);
    free(arg);

    /* Zero-fill before each recv so the buffers are always proper strings
     * (recv() does not NUL-terminate), and leave room for the terminator. */
    memset(fileBuffer, 0, sizeof fileBuffer);
    recv(threadSocket, fileBuffer, sizeof(fileBuffer) - 1, 0);
    strcpy(response, "Success: File Received");
    send(threadSocket, response, strlen(response), 0);

    memset(fileName, 0, sizeof fileName);
    recv(threadSocket, fileName, sizeof(fileName) - 1, 0);
    /* BUG FIX: strcat() into a same-sized buffer could overflow; snprintf
     * truncates safely. NOTE(review): fileName is client-controlled and not
     * checked for ".." traversal — confirm whether that matters here. */
    snprintf(tempPath, sizeof tempPath, "/tmp/%s", fileName);

    pthread_mutex_lock(&lock);

    fp = fopen(tempPath, "w");
    if (fp == NULL) {
        /* BUG FIX: the original returned 1 (from a void* function) while
         * still holding the mutex, deadlocking every later connection and
         * leaking the socket. */
        perror("Failed: ");
        pthread_mutex_unlock(&lock);
        close(threadSocket);
        pthread_exit(NULL);
    }
    fprintf(fp, "%s", fileBuffer);
    fclose(fp);
    printf("File Name %s Wrote to %s \n", fileName, tempPath);
    fflush(stdout);

    memset(response, 0, sizeof response);
    strcpy(response, "Success: File Saved");
    send(threadSocket, response, strlen(response), 0);

    memset(permissions, 0, sizeof permissions);
    recv(threadSocket, permissions, sizeof(permissions) - 1, 0);
    strcpy(response, "Success: Permission Received");
    send(threadSocket, response, strlen(response), 0);

    memset(filePath, 0, sizeof filePath);
    recv(threadSocket, filePath, sizeof(filePath) - 1, 0);
    printf("Permission %s Received \n", permissions);
    fflush(stdout);

    /* BUG FIX: the original read the gid with atoi(permissions) after
     * strtok() had already cut the string at ':', so gid always equaled
     * uid. strtok(NULL, ...) continues with the next token. */
    char *tok = strtok(permissions, ":");
    userid = (tok != NULL) ? atoi(tok) : 0;
    tok = strtok(NULL, ":");
    groupid = (tok != NULL) ? atoi(tok) : userid;

    /* BUG FIX: save the current ids so they can be restored; the original
     * left the process running as the last client's uid/gid forever. */
    uid_t savedUid = geteuid();
    gid_t savedGid = getegid();

    memset(response, 0, sizeof response);
    /* Drop gid before uid — once euid is unprivileged, setegid() fails. */
    if (setegid(groupid) != 0 || seteuid(userid) != 0) {
        strcpy(response, "Failed: Permission Denied \n");
    } else {
        printf("File Path %s \n", filePath);
        printf("Copying %s to %s with permissions %d : %d \n",
               tempPath, filePath, geteuid(), getegid());
        fflush(stdout);
        if (copyFile(tempPath, filePath) != 0) {
            strcpy(response, "Failed: Permission Denied \n");
        } else {
            strcpy(response, "Success: File Moved \n");
        }
        /* Restore: uid first (regain privilege), then gid. */
        seteuid(savedUid);
        setegid(savedGid);
    }
    send(threadSocket, response, strlen(response), 0);

    pthread_mutex_unlock(&lock);
    close(threadSocket);
    pthread_exit(NULL);
}

int main(){
    int serverSocket;
    struct sockaddr_in serverAddr;
    struct sockaddr_storage serverStorage;
    socklen_t addr_size;

    serverSocket = socket(PF_INET, SOCK_STREAM, 0);
    if (serverSocket < 0) {
        perror("socket");
        return 1;
    }

    serverAddr.sin_family = AF_INET;
    serverAddr.sin_addr.s_addr = INADDR_ANY;
    serverAddr.sin_port = htons(8000);
    //Set all bits of the padding field to 0
    memset(serverAddr.sin_zero, '\0', sizeof serverAddr.sin_zero);

    //Bind the address struct to the socket (BUG FIX: result was ignored)
    if (bind(serverSocket, (struct sockaddr *) &serverAddr, sizeof(serverAddr)) != 0) {
        perror("bind");
        return 1;
    }

    //Listen on the socket, with 50 max connection requests queued
    if(listen(serverSocket,50)==0) {
        printf("Listening\n");
    } else {
        printf("Error\n");
    }
    fflush(stdout);

    pthread_t tid[60];
    int i = 0;

    while(1) {
        addr_size = sizeof serverStorage;
        int clientSocket = accept(serverSocket, (struct sockaddr *) &serverStorage, &addr_size);
        if (clientSocket < 0) {
            perror("accept");
            continue;
        }

        /* One heap-allocated fd per connection; the worker thread frees it. */
        int *fdCopy = malloc(sizeof *fdCopy);
        if (fdCopy == NULL) {
            close(clientSocket);
            continue;
        }
        *fdCopy = clientSocket;

        if( pthread_create(&tid[i], NULL, socketThread, fdCopy) != 0 ) {
            printf("Failed to create thread\n");
            free(fdCopy);
            close(clientSocket);
            continue;
        }

        /* BUG FIX: i was never incremented after a successful create, so
         * every thread reused tid[0] and the join block never executed. */
        if (++i >= 50) {
            for (int j = 0; j < i; j++) {
                pthread_join(tid[j], NULL);
            }
            i = 0;
        }
    }
    return 0;
}
276936.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
 */

/* G98 MSPDEC (video decoder) falcon engine glue for nouveau. */

#include "priv.h"

#include <nvif/class.h>

/* Engine init: pokes two values into MSPDEC-range registers.
 * NOTE(review): the register offsets/values (0x085010/0x08501c) are magic
 * taken from the original driver; their exact meaning is not documented
 * here — confirm against nouveau's hardware notes before changing. */
void
g98_mspdec_init(struct nvkm_falcon *mspdec)
{
	struct nvkm_device *device = mspdec->engine.subdev.device;
	nvkm_wr32(device, 0x085010, 0x0000ffd2);
	nvkm_wr32(device, 0x08501c, 0x0000fff2);
}

/* Falcon hookup table: PMC enable bit, init hook, and the single
 * G98_MSPDEC object class exposed to userspace. */
static const struct nvkm_falcon_func
g98_mspdec = {
	.pmc_enable = 0x01020000,
	.init = g98_mspdec_init,
	.sclass = {
		{ -1, -1, G98_MSPDEC },
		{}
	}
};

/* Constructor: registers the MSPDEC engine with the device.
 * Returns 0 on success or a negative errno from nvkm_mspdec_new_(). */
int
g98_mspdec_new(struct nvkm_device *device, int index,
	       struct nvkm_engine **pengine)
{
	return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
}
67787.c
/*
 * Runtime-configurable logging: routes uNabto log statements to stdout
 * and/or a remote syslog endpoint, filtered by (module, severity) bitmasks
 * that can be enabled/disabled at runtime, with optional syslog expiry.
 */
#include "unabto_dynamic_log_util.h"
#include <unabto/unabto_env_base.h>
#include <unabto/unabto_logging.h>
#include "unabto_dynamic_log.h"
#include <modules/log/unix/unabto_logging_unix.h>
#include <platforms/unabto_printf_logger.h>
#include <unabto/unabto_context.h>
#include <unabto/unabto_external_environment.h>
#include <modules/log/syslog/unabto_syslog.h>
#include <modules/log/unabto_log_header.h>

#include <stdarg.h>
#include <string.h>

static bool check_syslog_state();

/*
 * Parse a "module.severity" pattern (e.g. "all.trace") into the two
 * bitmasks via convert_module()/convert_severity().
 * Returns false (with an error log) when the '.' separator is missing or
 * either half fails to convert.
 * NOTE(review): strchr() assumes pattern is NUL-terminated even though a
 * patternLength is supplied — callers appear to guarantee this; confirm.
 */
bool convert_pattern_to_module_and_severity(const char* pattern, size_t patternLength, uint32_t* module, uint32_t* severity)
{
    const char* dotIndex = strchr(pattern, '.');
    const char* patternEnd = pattern+patternLength;
    const char* moduleStart;
    const char* moduleEnd;
    const char* severityStart;
    const char* severityEnd;
    if (dotIndex == NULL) {
        NABTO_LOG_ERROR(("No . in log pattern"));
        return false;
    }
    moduleStart = pattern;
    moduleEnd = dotIndex;
    severityStart = dotIndex+1;
    severityEnd = patternEnd;

    if (!convert_module(moduleStart, moduleEnd, module)) {
        return false;
    }

    if (!convert_severity(severityStart, severityEnd, severity)) {
        return false;
    }
    return true;
}

/* Active filters and syslog sink state (module/severity are OR-ed masks). */
static uint32_t stdout_module = 0;
static uint32_t stdout_severity = 0;

static uint32_t syslog_module = 0;
static uint32_t syslog_severity = 0;
static uint32_t syslog_host = 0;      /* IPv4 address of the syslog sink */
static uint16_t syslog_port = 0;
static uint32_t syslog_expire = 0;    /* seconds; 0 = never expires */
static nabto_stamp_t syslog_expire_stamp;
static bool syslog_enabled = false;
static bool syslog_initialized = false;

/*
 * Core log entry point: emit a formatted message to stdout and/or syslog
 * when the (module, severity) pair matches the respective enabled masks.
 */
void unabto_log_system_log(uint32_t module, uint32_t severity, const char* file, unsigned int line, const char* format, ...)
{
    if ((module & stdout_module) && (severity & stdout_severity)) {
        va_list args;
        unabto_log_header(file, line);
        va_start (args, format);
        vprintf (format, args);
        va_end (args);
        printf("\n");
    }

    if (check_syslog_state() && (module & syslog_module) && (severity & syslog_severity)) {
        va_list args;
        va_start (args, format);
        unabto_syslog(module, severity, file, line, syslog_host, syslog_port, format, args);
        va_end (args);
    }
    return;
}

/*
 * Like unabto_log_system_log(), but additionally dumps a binary buffer
 * (hex dump on stdout via log_buffer(); forwarded raw to syslog).
 */
void unabto_log_system_log_buffer(uint32_t module, uint32_t severity, const char* file, unsigned int line, const uint8_t* buffer, size_t bufferLength, const char* format, ...)
{
    if ((module & stdout_module) && (severity & stdout_severity)) {
        va_list args;
        unabto_log_header(file, line);
        va_start (args, format);
        vprintf (format, args);
        va_end (args);
        printf("\n");
        log_buffer(buffer, bufferLength);
    }

    if (check_syslog_state() && (module & syslog_module) && (severity & syslog_severity)) {
        va_list args;
        va_start (args, format);
        unabto_syslog_buffer(module, severity, file, line, syslog_host, syslog_port, buffer, bufferLength, format, args);
        va_end (args);
    }
}

/*
 * True when syslog output is currently active; also performs lazy expiry:
 * once the configured lifetime has passed, syslog is disabled in place.
 */
bool check_syslog_state() {
    if (!syslog_enabled) {
        return false;
    }
    if (syslog_expire && nabtoIsStampPassed(&syslog_expire_stamp)) {
        unabto_log_system_disable_syslog();
        NABTO_LOG_INFO(("Disabling syslog since it has expired"));
    }
    return syslog_enabled;
}

/* Enable stdout logging from a "module.severity" pattern string.
 * Returns false if the pattern cannot be parsed. */
bool unabto_log_system_enable_stdout_pattern(const char* pattern) {
    uint32_t module;
    uint32_t severity;
    if (!convert_pattern_to_module_and_severity(pattern, strlen(pattern), &module, &severity)) {
        return false;
    }
    return unabto_log_system_enable_stdout(module, severity);
}

/* OR the given masks into the active stdout filters; always succeeds. */
bool unabto_log_system_enable_stdout(uint32_t module, uint32_t severity) {
    stdout_module |= module;
    stdout_severity |= severity;
    return true;
}

/* Turn off all stdout logging by clearing both masks. */
void unabto_log_system_disable_stdout() {
    stdout_module = 0;
    stdout_severity = 0;
}

/*
 * Enable syslog from a pattern string (length-delimited, need not be the
 * whole string), performing one-time syslog module initialization first.
 * Returns false if the pattern cannot be parsed.
 */
bool unabto_log_system_enable_syslog_pattern(const char* pattern, size_t patternLength, uint32_t syslogHost, uint16_t syslogPort, uint32_t expire) {
    uint32_t module;
    uint32_t severity;
    if (!syslog_initialized) {
        unabto_syslog_init();
        syslog_initialized = true;
    }
    if (!convert_pattern_to_module_and_severity(pattern, patternLength, &module, &severity)) {
        return false;
    }
    return unabto_log_system_enable_syslog(module, severity, syslogHost, syslogPort, expire);
}

/*
 * Enable syslog with explicit masks, sink endpoint and lifetime.
 * A non-zero syslogExpire arms an expiry stamp (seconds -> milliseconds);
 * the masks are OR-ed into any previously enabled ones. Always succeeds.
 */
bool unabto_log_system_enable_syslog(uint32_t module, uint32_t severity, uint32_t syslogHost, uint16_t syslogPort, uint32_t syslogExpire) {
    syslog_host = syslogHost;
    syslog_port = syslogPort;
    syslog_expire = syslogExpire;
    if (syslog_expire > 0) {
        nabtoSetFutureStamp(&syslog_expire_stamp, (syslog_expire*1000));
    }
    syslog_module |= module;
    syslog_severity |= severity;
    syslog_enabled = true;
    return true;
}

/* Turn off syslog and clear all associated configuration. */
void unabto_log_system_disable_syslog() {
    syslog_enabled = false;
    syslog_host = 0;
    syslog_port = 0;
    syslog_expire = 0;
    syslog_module = 0;
    syslog_severity = 0;
}

/*
 * Remote-debug entry point: enable or disable syslog from a device-config
 * request. configStr is a length-delimited "module.severity" pattern.
 * Returns true on successful enable; false on parse failure or disable
 * (disable always "succeeds" but reports the initial false).
 * NOTE(review): the facility parameter is accepted but unused here.
 */
bool unabto_debug_syslog_config(bool enableSyslog, uint8_t facility, uint32_t ip, uint16_t port, uint32_t expire, const uint8_t* configStr, uint16_t configStrLength) {
    bool ret = false;
    nabto_endpoint ep;
    ep.addr.type = NABTO_IP_V4;
    ep.addr.addr.ipv4 = ip;
    ep.port = port;
    NABTO_LOG_INFO(("Enabling syslog " PRIep " %.*s expire %" PRIu32 " enabled %i", MAKE_EP_PRINTABLE(ep), configStrLength, configStr, expire, enableSyslog));
    if (enableSyslog) {
        ret = unabto_log_system_enable_syslog_pattern((const char*)configStr, configStrLength, ip, port, expire);
    } else {
        unabto_log_system_disable_syslog();
    }
    return ret;
}
190353.c
/* -------------------------------------------------------------------------
 *
 * File:        xsal_i_connections.c
 *
 * Copyright:   2006 Delphi Technologies, Inc., All Rights Reserved
 *              Information Contained Herein Is Proprietary and Confidential
 *
 * Author:      Jacek Roman (jacek.roman@delphi.com)
 *
 * Description: Implementation of Asynchronous Message Passing and
 *              Event Management
 *
 * -----------------------------------------------------------------------*/

#include "xsal_i_connections.h"
#include "xsal_i_assert.h"

//EM_FILENUM(XSAL_IPC_MODULE_ID, 2); /* Identify file for trace/assert purposes */

/* ---------------------
 *  Object Definitions
 * --------------------- */

/** Connections to other SAL processes, indexed by app id (slot 0 unused). */
SAL_Connection_T SAL_I_Connection_Id[SAL_MAX_APPS + 1];

/** Guards lazy population of the SAL_I_Connection_Id table. */
SAL_Mutex_T SAL_I_Connection_Id_Mutex;

/* -----------------------
 *  Function Definitions
 * ----------------------- */

/** Create the table mutex. Returns false (after a trace fault) on failure. */
bool_t SAL_I_Init_Connection_Module(void)
{
   bool_t mutex_ok = SAL_Create_Mutex(&SAL_I_Connection_Id_Mutex, NULL);

   if (mutex_ok)
   {
      return mutex_ok;
   }
   Tr_Fault("SAL_Init: SAL_I_Connection_Id_Mutex");
   return mutex_ok;
}

/** Disconnect every open connection and destroy the table mutex. */
void SAL_I_Deinit_Connection_Module(void)
{
   /** Start from connection with index 1.
    *  Connection with index 0 is not used.
    */
   size_t app = 1;

   while (app <= SAL_MAX_APPS)
   {
      SAL_Connection_T* conn = &SAL_I_Connection_Id[app];

      if (SAL_Is_Connected(conn))
      {
         (void) SAL_Disconnect(conn);
      }
      app++;
   }
   (void) SAL_Destroy_Mutex(&SAL_I_Connection_Id_Mutex);
}

/** Return the connection for app_id, lazily connecting it (under the
 *  table mutex) on first use. app_id must be in [1, SAL_MAX_APPS]. */
SAL_Connection_T* SAL_Get_Connection(SAL_App_Id_T app_id)
{
   SAL_Connection_T* conn;

   PBC_Require(app_id > 0, "Illegal app id");
   PBC_Require(app_id <= SAL_MAX_APPS, "Illegal app id");

   conn = &SAL_I_Connection_Id[app_id];
   if (!SAL_Is_Connected(conn) && SAL_Lock_Mutex(&SAL_I_Connection_Id_Mutex))
   {
      SAL_Connect(app_id, SAL_ROUTER_THREAD_ID, conn);
      (void) SAL_Unlock_Mutex(&SAL_I_Connection_Id_Mutex);
   }
   return conn;
}

/* -------------------------------------------------------------------------
 * Revision history:
 *
 * 07 Aug 2012 Dan Carman
 *    Improved error handling / connection on Pulses
 *
 * 01 Sep 2006, Jacek Roman (jacek.roman@delphi.com)
 *   Initial revision.
 *
 * -----------------------------------------------------------------------*/
491886.c
/*============================================================================ This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic Package, Release 3, by John R. Hauser. Copyright 2011, 2012, 2013, 2014 The Regents of the University of California (Regents). All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions, and the following two paragraphs of disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following two paragraphs of disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Regents nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
=============================================================================*/ #include <stdbool.h> #include <stdint.h> #include "platform.h" #include "internals.h" #include "softfloat.h" #ifdef SOFTFLOAT_FAST_INT64 uint_fast64_t extF80M_to_ui64_r_minMag( const extFloat80_t *aPtr, bool exact ) { return extF80_to_ui64_r_minMag( *aPtr, exact ); } #else uint_fast64_t extF80M_to_ui64_r_minMag( const extFloat80_t *aPtr, bool exact ) { const struct extFloat80M *aSPtr; uint_fast16_t uiA64; int32_t exp; uint64_t sig; int32_t shiftCount; bool sign; uint64_t z; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ aSPtr = (const struct extFloat80M *) aPtr; uiA64 = aSPtr->signExp; exp = expExtF80UI64( uiA64 ); sig = aSPtr->signif; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ if ( ! sig && (exp != 0x7FFF) ) return 0; shiftCount = 0x403E - exp; if ( 64 <= shiftCount ) { if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; return 0; } sign = signExtF80UI64( uiA64 ); if ( shiftCount < 0 ) { if ( sign || (shiftCount <= -63) ) goto invalid; shiftCount = -shiftCount; z = sig<<shiftCount; if ( z>>shiftCount != sig ) goto invalid; } else { z = sig; if ( shiftCount ) z >>= shiftCount; if ( sign && z ) goto invalid; if ( exact && shiftCount && (z<<shiftCount != sig) ) { softfloat_exceptionFlags |= softfloat_flag_inexact; } } return z; /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ invalid: softfloat_raiseFlags( softfloat_flag_invalid ); return UINT64_C( 0xFFFFFFFFFFFFFFFF ); } #endif
316089.c
/* ************************************************************************** */
/*                                                                            */
/*                                                        :::      ::::::::   */
/*   ft_memcpy.c                                        :+:      :+:    :+:   */
/*                                                    +:+ +:+         +:+     */
/*   By: mienache <marvin@42.fr>                    +#+  +:+       +#+        */
/*                                                +#+#+#+#+#+   +#+           */
/*   Created: 2017/12/12 18:55:50 by mienache          #+#    #+#             */
/*   Updated: 2017/12/12 19:57:38 by mienache         ###   ########.fr       */
/*                                                                            */
/* ************************************************************************** */

#include "libft.h"

/*
** Copies n bytes from src to dst, byte by byte, and returns dst.
** Regions must not overlap (use ft_memmove for that case).
**
** BUG FIX: the index was `unsigned int`, so for n > UINT_MAX the
** comparison `i < n` (i promoted to size_t) never became false after
** i wrapped around — an infinite loop. size_t matches n's full range.
** The source pointer is also const-qualified instead of casting away
** the const of `src`.
*/

void	*ft_memcpy(void *dst, const void *src, size_t n)
{
	size_t				i;
	unsigned char		*a;
	const unsigned char	*b;

	i = 0;
	a = (unsigned char *)dst;
	b = (const unsigned char *)src;
	while (i < n)
	{
		a[i] = b[i];
		i++;
	}
	return (dst);
}
419782.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Advance c[0..d-1] to the next lexicographic permutation in place
 * (std::next_permutation style: find pivot, swap with successor,
 * reverse the tail). Returns 0 when c was the last permutation,
 * non-zero otherwise.
 */
int alg(char *c, int d)
{
	int i, k, t, tmp;

	/* Rightmost position where the sequence still increases. */
	for (k = d - 2; (k >= 0) && (c[k] >= c[k + 1]); k--)
		;
	if (k == -1)
		return 0;
	/* Smallest element to the right that is larger than c[k]. */
	for (t = d - 1; (c[k] >= c[t]) && (t >= k + 1); t--)
		;
	tmp = c[k];
	c[k] = c[t];
	c[t] = tmp;
	/* Reverse the tail c[k+1 .. d-1]. */
	for (i = k + 1; i <= (d + k) / 2; i++)
	{
		t = d + k - i;
		tmp = c[i];
		c[i] = c[t];
		c[t] = tmp;
	}
	return i;
}

/*
 * Shortest-common-superstring length by brute force: read n strings,
 * compute the pairwise suffix/prefix overlap table, then try every
 * ordering and print the minimum total length (sum of lengths minus
 * the best chain of overlaps).
 */
int main(void)
{
	int n, k, i, l = 0, l1, l2, z, j, sum, min;

	if (scanf("%d ", &n) != 1 || n <= 0)
		return 1;
	char b[n];
	for (i = 0; i < n; i++)
		b[i] = i;

	char **st = malloc(n * sizeof *st);
	if (st == NULL)
		return 1;
	for (k = 0; k < n; k++)
	{
		st[k] = malloc(100);
		if (st[k] == NULL)
			return 1;
		/* BUG FIX: gets() is unsafe (removed in C11); fgets() bounds the
		 * read, and the newline is stripped to match gets() behavior. */
		if (fgets(st[k], 100, stdin) == NULL)
			st[k][0] = '\0';
		st[k][strcspn(st[k], "\n")] = '\0';
	}
	for (i = 0; i < n; i++)
		l += strlen(st[i]);

	int a[n][n];
	for (i = 0; i < n; i++)
		for (k = 0; k < n; k++)
			a[i][k] = 0;

	/* a[i][k] = longest proper overlap: a suffix of st[i] that equals a
	 * prefix of st[k]. */
	for (i = 0; i < n; i++)
	{
		l1 = strlen(st[i]);
		/* BUG FIX: the VLAs were sized l1/l2 exactly; an empty input
		 * string made them zero-length (undefined). +1 also leaves
		 * explicit room for the terminator. */
		char s1[l1 + 1];
		for (k = 0; k < n; k++)
		{
			if (i == k)
			{
				a[i][k] = 0;
			}
			else
			{
				l2 = strlen(st[k]);
				char s2[l2 + 1];
				for (z = l1 - 1, j = 1; (z > 0) && (j < l2); z--, j++)
				{
					strcpy(s1, st[i] + z);
					strncpy(s2, st[k], j);
					s2[j] = '\0';
					if (strcmp(s1, s2) == 0)
						a[i][k] = j;
				}
			}
		}
	}

	min = l;
	do
	{
		/* BUG FIX: the original nested two loops over the same index i;
		 * one pass over the permutation computes the total overlap. */
		sum = 0;
		for (i = 0; i < n - 1; i++)
			sum += a[b[i]][b[i + 1]];
		if (min > l - sum)
			min = l - sum;
	} while (alg(b, n) != 0);

	/* BUG FIX: min is an int; printing it with "%ld" was undefined
	 * behavior. */
	printf("%d\n", min);

	/* BUG FIX: each row was leaked — only the pointer array was freed. */
	for (k = 0; k < n; k++)
		free(st[k]);
	free(st);
	return 0;
}
920563.c
/* * Amazon FreeRTOS mbedTLS-based PKCS#11 V1.0.2 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * http://aws.amazon.com/freertos * http://www.FreeRTOS.org */ /** * @file pkcs11.c * @brief mbedTLS PKCS#11 implementation for software keys. This * file deviates from the FreeRTOS style standard for some function names and * data types in order to maintain compliance with the PKCS#11 standard. */ /* FreeRTOS includes. */ #include "FreeRTOS.h" #include "FreeRTOSIPConfig.h" #include "aws_pkcs11_config.h" #include "task.h" #include "aws_crypto.h" #include "aws_pkcs11.h" /* mbedTLS includes. */ #include "mbedtls/pk.h" #include "mbedtls/pk_internal.h" #include "mbedtls/x509_crt.h" #include "mbedtls/ctr_drbg.h" #include "mbedtls/entropy.h" #include "mbedtls/sha256.h" #include "mbedtls/base64.h" #include "aws_clientcredential.h" /* C runtime includes. 
*/ #include <stdio.h> #include <string.h> typedef int ( * pfnMbedTlsSign )( void * ctx, mbedtls_md_type_t md_alg, const unsigned char * hash, size_t hash_len, unsigned char * sig, size_t * sig_len, int ( *f_rng )( void *, unsigned char *, size_t ), void * p_rng ); /** * @brief Key structure. */ typedef struct P11Key { mbedtls_pk_context xMbedPkCtx; mbedtls_x509_crt xMbedX509Cli; mbedtls_pk_info_t xMbedPkInfo; pfnMbedTlsSign pfnSavedMbedSign; void * pvSavedMbedPkCtx; } P11Key_t, * P11KeyPtr_t; /** * @brief Session structure. */ typedef struct P11Session { P11KeyPtr_t pxCurrentKey; CK_ULONG ulState; CK_BBOOL xOpened; CK_BBOOL xFindObjectInit; CK_BBOOL xFindObjectComplete; CK_OBJECT_CLASS xFindObjectClass; mbedtls_ctr_drbg_context xMbedDrbgCtx; mbedtls_entropy_context xMbedEntropyContext; mbedtls_pk_context xPublicKey; } P11Session_t, * P11SessionPtr_t; /** * @brief Cryptoki module attribute definitions. */ #define pkcs11SLOT_ID 1 #define pkcs11OBJECT_HANDLE_PUBLIC_KEY 1 #define pkcs11OBJECT_HANDLE_PRIVATE_KEY 2 #define pkcs11OBJECT_HANDLE_CERTIFICATE 3 #define pkcs11SUPPORTED_KEY_BITS 2048 /** * @brief Helper definitions. */ #define pkcs11CREATE_OBJECT_MIN_ATTRIBUTE_COUNT 3 #define pkcs11CERTIFICATE_ATTRIBUTE_COUNT 3 #define pkcs11PUBLIC_KEY_ATTRIBUTE_COUNT 3 #define pkcs11PRIVATE_KEY_ATTRIBUTE_COUNT 4 #define pkcs11CREATE_OBJECT_CLASS_ATTRIBUTE_INDEX 0 #define pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX 1 #define pkcs11CREATE_OBJECT_CERTIFICATE_TYPE_ATTRIBUTE_INDEX 2 #define pkcs11CREATE_OBJECT_PUBLIC_KEY_VALUE_ATTRIBUTE_INDEX 2 #define pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX 3 /** * @brief Write file to filesystem (see PAL). */ extern BaseType_t PKCS11_PAL_SaveFile( char * pcFileName, uint8_t * pucData, uint32_t ulDataSize ); /** * @brief Read file from filesystem (see PAL). 
 */
extern BaseType_t PKCS11_PAL_ReadFile( char * pcFileName,
                                       uint8_t ** ppucData,
                                       uint32_t * pulDataSize );

/**
 * @brief Free the buffer allocated in PKCS11_PAL_ReadFile (see PAL).
 */
extern void PKCS11_PAL_ReleaseFileData( uint8_t * pucBuffer,
                                        uint32_t ulBufferSize );

/*-----------------------------------------------------------*/

/**
 * @brief Maps an opaque caller session handle into its internal state structure.
 *
 * Note: the handle IS the pointer (no validation table), so a bogus handle
 * from the caller is dereferenced as-is by the C_* functions below.
 */
P11SessionPtr_t prvSessionPointerFromHandle( CK_SESSION_HANDLE xSession )
{
    return ( P11SessionPtr_t ) xSession; /*lint !e923 Allow casting integer type to pointer for handle. */
}

/**
 * @brief Sign a cryptographic hash with the private key.
 *
 * Installed into the mbed TLS pk_info table by prvInitializeKey() so that
 * mbed TLS signing requests are routed back through this PKCS#11 module
 * (C_SignInit/C_Sign).
 *
 * @param[in] pvContext Crypto context; actually the P11SessionPtr_t stored
 * into pk_ctx by prvInitializeKey().
 * @param[in] xMdAlg Unused.
 * @param[in] pucHash Byte array of hash to be signed.
 * @param[in] uiHashLen Length in bytes of hash to be signed.
 * @param[out] pucSig RSA signature bytes.
 * @param[in] pxSigLen Length in bytes of signature buffer.
 * @param[in] piRng Unused.
 * @param[in] pvRng Unused.
 *
 * @return Zero on success.
 */
static int prvPrivateKeySigningCallback( void * pvContext,
                                         mbedtls_md_type_t xMdAlg,
                                         const unsigned char * pucHash,
                                         unsigned int uiHashLen,
                                         unsigned char * pucSig,
                                         size_t * pxSigLen,
                                         int ( *piRng )( void *, unsigned char *, size_t ), /*lint !e955 This parameter is unused. */
                                         void * pvRng )
{
    BaseType_t xResult = 0;
    P11SessionPtr_t pxSession = ( P11SessionPtr_t ) pvContext;
    CK_MECHANISM xMech = { 0 };

    /* Unreferenced parameters. */
    ( void ) ( piRng );
    ( void ) ( pvRng );
    ( void ) ( xMdAlg );

    /* Use the PKCS#11 module to sign. */
    xMech.mechanism = CKM_SHA256;

    xResult = ( BaseType_t ) C_SignInit( ( CK_SESSION_HANDLE ) pxSession,
                                         &xMech,
                                         ( CK_OBJECT_HANDLE ) pxSession->pxCurrentKey );

    if( 0 == xResult )
    {
        xResult = ( BaseType_t ) C_Sign( ( CK_SESSION_HANDLE ) pxSession,
                                         ( CK_BYTE_PTR ) pucHash, /*lint !e9005 The interfaces are from 3rdparty libraries, we are not suppose to change them. */
                                         uiHashLen,
                                         pucSig,
                                         ( CK_ULONG_PTR ) pxSigLen );
    }

    return xResult;
}

/*-----------------------------------------------------------*/

/**
 * @brief Initializes a key structure.
 *
 * Parses the supplied PEM/DER key and certificate into the session's key
 * object, then swaps the mbed TLS sign function pointer and internal pk
 * context so that later mbed TLS signing calls are redirected through
 * prvPrivateKeySigningCallback() with the session pointer as context.
 *
 * @return CKR_OK on success; CKR_HOST_MEMORY on allocation failure;
 * CKR_FUNCTION_FAILED if key or certificate parsing fails.
 */
static CK_RV prvInitializeKey( P11SessionPtr_t pxSessionObj,
                               const char * pcEncodedKey,
                               const uint32_t ulEncodedKeyLength,
                               const char * pcEncodedCertificate,
                               const uint32_t ulEncodedCertificateLength )
{
    CK_RV xResult = 0;

    /*
     * Create the key structure, but allow an existing one to be used.
     */

    if( NULL == pxSessionObj->pxCurrentKey )
    {
        if( NULL == ( pxSessionObj->pxCurrentKey = ( P11KeyPtr_t ) pvPortMalloc(
                          sizeof( P11Key_t ) ) ) ) /*lint !e9087 Allow casting void* to other types. */
        {
            xResult = CKR_HOST_MEMORY;
        }
    }

    /*
     * Initialize the key field, if requested.
     */

    if( ( CKR_OK == xResult ) && ( NULL != pcEncodedKey ) )
    {
        memset( pxSessionObj->pxCurrentKey, 0, sizeof( P11Key_t ) );
        mbedtls_pk_init( &pxSessionObj->pxCurrentKey->xMbedPkCtx );

        if( 0 != mbedtls_pk_parse_key( &pxSessionObj->pxCurrentKey->xMbedPkCtx,
                                       ( const unsigned char * ) pcEncodedKey,
                                       ulEncodedKeyLength,
                                       NULL,
                                       0 ) )
        {
            xResult = CKR_FUNCTION_FAILED;
        }

        if( CKR_OK == xResult )
        {
            /* Swap out the signing function pointer. A copy of the pk_info
             * table is made so the library's shared table is not modified. */
            memcpy( &pxSessionObj->pxCurrentKey->xMbedPkInfo,
                    pxSessionObj->pxCurrentKey->xMbedPkCtx.pk_info,
                    sizeof( pxSessionObj->pxCurrentKey->xMbedPkInfo ) );
            pxSessionObj->pxCurrentKey->pfnSavedMbedSign =
                pxSessionObj->pxCurrentKey->xMbedPkInfo.sign_func;
            pxSessionObj->pxCurrentKey->xMbedPkInfo.sign_func =
                prvPrivateKeySigningCallback;
            pxSessionObj->pxCurrentKey->xMbedPkCtx.pk_info =
                &pxSessionObj->pxCurrentKey->xMbedPkInfo;

            /* Swap out the underlying internal key context; the session
             * pointer becomes the callback's pvContext. The original
             * context is restored by prvFreeKey() before freeing. */
            pxSessionObj->pxCurrentKey->pvSavedMbedPkCtx =
                pxSessionObj->pxCurrentKey->xMbedPkCtx.pk_ctx;
            pxSessionObj->pxCurrentKey->xMbedPkCtx.pk_ctx = pxSessionObj;
        }
    }

    /*
     * Initialize the certificate field, if requested.
     */

    if( ( CKR_OK == xResult ) && ( NULL != pcEncodedCertificate ) )
    {
        mbedtls_x509_crt_init( &pxSessionObj->pxCurrentKey->xMbedX509Cli );

        if( 0 != mbedtls_x509_crt_parse( &pxSessionObj->pxCurrentKey->xMbedX509Cli,
                                         ( const unsigned char * ) pcEncodedCertificate,
                                         ulEncodedCertificateLength ) )
        {
            xResult = CKR_FUNCTION_FAILED;
        }
    }

    return xResult;
}

/*-----------------------------------------------------------*/

/**
 * @brief Load the default key and certificate from storage.
 *
 * Falls back to the compile-time client credentials when the PAL has no
 * stored file. Buffers obtained from the PAL are released before return.
 */
static CK_RV prvLoadAndInitializeDefaultCertificateAndKey( P11SessionPtr_t pxSession )
{
    CK_RV xResult = 0;
    uint8_t * pucCertificateData = NULL;
    uint32_t ulCertificateDataLength = 0;
    BaseType_t xFreeCertificate = pdFALSE;
    uint8_t * pucKeyData = NULL;
    uint32_t ulKeyDataLength = 0;
    BaseType_t xFreeKey = pdFALSE;

    /* Read the certificate from storage. */
    if( pdFALSE == PKCS11_PAL_ReadFile( pkcs11configFILE_NAME_CLIENT_CERTIFICATE,
                                        &pucCertificateData,
                                        &ulCertificateDataLength ) )
    {
        pucCertificateData = ( uint8_t * ) clientcredentialCLIENT_CERTIFICATE_PEM;
        ulCertificateDataLength = clientcredentialCLIENT_CERTIFICATE_LENGTH;
    }
    else
    {
        xFreeCertificate = pdTRUE;
    }

    /* Read the private key from storage. */
    if( pdFALSE == PKCS11_PAL_ReadFile( pkcs11configFILE_NAME_KEY,
                                        &pucKeyData,
                                        &ulKeyDataLength ) )
    {
        pucKeyData = ( uint8_t * ) clientcredentialCLIENT_PRIVATE_KEY_PEM;
        ulKeyDataLength = clientcredentialCLIENT_PRIVATE_KEY_LENGTH;
    }
    else
    {
        xFreeKey = pdTRUE;
    }

    /* Attach the certificate and key to the session. */
    xResult = prvInitializeKey( pxSession,
                                ( const char * ) pucKeyData,
                                ulKeyDataLength,
                                ( const char * ) pucCertificateData,
                                ulCertificateDataLength );

    /* Stir the random pot. */
    mbedtls_entropy_update_manual( &pxSession->xMbedEntropyContext,
                                   pucKeyData,
                                   ulKeyDataLength );
    mbedtls_entropy_update_manual( &pxSession->xMbedEntropyContext,
                                   pucCertificateData,
                                   ulCertificateDataLength );

    /* Clean-up.
 */
    if( ( NULL != pucCertificateData ) && ( pdTRUE == xFreeCertificate ) )
    {
        PKCS11_PAL_ReleaseFileData( pucCertificateData, ulCertificateDataLength );
    }

    if( ( NULL != pucKeyData ) && ( pdTRUE == xFreeKey ) )
    {
        PKCS11_PAL_ReleaseFileData( pucKeyData, ulKeyDataLength );
    }

    return xResult;
}

/*-----------------------------------------------------------*/

/**
 * @brief Cleans up a key structure.
 *
 * Undoes the pk_ctx swap made by prvInitializeKey() so mbed TLS frees the
 * real key context instead of the session pointer, then releases the key
 * and certificate contexts and the structure itself.
 */
static void prvFreeKey( P11KeyPtr_t pxKey )
{
    if( NULL != pxKey )
    {
        /* Restore the internal key context. */
        pxKey->xMbedPkCtx.pk_ctx = pxKey->pvSavedMbedPkCtx;

        /* Clean-up. */
        mbedtls_pk_free( &pxKey->xMbedPkCtx );
        mbedtls_x509_crt_free( &pxKey->xMbedX509Cli );
        vPortFree( pxKey );
    }
}

/*
 * PKCS#11 module implementation.
 */

/**
 * @brief PKCS#11 interface functions implemented by this Cryptoki module.
 *
 * Unsupported entry points are NULL; slot positions follow the
 * CK_FUNCTION_LIST layout from the PKCS#11 headers.
 */
static CK_FUNCTION_LIST prvP11FunctionList =
{
    { CRYPTOKI_VERSION_MAJOR, CRYPTOKI_VERSION_MINOR },
    C_Initialize,
    C_Finalize,
    NULL,
    C_GetFunctionList,
    C_GetSlotList,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    C_OpenSession,
    C_CloseSession,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    C_CreateObject,
    NULL,
    C_DestroyObject,
    NULL,
    C_GetAttributeValue,
    NULL,
    C_FindObjectsInit,
    C_FindObjects,
    C_FindObjectsFinal,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    C_SignInit,
    C_Sign,
    NULL,
    NULL,
    NULL,
    NULL,
    C_VerifyInit,
    C_Verify,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    C_GenerateRandom,
    NULL,
    NULL,
    NULL
};

/**
 * @brief Initialize the Cryptoki module for use.
 */
#if !defined( pkcs11configC_INITIALIZE_ALT )
    CK_DEFINE_FUNCTION( CK_RV, C_Initialize )( CK_VOID_PTR pvInitArgs )
    { /*lint !e9072 It's OK to have different parameter name. */
        ( void ) ( pvInitArgs );

        /* Ensure that the FreeRTOS heap is used. */
        CRYPTO_ConfigureHeap();

        return CKR_OK;
    }
#endif

/**
 * @brief Un-initialize the Cryptoki module.
 *
 * Per PKCS#11, pvReserved must be NULL; there is no per-module state to
 * release here.
 */
CK_DEFINE_FUNCTION( CK_RV, C_Finalize )( CK_VOID_PTR pvReserved )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;

    if( NULL != pvReserved )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }

    return xResult;
}

/**
 * @brief Query the list of interface function pointers.
 */
CK_DEFINE_FUNCTION( CK_RV, C_GetFunctionList )( CK_FUNCTION_LIST_PTR_PTR ppxFunctionList )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;

    if( NULL == ppxFunctionList )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }
    else
    {
        *ppxFunctionList = &prvP11FunctionList;
    }

    return xResult;
}

/**
 * @brief Query the list of slots. A single default slot is implemented.
 */
CK_DEFINE_FUNCTION( CK_RV, C_GetSlotList )( CK_BBOOL xTokenPresent,
                                            CK_SLOT_ID_PTR pxSlotList,
                                            CK_ULONG_PTR pulCount )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;

    ( void ) ( xTokenPresent );

    if( NULL == pulCount )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }
    else if( NULL == pxSlotList )
    {
        /* Size query: report how many slot IDs would be returned. */
        *pulCount = 1;
    }
    else
    {
        if( 0u == *pulCount )
        {
            xResult = CKR_BUFFER_TOO_SMALL;
        }
        else
        {
            pxSlotList[ 0 ] = pkcs11SLOT_ID;
            *pulCount = 1;
        }
    }

    return xResult;
}

/**
 * @brief Start a session for a cryptographic command sequence.
 */
CK_DEFINE_FUNCTION( CK_RV, C_OpenSession )( CK_SLOT_ID xSlotID,
                                            CK_FLAGS xFlags,
                                            CK_VOID_PTR pvApplication,
                                            CK_NOTIFY xNotify,
                                            CK_SESSION_HANDLE_PTR pxSession )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;
    P11SessionPtr_t pxSessionObj = NULL;

    ( void ) ( xSlotID );
    ( void ) ( pvApplication );
    ( void ) ( xNotify );

    /* Check arguments. */
    if( NULL == pxSession )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }

    /* For legacy reasons, the CKF_SERIAL_SESSION bit MUST always be set. */
    if( 0 == ( CKF_SERIAL_SESSION & xFlags ) )
    {
        xResult = CKR_SESSION_PARALLEL_NOT_SUPPORTED;
    }

    /*
     * Make space for the context.
     */
    if( CKR_OK == xResult )
    {
        pxSessionObj = ( P11SessionPtr_t ) pvPortMalloc( sizeof( P11Session_t ) ); /*lint !e9087 Allow casting void* to other types. */

        if( NULL == pxSessionObj )
        {
            xResult = CKR_HOST_MEMORY;
        }
    }

    /*
     * Initialize RNG.
     */
    if( CKR_OK == xResult )
    {
        memset( pxSessionObj, 0, sizeof( P11Session_t ) );

        mbedtls_entropy_init( &pxSessionObj->xMbedEntropyContext );
        mbedtls_ctr_drbg_init( &pxSessionObj->xMbedDrbgCtx );

        if( 0 != mbedtls_ctr_drbg_seed( &pxSessionObj->xMbedDrbgCtx,
                                        mbedtls_entropy_func,
                                        &pxSessionObj->xMbedEntropyContext,
                                        NULL,
                                        0 ) )
        {
            xResult = CKR_FUNCTION_FAILED;
        }
    }

    if( CKR_OK == xResult )
    {
        /*
         * Assign the session.
         */
        pxSessionObj->ulState =
            0u != ( xFlags & CKF_RW_SESSION ) ? CKS_RW_PUBLIC_SESSION : CKS_RO_PUBLIC_SESSION;
        pxSessionObj->xOpened = CK_TRUE;

        /*
         * Return the session.
         */
        *pxSession = ( CK_SESSION_HANDLE ) pxSessionObj; /*lint !e923 Allow casting pointer to integer type for handle. */
    }

    /* NOTE(review): on a seed failure the entropy/DRBG contexts initialized
     * above are not freed before vPortFree() - confirm whether
     * mbedtls_ctr_drbg_free/mbedtls_entropy_free should be called here. */
    if( ( NULL != pxSessionObj ) && ( CKR_OK != xResult ) )
    {
        vPortFree( pxSessionObj );
    }

    return xResult;
}

/**
 * @brief Terminate a session and release resources.
 */
CK_DEFINE_FUNCTION( CK_RV, C_CloseSession )( CK_SESSION_HANDLE xSession )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );

    if( NULL != pxSession )
    {
        /*
         * Tear down the session.
         */
        if( NULL != pxSession->pxCurrentKey )
        {
            prvFreeKey( pxSession->pxCurrentKey );
        }

        /* Free the public key context if it exists. */
        if( NULL != pxSession->xPublicKey.pk_ctx )
        {
            mbedtls_pk_free( &pxSession->xPublicKey );
        }

        mbedtls_ctr_drbg_free( &pxSession->xMbedDrbgCtx );
        vPortFree( pxSession );
    }
    else
    {
        xResult = CKR_SESSION_HANDLE_INVALID;
    }

    return xResult;
}

/**
 * @brief Provides import and storage of a single client certificate and
 * associated private key.
 */
CK_DEFINE_FUNCTION( CK_RV, C_CreateObject )( CK_SESSION_HANDLE xSession,
                                             CK_ATTRIBUTE_PTR pxTemplate,
                                             CK_ULONG ulCount,
                                             CK_OBJECT_HANDLE_PTR pxObject )
{ /*lint !e9072 It's OK to have different parameter name.
 */
    CK_RV xResult = CKR_OK;
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );
    void * pvContext = NULL;
    int32_t lMbedTLSParseResult = ~0;

    /*
     * Check parameters.
     */
    if( ( pkcs11CREATE_OBJECT_MIN_ATTRIBUTE_COUNT > ulCount ) ||
        ( NULL == pxTemplate ) ||
        ( NULL == pxObject ) )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }

    if( CKR_OK == xResult )
    {
        if( ( CKA_CLASS != pxTemplate[ pkcs11CREATE_OBJECT_CLASS_ATTRIBUTE_INDEX ].type ) ||
            ( sizeof( CK_OBJECT_CLASS ) != pxTemplate[ pkcs11CREATE_OBJECT_CLASS_ATTRIBUTE_INDEX ].ulValueLen ) )
        {
            xResult = CKR_ARGUMENTS_BAD;
        }
    }

    /*
     * Handle the object by class.
     */
    if( CKR_OK == xResult )
    {
        switch( *( ( uint32_t * ) pxTemplate[ pkcs11CREATE_OBJECT_CLASS_ATTRIBUTE_INDEX ].pValue ) )
        {
            case CKO_CERTIFICATE:

                /* Validate the attribute count for this object class. */
                if( pkcs11CERTIFICATE_ATTRIBUTE_COUNT != ulCount )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Validate the next attribute type. */
                if( CKA_VALUE != pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX ].type )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Verify that the given certificate can be parsed. The parse
                 * is done into a throwaway context; only the raw bytes are
                 * persisted below. */
                pvContext = pvPortMalloc( sizeof( mbedtls_x509_crt ) );

                if( NULL != pvContext )
                {
                    mbedtls_x509_crt_init( ( mbedtls_x509_crt * ) pvContext );
                    lMbedTLSParseResult = mbedtls_x509_crt_parse(
                        ( mbedtls_x509_crt * ) pvContext,
                        pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX ].pValue,
                        pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX ].ulValueLen );
                    mbedtls_x509_crt_free( ( mbedtls_x509_crt * ) pvContext );
                    vPortFree( pvContext );
                }
                else
                {
                    xResult = CKR_HOST_MEMORY;
                    break;
                }

                if( 0 != lMbedTLSParseResult )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* NOTE(review): a certificate type that is neither USER nor
                 * ROOT falls through with CKR_OK but leaves *pxObject
                 * unset - confirm whether that case should be rejected. */
                if( *( ( uint32_t * ) pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_TYPE_ATTRIBUTE_INDEX ].pValue )
                    == pkcs11CERTIFICATE_TYPE_USER )
                {
                    /* Write out the client certificate. */
                    if( pdFALSE == PKCS11_PAL_SaveFile( pkcs11configFILE_NAME_CLIENT_CERTIFICATE,
                                                        pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX ].pValue,
                                                        pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_VALUE_ATTRIBUTE_INDEX ].ulValueLen ) )
                    {
                        xResult = CKR_DEVICE_ERROR;
                        break;
                    }
                    else
                    {
                        /* If successful, set object handle to certificate. */
                        *pxObject = pkcs11OBJECT_HANDLE_CERTIFICATE;
                    }
                }
                else if( *( ( uint32_t * ) pxTemplate[ pkcs11CREATE_OBJECT_CERTIFICATE_TYPE_ATTRIBUTE_INDEX ].pValue )
                         == pkcs11CERTIFICATE_TYPE_ROOT )
                {
                    /* Ignore writing the default root certificate. */
                }

                break;

            case CKO_PUBLIC_KEY:

                /* Validate the attribute count for this object class. */
                if( pkcs11PUBLIC_KEY_ATTRIBUTE_COUNT != ulCount )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Validate the next attribute type. */
                if( CKA_VALUE != pxTemplate[ pkcs11CREATE_OBJECT_PUBLIC_KEY_VALUE_ATTRIBUTE_INDEX ].type )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* If there's an existing public key, free it before importing a new one. */
                if( NULL != pxSession->xPublicKey.pk_ctx )
                {
                    mbedtls_pk_free( &pxSession->xPublicKey );
                }

                /* Parse the public key. Unlike certificates/private keys,
                 * the parsed context is kept on the session (not persisted
                 * to the PAL). */
                if( 0 != mbedtls_pk_parse_public_key( &( pxSession->xPublicKey ),
                                                      pxTemplate[ pkcs11CREATE_OBJECT_PUBLIC_KEY_VALUE_ATTRIBUTE_INDEX ].pValue,
                                                      pxTemplate[ pkcs11CREATE_OBJECT_PUBLIC_KEY_VALUE_ATTRIBUTE_INDEX ].ulValueLen ) )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                }
                else
                {
                    /* If successful, set object handle to public key. */
                    *pxObject = pkcs11OBJECT_HANDLE_PUBLIC_KEY;
                }

                break;

            case CKO_PRIVATE_KEY:

                /* Validate the attribute count for this object class. */
                if( pkcs11PRIVATE_KEY_ATTRIBUTE_COUNT != ulCount )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Find the key bytes. */
                if( CKA_VALUE != pxTemplate[ pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX ].type )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Verify that the given private key can be parsed. */
                pvContext = pvPortMalloc( sizeof( mbedtls_pk_context ) );

                if( NULL != pvContext )
                {
                    mbedtls_pk_init( ( mbedtls_pk_context * ) pvContext );
                    lMbedTLSParseResult = mbedtls_pk_parse_key(
                        ( mbedtls_pk_context * ) pvContext,
                        pxTemplate[ pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX ].pValue,
                        pxTemplate[ pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX ].ulValueLen,
                        NULL,
                        0 );
                    mbedtls_pk_free( ( mbedtls_pk_context * ) pvContext );
                    vPortFree( pvContext );
                }
                else
                {
                    xResult = CKR_HOST_MEMORY;
                    break;
                }

                if( 0 != lMbedTLSParseResult )
                {
                    xResult = CKR_ARGUMENTS_BAD;
                    break;
                }

                /* Write out the key. */
                if( pdFALSE == PKCS11_PAL_SaveFile( pkcs11configFILE_NAME_KEY,
                                                    pxTemplate[ pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX ].pValue,
                                                    pxTemplate[ pkcs11CREATE_OBJECT_PRIVATE_KEY_VALUE_ATTRIBUTE_INDEX ].ulValueLen ) )
                {
                    xResult = CKR_DEVICE_ERROR;
                    break;
                }
                else
                {
                    /* If successful, set object handle to private key. */
                    *pxObject = pkcs11OBJECT_HANDLE_PRIVATE_KEY;
                }

                break;

            default:
                xResult = CKR_ARGUMENTS_BAD;
        }
    }

    return xResult;
}

/**
 * @brief Free resources attached to an object handle.
 */
CK_DEFINE_FUNCTION( CK_RV, C_DestroyObject )( CK_SESSION_HANDLE xSession,
                                              CK_OBJECT_HANDLE xObject )
{ /*lint !e9072 It's OK to have different parameter name. */
    ( void ) ( xSession );
    ( void ) ( xObject );

    /*
     * This implementation uses virtual handles, and the certificate and
     * private key data are attached to the session, so nothing to do here.
     */
    return CKR_OK;
}

/**
 * @brief Query the value of the specified cryptographic object attribute.
 */
CK_DEFINE_FUNCTION( CK_RV, C_GetAttributeValue )( CK_SESSION_HANDLE xSession,
                                                  CK_OBJECT_HANDLE xObject,
                                                  CK_ATTRIBUTE_PTR pxTemplate,
                                                  CK_ULONG ulCount )
{ /*lint !e9072 It's OK to have different parameter name.
 */
    CK_RV xResult = CKR_OK;
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );
    CK_VOID_PTR pvAttr = NULL;
    CK_ULONG ulAttrLength = 0;
    mbedtls_pk_type_t xMbedPkType;
    CK_ULONG xP11KeyType, iAttrib, xKeyBitLen;

    if( NULL == pxTemplate )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }
    else
    {
        /*
         * Enumerate the requested attributes.
         */
        for( iAttrib = 0; iAttrib < ulCount && CKR_OK == xResult; iAttrib++ )
        {
            /*
             * Get the attribute data and size.
             *
             * NOTE(review): the CKA_KEY_TYPE, CKA_VALUE (certificate),
             * CKA_MODULUS_BITS/CKA_PRIME_BITS and CKA_VENDOR_DEFINED cases
             * dereference pxSession->pxCurrentKey without a NULL check -
             * confirm callers always run C_FindObjects (which loads the
             * default key) first.
             */
            switch( pxTemplate[ iAttrib ].type )
            {
                case CKA_KEY_TYPE:

                    /*
                     * Map the private key type between APIs.
                     */
                    xMbedPkType = mbedtls_pk_get_type( &pxSession->pxCurrentKey->xMbedPkCtx );

                    switch( xMbedPkType )
                    {
                        case MBEDTLS_PK_RSA:
                        case MBEDTLS_PK_RSA_ALT:
                        case MBEDTLS_PK_RSASSA_PSS:
                            xP11KeyType = CKK_RSA;
                            break;

                        case MBEDTLS_PK_ECKEY:
                        case MBEDTLS_PK_ECKEY_DH:
                            xP11KeyType = CKK_EC;
                            break;

                        case MBEDTLS_PK_ECDSA:
                            xP11KeyType = CKK_ECDSA;
                            break;

                        default:
                            xResult = CKR_ATTRIBUTE_VALUE_INVALID;
                            break;
                    }

                    ulAttrLength = sizeof( xP11KeyType );
                    pvAttr = &xP11KeyType;
                    break;

                case CKA_VALUE:

                    switch( xObject )
                    {
                        case pkcs11OBJECT_HANDLE_CERTIFICATE:
                            pvAttr = ( CK_VOID_PTR ) pxSession->pxCurrentKey->xMbedX509Cli.raw.p; /*lint !e9005 !e9087 Allow casting other types to void*. */
                            ulAttrLength = pxSession->pxCurrentKey->xMbedX509Cli.raw.len;
                            break;

                        case pkcs11OBJECT_HANDLE_PUBLIC_KEY:
                            pvAttr = ( CK_VOID_PTR ) ( &pxSession->xPublicKey );
                            ulAttrLength = sizeof( mbedtls_pk_context );
                            break;

                        default:
                            xResult = CKR_ARGUMENTS_BAD;
                            break;
                    }

                    break;

                case CKA_MODULUS_BITS:
                case CKA_PRIME_BITS:

                    /*
                     * Key strength size query, handled the same for RSA or ECDSA
                     * in this port.
                     */
                    xKeyBitLen = mbedtls_pk_get_bitlen( &pxSession->pxCurrentKey->xMbedPkCtx );
                    ulAttrLength = sizeof( xKeyBitLen );
                    pvAttr = &xKeyBitLen;
                    break;

                case CKA_VENDOR_DEFINED:

                    /*
                     * Return the key context for application-layer use.
                     */
                    ulAttrLength = sizeof( pxSession->pxCurrentKey->xMbedPkCtx );
                    pvAttr = &pxSession->pxCurrentKey->xMbedPkCtx;
                    break;

                default:
                    xResult = CKR_ATTRIBUTE_TYPE_INVALID;
                    break;
            }

            if( CKR_OK == xResult )
            {
                /*
                 * Copy out the data and size. A NULL pValue means the caller
                 * only wants the length.
                 */
                if( NULL != pxTemplate[ iAttrib ].pValue )
                {
                    if( pxTemplate[ iAttrib ].ulValueLen < ulAttrLength )
                    {
                        xResult = CKR_BUFFER_TOO_SMALL;
                    }
                    else
                    {
                        memcpy( pxTemplate[ iAttrib ].pValue, pvAttr, ulAttrLength );
                    }
                }

                pxTemplate[ iAttrib ].ulValueLen = ulAttrLength;
            }
        }
    }

    return xResult;
}

/**
 * @brief Begin an enumeration sequence for the objects of the specified type.
 */
CK_DEFINE_FUNCTION( CK_RV, C_FindObjectsInit )( CK_SESSION_HANDLE xSession,
                                                CK_ATTRIBUTE_PTR pxTemplate,
                                                CK_ULONG ulCount )
{ /*lint !e9072 It's OK to have different parameter name. */
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );
    CK_RV xResult = CKR_OK;

    ( void ) ( ulCount );

    /*
     * Check parameters.
     */
    if( NULL == pxTemplate )
    {
        xResult = CKR_ARGUMENTS_BAD;
    }
    else
    {
        /*
         * Allow filtering on a single object class attribute. Only
         * pxTemplate[ 0 ] is consulted; it is assumed to be CKA_CLASS.
         */
        pxSession->xFindObjectInit = CK_TRUE;
        pxSession->xFindObjectComplete = CK_FALSE;
        memcpy( &pxSession->xFindObjectClass,
                pxTemplate[ 0 ].pValue,
                sizeof( CK_OBJECT_CLASS ) );
    }

    return xResult;
}

/**
 * @brief Query the objects of the requested type.
 */
CK_DEFINE_FUNCTION( CK_RV, C_FindObjects )( CK_SESSION_HANDLE xSession,
                                            CK_OBJECT_HANDLE_PTR pxObject,
                                            CK_ULONG ulMaxObjectCount,
                                            CK_ULONG_PTR pulObjectCount )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;
    BaseType_t xDone = pdFALSE;
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );

    /*
     * Check parameters.
     */
    if( ( NULL == pxObject ) ||
        ( NULL == pulObjectCount ) )
    {
        xResult = CKR_ARGUMENTS_BAD;
        xDone = pdTRUE;
    }

    if( ( pdFALSE == xDone ) && ( ( CK_BBOOL ) CK_FALSE == pxSession->xFindObjectInit ) )
    {
        xResult = CKR_OPERATION_NOT_INITIALIZED;
        xDone = pdTRUE;
    }

    if( ( pdFALSE == xDone ) && ( 0u == ulMaxObjectCount ) )
    {
        xResult = CKR_ARGUMENTS_BAD;
        xDone = pdTRUE;
    }

    if( ( pdFALSE == xDone ) && ( ( CK_BBOOL ) CK_TRUE == pxSession->xFindObjectComplete ) )
    {
        /* The single virtual object was already reported. */
        *pulObjectCount = 0;
        xResult = CKR_OK;
        xDone = pdTRUE;
    }

    /*
     * Load the default private key and certificate. This is the lazy
     * initialization point for the session's key material.
     */
    if( ( pdFALSE == xDone ) && ( NULL == pxSession->pxCurrentKey ) )
    {
        if( CKR_OK != ( xResult = prvLoadAndInitializeDefaultCertificateAndKey( pxSession ) ) )
        {
            xDone = pdTRUE;
        }
    }

    if( pdFALSE == xDone )
    {
        /*
         * Return object handles based on find type.
         */
        switch( pxSession->xFindObjectClass )
        {
            case CKO_PRIVATE_KEY:
                *pxObject = pkcs11OBJECT_HANDLE_PRIVATE_KEY;
                *pulObjectCount = 1;
                break;

            case CKO_PUBLIC_KEY:
                *pxObject = pkcs11OBJECT_HANDLE_PUBLIC_KEY;
                *pulObjectCount = 1;
                break;

            case CKO_CERTIFICATE:
                *pxObject = pkcs11OBJECT_HANDLE_CERTIFICATE;
                *pulObjectCount = 1;
                break;

            default:
                *pxObject = 0;
                *pulObjectCount = 0;
                break;
        }

        pxSession->xFindObjectComplete = CK_TRUE;
    }

    return xResult;
}

/**
 * @brief Terminate object enumeration.
 */
CK_DEFINE_FUNCTION( CK_RV, C_FindObjectsFinal )( CK_SESSION_HANDLE xSession )
{ /*lint !e9072 It's OK to have different parameter name. */
    CK_RV xResult = CKR_OK;
    P11SessionPtr_t pxSession = prvSessionPointerFromHandle( xSession );

    /*
     * Check parameters.
     */
    if( ( CK_BBOOL ) CK_FALSE == pxSession->xFindObjectInit )
    {
        xResult = CKR_OPERATION_NOT_INITIALIZED;
    }
    else
    {
        /*
         * Clean-up find objects state.
         */
        pxSession->xFindObjectInit = CK_FALSE;
        pxSession->xFindObjectComplete = CK_FALSE;
        pxSession->xFindObjectClass = 0;
    }

    return xResult;
}

/**
 * @brief Begin a digital signature generation session.
*/ CK_DEFINE_FUNCTION( CK_RV, C_SignInit )( CK_SESSION_HANDLE xSession, CK_MECHANISM_PTR pxMechanism, CK_OBJECT_HANDLE xKey ) { CK_RV xResult = CKR_OK; /*lint !e9072 It's OK to have different parameter name. */ ( void ) ( xSession ); ( void ) ( xKey ); if( NULL == pxMechanism ) { xResult = CKR_ARGUMENTS_BAD; } return xResult; } /** * @brief Digitally sign the indicated cryptographic hash bytes. */ CK_DEFINE_FUNCTION( CK_RV, C_Sign )( CK_SESSION_HANDLE xSession, CK_BYTE_PTR pucData, CK_ULONG ulDataLen, CK_BYTE_PTR pucSignature, CK_ULONG_PTR pulSignatureLen ) { /*lint !e9072 It's OK to have different parameter name. */ CK_RV xResult = CKR_OK; P11SessionPtr_t pxSessionObj = prvSessionPointerFromHandle( xSession ); if( NULL == pulSignatureLen ) { xResult = CKR_ARGUMENTS_BAD; } if( CKR_OK == xResult ) { if( NULL == pucSignature ) { *pulSignatureLen = pkcs11SUPPORTED_KEY_BITS / 8; } else { /* * Check algorithm support. */ if( CKR_OK == xResult ) { if( ( CK_ULONG ) cryptoSHA256_DIGEST_BYTES != ulDataLen ) { xResult = CKR_DATA_LEN_RANGE; } } /* * Sign the data. */ if( CKR_OK == xResult ) { if( 0 != pxSessionObj->pxCurrentKey->pfnSavedMbedSign( pxSessionObj->pxCurrentKey->pvSavedMbedPkCtx, MBEDTLS_MD_SHA256, pucData, ulDataLen, pucSignature, ( size_t * ) pulSignatureLen, mbedtls_ctr_drbg_random, &pxSessionObj->xMbedDrbgCtx ) ) { xResult = CKR_FUNCTION_FAILED; } } } } return xResult; } /** * @brief Begin a digital signature verification session. */ CK_DEFINE_FUNCTION( CK_RV, C_VerifyInit )( CK_SESSION_HANDLE xSession, CK_MECHANISM_PTR pxMechanism, CK_OBJECT_HANDLE xKey ) { CK_RV xResult = CKR_OK; /*lint !e9072 It's OK to have different parameter name. */ ( void ) ( xSession ); ( void ) ( xKey ); if( NULL == pxMechanism ) { xResult = CKR_ARGUMENTS_BAD; } return xResult; } /** * @brief Verify the digital signature of the specified data using the public * key attached to this session. 
*/ CK_DEFINE_FUNCTION( CK_RV, C_Verify )( CK_SESSION_HANDLE xSession, CK_BYTE_PTR pucData, CK_ULONG ulDataLen, CK_BYTE_PTR pucSignature, CK_ULONG ulSignatureLen ) { CK_RV xResult = CKR_OK; P11SessionPtr_t pxSessionObj; /* * Check parameters. */ if( ( NULL == pucData ) || ( NULL == pucSignature ) ) { xResult = CKR_ARGUMENTS_BAD; } else { pxSessionObj = prvSessionPointerFromHandle( xSession ); /*lint !e9072 It's OK to have different parameter name. */ /* Verify the signature. If a public key is present, use it. */ if( NULL != pxSessionObj->xPublicKey.pk_ctx ) { if( 0 != mbedtls_pk_verify( &pxSessionObj->xPublicKey, MBEDTLS_MD_SHA256, pucData, ulDataLen, pucSignature, ulSignatureLen ) ) { xResult = CKR_SIGNATURE_INVALID; } } else { if( 0 != pxSessionObj->pxCurrentKey->xMbedPkInfo.verify_func( pxSessionObj->pxCurrentKey->pvSavedMbedPkCtx, MBEDTLS_MD_SHA256, pucData, ulDataLen, pucSignature, ulSignatureLen ) ) { xResult = CKR_SIGNATURE_INVALID; } } } /* Return the signature verification result. */ return xResult; } /** * @brief Generate cryptographically random bytes. */ CK_DEFINE_FUNCTION( CK_RV, C_GenerateRandom )( CK_SESSION_HANDLE xSession, CK_BYTE_PTR pucRandomData, CK_ULONG ulRandomLen ) { CK_RV xResult = CKR_OK; /*lint !e9072 It's OK to have different parameter name. */ P11SessionPtr_t pxSessionObj = prvSessionPointerFromHandle( xSession ); if( ( NULL == pucRandomData ) || ( ulRandomLen == 0 ) ) { xResult = CKR_ARGUMENTS_BAD; } else { if( 0 != mbedtls_ctr_drbg_random( &pxSessionObj->xMbedDrbgCtx, pucRandomData, ulRandomLen ) ) { xResult = CKR_FUNCTION_FAILED; } } return xResult; }
614587.c
/*----------------------------------------------------------------------------*/
/* Hobbit history viewer.                                                     */
/*                                                                            */
/* This is a CGI tool used to view the history of a status log.               */
/*                                                                            */
/* Copyright (C) 2003-2005 Henrik Storner <henrik@storner.dk>                 */
/*                                                                            */
/* This program is released under the GNU General Public License (GPL),       */
/* version 2. See the file "COPYING" for details.                             */
/*                                                                            */
/*----------------------------------------------------------------------------*/

static char rcsid[] = "$Id: bb-hist.c,v 1.47 2005-05-24 08:39:56 henrik Exp $";

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "bbgen.h"
#include "util.h"
#include "reportdata.h"

/* URL of this CGI, used to build self-referencing navigation links. */
static char selfurl[PATH_MAX];
static time_t req_endtime = 0;
static char *displayname = NULL;
static int wantserviceid = 1;

/* Period lengths (in alignment units) and captions for the summary bars. */
static int len1d = 24;
static char *bartitle1d = "1 day summary";
static int len1w = 7;
static char *bartitle1w = "1 week summary";
static int len4w = 28;
static char *bartitle4w = "4 week summary";
static int len1y = 12;
static char *bartitle1y = "1 year summary";

/* DEFPIXELS is defined by the configure script */
#ifndef DEFPIXELS
static int usepct = 1;
#define DEFPIXELS 0
#else
static int usepct = 0;
#endif
static int pixels = DEFPIXELS;

/* What colorbars and summaries to show by default */
#define BARSUM_1D 0x0001	/* 1-day bar */
#define BARSUM_1W 0x0002	/* 1-week bar */
#define BARSUM_4W 0x0004	/* 4-week bar */
#define BARSUM_1Y 0x0008	/* 1-year bar */

/* DEFBARSUMS is defined by the configure script */
#ifndef DEFBARSUMS
#define DEFBARSUMS (BARSUM_1D|BARSUM_1W)
#endif
static unsigned int barsums = DEFBARSUMS;

static char *barbkgcolor = "\"#000033\"";

/* Text colors for period tags, indexed by status color (COL_*). */
static char *tagcolors[COL_COUNT] = {
	"#3AF03A",	/* A bright green */
	"white",
	"blue",
	"purple",
	"yellow",
	"red"
};

/* Period alignment for calc_time(). */
#define ALIGN_HOUR 0
#define ALIGN_DAY 1
#define ALIGN_MONTH 2

/* Which summary bar is being generated. */
#define DAY_BAR 0
#define WEEK_BAR 1
#define MONTH_BAR 2
#define YEAR_BAR 3

/* Whether calc_time() snaps to the start/end of the period, or leaves it. */
#define END_START 0
#define END_END 
1
#define END_UNCHANGED 2

/*
 * Emit the HTML table showing, for one period, the percentage of time spent
 * in each status color, plus the minimum event duration representable at the
 * current pixel resolution (events shorter than half a pixel are dropped).
 */
static void generate_pct_summary(
		FILE *htmlrep,			/* output file */
		char *hostname, 
		char *service, 
		char *caption,
		reportinfo_t *repinfo,		/* Percent summaries for period */
		time_t secsperpixel)
{
	fprintf(htmlrep, "<TABLE BORDER=0 BGCOLOR=%s CELLPADDING=3 SUMMARY=\"Percent summary\">\n", barbkgcolor);
	fprintf(htmlrep, "<TR BGCOLOR=\"#333333\"><TD COLSPAN=6 ALIGN=CENTER><FONT SIZE=\"+1\">%s</FONT></TD></TR>\n", caption);
	fprintf(htmlrep, "<TR BGCOLOR=\"#333333\"><TD COLSPAN=6 ALIGN=CENTER><FONT SIZE=\"-1\">Min. duration shown: %s</FONT></TD></TR>\n", durationstr(secsperpixel / 2));

	/* One icon cell per status color. */
	fprintf(htmlrep, "<TR BGCOLOR=\"#000000\">\n");
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_GREEN, 0, 1), colorname(COL_GREEN), colorname(COL_GREEN),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_YELLOW, 0, 1), colorname(COL_YELLOW), colorname(COL_YELLOW),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_RED, 0, 1), colorname(COL_RED), colorname(COL_RED),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_PURPLE, 0, 1), colorname(COL_PURPLE), colorname(COL_PURPLE),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_CLEAR, 0, 1), colorname(COL_CLEAR), colorname(COL_CLEAR),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0></TD>\n",
		xgetenv("BBSKIN"), dotgiffilename(COL_BLUE, 0, 1), colorname(COL_BLUE), colorname(COL_BLUE),
		xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
	fprintf(htmlrep, "</TR>\n");

	/* Matching percentage cells, same color order as the icon row. */
	fprintf(htmlrep, "<TR BGCOLOR=\"#000033\">\n");
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_GREEN]);
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_YELLOW]);
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_RED]);
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_PURPLE]);
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_CLEAR]);
	fprintf(htmlrep, "<TD ALIGN=CENTER><B>%.2f%%</B></TD>\n", repinfo->fullpct[COL_BLUE]);
	fprintf(htmlrep, "</TR>\n");

	fprintf(htmlrep, "</TABLE>\n");
}

/*
 * Shift endtime by 'change' units of the given alignment (hour/day/month)
 * and optionally snap to the start or end of that period. Result is clamped
 * to "now" and returned as a unix timestamp.
 */
static unsigned int calc_time(time_t endtime, int change, int alignment, int endofperiod)
{
	int daysinmonth[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
	struct tm *tmbuf;
	time_t result, now;
	int dstsetting = -1;

again:
	tmbuf = localtime(&endtime);

	switch (alignment) {
	  case ALIGN_HOUR: 
		tmbuf->tm_hour += change;
		if (endofperiod == END_END) {
			tmbuf->tm_min = tmbuf->tm_sec = 59;
		}
		else if (endofperiod == END_START) {
			tmbuf->tm_min = tmbuf->tm_sec = 0;
		}
		break;

	  case ALIGN_DAY:
		tmbuf->tm_mday += change;
		if (endofperiod == END_END) {
			tmbuf->tm_hour = 23;
			tmbuf->tm_min = 59;
			tmbuf->tm_sec = 59;
		}
		else if (endofperiod == END_START) {
			tmbuf->tm_hour = tmbuf->tm_min = tmbuf->tm_sec = 0;
		}
		break;

	  case ALIGN_MONTH:
		tmbuf->tm_mon += change;
		if (endofperiod == END_END) {
			/* Need to find the last day of the month */
			/* NOTE(review): after the += change above, tm_mon may be
			 * outside 0..11 (mktime would normalize it later), so
			 * daysinmonth[tmbuf->tm_mon] can index out of bounds -
			 * confirm callers never pass a month-crossing change here. */
			tmbuf->tm_mday = daysinmonth[tmbuf->tm_mon];
			if (tmbuf->tm_mon == 1) {
				/* February: Gregorian leap-year rules (4 / 100 / 400). */
				if (((tmbuf->tm_year + 1900) % 4) == 0) {
					tmbuf->tm_mday = 29;
					if (((tmbuf->tm_year + 1900) % 100) == 0) tmbuf->tm_mday = 28;
					if (((tmbuf->tm_year + 1900) % 400) == 0) tmbuf->tm_mday = 29;
				}
			}
			tmbuf->tm_hour = 23;
			tmbuf->tm_min = 59;
			tmbuf->tm_sec = 59;
		}
		else if (endofperiod == END_START) {
			tmbuf->tm_mday = 1;
			tmbuf->tm_hour = tmbuf->tm_min = tmbuf->tm_sec = 0;
		}
		break;
	}

	/* Let mktime() decide DST on the first pass (-1). */
	tmbuf->tm_isdst = dstsetting;
	result = mktime(tmbuf);
	if ((dstsetting == -1) && (endofperiod == END_END) && (result < endtime)) {
		/* DST->normaltime switchover - redo with forced DST setting.
		 * dstsetting becomes 0, so this retries at most once. */
		dprintf("DST rollover with endtime/change/alignment/endodperiod = %u/%d/%d/%d\n", 
			(unsigned int)endtime, change, alignment, endofperiod);
		dstsetting = 0;
		goto again;
	}

	/* Dont try to foresee the future */
	now = time(NULL);
	if (result > now) result = now;

	return (unsigned int)result;
}

/*
 * Return the worst (highest-valued) status color among log events that
 * overlap the interval [begintime, endtime).
 */
static int maxcolor(replog_t *periodlog, time_t begintime, time_t endtime)
{
	int result = COL_GREEN;
	replog_t *walk = periodlog;

	while (walk) {
		if (walk->color > result) {
			/*
			 * We want this event, IF:
			 * - it starts sometime during begintime -> endtime, or
			 * - it starts before begintime, but lasts into after begintime.
			 */
			if ( ((walk->starttime >= begintime) && (walk->starttime < endtime)) ||
			     ((walk->starttime < begintime) && ((walk->starttime + walk->duration) >= begintime)) ) {
				result = walk->color;
			}
		}

		walk = walk->next;
	}

	return result;
}

static void generate_colorbar(
		FILE *htmlrep,			/* Output file */
		time_t begintime, 
		time_t endtime,
		int alignment,			/* Align by hour/day/month */
		int bartype,			/* Day/Week/Month/Year bar */
		char *hostname, 
		char *service,
		char *caption,			/* Title */
		replog_t *periodlog,		/* Log entries for period */
		reportinfo_t *repinfo)		/* Info for the percent summary */
{
	int secsperpixel;
	char *pctstr = "";
	replog_t *colorlog, *walk;
	int changeval = 0;
	int changealign = 0;

	/*
	 * Pixel-based charts are better, but for backwards
	 * compatibility allow for a graph that has 100 "pixels"
	 * and adds a "%" to the width specs.
*/ if (usepct) { pixels = 100; pctstr = "%"; } /* How many seconds required for 1 pixel */ secsperpixel = ((endtime - begintime) / pixels); /* Need to re-sort the period-log to chronological order */ colorlog = NULL; { replog_t *tmp; for (walk = periodlog; (walk); walk = tmp) { tmp = walk->next; walk->next = colorlog; colorlog = walk; walk = tmp; } } /* Determine the back/forward link times */ switch (bartype) { case DAY_BAR : changeval = len1d; changealign = ALIGN_HOUR; break; case WEEK_BAR : changeval = len1w; changealign = ALIGN_DAY; break; case MONTH_BAR : changeval = len4w; changealign = ALIGN_DAY; break; case YEAR_BAR : changeval = len1y; changealign = ALIGN_MONTH; break; } /* Beginning of page */ fprintf(htmlrep, "<TABLE SUMMARY=\"Bounding rectangle\" WIDTH=\"%d%s\" BORDER=0 BGCOLOR=\"#666666\">\n", pixels, pctstr); fprintf(htmlrep, "<TR><TD>\n"); /* The date stamps, percent summaries and zoom/reset links */ fprintf(htmlrep, "<TABLE SUMMARY=\"%s\" WIDTH=\"100%%\" BORDER=0 FRAME=VOID CELLSPACING=0 CELLPADDING=1 BGCOLOR=\"#000033\">\n", caption); fprintf(htmlrep, "<TR BGCOLOR=%s><TD>\n", barbkgcolor); fprintf(htmlrep, " <TABLE SUMMARY=\"Adjustment, Past navigation\" WIDTH=\"100%%\" BORDER=0 CELLSPACING=0 CELLPADDING=0>\n"); if (usepct) { fprintf(htmlrep, " <TR><TD ALIGN=RIGHT VALIGN=TOP><A HREF=\"%s&amp;PIXELS=%d\">Time reset</A></TD></TR>\n", selfurl, (usepct ? 
0 : pixels)); } else { fprintf(htmlrep, " <TR><TD ALIGN=RIGHT VALIGN=TOP><A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d\">Zoom +</A></TD></TR>\n", selfurl, (unsigned int)endtime, pixels+200); if (pixels > 200) { fprintf(htmlrep, " <TR><TD ALIGN=RIGHT VALIGN=TOP><A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d\">Zoom -</A></TD></TR>\n", selfurl, (unsigned int)endtime, pixels-200); } } fprintf(htmlrep, " <TR><TD ALIGN=LEFT VALIGN=BOTTOM><BR>\n"); if (colorlog && colorlog->starttime <= begintime) { fprintf(htmlrep, "<A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d\">", selfurl, calc_time(endtime, -changeval, changealign, END_UNCHANGED), (usepct ? 0 : pixels)); } fprintf(htmlrep, "<B>%s</B>", ctime(&begintime)); if (colorlog && colorlog->starttime <= begintime) fprintf(htmlrep, "</A>"); fprintf(htmlrep, "\n </TD></TR>\n"); fprintf(htmlrep, " </TABLE>\n"); fprintf(htmlrep, "</TD>\n"); fprintf(htmlrep, "<TD ALIGN=CENTER>\n"); generate_pct_summary(htmlrep, hostname, service, caption, repinfo, secsperpixel); fprintf(htmlrep, "</TD>\n"); fprintf(htmlrep, "<TD>\n"); fprintf(htmlrep, " <TABLE SUMMARY=\"Adjustment, Future navigation\" WIDTH=\"100%%\" BORDER=0 CELLSPACING=0 CELLPADDING=0>\n"); fprintf(htmlrep, " <TR><TD ALIGN=LEFT VALIGN=TOP><A HREF=\"%s&amp;PIXELS=%d\">Time reset</A></TD></TR>\n", selfurl, (usepct ? 0 : pixels)); if (!usepct) { fprintf(htmlrep, " <TR><TD ALIGN=LEFT VALIGN=TOP><A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d\">Zoom reset</A></TD></TR>\n", selfurl, (unsigned int)endtime, DEFPIXELS); } fprintf(htmlrep, " <TR><TD ALIGN=RIGHT VALIGN=BOTTOM><BR>\n"); fprintf(htmlrep, " <A HREF=\"%s&amp;ENDTIME=%d&amp;PIXELS=%d\">", selfurl, calc_time(endtime, +changeval, changealign, END_UNCHANGED), (usepct ? 
0 : pixels)); fprintf(htmlrep, "<B>%s</B>", ctime(&endtime)); fprintf(htmlrep, "</A>\n"); fprintf(htmlrep, " </TD></TR>\n"); fprintf(htmlrep, " </TABLE>\n"); fprintf(htmlrep, "</TD>\n"); fprintf(htmlrep, "</TR>\n"); fprintf(htmlrep, "<TR BGCOLOR=%s><TD COLSPAN=5><HR></TD></TR>\n", barbkgcolor); fprintf(htmlrep, "</TABLE>\n"); /* The period marker line */ fprintf(htmlrep, "<TABLE SUMMARY=\"Periods\" WIDTH=\"100%%\" BORDER=0 FRAME=VOID CELLSPACING=0 CELLPADDING=0 BGCOLOR=\"#000033\">\n"); fprintf(htmlrep, "<TR>\n"); { time_t begininterval = begintime; time_t endofinterval; char tag[20]; char *bgcols[2] = { "\"#000000\"", "\"#555555\"" }; int curbg = 0; int intervalpixels, tagcolor; time_t minduration = 1800; struct tm *tmbuf; do { endofinterval = calc_time(begininterval, 0, alignment, END_END); dprintf("Period starts %u ends %u - %s", (unsigned int)begininterval, (unsigned int)endofinterval, ctime(&endofinterval)); tmbuf = localtime(&begininterval); switch (bartype) { case DAY_BAR : minduration = 1800; strftime(tag, sizeof(tag), "%H", tmbuf); break; case WEEK_BAR : minduration = 14400; strftime(tag, sizeof(tag), "%a", tmbuf); break; case MONTH_BAR : minduration = 43200; strftime(tag, sizeof(tag), "%d", tmbuf); break; case YEAR_BAR : minduration = 10*86400; strftime(tag, sizeof(tag), "%b", tmbuf); break; } intervalpixels = ((endofinterval - begininterval) / secsperpixel); tagcolor = maxcolor(colorlog, begininterval, endofinterval); fprintf(htmlrep, "<TD WIDTH=\"%d%s\" ALIGN=CENTER BGCOLOR=%s>", intervalpixels, pctstr, bgcols[curbg]); if ((endofinterval - begininterval) > minduration) { int dolink = (colorlog && endofinterval >= colorlog->starttime); if (dolink) fprintf(htmlrep, "<A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d\">", selfurl, (unsigned int)endofinterval, (usepct ? 
0 : pixels));
			fprintf(htmlrep, "<FONT COLOR=\"%s\"><B>%s</B></FONT>", tagcolors[tagcolor], tag);
			if (dolink) fprintf(htmlrep, "</A>");
		}
		fprintf(htmlrep, "</TD>\n");
		curbg = (1 - curbg);	/* Alternate the background color between intervals */

		if ((endofinterval + 1) <= begininterval) {
			/*
			 * This should not happen!
			 */
			fprintf(htmlrep, "Time moves backwards! begintime=%u, alignment=%d, begininterval=%u\n", (unsigned int)begintime, alignment, (unsigned int)begininterval);
			begininterval = endtime;
			/* NOTE(review): this recovery assignment is immediately clobbered by
			 * the unconditional "begininterval = endofinterval + 1" below, so the
			 * loop-termination safeguard has no effect. Looks like a missing
			 * "else" on the next statement - confirm against upstream bb-hist.c. */
		}
		begininterval = endofinterval + 1;
	} while (begininterval < endtime);
	}
	fprintf(htmlrep, "</TR>\n");
	fprintf(htmlrep, "</TABLE>\n");

	/* The actual color bar */
	fprintf(htmlrep, "<TABLE SUMMARY=\"Color status graph\" WIDTH=\"100%%\" BORDER=0 FRAME=VOID CELLSPACING=0 CELLPADDING=0 BGCOLOR=\"#000033\">\n");
	fprintf(htmlrep, "<TR>\n");

	/* First entry may not start at our report-start time */
	if (colorlog == NULL) {
		/* No data for period - all white */
		fprintf(htmlrep, "<TD WIDTH=\"100%%\" BGCOLOR=white NOWRAP>&nbsp;</TD>\n");
	}
	else if (colorlog->starttime > begintime) {
		/* Data starts after the bar does - so a white period in front */
		int pixels = ((colorlog->starttime - begintime) / secsperpixel);
		/* Round a half-pixel-or-more gap up to one pixel so it stays visible */
		if (((colorlog->starttime - begintime) >= (secsperpixel/2)) && (pixels == 0)) pixels = 1;
		if (pixels > 0) {
			fprintf(htmlrep, "<TD WIDTH=\"%d%s\" BGCOLOR=%s NOWRAP>&nbsp;</TD>\n", pixels, pctstr, "white");
		}
	}

	for (walk = colorlog; (walk); walk = walk->next) {
		/* Show each interval we have data for */
		int pixels = (walk->duration / secsperpixel);

		/* Intervals that give between 0.5 and 1 pixel are enlarged */
		if ((walk->duration >= (secsperpixel/2)) && (pixels == 0)) pixels = 1;
		if (pixels > 0) {
			/* COL_CLEAR is rendered white; all other colors use their own name */
			fprintf(htmlrep, "<TD WIDTH=\"%d%s\" BGCOLOR=%s NOWRAP>&nbsp;</TD>\n", pixels, pctstr, ((walk->color == COL_CLEAR) ? "white" : colorname(walk->color)));
		}
	}
	fprintf(htmlrep, "</TR>\n");
	fprintf(htmlrep, "</TABLE>\n");

	/* Close the surrounding bounding-rectangle table opened at the top of the bar */
	fprintf(htmlrep, "</TD>\n");
	fprintf(htmlrep, "</TR>\n");
	fprintf(htmlrep, "</TABLE>\n");
	fprintf(htmlrep, "<BR><BR>\n");
}

/*
 * Emit the HTML table listing individual history-log entries
 * (timestamp, status icon linking to the full log, and duration).
 * If entrycount is non-zero only that many entries are shown, with a
 * link to the full log; entrycount == 0 means "all entries".
 * Reads globals: selfurl, req_endtime, usepct, pixels, displayname.
 */
static void generate_histlog_table(FILE *htmlrep, char *hostname, char *service, int entrycount, replog_t *loghead)
{
	/* Alternating row background colors (pre-quoted for direct HTML emission) */
	char *bgcols[2] = { "\"#000000\"", "\"#000033\"" };
	int curbg = 0;
	replog_t *walk;

	fprintf(htmlrep, "<TABLE BORDER=0 BGCOLOR=\"#333333\" CELLSPACING=3 SUMMARY=\"History logs\">\n");
	fprintf(htmlrep, "<TR>\n");
	if (entrycount) {
		fprintf(htmlrep, "<TD COLSPAN=3 ALIGN=CENTER><B>Last %d log entries</B> ", entrycount);
		fprintf(htmlrep, "<A HREF=\"%s&amp;ENDTIME=%u&amp;PIXELS=%d&amp;ENTRIES=all\">(Full HTML log)</A></TD>\n", selfurl, (unsigned int)req_endtime, (usepct ? 0 : pixels));
	}
	else {
		fprintf(htmlrep, "<TD COLSPAN=3 ALIGN=CENTER><B>All log entries</B></TD>\n");
	}
	fprintf(htmlrep, "</TR>\n");

	/* Column headers */
	fprintf(htmlrep, "<TR BGCOLOR=\"#333333\">\n");
	fprintf(htmlrep, "<TD ALIGN=CENTER><FONT %s><B>Date</B></FONT></TD>\n", xgetenv("MKBBCOLFONT"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><FONT %s><B>Status</B></FONT></TD>\n", xgetenv("MKBBCOLFONT"));
	fprintf(htmlrep, "<TD ALIGN=CENTER><FONT %s><B>Duration</B></FONT></TD>\n", xgetenv("MKBBCOLFONT"));
	fprintf(htmlrep, "</TR>\n");

	/* One row per log entry, newest first (order of loghead list) */
	for (walk = loghead; (walk); walk = walk->next) {
		char start[30];

		strftime(start, sizeof(start), "%a %b %d %H:%M:%S %Y", localtime(&walk->starttime));
		fprintf(htmlrep, "<TR BGCOLOR=%s>\n", bgcols[curbg]);
		curbg = (1-curbg);
		fprintf(htmlrep, "<TD ALIGN=LEFT NOWRAP>%s</TD>\n", start);
		/* Status cell: colored dot image linking to the detailed log page */
		fprintf(htmlrep, "<TD ALIGN=CENTER BGCOLOR=\"#000000\">");
		fprintf(htmlrep, "<A HREF=\"%s/bb-histlog.sh?HOST=%s&amp;SERVICE=%s&amp;TIMEBUF=%s&amp;DISPLAYNAME=%s\">", xgetenv("CGIBINURL"), hostname, service, walk->timespec, displayname);
		fprintf(htmlrep, "<IMG SRC=\"%s/%s\" ALT=\"%s\" TITLE=\"%s\" HEIGHT=%s WIDTH=%s BORDER=0>", xgetenv("BBSKIN"), dotgiffilename(walk->color, 0, 1), colorname(walk->color), colorname(walk->color), xgetenv("DOTHEIGHT"), xgetenv("DOTWIDTH"));
		fprintf(htmlrep, "</A></TD>\n");
		fprintf(htmlrep, "<TD ALIGN=CENTER>%s</TD>\n", durationstr(walk->duration));
		fprintf(htmlrep, "</TR>\n\n");
	}
	fprintf(htmlrep, "</TABLE>\n");
}

/*
 * Generate the complete history page: header, per-period color bars
 * (1 day / 1 week / 4 weeks / 1 year), the recent-entries table, and footer.
 * A period's bar is only drawn when its event log (log1d/log1w/log4w/log1y)
 * is non-NULL. Reads globals: displayname, wantserviceid, bartitle* strings.
 */
void generate_history(FILE *htmlrep,			/* output file */
		      char *hostname, char *service,	/* Host and service we report on */
		      char *ip,				/* IP - for the header only */
		      time_t endtime,			/* End time of color-bar graphs */
		      time_t start1d,			/* Starttime of 1-day period */
		      reportinfo_t *repinfo1d,		/* Percent summaries for 1-day period */
		      replog_t *log1d,			/* Events during past 1 day */
		      time_t start1w,			/* Starttime of 1-week period */
		      reportinfo_t *repinfo1w,		/* Percent summaries for 1-week period */
		      replog_t *log1w,			/* Events during past 1 week */
		      time_t start4w,			/* Starttime of 4-week period */
		      reportinfo_t *repinfo4w,		/* Percent summaries for 4-week period */
		      replog_t *log4w,			/* Events during past 4 weeks */
		      time_t start1y,			/* Starttime of 1-year period */
		      reportinfo_t *repinfo1y,		/* Percent summaries for 1-year period */
		      replog_t *log1y,			/* Events during past 1 year */
		      int entrycount,			/* Log entry maxcount */
		      replog_t *loghead)		/* Eventlog for entrycount events back */
{
	sethostenv(displayname, ip, service, colorname(COL_GREEN));
	headfoot(htmlrep, "hist", "", "header", COL_GREEN);

	fprintf(htmlrep, "\n");
	fprintf(htmlrep, "<CENTER>\n");
	if (wantserviceid) {
		fprintf(htmlrep, "<BR><FONT %s><B>%s - %s</B></FONT><BR>\n", xgetenv("MKBBROWFONT"), displayname, service);
	}

	/* Create the color-bars */
	if (log1d) {
		/* 1-day bar */
		generate_colorbar(htmlrep, start1d, endtime, ALIGN_HOUR, DAY_BAR, hostname, service, bartitle1d, log1d, repinfo1d);
	}
	if (log1w) {
		/* 1-week bar */
		generate_colorbar(htmlrep, start1w, endtime, ALIGN_DAY, WEEK_BAR, hostname, service, bartitle1w, log1w, repinfo1w);
	}
	if (log4w) {
		/* 4-week bar */
		generate_colorbar(htmlrep, start4w, endtime, ALIGN_DAY, MONTH_BAR, hostname, service, bartitle4w, 
log4w, repinfo4w);
	}
	if (log1y) {
		/* 1-year bar */
		generate_colorbar(htmlrep, start1y, endtime, ALIGN_MONTH, YEAR_BAR, hostname, service, bartitle1y, log1y, repinfo1y);
	}

	/* Last N histlog entries */
	fprintf(htmlrep, "<CENTER>\n");
	generate_histlog_table(htmlrep, hostname, service, entrycount, loghead);
	fprintf(htmlrep, "</CENTER>\n");
	fprintf(htmlrep, "<BR><BR>\n");

	/* BBHISTEXT extensions */
	do_bbext(htmlrep, "BBHISTEXT", "hist");

	fprintf(htmlrep, "</CENTER>\n");
	headfoot(htmlrep, "hist", "", "footer", COL_GREEN);
}


/*
 * This program is invoked via CGI with QUERY_STRING containing:
 *
 *	HISTFILE=www,sample,com.conn
 *	ENTRIES=50
 */

/* Availability thresholds used when parsing the history file */
double reportgreenlevel = 99.995;
double reportwarnlevel = 98.0;

/* Request parameters, filled in by parse_query() */
char *hostname = "";
char *service = "";
char *ip = "";
int entrycount = 50;		/* Number of log entries to list; 0 = all */

/* Environment variables that must be set for this CGI to run (see envcheck) */
char *reqenv[] = {
	"BBHIST",
	"BBHISTLOGS",
	"BBREP",
	"BBREPURL",
	"BBSKIN",
	"CGIBINURL",
	"DOTWIDTH",
	"DOTHEIGHT",
	"MKBBCOLFONT",
	"MKBBROWFONT",
	NULL
};

/*
 * Emit a minimal HTML error page on stdout and terminate the CGI.
 * Note: msg is inserted into the page unescaped - callers only pass
 * fixed literal strings, so keep it that way.
 */
static void errormsg(char *msg)
{
	printf("Content-type: text/html\n\n");
	printf("<html><head><title>Invalid request</title></head>\n");
	printf("<body>%s</body></html>\n", msg);
	exit(1);
}

/*
 * Parse QUERY_STRING into the request globals:
 * hostname/service (from HISTFILE, commas mapped back to dots),
 * ip, entrycount, pixels/usepct, req_endtime, barsums, displayname.
 * Invalid requests terminate via errormsg().
 */
static void parse_query(void)
{
	char *query, *token;

	if (xgetenv("QUERY_STRING") == NULL) {
		errormsg("Invalid request");
		return;
	}
	else query = urldecode("QUERY_STRING");

	if (!urlvalidate(query, NULL)) {
		errormsg("Invalid request");
		return;
	}

	token = strtok(query, "&");
	while (token) {
		char *val;

		/* Split "NAME=VALUE"; val stays NULL when no '=' is present */
		val = strchr(token, '=');
		if (val) { *val = '\0'; val++; }

		/* NOTE(review): the branches below dereference val without a NULL
		 * check; a parameter given without "=value" (e.g. "&HISTFILE&")
		 * would crash in strrchr/strdup/atoi - verify against upstream. */
		if (argnmatch(token, "HISTFILE")) {
			/* HISTFILE is "host,name,dots-as-commas.service" */
			char *p = strrchr(val, '.');

			if (p) { *p = '\0'; service = strdup(p+1); }
			hostname = strdup(val);
			while ((p = strchr(hostname, ','))) *p = '.';
		}
		else if (argnmatch(token, "IP")) {
			ip = strdup(val);
		}
		else if (argnmatch(token, "ENTRIES")) {
			if (strcmp(val, "all") == 0) entrycount = 0;
			else entrycount = atoi(val);
			if (entrycount < 0) errormsg("Invalid parameter");
		}
		else if (argnmatch(token, "PIXELS")) {
			/* Explicit pixel width switches off percent-based bars */
			pixels = atoi(val);
			if (pixels > 0) usepct = 0; else usepct = 1;
		}
		else if (argnmatch(token, "ENDTIME")) {
			req_endtime = atol(val);
			if (req_endtime < 0) errormsg("Invalid parameter");
		}
		else if (argnmatch(token, "BARSUMS")) {
			barsums = atoi(val);
		}
		else if (argnmatch(token, "DISPLAYNAME")) {
			displayname = strdup(val);
		}

		token = strtok(NULL, "&");
	}

	if (!displayname) displayname = strdup(hostname);

	free(query);
}

/*
 * CGI entry point: parse options and query, scan the history file for each
 * requested period, then emit the history page on stdout.
 */
int main(int argc, char *argv[])
{
	char histlogfn[PATH_MAX];
	char tailcmd[PATH_MAX];
	FILE *fd;
	time_t start1d, start1w, start4w, start1y;
	reportinfo_t repinfo1d, repinfo1w, repinfo4w, repinfo1y, dummyrep;
	replog_t *log1d, *log1w, *log4w, *log1y;
	char *p;
	int argi;
	char *envarea = NULL;

	/* Command-line options (set by the webserver wrapper, not the browser) */
	for (argi=1; (argi < argc); argi++) {
		if (argnmatch(argv[argi], "--env=")) {
			char *p = strchr(argv[argi], '=');
			loadenv(p+1, envarea);
		}
		else if (argnmatch(argv[argi], "--area=")) {
			char *p = strchr(argv[argi], '=');
			envarea = strdup(p+1);
		}
		else if (strcmp(argv[argi], "--no-svcid") == 0) {
			wantserviceid = 0;
		}
	}

	redirect_cgilog("bb-hist");

	envcheck(reqenv);
	parse_query();

	/* Build our own URL */
	/* NOTE(review): selfurl is a fixed-size buffer filled with sprintf/strcat
	 * from request-derived strings; no bounds checking here - assumes
	 * urlvalidate() limits the input size. Confirm. */
	sprintf(selfurl, "%s/bb-hist.sh?HISTFILE=%s.%s", xgetenv("CGIBINURL"), commafy(hostname), service);
	p = selfurl + strlen(selfurl);
	sprintf(p, "&amp;BARSUMS=%d", barsums);
	if (strlen(ip)) {
		p = selfurl + strlen(selfurl);
		sprintf(p, "&amp;IP=%s", ip);
	}
	if (entrycount) {
		p = selfurl + strlen(selfurl);
		sprintf(p, "&amp;ENTRIES=%d", entrycount);
	}
	else strcat(selfurl, "&amp;ENTRIES=ALL");

	if (usepct) {
		/* Must modify 4-week charts to be 5-weeks, or the last day is 19% of the bar */
		/*
		 * Percent-based charts look awful with 24 hours / 7 days / 28 days / 12 months as basis
		 * because these numbers dont divide into 100 neatly. So the last item becomes
		 * too large (worst with the 28-day chart: 100/28 = 3, last becomes (100-27*3) = 19% wide).
		 * So adjust the periods to something that matches percent-based calculations better.
		 */
		len1d = 25; bartitle1d = "25 hour summary";
		len1w = 10; bartitle1w = "10 day summary";
		len4w = 33; bartitle4w = "33 day summary";
		len1y = 10; bartitle1y = "10 month summary";
	}

	/* History file lives under $BBHIST as "host,name.service" */
	sprintf(histlogfn, "%s/%s.%s", xgetenv("BBHIST"), commafy(hostname), service);
	fd = fopen(histlogfn, "r");
	if (fd == NULL) {
		errormsg("Cannot open history file");
	}

	log1d = log1w = log4w = log1y = NULL;
	if (req_endtime == 0) req_endtime = time(NULL);

	/*
	 * Calculate the beginning time of each colorbar. We go back the specified length
	 * of time, except 1 second - so days are from midnight -> 23:59:59 etc.
	 */
	start1d = calc_time(req_endtime, -len1d, ALIGN_HOUR, END_UNCHANGED) + 1;
	start1w = calc_time(req_endtime, -len1w, ALIGN_DAY, END_UNCHANGED) + 1;
	start4w = calc_time(req_endtime, -len4w, ALIGN_DAY, END_UNCHANGED) + 1;
	start1y = calc_time(req_endtime, -len1y, ALIGN_MONTH, END_UNCHANGED) + 1;

	/*
	 * Collect data for the color-bars and summaries. Multiple scans over the history file,
	 * but doing it all in one go would be hideously complex.
	 */
	if (barsums & BARSUM_1D) {
		parse_historyfile(fd, &repinfo1d, NULL, NULL, start1d, req_endtime, 1, reportwarnlevel, reportgreenlevel, NULL);
		log1d = save_replogs();
	}
	if (barsums & BARSUM_1W) {
		parse_historyfile(fd, &repinfo1w, NULL, NULL, start1w, req_endtime, 1, reportwarnlevel, reportgreenlevel, NULL);
		log1w = save_replogs();
	}
	if (barsums & BARSUM_4W) {
		parse_historyfile(fd, &repinfo4w, NULL, NULL, start4w, req_endtime, 1, reportwarnlevel, reportgreenlevel, NULL);
		log4w = save_replogs();
	}
	if (barsums & BARSUM_1Y) {
		parse_historyfile(fd, &repinfo1y, NULL, NULL, start1y, req_endtime, 1, reportwarnlevel, reportgreenlevel, NULL);
		log1y = save_replogs();
	}

	if (entrycount == 0) {
		/* All entries - just rewind the history file and do all of them */
		rewind(fd);
		parse_historyfile(fd, &dummyrep, NULL, NULL, 0, time(NULL), 1, reportwarnlevel, reportgreenlevel, NULL);
		fclose(fd);
	}
	else {
		/* Last 50 entries - we cheat and use "tail" in a pipe to pick the entries */
		/* NOTE(review): tailcmd is built from histlogfn, which embeds the
		 * CGI-supplied host/service names, and is passed to popen() (a shell).
		 * Assumes urlvalidate() rejects shell metacharacters - confirm, or
		 * this is a command-injection vector. */
		fclose(fd);
		sprintf(tailcmd, "tail -%d %s", entrycount, histlogfn);
		fd = popen(tailcmd, "r");
		if (fd == NULL) errormsg("Cannot run tail on the histfile");
		parse_historyfile(fd, &dummyrep, NULL, NULL, 0, time(NULL), 1, reportwarnlevel, reportgreenlevel, NULL);
		pclose(fd);
	}

	/* Now generate the webpage */
	printf("Content-Type: text/html\n\n");

	/* reploghead holds the entries collected by the last parse_historyfile() scan */
	generate_history(stdout, hostname, service, ip, req_endtime, 
			 start1d, &repinfo1d, log1d, 
			 start1w, &repinfo1w, log1w, 
			 start4w, &repinfo4w, log4w, 
			 start1y, &repinfo1y, log1y, 
			 entrycount, reploghead);

	return 0;
}
907225.c
/*
 * Counting upper-case and lower-case letters in a line of text read from
 * the keyboard (string / character-array exercise).
 *
 * Fixes vs. the original:
 *  - gets() (unbounded write, removed in C11) replaced with fgets(), which
 *    bounds the read to the buffer size. The trailing '\n' that fgets()
 *    keeps is not a letter, so the counts are unchanged.
 *  - void main() replaced with the standard int main(void).
 *  - Counting logic factored into a small helper.
 */
#include <stdio.h>
#include <locale.h>

/*
 * Count ASCII lower-case ('a'-'z') and upper-case ('A'-'Z') letters in
 * metin; results are returned through kucuk_harf and buyuk_harf.
 * Non-ASCII (e.g. Turkish accented) letters are not counted, matching
 * the original range-check behavior.
 */
static void harf_say(const char *metin, int *kucuk_harf, int *buyuk_harf)
{
	*kucuk_harf = 0;
	*buyuk_harf = 0;

	for (int sayac = 0; metin[sayac] != '\0'; sayac++) {
		char karakter = metin[sayac];
		if (karakter >= 'a' && karakter <= 'z') (*kucuk_harf)++;
		else if (karakter >= 'A' && karakter <= 'Z') (*buyuk_harf)++;
	}
}

int main(void)
{
	setlocale(LC_ALL, "Turkish");

	char metin[100];
	int kucuk_harf, buyuk_harf;

	printf("Metin giriniz: ");
	if (fgets(metin, sizeof metin, stdin) == NULL) {
		metin[0] = '\0';	/* EOF or read error: treat as empty input */
	}

	harf_say(metin, &kucuk_harf, &buyuk_harf);

	printf("\nMetnin içerisinde %d küçük ve %d büyük harf bulunuyor.\n", kucuk_harf, buyuk_harf);
	return 0;
}